Version in base suite: 9.2.3+ds-1+deb12u1 Base version: trafficserver_9.2.3+ds-1+deb12u1 Target version: trafficserver_9.2.4+ds-0+deb12u1 Base file: /srv/ftp-master.debian.org/ftp/pool/main/t/trafficserver/trafficserver_9.2.3+ds-1+deb12u1.dsc Target file: /srv/ftp-master.debian.org/policy/pool/main/t/trafficserver/trafficserver_9.2.4+ds-0+deb12u1.dsc .style.yapf | 421 ++++++++++ .yapfignore | 18 CHANGELOG-9.2.4 | 29 Makefile.am | 10 Makefile.in | 10 configure | 30 configure.ac | 4 contrib/python/compare_RecordsConfigcc.py | 1 debian/changelog | 9 debian/patches/0006-fix-doc-build.patch | 5 doc/admin-guide/files/records.config.en.rst | 25 doc/admin-guide/files/sni.yaml.en.rst | 6 doc/admin-guide/monitoring/statistics/core/http-connection.en.rst | 11 doc/admin-guide/plugins/block_errors.en.rst | 4 doc/admin-guide/plugins/header_freq.en.rst | 20 doc/appendices/command-line/traffic_server.en.rst | 2 doc/conf.py | 59 - doc/ext/doxygen.py | 18 doc/ext/traffic-server.py | 44 - doc/manpages.py | 91 -- include/tscore/ink_file.h | 4 iocore/aio/AIO.cc | 10 iocore/aio/I_AIO.h | 2 iocore/aio/P_AIO.h | 15 iocore/cache/Cache.cc | 19 iocore/cache/CacheDir.cc | 2 iocore/cache/CacheDisk.cc | 6 iocore/cache/CacheVol.cc | 2 iocore/cache/P_CacheVol.h | 4 iocore/cache/test/main.h | 19 iocore/net/P_SNIActionPerformer.h | 17 iocore/net/P_UnixNet.h | 10 iocore/net/SSLSNIConfig.cc | 4 iocore/net/TLSSNISupport.h | 1 iocore/net/UnixNet.cc | 20 iocore/net/UnixNetAccept.cc | 36 iocore/net/YamlSNIConfig.cc | 4 iocore/net/YamlSNIConfig.h | 2 mgmt/RecordsConfig.cc | 4 plugins/experimental/header_freq/header_freq.cc | 186 +++- plugins/experimental/slice/Config.h | 12 plugins/experimental/slice/Data.h | 9 plugins/experimental/slice/server.cc | 25 plugins/experimental/slice/slice.cc | 6 plugins/experimental/slice/util.cc | 2 plugins/experimental/ssl_session_reuse/tests/plug-load.test.py | 9 plugins/experimental/traffic_dump/json_utils.cc | 2 plugins/experimental/traffic_dump/post_process.py | 51 - plugins/experimental/uri_signing/python_signer/uri_signer.py | 8 plugins/header_rewrite/operators.cc | 2 plugins/s3_auth/s3_auth.cc | 5 proxy/Main.h | 2 proxy/ParentSelection.cc | 5 proxy/Plugin.cc | 2 proxy/http/HttpTransact.cc | 2 proxy/http/HttpTunnel.cc | 13 proxy/http/remap/unit-tests/test_NextHopConsistentHash.cc | 67 - proxy/http2/HTTP2.cc | 66 - proxy/http2/HTTP2.h | 2 proxy/http2/Http2ConnectionState.cc | 64 + proxy/http2/Http2ConnectionState.h | 12 proxy/http2/unit_tests/test_HpackIndexingTable.cc | 30 src/traffic_server/traffic_server.cc | 20 tests/gold_tests/autest-site/cli_tools.test.ext | 14 tests/gold_tests/autest-site/conditions.test.ext | 42 tests/gold_tests/autest-site/curl_header.test.ext | 43 - tests/gold_tests/autest-site/httpbin.test.ext | 8 tests/gold_tests/autest-site/init.cli.ext | 20 tests/gold_tests/autest-site/ip.test.ext | 1 tests/gold_tests/autest-site/microDNS.test.ext | 1 tests/gold_tests/autest-site/microserver.test.ext | 38 tests/gold_tests/autest-site/ordered_set_queue.py | 7 tests/gold_tests/autest-site/ports.py | 80 - tests/gold_tests/autest-site/setup.cli.ext | 27 tests/gold_tests/autest-site/traffic_replay.test.ext | 4 tests/gold_tests/autest-site/trafficserver.test.ext | 102 -- tests/gold_tests/autest-site/trafficserver_plugins.test.ext | 22 tests/gold_tests/autest-site/verifier_client.test.ext | 50 - tests/gold_tests/autest-site/verifier_server.test.ext | 64 + tests/gold_tests/autest-site/when.test.ext | 19 tests/gold_tests/basic/deny0.test.py | 53 - tests/gold_tests/bigobj/bigobj.test.py | 
92 -- tests/gold_tests/body_factory/http204_response.test.py | 14 tests/gold_tests/body_factory/http304_response.test.py | 11 tests/gold_tests/body_factory/http_head_no_origin.test.py | 1 tests/gold_tests/body_factory/http_with_origin.test.py | 68 - tests/gold_tests/cache/alternate-caching.test.py | 29 tests/gold_tests/cache/background_fill.test.py | 33 tests/gold_tests/cache/cache-control.test.py | 77 + tests/gold_tests/cache/cache-generation-clear.test.py | 32 tests/gold_tests/cache/cache-generation-disjoint.test.py | 33 tests/gold_tests/cache/cache-range-response.test.py | 19 tests/gold_tests/cache/cache-request-method.test.py | 53 - tests/gold_tests/cache/conditional-get-hit.test.py | 16 tests/gold_tests/cache/disjoint-wait-for-cache.test.py | 34 tests/gold_tests/cache/negative-caching.test.py | 96 +- tests/gold_tests/cache/negative-revalidating.test.py | 43 - tests/gold_tests/cache/vary-handling.test.py | 19 tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py | 102 +- tests/gold_tests/chunked_encoding/chunked_encoding.test.py | 81 - tests/gold_tests/chunked_encoding/chunked_encoding_disabled.test.py | 37 tests/gold_tests/chunked_encoding/chunked_encoding_h2.test.py | 34 tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml | 49 + tests/gold_tests/command_argument/verify_global_plugin.test.py | 55 - tests/gold_tests/command_argument/verify_remap_plugin.test.py | 46 - tests/gold_tests/connect/connect.test.py | 19 tests/gold_tests/cont_schedule/schedule.test.py | 20 tests/gold_tests/cont_schedule/schedule_on_pool.test.py | 20 tests/gold_tests/cont_schedule/schedule_on_thread.test.py | 20 tests/gold_tests/cont_schedule/thread_affinity.test.py | 20 tests/gold_tests/continuations/double.test.py | 21 tests/gold_tests/continuations/double_h2.test.py | 48 - tests/gold_tests/continuations/openclose.test.py | 48 - tests/gold_tests/continuations/openclose_h2.test.py | 62 - tests/gold_tests/continuations/session_id.test.py | 49 - tests/gold_tests/dns/dns_down_nameserver.test.py | 38 tests/gold_tests/dns/dns_host_down.test.py | 50 - tests/gold_tests/dns/dns_ttl.test.py | 53 - tests/gold_tests/dns/splitdns.test.py | 30 tests/gold_tests/forward_proxy/forward_proxy.test.py | 38 tests/gold_tests/h2/h2active_timeout.py | 12 tests/gold_tests/h2/h2client.py | 21 tests/gold_tests/h2/h2disable.test.py | 28 tests/gold_tests/h2/h2disable_no_accept_threads.test.py | 28 tests/gold_tests/h2/h2enable.test.py | 30 tests/gold_tests/h2/h2enable_no_accept_threads.test.py | 30 tests/gold_tests/h2/h2spec.test.py | 29 tests/gold_tests/h2/http2.test.py | 152 +-- tests/gold_tests/h2/http2_flow_control.test.py | 69 - tests/gold_tests/h2/http2_priority.test.py | 43 - tests/gold_tests/h2/httpbin.test.py | 33 tests/gold_tests/h2/nghttp.test.py | 38 tests/gold_tests/headers/accept_webp.test.py | 30 tests/gold_tests/headers/cache_and_req_body.test.py | 85 +- tests/gold_tests/headers/cachedIMSRange.test.py | 105 +- tests/gold_tests/headers/domain-blacklist-30x.test.py | 18 tests/gold_tests/headers/field_name_space.test.py | 12 tests/gold_tests/headers/forwarded.test.py | 153 +-- tests/gold_tests/headers/general-connection-failure-502.test.py | 4 tests/gold_tests/headers/good_request_after_bad.test.py | 47 - tests/gold_tests/headers/hsts.test.py | 33 tests/gold_tests/headers/http408.test.py | 4 tests/gold_tests/headers/invalid_range_header.test.py | 30 tests/gold_tests/headers/normalize_ae.test.py | 20 tests/gold_tests/headers/syntax.test.py | 4 tests/gold_tests/headers/via.test.py | 29 
tests/gold_tests/ip_allow/ip_allow.test.py | 142 +-- tests/gold_tests/logging/all_headers.test.py | 20 tests/gold_tests/logging/custom-log.test.py | 41 tests/gold_tests/logging/log-debug-client-ip.test.py | 29 tests/gold_tests/logging/log-field-json.test.py | 71 - tests/gold_tests/logging/log-field.test.py | 75 - tests/gold_tests/logging/log-filenames.test.py | 109 +- tests/gold_tests/logging/log-filter.test.py | 33 tests/gold_tests/logging/log_pipe.test.py | 62 - tests/gold_tests/logging/log_retention.test.py | 188 +--- tests/gold_tests/logging/new_log_flds.test.py | 51 - tests/gold_tests/logging/new_log_flds_observer.py | 10 tests/gold_tests/logging/pipe_buffer_is_larger_than.py | 21 tests/gold_tests/logging/pqsi-pqsp.test.py | 38 tests/gold_tests/logging/sigusr2.test.py | 99 +- tests/gold_tests/logging/ts_process_handler.py | 16 tests/gold_tests/next_hop/strategies_ch/strategies_ch.test.py | 83 - tests/gold_tests/next_hop/strategies_ch2/strategies_ch2.test.py | 92 -- tests/gold_tests/next_hop/strategies_stale/strategies_stale.test.py | 87 -- tests/gold_tests/next_hop/zzz_strategies_peer/zzz_strategies_peer.test.py | 114 +- tests/gold_tests/next_hop/zzz_strategies_peer2/zzz_strategies_peer2.test.py | 97 +- tests/gold_tests/null_transform/null_transform.test.py | 29 tests/gold_tests/origin_connection/per_server_connection_max.test.py | 26 tests/gold_tests/parent_proxy/parent-retry.test.py | 47 + tests/gold_tests/pluginTest/CppDelayTransformation/CppDelayTransformation.test.py | 34 tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests.test.py | 252 ++--- tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cache_complete_responses.test.py | 314 +++---- tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey.test.py | 142 +-- tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey_global.test.py | 163 +-- tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_ims.test.py | 72 - tests/gold_tests/pluginTest/cert_update/cert_update.test.py | 63 - tests/gold_tests/pluginTest/client_context_dump/client_context_dump.test.py | 45 - tests/gold_tests/pluginTest/combo_handler/combo_handler.test.py | 50 - tests/gold_tests/pluginTest/compress/compress.test.py | 58 - tests/gold_tests/pluginTest/cookie_remap/bucketcookie.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/collapseslashes.test.py | 20 tests/gold_tests/pluginTest/cookie_remap/connector.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/existscookie.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/matchcookie.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/matchuri.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/matrixparams.test.py | 53 - tests/gold_tests/pluginTest/cookie_remap/notexistscookie.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/pcollapseslashes.test.py | 20 tests/gold_tests/pluginTest/cookie_remap/psubstitute.test.py | 40 tests/gold_tests/pluginTest/cookie_remap/regexcookie.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/setstatus.test.py | 13 tests/gold_tests/pluginTest/cookie_remap/subcookie.test.py | 27 tests/gold_tests/pluginTest/cookie_remap/substitute.test.py | 34 tests/gold_tests/pluginTest/cppapi/cppapi.test.py | 1 tests/gold_tests/pluginTest/esi/esi.test.py | 88 -- tests/gold_tests/pluginTest/esi/esi_304.test.py | 54 - tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py | 12 tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_cache.test.py | 30 
tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_method.test.py | 19 tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_ssn_txn_count.test.py | 55 - tests/gold_tests/pluginTest/header_rewrite/header_rewrite_l_value.test.py | 12 tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py | 15 tests/gold_tests/pluginTest/lua/lua_debug_tags.test.py | 8 tests/gold_tests/pluginTest/lua/lua_header_table.test.py | 8 tests/gold_tests/pluginTest/lua/lua_states_stats.test.py | 32 tests/gold_tests/pluginTest/lua/lua_watermark.test.py | 20 tests/gold_tests/pluginTest/money_trace/money_trace.test.py | 62 - tests/gold_tests/pluginTest/money_trace/money_trace_global.test.py | 38 tests/gold_tests/pluginTest/multiplexer/multiplexer.test.py | 131 +-- tests/gold_tests/pluginTest/parent_select/parent_select.test.py | 76 - tests/gold_tests/pluginTest/parent_select/parent_select_optional_scheme_matching.test.py | 75 - tests/gold_tests/pluginTest/parent_select/parent_select_peer.test.py | 114 +- tests/gold_tests/pluginTest/parent_select/parent_select_peer2.test.py | 100 +- tests/gold_tests/pluginTest/prefetch_simple/prefetch_simple.test.py | 45 - tests/gold_tests/pluginTest/regex_remap/regex_remap.test.py | 59 - tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate.test.py | 171 +--- tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_miss.test.py | 92 -- tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_state.test.py | 80 - tests/gold_tests/pluginTest/remap_stats/remap_stats.test.py | 27 tests/gold_tests/pluginTest/remap_stats/remap_stats_post.test.py | 27 tests/gold_tests/pluginTest/s3_auth/s3_auth_config.test.py | 12 tests/gold_tests/pluginTest/server_push_preload/server_push_preload.test.py | 71 - tests/gold_tests/pluginTest/slice/slice.test.py | 76 - tests/gold_tests/pluginTest/slice/slice_error.test.py | 354 +++----- tests/gold_tests/pluginTest/slice/slice_prefetch.test.py | 112 +- tests/gold_tests/pluginTest/slice/slice_purge.test.py | 38 tests/gold_tests/pluginTest/slice/slice_regex.test.py | 133 +-- tests/gold_tests/pluginTest/slice/slice_rm_range.test.py | 23 tests/gold_tests/pluginTest/slice/slice_selfhealing.test.py | 338 +++----- tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py | 50 - tests/gold_tests/pluginTest/stek_share/stek_share.test.py | 297 ++++--- tests/gold_tests/pluginTest/test_hooks/hook_add.test.py | 23 tests/gold_tests/pluginTest/test_hooks/ssn_start_delay_hook.test.py | 25 tests/gold_tests/pluginTest/test_hooks/test_hooks.test.py | 49 - tests/gold_tests/pluginTest/traffic_dump/traffic_dump.test.py | 105 +- tests/gold_tests/pluginTest/traffic_dump/traffic_dump_http3.test.py | 77 - tests/gold_tests/pluginTest/traffic_dump/traffic_dump_ip_filter.test.py | 41 tests/gold_tests/pluginTest/traffic_dump/traffic_dump_response_body.test.py | 54 - tests/gold_tests/pluginTest/traffic_dump/traffic_dump_sni_filter.test.py | 64 - tests/gold_tests/pluginTest/traffic_dump/verify_replay.py | 82 - tests/gold_tests/pluginTest/transform/transaction_data_sink.test.py | 22 tests/gold_tests/pluginTest/tsapi/tsapi.test.py | 51 - tests/gold_tests/pluginTest/uri_signing/uri_signing.test.py | 80 - tests/gold_tests/pluginTest/url_sig/url_sig.test.py | 108 -- tests/gold_tests/pluginTest/xdebug/x_cache_info/x_cache_info.test.py | 26 tests/gold_tests/pluginTest/xdebug/x_effective_url/x_effective_url.test.py | 27 tests/gold_tests/pluginTest/xdebug/x_remap/x_remap.test.py | 37 tests/gold_tests/post/post-continue.test.py | 49 - 
tests/gold_tests/post/post-early-return.test.py | 39 tests/gold_tests/post_slow_server/post_slow_server.test.py | 42 tests/gold_tests/proxy_protocol/proxy_protocol.test.py | 27 tests/gold_tests/proxy_protocol/proxy_serve_stale.test.py | 42 tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py | 47 - tests/gold_tests/redirect/number_of_redirects.test.py | 37 tests/gold_tests/redirect/redirect.test.py | 63 - tests/gold_tests/redirect/redirect_actions.test.py | 71 - tests/gold_tests/redirect/redirect_post.test.py | 49 - tests/gold_tests/redirect/redirect_stale.test.py | 40 tests/gold_tests/remap/conf_remap_float.test.py | 6 tests/gold_tests/remap/regex_map.test.py | 27 tests/gold_tests/remap/remap_http.test.py | 60 - tests/gold_tests/remap/remap_https.test.py | 45 - tests/gold_tests/remap/remap_ip_resolve.test.py | 35 tests/gold_tests/remap/remap_ws.test.py | 38 tests/gold_tests/runroot/runroot_init.test.py | 4 tests/gold_tests/runroot/runroot_manager.test.py | 4 tests/gold_tests/runroot/runroot_use.test.py | 4 tests/gold_tests/runroot/runroot_verify.test.py | 1 tests/gold_tests/session_sharing/session_match.test.py | 92 +- tests/gold_tests/shutdown/emergency.test.py | 20 tests/gold_tests/shutdown/fatal.test.py | 20 tests/gold_tests/slow_post/http_utils.py | 5 tests/gold_tests/slow_post/quick_server.py | 34 tests/gold_tests/slow_post/quick_server.test.py | 35 tests/gold_tests/slow_post/slow_post.test.py | 37 tests/gold_tests/slow_post/slow_post_client.py | 42 tests/gold_tests/slow_post/slow_post_clients.py | 11 tests/gold_tests/thread_config/check_threads.py | 16 tests/gold_tests/thread_config/thread_config.test.py | 240 +++-- tests/gold_tests/timeout/accept_timeout.test.py | 29 tests/gold_tests/timeout/active_timeout.test.py | 28 tests/gold_tests/timeout/conn_timeout.test.py | 23 tests/gold_tests/timeout/inactive_client_timeout.test.py | 36 tests/gold_tests/timeout/inactive_timeout.test.py | 28 tests/gold_tests/timeout/tls_conn_timeout.test.py | 28 tests/gold_tests/tls/h2_early_decode.py | 12 tests/gold_tests/tls/h2_early_gen.py | 26 tests/gold_tests/tls/ssl_multicert_loader.test.py | 33 tests/gold_tests/tls/test-0rtt-s_client.py | 12 tests/gold_tests/tls/tls.test.py | 40 tests/gold_tests/tls/tls_0rtt_server.test.py | 64 - tests/gold_tests/tls/tls_bad_alpn.test.py | 13 tests/gold_tests/tls/tls_check_cert_selection.test.py | 33 tests/gold_tests/tls/tls_check_cert_selection_reload.test.py | 42 tests/gold_tests/tls/tls_check_dual_cert_selection.test.py | 37 tests/gold_tests/tls/tls_check_dual_cert_selection2.test.py | 41 tests/gold_tests/tls/tls_client_cert.test.py | 125 +- tests/gold_tests/tls/tls_client_cert2.test.py | 99 +- tests/gold_tests/tls/tls_client_cert_override.test.py | 87 -- tests/gold_tests/tls/tls_client_verify.test.py | 57 - tests/gold_tests/tls/tls_client_verify2.test.py | 46 - tests/gold_tests/tls/tls_client_verify3.test.py | 93 -- tests/gold_tests/tls/tls_client_versions.test.py | 33 tests/gold_tests/tls/tls_engine.test.py | 91 +- tests/gold_tests/tls/tls_forward_nonhttp.test.py | 25 tests/gold_tests/tls/tls_hooks_client_verify.test.py | 37 tests/gold_tests/tls/tls_hooks_verify.test.py | 48 - tests/gold_tests/tls/tls_keepalive.test.py | 31 tests/gold_tests/tls/tls_ocsp.test.py | 27 tests/gold_tests/tls/tls_origin_session_reuse.test.py | 194 ++-- tests/gold_tests/tls/tls_partial_blind_tunnel.test.py | 40 tests/gold_tests/tls/tls_session_key_logging.test.py | 36 tests/gold_tests/tls/tls_session_reuse.test.py | 113 +- tests/gold_tests/tls/tls_sni_host_policy.test.py 
| 59 - tests/gold_tests/tls/tls_sni_yaml_reload.test.py | 45 - tests/gold_tests/tls/tls_ticket.test.py | 51 - tests/gold_tests/tls/tls_tunnel.test.py | 58 - tests/gold_tests/tls/tls_tunnel_forward.test.py | 56 - tests/gold_tests/tls/tls_verify.test.py | 110 +- tests/gold_tests/tls/tls_verify2.test.py | 96 +- tests/gold_tests/tls/tls_verify3.test.py | 79 - tests/gold_tests/tls/tls_verify4.test.py | 133 +-- tests/gold_tests/tls/tls_verify_base.test.py | 84 - tests/gold_tests/tls/tls_verify_ca_override.test.py | 61 - tests/gold_tests/tls/tls_verify_not_pristine.test.py | 46 - tests/gold_tests/tls/tls_verify_override.test.py | 119 +- tests/gold_tests/tls/tls_verify_override_base.test.py | 109 +- tests/gold_tests/tls/tls_verify_override_sni.test.py | 105 +- tests/gold_tests/tls_hooks/tls_hooks.test.py | 24 tests/gold_tests/tls_hooks/tls_hooks10.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks11.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks12.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks13.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks14.test.py | 23 tests/gold_tests/tls_hooks/tls_hooks15.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks16.test.py | 25 tests/gold_tests/tls_hooks/tls_hooks17.test.py | 25 tests/gold_tests/tls_hooks/tls_hooks18.test.py | 25 tests/gold_tests/tls_hooks/tls_hooks2.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks3.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks4.test.py | 31 tests/gold_tests/tls_hooks/tls_hooks6.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks7.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks8.test.py | 21 tests/gold_tests/tls_hooks/tls_hooks9.test.py | 21 tests/gold_tests/traffic_ctl/remap_inc/remap_inc.test.py | 41 tests/gold_tests/url/uri.test.py | 10 tests/tools/plugins/test_log_interface.cc | 4 tests/tools/tcp_client.py | 7 tools/git/pre-commit | 38 tools/package/trafficserver.spec | 2 tools/yapf.sh | 109 ++ 353 files changed, 8073 insertions(+), 8609 deletions(-) diff -Nru trafficserver-9.2.3+ds/.style.yapf trafficserver-9.2.4+ds/.style.yapf --- trafficserver-9.2.3+ds/.style.yapf 1970-01-01 00:00:00.000000000 +0000 +++ trafficserver-9.2.4+ds/.style.yapf 2024-04-03 15:38:30.000000000 +0000 @@ -0,0 +1,421 @@ +####################### +# +# Licensed to the Apache Software Foundation (ASF) under one or more contributor license +# agreements. See the NOTICE file distributed with this work for additional information regarding +# copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations under +# the License. +# +####################### + +[style] +based_on_style = yapf + +# Align closing bracket with visual indentation. +align_closing_bracket_with_visual_indent=False + +# Allow dictionary keys to exist on multiple lines. For example: +# +# x = { +# ('this is the first element of a tuple', +# 'this is the second element of a tuple'): +# value, +# } +allow_multiline_dictionary_keys=True + +# Allow lambdas to be formatted on more than one line. 
+allow_multiline_lambdas=False + +# Allow splitting before a default / named assignment in an argument list. +allow_split_before_default_or_named_assigns=False + +# Allow splits before the dictionary value. +allow_split_before_dict_value=False + +# Let spacing indicate operator precedence. For example: +# +# a = 1 * 2 + 3 / 4 +# b = 1 / 2 - 3 * 4 +# c = (1 + 2) * (3 - 4) +# d = (1 - 2) / (3 + 4) +# e = 1 * 2 - 3 +# f = 1 + 2 + 3 + 4 +# +# will be formatted as follows to indicate precedence: +# +# a = 1*2 + 3/4 +# b = 1/2 - 3*4 +# c = (1+2) * (3-4) +# d = (1-2) / (3+4) +# e = 1*2 - 3 +# f = 1 + 2 + 3 + 4 +# +arithmetic_precedence_indication=False + +# Number of blank lines surrounding top-level function and class +# definitions. +blank_lines_around_top_level_definition=2 + +# Number of blank lines between top-level imports and variable +# definitions. +blank_lines_between_top_level_imports_and_variables=1 + +# Insert a blank line before a class-level docstring. +blank_line_before_class_docstring=False + +# Insert a blank line before a module docstring. +blank_line_before_module_docstring=False + +# Insert a blank line before a 'def' or 'class' immediately nested +# within another 'def' or 'class'. For example: +# +# class Foo: +# # <------ this blank line +# def method(): +# pass +blank_line_before_nested_class_or_def=True + +# Do not split consecutive brackets. Only relevant when +# dedent_closing_brackets is set. For example: +# +# call_func_that_takes_a_dict( +# { +# 'key1': 'value1', +# 'key2': 'value2', +# } +# ) +# +# would reformat to: +# +# call_func_that_takes_a_dict({ +# 'key1': 'value1', +# 'key2': 'value2', +# }) +coalesce_brackets=False + +# The column limit. +column_limit=132 + +# The style for continuation alignment. Possible values are: +# +# - SPACE: Use spaces for continuation alignment. This is default behavior. +# - FIXED: Use fixed number (CONTINUATION_INDENT_WIDTH) of columns +# (ie: CONTINUATION_INDENT_WIDTH/INDENT_WIDTH tabs or +# CONTINUATION_INDENT_WIDTH spaces) for continuation alignment. +# - VALIGN-RIGHT: Vertically align continuation lines to multiple of +# INDENT_WIDTH columns. Slightly right (one tab or a few spaces) if +# cannot vertically align continuation lines with indent characters. +continuation_align_style=SPACE + +# Indent width used for line continuations. +continuation_indent_width=4 + +# Put closing brackets on a separate line, dedented, if the bracketed +# expression can't fit in a single line. Applies to all kinds of brackets, +# including function definitions and calls. For example: +# +# config = { +# 'key1': 'value1', +# 'key2': 'value2', +# } # <--- this bracket is dedented and on a separate line +# +# time_series = self.remote_client.query_entity_counters( +# entity='dev3246.region1', +# key='dns.query_latency_tcp', +# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), +# start_ts=now()-timedelta(days=3), +# end_ts=now(), +# ) # <--- this bracket is dedented and on a separate line +dedent_closing_brackets=False + +# Disable the heuristic which places each list element on a separate line +# if the list is comma-terminated. +disable_ending_comma_heuristic=False + +# Place each dictionary entry onto its own line. +each_dict_entry_on_separate_line=True + +# Require multiline dictionary even if it would normally fit on one line. +# For example: +# +# config = { +# 'key1': 'value1' +# } +force_multiline_dict=False + +# The regex for an i18n comment. 
The presence of this comment stops +# reformatting of that line, because the comments are required to be +# next to the string they translate. +i18n_comment=#\..* + +# The i18n function call names. The presence of this function stops +# reformattting on that line, because the string it has cannot be moved +# away from the i18n comment. +i18n_function_call=N_, _ + +# Indent blank lines. +indent_blank_lines=False + +# Put closing brackets on a separate line, indented, if the bracketed +# expression can't fit in a single line. Applies to all kinds of brackets, +# including function definitions and calls. For example: +# +# config = { +# 'key1': 'value1', +# 'key2': 'value2', +# } # <--- this bracket is indented and on a separate line +# +# time_series = self.remote_client.query_entity_counters( +# entity='dev3246.region1', +# key='dns.query_latency_tcp', +# transform=Transformation.AVERAGE(window=timedelta(seconds=60)), +# start_ts=now()-timedelta(days=3), +# end_ts=now(), +# ) # <--- this bracket is indented and on a separate line +indent_closing_brackets=False + +# Indent the dictionary value if it cannot fit on the same line as the +# dictionary key. For example: +# +# config = { +# 'key1': +# 'value1', +# 'key2': value1 + +# value2, +# } +indent_dictionary_value=True + +# The number of columns to use for indentation. +indent_width=4 + +# Join short lines into one line. E.g., single line 'if' statements. +join_multiple_lines=False + +# Do not include spaces around selected binary operators. For example: +# +# 1 + 2 * 3 - 4 / 5 +# +# will be formatted as follows when configured with "*,/": +# +# 1 + 2*3 - 4/5 +no_spaces_around_selected_binary_operators= + +# Use spaces around default or named assigns. +spaces_around_default_or_named_assign=False + +# Adds a space after the opening '{' and before the ending '}' dict +# delimiters. +# +# {1: 2} +# +# will be formatted as: +# +# { 1: 2 } +spaces_around_dict_delimiters=False + +# Adds a space after the opening '[' and before the ending ']' list +# delimiters. +# +# [1, 2] +# +# will be formatted as: +# +# [ 1, 2 ] +spaces_around_list_delimiters=False + +# Use spaces around the power operator. +spaces_around_power_operator=False + +# Use spaces around the subscript / slice operator. For example: +# +# my_list[1 : 10 : 2] +spaces_around_subscript_colon=False + +# Adds a space after the opening '(' and before the ending ')' tuple +# delimiters. +# +# (1, 2, 3) +# +# will be formatted as: +# +# ( 1, 2, 3 ) +spaces_around_tuple_delimiters=False + +# The number of spaces required before a trailing comment. +# This can be a single value (representing the number of spaces +# before each trailing comment) or list of values (representing +# alignment column values; trailing comments within a block will +# be aligned to the first column value that is greater than the maximum +# line length within the block). 
For example: +# +# With spaces_before_comment=5: +# +# 1 + 1 # Adding values +# +# will be formatted as: +# +# 1 + 1 # Adding values <-- 5 spaces between the end of the +# # statement and comment +# +# With spaces_before_comment=15, 20: +# +# 1 + 1 # Adding values +# two + two # More adding +# +# longer_statement # This is a longer statement +# short # This is a shorter statement +# +# a_very_long_statement_that_extends_beyond_the_final_column # Comment +# short # This is a shorter statement +# +# will be formatted as: +# +# 1 + 1 # Adding values <-- end of line comments in block +# # aligned to col 15 +# two + two # More adding +# +# longer_statement # This is a longer statement <-- end of line +# # comments in block aligned to col 20 +# short # This is a shorter statement +# +# a_very_long_statement_that_extends_beyond_the_final_column # Comment <-- the end of line comments are aligned based on the line length +# short # This is a shorter statement +# +spaces_before_comment=2 + +# Insert a space between the ending comma and closing bracket of a list, +# etc. +space_between_ending_comma_and_closing_bracket=False + +# Use spaces inside brackets, braces, and parentheses. For example: +# +# method_call( 1 ) +# my_dict[ 3 ][ 1 ][ get_index( *args, **kwargs ) ] +# my_set = { 1, 2, 3 } +space_inside_brackets=False + +# Split before arguments. +split_all_comma_separated_values=False + +# Split before arguments, but do not split all subexpressions recursively +# (unless needed). +split_all_top_level_comma_separated_values=False + +# Split before arguments if the argument list is terminated by a +# comma. +split_arguments_when_comma_terminated=False + +# Set to True to prefer splitting before '+', '-', '*', '/', '//', or '@' +# rather than after. +split_before_arithmetic_operator=False + +# Set to True to prefer splitting before '&', '|' or '^' rather than +# after. +split_before_bitwise_operator=True + +# Split before the closing bracket if a list or dict literal doesn't fit on +# a single line. +split_before_closing_bracket=True + +# Split before a dictionary or set generator (comp_for). For example, note +# the split before the 'for': +# +# foo = { +# variable: 'Hello world, have a nice day!' +# for variable in bar if variable != 42 +# } +split_before_dict_set_generator=False + +# Split before the '.' if we need to split a longer expression: +# +# foo = ('This is a really long string: {}, {}, {}, {}'.format(a, b, c, d)) +# +# would reformat to something like: +# +# foo = ('This is a really long string: {}, {}, {}, {}' +# .format(a, b, c, d)) +split_before_dot=True + +# Split after the opening paren which surrounds an expression if it doesn't +# fit on a single line. +split_before_expression_after_opening_paren=True + +# If an argument / parameter list is going to be split, then split before +# the first argument. +split_before_first_argument=True + +# Set to True to prefer splitting before 'and' or 'or' rather than +# after. +split_before_logical_operator=False + +# Split named assignments onto individual lines. +split_before_named_assigns=True + +# Set to True to split list comprehensions and generators that have +# non-trivial expressions and multiple clauses before each of these +# clauses. 
For example: +# +# result = [ +# a_long_var + 100 for a_long_var in xrange(1000) +# if a_long_var % 10] +# +# would reformat to something like: +# +# result = [ +# a_long_var + 100 +# for a_long_var in xrange(1000) +# if a_long_var % 10] +split_complex_comprehension=True + +# The penalty for splitting right after the opening bracket. +split_penalty_after_opening_bracket=300 + +# The penalty for splitting the line after a unary operator. +split_penalty_after_unary_operator=10000 + +# The penalty of splitting the line around the '+', '-', '*', '/', '//', +# `%`, and '@' operators. +split_penalty_arithmetic_operator=300 + +# The penalty for splitting right before an if expression. +split_penalty_before_if_expr=0 + +# The penalty of splitting the line around the '&', '|', and '^' operators. +split_penalty_bitwise_operator=300 + +# The penalty for splitting a list comprehension or generator +# expression. +split_penalty_comprehension=2100 + +# The penalty for characters over the column limit. +split_penalty_excess_character=7000 + +# The penalty incurred by adding a line split to the logical line. The +# more line splits added the higher the penalty. +split_penalty_for_added_line_split=30 + +# The penalty of splitting a list of "import as" names. For example: +# +# from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, +# long_argument_2, +# long_argument_3) +# +# would reformat to something like: +# +# from a_very_long_or_indented_module_name_yada_yad import ( +# long_argument_1, long_argument_2, long_argument_3) +split_penalty_import_names=0 + +# The penalty of splitting the line around the 'and' and 'or' operators. +split_penalty_logical_operator=300 + +# Use the Tab character for indentation. +use_tabs=False + diff -Nru trafficserver-9.2.3+ds/.yapfignore trafficserver-9.2.4+ds/.yapfignore --- trafficserver-9.2.3+ds/.yapfignore 1970-01-01 00:00:00.000000000 +0000 +++ trafficserver-9.2.4+ds/.yapfignore 2024-04-03 15:38:30.000000000 +0000 @@ -0,0 +1,18 @@ +####################### +# +# Licensed to the Apache Software Foundation (ASF) under one or more contributor license +# agreements. See the NOTICE file distributed with this work for additional information regarding +# copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations under +# the License. 
+# +####################### + +lib/* diff -Nru trafficserver-9.2.3+ds/CHANGELOG-9.2.4 trafficserver-9.2.4+ds/CHANGELOG-9.2.4 --- trafficserver-9.2.3+ds/CHANGELOG-9.2.4 1970-01-01 00:00:00.000000000 +0000 +++ trafficserver-9.2.4+ds/CHANGELOG-9.2.4 2024-04-03 15:38:30.000000000 +0000 @@ -0,0 +1,29 @@ +Changes with Apache Traffic Server 9.2.4 + #10286 - Abort a read when the disk is known to be bad + #10503 - CID 1508894: Traffic Dump restore ostream format + #10507 - CID 1518601: dead code in test_log_interface.cc + #10520 - CID 1513224: Cleanup dl handle while testing a plugin + #10540 - LSan: Fix memory leak of Cache Unit Tests + #10543 - LSan: Fix memory leak of test_libhttp2 + #10551 - Make NextHopConsistentHash unit test stable + #10556 - s3_auth: Clear handling TSAction in the config_reloader + #10579 - Fix typo in docs for sni.yml + #10590 - applying additional accepts PR to 9.2.x + #10591 - Fix typo in block_errors documentation + #10621 - Fixed h2spec 6.4.3 test (#10584) + #10622 - Fix H2 debug message for a rate limit (#10583) + #10625 - Make bad disk detection more robust (backport for 9.2.x) + #10640 - Add VIA RWW cache result as acceptable for an IMS_HIT + #10673 - Do not overwrite the error code of GOAWAY frame + #10744 - Update accept thread configuration changes from #10687 applied to 9.2.x + #10896 - 9.2.x: yapf: use yapf instead of autopep8 + #10925 - Use slice req method type in Data obj + #10962 - adds stdint.h include to ink_file.h needed for rocky9/clang16 + #10988 - header_freq: Fix msg lock issues + #11009 - Segmentation crash caused by setting unavailable_server_retry_responses in parent.config + #11021 - Ensure connection retry attempts can reach the config specified value + #11030 - Allows the set-body to run as a pseudo remap hook + #11044 - delete simple_server_retry_responses + #11073 - Fix a bug in parsing chunked messages + #11085 - doc: max_rst_stream_frames_per_minute defaults to 200 + #11206 - Add proxy.config.http2.max_continuation_frames_per_minute diff -Nru trafficserver-9.2.3+ds/Makefile.am trafficserver-9.2.4+ds/Makefile.am --- trafficserver-9.2.3+ds/Makefile.am 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/Makefile.am 2024-04-03 15:38:30.000000000 +0000 @@ -113,9 +113,9 @@ rat: java -jar $(top_srcdir)/ci/apache-rat-0.13-SNAPSHOT.jar -E $(top_srcdir)/ci/rat-regex.txt -d $(top_srcdir) -.PHONY: autopep8 -autopep8: - @$(top_srcdir)/tools/autopep8.sh $(top_srcdir) +.PHONY: yapf +yapf: + @$(top_srcdir)/tools/yapf.sh $(top_srcdir) # # These are rules to make clang-format easy and fast to run. Run it with e.g. @@ -174,7 +174,7 @@ # Run the various format targets. perltidy is not included because the user may # not have it installed. 
.PHONY: format -format: clang-format autopep8 +format: clang-format yapf .PHONY: perltidy perltidy: @@ -200,4 +200,4 @@ @echo 'rat produce a RAT licence compliance report of the source' @echo 'rel-candidate recreate a signed relelease candidate source package and a signed git tag' @echo 'release recreate a signed release source package and a signed git tag' - @echo 'autopep8 run autopep8 over python files' + @echo 'yapf run yapf over python files' diff -Nru trafficserver-9.2.3+ds/Makefile.in trafficserver-9.2.4+ds/Makefile.in --- trafficserver-9.2.3+ds/Makefile.in 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/Makefile.in 2024-04-03 15:38:30.000000000 +0000 @@ -1560,9 +1560,9 @@ rat: java -jar $(top_srcdir)/ci/apache-rat-0.13-SNAPSHOT.jar -E $(top_srcdir)/ci/rat-regex.txt -d $(top_srcdir) -.PHONY: autopep8 -autopep8: - @$(top_srcdir)/tools/autopep8.sh $(top_srcdir) +.PHONY: yapf +yapf: + @$(top_srcdir)/tools/yapf.sh $(top_srcdir) .PHONY: $(CLANG_FORMAT_DIR_TARGETS) $(CLANG_FORMAT_SENTINEL): @@ -1603,7 +1603,7 @@ # Run the various format targets. perltidy is not included because the user may # not have it installed. .PHONY: format -format: clang-format autopep8 +format: clang-format yapf .PHONY: perltidy perltidy: @@ -1629,7 +1629,7 @@ @echo 'rat produce a RAT licence compliance report of the source' @echo 'rel-candidate recreate a signed relelease candidate source package and a signed git tag' @echo 'release recreate a signed release source package and a signed git tag' - @echo 'autopep8 run autopep8 over python files' + @echo 'yapf run yapf over python files' # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. diff -Nru trafficserver-9.2.3+ds/configure trafficserver-9.2.4+ds/configure --- trafficserver-9.2.3+ds/configure 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/configure 2024-04-03 15:38:30.000000000 +0000 @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.71 for Apache Traffic Server 9.2.3. +# Generated by GNU Autoconf 2.71 for Apache Traffic Server 9.2.4. # # Report bugs to . # @@ -621,8 +621,8 @@ # Identity of this package. PACKAGE_NAME='Apache Traffic Server' PACKAGE_TARNAME='trafficserver' -PACKAGE_VERSION='9.2.3' -PACKAGE_STRING='Apache Traffic Server 9.2.3' +PACKAGE_VERSION='9.2.4' +PACKAGE_STRING='Apache Traffic Server 9.2.4' PACKAGE_BUGREPORT='dev@trafficserver.apache.org' PACKAGE_URL='https://trafficserver.apache.org' @@ -1744,7 +1744,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures Apache Traffic Server 9.2.3 to adapt to many kinds of systems. +\`configure' configures Apache Traffic Server 9.2.4 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1815,7 +1815,7 @@ if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of Apache Traffic Server 9.2.3:";; + short | recursive ) echo "Configuration of Apache Traffic Server 9.2.4:";; esac cat <<\_ACEOF @@ -2065,7 +2065,7 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -Apache Traffic Server configure 9.2.3 +Apache Traffic Server configure 9.2.4 generated by GNU Autoconf 2.71 Copyright (C) 2021 Free Software Foundation, Inc. 
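The configure and configure.ac hunks around this point bump the package version from 9.2.3 to 9.2.4 and recompute TS_VERSION_NUMBER using the MAJOR * 1000000 + MINOR * 1000 + MICRO scheme noted in configure.ac below. A quick sketch of that encoding, illustrative only and not part of the upstream sources (the helper names are invented here)::

    # Encode/decode the TS_VERSION_NUMBER scheme from configure.ac.
    def encode_ts_version(major, minor, micro):
        return major * 1000000 + minor * 1000 + micro

    def decode_ts_version(number):
        return "%d.%d.%d" % (number // 1000000, (number // 1000) % 1000, number % 1000)

    assert encode_ts_version(9, 2, 4) == 9002004   # TS_VERSION_N for this release
    assert decode_ts_version(9002004) == "9.2.4"   # TS_VERSION_S
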
@@ -2879,7 +2879,7 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by Apache Traffic Server $as_me 9.2.3, which was +It was created by Apache Traffic Server $as_me 9.2.4, which was generated by GNU Autoconf 2.71. Invocation command line was $ $0$ac_configure_args_raw @@ -4371,7 +4371,7 @@ # Define the identity of the package. PACKAGE='trafficserver' - VERSION='9.2.3' + VERSION='9.2.4' printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h @@ -4673,13 +4673,13 @@ # convention that attempts to solve problems that most people just # don't have and which just causes confusion for most end users. # -TS_VERSION_MAJOR=$((9002003 / 1000000 )) -TS_VERSION_MINOR=$(((9002003 / 1000) % 1000 )) -TS_VERSION_MICRO=$((9002003 % 1000 )) +TS_VERSION_MAJOR=$((9002004 / 1000000 )) +TS_VERSION_MINOR=$(((9002004 / 1000) % 1000 )) +TS_VERSION_MICRO=$((9002004 % 1000 )) TS_LIBTOOL_MAJOR=`echo $((${TS_VERSION_MAJOR} + ${TS_VERSION_MINOR}))` TS_LIBTOOL_VERSION=$TS_LIBTOOL_MAJOR:$TS_VERSION_MICRO:$TS_VERSION_MINOR -TS_VERSION_STRING=9.2.3 -TS_VERSION_NUMBER=9002003 +TS_VERSION_STRING=9.2.4 +TS_VERSION_NUMBER=9002004 # # Substitute the above version numbers into the various files below. @@ -34772,7 +34772,7 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by Apache Traffic Server $as_me 9.2.3, which was +This file was extended by Apache Traffic Server $as_me 9.2.4, which was generated by GNU Autoconf 2.71. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -34841,7 +34841,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ -Apache Traffic Server config.status 9.2.3 +Apache Traffic Server config.status 9.2.4 configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" diff -Nru trafficserver-9.2.3+ds/configure.ac trafficserver-9.2.4+ds/configure.ac --- trafficserver-9.2.3+ds/configure.ac 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/configure.ac 2024-04-03 15:38:30.000000000 +0000 @@ -32,8 +32,8 @@ # Version number is calculated as MAJOR * 1000000 + MINOR * 1000 + MICRO # Version string is in the form of MAJOR.MINOR.MICRO[sufix] # -m4_define([TS_VERSION_S],[9.2.3]) -m4_define([TS_VERSION_N],[9002003]) +m4_define([TS_VERSION_S],[9.2.4]) +m4_define([TS_VERSION_N],[9002004]) AC_INIT([Apache Traffic Server],[TS_VERSION_S()],[dev@trafficserver.apache.org],[trafficserver],[https://trafficserver.apache.org]) AC_PREREQ([2.69]) diff -Nru trafficserver-9.2.3+ds/contrib/python/compare_RecordsConfigcc.py trafficserver-9.2.4+ds/contrib/python/compare_RecordsConfigcc.py --- trafficserver-9.2.3+ds/contrib/python/compare_RecordsConfigcc.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/contrib/python/compare_RecordsConfigcc.py 2024-04-03 15:38:30.000000000 +0000 @@ -102,7 +102,6 @@ for d in sorted(defaults): print("\t%s %s -> %s" % (d, "%s %s" % rc_cc[d], "%s %s" % rc_doc[d])) - # Search for stale documentation ... 
     stale = [k for k in rc_doc if k not in rc_cc]
     if (len(stale) > 0):
diff -Nru trafficserver-9.2.3+ds/debian/changelog trafficserver-9.2.4+ds/debian/changelog
--- trafficserver-9.2.3+ds/debian/changelog 2023-11-02 14:01:39.000000000 +0000
+++ trafficserver-9.2.4+ds/debian/changelog 2024-04-13 09:21:19.000000000 +0000
@@ -1,3 +1,12 @@
+trafficserver (9.2.4+ds-0+deb12u1) bookworm-security; urgency=medium
+
+  * New upstream version 9.2.4+ds
+  * Refresh d/patches for 9.2.4 release
+  * CVEs fix (Closes: #1068417)
+    - CVE-2024-31309: HTTP/2 CONTINUATION DoS attack
+
+ -- Jean Baptiste Favre  Sat, 13 Apr 2024 11:21:19 +0200
+
 trafficserver (9.2.3+ds-1+deb12u1) bookworm-security; urgency=medium
 
   * Multiple CVE fixes for 9.2.x (Closes: #1054427, Closes: #1053801)
diff -Nru trafficserver-9.2.3+ds/debian/patches/0006-fix-doc-build.patch trafficserver-9.2.4+ds/debian/patches/0006-fix-doc-build.patch
--- trafficserver-9.2.3+ds/debian/patches/0006-fix-doc-build.patch 2023-06-22 06:51:52.000000000 +0000
+++ trafficserver-9.2.4+ds/debian/patches/0006-fix-doc-build.patch 2024-04-13 09:19:49.000000000 +0000
@@ -9,12 +9,11 @@
 This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 --- a/doc/ext/traffic-server.py
 +++ b/doc/ext/traffic-server.py
-@@ -398,8 +398,7 @@ with open(CONFIGURE_AC, 'r') as f:
+@@ -385,7 +385,7 @@ with open(CONFIGURE_AC, 'r') as f:
  # get the current branch the local repository is on
  REPO_GIT_DIR = os.path.join(REPO_ROOT, ".git")
--git_branch = subprocess.check_output(['git', '--git-dir', REPO_GIT_DIR,
--                                      'rev-parse', '--abbrev-ref', 'HEAD'])
+-git_branch = subprocess.check_output(['git', '--git-dir', REPO_GIT_DIR, 'rev-parse', '--abbrev-ref', 'HEAD'])
 +git_branch = "master"
diff -Nru trafficserver-9.2.3+ds/doc/admin-guide/files/records.config.en.rst trafficserver-9.2.4+ds/doc/admin-guide/files/records.config.en.rst
--- trafficserver-9.2.3+ds/doc/admin-guide/files/records.config.en.rst 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/doc/admin-guide/files/records.config.en.rst 2024-04-03 15:38:30.000000000 +0000
@@ -430,6 +430,18 @@
 Network
 =======
 
+.. ts:cv:: CONFIG proxy.config.net.additional_accepts INT -1
+   :reloadable:
+
+   This config addresses an issue that can sometimes happen if threads are caught in
+   a net accept while loop, become busy exclusively accepting connections, and are prevented
+   from doing other work. This can cause an increase in latency and average event
+   loop time. When set to 0, a thread accepts only 1 connection per event loop.
+   When set to any other positive integer x, a thread will accept up to x+1 connections
+   per event loop. When set to -1 (default), a thread will accept connections as long
+   as there are connections waiting in its listening queue; this is equivalent to "accept all",
+   and setting it to 0 is equivalent to "accept one".
+
 .. ts:cv:: CONFIG proxy.config.net.connections_throttle INT 30000
 
    The total number of client and origin server connections that the server
@@ -4272,11 +4284,18 @@
    This limit only will be enforced if :ts:cv:`proxy.config.http2.stream_priority_enabled`
    is set to 1.
 
-.. ts:cv:: CONFIG proxy.config.http2.max_rst_stream_frames_per_minute INT 14
+.. ts:cv:: CONFIG proxy.config.http2.max_rst_stream_frames_per_minute INT 200
    :reloadable:
 
-   Specifies how many RST_STREAM frames |TS| receives for a minute at maximum.
-   Clients exceeded this limit will be immediately disconnected with an error
+   Specifies how many RST_STREAM frames |TS| receives per minute at maximum.
+ Clients exceeding this limit will be immediately disconnected with an error + code of ENHANCE_YOUR_CALM. + +.. ts:cv:: CONFIG proxy.config.http2.max_continuation_frames_per_minute INT 120 + :reloadable: + + Specifies how many CONTINUATION frames |TS| receives per minute at maximum. + Clients exceeding this limit will be immediately disconnected with an error code of ENHANCE_YOUR_CALM. .. ts:cv:: CONFIG proxy.config.http2.min_avg_window_update FLOAT 2560.0 diff -Nru trafficserver-9.2.3+ds/doc/admin-guide/files/sni.yaml.en.rst trafficserver-9.2.4+ds/doc/admin-guide/files/sni.yaml.en.rst --- trafficserver-9.2.3+ds/doc/admin-guide/files/sni.yaml.en.rst 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/doc/admin-guide/files/sni.yaml.en.rst 2024-04-03 15:38:30.000000000 +0000 @@ -145,15 +145,15 @@ NOTE: Connection coalescing may prevent this from taking effect. http2_max_ping_frames_per_minute Inbound Specifies how many PING frames |TS| receives per minute at maximum. - By default this is :ts:cv:`proxy.config.http2.max_settings_frames_per_minute`. + By default this is :ts:cv:`proxy.config.http2.max_ping_frames_per_minute`. NOTE: Connection coalescing may prevent this from taking effect. http2_max_priority_frames_per_minute Inbound Specifies how many PRIORITY frames |TS| receives per minute at maximum. - By default this is :ts:cv:`proxy.config.http2.max_settings_frames_per_minute`. + By default this is :ts:cv:`proxy.config.http2.max_priority_frames_per_minute`. NOTE: Connection coalescing may prevent this from taking effect. http2_max_rst_stream_frames_per_minute Inbound Specifies how many RST_STREAM frames |TS| receives per minute at maximum. - By default this is :ts:cv:`proxy.config.http2.max_settings_frames_per_minute`. + By default this is :ts:cv:`proxy.config.http2.max_rst_stream_frames_per_minute`. NOTE: Connection coalescing may prevent this from taking effect. disable_h2 Inbound Deprecated for the more general h2 setting. Setting disable_h2 diff -Nru trafficserver-9.2.3+ds/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst trafficserver-9.2.4+ds/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst --- trafficserver-9.2.3+ds/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/doc/admin-guide/monitoring/statistics/core/http-connection.en.rst 2024-04-03 15:38:30.000000000 +0000 @@ -263,10 +263,17 @@ .. ts:stat:: global proxy.process.http2.max_rst_stream_frames_per_minute_exceeded integer :type: counter - Represents the total number of closed HTTP/2 connections for exceeding the - maximum allowed number of rst_stream frames per minute limit which is configured by + Represents the total number of HTTP/2 connections closed for exceeding the + maximum allowed number of ``RST_STREAM`` frames per minute limit which is configured by :ts:cv:`proxy.config.http2.max_rst_stream_frames_per_minute`. +.. ts:stat:: global proxy.process.http2.max_continuation_frames_per_minute_exceeded integer + :type: counter + + Represents the total number of HTTP/2 connections closed for exceeding the + maximum allowed number of ``CONTINUATION`` frames per minute limit which is + configured by :ts:cv:`proxy.config.http2.max_continuation_frames_per_minute`. + .. 
ts:stat:: global proxy.process.http2.insufficient_avg_window_update integer
   :type: counter
 
diff -Nru trafficserver-9.2.3+ds/doc/admin-guide/plugins/block_errors.en.rst trafficserver-9.2.4+ds/doc/admin-guide/plugins/block_errors.en.rst
--- trafficserver-9.2.3+ds/doc/admin-guide/plugins/block_errors.en.rst 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/doc/admin-guide/plugins/block_errors.en.rst 2024-04-03 15:38:30.000000000 +0000
@@ -59,7 +59,7 @@
 - ``block_errors.error_limit``: Set the error limit. Takes a single argument, the number of errors allowed before blocking the client.
 - ``block_errors.timeout``: Set the block timeout. Takes a single argument, the number of minutes to block the client.
 - ``block_errors.shutdown``: Set the shutdown mode. Takes a single argument, 0 to downgrade to HTTP/1.1, 1 to close the connection.
-- ``block_errors.enable``: Enable or disable the plugin. Takes a single argument, 0 to disable, 1 to enable.
+- ``block_errors.enabled``: Enable or disable the plugin. Takes a single argument, 0 to disable, 1 to enable.
 
 Example Run Time Configuration
 ==============================
@@ -70,4 +70,4 @@
 
     traffic_ctl plugin msg block_errors.shutdown 1
 
-    traffic_ctl plugin msg block_errors.enable 1
+    traffic_ctl plugin msg block_errors.enabled 1
diff -Nru trafficserver-9.2.3+ds/doc/admin-guide/plugins/header_freq.en.rst trafficserver-9.2.4+ds/doc/admin-guide/plugins/header_freq.en.rst
--- trafficserver-9.2.3+ds/doc/admin-guide/plugins/header_freq.en.rst 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/doc/admin-guide/plugins/header_freq.en.rst 2024-04-03 15:38:30.000000000 +0000
@@ -20,21 +20,29 @@
    specific language governing permissions and limitations
    under the License.
 
-The Header Frequency plugin keeps track of the number of times headers
-have been seen in transactions. Two separate counteres are kept for
-the origin and the client. This information is accessible via ::
+The Header Frequency plugin keeps track of the number of times headers have been
+seen in transactions. Two separate counters are kept for the origin and the
+client. This information is accessible via the ``log`` plugin message. By
+default the data is sent to traffic.out but it can alternatively be appended to
+an arbitrary file. The following logs the stats to ``traffic.out``::
 
     traffic_ctl plugin msg header_freq log
 
+The following appends the stats to ``/tmp/log.txt``. Note that this file must be
+writeable by the traffic_server process's user::
+
+    traffic_ctl plugin msg header_freq log:/tmp/log.txt
+
+
 Installation
 ------------
 
-This plugin is only built if the configure option ::
+Since the Header Frequency plugin is an experimental plugin, traffic_server must be configured
+to build experimental plugins in order to use it::
 
     --enable-experimental-plugins
 
-is given at build time.
-Add the following line to :file:`plugin.config`::
+Once built, add the following line to :file:`plugin.config` and restart traffic_server to use it::
 
     header_freq.so
diff -Nru trafficserver-9.2.3+ds/doc/appendices/command-line/traffic_server.en.rst trafficserver-9.2.4+ds/doc/appendices/command-line/traffic_server.en.rst
--- trafficserver-9.2.3+ds/doc/appendices/command-line/traffic_server.en.rst 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/doc/appendices/command-line/traffic_server.en.rst 2024-04-03 15:38:30.000000000 +0000
@@ -32,8 +32,6 @@
 
 .. option:: -a, --accepts_thread
 
-.. option:: -b, --accept_till_done
-
 .. option:: -B TAGS, --action_tags TAGS
 
 ..
option:: --bind_stdout FILE diff -Nru trafficserver-9.2.3+ds/doc/conf.py trafficserver-9.2.4+ds/doc/conf.py --- trafficserver-9.2.3+ds/doc/conf.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/doc/conf.py 2024-04-03 15:38:30.000000000 +0000 @@ -54,8 +54,8 @@ def setup(app): app.add_css_file('override.css') -# -- General configuration ----------------------------------------------------- +# -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. @@ -110,7 +110,6 @@ # work identically when building with Autotools (e.g. $ make html) # and without (e.g. on Read the Docs) - contents = open('../configure.ac').read() match = re.compile(r'm4_define\(\[TS_VERSION_S],\[(.*?)]\)').search(contents) @@ -182,24 +181,24 @@ #modindex_common_prefix = [] nitpicky = True -nitpick_ignore = [('c:identifier', 'int64_t'), - ('c:identifier', 'uint64_t'), - ('c:identifier', 'uint8_t'), - ('c:identifier', 'int32_t'), - ('c:identifier', 'size_t'), - ('c:identifier', 'ssize_t'), - ('c:identifier', 'sockaddr'), - ('c:identifier', 'time_t'), - ('cpp:identifier', 'T'), # template arg - ('cpp:identifier', 'F'), # template arg - ('cpp:identifier', 'Args'), # variadic template arg - ('cpp:identifier', 'Rest'), # variadic template arg - ] +nitpick_ignore = [ + ('c:identifier', 'int64_t'), + ('c:identifier', 'uint64_t'), + ('c:identifier', 'uint8_t'), + ('c:identifier', 'int32_t'), + ('c:identifier', 'size_t'), + ('c:identifier', 'ssize_t'), + ('c:identifier', 'sockaddr'), + ('c:identifier', 'time_t'), + ('cpp:identifier', 'T'), # template arg + ('cpp:identifier', 'F'), # template arg + ('cpp:identifier', 'Args'), # variadic template arg + ('cpp:identifier', 'Rest'), # variadic template arg +] # Autolink issue references. # See Customizing the Parser in the docutils.parsers.rst module. - # Customize parser.inliner in the only way that Sphinx supports. # docutils.parsers.rst.Parser takes an instance of states.Inliner or a # subclass, but Sphinx initializes the parser without any arguments, @@ -211,6 +210,7 @@ class Inliner(states.Inliner): + def init_customizations(self, settings): self.__class__ = BaseInliner BaseInliner.init_customizations(self, settings) @@ -219,22 +219,17 @@ # Copied from states.Inliner.init_customizations(). # In Docutils 0.13 these are locals. if not hasattr(self, 'start_string_prefix'): - self.start_string_prefix = (u'(^|(?<=\\s|[%s%s]))' % - (punctuation_chars.openers, - punctuation_chars.delimiters)) + self.start_string_prefix = (u'(^|(?<=\\s|[%s%s]))' % (punctuation_chars.openers, punctuation_chars.delimiters)) if not hasattr(self, 'end_string_suffix'): - self.end_string_suffix = (u'($|(?=\\s|[\x00%s%s%s]))' % - (punctuation_chars.closing_delimiters, - punctuation_chars.delimiters, - punctuation_chars.closers)) + self.end_string_suffix = ( + u'($|(?=\\s|[\x00%s%s%s]))' % + (punctuation_chars.closing_delimiters, punctuation_chars.delimiters, punctuation_chars.closers)) issue = re.compile( r''' {start_string_prefix} TS-\d+ - {end_string_suffix}'''.format( - start_string_prefix=self.start_string_prefix, - end_string_suffix=self.end_string_suffix), + {end_string_suffix}'''.format(start_string_prefix=self.start_string_prefix, end_string_suffix=self.end_string_suffix), re.VERBOSE | re.UNICODE) self.implicit_dispatch.append((issue, self.issue_reference)) @@ -352,8 +347,7 @@ # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'ApacheTrafficServer.tex', u'Apache Traffic Server Documentation', - u'dev@trafficserver.apache.org', 'manual'), + ('index', 'ApacheTrafficServer.tex', u'Apache Traffic Server Documentation', u'dev@trafficserver.apache.org', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -388,7 +382,6 @@ # documents and includes the same brief description in both the HTML # and manual page outputs. - # Override ManualPageWriter and ManualPageTranslator in the only way # that Sphinx supports @@ -396,6 +389,7 @@ class ManualPageWriter(BaseWriter): + def translate(self): transform = frontmatter.DocTitle(self.document) @@ -427,6 +421,7 @@ class ManualPageTranslator(BaseTranslator): + def __init__(self, builder, *args, **kwds): BaseTranslator.__init__(self, builder, *args, **kwds) @@ -443,9 +438,9 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'ApacheTrafficServer', u'Apache Traffic Server Documentation', - u'dev@trafficserver.apache.org', 'ApacheTrafficServer', 'One line description of project.', - 'Miscellaneous'), + ( + 'index', 'ApacheTrafficServer', u'Apache Traffic Server Documentation', u'dev@trafficserver.apache.org', + 'ApacheTrafficServer', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. diff -Nru trafficserver-9.2.3+ds/doc/ext/doxygen.py trafficserver-9.2.4+ds/doc/ext/doxygen.py --- trafficserver-9.2.3+ds/doc/ext/doxygen.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/doc/ext/doxygen.py 2024-04-03 15:38:30.000000000 +0000 @@ -44,7 +44,7 @@ Partial reimplementation in Python of Doxygen escapeCharsInString() """ - return name.replace('_', '__').replace(':', '_1').replace('/', '_2').replace('<', '_3').replace('>', '_4').replace('*', '_5').replace('&', '_6').replace('|', '_7').replace('.', '_8').replace('!', '_9').replace(',', '_00').replace(' ', '_01').replace('{', '_02').replace('}', '_03').replace('?', '_04').replace('^', '_05').replace('%', '_06').replace('(', '_07').replace(')', '_08').replace('+', '_09').replace('=', '_0A').replace('$', '_0B').replace('\\', '_0C') # nopep8 + return name.replace('_', '__').replace(':', '_1').replace('/', '_2').replace('<', '_3').replace('>', '_4').replace('*', '_5').replace('&', '_6').replace('|', '_7').replace('.', '_8').replace('!', '_9').replace(',', '_00').replace(' ', '_01').replace('{', '_02').replace('}', '_03').replace('?', '_04').replace('^', '_05').replace('%', '_06').replace('(', '_07').replace(')', '_08').replace('+', '_09').replace('=', '_0A').replace('$', '_0B').replace('\\', '_0C') # yapf: disable class doctree_resolved: @@ -100,7 +100,9 @@ # Lookup the object in the Doxygen index try: compound, = index.xpath( - 'descendant::compound[(not($owner) or name[text() = $owner]) and descendant::name[text() = $name]][1]', owner=signature_owner or owner, name=name) + 'descendant::compound[(not($owner) or name[text() = $owner]) and descendant::name[text() = $name]][1]', + owner=signature_owner or owner, + name=name) except ValueError: continue @@ -112,7 +114,7 @@ # An enumvalue has no location memberdef, = cache[filename].xpath( 'descendant::compounddef[compoundname[text() = $name]]', name=name) or cache[filename].xpath( - 'descendant::memberdef[name[text() = $name] | enumvalue[name[text() = $name]]]', name=name) + 
'descendant::memberdef[name[text() = $name] | enumvalue[name[text() = $name]]]', name=name) # Append the link after the object's signature. # Get the source file and line number from Doxygen and use @@ -137,8 +139,8 @@ else: refuri = 'http://docs.trafficserver.apache.org/en/latest/' + refuri - reference = nodes.reference('', '', emphasis, classes=[ - 'viewcode-link'], reftitle='Source code', refuri=refuri) + reference = nodes.reference( + '', '', emphasis, classes=['viewcode-link'], reftitle='Source code', refuri=refuri) desc_child += reference # Style the links @@ -159,13 +161,15 @@ else: if not etree: - app.warn('''Python lxml library not found + app.warn( + '''Python lxml library not found The library is used to add links from an API description to the source code for that object. Depending on your system, try installing the python-lxml package.''') if not path.isfile('xml/index.xml'): - app.warn('''Doxygen files not found: xml/index.xml + app.warn( + '''Doxygen files not found: xml/index.xml The files are used to add links from an API description to the source code for that object. Run "$ make doxygen" to generate these XML files.''') diff -Nru trafficserver-9.2.3+ds/doc/ext/traffic-server.py trafficserver-9.2.4+ds/doc/ext/traffic-server.py --- trafficserver-9.2.3+ds/doc/ext/traffic-server.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/doc/ext/traffic-server.py 2024-04-03 15:38:30.000000000 +0000 @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ TS Sphinx Directives ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -45,6 +44,7 @@ def is_string_type(s): return isinstance(s, basestring) except NameError: + def is_string_type(s): return isinstance(s, str) @@ -161,6 +161,7 @@ class TSConfVarRef(XRefRole): + def process_link(self, env, ref_node, explicit_title_p, title, target): return title, target @@ -171,18 +172,9 @@ def metricunits(unitname): return directives.choice( - unitname.lower(), - ('ratio', - 'percent', - 'kbits', - 'mbits', - 'bytes', - 'kbytes', - 'mbytes', - 'nanoseconds', - 'microseconds', - 'milliseconds', - 'seconds')) + unitname.lower(), ( + 'ratio', 'percent', 'kbits', 'mbits', 'bytes', 'kbytes', 'mbytes', 'nanoseconds', 'microseconds', 'milliseconds', + 'seconds')) class TSStat(std.Target): @@ -301,6 +293,7 @@ class TSStatRef(XRefRole): + def process_link(self, env, ref_node, explicit_title_p, title, target): return title, target @@ -319,15 +312,9 @@ 'stat': ObjType(_('statistic'), 'stat') } - directives = { - 'cv': TSConfVar, - 'stat': TSStat - } + directives = {'cv': TSConfVar, 'stat': TSStat} - roles = { - 'cv': TSConfVarRef(), - 'stat': TSStatRef() - } + roles = {'cv': TSConfVarRef(), 'stat': TSStatRef()} initial_data = { 'cv': {}, # full name -> docname @@ -380,6 +367,7 @@ for var, doc in self.data['stat'].iteritems(): yield var, var, 'stat', doc, var, 1 except AttributeError: + def get_objects(self): for var, doc in self.data['cv'].items(): yield var, var, 'cv', doc, var, 1 @@ -388,8 +376,7 @@ # get the branch this documentation is building for in X.X.x form -REPO_ROOT = os.path.join(os.path.dirname(os.path.dirname( - os.environ['DOCUTILSCONFIG']))) +REPO_ROOT = os.path.join(os.path.dirname(os.path.dirname(os.environ['DOCUTILSCONFIG']))) CONFIGURE_AC = os.path.join(REPO_ROOT, 'configure.ac') with open(CONFIGURE_AC, 'r') as f: contents = f.read() @@ -398,8 +385,7 @@ # get the current branch the local repository is on 
REPO_GIT_DIR = os.path.join(REPO_ROOT, ".git") -git_branch = subprocess.check_output(['git', '--git-dir', REPO_GIT_DIR, - 'rev-parse', '--abbrev-ref', 'HEAD']) +git_branch = subprocess.check_output(['git', '--git-dir', REPO_GIT_DIR, 'rev-parse', '--abbrev-ref', 'HEAD']) def make_github_link(name, rawtext, text, lineno, inliner, options=None, content=None): @@ -429,9 +415,7 @@ def setup(app): - app.add_crossref_type('configfile', 'file', - objname='Configuration file', - indextemplate='pair: %s; Configuration files') + app.add_crossref_type('configfile', 'file', objname='Configuration file', indextemplate='pair: %s; Configuration files') # Very ugly, but as of Sphinx 1.8 it must be done. There is an `override` option to add_crossref_type # but it only applies to the directive, not the role (`file` in this case). If this isn't cleared @@ -440,9 +424,7 @@ # names are disjoint sets. del app.registry.domain_roles['std']['file'] - app.add_crossref_type('logfile', 'file', - objname='Log file', - indextemplate='pair: %s; Log files') + app.add_crossref_type('logfile', 'file', objname='Log file', indextemplate='pair: %s; Log files') rst.roles.register_generic_role('arg', nodes.emphasis) rst.roles.register_generic_role('const', nodes.literal) diff -Nru trafficserver-9.2.3+ds/doc/manpages.py trafficserver-9.2.4+ds/doc/manpages.py --- trafficserver-9.2.3+ds/doc/manpages.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/doc/manpages.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,68 +19,47 @@ man_pages = [ # Add all files in the reference/api directory to the list of manual # pages - ('developer-guide/api/functions/' + filename[:-4], filename.split('.', 1)[0], filename.split('.', 1)[0] + ' API function', None, '3ts') for filename in os.listdir('developer-guide/api/functions/') if filename != 'index.en.rst' and filename.endswith('.rst')] + [ + ( + 'developer-guide/api/functions/' + filename[:-4], filename.split( + '.', 1)[0], filename.split('.', 1)[0] + ' API function', None, '3ts') + for filename in os.listdir('developer-guide/api/functions/') + if filename != 'index.en.rst' and filename.endswith('.rst') +] + [ # Add all files in the appendices/command-line directory to the list # of manual pages - ('appendices/command-line/traffic_cache_tool.en', 'traffic_cache_tool', - u'Traffic Server cache management tool', None, '1'), - ('appendices/command-line/traffic_crashlog.en', 'traffic_crashlog', - u'Traffic Server crash log helper', None, '8'), - ('appendices/command-line/traffic_ctl.en', 'traffic_ctl', - u'Traffic Server command line tool', None, '8'), - ('appendices/command-line/traffic_layout.en', 'traffic_layout', - u'Traffic Server sandbox management tool', None, '1'), - ('appendices/command-line/traffic_logcat.en', 'traffic_logcat', - u'Traffic Server log spooler', None, '8'), - ('appendices/command-line/traffic_logstats.en', 'traffic_logstats', - u'Traffic Server analyzer', None, '8'), - ('appendices/command-line/traffic_manager.en', 'traffic_manager', - u'Traffic Server process manager', None, '8'), - ('appendices/command-line/traffic_server.en', 'traffic_server', - u'Traffic Server', None, '8'), - ('appendices/command-line/traffic_top.en', 'traffic_top', - u'Display Traffic Server statistics', None, '1'), - ('appendices/command-line/traffic_via.en', 'traffic_via', - u'Traffic Server Via header decoder', None, '1'), - ('appendices/command-line/traffic_wccp.en', 'traffic_wccp', - u'Traffic Server WCCP client', None, '1'), - ('appendices/command-line/tspush.en', 'tspush', - u'Push 
objects into the Traffic Server cache', None, '1'), - ('appendices/command-line/tsxs.en', 'tsxs', - u'Traffic Server plugin tool', None, '1'), + ('appendices/command-line/traffic_cache_tool.en', 'traffic_cache_tool', u'Traffic Server cache management tool', None, '1'), + ('appendices/command-line/traffic_crashlog.en', 'traffic_crashlog', u'Traffic Server crash log helper', None, '8'), + ('appendices/command-line/traffic_ctl.en', 'traffic_ctl', u'Traffic Server command line tool', None, '8'), + ('appendices/command-line/traffic_layout.en', 'traffic_layout', u'Traffic Server sandbox management tool', None, '1'), + ('appendices/command-line/traffic_logcat.en', 'traffic_logcat', u'Traffic Server log spooler', None, '8'), + ('appendices/command-line/traffic_logstats.en', 'traffic_logstats', u'Traffic Server analyzer', None, '8'), + ('appendices/command-line/traffic_manager.en', 'traffic_manager', u'Traffic Server process manager', None, '8'), + ('appendices/command-line/traffic_server.en', 'traffic_server', u'Traffic Server', None, '8'), + ('appendices/command-line/traffic_top.en', 'traffic_top', u'Display Traffic Server statistics', None, '1'), + ('appendices/command-line/traffic_via.en', 'traffic_via', u'Traffic Server Via header decoder', None, '1'), + ('appendices/command-line/traffic_wccp.en', 'traffic_wccp', u'Traffic Server WCCP client', None, '1'), + ('appendices/command-line/tspush.en', 'tspush', u'Push objects into the Traffic Server cache', None, '1'), + ('appendices/command-line/tsxs.en', 'tsxs', u'Traffic Server plugin tool', None, '1'), # Add all files in the admin-guide/files directory to the list # of manual pages - ('admin-guide/files/cache.config.en', 'cache.config', - u'Traffic Server cache configuration file', None, '5'), - ('admin-guide/files/hosting.config.en', 'hosting.config', - u'Traffic Server domain hosting configuration file', None, '5'), - ('admin-guide/files/ip_allow.yaml.en', 'ip_allow.yaml', - u'Traffic Server IP access control configuration file', None, '5'), - ('admin-guide/files/logging.yaml.en', 'logging.yaml', - u'Traffic Server logging configuration file', None, '5'), - ('admin-guide/files/parent.config.en', 'parent.config', - u'Traffic Server parent cache configuration file', None, '5'), - ('admin-guide/files/plugin.config.en', 'plugin.config', - u'Traffic Server global plugin configuration file', None, '5'), - ('admin-guide/files/records.config.en', 'records.config', - u'Traffic Server configuration file', None, '5'), - ('admin-guide/files/remap.config.en', 'remap.config', - u'Traffic Server remap rules configuration file', None, '5'), - ('admin-guide/files/sni.yaml.en', 'sni.yaml', - u'Traffic Server sni rules configuration file', None, '5'), - ('admin-guide/files/splitdns.config.en', 'splitdns.config', - u'Traffic Server split DNS configuration file', None, '5'), - ('admin-guide/files/ssl_multicert.config.en', 'ssl_multicert.config', - u'Traffic Server SSL certificate configuration file', None, '5'), - ('admin-guide/files/storage.config.en', 'storage.config', - u'Traffic Server cache storage configuration file', None, '5'), - ('admin-guide/files/strategies.yaml.en', 'strategies.yaml', - u'Traffic Server cache hierarchy configuration file', None, '5'), - ('admin-guide/files/volume.config.en', 'volume.config', - u'Traffic Server cache volume configuration file', None, '5'), - + ('admin-guide/files/cache.config.en', 'cache.config', u'Traffic Server cache configuration file', None, '5'), + ('admin-guide/files/hosting.config.en', 'hosting.config', u'Traffic 
Server domain hosting configuration file', None, '5'), + ('admin-guide/files/ip_allow.yaml.en', 'ip_allow.yaml', u'Traffic Server IP access control configuration file', None, '5'), + ('admin-guide/files/logging.yaml.en', 'logging.yaml', u'Traffic Server logging configuration file', None, '5'), + ('admin-guide/files/parent.config.en', 'parent.config', u'Traffic Server parent cache configuration file', None, '5'), + ('admin-guide/files/plugin.config.en', 'plugin.config', u'Traffic Server global plugin configuration file', None, '5'), + ('admin-guide/files/records.config.en', 'records.config', u'Traffic Server configuration file', None, '5'), + ('admin-guide/files/remap.config.en', 'remap.config', u'Traffic Server remap rules configuration file', None, '5'), + ('admin-guide/files/sni.yaml.en', 'sni.yaml', u'Traffic Server sni rules configuration file', None, '5'), + ('admin-guide/files/splitdns.config.en', 'splitdns.config', u'Traffic Server split DNS configuration file', None, '5'), + ( + 'admin-guide/files/ssl_multicert.config.en', 'ssl_multicert.config', u'Traffic Server SSL certificate configuration file', + None, '5'), + ('admin-guide/files/storage.config.en', 'storage.config', u'Traffic Server cache storage configuration file', None, '5'), + ('admin-guide/files/strategies.yaml.en', 'strategies.yaml', u'Traffic Server cache hierarchy configuration file', None, '5'), + ('admin-guide/files/volume.config.en', 'volume.config', u'Traffic Server cache volume configuration file', None, '5'), ] if __name__ == '__main__': diff -Nru trafficserver-9.2.3+ds/include/tscore/ink_file.h trafficserver-9.2.4+ds/include/tscore/ink_file.h --- trafficserver-9.2.3+ds/include/tscore/ink_file.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/include/tscore/ink_file.h 2024-04-03 15:38:30.000000000 +0000 @@ -50,6 +50,10 @@ #include #endif +#ifdef HAVE_STDINT_H +#include // NOLINT(modernize-deprecated-headers) +#endif + /*===========================================================================* Function Prototypes diff -Nru trafficserver-9.2.3+ds/iocore/aio/AIO.cc trafficserver-9.2.4+ds/iocore/aio/AIO.cc --- trafficserver-9.2.3+ds/iocore/aio/AIO.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/aio/AIO.cc 2024-04-03 15:38:30.000000000 +0000 @@ -53,8 +53,8 @@ RecInt cache_config_threads_per_disk = 12; RecInt api_config_threads_per_disk = 12; -RecRawStatBlock *aio_rsb = nullptr; -Continuation *aio_err_callbck = nullptr; +RecRawStatBlock *aio_rsb = nullptr; +Continuation *aio_err_callback = nullptr; // AIO Stats uint64_t aio_num_read = 0; uint64_t aio_bytes_read = 0; @@ -140,9 +140,9 @@ } void -ink_aio_set_callback(Continuation *callback) +ink_aio_set_err_callback(Continuation *callback) { - aio_err_callbck = callback; + aio_err_callback = callback; } void @@ -401,7 +401,7 @@ res += err; } op->aio_result = res; - ink_assert(op->aio_result == (int64_t)a->aio_nbytes); + ink_assert(op->ok()); } return 1; } diff -Nru trafficserver-9.2.3+ds/iocore/aio/I_AIO.h trafficserver-9.2.4+ds/iocore/aio/I_AIO.h --- trafficserver-9.2.3+ds/iocore/aio/I_AIO.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/aio/I_AIO.h 2024-04-03 15:38:30.000000000 +0000 @@ -137,7 +137,7 @@ void ink_aio_init(ts::ModuleVersion version); int ink_aio_start(); -void ink_aio_set_callback(Continuation *error_callback); +void ink_aio_set_err_callback(Continuation *error_callback); int ink_aio_read(AIOCallback *op, int fromAPI = 0); // fromAPI is a boolean to indicate if this is from a API call such 
as upload proxy feature diff -Nru trafficserver-9.2.3+ds/iocore/aio/P_AIO.h trafficserver-9.2.4+ds/iocore/aio/P_AIO.h --- trafficserver-9.2.3+ds/iocore/aio/P_AIO.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/aio/P_AIO.h 2024-04-03 15:38:30.000000000 +0000 @@ -41,10 +41,10 @@ TS_INLINE int AIOCallback::ok() { - return (off_t)aiocb.aio_nbytes == (off_t)aio_result; + return (aiocb.aio_nbytes == static_cast(aio_result)) && (aio_result >= 0); } -extern Continuation *aio_err_callbck; +extern Continuation *aio_err_callback; #if AIO_MODE == AIO_MODE_NATIVE @@ -110,13 +110,16 @@ { (void)event; (void)data; - if (aio_err_callbck && !ok()) { + if (aio_err_callback && !ok()) { AIOCallback *err_op = new AIOCallbackInternal(); err_op->aiocb.aio_fildes = this->aiocb.aio_fildes; err_op->aiocb.aio_lio_opcode = this->aiocb.aio_lio_opcode; - err_op->mutex = aio_err_callbck->mutex; - err_op->action = aio_err_callbck; - eventProcessor.schedule_imm(err_op); + err_op->mutex = aio_err_callback->mutex; + err_op->action = aio_err_callback; + + // Take this lock in-line because we want to stop other I/O operations on this disk ASAP + SCOPED_MUTEX_LOCK(lock, aio_err_callback->mutex, this_ethread()); + err_op->action.continuation->handleEvent(EVENT_NONE, err_op); } if (!action.cancelled) { action.continuation->handleEvent(AIO_EVENT_DONE, this); diff -Nru trafficserver-9.2.3+ds/iocore/cache/Cache.cc trafficserver-9.2.4+ds/iocore/cache/Cache.cc --- trafficserver-9.2.3+ds/iocore/cache/Cache.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/cache/Cache.cc 2024-04-03 15:38:30.000000000 +0000 @@ -605,7 +605,7 @@ memset(sds, 0, sizeof(Span *) * gndisks); gndisks = 0; - ink_aio_set_callback(new AIO_Callback_handler()); + ink_aio_set_err_callback(new AIO_failure_handler()); config_volumes.read_config_file(); @@ -1335,7 +1335,7 @@ if (event == AIO_EVENT_DONE) { op = static_cast(data); - if (static_cast(op->aio_result) != op->aiocb.aio_nbytes) { + if (!op->ok()) { Warning("unable to clear cache directory '%s'", hash_text.get()); disk->incrErrors(op); fd = -1; @@ -1364,7 +1364,7 @@ AIOCallback *op = static_cast(data); if (event == AIO_EVENT_DONE) { - if (static_cast(op->aio_result) != op->aiocb.aio_nbytes) { + if (!op->ok()) { Note("Directory read failed: clearing cache directory %s", this->hash_text.get()); clear_dir(); return EVENT_DONE; @@ -1459,7 +1459,7 @@ io.aiocb.aio_nbytes = (skip + len) - recover_pos; } } else if (event == AIO_EVENT_DONE) { - if (io.aiocb.aio_nbytes != static_cast(io.aio_result)) { + if (!io.ok()) { Warning("disk read error on recover '%s', clearing", hash_text.get()); disk->incrErrors(&io); goto Lclear; @@ -1722,7 +1722,7 @@ for (auto &i : hf) { ink_assert(op != nullptr); i = static_cast(op->aiocb.aio_buf); - if (static_cast(op->aio_result) != op->aiocb.aio_nbytes) { + if (!op->ok()) { Note("Header read failed: clearing cache directory %s", this->hash_text.get()); clear_dir(); return EVENT_DONE; @@ -2004,7 +2004,7 @@ } int -AIO_Callback_handler::handle_disk_failure(int /* event ATS_UNUSED */, void *data) +AIO_failure_handler::handle_disk_failure(int /* event ATS_UNUSED */, void *data) { /* search for the matching file descriptor */ if (!CacheProcessor::cache_ready) { @@ -2188,6 +2188,11 @@ } else if (is_io_in_progress()) { return EVENT_CONT; } + if (DISK_BAD(vol->disk)) { + io.aio_result = -1; + Warning("Canceling cache read: disk %s is bad.", vol->hash_text.get()); + goto Ldone; + } { MUTEX_TRY_LOCK(lock, vol->mutex, mutex->thread_holding); if 
(!lock.is_locked()) { @@ -2430,7 +2435,7 @@ goto Lcollision; } // check read completed correct FIXME: remove bad vols - if (static_cast(io.aio_result) != io.aiocb.aio_nbytes) { + if (!io.ok()) { goto Ldone; } { diff -Nru trafficserver-9.2.3+ds/iocore/cache/CacheDir.cc trafficserver-9.2.4+ds/iocore/cache/CacheDir.cc --- trafficserver-9.2.3+ds/iocore/cache/CacheDir.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/cache/CacheDir.cc 2024-04-03 15:38:30.000000000 +0000 @@ -1086,7 +1086,7 @@ if (event == AIO_EVENT_DONE) { // AIO Thread - if (io.aio_result != static_cast(io.aiocb.aio_nbytes)) { + if (!io.ok()) { Warning("vol write error during directory sync '%s'", gvol[vol_idx]->hash_text.get()); event = EVENT_NONE; goto Ldone; diff -Nru trafficserver-9.2.3+ds/iocore/cache/CacheDisk.cc trafficserver-9.2.4+ds/iocore/cache/CacheDisk.cc --- trafficserver-9.2.3+ds/iocore/cache/CacheDisk.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/cache/CacheDisk.cc 2024-04-03 15:38:30.000000000 +0000 @@ -147,7 +147,7 @@ { ink_assert(event == AIO_EVENT_DONE); - if (io.aiocb.aio_nbytes != static_cast(io.aio_result)) { + if (!io.ok()) { Warning("Could not clear disk header for disk %s: declaring disk bad", path); incrErrors(&io); SET_DISK_BAD(this); @@ -163,7 +163,7 @@ { ink_assert(event == AIO_EVENT_DONE); - if (io.aiocb.aio_nbytes != static_cast(io.aio_result)) { + if (!io.ok()) { Warning("could not read disk header for disk %s: declaring disk bad", path); // the header could have random values by the AIO read error @@ -237,7 +237,7 @@ { ink_assert(event == AIO_EVENT_DONE); - if (io.aiocb.aio_nbytes != static_cast(io.aio_result)) { + if (!io.ok()) { Warning("Error writing disk header for disk %s:disk bad", path); incrErrors(&io); SET_DISK_BAD(this); diff -Nru trafficserver-9.2.3+ds/iocore/cache/CacheVol.cc trafficserver-9.2.4+ds/iocore/cache/CacheVol.cc --- trafficserver-9.2.3+ds/iocore/cache/CacheVol.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/cache/CacheVol.cc 2024-04-03 15:38:30.000000000 +0000 @@ -202,7 +202,7 @@ goto Lread; } - if (static_cast(io.aio_result) != io.aiocb.aio_nbytes) { + if (!io.ok()) { result = (void *)-ECACHE_READ_FAIL; goto Ldone; } diff -Nru trafficserver-9.2.3+ds/iocore/cache/P_CacheVol.h trafficserver-9.2.4+ds/iocore/cache/P_CacheVol.h --- trafficserver-9.2.3+ds/iocore/cache/P_CacheVol.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/cache/P_CacheVol.h 2024-04-03 15:38:30.000000000 +0000 @@ -278,10 +278,10 @@ ~Vol() override { ats_free(agg_buffer); } }; -struct AIO_Callback_handler : public Continuation { +struct AIO_failure_handler : public Continuation { int handle_disk_failure(int event, void *data); - AIO_Callback_handler() : Continuation(new_ProxyMutex()) { SET_HANDLER(&AIO_Callback_handler::handle_disk_failure); } + AIO_failure_handler() : Continuation(new_ProxyMutex()) { SET_HANDLER(&AIO_failure_handler::handle_disk_failure); } }; struct CacheVol { diff -Nru trafficserver-9.2.3+ds/iocore/cache/test/main.h trafficserver-9.2.4+ds/iocore/cache/test/main.h --- trafficserver-9.2.3+ds/iocore/cache/test/main.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/cache/test/main.h 2024-04-03 15:38:30.000000000 +0000 @@ -191,6 +191,16 @@ build_hdrs(this->info, url); } + ~CacheWriteTest() override + { + if (this->_write_buffer) { + free_MIOBuffer(this->_write_buffer); + this->_write_buffer = nullptr; + } + info.destroy(); + old_info.destroy(); + } + int start_test(int event, 
void *e) override; int write_event(int event, void *e); void fill_data(); @@ -218,6 +228,15 @@ build_hdrs(this->info, url); } + ~CacheReadTest() override + { + if (this->_read_buffer) { + free_MIOBuffer(this->_read_buffer); + this->_read_buffer = nullptr; + } + info.destroy(); + } + int start_test(int event, void *e) override; int read_event(int event, void *e); void do_io_read(size_t size = 0) override; diff -Nru trafficserver-9.2.3+ds/iocore/net/P_SNIActionPerformer.h trafficserver-9.2.4+ds/iocore/net/P_SNIActionPerformer.h --- trafficserver-9.2.3+ds/iocore/net/P_SNIActionPerformer.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/P_SNIActionPerformer.h 2024-04-03 15:38:30.000000000 +0000 @@ -186,6 +186,23 @@ int value = -1; }; +class HTTP2MaxContinuationFramesPerMinute : public ActionItem +{ +public: + HTTP2MaxContinuationFramesPerMinute(int value) : value(value) {} + ~HTTP2MaxContinuationFramesPerMinute() override {} + + int + SNIAction(TLSSNISupport *snis, const Context &ctx) const override + { + snis->hints_from_sni.http2_max_continuation_frames_per_minute = value; + return SSL_TLSEXT_ERR_OK; + } + +private: + int value = -1; +}; + class TunnelDestination : public ActionItem { public: diff -Nru trafficserver-9.2.3+ds/iocore/net/P_UnixNet.h trafficserver-9.2.4+ds/iocore/net/P_UnixNet.h --- trafficserver-9.2.3+ds/iocore/net/P_UnixNet.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/P_UnixNet.h 2024-04-03 15:38:30.000000000 +0000 @@ -23,6 +23,8 @@ #pragma once +#include + #include #include "tscore/ink_platform.h" @@ -327,6 +329,7 @@ void remove_from_keep_alive_queue(NetEvent *ne); bool add_to_active_queue(NetEvent *ne); void remove_from_active_queue(NetEvent *ne); + static int get_additional_accepts(); /// Per process initialization logic. static void init_for_process(); @@ -384,6 +387,13 @@ NetHandler(); private: + // The following settings are used potentially by accept threads. These are + // shared across threads via std::atomic rather than being pulled through a + // TS_EVENT_MGMT_UPDATE event like with the Config settings above because + // accept threads are not always on a standard NET thread with a NetHandler + // that has TS_EVENT_MGMT_UPDATE handling logic. + static std::atomic additional_accepts; + void _close_ne(NetEvent *ne, ink_hrtime now, int &handle_event, int &closed, int &total_idle_time, int &total_idle_count); /// Static method used as the callback for runtime configuration updates. 
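The HTTP2MaxContinuationFramesPerMinute action added above pairs with the new sni.yaml key and the proxy.config.http2.max_continuation_frames_per_minute record (default 120) appearing further down in this diff, so the CONTINUATION-frame rate limit can be set per SNI name as well as globally. A minimal sketch of that override pattern, assuming a uint32_t hint value (the std::optional template arguments are not visible in this extract) and a purely illustrative lookup helper:

    // Sketch only; mirrors the optional hint added to TLSSNISupport.
    #include <cstdint>
    #include <iostream>
    #include <optional>

    struct SniHints {
      std::optional<uint32_t> http2_max_continuation_frames_per_minute;
    };

    // Prefer the per-SNI hint when sni.yaml provides one, otherwise fall back
    // to the global records.config value.
    static uint32_t
    effective_continuation_limit(SniHints const &hints, uint32_t global_default)
    {
      return hints.http2_max_continuation_frames_per_minute.value_or(global_default);
    }

    int
    main()
    {
      SniHints plain;
      SniHints strict;
      strict.http2_max_continuation_frames_per_minute = 10;

      std::cout << effective_continuation_limit(plain, 120) << "\n";  // prints 120
      std::cout << effective_continuation_limit(strict, 120) << "\n"; // prints 10
    }

The real plumbing is in the SSLSNIConfig.cc, TLSSNISupport.h and YamlSNIConfig.* hunks that follow.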
diff -Nru trafficserver-9.2.3+ds/iocore/net/SSLSNIConfig.cc trafficserver-9.2.4+ds/iocore/net/SSLSNIConfig.cc --- trafficserver-9.2.3+ds/iocore/net/SSLSNIConfig.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/SSLSNIConfig.cc 2024-04-03 15:38:30.000000000 +0000 @@ -151,6 +151,10 @@ ai->actions.push_back( std::make_unique(item.http2_max_rst_stream_frames_per_minute.value())); } + if (item.http2_max_continuation_frames_per_minute.has_value()) { + ai->actions.push_back( + std::make_unique(item.http2_max_continuation_frames_per_minute.value())); + } ai->actions.push_back(std::make_unique(item.ip_allow, item.fqdn)); diff -Nru trafficserver-9.2.3+ds/iocore/net/TLSSNISupport.h trafficserver-9.2.4+ds/iocore/net/TLSSNISupport.h --- trafficserver-9.2.3+ds/iocore/net/TLSSNISupport.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/TLSSNISupport.h 2024-04-03 15:38:30.000000000 +0000 @@ -56,6 +56,7 @@ std::optional http2_max_ping_frames_per_minute; std::optional http2_max_priority_frames_per_minute; std::optional http2_max_rst_stream_frames_per_minute; + std::optional http2_max_continuation_frames_per_minute; } hints_from_sni; protected: diff -Nru trafficserver-9.2.3+ds/iocore/net/UnixNet.cc trafficserver-9.2.4+ds/iocore/net/UnixNet.cc --- trafficserver-9.2.3+ds/iocore/net/UnixNet.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/UnixNet.cc 2024-04-03 15:38:30.000000000 +0000 @@ -25,6 +25,8 @@ using namespace std::literals; +std::atomic NetHandler::additional_accepts{0}; + ink_hrtime last_throttle_warning; ink_hrtime last_shedding_warning; int net_connections_throttle; @@ -294,6 +296,9 @@ } else if (name == "proxy.config.net.default_inactivity_timeout"sv) { updated_member = &NetHandler::global_config.default_inactivity_timeout; Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to %" PRId64, data.rec_int); + } else if (name == "proxy.config.net.additional_accepts"sv) { + NetHandler::additional_accepts.store(data.rec_int, std::memory_order_relaxed); + Debug("net_queue", "proxy.config.net.additional_accepts updated to %" PRId64, data.rec_int); } if (updated_member) { @@ -330,12 +335,19 @@ REC_ReadConfigInt32(global_config.keep_alive_no_activity_timeout_in, "proxy.config.net.keep_alive_no_activity_timeout_in"); REC_ReadConfigInt32(global_config.default_inactivity_timeout, "proxy.config.net.default_inactivity_timeout"); + // Atomic configurations. 
+ uint32_t val = 0; + + REC_ReadConfigInt32(val, "proxy.config.net.additional_accepts"); + additional_accepts.store(val, std::memory_order_relaxed); + RecRegisterConfigUpdateCb("proxy.config.net.max_connections_in", update_nethandler_config, nullptr); RecRegisterConfigUpdateCb("proxy.config.net.max_requests_in", update_nethandler_config, nullptr); RecRegisterConfigUpdateCb("proxy.config.net.inactive_threshold_in", update_nethandler_config, nullptr); RecRegisterConfigUpdateCb("proxy.config.net.transaction_no_activity_timeout_in", update_nethandler_config, nullptr); RecRegisterConfigUpdateCb("proxy.config.net.keep_alive_no_activity_timeout_in", update_nethandler_config, nullptr); RecRegisterConfigUpdateCb("proxy.config.net.default_inactivity_timeout", update_nethandler_config, nullptr); + RecRegisterConfigUpdateCb("proxy.config.net.additional_accepts", update_nethandler_config, nullptr); Debug("net_queue", "proxy.config.net.max_connections_in updated to %d", global_config.max_connections_in); Debug("net_queue", "proxy.config.net.max_requests_in updated to %d", global_config.max_requests_in); @@ -345,6 +357,7 @@ Debug("net_queue", "proxy.config.net.keep_alive_no_activity_timeout_in updated to %d", global_config.keep_alive_no_activity_timeout_in); Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to %d", global_config.default_inactivity_timeout); + Debug("net_queue", "proxy.config.net.additional_accepts updated to %d", additional_accepts.load(std::memory_order_relaxed)); } // @@ -782,3 +795,10 @@ --active_queue_size; } } + +int +NetHandler::get_additional_accepts() +{ + int config_value = additional_accepts.load(std::memory_order_relaxed) + 1; + return (config_value > 0 ? config_value : INT32_MAX - 1); +} \ No newline at end of file diff -Nru trafficserver-9.2.3+ds/iocore/net/UnixNetAccept.cc trafficserver-9.2.4+ds/iocore/net/UnixNetAccept.cc --- trafficserver-9.2.3+ds/iocore/net/UnixNetAccept.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/UnixNetAccept.cc 2024-04-03 15:38:30.000000000 +0000 @@ -27,7 +27,6 @@ #include "P_Net.h" using NetAcceptHandler = int (NetAccept::*)(int, void *); -int accept_till_done = 1; // we need to protect naVec since it might be accessed // in different threads at the same time @@ -48,10 +47,11 @@ Event *e = static_cast(ep); int res = 0; int count = 0; - int loop = accept_till_done; UnixNetVConnection *vc = nullptr; Connection con; + int additional_accepts = NetHandler::get_additional_accepts(); + if (!blockable) { if (!MUTEX_TAKE_TRY_LOCK(na->action_->mutex, e->ethread)) { return 0; @@ -83,7 +83,7 @@ goto Ldone; // note: @a con will clean up the socket when it goes out of scope. 
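The UnixNet.cc and UnixNetAccept.cc hunks around here replace the unbounded accept_till_done loop with a per-wakeup budget taken from the new proxy.config.net.additional_accepts record (default -1, see the RecordsConfig.cc hunk below), read through a relaxed atomic load so accept threads never depend on the TS_EVENT_MGMT_UPDATE path. A standalone sketch of the value mapping, assuming an int32_t atomic (the member's template argument is not shown in this extract):

    #include <atomic>
    #include <cstdint>
    #include <iostream>

    // Illustrative stand-in for NetHandler::additional_accepts.
    static std::atomic<int32_t> additional_accepts{-1};

    // Same arithmetic as get_additional_accepts() above: -1 yields an
    // effectively unlimited budget, N >= 0 yields N + 1 accepts per wakeup.
    static int
    per_wakeup_accept_limit()
    {
      int config_value = additional_accepts.load(std::memory_order_relaxed) + 1;
      return (config_value > 0 ? config_value : INT32_MAX - 1);
    }

    int
    main()
    {
      for (int32_t v : {-1, 0, 4}) {
        additional_accepts.store(v, std::memory_order_relaxed);
        std::cout << "additional_accepts=" << v << " -> limit " << per_wakeup_accept_limit() << "\n";
      }
    }

When the budget is exhausted, the accept loops below reschedule themselves with schedule_imm_local() so accepting does not starve other work on the thread.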
} - ++count; + count++; NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1); vc->id = net_next_connection_number(); vc->con.move(con); @@ -124,12 +124,20 @@ vc->mutex = h->mutex; t->schedule_imm(vc); } - } while (loop); + } while (count < additional_accepts); Ldone: if (!blockable) { MUTEX_UNTAKE_LOCK(na->action_->mutex, e->ethread); } + + // if we stop looping as a result of hitting the accept limit, + // resechedule accepting to the end of the thread event queue + // for the goal of fairness between accepting and other work + Debug("iocore_net_accepts", "exited accept loop - count: %d, limit: %d", count, additional_accepts); + if (count >= additional_accepts) { + this_ethread()->schedule_imm_local(na); + } return count; } @@ -285,11 +293,13 @@ NetAccept::do_blocking_accept(EThread *t) { int res = 0; - int loop = accept_till_done; UnixNetVConnection *vc = nullptr; Connection con; con.sock_type = SOCK_STREAM; + int count = 0; + int additional_accepts = NetHandler::get_additional_accepts(); + // do-while for accepting all the connections // added by YTS Team, yamsat do { @@ -340,6 +350,7 @@ return -1; } + count++; NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1); vc->id = net_next_connection_number(); vc->con.move(con); @@ -372,7 +383,7 @@ // Assign NetHandler->mutex to NetVC vc->mutex = h->mutex; localt->schedule_imm(vc); - } while (loop); + } while (count < additional_accepts); return 1; } @@ -428,7 +439,8 @@ con.sock_type = SOCK_STREAM; UnixNetVConnection *vc = nullptr; - int loop = accept_till_done; + int count = 0; + int additional_accepts = NetHandler::get_additional_accepts(); do { socklen_t sz = sizeof(con.addr); @@ -493,6 +505,7 @@ vc = (UnixNetVConnection *)this->getNetProcessor()->allocate_vc(e->ethread); ink_release_assert(vc); + count++; NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1); vc->id = net_next_connection_number(); vc->con.move(con); @@ -528,9 +541,16 @@ SCOPED_MUTEX_LOCK(lock, vc->mutex, e->ethread); vc->handleEvent(EVENT_NONE, nullptr); vc = nullptr; - } while (loop); + } while (count < additional_accepts); Ldone: + // if we stop looping as a result of hitting the accept limit, + // resechedule accepting to the end of the thread event queue + // for the goal of fairness between accepting and other work + Debug("iocore_net_accepts", "exited accept loop - count: %d, limit: %d", count, additional_accepts); + if (count >= additional_accepts) { + this_ethread()->schedule_imm_local(this); + } return EVENT_CONT; Lerror: diff -Nru trafficserver-9.2.3+ds/iocore/net/YamlSNIConfig.cc trafficserver-9.2.4+ds/iocore/net/YamlSNIConfig.cc --- trafficserver-9.2.3+ds/iocore/net/YamlSNIConfig.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/YamlSNIConfig.cc 2024-04-03 15:38:30.000000000 +0000 @@ -148,6 +148,7 @@ TS_http2_max_ping_frames_per_minute, TS_http2_max_priority_frames_per_minute, TS_http2_max_rst_stream_frames_per_minute, + TS_http2_max_continuation_frames_per_minute, TS_ip_allow, #if TS_USE_HELLO_CB || defined(OPENSSL_IS_BORINGSSL) TS_valid_tls_versions_in, @@ -193,6 +194,9 @@ if (node[TS_http2_max_rst_stream_frames_per_minute]) { item.http2_max_rst_stream_frames_per_minute = node[TS_http2_max_rst_stream_frames_per_minute].as(); } + if (node[TS_http2_max_continuation_frames_per_minute]) { + item.http2_max_continuation_frames_per_minute = node[TS_http2_max_continuation_frames_per_minute].as(); + } // enum if (node[TS_verify_client]) { diff -Nru trafficserver-9.2.3+ds/iocore/net/YamlSNIConfig.h 
trafficserver-9.2.4+ds/iocore/net/YamlSNIConfig.h --- trafficserver-9.2.3+ds/iocore/net/YamlSNIConfig.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/iocore/net/YamlSNIConfig.h 2024-04-03 15:38:30.000000000 +0000 @@ -60,6 +60,7 @@ TSDECL(http2_max_ping_frames_per_minute); TSDECL(http2_max_priority_frames_per_minute); TSDECL(http2_max_rst_stream_frames_per_minute); +TSDECL(http2_max_continuation_frames_per_minute); TSDECL(host_sni_policy); #undef TSDECL @@ -94,6 +95,7 @@ std::optional http2_max_ping_frames_per_minute; std::optional http2_max_priority_frames_per_minute; std::optional http2_max_rst_stream_frames_per_minute; + std::optional http2_max_continuation_frames_per_minute; bool tunnel_prewarm_srv = false; uint32_t tunnel_prewarm_min = 0; diff -Nru trafficserver-9.2.3+ds/mgmt/RecordsConfig.cc trafficserver-9.2.4+ds/mgmt/RecordsConfig.cc --- trafficserver-9.2.3+ds/mgmt/RecordsConfig.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/mgmt/RecordsConfig.cc 2024-04-03 15:38:30.000000000 +0000 @@ -759,6 +759,8 @@ //# Net Subsystem //# //############################################################################## + {RECT_CONFIG, "proxy.config.net.additional_accepts", RECD_INT, "-1", RECU_DYNAMIC, RR_NULL, RECC_INT, "^-1|[0-9]+$", RECA_NULL} + , {RECT_CONFIG, "proxy.config.net.connections_throttle", RECD_INT, "30000", RECU_RESTART_TS, RR_REQUIRED, RECC_STR, "^[0-9]+$", RECA_NULL} , {RECT_CONFIG, "proxy.config.net.listen_backlog", RECD_INT, "-1", RECU_NULL, RR_NULL, RECC_NULL, nullptr, RECA_NULL} @@ -1393,6 +1395,8 @@ , {RECT_CONFIG, "proxy.config.http2.max_rst_stream_frames_per_minute", RECD_INT, "200", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} , + {RECT_CONFIG, "proxy.config.http2.max_continuation_frames_per_minute", RECD_INT, "120", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} + , {RECT_CONFIG, "proxy.config.http2.min_avg_window_update", RECD_FLOAT, "2560.0", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL} , {RECT_CONFIG, "proxy.config.http2.header_table_size_limit", RECD_INT, "65536", RECU_DYNAMIC, RR_NULL, RECC_STR, "^[0-9]+$", RECA_NULL} diff -Nru trafficserver-9.2.3+ds/plugins/experimental/header_freq/header_freq.cc trafficserver-9.2.4+ds/plugins/experimental/header_freq/header_freq.cc --- trafficserver-9.2.3+ds/plugins/experimental/header_freq/header_freq.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/header_freq/header_freq.cc 2024-04-03 15:38:30.000000000 +0000 @@ -22,12 +22,19 @@ limitations under the License. */ -#include -#include -#include +#include +#include #include #include +#include +#include +#include +#include #include +#include +#include + +#include "ts/apidefs.h" #include namespace @@ -43,9 +50,14 @@ // debug messages in continuation callbacks const char DEBUG_TAG_HOOK[] = "header_freq.hook"; -// maps from header name to # of times encountered -std::map client_freq; -std::map origin_freq; +// A map from header name to the number of times the header was encountered. +using CountMap_t = std::unordered_map>; +CountMap_t client_freq; +CountMap_t origin_freq; +std::shared_mutex map_mutex; + +// A vector for when we want to sort the map. 
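The header_freq rework that begins here replaces the plain std::map counters with a shared_mutex-guarded map, so the per-transaction hooks no longer rely on a continuation mutex. A condensed sketch of the counting path, assuming an atomic counter as the mapped type (the CountMap_t template arguments are not visible in this extract):

    #include <atomic>
    #include <cstdint>
    #include <iostream>
    #include <shared_mutex>
    #include <string>
    #include <unordered_map>

    static std::unordered_map<std::string, std::atomic<std::uint64_t>> counts;
    static std::shared_mutex counts_mutex;

    static void
    count_header(std::string const &name)
    {
      // Common case: the header was seen before, a shared lock suffices.
      std::shared_lock reader_lock{counts_mutex};
      if (auto it = counts.find(name); it != counts.end()) {
        ++it->second;
        return;
      }
      // First sighting: drop the shared lock and take the exclusive one.
      reader_lock.unlock();
      std::unique_lock writer_lock{counts_mutex};
      ++counts[name]; // correct even if another thread inserted in between
    }

    int
    main()
    {
      count_header("Host");
      count_header("Host");
      count_header("Accept");
      for (auto const &[name, n] : counts) {
        std::cout << name << ": " << n << "\n";
      }
    }

Logging then snapshots the map under the shared lock and sorts the copy by count, which is what Log_Sorted_Map() below does.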
+using CountVector_t = std::vector>; // for traffic_ctl, name is a convenient identifier const char *ctl_tag = PLUGIN_NAME; @@ -53,21 +65,32 @@ const size_t CONTROL_MSG_LOG_LEN = sizeof(CONTROL_MSG_LOG) - 1; void +Log_Sorted_Map(CountMap_t const &map, std::ostream &ss) +{ + CountVector_t sorted_vector; + { + std::shared_lock lock(map_mutex); + sorted_vector = CountVector_t(map.begin(), map.end()); + } + std::sort(sorted_vector.begin(), sorted_vector.end(), [](const auto &a, const auto &b) -> bool { return a.second > b.second; }); + + for (auto const &[header_name, count] : sorted_vector) { + ss << header_name << ": " << count << std::endl; + } +} + +void Log_Data(std::ostream &ss) { ss << std::endl << std::string(100, '+') << std::endl; ss << "CLIENT HEADERS" << std::endl; - for (auto &elem : client_freq) { - ss << elem.first << ": " << elem.second << std::endl; - } + Log_Sorted_Map(client_freq, ss); ss << std::endl; ss << "ORIGIN HEADERS" << std::endl; - for (auto &elem : origin_freq) { - ss << elem.first << ": " << elem.second << std::endl; - } + Log_Sorted_Map(origin_freq, ss); ss << std::string(100, '+') << std::endl; } @@ -80,6 +103,11 @@ CB_Command_Log(TSCont contp, TSEvent event, void *edata) { std::string *command = static_cast(TSContDataGet(contp)); + if (nullptr == command) { + TSError("[%s] Could not get the message argument from the log handler.", PLUGIN_NAME); + return TS_ERROR; + } + std::string::size_type colon_idx; if (std::string::npos != (colon_idx = command->find(':'))) { @@ -94,12 +122,13 @@ if (out.is_open()) { Log_Data(out); } else { - TSError("[%s] Failed to open file '%s' for logging", PLUGIN_NAME, path.c_str()); + TSError("[%s] Failed to open file '%s' for logging: %s", PLUGIN_NAME, path.c_str(), strerror(errno)); } } else { TSError("[%s] Invalid (zero length) file name for logging", PLUGIN_NAME); } } else { + // No filename provided, log to stdout (traffic.out). Log_Data(std::cout); } @@ -114,7 +143,7 @@ * against existing entries is case-insensitive. */ static void -count_all_headers(TSMBuffer &bufp, TSMLoc &hdr_loc, std::map &map) +count_all_headers(TSMBuffer &bufp, TSMLoc &hdr_loc, CountMap_t &map) { TSMLoc hdr, next_hdr; hdr = TSMimeHdrFieldGet(bufp, hdr_loc, 0); @@ -132,7 +161,20 @@ c = tolower(c); } - ++map[str]; + { // For lock scoping. + std::shared_lock reader_lock{map_mutex}; + if (map.find(str) == map.end()) { + // Upgrade the lock to be exclusive. + reader_lock.unlock(); + std::unique_lock ulock{map_mutex}; + // There's a potential race condition here such that another thread may + // have inserted the key while we were upgrading the lock. Regardless, + // incrementing the value here always does the right thing. + ++map[str]; + } else { + ++map[str]; + } + } next_hdr = TSMimeHdrFieldNext(bufp, hdr_loc, hdr); TSHandleMLocRelease(bufp, hdr_loc, hdr); @@ -142,47 +184,73 @@ TSHandleMLocRelease(bufp, TS_NULL_MLOC, hdr_loc); } -/** - * Continuation callback. Invoked to count headers on READ_REQUEST_HDR and - * SEND_RESPONSE_HDR hooks and to log through traffic_ctl's LIFECYCLE_MSG. +/** Handle common logic between the request and response headers. + * @param[in] txnp The transaction pointer for this HTTP message. + * @param[in] event The event that triggered this callback. + * @param[out] freq_map The map to update with the header counts. + * @return TS_SUCCESS if the event was handled successfully, TS_ERROR otherwise. 
*/ int -handle_hook(TSCont contp, TSEvent event, void *edata) +handle_header_event(TSHttpTxn txnp, TSEvent event, CountMap_t &freq_map) { - TSHttpTxn txnp; TSMBuffer bufp; TSMLoc hdr_loc; - int ret_val = 0; + TSReturnCode ret; + + char const *message_type = nullptr; + if (event == TS_EVENT_HTTP_READ_REQUEST_HDR) { + message_type = "request"; + ret = TSHttpTxnClientReqGet(txnp, &bufp, &hdr_loc); + } else { // TS_EVENT_HTTP_SEND_RESPONSE_HDR + message_type = "response"; + ret = TSHttpTxnClientRespGet(txnp, &bufp, &hdr_loc); + } + + if (ret != TS_SUCCESS) { + TSError("[%s] could not get %s headers", PLUGIN_NAME, message_type); + TSHttpTxnReenable(txnp, TS_EVENT_HTTP_ERROR); + return TS_ERROR; + } + + count_all_headers(bufp, hdr_loc, freq_map); + TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); + return TS_SUCCESS; +} + +/** Continuation callback. Invoked to count headers on READ_REQUEST_HDR and + * SEND_RESPONSE_HDR hooks. + */ +int +header_handle_hook(TSCont contp, TSEvent event, void *edata) +{ + TSHttpTxn txnp = reinterpret_cast(edata); + int ret_val = TS_SUCCESS; switch (event) { case TS_EVENT_HTTP_READ_REQUEST_HDR: // count client headers - { TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_HTTP_READ_REQUEST_HDR"); - txnp = reinterpret_cast(edata); - // get the client request so we can loop through the headers - if (TSHttpTxnClientReqGet(txnp, &bufp, &hdr_loc) != TS_SUCCESS) { - TSError("[%s] could not get request headers", PLUGIN_NAME); - TSHttpTxnReenable(txnp, TS_EVENT_HTTP_ERROR); - ret_val = -1; - break; - } - count_all_headers(bufp, hdr_loc, client_freq); - TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); - } break; + ret_val = handle_header_event(txnp, event, client_freq); + break; case TS_EVENT_HTTP_SEND_RESPONSE_HDR: // count origin headers - { TSDebug(DEBUG_TAG_HOOK, "event TS_EVENT_HTTP_SEND_RESPONSE_HDR"); - // get the response so we can loop through the headers - txnp = reinterpret_cast(edata); - if (TSHttpTxnClientRespGet(txnp, &bufp, &hdr_loc) != TS_SUCCESS) { - TSError("[%s] could not get response headers", PLUGIN_NAME); - TSHttpTxnReenable(txnp, TS_EVENT_HTTP_ERROR); - ret_val = -2; - break; - } - count_all_headers(bufp, hdr_loc, origin_freq); - TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE); - } break; + ret_val = handle_header_event(txnp, event, origin_freq); + break; + default: + TSError("[%s] unexpected event in header handler: %d", PLUGIN_NAME, event); + break; + } + + return ret_val; +} + +/** + * Continuation callback. Invoked to handler the LIFE_CYCLE_MSG event to log + * header stats. + */ +int +msg_handle_hook(TSCont contp, TSEvent event, void *edata) +{ + switch (event) { case TS_EVENT_LIFECYCLE_MSG: // Handle external command { TSPluginMsg *msgp = static_cast(edata); @@ -203,15 +271,15 @@ } } } break; - // do nothing in any of the other states default: + TSError("[%s] unexpected event in message handler: %d", PLUGIN_NAME, event); break; } - return ret_val; + return TS_SUCCESS; } -} // namespace +} // anonymous namespace /// Registration entry point for plugin. void @@ -225,15 +293,21 @@ TSError("[%s](%s) Plugin registration failed. \n", PLUGIN_NAME, __FUNCTION__); } - TSCont contp = TSContCreate(handle_hook, TSMutexCreate()); - if (contp == nullptr) { + TSCont header_contp = TSContCreate(header_handle_hook, nullptr); + if (header_contp == nullptr) { // Continuation initialization failed. Unrecoverable, report and exit. 
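The TSPluginInit hunk here splits the old single continuation into one for the header-counting HTTP hooks and one for TS_LIFECYCLE_MSG, and creates both with a null mutex since the shared counters now carry their own lock. A stripped-down sketch of that layout (handler bodies stubbed, plugin registration elided):

    #include <ts/ts.h>

    static int
    header_hook(TSCont /* contp */, TSEvent /* event */, void *edata)
    {
      TSHttpTxn txnp = static_cast<TSHttpTxn>(edata);
      // ... count request or response headers here ...
      TSHttpTxnReenable(txnp, TS_EVENT_HTTP_CONTINUE);
      return TS_SUCCESS;
    }

    static int
    msg_hook(TSCont /* contp */, TSEvent /* event */, void * /* edata */)
    {
      // ... handle the traffic_ctl message and log the counters ...
      return TS_SUCCESS;
    }

    void
    TSPluginInit(int /* argc */, const char ** /* argv */)
    {
      // No continuation mutex: the shared state is guarded by its own shared_mutex.
      TSCont header_contp = TSContCreate(header_hook, nullptr);
      TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, header_contp);
      TSHttpHookAdd(TS_HTTP_SEND_RESPONSE_HDR_HOOK, header_contp);

      TSCont msg_contp = TSContCreate(msg_hook, nullptr);
      TSLifecycleHookAdd(TS_LIFECYCLE_MSG_HOOK, msg_contp);
    }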
- TSError("[%s](%s) could not create continuation", PLUGIN_NAME, __FUNCTION__); + TSError("[%s](%s) could not create the header handler continuation", PLUGIN_NAME, __FUNCTION__); + abort(); + } + // Continuation initialization succeeded + TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, header_contp); + TSHttpHookAdd(TS_HTTP_SEND_RESPONSE_HDR_HOOK, header_contp); + + TSCont msg_contp = TSContCreate(msg_handle_hook, nullptr); + if (msg_contp == nullptr) { + // Continuation initialization failed. Unrecoverable, report and exit. + TSError("[%s](%s) could not create the message handler continuation", PLUGIN_NAME, __FUNCTION__); abort(); - } else { - // Continuation initialization succeeded - TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, contp); - TSHttpHookAdd(TS_HTTP_SEND_RESPONSE_HDR_HOOK, contp); - TSLifecycleHookAdd(TS_LIFECYCLE_MSG_HOOK, contp); } + TSLifecycleHookAdd(TS_LIFECYCLE_MSG_HOOK, msg_contp); } diff -Nru trafficserver-9.2.3+ds/plugins/experimental/slice/Config.h trafficserver-9.2.4+ds/plugins/experimental/slice/Config.h --- trafficserver-9.2.3+ds/plugins/experimental/slice/Config.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/slice/Config.h 2024-04-03 15:38:30.000000000 +0000 @@ -45,9 +45,8 @@ int m_paceerrsecs{0}; // -1 disable logging, 0 no pacing, max 60s int m_prefetchcount{0}; // 0 disables prefetching enum RefType { First, Relative }; - RefType m_reftype{First}; // reference slice is relative to request - const char *m_method_type{nullptr}; // type of header request - bool m_head_strip_range{false}; // strip range header for head requests + RefType m_reftype{First}; // reference slice is relative to request + bool m_head_strip_range{false}; // strip range header for head requests std::string m_skip_header; std::string m_crr_ims_header; @@ -71,13 +70,6 @@ return None != m_regex_type; } - // Check if response only expects header - bool - onlyHeader() const - { - return (m_method_type == TS_HTTP_METHOD_HEAD || m_method_type == TS_HTTP_METHOD_PURGE); - } - // If no null reg, true, otherwise check against regex bool matchesRegex(char const *const url, int const urllen) const; diff -Nru trafficserver-9.2.3+ds/plugins/experimental/slice/Data.h trafficserver-9.2.4+ds/plugins/experimental/slice/Data.h --- trafficserver-9.2.3+ds/plugins/experimental/slice/Data.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/slice/Data.h 2024-04-03 15:38:30.000000000 +0000 @@ -71,6 +71,8 @@ TSHttpStatus m_statustype{TS_HTTP_STATUS_NONE}; // 200 or 206 + const char *m_method_type{nullptr}; // type of header request + Range m_req_range; // converted to half open interval int64_t m_blocknum{-1}; // block number to work on, -1 bad/stop @@ -108,6 +110,13 @@ m_lastmodified[0] = '\0'; } + // Check if response only expects header + bool + onlyHeader() const + { + return (m_method_type == TS_HTTP_METHOD_HEAD || m_method_type == TS_HTTP_METHOD_PURGE); + } + ~Data() { if (nullptr != m_urlbuf) { diff -Nru trafficserver-9.2.3+ds/plugins/experimental/slice/server.cc trafficserver-9.2.4+ds/plugins/experimental/slice/server.cc --- trafficserver-9.2.3+ds/plugins/experimental/slice/server.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/slice/server.cc 2024-04-03 15:38:30.000000000 +0000 @@ -111,7 +111,7 @@ // Should run TSVIONSetBytes(output_io, hlen + bodybytes); int64_t const hlen = TSHttpHdrLengthGet(header.m_buffer, header.m_lochdr); int64_t const clen = contentLengthFrom(header); - if (TS_HTTP_STATUS_OK 
== header.status() && data->m_config->onlyHeader()) { + if (TS_HTTP_STATUS_OK == header.status() && data->onlyHeader()) { DEBUG_LOG("HEAD/PURGE request stripped Range header: expects 200"); data->m_bytestosend = hlen; data->m_blockexpected = 0; @@ -220,7 +220,7 @@ int const hbytes = TSHttpHdrLengthGet(header.m_buffer, header.m_lochdr); // HEAD request only sends header - if (data->m_config->onlyHeader()) { + if (data->onlyHeader()) { data->m_bytestosend = hbytes; data->m_blockexpected = 0; } else { @@ -364,7 +364,7 @@ switch (header.status()) { case TS_HTTP_STATUS_NOT_FOUND: - if (data->m_config->onlyHeader()) { + if (data->onlyHeader()) { return false; } // need to reissue reference slice @@ -374,7 +374,7 @@ case TS_HTTP_STATUS_PARTIAL_CONTENT: break; default: - if (data->m_config->onlyHeader() && header.status() == TS_HTTP_STATUS_OK) { + if (data->onlyHeader() && header.status() == TS_HTTP_STATUS_OK) { return true; } DEBUG_LOG("Non 206/404 internal block response encountered"); @@ -640,7 +640,7 @@ // corner condition, good source header + 0 length aborted content // results in no header being read, just an EOS. // trying to delete the upstream will crash ATS (??) - if (0 == data->m_blockexpected && !data->m_config->onlyHeader()) { + if (0 == data->m_blockexpected && !data->onlyHeader()) { shutdown(contp, data); // this will crash if first block return; } @@ -673,12 +673,15 @@ // continue processing blocks if more requests need to be made // HEAD requests only has one slice block if (data->m_req_range.blockIsInside(data->m_config->m_blockbytes, data->m_blocknum) && - data->m_config->m_method_type != TS_HTTP_METHOD_HEAD) { + data->m_method_type != TS_HTTP_METHOD_HEAD) { // Don't immediately request the next slice if the client // isn't keeping up - bool start_next_block = true; - if (data->m_dnstream.m_write.isOpen()) { + bool start_next_block = false; + if (data->m_method_type == TS_HTTP_METHOD_PURGE) { + // for PURGE requests, clients won't request more data (no body content) + start_next_block = true; + } else if (data->m_dnstream.m_write.isOpen()) { // check throttle condition TSVIO const output_vio = data->m_dnstream.m_write.m_vio; int64_t const output_done = TSVIONDoneGet(output_vio); @@ -686,10 +689,10 @@ int64_t const threshout = data->m_config->m_blockbytes; int64_t const buffered = output_sent - output_done; - // for PURGE requests, clients won't request more data (no body content) - if (threshout < buffered && !data->m_config->onlyHeader()) { - start_next_block = false; + if (threshout < buffered) { DEBUG_LOG("%p handle_server_resp: throttling %" PRId64, data, buffered); + } else { + start_next_block = true; } } if (start_next_block) { diff -Nru trafficserver-9.2.3+ds/plugins/experimental/slice/slice.cc trafficserver-9.2.4+ds/plugins/experimental/slice/slice.cc --- trafficserver-9.2.3+ds/plugins/experimental/slice/slice.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/slice/slice.cc 2024-04-03 15:38:30.000000000 +0000 @@ -50,9 +50,6 @@ return false; } - // set header method config to only expect header response - config->m_method_type = header.method(); - if (config->hasRegex()) { int urllen = 0; char *const urlstr = TSHttpTxnEffectiveUrlStringGet(txnp, &urllen); @@ -84,7 +81,8 @@ TSAssert(nullptr != config); Data *const data = new Data(config); - data->m_txnp = txnp; + data->m_method_type = header.method(); + data->m_txnp = txnp; // set up feedback connect if (AF_INET == ip->sa_family) { diff -Nru 
trafficserver-9.2.3+ds/plugins/experimental/slice/util.cc trafficserver-9.2.4+ds/plugins/experimental/slice/util.cc --- trafficserver-9.2.3+ds/plugins/experimental/slice/util.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/slice/util.cc 2024-04-03 15:38:30.000000000 +0000 @@ -80,7 +80,7 @@ HttpHeader header(data->m_req_hdrmgr.m_buffer, data->m_req_hdrmgr.m_lochdr); // if configured, remove range header from head requests - if (data->m_config->m_method_type == TS_HTTP_METHOD_HEAD && data->m_config->m_head_strip_range) { + if (data->m_method_type == TS_HTTP_METHOD_HEAD && data->m_config->m_head_strip_range) { header.removeKey(TS_MIME_FIELD_RANGE, TS_MIME_LEN_RANGE); } else { // add/set sub range key and add slicer tag diff -Nru trafficserver-9.2.3+ds/plugins/experimental/ssl_session_reuse/tests/plug-load.test.py trafficserver-9.2.4+ds/plugins/experimental/ssl_session_reuse/tests/plug-load.test.py --- trafficserver-9.2.3+ds/plugins/experimental/ssl_session_reuse/tests/plug-load.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/ssl_session_reuse/tests/plug-load.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -15,6 +15,7 @@ # limitations under the License. import os + Test.Summary = ''' Test a basic remap of a http connection ''' @@ -40,12 +41,8 @@ 'proxy.config.diags.debug.tags': f'{pluginName}', }) -ts.Disk.plugin_config.AddLine( - f'# {path}/{pluginName}.so {configFile}' -) -ts.Disk.remap_config.AddLine( - f'map http://www.example.com http://127.0.0.1:{server.Variables.Port}' -) +ts.Disk.plugin_config.AddLine(f'# {path}/{pluginName}.so {configFile}') +ts.Disk.remap_config.AddLine(f'map http://www.example.com http://127.0.0.1:{server.Variables.Port}') goldFile = os.path.join(Test.RunDirectory, f"{pluginName}.gold") with open(goldFile, 'w+') as jf: diff -Nru trafficserver-9.2.3+ds/plugins/experimental/traffic_dump/json_utils.cc trafficserver-9.2.4+ds/plugins/experimental/traffic_dump/json_utils.cc --- trafficserver-9.2.3+ds/plugins/experimental/traffic_dump/json_utils.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/traffic_dump/json_utils.cc 2024-04-03 15:38:30.000000000 +0000 @@ -115,8 +115,10 @@ } default: { if (c <= '\x1f') { + auto const original_flags = jsonfile.flags(); write_buffered_context(buf, prevIdx, idx, jsonfile); jsonfile << "\\u" << std::hex << std::setw(4) << std::setfill('0') << static_cast(c); + jsonfile.flags(original_flags); } break; // else: The character does not need to be escaped. Do not call diff -Nru trafficserver-9.2.3+ds/plugins/experimental/traffic_dump/post_process.py trafficserver-9.2.4+ds/plugins/experimental/traffic_dump/post_process.py --- trafficserver-9.2.3+ds/plugins/experimental/traffic_dump/post_process.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/traffic_dump/post_process.py 2024-04-03 15:38:30.000000000 +0000 @@ -35,7 +35,6 @@ client and, by default, formats the output files with human readable spacing. 
''' - # Base replay file template with basic elements TEMPLATE = json.loads('{"meta": {"version":"1.0"},"sessions":[]}') @@ -264,11 +263,9 @@ if not connection_time: connection_time = session['start-time'] if connection_time: - logging.debug("Omitting session in %s with connection-time: %d: %s", - replay_file, session['connection-time'], e) + logging.debug("Omitting session in %s with connection-time: %d: %s", replay_file, session['connection-time'], e) else: - logging.debug("Omitting a session in %s, could not find a connection time: %s", - replay_file, e) + logging.debug("Omitting a session in %s, could not find a connection time: %s", replay_file, e) continue sessions.append(session) session_count += 1 @@ -335,24 +332,33 @@ ''' parser = argparse.ArgumentParser(description=description) - parser.add_argument("in_dir", type=str, - help='''The input directory of traffic_dump replay + parser.add_argument( + "in_dir", + type=str, + help='''The input directory of traffic_dump replay files. The expectation is that this will contain sub-directories that themselves contain replay files. This is written to accommodate the directory populated by traffic_dump via the --logdir option.''') - parser.add_argument("out_dir", type=str, - help="The output directory of post processed replay files.") - parser.add_argument("-n", "--num_sessions", type=int, default=10, - help='''The maximum number of sessions merged into + parser.add_argument("out_dir", type=str, help="The output directory of post processed replay files.") + parser.add_argument( + "-n", + "--num_sessions", + type=int, + default=10, + help='''The maximum number of sessions merged into single replay output files. The default is 10.''') - parser.add_argument("--no-human-readable", action="store_true", - help='''By default, post processor will generate replay + parser.add_argument( + "--no-human-readable", + action="store_true", + help='''By default, post processor will generate replay files that are spaced out in a human readable format. This turns off that behavior and leaves the files as single-line entries.''') - parser.add_argument("--no-fabricate-proxy-requests", action="store_true", - help='''By default, post processor will fabricate proxy + parser.add_argument( + "--no-fabricate-proxy-requests", + action="store_true", + help='''By default, post processor will fabricate proxy requests and server responses for transactions served out of the proxy. Presumably in replay conditions, these fabricated requests and responses will not hurt @@ -362,10 +368,8 @@ the server will not know how to reply to these requests. Using this option turns off this fabrication behavior.''') - parser.add_argument("-j", "--num_threads", type=int, default=32, - help='''The maximum number of threads to use.''') - parser.add_argument("-d", "--debug", action="store_true", - help="Enable debug level logging.") + parser.add_argument("-j", "--num_threads", type=int, default=32, help='''The maximum number of threads to use.''') + parser.add_argument("-d", "--debug", action="store_true", help="Enable debug level logging.") return parser.parse_args() @@ -389,10 +393,11 @@ # Start up the threads. 
for _ in range(nthreads): - t = Thread(target=post_process, - args=(args.in_dir, subdir_q, args.out_dir, - args.num_sessions, args.no_human_readable, - not args.no_fabricate_proxy_requests, cnt_q)) + t = Thread( + target=post_process, + args=( + args.in_dir, subdir_q, args.out_dir, args.num_sessions, args.no_human_readable, + not args.no_fabricate_proxy_requests, cnt_q)) t.start() threads.append(t) diff -Nru trafficserver-9.2.3+ds/plugins/experimental/uri_signing/python_signer/uri_signer.py trafficserver-9.2.4+ds/plugins/experimental/uri_signing/python_signer/uri_signer.py --- trafficserver-9.2.3+ds/plugins/experimental/uri_signing/python_signer/uri_signer.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/experimental/uri_signing/python_signer/uri_signer.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,12 +27,8 @@ def main(): parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', - help="Configuration File", - required=True) - parser.add_argument('-u', '--uri', - help="URI to sign", - required=True) + parser.add_argument('-c', '--config', help="Configuration File", required=True) + parser.add_argument('-u', '--uri', help="URI to sign", required=True) # helpers parser.add_argument('--key_index', type=int, nargs=1) diff -Nru trafficserver-9.2.3+ds/plugins/header_rewrite/operators.cc trafficserver-9.2.4+ds/plugins/header_rewrite/operators.cc --- trafficserver-9.2.3+ds/plugins/header_rewrite/operators.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/header_rewrite/operators.cc 2024-04-03 15:38:30.000000000 +0000 @@ -691,7 +691,7 @@ void OperatorSetBody::initialize_hooks() { - add_allowed_hook(TS_HTTP_READ_RESPONSE_HDR_HOOK); + add_allowed_hook(TS_REMAP_PSEUDO_HOOK); add_allowed_hook(TS_HTTP_SEND_RESPONSE_HDR_HOOK); } diff -Nru trafficserver-9.2.3+ds/plugins/s3_auth/s3_auth.cc trafficserver-9.2.4+ds/plugins/s3_auth/s3_auth.cc --- trafficserver-9.2.3+ds/plugins/s3_auth/s3_auth.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/plugins/s3_auth/s3_auth.cc 2024-04-03 15:38:30.000000000 +0000 @@ -1050,7 +1050,9 @@ config_reloader(TSCont cont, TSEvent event, void *edata) { TSDebug(PLUGIN_NAME, "reloading configs"); - S3Config *s3 = static_cast(TSContDataGet(cont)); + S3Config *s3 = static_cast(TSContDataGet(cont)); + s3->check_current_action(edata); + S3Config *file_config = gConfCache.get(s3->conf_fname()); if (!file_config || !file_config->valid()) { @@ -1061,7 +1063,6 @@ { std::unique_lock lock(s3->reload_mutex); s3->copy_changes_from(file_config); - s3->check_current_action(edata); } if (s3->expiration() == 0) { diff -Nru trafficserver-9.2.3+ds/proxy/Main.h trafficserver-9.2.4+ds/proxy/Main.h --- trafficserver-9.2.3+ds/proxy/Main.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/Main.h 2024-04-03 15:38:30.000000000 +0000 @@ -34,8 +34,6 @@ // Global Data // // Global Configuration - -extern int accept_till_done; extern int auto_clear_hostdb_flag; // diff -Nru trafficserver-9.2.3+ds/proxy/ParentSelection.cc trafficserver-9.2.4+ds/proxy/ParentSelection.cc --- trafficserver-9.2.3+ds/proxy/ParentSelection.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/ParentSelection.cc 2024-04-03 15:38:30.000000000 +0000 @@ -775,7 +775,7 @@ modulePrefix, line_num); delete unavailable_server_retry_responses; unavailable_server_retry_responses = nullptr; - } else if (unavailable_server_retry_responses == nullptr && (parent_retry & PARENT_RETRY_UNAVAILABLE_SERVER)) { + } else if 
(unavailable_server_retry_responses == nullptr && parent_retry) { // initialize UnavailableServerResponseCodes to the default value if unavailable_server_retry is enabled. Warning("%s initializing UnavailableServerResponseCodes on line %d to 503 default.", modulePrefix, line_num); unavailable_server_retry_responses = new UnavailableServerResponseCodes(nullptr); @@ -787,7 +787,7 @@ line_num); delete simple_server_retry_responses; simple_server_retry_responses = nullptr; - } else if (simple_server_retry_responses == nullptr && (parent_retry & PARENT_RETRY_SIMPLE)) { + } else if (simple_server_retry_responses == nullptr && parent_retry) { // initialize simple server respones codes to the default value if simple_retry is enabled. Warning("%s initializing SimpleRetryResponseCodes on line %d to 404 default.", modulePrefix, line_num); simple_server_retry_responses = new SimpleRetryResponseCodes(nullptr); @@ -860,6 +860,7 @@ ats_free(secondary_parents); delete selection_strategy; delete unavailable_server_retry_responses; + delete simple_server_retry_responses; } void diff -Nru trafficserver-9.2.3+ds/proxy/Plugin.cc trafficserver-9.2.4+ds/proxy/Plugin.cc --- trafficserver-9.2.3+ds/proxy/Plugin.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/Plugin.cc 2024-04-03 15:38:30.000000000 +0000 @@ -114,6 +114,8 @@ if (!init) { error.assign("unable to find TSPluginInit function in '").append(path).append("': ").append(dlerror()); Error("%s", error.c_str()); + dlclose(handle); + handle = nullptr; return false; } diff -Nru trafficserver-9.2.3+ds/proxy/http/HttpTransact.cc trafficserver-9.2.4+ds/proxy/http/HttpTransact.cc --- trafficserver-9.2.3+ds/proxy/http/HttpTransact.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http/HttpTransact.cc 2024-04-03 15:38:30.000000000 +0000 @@ -3692,7 +3692,7 @@ s->current.attempts++; // Are we done with this particular parent? 
- if ((s->current.attempts - 1) % s->txn_conf->per_parent_connect_attempts != 0) { + if (s->current.attempts % s->txn_conf->per_parent_connect_attempts != 0) { // No we are not done with this parent so retry HTTP_INCREMENT_DYN_STAT(http_total_parent_switches_stat); s->next_action = how_to_open_connection(s); diff -Nru trafficserver-9.2.3+ds/proxy/http/HttpTunnel.cc trafficserver-9.2.4+ds/proxy/http/HttpTunnel.cc --- trafficserver-9.2.3+ds/proxy/http/HttpTunnel.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http/HttpTunnel.cc 2024-04-03 15:38:30.000000000 +0000 @@ -153,8 +153,9 @@ } } else { // We are done parsing size - if (num_digits == 0 || running_sum < 0) { - // Bogus chunk size + if ((num_digits == 0 || running_sum < 0) || /* Bogus chunk size */ + (!ParseRules::is_wslfcr(*tmp) && *tmp != ';') /* Unexpected character */ + ) { state = CHUNK_READ_ERROR; done = true; break; @@ -171,10 +172,16 @@ break; } } else if (state == CHUNK_READ_SIZE_START) { - if (ParseRules::is_lf(*tmp)) { + if (ParseRules::is_cr(*tmp)) { + // Skip it + } else if (ParseRules::is_lf(*tmp) && + bytes_used <= 2) { // bytes_used should be 2 if it's CRLF, but permit a single LF as well running_sum = 0; num_digits = 0; state = CHUNK_READ_SIZE; + } else { // Unexpected character + state = CHUNK_READ_ERROR; + done = true; } } tmp++; diff -Nru trafficserver-9.2.3+ds/proxy/http/remap/unit-tests/test_NextHopConsistentHash.cc trafficserver-9.2.4+ds/proxy/http/remap/unit-tests/test_NextHopConsistentHash.cc --- trafficserver-9.2.3+ds/proxy/http/remap/unit-tests/test_NextHopConsistentHash.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http/remap/unit-tests/test_NextHopConsistentHash.cc 2024-04-03 15:38:30.000000000 +0000 @@ -251,29 +251,37 @@ strategy->markNextHop(txnp, result->hostname, result->port, NH_MARK_DOWN); // fourth request - result->reset(); - build_request(20004, &sm, nullptr, "rabbit.net", nullptr); - strategy->findNextHop(txnp); - REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); - CHECK(strcmp(result->hostname, "s1.bar.com") == 0); + { + time_t now = time(nullptr) - 1; ///< make sure down hosts are not retryable + result->reset(); + build_request(20004, &sm, nullptr, "rabbit.net", nullptr); + strategy->findNextHop(txnp, nullptr, now); + REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); + CHECK(strcmp(result->hostname, "s1.bar.com") == 0); + } // mark down s1.bar.com strategy->markNextHop(txnp, result->hostname, result->port, NH_MARK_DOWN); // fifth request - result->reset(); - build_request(20005, &sm, nullptr, "rabbit.net/asset1", nullptr); - strategy->findNextHop(txnp); - REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); - CHECK(strcmp(result->hostname, "q1.bar.com") == 0); + { + time_t now = time(nullptr) - 1; ///< make sure down hosts are not retryable + result->reset(); + build_request(20005, &sm, nullptr, "rabbit.net/asset1", nullptr); + strategy->findNextHop(txnp, nullptr, now); + REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); + CHECK(strcmp(result->hostname, "q1.bar.com") == 0); + } // sixth request - wait and p1 should now become available - time_t now = time(nullptr) + 5; - result->reset(); - build_request(20006, &sm, nullptr, "rabbit.net", nullptr); - strategy->findNextHop(txnp, nullptr, now); - REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); - CHECK(strcmp(result->hostname, "p1.foo.com") == 0); + { + time_t now = time(nullptr) + 5; + result->reset(); + build_request(20006, &sm, 
nullptr, "rabbit.net", nullptr); + strategy->findNextHop(txnp, nullptr, now); + REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); + CHECK(strcmp(result->hostname, "p1.foo.com") == 0); + } } // free up request resources. br_destroy(sm); @@ -602,19 +610,24 @@ // mark it down strategy->markNextHop(txnp, result->hostname, result->port, NH_MARK_DOWN); // seventh request - new request with all hosts down and go_direct is false. - result->reset(); - build_request(30007, &sm, nullptr, "bunny.net/asset4", nullptr); - strategy->findNextHop(txnp); - REQUIRE(result->result == ParentResultType::PARENT_FAIL); - CHECK(result->hostname == nullptr); + { + time_t now = time(nullptr) - 1; ///< make sure down hosts are not retryable + result->reset(); + build_request(30007, &sm, nullptr, "bunny.net/asset4", nullptr); + strategy->findNextHop(txnp, nullptr, now); + REQUIRE(result->result == ParentResultType::PARENT_FAIL); + CHECK(result->hostname == nullptr); + } // eighth request - retry after waiting for the retry window to expire. - time_t now = time(nullptr) + 5; - result->reset(); - build_request(30008, &sm, nullptr, "bunny.net/asset4", nullptr); - strategy->findNextHop(txnp, nullptr, now); - REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); - CHECK(strcmp(result->hostname, "c2.foo.com") == 0); + { + time_t now = time(nullptr) + 5; + result->reset(); + build_request(30008, &sm, nullptr, "bunny.net/asset4", nullptr); + strategy->findNextHop(txnp, nullptr, now); + REQUIRE(result->result == ParentResultType::PARENT_SPECIFIED); + CHECK(strcmp(result->hostname, "c2.foo.com") == 0); + } } // free up request resources. br_destroy(sm); diff -Nru trafficserver-9.2.3+ds/proxy/http2/HTTP2.cc trafficserver-9.2.4+ds/proxy/http2/HTTP2.cc --- trafficserver-9.2.3+ds/proxy/http2/HTTP2.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http2/HTTP2.cc 2024-04-03 15:38:30.000000000 +0000 @@ -85,6 +85,8 @@ "proxy.process.http2.max_priority_frames_per_minute_exceeded"; static const char *const HTTP2_STAT_MAX_RST_STREAM_FRAMES_PER_MINUTE_EXCEEDED_NAME = "proxy.process.http2.max_rst_stream_frames_per_minute_exceeded"; +static const char *const HTTP2_STAT_MAX_CONTINUATION_FRAMES_PER_MINUTE_EXCEEDED_NAME = + "proxy.process.http2.max_continuation_frames_per_minute_exceeded"; static const char *const HTTP2_STAT_INSUFFICIENT_AVG_WINDOW_UPDATE_NAME = "proxy.process.http2.insufficient_avg_window_update"; static const char *const HTTP2_STAT_MAX_CONCURRENT_STREAMS_EXCEEDED_IN_NAME = "proxy.process.http2.max_concurrent_streams_exceeded_in"; @@ -798,36 +800,37 @@ } // Initialize this subsystem with librecords configs (for now) -uint32_t Http2::max_concurrent_streams_in = 100; -uint32_t Http2::min_concurrent_streams_in = 10; -uint32_t Http2::max_active_streams_in = 0; -bool Http2::throttling = false; -uint32_t Http2::stream_priority_enabled = 0; -uint32_t Http2::initial_window_size = 65535; -uint32_t Http2::max_frame_size = 16384; -uint32_t Http2::header_table_size = 4096; -uint32_t Http2::max_header_list_size = 4294967295; -uint32_t Http2::accept_no_activity_timeout = 120; -uint32_t Http2::no_activity_timeout_in = 120; -uint32_t Http2::active_timeout_in = 0; -uint32_t Http2::push_diary_size = 256; -uint32_t Http2::zombie_timeout_in = 0; -float Http2::stream_error_rate_threshold = 0.1; -uint32_t Http2::stream_error_sampling_threshold = 10; -uint32_t Http2::max_settings_per_frame = 7; -uint32_t Http2::max_settings_per_minute = 14; -uint32_t Http2::max_settings_frames_per_minute = 14; -uint32_t 
Http2::max_ping_frames_per_minute       = 60; -uint32_t Http2::max_priority_frames_per_minute   = 120; -uint32_t Http2::max_rst_stream_frames_per_minute = 200; -float    Http2::min_avg_window_update            = 2560.0; -uint32_t Http2::con_slow_log_threshold           = 0; -uint32_t Http2::stream_slow_log_threshold        = 0; -uint32_t Http2::header_table_size_limit          = 65536; -uint32_t Http2::write_buffer_block_size          = 262144; -float    Http2::write_size_threshold             = 0.5; -uint32_t Http2::write_time_threshold             = 100; -uint32_t Http2::buffer_water_mark                = 0; +uint32_t Http2::max_concurrent_streams_in          = 100; +uint32_t Http2::min_concurrent_streams_in          = 10; +uint32_t Http2::max_active_streams_in              = 0; +bool     Http2::throttling                         = false; +uint32_t Http2::stream_priority_enabled            = 0; +uint32_t Http2::initial_window_size                = 65535; +uint32_t Http2::max_frame_size                     = 16384; +uint32_t Http2::header_table_size                  = 4096; +uint32_t Http2::max_header_list_size               = 4294967295; +uint32_t Http2::accept_no_activity_timeout         = 120; +uint32_t Http2::no_activity_timeout_in             = 120; +uint32_t Http2::active_timeout_in                  = 0; +uint32_t Http2::push_diary_size                    = 256; +uint32_t Http2::zombie_timeout_in                  = 0; +float    Http2::stream_error_rate_threshold        = 0.1; +uint32_t Http2::stream_error_sampling_threshold    = 10; +uint32_t Http2::max_settings_per_frame             = 7; +uint32_t Http2::max_settings_per_minute            = 14; +uint32_t Http2::max_settings_frames_per_minute     = 14; +uint32_t Http2::max_ping_frames_per_minute         = 60; +uint32_t Http2::max_priority_frames_per_minute     = 120; +uint32_t Http2::max_rst_stream_frames_per_minute   = 200; +uint32_t Http2::max_continuation_frames_per_minute = 120; +float    Http2::min_avg_window_update              = 2560.0; +uint32_t Http2::con_slow_log_threshold             = 0; +uint32_t Http2::stream_slow_log_threshold          = 0; +uint32_t Http2::header_table_size_limit            = 65536; +uint32_t Http2::write_buffer_block_size            = 262144; +float    Http2::write_size_threshold               = 0.5; +uint32_t Http2::write_time_threshold               = 100; +uint32_t Http2::buffer_water_mark                  = 0; void Http2::init() @@ -853,6 +856,7 @@ REC_EstablishStaticConfigInt32U(max_ping_frames_per_minute, "proxy.config.http2.max_ping_frames_per_minute"); REC_EstablishStaticConfigInt32U(max_priority_frames_per_minute, "proxy.config.http2.max_priority_frames_per_minute"); REC_EstablishStaticConfigInt32U(max_rst_stream_frames_per_minute, "proxy.config.http2.max_rst_stream_frames_per_minute"); + REC_EstablishStaticConfigInt32U(max_continuation_frames_per_minute, "proxy.config.http2.max_continuation_frames_per_minute"); REC_EstablishStaticConfigFloat(min_avg_window_update, "proxy.config.http2.min_avg_window_update"); REC_EstablishStaticConfigInt32U(con_slow_log_threshold, "proxy.config.http2.connection.slow.log.threshold"); REC_EstablishStaticConfigInt32U(stream_slow_log_threshold, "proxy.config.http2.stream.slow.log.threshold"); @@ -923,6 +927,8 @@ static_cast<int>(HTTP2_STAT_MAX_PRIORITY_FRAMES_PER_MINUTE_EXCEEDED), RecRawStatSyncSum); RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_MAX_RST_STREAM_FRAMES_PER_MINUTE_EXCEEDED_NAME, RECD_INT, RECP_PERSISTENT, static_cast<int>(HTTP2_STAT_MAX_RST_STREAM_FRAMES_PER_MINUTE_EXCEEDED), RecRawStatSyncSum); + RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_MAX_CONTINUATION_FRAMES_PER_MINUTE_EXCEEDED_NAME, RECD_INT, + RECP_PERSISTENT, static_cast<int>(HTTP2_STAT_MAX_CONTINUATION_FRAMES_PER_MINUTE_EXCEEDED), RecRawStatSyncSum); RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_INSUFFICIENT_AVG_WINDOW_UPDATE_NAME, RECD_INT, RECP_PERSISTENT, static_cast<int>(HTTP2_STAT_INSUFFICIENT_AVG_WINDOW_UPDATE), RecRawStatSyncSum);
RecRegisterRawStat(http2_rsb, RECT_PROCESS, HTTP2_STAT_MAX_CONCURRENT_STREAMS_EXCEEDED_IN_NAME, RECD_INT, RECP_PERSISTENT, diff -Nru trafficserver-9.2.3+ds/proxy/http2/HTTP2.h trafficserver-9.2.4+ds/proxy/http2/HTTP2.h --- trafficserver-9.2.3+ds/proxy/http2/HTTP2.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http2/HTTP2.h 2024-04-03 15:38:30.000000000 +0000 @@ -105,6 +105,7 @@ HTTP2_STAT_MAX_PING_FRAMES_PER_MINUTE_EXCEEDED, HTTP2_STAT_MAX_PRIORITY_FRAMES_PER_MINUTE_EXCEEDED, HTTP2_STAT_MAX_RST_STREAM_FRAMES_PER_MINUTE_EXCEEDED, + HTTP2_STAT_MAX_CONTINUATION_FRAMES_PER_MINUTE_EXCEEDED, HTTP2_STAT_INSUFFICIENT_AVG_WINDOW_UPDATE, HTTP2_STAT_MAX_CONCURRENT_STREAMS_EXCEEDED_IN, HTTP2_STAT_MAX_CONCURRENT_STREAMS_EXCEEDED_OUT, @@ -404,6 +405,7 @@ static uint32_t max_ping_frames_per_minute; static uint32_t max_priority_frames_per_minute; static uint32_t max_rst_stream_frames_per_minute; + static uint32_t max_continuation_frames_per_minute; static float min_avg_window_update; static uint32_t con_slow_log_threshold; static uint32_t stream_slow_log_threshold; diff -Nru trafficserver-9.2.3+ds/proxy/http2/Http2ConnectionState.cc trafficserver-9.2.4+ds/proxy/http2/Http2ConnectionState.cc --- trafficserver-9.2.3+ds/proxy/http2/Http2ConnectionState.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http2/Http2ConnectionState.cc 2024-04-03 15:38:30.000000000 +0000 @@ -521,6 +521,13 @@ "reset access stream with invalid id"); } + // A RST_STREAM frame with a length other than 4 octets MUST be treated + // as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR. + if (frame.header().length != HTTP2_RST_STREAM_LEN) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, + "reset frame wrong length"); + } + Http2Stream *stream = cstate.find_stream(stream_id); if (stream == nullptr) { if (cstate.is_valid_streamid(stream_id)) { @@ -531,13 +538,6 @@ } } - // A RST_STREAM frame with a length other than 4 octets MUST be treated - // as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR. 
- if (frame.header().length != HTTP2_RST_STREAM_LEN) { - return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_FRAME_SIZE_ERROR, - "reset frame wrong length"); - } - // Update RST_STREAM frame count per minute cstate.increment_received_rst_stream_frame_count(); // Close this connection if its RST_STREAM frame count exceeds a limit @@ -545,7 +545,7 @@ cstate.get_received_rst_stream_frame_count() > cstate.configured_max_rst_stream_frames_per_minute) { HTTP2_INCREMENT_THREAD_DYN_STAT(HTTP2_STAT_MAX_RST_STREAM_FRAMES_PER_MINUTE_EXCEEDED, this_ethread()); Http2StreamDebug(cstate.session, stream_id, "Observed too frequent RST_STREAM frames: %u frames within a last minute", - cstate.get_received_settings_frame_count()); + cstate.get_received_rst_stream_frame_count()); return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM, "reset too frequent RST_STREAM frames"); } @@ -753,7 +753,7 @@ { Http2Goaway goaway; char buf[HTTP2_GOAWAY_LEN]; - unsigned nbytes = 0; + char *end; const Http2StreamId stream_id = frame.header().streamid; Http2StreamDebug(cstate.session, stream_id, "Received GOAWAY frame"); @@ -765,13 +765,11 @@ "goaway id non-zero"); } - while (nbytes < frame.header().length) { - unsigned read_bytes = read_rcv_buffer(buf, sizeof(buf), nbytes, frame); + end = frame.reader()->memcpy(buf, sizeof(buf), 0); - if (!http2_parse_goaway(make_iovec(buf, read_bytes), goaway)) { - return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, - "goaway failed parse"); - } + if (!http2_parse_goaway(make_iovec(buf, end - buf), goaway)) { + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_PROTOCOL_ERROR, + "goaway failed parse"); } Http2StreamDebug(cstate.session, stream_id, "GOAWAY: last stream id=%d, error code=%d", goaway.last_streamid, @@ -926,6 +924,18 @@ } } + // Update CONTINUATION frame count per minute. + cstate.increment_received_continuation_frame_count(); + // Close this connection if its CONTINUATION frame count exceeds a limit. 
+ if (cstate.configured_max_continuation_frames_per_minute != 0 && + cstate.get_received_continuation_frame_count() > cstate.configured_max_continuation_frames_per_minute) { + HTTP2_INCREMENT_THREAD_DYN_STAT(HTTP2_STAT_MAX_CONTINUATION_FRAMES_PER_MINUTE_EXCEEDED, this_ethread()); + Http2StreamDebug(cstate.session, stream_id, "Observed too frequent CONTINUATION frames: %u frames within a last minute", + cstate.get_received_continuation_frame_count()); + return Http2Error(Http2ErrorClass::HTTP2_ERROR_CLASS_CONNECTION, Http2ErrorCode::HTTP2_ERROR_ENHANCE_YOUR_CALM, + "reset too frequent CONTINUATION frames"); + } + uint32_t header_blocks_offset = stream->header_blocks_length; stream->header_blocks_length += payload_length; @@ -1090,10 +1100,11 @@ dependency_tree = new DependencyTree(Http2::max_concurrent_streams_in); } - configured_max_settings_frames_per_minute   = Http2::max_settings_frames_per_minute; - configured_max_ping_frames_per_minute       = Http2::max_ping_frames_per_minute; - configured_max_priority_frames_per_minute   = Http2::max_priority_frames_per_minute; - configured_max_rst_stream_frames_per_minute = Http2::max_rst_stream_frames_per_minute; + configured_max_settings_frames_per_minute     = Http2::max_settings_frames_per_minute; + configured_max_ping_frames_per_minute         = Http2::max_ping_frames_per_minute; + configured_max_priority_frames_per_minute     = Http2::max_priority_frames_per_minute; + configured_max_rst_stream_frames_per_minute   = Http2::max_rst_stream_frames_per_minute; + configured_max_continuation_frames_per_minute = Http2::max_continuation_frames_per_minute; if (auto snis = dynamic_cast<TLSSNISupport *>(session->get_netvc()); snis) { if (snis->hints_from_sni.http2_max_settings_frames_per_minute.has_value()) { configured_max_settings_frames_per_minute = snis->hints_from_sni.http2_max_settings_frames_per_minute.value(); @@ -1107,6 +1118,9 @@ if (snis->hints_from_sni.http2_max_rst_stream_frames_per_minute.has_value()) { configured_max_rst_stream_frames_per_minute = snis->hints_from_sni.http2_max_rst_stream_frames_per_minute.value(); } + if (snis->hints_from_sni.http2_max_continuation_frames_per_minute.has_value()) { + configured_max_continuation_frames_per_minute = snis->hints_from_sni.http2_max_continuation_frames_per_minute.value(); + } } _cop = ActivityCop<Http2Stream>(this->mutex, &stream_list, 1); @@ -2142,6 +2156,18 @@ return this->_received_rst_stream_frame_counter.get_count(); } +void +Http2ConnectionState::increment_received_continuation_frame_count() +{ + this->_received_continuation_frame_counter.increment(); +} + +uint32_t +Http2ConnectionState::get_received_continuation_frame_count() +{ + return this->_received_continuation_frame_counter.get_count(); +} + // Return min_concurrent_streams_in when current client streams number is larger than max_active_streams_in. // Main purpose of this is preventing DDoS Attacks.
unsigned diff -Nru trafficserver-9.2.3+ds/proxy/http2/Http2ConnectionState.h trafficserver-9.2.4+ds/proxy/http2/Http2ConnectionState.h --- trafficserver-9.2.3+ds/proxy/http2/Http2ConnectionState.h 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http2/Http2ConnectionState.h 2024-04-03 15:38:30.000000000 +0000 @@ -102,10 +102,11 @@ Http2ConnectionSettings server_settings; Http2ConnectionSettings client_settings; - uint32_t configured_max_settings_frames_per_minute = 0; - uint32_t configured_max_ping_frames_per_minute = 0; - uint32_t configured_max_priority_frames_per_minute = 0; - uint32_t configured_max_rst_stream_frames_per_minute = 0; + uint32_t configured_max_settings_frames_per_minute = 0; + uint32_t configured_max_ping_frames_per_minute = 0; + uint32_t configured_max_priority_frames_per_minute = 0; + uint32_t configured_max_rst_stream_frames_per_minute = 0; + uint32_t configured_max_continuation_frames_per_minute = 0; void init(Http2CommonSession *ssn); void send_connection_preface(); @@ -174,6 +175,8 @@ uint32_t get_received_priority_frame_count(); void increment_received_rst_stream_frame_count(); uint32_t get_received_rst_stream_frame_count(); + void increment_received_continuation_frame_count(); + uint32_t get_received_continuation_frame_count(); ssize_t client_rwnd() const; Http2ErrorCode increment_client_rwnd(size_t amount); @@ -220,6 +223,7 @@ Http2FrequencyCounter _received_ping_frame_counter; Http2FrequencyCounter _received_priority_frame_counter; Http2FrequencyCounter _received_rst_stream_frame_counter; + Http2FrequencyCounter _received_continuation_frame_counter; // NOTE: Id of stream which MUST receive CONTINUATION frame. // - [RFC 7540] 6.2 HEADERS diff -Nru trafficserver-9.2.3+ds/proxy/http2/unit_tests/test_HpackIndexingTable.cc trafficserver-9.2.4+ds/proxy/http2/unit_tests/test_HpackIndexingTable.cc --- trafficserver-9.2.3+ds/proxy/http2/unit_tests/test_HpackIndexingTable.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/proxy/http2/unit_tests/test_HpackIndexingTable.cc 2024-04-03 15:38:30.000000000 +0000 @@ -36,6 +36,20 @@ static constexpr int MAX_REQUEST_HEADER_SIZE = 131072; static constexpr int MAX_TABLE_SIZE = 4096; +namespace +{ +/** + When HTTHdr::create is called, HTTPHdr::destroy needs to be called to free HdrHeap. + When Issue #10541 is fixed, we don't need this helper. 
+*/ +void +destroy_http_hdr(HTTPHdr *hdr) +{ + hdr->destroy(); + delete hdr; +} +} // namespace + TEST_CASE("HPACK low level APIs", "[hpack]") { SECTION("indexed_header_field") @@ -71,7 +85,7 @@ HpackIndexingTable indexing_table(4096); for (const auto &i : indexed_test_case) { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); MIMEField *field = mime_field_create(headers->m_heap, headers->m_http->m_fields_impl); MIMEFieldWrapper header(field, headers->m_heap, headers->m_http->m_fields_impl); @@ -224,7 +238,7 @@ HpackIndexingTable indexing_table(4096); for (const auto &i : literal_test_case) { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); MIMEField *field = mime_field_create(headers->m_heap, headers->m_http->m_fields_impl); MIMEFieldWrapper header(field, headers->m_heap, headers->m_http->m_fields_impl); @@ -349,7 +363,7 @@ indexing_table.update_maximum_size(DYNAMIC_TABLE_SIZE_FOR_REGRESSION_TEST); for (unsigned int i = 0; i < sizeof(encoded_field_response_test_case) / sizeof(encoded_field_response_test_case[0]); i++) { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_RESPONSE); for (unsigned int j = 0; j < sizeof(raw_field_response_test_case[i]) / sizeof(raw_field_response_test_case[i][0]); j++) { @@ -455,7 +469,7 @@ HpackIndexingTable indexing_table(4096); for (unsigned int i = 0; i < sizeof(encoded_field_request_test_case) / sizeof(encoded_field_request_test_case[0]); i++) { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); hpack_decode_header_block(indexing_table, headers.get(), encoded_field_request_test_case[i].encoded_field, @@ -488,7 +502,7 @@ // add entries in dynamic table { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); // C.3.1. 
First Request @@ -504,7 +518,7 @@ // clear all entries by setting a maximum size of 0 { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); uint8_t data[] = {0x20}; @@ -518,7 +532,7 @@ // make the maximum size back to 4096 { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); uint8_t data[] = {0x3f, 0xe1, 0x1f}; @@ -532,7 +546,7 @@ // error with exceeding the limit (MAX_TABLE_SIZE) { - std::unique_ptr headers(new HTTPHdr); + std::unique_ptr headers(new HTTPHdr, destroy_http_hdr); headers->create(HTTP_TYPE_REQUEST); uint8_t data[] = {0x3f, 0xe2, 0x1f}; diff -Nru trafficserver-9.2.3+ds/src/traffic_server/traffic_server.cc trafficserver-9.2.4+ds/src/traffic_server/traffic_server.cc --- trafficserver-9.2.3+ds/src/traffic_server/traffic_server.cc 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/src/traffic_server/traffic_server.cc 2024-04-03 15:38:30.000000000 +0000 @@ -197,7 +197,6 @@ {"net_threads", 'n', "Number of Net Threads", "I", &num_of_net_threads, "PROXY_NET_THREADS", nullptr}, {"udp_threads", 'U', "Number of UDP Threads", "I", &num_of_udp_threads, "PROXY_UDP_THREADS", nullptr}, {"accept_thread", 'a', "Use an Accept Thread", "T", &num_accept_threads, "PROXY_ACCEPT_THREAD", nullptr}, - {"accept_till_done", 'b', "Accept Till Done", "T", &accept_till_done, "PROXY_ACCEPT_TILL_DONE", nullptr}, {"httpport", 'p', "Port descriptor for HTTP Accept", "S*", &http_accept_port_descriptor, "PROXY_HTTP_ACCEPT_PORT", nullptr}, {"disable_freelist", 'f', "Disable the freelist memory allocator", "T", &cmd_disable_freelist, "PROXY_DPRINTF_LEVEL", nullptr}, {"disable_pfreelist", 'F', "Disable the freelist memory allocator in ProxyAllocator", "T", &cmd_disable_pfreelist, @@ -977,6 +976,11 @@ /** Attempt to load a plugin shared object file. * + * Note that this function is only used to load plugins for the purpose of + * verifying that they are valid plugins. It is not used to load plugins for + * normal operation. Any loaded plugin will be closed immediately after loading + * it. + * * @param[in] plugin_type The type of plugin for which to create a PluginInfo. * @param[in] plugin_path The path to the plugin's shared object file. * @param[out] error Some description of why the plugin failed to load if @@ -985,12 +989,18 @@ * @return True if the plugin loaded successfully, false otherwise. 
*/ static bool -load_plugin(plugin_type_t plugin_type, const fs::path &plugin_path, std::string &error) +try_loading_plugin(plugin_type_t plugin_type, const fs::path &plugin_path, std::string &error) { switch (plugin_type) { case plugin_type_t::GLOBAL: { - void *handle, *initptr; - return plugin_dso_load(plugin_path.c_str(), handle, initptr, error); + void *handle = nullptr; + void *initptr = nullptr; + bool const plugin_loaded = plugin_dso_load(plugin_path.c_str(), handle, initptr, error); + if (handle != nullptr) { + dlclose(handle); + handle = nullptr; + } + return plugin_loaded; } case plugin_type_t::REMAP: { auto temporary_directory = fs::temp_directory_path(); @@ -1044,7 +1054,7 @@ auto ret = CMD_OK; std::string error; - if (load_plugin(plugin_type, plugin_path, error)) { + if (try_loading_plugin(plugin_type, plugin_path, error)) { fprintf(stderr, "NOTE: verifying plugin '%s' Success\n", plugin_filename); } else { fprintf(stderr, "ERROR: verifying plugin '%s' Fail: %s\n", plugin_filename, error.c_str()); diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/cli_tools.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/cli_tools.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/cli_tools.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/cli_tools.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - # allows the same command to be repeatedly run in parallel. # # example usage: @@ -31,25 +30,18 @@ # # Note that by default, the Default Process is created in this function. + def spawn_commands(self, cmdstr, count, retcode=0, use_default=True): ret = [] if use_default: count = int(count) - 1 for cnt in range(0, count): - ret.append( - self.Processes.Process( - name="cmdline-{num}".format(num=cnt), - cmdstr=cmdstr, - returncode=retcode - ) - ) + ret.append(self.Processes.Process(name="cmdline-{num}".format(num=cnt), cmdstr=cmdstr, returncode=retcode)) if use_default: self.Processes.Default.Command = cmdstr self.Processes.Default.ReturnCode = retcode - self.Processes.Default.StartBefore( - *ret - ) + self.Processes.Default.StartBefore(*ret) return ret diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/conditions.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/conditions.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/conditions.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/conditions.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -22,44 +22,33 @@ def HasOpenSSLVersion(self, version): - output = subprocess.check_output( - os.path.join(self.Variables.BINDIR, "traffic_layout") + " info --versions --json", shell=True - ) + output = subprocess.check_output(os.path.join(self.Variables.BINDIR, "traffic_layout") + " info --versions --json", shell=True) json_data = output.decode('utf-8') openssl_str = json.loads(json_data)['openssl_str'] exe_ver = re.search(r'\d\.\d\.\d', openssl_str).group(0) if exe_ver == '': raise ValueError("Error determining version of OpenSSL library needed by traffic_server executable") - return self.Condition( - lambda: exe_ver >= version, - "OpenSSL library version is " + exe_ver + ", must be at least " + version - ) + return self.Condition(lambda: exe_ver >= version, "OpenSSL library version is " + exe_ver + ", must be at least " + version) def IsBoringSSL(self): - output = 
subprocess.check_output( - os.path.join(self.Variables.BINDIR, "traffic_layout") + " info --versions --json", shell=True - ) + output = subprocess.check_output(os.path.join(self.Variables.BINDIR, "traffic_layout") + " info --versions --json", shell=True) json_data = output.decode('utf-8') openssl_str = json.loads(json_data)['openssl_str'] return self.Condition( # OpenSSL 1.1.1 (compatible; BoringSSL) lambda: "compatible; BoringSSL" in openssl_str, - "SSL library is not BoringSSL" - ) + "SSL library is not BoringSSL") def IsOpenSSL(self): - output = subprocess.check_output( - os.path.join(self.Variables.BINDIR, "traffic_layout") + " info --versions --json", shell=True - ) + output = subprocess.check_output(os.path.join(self.Variables.BINDIR, "traffic_layout") + " info --versions --json", shell=True) json_data = output.decode('utf-8') openssl_str = json.loads(json_data)['openssl_str'] return self.Condition( # OpenSSL 1.1.1k 25 Mar 2021 lambda: "OpenSSL" in openssl_str and "compatible; BoringSSL" not in openssl_str, - "SSL library is not OpenSSL" - ) + "SSL library is not OpenSSL") def HasCurlVersion(self, version): @@ -87,14 +76,11 @@ return True return False - return self.CheckOutput( - ['curl', '--version'], - default, - "Curl needs to support feature: {feature}".format(feature=feature) - ) + return self.CheckOutput(['curl', '--version'], default, "Curl needs to support feature: {feature}".format(feature=feature)) def HasCurlOption(self, option): + def default(output): tag = option.lower() for line in output.splitlines(): @@ -105,21 +91,15 @@ return True return False - return self.CheckOutput( - ['curl', '--help', 'all'], - default, - "Curl needs to support option: {option}".format(option=option) - ) + return self.CheckOutput(['curl', '--help', 'all'], default, "Curl needs to support option: {option}".format(option=option)) def HasATSFeature(self, feature): val = self.Variables.get(feature, None) - return self.Condition( - lambda: val, - "ATS feature not enabled: {feature}".format(feature=feature) - ) + return self.Condition(lambda: val, "ATS feature not enabled: {feature}".format(feature=feature)) + # test if a plugin exists in the libexec folder diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/curl_header.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/curl_header.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/curl_header.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/curl_header.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -24,20 +24,13 @@ class CurlHeader(Tester): - def __init__(self, - value, - test_value=None, - kill_on_failure=False, - description_group=None, - description=None): + + def __init__(self, value, test_value=None, kill_on_failure=False, description_group=None, description=None): self._stack = host.getCurrentStack(1) if not is_a.Dictionary(value): - host.WriteError( - "CurlHeader Input: Need to provide a dictionary.", - stack=host.getCurrentStack(1) - ) + host.WriteError("CurlHeader Input: Need to provide a dictionary.", stack=host.getCurrentStack(1)) ops = ("equal", "equal_re") gold_dict = value @@ -52,8 +45,7 @@ if not isinstance(header, str): host.WriteError( 'CurlHeader Input: Unsupported type for header {}. 
Header needs to be a string.'.format(header), - stack=self._stack - ) + stack=self._stack) if target is None or isinstance(target, str): continue @@ -61,24 +53,28 @@ for op, pos_val in target.items(): if op not in ops: host.WriteError( - 'CurlHeader Input: Unsupported operation \'{}\' for value at header \'{}\'. The available operations are: {}.'.format( - op, header, ', '.join(ops)), stack=self._stack) + 'CurlHeader Input: Unsupported operation \'{}\' for value at header \'{}\'. The available operations are: {}.' + .format(op, header, ', '.join(ops)), + stack=self._stack) elif pos_val is None or isinstance(pos_val, str): continue elif isinstance(pos_val, list): for str_ in pos_val: if not isinstance(str_, str) and str_ is not None: host.WriteError( - 'CurlHeader Input: Value {} has unsupported type \'{}\' for header \'{}\'. Need to provide a string or None.'.format( - str_, str_.__class__.__name__, header), stack=self._stack) + 'CurlHeader Input: Value {} has unsupported type \'{}\' for header \'{}\'. Need to provide a string or None.' + .format(str_, str_.__class__.__name__, header), + stack=self._stack) else: host.WriteError( - 'CurlHeader Input: Value {} has unsupported type \'{}\' for header \'{}\'. Need to provide a string, a list or None for possible curl values.'.format( - pos_val, pos_val.__class__.__name__, header), stack=self._stack) + 'CurlHeader Input: Value {} has unsupported type \'{}\' for header \'{}\'. Need to provide a string, a list or None for possible curl values.' + .format(pos_val, pos_val.__class__.__name__, header), + stack=self._stack) else: host.WriteError( - 'CurlHeader Input: Value {} has unsupported type \'{}\' for header \'{}\'. Need to provide either a string, a dictionary or None.'.format( - target, target.__class__.__name__, header), stack=self._stack) + 'CurlHeader Input: Value {} has unsupported type \'{}\' for header \'{}\'. Need to provide either a string, a dictionary or None.' 
+ .format(target, target.__class__.__name__, header), + stack=self._stack) super(CurlHeader, self).__init__( value=value, @@ -146,9 +142,7 @@ if self.KillOnFailure: raise KillOnFailureError host.WriteVerbose( - ["testers.CurlHeader", "testers"], - "{0} - ".format(tester.ResultType.to_color_string(self.Result)), - self.Reason) + ["testers.CurlHeader", "testers"], "{0} - ".format(tester.ResultType.to_color_string(self.Result)), self.Reason) # Optional operations to do: # equal: complete string match @@ -200,8 +194,7 @@ return None ret = '' - ops = {'equal': 'Any of the following strings: ', - 'equal_re': 'Any of the following regular expression: '} + ops = {'equal': 'Any of the following strings: ', 'equal_re': 'Any of the following regular expression: '} for op, pos_val in target.items(): ret += ' {}: \'{}\'\n'.format(ops[op], '\', \''.join(pos_val)) if isinstance(pos_val, list) \ diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/httpbin.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/httpbin.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/httpbin.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/httpbin.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -19,8 +19,7 @@ from ports import get_port -def MakeHttpBinServer(self, name, ip='127.0.0.1', port=None, - options={}) -> 'Process': +def MakeHttpBinServer(self, name, ip='127.0.0.1', port=None, options={}) -> 'Process': data_dir = os.path.join(self.RunDirectory, name) # create Process p = self.Processes.Process(name) @@ -28,10 +27,7 @@ port = get_port(p, "Port") self._RootRunable.SkipUnless( - Condition.HasProgram( - "go-httpbin", - "go-httpbin needs be installed and in PATH for this extension to run") - ) + Condition.HasProgram("go-httpbin", "go-httpbin needs be installed and in PATH for this extension to run")) command = f"go-httpbin -host {ip} -port {port} " for flag, value in options.items(): diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/init.cli.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/init.cli.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/init.cli.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/init.cli.ext 2024-04-03 15:38:30.000000000 +0000 @@ -20,8 +20,7 @@ import microserver if sys.version_info < (3, 6, 0): - host.WriteError( - "You need python 3.6 or later to run these tests\n", show_stack=False) + host.WriteError("You need python 3.6 or later to run these tests\n", show_stack=False) needed_autest_version = "1.10.4" found_autest_version = AuTestVersion() @@ -31,7 +30,6 @@ "Please update AuTest:\n pipenv --rm && pipenv install\n", show_stack=False) - needed_microserver_version = "1.0.6" found_microserver_version = microserver.__version__ if found_microserver_version < needed_microserver_version: @@ -40,14 +38,8 @@ "Please update MicroServer:\n pipenv --rm && pipenv install\n", show_stack=False) -Settings.path_argument(["--ats-bin"], - required=True, - help="A user provided directory to ATS bin") - -Settings.path_argument(["--build-root"], - required=False, - help="The location of the build root for out of source builds") - -Settings.path_argument(["--proxy-verifier-bin"], - required=False, - help="A location for system proxy-verifier binaries to test with.") +Settings.path_argument(["--ats-bin"], required=True, help="A user provided directory to ATS bin") + +Settings.path_argument(["--build-root"], required=False, help="The location of the build root 
for out of source builds") + +Settings.path_argument(["--proxy-verifier-bin"], required=False, help="A location for system proxy-verifier binaries to test with.") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/ip.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/ip.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/ip.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/ip.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - from ports import get_port # this forms is for the global process define diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/microDNS.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/microDNS.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/microDNS.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/microDNS.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -30,6 +30,7 @@ record[hostname] = list_ip_addr return record + # dict in format {'domain': [IPs]} # json file in the same mappings/otherwise format that uDNS takes diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/microserver.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/microserver.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/microserver.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/microserver.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -33,6 +33,7 @@ def addMethod(self, testName, request_header, functionName): return + # creates the full request or response block using headers and message data @@ -60,13 +61,11 @@ # addResponse adds customized response with respect to request_header. 
request_header and response_header are both dictionaries def addResponse(self, filename, request_header, response_header): client_request = Request.fromRequestLine( - request_header["headers"], - "" if "body" not in request_header else request_header["body"], + request_header["headers"], "" if "body" not in request_header else request_header["body"], None if "options" not in request_header else request_header["options"]) server_response = Response.fromRequestLine( - response_header["headers"], - "" if "body" not in response_header else response_header["body"], + response_header["headers"], "" if "body" not in response_header else response_header["body"], None if "options" not in response_header else response_header["options"]) # timestamp field is left None because that needs to be revised for better implementation @@ -135,16 +134,10 @@ try: sock.connect((serverHost, port)) except ConnectionRefusedError: - host.WriteDebug( - ['uServerUpAndRunning', 'when'], - "Connection refused: {0}:{1}".format( - serverHost, port)) + host.WriteDebug(['uServerUpAndRunning', 'when'], "Connection refused: {0}:{1}".format(serverHost, port)) return False except ssl.SSLError as e: - host.WriteDebug( - ['uServerUpAndRunning', 'when'], - "SSL connection error: {0}:{1}:{2}".format( - serverHost, port, e)) + host.WriteDebug(['uServerUpAndRunning', 'when'], "SSL connection error: {0}:{1}:{2}".format(serverHost, port, e)) return False sock.sendall(request.encode()) @@ -164,11 +157,7 @@ if decoded_output == expected_response: return True - host.WriteError('\n'.join([ - 'Got invalid response from microserver:', - '----', - decoded_output, - '----'])) + host.WriteError('\n'.join(['Got invalid response from microserver:', '----', decoded_output, '----'])) AddWhenFunction(uServerUpAndRunning) @@ -243,12 +232,15 @@ } # Set up health check. - addResponse(p, "healthcheck.json", healthcheck_request, { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "imok", - "options": {"skipHooks": None} - }) + addResponse( + p, "healthcheck.json", healthcheck_request, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "imok", + "options": { + "skipHooks": None + } + }) p.Ready = When.uServerUpAndRunning( ipaddr, diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/ordered_set_queue.py trafficserver-9.2.4+ds/tests/gold_tests/autest-site/ordered_set_queue.py --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/ordered_set_queue.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/ordered_set_queue.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import collections try: collectionsAbc = collections.abc @@ -29,18 +28,18 @@ except ImportError: import Queue - # # This is borrowed from the following (MIT licensed) recipe: # https://code.activestate.com/recipes/576694/ # + class OrderedSet(collectionsAbc.MutableSet): def __init__(self, iterable=None): self.end = end = [] - end += [None, end, end] # sentinel node for doubly linked list - self.map = {} # key --> [key, prev, next] + end += [None, end, end] # sentinel node for doubly linked list + self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/ports.py trafficserver-9.2.4+ds/tests/gold_tests/autest-site/ports.py --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/ports.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/ports.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,7 +27,6 @@ from ordered_set_queue import OrderedSetQueue - g_ports = None # ports we can use @@ -61,9 +60,7 @@ address = "localhost" if port in listening_ports: - host.WriteDebug( - 'PortOpen', - f"{port} is open because it is in the listening sockets set.") + host.WriteDebug('PortOpen', f"{port} is open because it is in the listening sockets set.") return True address = (address, port) @@ -75,18 +72,15 @@ s.close() ret = True host.WriteDebug( - 'PortOpen', - f"Connection to port {port} succeeded, the port is open, " + 'PortOpen', f"Connection to port {port} succeeded, the port is open, " "and a future connection cannot use it") except socket.error: host.WriteDebug( - 'PortOpen', - f"socket error for port {port}, port is closed, " + 'PortOpen', f"socket error for port {port}, port is closed, " "and therefore a future connection can use it") except socket.timeout: host.WriteDebug( - 'PortOpen', - f"Timeout error for port {port}, port is closed, " + 'PortOpen', f"Timeout error for port {port}, port is closed, " "and therefore a future connection can use it") return ret @@ -111,19 +105,15 @@ if queue.qsize() == 0: host.WriteWarning("Port queue is empty.") - raise PortQueueSelectionError( - "Could not get a valid port because the queue is empty") + raise PortQueueSelectionError("Could not get a valid port because the queue is empty") listening_ports = _get_listening_ports() port = queue.get() while PortOpen(port, listening_ports=listening_ports): - host.WriteDebug( - '_get_available_port', - f"Port was closed but now is used: {port}") + host.WriteDebug('_get_available_port', f"Port was closed but now is used: {port}") if queue.qsize() == 0: host.WriteWarning("Port queue is empty.") - raise PortQueueSelectionError( - "Could not get a valid port because the queue is empty") + raise PortQueueSelectionError("Could not get a valid port because the queue is empty") port = queue.get() return port @@ -158,15 +148,11 @@ """ global g_ports if g_ports is None: - host.WriteDebug( - '_setup_port_queue', - "Populating the port queue.") + host.WriteDebug('_setup_port_queue', "Populating the port queue.") g_ports = OrderedSetQueue() else: # The queue has already been populated. - host.WriteDebug( - '_setup_port_queue', - f"Queue was previously populated. Queue size: {g_ports.qsize()}") + host.WriteDebug('_setup_port_queue', f"Queue was previously populated. Queue size: {g_ports.qsize()}") return try: # Use sysctl to find the range of ports that the OS publishes it uses. 
@@ -174,19 +160,11 @@ new_env = os.environ.copy() new_env['PATH'] = "/sbin:/usr/sbin:" + new_env['PATH'] if 'Darwin' == platform.system(): - dmin = subprocess.check_output( - ["sysctl", "net.inet.ip.portrange.first"], - env=new_env - ).decode().split(":")[1].split()[0] - dmax = subprocess.check_output( - ["sysctl", "net.inet.ip.portrange.last"], - env=new_env - ).decode().split(":")[1].split()[0] + dmin = subprocess.check_output(["sysctl", "net.inet.ip.portrange.first"], env=new_env).decode().split(":")[1].split()[0] + dmax = subprocess.check_output(["sysctl", "net.inet.ip.portrange.last"], env=new_env).decode().split(":")[1].split()[0] else: - dmin, dmax = subprocess.check_output( - ["sysctl", "net.ipv4.ip_local_port_range"], - env=new_env - ).decode().split("=")[1].split() + dmin, dmax = subprocess.check_output(["sysctl", "net.ipv4.ip_local_port_range"], + env=new_env).decode().split("=")[1].split() dmin = int(dmin) dmax = int(dmax) except Exception: @@ -202,13 +180,9 @@ port = dmax + 1 while port < 65536 and g_ports.qsize() < amount: if PortOpen(port, listening_ports=listening_ports): - host.WriteDebug( - '_setup_port_queue', - f"Rejecting an already open port: {port}") + host.WriteDebug('_setup_port_queue', f"Rejecting an already open port: {port}") else: - host.WriteDebug( - '_setup_port_queue', - f"Adding a possible port to connect to: {port}") + host.WriteDebug('_setup_port_queue', f"Adding a possible port to connect to: {port}") g_ports.put(port) port += 1 if rmin > amount and g_ports.qsize() < amount: @@ -217,13 +191,9 @@ # and going up until the minimum port range used by the OS. while port < dmin and g_ports.qsize() < amount: if PortOpen(port, listening_ports=listening_ports): - host.WriteDebug( - '_setup_port_queue', - f"Rejecting an already open port: {port}") + host.WriteDebug('_setup_port_queue', f"Rejecting an already open port: {port}") else: - host.WriteDebug( - '_setup_port_queue', - f"Adding a possible port to connect to: {port}") + host.WriteDebug('_setup_port_queue', f"Adding a possible port to connect to: {port}") g_ports.put(port) port += 1 @@ -263,23 +233,17 @@ if g_ports.qsize() > 0: try: port = _get_available_port(g_ports) - host.WriteVerbose( - "get_port", - f"Using port from port queue: {port}") + host.WriteVerbose("get_port", f"Using port from port queue: {port}") # setup clean up step to recycle the port - obj.Setup.Lambda(func_cleanup=lambda: g_ports.put( - port), description=f"recycling port: {port}, queue size: {g_ports.qsize()}") + obj.Setup.Lambda( + func_cleanup=lambda: g_ports.put(port), description=f"recycling port: {port}, queue size: {g_ports.qsize()}") except PortQueueSelectionError: port = _get_port_by_bind() - host.WriteVerbose( - "get_port", - f"Queue was drained. Using port from a bound socket: {port}") + host.WriteVerbose("get_port", f"Queue was drained. Using port from a bound socket: {port}") else: # Since the queue could not be populated, use a port via bind. port = _get_port_by_bind() - host.WriteVerbose( - "get_port", - f"Queue is empty. Using port from a bound socket: {port}") + host.WriteVerbose("get_port", f"Queue is empty. Using port from a bound socket: {port}") # Assign to the named variable. 
obj.Variables[name] = port diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/setup.cli.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/setup.cli.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/setup.cli.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/setup.cli.ext 2024-04-03 15:38:30.000000000 +0000 @@ -45,29 +45,20 @@ if Arguments.proxy_verifier_bin is not None: ENV['VERIFIER_BIN'] = Arguments.proxy_verifier_bin - host.WriteVerbose( - ['ats'], - "Expecting Proxy Verifier to be in user-supplied bin path: ", - ENV['VERIFIER_BIN']) + host.WriteVerbose(['ats'], "Expecting Proxy Verifier to be in user-supplied bin path: ", ENV['VERIFIER_BIN']) else: # No Verifier bin path was specified. First see if a Proxy Verifier was # unpacked as a part of preparing for this test. unpack_bin = os.path.join(test_root, 'proxy-verifier', 'unpack', proxy_verifer_version, 'bin') if os.path.exists(os.path.join(unpack_bin, 'verifier-client')): ENV['VERIFIER_BIN'] = unpack_bin - host.WriteVerbose( - ['ats'], - "Using locally unpacked Proxy Verifier: ", - ENV['VERIFIER_BIN']) + host.WriteVerbose(['ats'], "Using locally unpacked Proxy Verifier: ", ENV['VERIFIER_BIN']) else: # Finally check the PATH. path_search = shutil.which('verifier-client') if path_search is not None: ENV['VERIFIER_BIN'] = dirname(path_search) - host.WriteVerbose( - ['ats'], - "Using Proxy Verifier found in PATH: ", - ENV['VERIFIER_BIN']) + host.WriteVerbose(['ats'], "Using Proxy Verifier found in PATH: ", ENV['VERIFIER_BIN']) else: prepare_proxy_verifier_path = os.path.join(test_root, "prepare_proxy_verifier.sh") host.WriteError("Could not find Proxy Verifier binaries. " @@ -82,10 +73,7 @@ f"Proxy Verifier at {verifier_client} is too old. " f"Version required: {required_pv_version}, version found: {pv_version}") else: - host.WriteVerbose( - ['ats'], - f"Proxy Verifier at {verifier_client} has version: {pv_version}") - + host.WriteVerbose(['ats'], f"Proxy Verifier at {verifier_client} has version: {pv_version}") if ENV['ATS_BIN'] is not None: # Add variables for Tests @@ -125,12 +113,7 @@ # this queries tsxs for build flags so we can build code for the tests and get certain # useful flags as which openssl to use when we don't use the system version - out = { - 'CPPFLAGS': '', - 'LIBS': '', - 'LDFLAGS': '', - 'CXX': '' - } + out = {'CPPFLAGS': '', 'LIBS': '', 'LDFLAGS': '', 'CXX': ''} if os.path.isfile(tsxs): for flag in out.keys(): try: diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/traffic_replay.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/traffic_replay.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/traffic_replay.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/traffic_replay.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -29,9 +29,7 @@ ts.addSSLfile(os.path.join(obj.Variables["AtsTestToolsDir"], "microserver", "ssl", "server.pem")) ts.addSSLfile(os.path.join(obj.Variables["AtsTestToolsDir"], "microserver", "ssl", "server.crt")) - ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.pem' - ) + ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.pem') # MicroServer setup - NOTE: expand to multiple microserver in future? 
server = obj.MakeOriginServer("server", both=True, lookup_key='{%uuid}') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/trafficserver.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/trafficserver.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/trafficserver.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/trafficserver.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -28,20 +28,23 @@ # A mapping from log type to the log name. 'stdout' and 'stderr' are handled # specially and are used to indicate the stdout and stderr streams, # respectively. -default_log_data = { - 'diags': 'diags.log', - 'error': 'error.log', - 'manager': 'manager.log' -} +default_log_data = {'diags': 'diags.log', 'error': 'error.log', 'manager': 'manager.log'} # 'block_for_debug', if True, causes traffic_server to run with the --block option enabled, and effectively # disables timeouts that could be triggered by running traffic_server under a debugger. -def MakeATSProcess(obj, name, command='traffic_server', select_ports=True, - enable_tls=False, enable_cache=True, enable_quic=False, - block_for_debug=False, log_data=default_log_data, - use_traffic_out=True): +def MakeATSProcess( + obj, + name, + command='traffic_server', + select_ports=True, + enable_tls=False, + enable_cache=True, + enable_quic=False, + block_for_debug=False, + log_data=default_log_data, + use_traffic_out=True): ##################################### # common locations @@ -141,8 +144,7 @@ p.Env['PROXY_CONFIG_BODY_FACTORY_TEMPLATE_SETS_DIR'] = template_dir p.Variables.BODY_FACTORY_TEMPLATE_DIR = template_dir - p.Setup.Copy( - os.path.join(p.Variables.SYSCONFDIR, 'body_factory'), template_dir) + p.Setup.Copy(os.path.join(p.Variables.SYSCONFDIR, 'body_factory'), template_dir) ######################################################### # setup cache directory @@ -228,10 +230,8 @@ tmpname = os.path.join(log_dir, fname) p.Disk.File(tmpname, id='diags_log') # add this test back once we have network namespaces working again - p.Disk.diags_log.Content = Testers.ExcludesExpression( - "ERROR:", f"Diags log file {fname} should not contain errors") - p.Disk.diags_log.Content += Testers.ExcludesExpression( - "FATAL:", f"Diags log file {fname} should not contain errors") + p.Disk.diags_log.Content = Testers.ExcludesExpression("ERROR:", f"Diags log file {fname} should not contain errors") + p.Disk.diags_log.Content += Testers.ExcludesExpression("FATAL:", f"Diags log file {fname} should not contain errors") p.Disk.diags_log.Content += Testers.ExcludesExpression( "Unrecognized configuration value", f"Diags log file {fname} should not contain a warning about an unrecognized configuration") @@ -353,16 +353,15 @@ # rely upon the cache will not function correctly if ATS starts # processing traffic before the cache is ready. Thus we set the # wait_for_cache configuration. - p.Disk.records_config.update({ - # Do not accept connections from clients until cache subsystem is - # operational. - 'proxy.config.http.wait_for_cache': 1, - }) + p.Disk.records_config.update( + { + # Do not accept connections from clients until cache subsystem is + # operational. + 'proxy.config.http.wait_for_cache': 1, + }) else: # The user wants the cache to be disabled. 
- p.Disk.records_config.update({ - 'proxy.config.http.cache.http': 0 - }) + p.Disk.records_config.update({'proxy.config.http.cache.http': 0}) if enable_quic: p.Disk.records_config.update({ @@ -387,11 +386,9 @@ 'proxy.config.http.server_ports': port_str, }) - p.Env['PROXY_CONFIG_PROCESS_MANAGER_MGMT_PORT'] = str( - p.Variables.manager_port) + p.Env['PROXY_CONFIG_PROCESS_MANAGER_MGMT_PORT'] = str(p.Variables.manager_port) p.Env['PROXY_CONFIG_ADMIN_SYNTHETIC_PORT'] = str(p.Variables.admin_port) - p.Env['PROXY_CONFIG_ADMIN_AUTOCONF_PORT'] = str( - p.Variables.admin_port) # support pre ATS 6.x + p.Env['PROXY_CONFIG_ADMIN_AUTOCONF_PORT'] = str(p.Variables.admin_port) # support pre ATS 6.x if 'traffic_manager' in command: p.ReturnCode = 2 @@ -419,23 +416,8 @@ Class to represent a config file ''' - def __init__(self, - runable, - name, - exists=None, - size=None, - content_tester=None, - execute=False, - runtime=True, - content=None): - super(Config, self).__init__( - runable, - name, - exists=None, - size=None, - content_tester=None, - execute=False, - runtime=True) + def __init__(self, runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True, content=None): + super(Config, self).__init__(runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True) self.content = content self._added = False @@ -448,8 +430,7 @@ ''' Write contents to disk ''' - host.WriteVerbosef('ats-config-file', "Writing out file {0}", - self.Name) + host.WriteVerbosef('ats-config-file', "Writing out file {0}", self.Name) if self.content is not None: with open(name, 'w') as f: f.write(self.content) @@ -484,22 +465,8 @@ line_template = 'CONFIG {name} {kind} {val}\n' - def __init__(self, - runable, - name, - exists=None, - size=None, - content_tester=None, - execute=False, - runtime=True): - super(RecordsConfig, self).__init__( - runable, - name, - exists=None, - size=None, - content_tester=None, - execute=False, - runtime=True) + def __init__(self, runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True): + super(RecordsConfig, self).__init__(runable, name, exists=None, size=None, content_tester=None, execute=False, runtime=True) self.WriteCustomOn(self._do_write) def _do_write(self, name): @@ -507,15 +474,8 @@ if len(self) > 0: with open(name, 'w') as f: for name, val in self.items(): - f.write( - self.line_template.format( - name=name, - kind=self.reverse_kind_map[type(val)], - val=val) - ) - return (True, - "Writing config file {0}".format(os.path.split(self.Name)[-1]), - "Success") + f.write(self.line_template.format(name=name, kind=self.reverse_kind_map[type(val)], val=val)) + return (True, "Writing config file {0}".format(os.path.split(self.Name)[-1]), "Success") ########################################################################## diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/trafficserver_plugins.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/trafficserver_plugins.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/trafficserver_plugins.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/trafficserver_plugins.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -46,22 +46,15 @@ plugin_dir = tsproc.Env['PROXY_CONFIG_PLUGIN_PLUGIN_DIR'] if copy_plugin: - host.WriteVerbose( - "prepare_plugin", - "Copying down {} into {}.".format(so_name, plugin_dir)) + host.WriteVerbose("prepare_plugin", "Copying down {} into {}.".format(so_name, plugin_dir)) 
tsproc.Setup.Copy(so_name, plugin_dir) else: - host.WriteVerbose( - "prepare_plugin", - "Skipping copying {} into {} due to configuration.".format( - so_name, plugin_dir)) + host.WriteVerbose("prepare_plugin", "Skipping copying {} into {} due to configuration.".format(so_name, plugin_dir)) # Add an entry to plugin.config. basename = os.path.basename(so_name) config_line = "{0} {1}".format(basename, plugin_args) - host.WriteVerbose( - "prepare_plugin", - 'Adding line to plugin.config: "{}"'.format(config_line)) + host.WriteVerbose("prepare_plugin", 'Adding line to plugin.config: "{}"'.format(config_line)) tsproc.Disk.plugin_config.AddLine(config_line) @@ -79,8 +72,7 @@ plugin.config. """ if not os.path.exists(so_path): - raise ValueError( - 'PrepareTestPlugin: file does not exist: "{}"'.format(so_path)) + raise ValueError('PrepareTestPlugin: file does not exist: "{}"'.format(so_path)) prepare_plugin_helper(so_path, tsproc, plugin_args, copy_plugin=True) @@ -99,9 +91,8 @@ plugin.config. """ if os.path.dirname(so_name): - raise ValueError( - 'PrepareInstalledPlugin expects a filename not a path: ' - '"{}"'.format(so_name)) + raise ValueError('PrepareInstalledPlugin expects a filename not a path: ' + '"{}"'.format(so_name)) prepare_plugin_helper(so_name, tsproc, plugin_args, copy_plugin=False) @@ -110,7 +101,6 @@ be copied down into the sandbox directory. """ ExtendTest(prepare_test_plugin, name="PrepareTestPlugin") - """ PrepareInstalledPlugin should be used for the plugins installed via Automake make install. They are already sym linked into the test directory via the diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/verifier_client.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/verifier_client.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/verifier_client.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/verifier_client.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -17,15 +17,24 @@ # See the License for the specific language governing permissions and # limitations under the License. - import os from verifier_common import create_address_argument, substitute_context_in_replay_file -def _configure_client(obj, process, name, replay_path, http_ports=None, - https_ports=None, http3_ports=None, keys=None, - ssl_cert='', ca_cert='', verbose=True, other_args='', - context=None): +def _configure_client( + obj, + process, + name, + replay_path, + http_ports=None, + https_ports=None, + http3_ports=None, + keys=None, + ssl_cert='', + ca_cert='', + verbose=True, + other_args='', + context=None): """ Configure the process for running the verifier-client. 
@@ -91,8 +100,7 @@ if https_ports or http3_ports: if ssl_cert == '': - ssl_cert = os.path.join(obj.Variables["AtsTestToolsDir"], - "proxy-verifier", "ssl", "client.pem") + ssl_cert = os.path.join(obj.Variables["AtsTestToolsDir"], "proxy-verifier", "ssl", "client.pem") if not os.path.isfile(ssl_cert): raise ValueError("Tried to use '{}' for --client-cert, but it is not " @@ -108,8 +116,7 @@ obj.Variables['tls_secrets_log_path'] = tls_secrets_log_path if ca_cert == '': - ca_cert = os.path.join(obj.Variables["AtsTestToolsDir"], - "proxy-verifier", "ssl", "ca.pem") + ca_cert = os.path.join(obj.Variables["AtsTestToolsDir"], "proxy-verifier", "ssl", "ca.pem") if not os.path.isfile(ca_cert): raise ValueError("Tried to use '{}' for --ca-certs, but it is not " @@ -132,14 +139,22 @@ process.ReturnCode = 0 process.Streams.stdout = Testers.ExcludesExpression( - "Violation|Invalid status", - "There should be no Proxy Verifier violation errors.") + "Violation|Invalid status", "There should be no Proxy Verifier violation errors.") -def AddVerifierClientProcess(run, name, replay_path, http_ports=None, - https_ports=None, http3_ports=None, keys=None, - ssl_cert='', ca_cert='', verbose=True, other_args='', - context=None): +def AddVerifierClientProcess( + run, + name, + replay_path, + http_ports=None, + https_ports=None, + http3_ports=None, + keys=None, + ssl_cert='', + ca_cert='', + verbose=True, + other_args='', + context=None): """ Set the Default process of the test run to a verifier-client Process. @@ -187,9 +202,8 @@ """ p = run.Processes.Default - _configure_client(run, p, name, replay_path, http_ports, https_ports, - http3_ports, keys, ssl_cert, ca_cert, verbose, - other_args, context) + _configure_client( + run, p, name, replay_path, http_ports, https_ports, http3_ports, keys, ssl_cert, ca_cert, verbose, other_args, context) return p diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/verifier_server.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/verifier_server.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/verifier_server.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/verifier_server.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,19 @@ from verifier_common import create_address_argument, substitute_context_in_replay_file -def _configure_server(obj, process, name, replay_path, http_ports=None, https_ports=None, - http3_ports=None, ssl_cert='', ca_cert='', verbose=True, - other_args='', context=None): +def _configure_server( + obj, + process, + name, + replay_path, + http_ports=None, + https_ports=None, + http3_ports=None, + ssl_cert='', + ca_cert='', + verbose=True, + other_args='', + context=None): """ Configure the provided process to run a verifier-server command. 
@@ -91,8 +101,7 @@ if https_ports or http3_ports: if ssl_cert == '': - ssl_cert = os.path.join(obj.Variables["AtsTestToolsDir"], - "proxy-verifier", "ssl", "server.pem") + ssl_cert = os.path.join(obj.Variables["AtsTestToolsDir"], "proxy-verifier", "ssl", "server.pem") if not os.path.isfile(ssl_cert): raise ValueError("Tried to use '{}' for --server-cert, but it is not " @@ -108,8 +117,7 @@ obj.Variables['tls_secrets_log_path'] = tls_secrets_log_path if ca_cert == '': - ca_cert = os.path.join(obj.Variables["AtsTestToolsDir"], - "proxy-verifier", "ssl", "ca.pem") + ca_cert = os.path.join(obj.Variables["AtsTestToolsDir"], "proxy-verifier", "ssl", "ca.pem") if not os.path.isfile(ca_cert): raise ValueError("Tried to use '{}' for --ca-certs, but it is not " @@ -140,14 +148,21 @@ process.Ready = When.PortOpenv4(process.Variables.http_port) process.ReturnCode = 0 - process.Streams.stdout = Testers.ExcludesExpression( - "Violation", - "There should be no Proxy Verifier violation errors.") + process.Streams.stdout = Testers.ExcludesExpression("Violation", "There should be no Proxy Verifier violation errors.") -def MakeVerifierServerProcess(test, name, replay_path, http_ports=None, - https_ports=None, http3_ports=None, ssl_cert='', - ca_cert='', verbose=True, other_args='', context=None): +def MakeVerifierServerProcess( + test, + name, + replay_path, + http_ports=None, + https_ports=None, + http3_ports=None, + ssl_cert='', + ca_cert='', + verbose=True, + other_args='', + context=None): """ Create a verifier-server process for the Test. @@ -196,14 +211,23 @@ KeyError if placeholders are missing from the mapping between context and the replay file. """ server = test.Processes.Process(name) - _configure_server(test, server, name, replay_path, http_ports, https_ports, - http3_ports, ssl_cert, ca_cert, verbose, other_args, context) + _configure_server( + test, server, name, replay_path, http_ports, https_ports, http3_ports, ssl_cert, ca_cert, verbose, other_args, context) return server -def AddVerifierServerProcess(run, name, replay_path, http_ports=None, - https_ports=None, http3_ports=None, ssl_cert='', - ca_cert='', verbose=True, other_args='', context=None): +def AddVerifierServerProcess( + run, + name, + replay_path, + http_ports=None, + https_ports=None, + http3_ports=None, + ssl_cert='', + ca_cert='', + verbose=True, + other_args='', + context=None): """ Create a verifier-server process and configure it for the given TestRun. 
@@ -217,8 +241,8 @@ """ server = run.Processes.Process(name) - _configure_server(run, server, name, replay_path, http_ports, https_ports, - http3_ports, ssl_cert, ca_cert, verbose, other_args, context) + _configure_server( + run, server, name, replay_path, http_ports, https_ports, http3_ports, ssl_cert, ca_cert, verbose, other_args, context) client = run.Processes.Default client.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/autest-site/when.test.ext trafficserver-9.2.4+ds/tests/gold_tests/autest-site/when.test.ext --- trafficserver-9.2.3+ds/tests/gold_tests/autest-site/when.test.ext 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/autest-site/when.test.ext 2024-04-03 15:38:30.000000000 +0000 @@ -47,9 +47,7 @@ if not os.path.exists(haystack): host.WriteDebug( - ['FileContains', 'when'], - "Testing for file content '{0}' in file '{1}': file does not exist".format( - needle, haystack)) + ['FileContains', 'when'], "Testing for file content '{0}' in file '{1}': file does not exist".format(needle, haystack)) return False needle_regex = re.compile(needle) @@ -60,24 +58,19 @@ line_count += 1 if needle_regex.search(line): host.WriteDebug( - ['FileContains', 'when'], - "Found '{0}' in file '{1}' in line: '{2}', line number: {3}".format( + ['FileContains', 'when'], "Found '{0}' in file '{1}' in line: '{2}', line number: {3}".format( needle, haystack, line.rstrip(), line_count)) needle_seen_count += 1 if needle_seen_count >= desired_count: host.WriteDebug( - ['FileContains', 'when'], - "Testing for file content '{0}' in file '{1}', " - "successfully found it the desired {2} times".format( - needle, haystack, needle_seen_count)) + ['FileContains', 'when'], "Testing for file content '{0}' in file '{1}', " + "successfully found it the desired {2} times".format(needle, haystack, needle_seen_count)) return True host.WriteDebug( - ['FileContains', 'when'], - "Testing for file content '{0}' in file '{1}', only seen {2} " - "out of the desired {3} times".format( - needle, haystack, needle_seen_count, desired_count)) + ['FileContains', 'when'], "Testing for file content '{0}' in file '{1}', only seen {2} " + "out of the desired {3} times".format(needle, haystack, needle_seen_count, desired_count)) return False diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/basic/deny0.test.py trafficserver-9.2.4+ds/tests/gold_tests/basic/deny0.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/basic/deny0.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/basic/deny0.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -34,14 +34,15 @@ dns.addRecords(records={HOST1: ['127.0.0.1']}) ts = Test.MakeATSProcess("ts", enable_cache=False) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|redirect', - 'proxy.config.http.number_of_redirections': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.url_remap.remap_required': 0 # need this so the domain gets a chance to be evaluated through DNS -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|redirect', + 'proxy.config.http.number_of_redirections': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.url_remap.remap_required': 0 # need this so the domain gets a chance to be evaluated 
through DNS + }) Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir, 'tcp_client.py')) @@ -77,32 +78,36 @@ tr.StillRunningAfter = dns -buildMetaTest('RejectInterfaceAnyIpv4', - 'GET / HTTP/1.1\r\nHost: 0:{port}\r\nConnection: close\r\n\r\n'.format(port=ts.Variables.port)) - - -buildMetaTest('RejectInterfaceAnyIpv6', - 'GET / HTTP/1.1\r\nHost: [::]:{port}\r\nConnection: close\r\n\r\n'.format(port=ts.Variables.portv6)) +buildMetaTest( + 'RejectInterfaceAnyIpv4', 'GET / HTTP/1.1\r\nHost: 0:{port}\r\nConnection: close\r\n\r\n'.format(port=ts.Variables.port)) +buildMetaTest( + 'RejectInterfaceAnyIpv6', 'GET / HTTP/1.1\r\nHost: [::]:{port}\r\nConnection: close\r\n\r\n'.format(port=ts.Variables.portv6)) # Sets up redirect to IPv4 ANY address redirect_request_header = {"headers": "GET /redirect-0 HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""} -redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: http://0:{0}/\r\nConnection: close\r\n\r\n".format( - ts.Variables.port), "timestamp": "5678", "body": ""} +redirect_response_header = { + "headers": "HTTP/1.1 302 Found\r\nLocation: http://0:{0}/\r\nConnection: close\r\n\r\n".format(ts.Variables.port), + "timestamp": "5678", + "body": "" +} redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) -buildMetaTest('RejectRedirectToInterfaceAnyIpv4', - 'GET /redirect-0 HTTP/1.1\r\nHost: {host}:{port}\r\n\r\n'.format(host=HOST1, port=redirect_serv.Variables.Port)) - +buildMetaTest( + 'RejectRedirectToInterfaceAnyIpv4', + 'GET /redirect-0 HTTP/1.1\r\nHost: {host}:{port}\r\n\r\n'.format(host=HOST1, port=redirect_serv.Variables.Port)) # Sets up redirect to IPv6 ANY address redirect_request_header = {"headers": "GET /redirect-0v6 HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""} -redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: http://[::]:{0}/\r\nConnection: close\r\n\r\n".format( - ts.Variables.port), "timestamp": "5678", "body": ""} +redirect_response_header = { + "headers": "HTTP/1.1 302 Found\r\nLocation: http://[::]:{0}/\r\nConnection: close\r\n\r\n".format(ts.Variables.port), + "timestamp": "5678", + "body": "" +} redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) -buildMetaTest('RejectRedirectToInterfaceAnyIpv6', - 'GET /redirect-0v6 HTTP/1.1\r\nHost: {host}:{port}\r\n\r\n'.format(host=HOST1, port=redirect_serv.Variables.Port)) - +buildMetaTest( + 'RejectRedirectToInterfaceAnyIpv6', + 'GET /redirect-0v6 HTTP/1.1\r\nHost: {host}:{port}\r\n\r\n'.format(host=HOST1, port=redirect_serv.Variables.Port)) Test.Setup.Copy(data_path) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/bigobj/bigobj.test.py trafficserver-9.2.4+ds/tests/gold_tests/bigobj/bigobj.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/bigobj/bigobj.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/bigobj/bigobj.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,9 +24,7 @@ # by increasing the value of the obj_kilobytes variable below. (But do not increase it on any shared branch # that we do CI runs on.) -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # push_request and check_ramp are built via `make`. Here we copy the built binary down to the test # directory so that the test runs in this file can use it. 
@@ -36,24 +34,21 @@ ts = Test.MakeATSProcess("ts1", enable_tls=True) ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|cache', - 'proxy.config.http.cache.required_headers': 0, # No required headers for caching - 'proxy.config.http.push_method_enabled': 1, - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.remap_required': 0 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - 'map https://localhost http://localhost' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|cache', + 'proxy.config.http.cache.required_headers': 0, # No required headers for caching + 'proxy.config.http.push_method_enabled': 1, + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.remap_required': 0 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine('map https://localhost http://localhost') # Set up to check the output after the tests have run. # @@ -71,41 +66,35 @@ tr.Processes.Default.StartBefore(ts) # # Put object with URL http://localhost/bigobj in cache using PUSH request. -tr.Processes.Default.Command = ( - f'./push_request {obj_kilobytes} | nc localhost {ts.Variables.port}' -) +tr.Processes.Default.Command = (f'./push_request {obj_kilobytes} | nc localhost {ts.Variables.port}') tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun("GET bigobj: cleartext, HTTP/1.1, IPv4") tr.Processes.Default.Command = ( 'curl --verbose --ipv4 --http1.1 --header "Host: localhost"' f' http://localhost:{ts.Variables.port}/bigobj 2>> log.txt |' - f' ./check_ramp {obj_kilobytes}' -) + f' ./check_ramp {obj_kilobytes}') tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun("GET bigobj: TLS, HTTP/1.1, IPv4") tr.Processes.Default.Command = ( 'curl --verbose --ipv4 --http1.1 --insecure --header "Host: localhost"' f' https://localhost:{ts.Variables.ssl_port}/bigobj 2>> log.txt |' - f' ./check_ramp {obj_kilobytes}' -) + f' ./check_ramp {obj_kilobytes}') tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun("GET bigobj: TLS, HTTP/2, IPv4") tr.Processes.Default.Command = ( 'curl --verbose --ipv4 --http2 --insecure --header "Host: localhost"' f' https://localhost:{ts.Variables.ssl_port}/bigobj 2>> log.txt |' - f' ./check_ramp {obj_kilobytes}' -) + f' ./check_ramp {obj_kilobytes}') tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun("GET bigobj: TLS, HTTP/2, IPv6") tr.Processes.Default.Command = ( 'curl --verbose --ipv6 --http2 --insecure --header "Host: localhost"' f' https://localhost:{ts.Variables.ssl_portv6}/bigobj 2>> log.txt |' - f' ./check_ramp {obj_kilobytes}' -) + f' ./check_ramp {obj_kilobytes}') tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() @@ -117,31 +106,24 @@ ts = Test.MakeATSProcess("ts2", enable_tls=True) ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|cache', - 'proxy.config.http.cache.required_headers': 0, # No required 
headers for caching - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.remap_required': 0 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - 'map https://localhost http://localhost' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|cache', + 'proxy.config.http.cache.required_headers': 0, # No required headers for caching + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.remap_required': 0 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine('map https://localhost http://localhost') tr = Test.AddTestRun("PUSH request is rejected when push_method_enabled is 0") tr.Processes.Default.StartBefore(ts) -tr.Processes.Default.Command = ( - f'./push_request {obj_kilobytes} | nc localhost {ts.Variables.port}' -) +tr.Processes.Default.Command = (f'./push_request {obj_kilobytes} | nc localhost {ts.Variables.port}') tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.Streams.stdout = Testers.ContainsExpression( - "403 Access Denied", - "The PUSH request should have received a 403 response." -) + "403 Access Denied", "The PUSH request should have received a 403 response.") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http204_response.test.py trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http204_response.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http204_response.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http204_response.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -58,17 +58,12 @@ regex_remap_conf_file = "maps.reg" ts.Disk.remap_config.AddLine( - 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host' - .format(DEFAULT_204_HOST, server.Variables.Port, regex_remap_conf_file) -) + 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host'.format( + DEFAULT_204_HOST, server.Variables.Port, regex_remap_conf_file)) ts.Disk.remap_config.AddLine( 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host @plugin=conf_remap.so @pparam=proxy.config.body_factory.template_base={0}' - .format(CUSTOM_TEMPLATE_204_HOST, server.Variables.Port, regex_remap_conf_file) -) -ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine( - '//.*/ http://127.0.0.1:{0} @status=204' - .format(server.Variables.Port) -) + .format(CUSTOM_TEMPLATE_204_HOST, server.Variables.Port, regex_remap_conf_file)) +ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine('//.*/ http://127.0.0.1:{0} @status=204'.format(server.Variables.Port)) Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) Test.Setup.Copy('data') @@ -82,7 +77,6 @@ defaultTr.Processes.Default.ReturnCode = 0 defaultTr.Processes.Default.Streams.stdout = "gold/http-204.gold" - customTemplateTr = Test.AddTestRun(f"Test domain {CUSTOM_TEMPLATE_204_HOST}") customTemplateTr.StillRunningBefore = ts 
customTemplateTr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http304_response.test.py trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http304_response.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http304_response.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http304_response.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,17 +29,12 @@ DEFAULT_304_HOST = 'www.default304.test' - regex_remap_conf_file = "maps.reg" ts.Disk.remap_config.AddLine( - 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host' - .format(DEFAULT_304_HOST, server.Variables.Port, regex_remap_conf_file) -) -ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine( - '//.*/ http://127.0.0.1:{0} @status=304' - .format(server.Variables.Port) -) + 'map http://{0} http://127.0.0.1:{1} @plugin=regex_remap.so @pparam={2} @pparam=no-query-string @pparam=host'.format( + DEFAULT_304_HOST, server.Variables.Port, regex_remap_conf_file)) +ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine('//.*/ http://127.0.0.1:{0} @status=304'.format(server.Variables.Port)) Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) Test.Setup.Copy('data') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http_head_no_origin.test.py trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http_head_no_origin.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http_head_no_origin.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http_head_no_origin.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,7 +29,6 @@ HOST = 'www.example.test' - Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) Test.Setup.Copy('data') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http_with_origin.test.py trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http_with_origin.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/body_factory/http_with_origin.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/body_factory/http_with_origin.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -30,40 +30,40 @@ server = Test.MakeOriginServer("server") -ts.Disk.remap_config.AddLine( - 'map http://{0} http://127.0.0.1:{1}'.format(HOST, server.Variables.Port) -) - -server.addResponse("sessionfile.log", { - "headers": "HEAD /head200 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), - "timestamp": "1469733493.993", - "body": "" -}, { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "This body should not be returned for a HEAD request." -}) - -server.addResponse("sessionfile.log", { - "headers": "GET /get200 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), - "timestamp": "1469733493.993", - "body": "" -}, { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "This body should be returned for a GET request." 
-}) - -server.addResponse("sessionfile.log", { - "headers": "GET /get304 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), - "timestamp": "1469733493.993", - "body": "" -}, { - "headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "" -}) +ts.Disk.remap_config.AddLine('map http://{0} http://127.0.0.1:{1}'.format(HOST, server.Variables.Port)) +server.addResponse( + "sessionfile.log", { + "headers": "HEAD /head200 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "This body should not be returned for a HEAD request." + }) + +server.addResponse( + "sessionfile.log", { + "headers": "GET /get200 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "This body should be returned for a GET request." + }) + +server.addResponse( + "sessionfile.log", { + "headers": "GET /get304 HTTP/1.1\r\nHost: {0}\r\n\r\n".format(HOST), + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }) Test.Setup.Copy(os.path.join(os.pardir, os.pardir, 'tools', 'tcp_client.py')) Test.Setup.Copy('data') @@ -79,7 +79,6 @@ trhead200.Processes.Default.ReturnCode = 0 trhead200.Processes.Default.Streams.stdout = "gold/http-head-200.gold" - trget200 = Test.AddTestRun("Test domain {0}".format(HOST)) trget200.StillRunningBefore = ts trget200.StillRunningBefore = server @@ -91,7 +90,6 @@ trget200.Processes.Default.ReturnCode = 0 trget200.Processes.Default.Streams.stdout = "gold/http-get-200.gold" - trget304 = Test.AddTestRun("Test domain {0}".format(HOST)) trget304.StillRunningBefore = ts trget304.StillRunningBefore = server diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/alternate-caching.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/alternate-caching.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/alternate-caching.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/alternate-caching.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,27 +25,24 @@ # Verify disabled negative_revalidating behavior. 
# ts = Test.MakeATSProcess("ts-alternate-caching") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|cache', +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|cache', + 'proxy.config.http.cache.max_stale_age': 6, + 'proxy.config.cache.select_alternate': 1, + 'proxy.config.cache.limits.http.max_alts': 4, - 'proxy.config.http.cache.max_stale_age': 6, - 'proxy.config.cache.select_alternate': 1, - 'proxy.config.cache.limits.http.max_alts': 4, - - # Try with and without this - 'proxy.config.http.negative_revalidating_enabled': 1, - 'proxy.config.http.negative_caching_enabled': 1, - 'proxy.config.http.negative_caching_lifetime': 30 - -}) + # Try with and without this + 'proxy.config.http.negative_revalidating_enabled': 1, + 'proxy.config.http.negative_caching_enabled': 1, + 'proxy.config.http.negative_caching_lifetime': 30 + }) tr = Test.AddTestRun("Verify disabled negative revalidating behavior.") replay_file = "replay/alternate-caching-update-size.yaml" server = tr.AddVerifierServerProcess("server1", replay_file) server_port = server.Variables.http_port tr.AddVerifierClientProcess("client1", replay_file, http_ports=[ts.Variables.port]) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server_port)) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/background_fill.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/background_fill.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/background_fill.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/background_fill.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,9 +19,7 @@ from enum import Enum Test.Summary = 'Exercise Background Fill' -Test.SkipUnless( - Condition.HasCurlFeature('http2'), -) +Test.SkipUnless(Condition.HasCurlFeature('http2'),) Test.ContinueOnFail = True @@ -46,24 +44,23 @@ self.httpbin = Test.MakeHttpBinServer("httpbin") def __setupTS(self): - self.ts = Test.MakeATSProcess( - "ts", select_ports=True, enable_tls=True, enable_cache=True) + self.ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True, enable_cache=True) self.ts.addDefaultSSLFiles() - self.ts.Disk.ssl_multicert_config.AddLine( - "dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key") + self.ts.Disk.ssl_multicert_config.AddLine("dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key") - self.ts.Disk.records_config.update({ - "proxy.config.http.server_ports": f"{self.ts.Variables.port} {self.ts.Variables.ssl_port}:ssl", - 'proxy.config.ssl.server.cert.path': f"{self.ts.Variables.SSLDir}", - 'proxy.config.ssl.server.private_key.path': f"{self.ts.Variables.SSLDir}", - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "http", - "proxy.config.http.background_fill_active_timeout": "0", - "proxy.config.http.background_fill_completed_threshold": "0.0", - "proxy.config.http.cache.required_headers": 0, # Force cache - "proxy.config.http.insert_response_via_str": 2, - }) + self.ts.Disk.records_config.update( + { + "proxy.config.http.server_ports": f"{self.ts.Variables.port} {self.ts.Variables.ssl_port}:ssl", + 'proxy.config.ssl.server.cert.path': f"{self.ts.Variables.SSLDir}", + 'proxy.config.ssl.server.private_key.path': f"{self.ts.Variables.SSLDir}", + 
"proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "http", + "proxy.config.http.background_fill_active_timeout": "0", + "proxy.config.http.background_fill_completed_threshold": "0.0", + "proxy.config.http.cache.required_headers": 0, # Force cache + "proxy.config.http.insert_response_via_str": 2, + }) self.ts.Disk.remap_config.AddLines([ f"map / http://127.0.0.1:{self.httpbin.Variables.Port}/", diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-control.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-control.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-control.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-control.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -30,32 +30,46 @@ # **testname is required** testName = "" request_header1 = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -response_header1 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: max-age=300\r\n\r\n", - "timestamp": "1469733493.993", "body": "xxx"} -request_header2 = {"headers": "GET /no_cache_control HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} -response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": "the flinstones"} -request_header3 = {"headers": "GET /max_age_10sec HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} -response_header3 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: max-age=10,public\r\n\r\n", - "timestamp": "1469733493.993", "body": "yabadabadoo"} +response_header1 = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: max-age=300\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" +} +request_header2 = { + "headers": "GET /no_cache_control HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header2 = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "the flinstones" +} +request_header3 = { + "headers": "GET /max_age_10sec HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header3 = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: max-age=10,public\r\n\r\n", + "timestamp": "1469733493.993", + "body": "yabadabadoo" +} server.addResponse("sessionlog.json", request_header1, response_header1) server.addResponse("sessionlog.json", request_header2, response_header2) server.addResponse("sessionlog.json", request_header3, response_header3) # ATS Configuration ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.response_via_str': 3, - 'proxy.config.http.insert_age_in_response': 0, -}) - -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.response_via_str': 3, + 'proxy.config.http.insert_age_in_response': 0, + }) + +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) # Test 1 - 200 response and cache fill tr = Test.AddTestRun() @@ -114,18 +128,17 @@ ts = Test.MakeATSProcess("ts-for-proxy-verifier") replay_file = 
"replay/cache-control-max-age.replay.yaml" server = Test.MakeVerifierServerProcess("proxy-verifier-server", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - - # Disable ignoring max-age in the client request so we can test that - # behavior too. - 'proxy.config.http.cache.ignore_client_cc_max_age': 0, -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + + # Disable ignoring max-age in the client request so we can test that + # behavior too. + 'proxy.config.http.cache.ignore_client_cc_max_age': 0, + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify correct max-age cache-control behavior.") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-generation-clear.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-generation-clear.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-generation-clear.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-generation-clear.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -26,23 +26,23 @@ ts = Test.MakeATSProcess("ts", command="traffic_manager") # setup some config file for this server -ts.Disk.records_config.update({ - 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory - 'proxy.config.http.cache.generation': -1, # Start with cache turned off - 'proxy.config.config_update_interval_ms': 1, -}) +ts.Disk.records_config.update( + { + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation': -1, # Start with cache turned off + 'proxy.config.config_update_interval_ms': 1, + }) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.remap_config.AddLines([ - 'map /default/ http://127.0.0.1/ @plugin=generator.so', - # line 2 - 'map /generation1/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + - ' @plugin=generator.so', - # line 3 - 'map /generation2/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + - ' @plugin=generator.so' -]) +ts.Disk.remap_config.AddLines( + [ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + # line 2 + 'map /generation1/ http://127.0.0.1/' + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + # line 3 + 'map /generation2/ http://127.0.0.1/' + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so', + ]) objectid = uuid.uuid4() # first test is a miss for default diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-generation-disjoint.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-generation-disjoint.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-generation-disjoint.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-generation-disjoint.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,24 +27,23 @@ ts = Test.MakeATSProcess("ts") # setup some config file for this server 
-ts.Disk.records_config.update({ - 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory - 'proxy.config.http.cache.generation': -1, # Start with cache turned off - 'proxy.config.config_update_interval_ms': 1, - -}) +ts.Disk.records_config.update( + { + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation': -1, # Start with cache turned off + 'proxy.config.config_update_interval_ms': 1, + }) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.remap_config.AddLines([ - 'map /default/ http://127.0.0.1/ @plugin=generator.so', - # line 2 - 'map /generation1/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + - ' @plugin=generator.so', - # line 3 - 'map /generation2/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + - ' @plugin=generator.so' -]) +ts.Disk.remap_config.AddLines( + [ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + # line 2 + 'map /generation1/ http://127.0.0.1/' + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + # line 3 + 'map /generation2/ http://127.0.0.1/' + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so', + ]) objectid = uuid.uuid4() # first test is a miss for default diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-range-response.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-range-response.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-range-response.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-range-response.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,16 +24,15 @@ ts = Test.MakeATSProcess("ts") replay_file = "replay/cache-range-response.replay.yaml" server = Test.MakeVerifierServerProcess("server0", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http.*|cache.*', - 'proxy.config.http.cache.range.write': 1, - 'proxy.config.http.cache.when_to_revalidate': 4, - 'proxy.config.http.insert_response_via_str': 3, -}) -ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{server.Variables.http_port}' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|cache.*', + 'proxy.config.http.cache.range.write': 1, + 'proxy.config.http.cache.when_to_revalidate': 4, + 'proxy.config.http.insert_response_via_str': 3, + }) +ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.http_port}') tr = Test.AddTestRun("Verify range request is transformed from a 200 response") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-request-method.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-request-method.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/cache-request-method.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/cache-request-method.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -26,18 +26,17 @@ ts = Test.MakeATSProcess("ts") replay_file = "replay/post_with_post_caching_disabled.replay.yaml" server = Test.MakeVerifierServerProcess("server0", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 
'http.*|cache.*', - 'proxy.config.http.insert_age_in_response': 0, +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|cache.*', + 'proxy.config.http.insert_age_in_response': 0, - # Caching of POST responses is disabled by default. Verify default behavior - # by leaving it unconfigured. - # 'proxy.config.http.cache.post_method': 0, -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) + # Caching of POST responses is disabled by default. Verify default behavior + # by leaving it unconfigured. + # 'proxy.config.http.cache.post_method': 0, + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify correct with POST response caching disabled.") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) @@ -48,15 +47,14 @@ ts = Test.MakeATSProcess("ts-cache-post") replay_file = "replay/post_with_post_caching_enabled.replay.yaml" server = Test.MakeVerifierServerProcess("server1", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http.*|cache.*', - 'proxy.config.http.insert_age_in_response': 0, - 'proxy.config.http.cache.post_method': 1, -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|cache.*', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.cache.post_method': 1, + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify correct with POST response caching enabled.") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) @@ -66,14 +64,13 @@ ts = Test.MakeATSProcess("ts-cache-head") replay_file = "replay/head_with_get_cached.replay.yaml" server = Test.MakeVerifierServerProcess("server2", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http.*|cache.*', - 'proxy.config.http.insert_age_in_response': 0, -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|cache.*', + 'proxy.config.http.insert_age_in_response': 0, + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify correct with HEAD response.") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/conditional-get-hit.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/conditional-get-hit.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/conditional-get-hit.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/conditional-get-hit.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,19 +22,17 @@ ''' ts = Test.MakeATSProcess("ts-conditional-get-caching") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|cache', - - 'proxy.config.http.cache.max_stale_age': 6, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|cache', + 
'proxy.config.http.cache.max_stale_age': 6, + }) tr = Test.AddTestRun("Verify conditional get with cache hit drain client body") replay_file = "replay/conditional-get-cache-hit.yaml" server = tr.AddVerifierServerProcess("server1", replay_file) server_port = server.Variables.http_port tr.AddVerifierClientProcess("client1", replay_file, http_ports=[ts.Variables.port]) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server_port)) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/disjoint-wait-for-cache.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/disjoint-wait-for-cache.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/disjoint-wait-for-cache.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/disjoint-wait-for-cache.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,24 +29,24 @@ # Setup some config file for this server. Note that setting wait_for_cache to 3 # will intentionally override the value set in MakeATSProcess. -ts.Disk.records_config.update({ - 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory - 'proxy.config.http.cache.generation': -1, # Start with cache turned off - 'proxy.config.config_update_interval_ms': 1, - 'proxy.config.http.wait_for_cache': 3, -}) +ts.Disk.records_config.update( + { + 'proxy.config.body_factory.enable_customizations': 3, # enable domain specific body factory + 'proxy.config.http.cache.generation': -1, # Start with cache turned off + 'proxy.config.config_update_interval_ms': 1, + 'proxy.config.http.wait_for_cache': 3, + }) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.remap_config.AddLines([ - 'map /default/ http://127.0.0.1/ @plugin=generator.so', - # line 2 - 'map /generation1/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + - ' @plugin=generator.so', - # line 3 - 'map /generation2/ http://127.0.0.1/' + - ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + - ' @plugin=generator.so' -]) +ts.Disk.remap_config.AddLines( + [ + 'map /default/ http://127.0.0.1/ @plugin=generator.so', + # line 2 + 'map /generation1/ http://127.0.0.1/' + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=1' + + ' @plugin=generator.so', + # line 3 + 'map /generation2/ http://127.0.0.1/' + ' @plugin=conf_remap.so @pparam=proxy.config.http.cache.generation=2' + + ' @plugin=generator.so', + ]) objectid = uuid.uuid4() # first test is a miss for default diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/negative-caching.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/negative-caching.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/negative-caching.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/negative-caching.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,16 +27,14 @@ ts = Test.MakeATSProcess("ts-disabled") replay_file = "replay/negative-caching-disabled.replay.yaml" server = Test.MakeVerifierServerProcess("server-disabled", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.http.negative_caching_enabled': 0 -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) 
+ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.negative_caching_enabled': 0 + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify correct behavior without negative caching enabled.") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) @@ -48,16 +46,14 @@ ts = Test.MakeATSProcess("ts-default") replay_file = "replay/negative-caching-default.replay.yaml" server = Test.MakeVerifierServerProcess("server-default", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.http.negative_caching_enabled': 1 -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.negative_caching_enabled': 1 + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify default negative caching behavior") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) @@ -69,17 +65,15 @@ ts = Test.MakeATSProcess("ts-customized") replay_file = "replay/negative-caching-customized.replay.yaml" server = Test.MakeVerifierServerProcess("server-customized", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.http.negative_caching_enabled': 1, - 'proxy.config.http.negative_caching_list': "400" -}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.negative_caching_enabled': 1, + 'proxy.config.http.negative_caching_list': "400" + }) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Verify customized negative caching list") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) @@ -89,14 +83,14 @@ # Verify correct proxy.config.http.negative_caching_lifetime behavior. # ts = Test.MakeATSProcess("ts-lifetime") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.http.negative_caching_enabled': 1, - 'proxy.config.http.negative_caching_lifetime': 2 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.negative_caching_enabled': 1, + 'proxy.config.http.negative_caching_lifetime': 2 + }) # This should all behave the same as the default enabled case above. tr = Test.AddTestRun("Add a 404 response to the cache") replay_file = "replay/negative-caching-default.replay.yaml" @@ -105,9 +99,7 @@ # across both. 
server_port = server.Variables.http_port tr.AddVerifierClientProcess("client-lifetime-no-cc", replay_file, http_ports=[ts.Variables.port]) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server_port)) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = ts @@ -128,14 +120,14 @@ # proxy.config.http.negative_caching_lifetime. # ts = Test.MakeATSProcess("ts-lifetime-2") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.http.negative_caching_enabled': 1, - 'proxy.config.http.negative_caching_lifetime': 2 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.negative_caching_enabled': 1, + 'proxy.config.http.negative_caching_lifetime': 2 + }) tr = Test.AddTestRun("Add a 404 response with explicit max-age=300 to the cache") replay_file = "replay/negative-caching-300-second-timeout.replay.yaml" server = tr.AddVerifierServerProcess("server-lifetime-cc", replay_file) @@ -143,9 +135,7 @@ # across both. server_port = server.Variables.http_port tr.AddVerifierClientProcess("client-lifetime-cc", replay_file, http_ports=[ts.Variables.port]) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server_port)) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/negative-revalidating.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/negative-revalidating.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/negative-revalidating.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/negative-revalidating.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,22 +25,20 @@ # Verify disabled negative_revalidating behavior. # ts = Test.MakeATSProcess("ts-negative-revalidating-disabled") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|cache', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.http.negative_revalidating_enabled': 0, - 'proxy.config.http.cache.max_stale_age': 6 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|cache', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.negative_revalidating_enabled': 0, + 'proxy.config.http.cache.max_stale_age': 6 + }) tr = Test.AddTestRun("Verify disabled negative revalidating behavior.") replay_file = "replay/negative-revalidating-disabled.replay.yaml" server = tr.AddVerifierServerProcess("server1", replay_file) server_port = server.Variables.http_port tr.AddVerifierClientProcess("client1", replay_file, http_ports=[ts.Variables.port]) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server_port)) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = ts @@ -48,23 +46,22 @@ # Verify enabled negative_revalidating behavior. 
# ts = Test.MakeATSProcess("ts-negative-revalidating-enabled") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|cache', - 'proxy.config.http.insert_age_in_response': 0, +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|cache', + 'proxy.config.http.insert_age_in_response': 0, - # Negative revalidating is on by default. Verify this by leaving out the - # following line and expect negative_revalidating to be enabled. - # 'proxy.config.http.negative_revalidating_enabled': 1, - 'proxy.config.http.cache.max_stale_age': 6 -}) + # Negative revalidating is on by default. Verify this by leaving out the + # following line and expect negative_revalidating to be enabled. + # 'proxy.config.http.negative_revalidating_enabled': 1, + 'proxy.config.http.cache.max_stale_age': 6 + }) tr = Test.AddTestRun("Verify negative revalidating behavior.") replay_file = "replay/negative-revalidating-enabled.replay.yaml" server = tr.AddVerifierServerProcess("server2", replay_file) server_port = server.Variables.http_port tr.AddVerifierClientProcess("client2", replay_file, http_ports=[ts.Variables.port]) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server_port)) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cache/vary-handling.test.py trafficserver-9.2.4+ds/tests/gold_tests/cache/vary-handling.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cache/vary-handling.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cache/vary-handling.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,17 +24,16 @@ ts = Test.MakeATSProcess("ts") replay_file = "replay/varied_transactions.replay.yaml" server = Test.MakeVerifierServerProcess("server", replay_file) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.insert_age_in_response': 0, - 'proxy.config.cache.limits.http.max_alts': 4, - 'proxy.config.cache.log.alternate.eviction': 1, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.cache.limits.http.max_alts': 4, + 'proxy.config.cache.log.alternate.eviction': 1, + }) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) tr = Test.AddTestRun("Run traffic with max_alts behavior when set to 4") tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,22 +29,22 @@ server = Test.MakeOriginServer("server") testName = "" -request_header = {"headers": "POST /case1 HTTP/1.1\r\nHost: www.example.com\r\nuuid:1\r\n\r\n", - "timestamp": "1469733493.993", - "body": "stuff" - } -response_header = {"headers": "HTTP/1.1 200 OK\r\nServer: 
uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", - "timestamp": "1469733493.993", - "body": "more stuff"} +request_header = { + "headers": "POST /case1 HTTP/1.1\r\nHost: www.example.com\r\nuuid:1\r\n\r\n", + "timestamp": "1469733493.993", + "body": "stuff" +} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", + "timestamp": "1469733493.993", + "body": "more stuff" +} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http'}) +ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 0, 'proxy.config.diags.debug.tags': 'http'}) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) # HTTP1.1 POST: www.example.com/case1 with gzip transfer-encoding tr = Test.AddTestRun() @@ -84,19 +84,16 @@ def setupTS(self): self.ts = Test.MakeATSProcess("ts2", enable_tls=True, enable_cache=False) self.ts.addDefaultSSLFiles() - self.ts.Disk.records_config.update({ - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "http", - "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', - }) - self.ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) - self.ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{self.server.Variables.http_port}/", - ) + self.ts.Disk.records_config.update( + { + "proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "http", + "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', + }) + self.ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + self.ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{self.server.Variables.http_port}/",) def runChunkedTraffic(self): tr = Test.AddTestRun() @@ -128,41 +125,35 @@ def setupOriginServer(self): self.server = Test.MakeVerifierServerProcess("verifier-server2", self.chunkedReplayFile) - # The server's responses will fail the first two transactions + # The server's responses will fail the first three transactions # because ATS will close the connection due to the malformed # chunk headers. self.server.Streams.stdout += Testers.ContainsExpression( - "Unexpected chunked content for key 1: too small", - "Verify that writing the first response failed.") + "Unexpected chunked content for key 1: too small", "Verify that writing the first response failed.") + self.server.Streams.stdout += Testers.ExcludesExpression( + "chunked body of 3 bytes for key 2 with chunk stream", "Verify that writing the second response failed.") self.server.Streams.stdout += Testers.ContainsExpression( - "Unexpected chunked content for key 2: too small", - "Verify that writing the second response failed.") + "Unexpected chunked content for key 3: too small", "Verify that writing the third response failed.") # ATS should close the connection before any body gets through. "abc" # is the body sent by the client for each of these chunked cases. 
- self.server.Streams.stdout += Testers.ExcludesExpression( - "abc", - "Verify that the body never got through.") + self.server.Streams.stdout += Testers.ExcludesExpression("abc", "Verify that the body never got through.") def setupTS(self): self.ts = Test.MakeATSProcess("ts3", enable_tls=True, enable_cache=False) self.ts.addDefaultSSLFiles() - self.ts.Disk.records_config.update({ - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "http", - "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', - }) - self.ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) - self.ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{self.server.Variables.http_port}/", - ) + self.ts.Disk.records_config.update( + { + "proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "http", + "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', + }) + self.ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + self.ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{self.server.Variables.http_port}/",) self.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "user agent post chunk decoding error", - "Verify that ATS detected a problem parsing a chunk.") + "user agent post chunk decoding error", "Verify that ATS detected a problem parsing a chunk.") def runChunkedTraffic(self): tr = Test.AddTestRun() @@ -181,17 +172,18 @@ # code from the verifier client. tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - r"(Unexpected chunked content for key 3: too small|Failed HTTP/1 transaction with key: 3)", - "Verify that ATS closed the third transaction.") - tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( r"(Unexpected chunked content for key 4: too small|Failed HTTP/1 transaction with key: 4)", - "Verify that ATS closed the fourth transaction.") + "Verify that ATS closed the forth transaction.") + tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( + r"(Unexpected chunked content for key 5: too small|Failed HTTP/1 transaction with key: 5)", + "Verify that ATS closed the fifth transaction.") + tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( + r"(Unexpected chunked content for key 6: too small|Failed HTTP/1 transaction with key: 6)", + "Verify that ATS closed the sixth transaction.") # ATS should close the connection before any body gets through. "def" # is the body sent by the server for each of these chunked cases. 
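# Illustrative sketch (editor's addition, not part of the upstream diff): what the URI-encoded
# replay bodies above decode to on the wire. A chunked body is "<size-in-hex>\r\n<data>\r\n"
# repeated, terminated by a zero-size chunk, which is why a non-hex size like "3z" or a size
# field too large for an int makes ATS drop the connection.
valid_chunked_body = b"3\r\nabc\r\n0\r\n\r\n"           # one 3-byte chunk ("abc"), then the terminator
malformed_hex_size = b"3z\r\nabc\r\n0\r\n\r\n"          # "3z" is not valid hex: chunk parsing fails
oversized_size     = b"111111113\r\nabc\r\n0\r\n\r\n"   # size field larger than fits in an int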
- tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression( - "def", - "Verify that the body never got through.") + tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression("def", "Verify that the body never got through.") def run(self): self.runChunkedTraffic() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/chunked_encoding.test.py trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/chunked_encoding.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/chunked_encoding.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/chunked_encoding.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,9 +22,7 @@ Test chunked encoding processing ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) Test.ContinueOnFail = True Test.GetTcpPort("upstream_port") @@ -36,29 +34,36 @@ server3 = Test.MakeOriginServer("server3") testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": "" - } -response_header = {"headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} request_header2 = { - "headers": "POST / HTTP/1.1\r\nHost: www.anotherexample.com\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n", + "headers": + "POST / HTTP/1.1\r\nHost: www.anotherexample.com\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n", "timestamp": "1415926535.898", - "body": "knock knock"} -response_header2 = {"headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", - "timestamp": "1415926535.898", - "body": ""} + "body": "knock knock" +} +response_header2 = { + "headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", + "timestamp": "1415926535.898", + "body": "" +} request_header3 = { - "headers": "POST / HTTP/1.1\r\nHost: www.yetanotherexample.com\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n", + "headers": + "POST / HTTP/1.1\r\nHost: www.yetanotherexample.com\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n", + "timestamp": "1415926535.898", + "body": "knock knock" +} +response_header3 = { + "headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", "timestamp": "1415926535.898", - "body": "knock knock"} -response_header3 = {"headers": "HTTP/1.1 200 OK\r\nServer: uServer\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n", - "timestamp": "1415926535.898", - "body": ""} + "body": "" +} server.addResponse("sessionlog.json", request_header, response_header) server2.addResponse("sessionlog.json", request_header2, response_header2) @@ -67,29 +72,22 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), 
- 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - }) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map http://www.yetanotherexample.com http://127.0.0.1:{0}'.format(server3.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map http://www.yetanotherexample.com http://127.0.0.1:{0}'.format(server3.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map https://www.anotherexample.com https://127.0.0.1:{0}'.format(server2.Variables.SSL_Port, ts.Variables.ssl_port) -) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(Test.Variables.upstream_port) -) + 'map https://www.anotherexample.com https://127.0.0.1:{0}'.format(server2.Variables.SSL_Port, ts.Variables.ssl_port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(Test.Variables.upstream_port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # smuggle-client is built via `make`. Here we copy the built binary down to the # test directory so that the test runs in this file can use it. 
@@ -98,8 +96,7 @@ # HTTP1.1 GET: www.example.com tr = Test.AddTestRun() tr.TimeOut = 5 -tr.Processes.Default.Command = 'curl --http1.1 --proxy 127.0.0.1:{0} http://www.example.com --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl --http1.1 --proxy 127.0.0.1:{0} http://www.example.com --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(server2) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/chunked_encoding_disabled.test.py trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/chunked_encoding_disabled.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/chunked_encoding_disabled.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/chunked_encoding_disabled.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,31 +33,28 @@ def setupTS(self): self.ts = Test.MakeATSProcess("ts", enable_tls=True, enable_cache=False) self.ts.addDefaultSSLFiles() - self.ts.Disk.records_config.update({ - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "http", - "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', + self.ts.Disk.records_config.update( + { + "proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "http", + "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', - # Never respond with chunked encoding. - "proxy.config.http.chunking_enabled": 0, - }) - self.ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) - self.ts.Disk.remap_config.AddLines([ - f"map /for/http http://127.0.0.1:{self.server.Variables.http_port}/", - f"map /for/tls https://127.0.0.1:{self.server.Variables.https_port}/", - ]) + # Never respond with chunked encoding. 
+ "proxy.config.http.chunking_enabled": 0, + }) + self.ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + self.ts.Disk.remap_config.AddLines( + [ + f"map /for/http http://127.0.0.1:{self.server.Variables.http_port}/", + f"map /for/tls https://127.0.0.1:{self.server.Variables.https_port}/", + ]) def runChunkedTraffic(self): tr = Test.AddTestRun() tr.AddVerifierClientProcess( - "client", - self.chunkedReplayFile, - http_ports=[self.ts.Variables.port], - https_ports=[self.ts.Variables.ssl_port]) + "client", self.chunkedReplayFile, http_ports=[self.ts.Variables.port], https_ports=[self.ts.Variables.ssl_port]) tr.Processes.Default.Streams.stdout += "gold/verifier_client_chunked.gold" tr.Processes.Default.StartBefore(self.server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/chunked_encoding_h2.test.py trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/chunked_encoding_h2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/chunked_encoding_h2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/chunked_encoding_h2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,8 +22,7 @@ Test.SkipUnless( Condition.HasProgram("nghttp", "Nghttp need to be installed on system for this test to work"), - Condition.HasCurlFeature('http2') -) + Condition.HasCurlFeature('http2')) Test.ContinueOnFail = True Test.GetTcpPort("upstream_port") @@ -34,24 +33,19 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.remap_config.AddLine( - 'map /delay-chunked-response http://127.0.0.1:{0}'.format(Test.Variables.upstream_port) -) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(Test.Variables.upstream_port) -) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.remap_config.AddLine('map /delay-chunked-response http://127.0.0.1:{0}'.format(Test.Variables.upstream_port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(Test.Variables.upstream_port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Using netcat as a cheap origin server in case 1 so we can insert a delay in sending back the response. 
# Replaced microserver for cases 2 and 3 as well because I was getting python exceptions when running diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml --- trafficserver-9.2.3+ds/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml 2024-04-03 15:38:30.000000000 +0000 @@ -42,7 +42,7 @@ - client-request: method: "POST" version: "1.1" - url: /large/chunk/size/header + url: /malformed/chunk/header2 headers: fields: - [ Host, example.com ] @@ -51,6 +51,26 @@ content: transfer: plain encoding: uri + # Chunk header sizes are in hex, so a size of `3z` is malformed. + data: 3z%0D%0Aabc%0D%0A0%0D%0A%0D%0A + + # The connection will be dropped and this response will not go out. + server-response: + status: 200 + +- transactions: + - client-request: + method: "POST" + version: "1.1" + url: /large/chunk/size/header + headers: + fields: + - [ Host, example.com ] + - [ Transfer-Encoding, chunked ] + - [ uuid, 3 ] + content: + transfer: plain + encoding: uri # Super large chunk header, larger than will fit in an int. data: 111111113%0D%0Aabc%0D%0A0%0D%0A%0D%0A @@ -70,7 +90,7 @@ headers: fields: - [ Host, example.com ] - - [ uuid, 3 ] + - [ uuid, 4 ] # The connection will be dropped and this response will not go out. server-response: @@ -89,11 +109,34 @@ - client-request: method: "GET" version: "1.1" + url: /response/malformed/chunk/size2 + headers: + fields: + - [ Host, example.com ] + - [ uuid, 5 ] + + # The connection will be dropped and this response will not go out. + server-response: + status: 200 + reason: OK + headers: + fields: + - [ Transfer-Encoding, chunked ] + content: + transfer: plain + encoding: uri + # Chunk header sizes are in hex, so a size of `1z` is malformed. + data: 1z%0D%0Adef%0D%0A0%0D%0A%0D%0A + +- transactions: + - client-request: + method: "GET" + version: "1.1" url: /response/large/chunk/size headers: fields: - [ Host, example.com ] - - [ uuid, 4 ] + - [ uuid, 6 ] # The connection will be dropped and this response will not go out. server-response: diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/command_argument/verify_global_plugin.test.py trafficserver-9.2.4+ds/tests/gold_tests/command_argument/verify_global_plugin.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/command_argument/verify_global_plugin.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/command_argument/verify_global_plugin.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -58,10 +58,7 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: verifying a plugin requires a plugin SO file path argument", - "Should warn about the need for an SO file argument") - - + "ERROR: verifying a plugin requires a plugin SO file path argument", "Should warn about the need for an SO file argument") """ TEST: verify_global_plugin should complain if the argument doesn't reference a shared object file. 
@@ -75,20 +72,14 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: .*No such file or directory", - "Should warn about the non-existent SO file argument") - - + "ERROR: .*No such file or directory", "Should warn about the non-existent SO file argument") """ TEST: verify_global_plugin should complain if the shared object file doesn't have the expected Plugin symbols. """ tr = Test.AddTestRun("Verify the requirement of our Plugin API.") ts = create_ts_process() -Test.PrepareTestPlugin( - os.path.join(Test.Variables.AtsTestPluginsDir, - 'missing_ts_plugin_init.so'), - ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'missing_ts_plugin_init.so'), ts) tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = \ "traffic_server -C 'verify_global_plugin {filename}'".format( @@ -96,23 +87,15 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: .*unable to find TSPluginInit function in", - "Should warn about the need for the TSPluginInit symbol") -ts.Disk.diags_log.Content = Testers.ContainsExpression( - "ERROR", - "ERROR: .*unable to find TSPluginInit function in") - - + "ERROR: .*unable to find TSPluginInit function in", "Should warn about the need for the TSPluginInit symbol") +ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR", "ERROR: .*unable to find TSPluginInit function in") """ TEST: Verify that passing a remap plugin produces a warning because it doesn't have the global plugin symbols. """ tr = Test.AddTestRun("Verify a properly formed plugin works as expected.") ts = create_ts_process() -Test.PrepareTestPlugin( - os.path.join(Test.Variables.AtsTestPluginsDir, - 'conf_remap_stripped.so'), - ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'conf_remap_stripped.so'), ts) tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = \ "traffic_server -C 'verify_global_plugin {filename}'".format( @@ -120,23 +103,15 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: .*unable to find TSPluginInit function in", - "Should warn about the need for the TSPluginInit symbol") -ts.Disk.diags_log.Content = Testers.ContainsExpression( - "ERROR", - "ERROR: .*unable to find TSPluginInit function in") - - + "ERROR: .*unable to find TSPluginInit function in", "Should warn about the need for the TSPluginInit symbol") +ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR", "ERROR: .*unable to find TSPluginInit function in") """ TEST: The happy case: a global plugin shared object file is passed as an argument that has the definition for the expected Plugin symbols. 
""" tr = Test.AddTestRun("Verify a properly formed plugin works as expected.") ts = create_ts_process() -Test.PrepareTestPlugin( - os.path.join(Test.Variables.AtsTestPluginsDir, - 'ssl_hook_test.so'), - ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts) tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = \ "traffic_server -C 'verify_global_plugin {filename}'".format( @@ -144,10 +119,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "NOTE: verifying plugin '.*' Success", - "Verification should succeed") - - + "NOTE: verifying plugin '.*' Success", "Verification should succeed") """ TEST: This is a regression test for a shared object file that doesn't have all of the required symbols defined because of a malformed interaction between C @@ -166,8 +138,5 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR:.*unable to load", - "Should log failure to load shared object") -ts.Disk.diags_log.Content = Testers.ContainsExpression( - "ERROR", - "Should log failure to load shared object") + "ERROR:.*unable to load", "Should log failure to load shared object") +ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR", "Should log failure to load shared object") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/command_argument/verify_remap_plugin.test.py trafficserver-9.2.4+ds/tests/gold_tests/command_argument/verify_remap_plugin.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/command_argument/verify_remap_plugin.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/command_argument/verify_remap_plugin.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -58,10 +58,7 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: verifying a plugin requires a plugin SO file path argument", - "Should warn about the need for an SO file argument") - - + "ERROR: verifying a plugin requires a plugin SO file path argument", "Should warn about the need for an SO file argument") """ TEST: verify_remap_plugin should complain if the argument doesn't reference a shared object file. @@ -75,20 +72,14 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: .*No such file or directory", - "Should warn about the non-existent SO file argument") - - + "ERROR: .*No such file or directory", "Should warn about the non-existent SO file argument") """ TEST: verify_remap_plugin should complain if the shared object file doesn't have the expected Plugin symbols. 
""" tr = Test.AddTestRun("Verify the requirement of our Plugin API.") ts = create_ts_process() -Test.PrepareTestPlugin( - os.path.join(Test.Variables.AtsTestPluginsDir, - 'missing_ts_plugin_init.so'), - ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'missing_ts_plugin_init.so'), ts) tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = \ "traffic_server -C 'verify_remap_plugin {filename}'".format( @@ -96,23 +87,15 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: .*missing required function TSRemapInit", - "Should warn about the need for the TSRemapInit symbol") -ts.Disk.diags_log.Content = Testers.ContainsExpression( - "ERROR", - "ERROR: .*missing required function TSRemapInit") - - + "ERROR: .*missing required function TSRemapInit", "Should warn about the need for the TSRemapInit symbol") +ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR", "ERROR: .*missing required function TSRemapInit") """ TEST: verify_remap_plugin should complain if the plugin has the global plugin symbols but not the remap ones. """ tr = Test.AddTestRun("Verify a global plugin argument produces warning.") ts = create_ts_process() -Test.PrepareTestPlugin( - os.path.join(Test.Variables.AtsTestPluginsDir, - 'ssl_hook_test.so'), - ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts) tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = \ "traffic_server -C 'verify_remap_plugin {filename}'".format( @@ -120,23 +103,15 @@ tr.Processes.Default.ReturnCode = 1 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "ERROR: .*missing required function TSRemapInit", - "Should warn about the need for the TSRemapInit symbol") -ts.Disk.diags_log.Content = Testers.ContainsExpression( - "ERROR", - "ERROR: .*missing required function TSRemapInit") - - + "ERROR: .*missing required function TSRemapInit", "Should warn about the need for the TSRemapInit symbol") +ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR", "ERROR: .*missing required function TSRemapInit") """ TEST: The happy case: a remap plugin shared object file is passed as an argument that has the definition for the expected Plugin symbols. 
""" tr = Test.AddTestRun("Verify a properly formed plugin works as expected.") ts = create_ts_process() -Test.PrepareTestPlugin( - os.path.join(Test.Variables.AtsTestPluginsDir, - 'conf_remap_stripped.so'), - ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'conf_remap_stripped.so'), ts) tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = \ "traffic_server -C 'verify_remap_plugin {filename}'".format( @@ -144,5 +119,4 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(ts) tr.Processes.Default.Streams.stderr = Testers.ContainsExpression( - "NOTE: verifying plugin '.*' Success", - "Verification should succeed") + "NOTE: verifying plugin '.*' Success", "Verification should succeed") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/connect/connect.test.py trafficserver-9.2.4+ds/tests/gold_tests/connect/connect.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/connect/connect.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/connect/connect.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -43,12 +43,13 @@ def __setupTS(self): self.ts = Test.MakeATSProcess("ts", select_ports=True) - self.ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.server_ports': f"{self.ts.Variables.port}", - 'proxy.config.http.connect_ports': f"{self.httpbin.Variables.Port}", - }) + self.ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.server_ports': f"{self.ts.Variables.port}", + 'proxy.config.http.connect_ports': f"{self.httpbin.Variables.Port}", + }) self.ts.Disk.remap_config.AddLines([ f"map http://foo.com/ http://127.0.0.1:{self.httpbin.Variables.Port}/", @@ -63,8 +64,7 @@ logs: - filename: access format: common -'''.split("\n") - ) +'''.split("\n")) def __checkProcessBefore(self, tr): if self.state == self.State.RUNNING: @@ -96,8 +96,7 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(self.ts.Variables.LOGDIR, 'access.log') - ) + os.path.join(self.ts.Variables.LOGDIR, 'access.log')) tr.Processes.Default.ReturnCode = 0 def run(self): diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/schedule.test.py trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/schedule.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/schedule.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/schedule.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import os Test.Summary = 'Test TSContSchedule API' @@ -27,15 +26,16 @@ Test.testName = 'Test TSContSchedule API' -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'TSContSchedule_test' -}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'TSContSchedule_test' + }) # Load plugin Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'cont_schedule.so'), ts, 'thread') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/schedule_on_pool.test.py trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/schedule_on_pool.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/schedule_on_pool.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/schedule_on_pool.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - import os Test.Summary = 'Test TSContScheduleOnPool API' @@ -27,15 +26,16 @@ Test.testName = 'Test TSContScheduleOnPool API' -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'TSContSchedule_test' -}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'TSContSchedule_test' + }) # Load plugin Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'cont_schedule.so'), ts, 'pool') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/schedule_on_thread.test.py trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/schedule_on_thread.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/schedule_on_thread.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/schedule_on_thread.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import os Test.Summary = 'Test TSContScheduleOnThread API' @@ -27,15 +26,16 @@ Test.testName = 'Test TSContScheduleOnThread API' -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'TSContSchedule_test' -}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'TSContSchedule_test' + }) # Load plugin Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'cont_schedule.so'), ts, 'thread') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/thread_affinity.test.py trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/thread_affinity.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/cont_schedule/thread_affinity.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/cont_schedule/thread_affinity.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - import os Test.Summary = 'Test TSContThreadAffinity APIs' @@ -27,15 +26,16 @@ Test.testName = 'Test TSContThreadAffinity APIs' -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'TSContSchedule_test' -}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'TSContSchedule_test' + }) # Load plugin Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'cont_schedule.so'), ts, 'affinity') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/continuations/double.test.py trafficserver-9.2.4+ds/tests/gold_tests/continuations/double.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/continuations/double.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/continuations/double.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,6 +18,7 @@ import os import subprocess + Test.Summary = ''' Test transactions and sessions for http1, making sure the two continuations catch the same number of hooks. 
''' @@ -30,16 +31,17 @@ Test.testName = "" request_header = {"headers": "GET / HTTP/1.1\r\nHost: double_h2.test\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) # add port and remap rule -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) ts.Disk.records_config.update({ 'proxy.config.diags.debug.enabled': 1, @@ -47,8 +49,7 @@ }) # add plugin to assist with test metrics -Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, - 'continuations_verify.so'), ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'continuations_verify.so'), ts) comparator_command = ''' if test "`traffic_ctl metric get continuations_verify.{0}.close.1 | cut -d ' ' -f 2`" -eq "`traffic_ctl metric get continuations_verify.{0}.close.2 | cut -d ' ' -f 2`" ; then\ @@ -72,8 +73,7 @@ tr.Processes.Default.ReturnCode = Any(0, 2) # Execution order is: ts/server, ps(curl cmds), Default Process. -tr.Processes.Default.StartBefore( - server, ready=When.PortOpen(server.Variables.Port)) +tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) ts.StartAfter(*ps) server.StartAfter(*ps) @@ -93,8 +93,7 @@ "let N=N-1 ; " "done ; " "echo TIMEOUT ; " - "exit 1" -) + "exit 1") tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Env = ts.Env tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/continuations/double_h2.test.py trafficserver-9.2.4+ds/tests/gold_tests/continuations/double_h2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/continuations/double_h2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/continuations/double_h2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,12 +18,11 @@ import os import subprocess + Test.Summary = ''' Test transactions and sessions for http2, making sure the two continuations catch the same number of hooks. ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) Test.ContinueOnFail = True # Define default ATS. Disable the cache to simplify the test. 
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True, command="traffic_manager", enable_cache=False) @@ -33,8 +32,11 @@ Test.testName = "" request_header = {"headers": "GET / HTTP/1.1\r\nHost: double_h2.test\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) @@ -43,26 +45,22 @@ ts.addDefaultSSLFiles() # add port and remap rule -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'continuations_verify', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.cache.enable_read_while_writer': 0, - 'proxy.config.http2.max_concurrent_streams_in': 65535 -}) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'continuations_verify', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.cache.enable_read_while_writer': 0, + 'proxy.config.http2.max_concurrent_streams_in': 65535 + }) # add plugin to assist with test metrics -Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, - 'continuations_verify.so'), ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'continuations_verify.so'), ts) comparator_command = ''' if test "`traffic_ctl metric get continuations_verify.{0}.close.1 | cut -d ' ' -f 2`" -eq "`traffic_ctl metric get continuations_verify.{0}.close.2 | cut -d ' ' -f 2`" ; then\ @@ -87,8 +85,7 @@ tr.Processes.Default.ReturnCode = Any(0, 2) # Execution order is: ts/server, ps(curl cmds), Default Process. -tr.Processes.Default.StartBefore( - server, ready=When.PortOpen(server.Variables.Port)) +tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) # Adds a delay once the ts port is ready. This is because we cannot test the ts state. 
tr.Processes.Default.StartBefore(Test.Processes.ts) ts.StartAfter(*ps) @@ -108,6 +105,7 @@ def make_done_stat_ready(tsenv): + def done_stat_ready(process, hasRunFor, **kw): retval = subprocess.run( "traffic_ctl metric get continuations_verify.test.done", diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/continuations/openclose.test.py trafficserver-9.2.4+ds/tests/gold_tests/continuations/openclose.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/continuations/openclose.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/continuations/openclose.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,6 +18,7 @@ import os import subprocess + Test.Summary = ''' Test transactions and sessions, making sure they open and close in the proper order. ''' @@ -29,27 +30,26 @@ server2 = Test.MakeOriginServer("server2") Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: oc.test\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: oc.test\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} -Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, - 'ssntxnorder_verify.so'), ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssntxnorder_verify.so'), ts) # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'ssntxnorder_verify.*', - 'proxy.config.cache.enable_read_while_writer': 0 -}) - -ts.Disk.remap_config.AddLine( - 'map http://oc.test:{0} http://127.0.0.1:{1}'.format( - ts.Variables.port, server.Variables.Port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'ssntxnorder_verify.*', + 'proxy.config.cache.enable_read_while_writer': 0 + }) + +ts.Disk.remap_config.AddLine('map http://oc.test:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port)) # Add connection close to ensure that the client connection closes promptly after completing the transaction cmd = 'curl -H "Connection: close" -vs -H "host:oc.test" http://127.0.0.1:{0}'.format(ts.Variables.port) @@ -64,8 +64,7 @@ tr.Processes.Default.ReturnCode = Any(0, 2) # Execution order is: ts/server, ps(curl cmds), Default Process. 
-tr.Processes.Default.StartBefore( - server, ready=When.PortOpen(server.Variables.Port)) +tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) ts.StartAfter(*ps) server.StartAfter(*ps) @@ -85,6 +84,7 @@ def make_done_stat_ready(tsenv): + def done_stat_ready(process, hasRunFor, **kw): retval = subprocess.run( "traffic_ctl metric get ssntxnorder_verify.test.done", @@ -124,10 +124,8 @@ tr.Processes.Default.Command = comparator_command.format('ssn') tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Env = ts.Env -tr.Processes.Default.Streams.stdout = Testers.ContainsExpression( - "yes", 'should verify contents') -tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression( - "ssntxnorder_verify.ssn.start 0", 'should be nonzero') +tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents') +tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression("ssntxnorder_verify.ssn.start 0", 'should be nonzero') tr.StillRunningAfter = ts tr.StillRunningAfter = server @@ -135,10 +133,8 @@ tr.Processes.Default.Command = comparator_command.format('txn') tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Env = ts.Env -tr.Processes.Default.Streams.stdout = Testers.ContainsExpression( - "yes", 'should verify contents') -tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression( - "ssntxnorder_verify.txn.start 0", 'should be nonzero') +tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents') +tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression("ssntxnorder_verify.txn.start 0", 'should be nonzero') # and we receive the same number of transactions as we asked it to make tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( "ssntxnorder_verify.txn.start {}".format(numberOfRequests), 'should be the number of transactions we made') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/continuations/openclose_h2.test.py trafficserver-9.2.4+ds/tests/gold_tests/continuations/openclose_h2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/continuations/openclose_h2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/continuations/openclose_h2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,13 +18,12 @@ import os import subprocess + Test.Summary = ''' Test transactions and sessions over http2, making sure they open and close in the proper order. ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # Define default ATS. Disable the cache to simplify the test. 
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True, command="traffic_manager", enable_cache=False) @@ -33,36 +32,33 @@ server2 = Test.MakeOriginServer("server2") Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: oc.test\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: oc.test\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, - 'ssntxnorder_verify.so'), ts) +Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssntxnorder_verify.so'), ts) # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.http2.zombie_debug_timeout_in': 10, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'ssntxnorder_verify', - 'proxy.config.cache.enable_read_while_writer': 0, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) - -ts.Disk.remap_config.AddLine( - 'map https://oc.test:{0} http://127.0.0.1:{1}'.format( - ts.Variables.ssl_port, server.Variables.Port) -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.http2.zombie_debug_timeout_in': 10, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'ssntxnorder_verify', + 'proxy.config.cache.enable_read_while_writer': 0, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.remap_config.AddLine('map https://oc.test:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') cmd = 'curl -k --resolve oc.test:{0}:127.0.0.1 --http2 https://oc.test:{0}'.format(ts.Variables.ssl_port) numberOfRequests = 100 @@ -76,8 +72,7 @@ tr.Processes.Default.ReturnCode = Any(0, 2) # Execution order is: ts/server, ps(curl cmds), Default Process. 
-tr.Processes.Default.StartBefore( - server, ready=When.PortOpen(server.Variables.Port)) +tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) # Don't know why we need both the start before and the start after ts.StartAfter(*ps) @@ -98,6 +93,7 @@ def make_done_stat_ready(tsenv): + def done_stat_ready(process, hasRunFor, **kw): retval = subprocess.run( "traffic_ctl metric get ssntxnorder_verify.test.done", @@ -137,10 +133,8 @@ tr.Processes.Default.Command = comparator_command.format('ssn') tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Env = ts.Env -tr.Processes.Default.Streams.stdout = Testers.ContainsExpression( - "yes", 'should verify contents') -tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression( - "ssntxnorder_verify.ssn.start 0", 'should be nonzero') +tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents') +tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression("ssntxnorder_verify.ssn.start 0", 'should be nonzero') tr.StillRunningAfter = ts tr.StillRunningAfter = server @@ -148,10 +142,8 @@ tr.Processes.Default.Command = comparator_command.format('txn') tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Env = ts.Env -tr.Processes.Default.Streams.stdout = Testers.ContainsExpression( - "yes", 'should verify contents') -tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression( - "ssntxnorder_verify.txn.start 0", 'should be nonzero') +tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("yes", 'should verify contents') +tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression("ssntxnorder_verify.txn.start 0", 'should be nonzero') # and we receive the same number of transactions as we asked it to make tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( "ssntxnorder_verify.txn.start {}".format(numberOfRequests), 'should be the number of transactions we made') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/continuations/session_id.test.py trafficserver-9.2.4+ds/tests/gold_tests/continuations/session_id.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/continuations/session_id.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/continuations/session_id.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,20 +17,21 @@ # limitations under the License. import os + Test.Summary = ''' Verify session ID properties. ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # Configure the server. server = Test.MakeOriginServer("server") -request_header = {"headers": "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionfile.log", request_header, response_header) # Configure ATS. Disable the cache to simplify the test. 
@@ -38,24 +39,21 @@ ts.addDefaultSSLFiles() -Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsBuildGoldTestsDir, - 'continuations', 'plugins', '.libs', 'session_id_verify.so'), ts) +Test.PrepareTestPlugin( + os.path.join(Test.Variables.AtsBuildGoldTestsDir, 'continuations', 'plugins', '.libs', 'session_id_verify.so'), ts) + +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'session_id_verify', + 'proxy.config.cache.enable_read_while_writer': 0, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'session_id_verify', - 'proxy.config.cache.enable_read_while_writer': 0, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) - -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # # Run some HTTP/1 traffic. @@ -70,8 +68,7 @@ ps = tr.SpawnCommands(cmdstr=cmd, count=numberOfRequests, retcode=Any(0, 2)) tr.Processes.Default.Env = ts.Env tr.Processes.Default.ReturnCode = Any(0, 2) -tr.Processes.Default.StartBefore( - server, ready=When.PortOpen(server.Variables.Port)) +tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) ts.StartAfter(*ps) server.StartAfter(*ps) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/dns/dns_down_nameserver.test.py trafficserver-9.2.4+ds/tests/gold_tests/dns/dns_down_nameserver.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/dns/dns_down_nameserver.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/dns/dns_down_nameserver.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,7 +19,6 @@ from ports import get_port - # This value tracks DNS_PRIMARY_RETRY_PERIOD in P_DNSProcessor.h. DNS_PRIMARY_RETRY_PERIOD = 5 @@ -67,19 +66,21 @@ # the responses come out of the cache without going to the origin. self._ts = Test.MakeATSProcess("ts", enable_cache=False) - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'hostdb|dns', - 'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns_port}', - 'proxy.config.dns.resolv_conf': 'NULL' - }) + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'hostdb|dns', + 'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns_port}', + 'proxy.config.dns.resolv_conf': 'NULL' + }) # Cause a name resolution for each, unique path. 
- self._ts.Disk.remap_config.AddLines([ - f'map /first/host http://first.host.com:{self._server.Variables.http_port}/', - f'map /second/host http://second.host.com:{self._server.Variables.http_port}/', - f'map /third/host http://third.host.com:{self._server.Variables.http_port}/', - ]) + self._ts.Disk.remap_config.AddLines( + [ + f'map /first/host http://first.host.com:{self._server.Variables.http_port}/', + f'map /second/host http://second.host.com:{self._server.Variables.http_port}/', + f'map /third/host http://third.host.com:{self._server.Variables.http_port}/', + ]) def _run_transaction(self, start_dns: bool, keyname: str): """Run a transaction with the name server reachable. @@ -99,10 +100,7 @@ self._client_counter += 1 if start_dns: - dns = tr.MakeDNServer( - f'dns{self._dns_counter}', - default='127.0.0.1', - port=self._dns_port) + dns = tr.MakeDNServer(f'dns{self._dns_counter}', default='127.0.0.1', port=self._dns_port) self._dns_counter += 1 tr.Processes.Default.StartBefore(dns) @@ -113,8 +111,7 @@ # Verify that the client tried to send the transaction. tr.Processes.Default.Streams.All += Testers.ContainsExpression( - f'uuid: {keyname}', - f'The client should have sent a transaction with uuid {keyname}') + f'uuid: {keyname}', f'The client should have sent a transaction with uuid {keyname}') # The client will report an error if ATS could not complete the # transaction due to DNS resolution issues. @@ -126,10 +123,7 @@ tr = Test.AddTestRun() if start_dns: - dns = tr.MakeDNServer( - f'dns{self._dns_counter}', - default='127.0.0.1', - port=self._dns_port) + dns = tr.MakeDNServer(f'dns{self._dns_counter}', default='127.0.0.1', port=self._dns_port) self._dns_counter += 1 tr.Processes.Default.StartBefore(dns) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/dns/dns_host_down.test.py trafficserver-9.2.4+ds/tests/gold_tests/dns/dns_host_down.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/dns/dns_host_down.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/dns/dns_host_down.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -41,25 +41,24 @@ """Configure Traffic Server.""" self._ts = Test.MakeATSProcess("ts", enable_cache=False) - self._ts.Disk.remap_config.AddLine( - f"map / http://resolve.this.com:{self._server.Variables.http_port}/" - ) - - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'hostdb|dns|http|socket', - 'proxy.config.http.connect_attempts_max_retries': 0, - 'proxy.config.http.connect_attempts_rr_retries': 0, - 'proxy.config.hostdb.fail.timeout': 10, - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.hostdb.ttl_mode': 1, - 'proxy.config.hostdb.timeout': 2, - 'proxy.config.hostdb.lookup_timeout': 2, - 'proxy.config.http.transaction_no_activity_timeout_in': 2, - 'proxy.config.http.connect_attempts_timeout': 2, - 'proxy.config.hostdb.host_file.interval': 1, - 'proxy.config.hostdb.host_file.path': os.path.join(Test.TestDirectory, "hosts_file"), - }) + self._ts.Disk.remap_config.AddLine(f"map / http://resolve.this.com:{self._server.Variables.http_port}/") + + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'hostdb|dns|http|socket', + 'proxy.config.http.connect_attempts_max_retries': 0, + 'proxy.config.http.connect_attempts_rr_retries': 0, + 'proxy.config.hostdb.fail.timeout': 10, + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.hostdb.ttl_mode': 1, + 'proxy.config.hostdb.timeout': 2, + 
'proxy.config.hostdb.lookup_timeout': 2, + 'proxy.config.http.transaction_no_activity_timeout_in': 2, + 'proxy.config.http.connect_attempts_timeout': 2, + 'proxy.config.hostdb.host_file.interval': 1, + 'proxy.config.hostdb.host_file.path': os.path.join(Test.TestDirectory, "hosts_file"), + }) # Even when the origin server is down, SM will return a hit-fresh domain from HostDB. # After request has failed, SM should mark the IP as down @@ -70,10 +69,7 @@ tr.Processes.Default.StartBefore(self._ts) tr.AddVerifierClientProcess( - "client-1", - DownCachedOriginServerTest.replay_file, - http_ports=[self._ts.Variables.port], - other_args='--keys 1') + "client-1", DownCachedOriginServerTest.replay_file, http_ports=[self._ts.Variables.port], other_args='--keys 1') # After host has been marked down from previous test, HostDB should not return # the host as available and DNS lookup should fail. @@ -81,18 +77,14 @@ tr = Test.AddTestRun() tr.AddVerifierClientProcess( - "client-2", - DownCachedOriginServerTest.replay_file, - http_ports=[self._ts.Variables.port], - other_args='--keys 2') + "client-2", DownCachedOriginServerTest.replay_file, http_ports=[self._ts.Variables.port], other_args='--keys 2') # Verify error log marking host down exists def _test_error_log(self): tr = Test.AddTestRun() tr.Processes.Default.Command = ( os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(self._ts.Variables.LOGDIR, 'error.log') - ) + os.path.join(self._ts.Variables.LOGDIR, 'error.log')) self._ts.Disk.error_log.Content = Testers.ContainsExpression("/dns/mark/down' marking down", "host should be marked down") self._ts.Disk.error_log.Content = Testers.ContainsExpression( diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/dns/dns_ttl.test.py trafficserver-9.2.4+ds/tests/gold_tests/dns/dns_ttl.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/dns/dns_ttl.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/dns/dns_ttl.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -63,40 +63,35 @@ return dns def setupOriginServer(self): - self.server = Test.MakeVerifierServerProcess( - f"server-{self.server_process_counter}", TtlDnsTest.single_transaction_replay) + self.server = Test.MakeVerifierServerProcess(f"server-{self.server_process_counter}", TtlDnsTest.single_transaction_replay) def setupTS(self): - self.ts = Test.MakeATSProcess( - f"ts-{self.server_process_counter}", select_ports=True, enable_cache=False) + self.ts = Test.MakeATSProcess(f"ts-{self.server_process_counter}", select_ports=True, enable_cache=False) self.dns_port = ports.get_port(self.ts, 'dns_port') - self.ts.Disk.records_config.update({ - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "dns", - - 'proxy.config.dns.nameservers': f'127.0.0.1:{self.dns_port}', - 'proxy.config.dns.resolv_conf': 'NULL', - - # Configure ATS to treat each resolved name to have a 1 second - # time to live. - "proxy.config.hostdb.ttl_mode": 1, - "proxy.config.hostdb.timeout": self.dnsTTL, - - # MicroDNS will be down for the second transaction. Have ATS give - # up trying to talk to it after one second. - "proxy.config.hostdb.lookup_timeout": self.queryTimeout, - }) + self.ts.Disk.records_config.update( + { + "proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "dns", + 'proxy.config.dns.nameservers': f'127.0.0.1:{self.dns_port}', + 'proxy.config.dns.resolv_conf': 'NULL', + + # Configure ATS to treat each resolved name to have a 1 second + # time to live. 
+ "proxy.config.hostdb.ttl_mode": 1, + "proxy.config.hostdb.timeout": self.dnsTTL, + + # MicroDNS will be down for the second transaction. Have ATS give + # up trying to talk to it after one second. + "proxy.config.hostdb.lookup_timeout": self.queryTimeout, + }) if self.configure_serve_stale: if self.exceed_serve_stale: stale_timeout = 1 else: stale_timeout = 300 - self.ts.Disk.records_config.update({ - "proxy.config.hostdb.serve_stale_for": stale_timeout - }) - self.ts.Disk.remap_config.AddLine( - f"map / http://resolve.this.com:{self.server.Variables.http_port}/") + self.ts.Disk.records_config.update({"proxy.config.hostdb.serve_stale_for": stale_timeout}) + self.ts.Disk.remap_config.AddLine(f"map / http://resolve.this.com:{self.server.Variables.http_port}/") def testRunWithDNS(self): tr = Test.AddTestRun() @@ -106,9 +101,7 @@ dns = self.addDNSServerToTestRun(tr) process_number = TtlDnsTest.get_unique_process_counter() tr.AddVerifierClientProcess( - f"client-{process_number}", - TtlDnsTest.single_transaction_replay, - http_ports=[self.ts.Variables.port]) + f"client-{process_number}", TtlDnsTest.single_transaction_replay, http_ports=[self.ts.Variables.port]) tr.Processes.Default.StartBefore(dns) tr.Processes.Default.StartBefore(self.server) @@ -135,9 +128,7 @@ # resolved. replay_file = TtlDnsTest.server_error_replay process_number = TtlDnsTest.get_unique_process_counter() - tr.AddVerifierClientProcess( - f"client-{process_number}", - replay_file, http_ports=[self.ts.Variables.port]) + tr.AddVerifierClientProcess(f"client-{process_number}", replay_file, http_ports=[self.ts.Variables.port]) tr.StillRunningAfter = self.server tr.StillRunningAfter = self.ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/dns/splitdns.test.py trafficserver-9.2.4+ds/tests/gold_tests/dns/splitdns.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/dns/splitdns.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/dns/splitdns.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,6 +20,7 @@ class SplitDNSTest: + def __init__(self): self.setupDNSServer() self.setupOriginServer() @@ -31,24 +32,21 @@ def setupOriginServer(self): self.origin_server = Test.MakeOriginServer("origin_server") - self.origin_server.addResponse("sessionlog.json", - {"headers": "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n"}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n"}) + self.origin_server.addResponse( + "sessionlog.json", {"headers": "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n"}, + {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n"}) def setupTS(self): - self.ts = Test.MakeATSProcess( - "ts", select_ports=True, enable_cache=False) - self.ts.Disk.records_config.update({ - "proxy.config.dns.splitDNS.enabled": 1, - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "dns|splitdns", - }) - self.ts.Disk.splitdns_config.AddLine( - f"dest_domain=foo.ts.a.o named=127.0.0.1:{self.dns.Variables.Port}") - self.ts.Disk.remap_config.AddLine( - f"map /foo/ http://foo.ts.a.o:{self.origin_server.Variables.Port}/") - self.ts.Disk.remap_config.AddLine( - f"map /bar/ http://127.0.0.1:{self.origin_server.Variables.Port}/") + self.ts = Test.MakeATSProcess("ts", select_ports=True, enable_cache=False) + self.ts.Disk.records_config.update( + { + "proxy.config.dns.splitDNS.enabled": 1, + "proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "dns|splitdns", + }) + self.ts.Disk.splitdns_config.AddLine(f"dest_domain=foo.ts.a.o 
named=127.0.0.1:{self.dns.Variables.Port}") + self.ts.Disk.remap_config.AddLine(f"map /foo/ http://foo.ts.a.o:{self.origin_server.Variables.Port}/") + self.ts.Disk.remap_config.AddLine(f"map /bar/ http://127.0.0.1:{self.origin_server.Variables.Port}/") def addTestCase0(self): tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/forward_proxy/forward_proxy.test.py trafficserver-9.2.4+ds/tests/gold_tests/forward_proxy/forward_proxy.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/forward_proxy/forward_proxy.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/forward_proxy/forward_proxy.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -47,12 +47,10 @@ ForwardProxyTest._server_counter += 1 if self._scheme_proto_mismatch_policy in (2, None): self.server.Streams.All = Testers.ExcludesExpression( - 'Received an HTTP/1 request with key 1', - 'Verify that the server did not receive the request.') + 'Received an HTTP/1 request with key 1', 'Verify that the server did not receive the request.') else: self.server.Streams.All = Testers.ContainsExpression( - 'Received an HTTP/1 request with key 1', - 'Verify that the server received the request.') + 'Received an HTTP/1 request with key 1', 'Verify that the server received the request.') def setupTS(self): """Configure the Traffic Server process.""" @@ -61,22 +59,22 @@ ForwardProxyTest._ts_counter += 1 self.ts.addDefaultSSLFiles() self.ts.Disk.ssl_multicert_config.AddLine("dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key") - self.ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{self.server.Variables.http_port}/") + self.ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{self.server.Variables.http_port}/") - self.ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': self.ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': self.ts.Variables.SSLDir, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': "http", - }) + self.ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': self.ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': self.ts.Variables.SSLDir, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': "http", + }) if self._scheme_proto_mismatch_policy is not None: - self.ts.Disk.records_config.update({ - 'proxy.config.ssl.client.scheme_proto_mismatch_policy': self._scheme_proto_mismatch_policy, - }) + self.ts.Disk.records_config.update( + { + 'proxy.config.ssl.client.scheme_proto_mismatch_policy': self._scheme_proto_mismatch_policy, + }) def addProxyHttpsToHttpCase(self): """Test ATS as an HTTPS forward proxy behind an HTTP server.""" @@ -93,12 +91,10 @@ if self._scheme_proto_mismatch_policy in (2, None): tr.Processes.Default.Streams.All = Testers.ContainsExpression( - '< HTTP/1.1 400 Invalid HTTP Request', - 'Verify that the request was rejected.') + '< HTTP/1.1 400 Invalid HTTP Request', 'Verify that the request was rejected.') else: tr.Processes.Default.Streams.All = Testers.ContainsExpression( - '< HTTP/1.1 200 OK', - 'Verify that curl received a 200 OK response.') + '< HTTP/1.1 200 OK', 'Verify that curl received a 200 OK response.') def run(self): """Configure the TestRun instances for this set of tests.""" diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2active_timeout.py 
trafficserver-9.2.4+ds/tests/gold_tests/h2/h2active_timeout.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2active_timeout.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2active_timeout.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - ''' An h2 client built to trigger active timeout. ''' @@ -132,14 +131,9 @@ def main(): parser = argparse.ArgumentParser() - parser.add_argument("port", - type=int, - help="Port to use") - parser.add_argument("path", - help="The path to request") - parser.add_argument("delay", - type=int, - help="The number of seconds to delay betwen requests in a stream") + parser.add_argument("port", type=int, help="Port to use") + parser.add_argument("path", help="The path to request") + parser.add_argument("delay", type=int, help="The number of seconds to delay betwen requests in a stream") args = parser.parse_args() makerequest(args.port, args.path, args.delay) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2client.py trafficserver-9.2.4+ds/tests/gold_tests/h2/h2client.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2client.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2client.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - ''' A basic, ad-hoc HTTP/2 client. ''' @@ -140,21 +139,11 @@ def main(): parser = argparse.ArgumentParser() - parser.add_argument("port", - type=int, - help="Port to use") - parser.add_argument("path", - help="The path to request") - parser.add_argument("--repeat", - type=int, - default=1, - help="Number of times to repeat the request") - parser.add_argument("--verify_default_body", - action="store_true", - help="Verify the default body content: abbb...") - parser.add_argument("--print_body", - action="store_true", - help="Print the response body") + parser.add_argument("port", type=int, help="Port to use") + parser.add_argument("path", help="The path to request") + parser.add_argument("--repeat", type=int, default=1, help="Number of times to repeat the request") + parser.add_argument("--verify_default_body", action="store_true", help="Verify the default body content: abbb...") + parser.add_argument("--print_body", action="store_true", help="Print the response body") args = parser.parse_args() for i in range(args.repeat): diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2disable.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/h2disable.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2disable.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2disable.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,9 +20,7 @@ Test disabling H2 on a per domain basis ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) @@ -35,23 +33,21 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced 
properties=all -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http|ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.accept_threads': 1 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http|ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.accept_threads': 1 + }) ts.Disk.sni_yaml.AddLines([ 'sni:', diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2disable_no_accept_threads.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/h2disable_no_accept_threads.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2disable_no_accept_threads.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2disable_no_accept_threads.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,9 +20,7 @@ Test disabling H2 on a per domain basis ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) @@ -35,23 +33,21 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http|ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.accept_threads': 0 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http|ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.accept_threads': 0 + }) ts.Disk.sni_yaml.AddLines([ 'sni:', diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2enable.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/h2enable.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2enable.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2enable.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,9 +20,7 @@ Test enabling H2 on a per domain basis ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # Define default ATS ts = Test.MakeATSProcess("ts", enable_tls=True) @@ -35,23 +33,21 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / 
http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Set up port 4444 with HTTP1 only, no HTTP/2 -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http|ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.accept_threads': 1, - 'proxy.config.http.server_ports': '{0}:ssl:proto=http {1}'.format(ts.Variables.ssl_port, ts.Variables.port) -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http|ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.accept_threads': 1, + 'proxy.config.http.server_ports': '{0}:ssl:proto=http {1}'.format(ts.Variables.ssl_port, ts.Variables.port) + }) ts.Disk.sni_yaml.AddLines([ 'sni:', diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2enable_no_accept_threads.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/h2enable_no_accept_threads.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2enable_no_accept_threads.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2enable_no_accept_threads.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,9 +20,7 @@ Test enabling H2 on a per domain basis ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # Define default ATS ts = Test.MakeATSProcess("ts", enable_tls=True) @@ -35,23 +33,21 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Set up port 4444 with HTTP1 only, no HTTP/2 -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http|ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.http.server_ports': '{0}:ssl:proto=http {1}'.format(ts.Variables.ssl_port, ts.Variables.port), - 'proxy.config.accept_threads': 0 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http|ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.http.server_ports': '{0}:ssl:proto=http {1}'.format(ts.Variables.ssl_port, ts.Variables.port), + 'proxy.config.accept_threads': 0 + }) ts.Disk.sni_yaml.AddLines([ 'sni:', 
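The reformatted hunks above and below all touch the same autest setup pattern: build an origin server, build an ATS process, push key/value pairs into records_config, and add remap, ssl_multicert and sni rules. For orientation only, here is a minimal sketch of that pattern, assuming the autest framework's injected `Test` and `Condition` globals; the hostname, response headers and records_config values chosen here are illustrative, not taken from any single test in this patch.

# Minimal autest setup sketch (assumes the autest-injected `Test` and `Condition`
# globals; the hostname and config values below are illustrative assumptions).
Test.SkipUnless(Condition.HasCurlFeature('http2'))

server = Test.MakeOriginServer("server")
server.addResponse(
    "sessionlog.json",
    {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""},
    {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""})

ts = Test.MakeATSProcess("ts", enable_tls=True)
ts.addDefaultSSLFiles()
ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port))
# The reformatting in these hunks places one dict key per line inside the call,
# which is why records_config.update() accounts for most of the churn.
ts.Disk.records_config.update(
    {
        'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir,
        'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir,
        'proxy.config.http.server_ports': '{0}:ssl:proto=http {1}'.format(ts.Variables.ssl_port, ts.Variables.port),
    })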
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/h2spec.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/h2spec.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/h2spec.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/h2spec.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -21,9 +21,7 @@ Test HTTP/2 with httpspec ''' -Test.SkipUnless( - Condition.HasProgram("h2spec", "h2spec need to be installed on system for this test to work"), -) +Test.SkipUnless(Condition.HasProgram("h2spec", "h2spec need to be installed on system for this test to work"),) Test.ContinueOnFail = True # ---- @@ -39,20 +37,17 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(httpbin.Variables.Port) -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.http.insert_request_via_str': 1, - 'proxy.config.http.insert_response_via_str': 1, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http', -}) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(httpbin.Variables.Port)) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.http.insert_request_via_str': 1, + 'proxy.config.http.insert_response_via_str': 1, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http', + }) # ---- # Test Cases diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/http2.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/http2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/http2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/http2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ Test a basic remap of a http/2 connection ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) Test.ContinueOnFail = True # ---- @@ -34,75 +32,97 @@ server = Test.MakeOriginServer("server") # For Test Case 1 & 5 - / -server.addResponse("sessionlog.json", - {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}) +server.addResponse( + "sessionlog.json", { + "headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }) # For Test Case 2 - /bigfile # Add info for the large H2 download test -server.addResponse("sessionlog.json", - {"headers": "GET /bigfile HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 191414\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}) +server.addResponse( + 
"sessionlog.json", { + "headers": "GET /bigfile HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": + "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 191414\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }) # For Test Case 3 - /test2 -server.addResponse("sessionlog.json", - {"headers": "GET /test2 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nTransfer-Encoding: chunked\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}) +server.addResponse( + "sessionlog.json", { + "headers": "GET /test2 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nTransfer-Encoding: chunked\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }) # For Test Case 6 - /postchunked post_body = "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" -server.addResponse("sessionlog.json", - {"headers": "POST /postchunked HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": post_body}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nContent-Length: 10\r\n\r\n", - "timestamp": "1469733493.993", - "body": "0123456789"}) +server.addResponse( + "sessionlog.json", { + "headers": "POST /postchunked HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": post_body + }, { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nContent-Length: 10\r\n\r\n", + "timestamp": "1469733493.993", + "body": "0123456789" + }) # For Test Case 7 - /bigpostchunked # Make a post body that will be split across at least two frames big_post_body = "0123456789" * 131070 -server.addResponse("sessionlog.json", - {"headers": "POST /bigpostchunked HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": big_post_body}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nContent-Length: 10\r\n\r\n", - "timestamp": "1469733493.993", - "body": "0123456789"}) +server.addResponse( + "sessionlog.json", { + "headers": "POST /bigpostchunked HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": big_post_body + }, { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nContent-Length: 10\r\n\r\n", + "timestamp": "1469733493.993", + "body": "0123456789" + }) big_post_body_file = open(os.path.join(Test.RunDirectory, "big_post_body"), "w") big_post_body_file.write(big_post_body) big_post_body_file.close() # For Test Case 8 - /huge_resp_hdrs -server.addResponse("sessionlog.json", - {"headers": "GET /huge_resp_hdrs HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nContent-Length: 6\r\n\r\n", - "timestamp": "1469733493.993", - "body": "200 OK"}) +server.addResponse( + "sessionlog.json", { + "headers": "GET /huge_resp_hdrs HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nContent-Length: 6\r\n\r\n", + "timestamp": "1469733493.993", + "body": "200 OK" + }) # For Test 
Case 9 - /status/204 -server.addResponse("sessionlog.json", - {"headers": "GET /status/204 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 204 No Content\r\nServer: microserver\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}) +server.addResponse( + "sessionlog.json", { + "headers": "GET /status/204 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": "HTTP/1.1 204 No Content\r\nServer: microserver\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }) # ---- # Setup ATS @@ -115,24 +135,20 @@ ts.Setup.CopyAs('rules/huge_resp_hdrs.conf', Test.RunDirectory) ts.Disk.remap_config.AddLine( 'map /huge_resp_hdrs http://127.0.0.1:{0}/huge_resp_hdrs @plugin=header_rewrite.so @pparam={1}/huge_resp_hdrs.conf '.format( - server.Variables.Port, Test.RunDirectory) -) + server.Variables.Port, Test.RunDirectory)) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http2.active_timeout_in': 3, - 'proxy.config.http2.max_concurrent_streams_in': 65535, -}) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http2.active_timeout_in': 3, + 'proxy.config.http2.max_concurrent_streams_in': 65535, + }) ts.Setup.CopyAs('h2client.py', Test.RunDirectory) ts.Setup.CopyAs('h2active_timeout.py', Test.RunDirectory) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/http2_flow_control.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/http2_flow_control.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/http2_flow_control.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/http2_flow_control.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,7 +20,6 @@ import re from typing import List, Optional - Test.Summary = __doc__ @@ -65,13 +64,11 @@ self._initial_window_size = initial_window_size self._expected_initial_window_size = ( - initial_window_size if initial_window_size is not None - else self._default_initial_window_size) + initial_window_size if initial_window_size is not None else self._default_initial_window_size) self._max_concurrent_streams_in = max_concurrent_streams_in self._expected_max_concurrent_streams_in = ( - max_concurrent_streams_in if max_concurrent_streams_in is not None - else self._default_max_concurrent_streams_in) + max_concurrent_streams_in if max_concurrent_streams_in is not None else self._default_max_concurrent_streams_in) self._verify_window_update_frames = verify_window_update_frames @@ -87,30 +84,25 @@ def _configure_server(self): """Configure the test server.""" - server = Test.MakeVerifierServerProcess( - 
f'server-{Http2FlowControlTest._server_counter}', - self._replay_file) + server = Test.MakeVerifierServerProcess(f'server-{Http2FlowControlTest._server_counter}', self._replay_file) Http2FlowControlTest._server_counter += 1 return server def _configure_trafficserver(self): """Configure a Traffic Server process.""" - ts = Test.MakeATSProcess( - f'ts-{Http2FlowControlTest._ts_counter}', - enable_tls=True, - enable_cache=False) + ts = Test.MakeATSProcess(f'ts-{Http2FlowControlTest._ts_counter}', enable_tls=True, enable_cache=False) Http2FlowControlTest._ts_counter += 1 ts.addDefaultSSLFiles() - ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': f'{ts.Variables.SSLDir}', - 'proxy.config.ssl.server.private_key.path': f'{ts.Variables.SSLDir}', - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(self._dns.Variables.Port), - - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - }) + ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': f'{ts.Variables.SSLDir}', + 'proxy.config.ssl.server.private_key.path': f'{ts.Variables.SSLDir}', + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(self._dns.Variables.Port), + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + }) if self._initial_window_size is not None: ts.Disk.records_config.update({ @@ -122,13 +114,9 @@ 'proxy.config.http2.max_concurrent_streams_in': self._max_concurrent_streams_in, }) - ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) - - ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{self._server.Variables.http_port}' - ) + ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + + ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{self._server.Variables.http_port}') return ts @@ -138,9 +126,7 @@ :param tr: The TestRun to associate the client with. 
""" tr.AddVerifierClientProcess( - f'client-{Http2FlowControlTest._client_counter}', - self._replay_file, - https_ports=[self._ts.Variables.ssl_port]) + f'client-{Http2FlowControlTest._client_counter}', self._replay_file, https_ports=[self._ts.Variables.ssl_port]) Http2FlowControlTest._client_counter += 1 tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( @@ -154,20 +140,16 @@ if self._verify_window_update_frames: tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - f'WINDOW_UPDATE.*id 0: {self._expected_initial_window_size}', - "Client should receive a session WINDOW_UPDATE.") + f'WINDOW_UPDATE.*id 0: {self._expected_initial_window_size}', "Client should receive a session WINDOW_UPDATE.") tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - f'WINDOW_UPDATE.*id 3: {self._expected_initial_window_size}', - "Client should receive a stream WINDOW_UPDATE.") + f'WINDOW_UPDATE.*id 3: {self._expected_initial_window_size}', "Client should receive a stream WINDOW_UPDATE.") tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - f'WINDOW_UPDATE.*id 5: {self._expected_initial_window_size}', - "Client should receive a stream WINDOW_UPDATE.") + f'WINDOW_UPDATE.*id 5: {self._expected_initial_window_size}', "Client should receive a stream WINDOW_UPDATE.") tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - f'WINDOW_UPDATE.*id 7: {self._expected_initial_window_size}', - "Client should receive a stream WINDOW_UPDATE.") + f'WINDOW_UPDATE.*id 7: {self._expected_initial_window_size}', "Client should receive a stream WINDOW_UPDATE.") def run(self): """Configure the TestRun.""" @@ -188,20 +170,15 @@ # # Configuring max_concurrent_streams_in. # -test = Http2FlowControlTest( - description="Configure max_concurrent_streams_in", - max_concurrent_streams_in=53) +test = Http2FlowControlTest(description="Configure max_concurrent_streams_in", max_concurrent_streams_in=53) test.run() # # Configuring initial_window_size. 
# -test = Http2FlowControlTest( - description="Configure a large initial_window_size_in", - initial_window_size=100123) +test = Http2FlowControlTest(description="Configure a large initial_window_size_in", initial_window_size=100123) test.run() - test = Http2FlowControlTest( description="Configure a small initial_window_size_in", max_concurrent_streams_in=10, diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/http2_priority.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/http2_priority.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/http2_priority.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/http2_priority.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,13 +32,17 @@ server = Test.MakeOriginServer("server") # Test Case 0: -server.addResponse("sessionlog.json", - {"headers": "GET /bigfile HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 1048576\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}) +server.addResponse( + "sessionlog.json", { + "headers": "GET /bigfile HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": + "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 1048576\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }) # ---- # Setup ATS @@ -47,20 +51,17 @@ ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.http2.stream_priority_enabled': 1, - 'proxy.config.http2.no_activity_timeout_in': 3, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http2', -}) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.http2.stream_priority_enabled': 1, + 'proxy.config.http2.no_activity_timeout_in': 3, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http2', + }) # ---- # Test Cases diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/httpbin.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/httpbin.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/httpbin.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/httpbin.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -45,21 +45,17 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(httpbin.Variables.Port) -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.http.insert_request_via_str': 1, - 'proxy.config.http.insert_response_via_str': 1, - 'proxy.config.ssl.server.cert.path': 
'{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http2', - -}) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(httpbin.Variables.Port)) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.http.insert_request_via_str': 1, + 'proxy.config.http.insert_response_via_str': 1, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http2', + }) ts.Disk.logging_yaml.AddLines( ''' logging: @@ -70,8 +66,7 @@ logs: - filename: access format: access -'''.split("\n") -) +'''.split("\n")) Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'access.log'), exists=True, content='gold/httpbin_access.gold') @@ -129,7 +124,5 @@ # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'access.log') -) + os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + os.path.join(ts.Variables.LOGDIR, 'access.log')) test_run.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/h2/nghttp.test.py trafficserver-9.2.4+ds/tests/gold_tests/h2/nghttp.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/h2/nghttp.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/h2/nghttp.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,13 +17,12 @@ # limitations under the License. 
import os + Test.Summary = ''' Test with nghttp ''' -Test.SkipUnless( - Condition.HasProgram("nghttp", "Nghttp need to be installed on system for this test to work"), -) +Test.SkipUnless(Condition.HasProgram("nghttp", "Nghttp need to be installed on system for this test to work"),) Test.ContinueOnFail = True # ---- @@ -40,29 +39,28 @@ # ---- # Setup ATS # ---- -ts = Test.MakeATSProcess("ts", select_ports=True, - enable_tls=True, enable_cache=False) +ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True, enable_cache=False) # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() ts.Setup.CopyAs('rules/graceful_shutdown.conf', Test.RunDirectory) -ts.Disk.remap_config.AddLines([ - 'map /httpbin/ http://127.0.0.1:{0}/ @plugin=header_rewrite.so @pparam={1}/graceful_shutdown.conf'.format( - httpbin.Variables.Port, Test.RunDirectory) -]) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http2_cs', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir) -}) +ts.Disk.remap_config.AddLines( + [ + 'map /httpbin/ http://127.0.0.1:{0}/ @plugin=header_rewrite.so @pparam={1}/graceful_shutdown.conf'.format( + httpbin.Variables.Port, Test.RunDirectory) + ]) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http2_cs', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir) + }) # ---- # Test Cases diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/accept_webp.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/accept_webp.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/accept_webp.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/accept_webp.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,28 +29,30 @@ testName = "accept_webp" request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\nAccept: image/webp,image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5\r\n\r\n", + "headers": + "GET / HTTP/1.1\r\nHost: www.example.com\r\nAccept: image/webp,image/png,image/svg+xml,image/*;q=0.8,video/*;q=0.8,*/*;q=0.5\r\n\r\n", "timestamp": "1469733493.993", - "body": ""} + "body": "" +} response_header = { "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Type: image/webp\r\nCache-Control: max-age=300\r\n", "timestamp": "1469733493.993", - "body": "xxx"} + "body": "xxx" +} server.addResponse("sessionlog.json", request_header, response_header) # ATS Configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http_match', - 'proxy.config.http.cache.ignore_accept_mismatch': 0, - 'proxy.config.http.insert_response_via_str': 3, - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.wait_for_cache': 1, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http_match', + 'proxy.config.http.cache.ignore_accept_mismatch': 0, + 'proxy.config.http.insert_response_via_str': 3, + 'proxy.config.http.cache.http': 1, + 
'proxy.config.http.wait_for_cache': 1, + }) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Test 1 - Request with image/webp support from the origin tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/cache_and_req_body.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/cache_and_req_body.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/cache_and_req_body.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/cache_and_req_body.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -31,58 +31,91 @@ testName = "" request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1\r\n\r\n", + "headers": + "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", - "body": "xxx"} + "body": "xxx" +} server.addResponse("sessionlog.json", request_header, response_header) # ATS Configuration ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.response_via_str': 3, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.response_via_str': 3, + }) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) cache_and_req_body_miss = { 'Connection': 'keep-alive', - 'Via': {'equal_re': None}, - 'Server': {'equal_re': '.*'}, - 'X-Cache-Key': {'equal_re': 'http://127.0.0.1.*'}, + 'Via': { + 'equal_re': None + }, + 'Server': { + 'equal_re': '.*' + }, + 'X-Cache-Key': { + 'equal_re': 'http://127.0.0.1.*' + }, 'X-Cache': 'miss', - 'Last-Modified': {'equal_re': '.*'}, + 'Last-Modified': { + 'equal_re': '.*' + }, 'cache-control': 'max-age=1', 'Content-Length': '3', - 'Date': {'equal_re': '.*'}, - 'Age': {'equal_re': '.*'} + 'Date': { + 'equal_re': '.*' + }, + 'Age': { + 'equal_re': '.*' + } } cache_and_req_body_hit = { - 'Last-Modified': {'equal_re': '.*'}, + 'Last-Modified': { + 'equal_re': '.*' + }, 'cache-control': 'max-age=1', 'Content-Length': '3', - 'Date': {'equal_re': '.*'}, - 'Age': {'equal_re': '.*'}, + 'Date': { + 'equal_re': '.*' + }, + 'Age': { + 'equal_re': '.*' + }, 'Connection': 'keep-alive', - 'Via': {'equal_re': '.*'}, - 'Server': {'equal_re': '.*'}, + 'Via': { + 'equal_re': '.*' + }, + 'Server': { + 'equal_re': '.*' + }, 'X-Cache': 'hit-fresh', 'HTTP/1.1 200 OK': '' } cache_and_req_body_hit_close = { - 'Last-Modified': {'equal_re': '.*'}, + 'Last-Modified': { + 'equal_re': '.*' + }, 'cache-control': 'max-age=1', 'Content-Length': '3', - 'Date': {'equal_re': '.*'}, - 'Age': {'equal_re': '.*'}, + 'Date': { + 'equal_re': '.*' + }, + 'Age': { + 'equal_re': '.*' + }, 'Connection': 'close', - 'Via': {'equal_re': '.*'}, - 'Server': {'equal_re': '.*'}, + 'Via': { + 'equal_re': '.*' + }, + 'Server': { + 'equal_re': '.*' + }, 'X-Cache': 'hit-fresh', 'HTTP/1.1 200 OK': '' } diff -Nru 
trafficserver-9.2.3+ds/tests/gold_tests/headers/cachedIMSRange.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/cachedIMSRange.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/cachedIMSRange.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/cachedIMSRange.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,87 +29,108 @@ # lookup_key is to make unique response in origin for header "UID" that will pass in ATS request server = Test.MakeOriginServer("server", lookup_key="{%UID}") # Initial request -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\nUID: Fill\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\nUID: Fill\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header = { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1\r\n\r\n", + "headers": + "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", - "body": "xxx"} + "body": "xxx" +} server.addResponse("sessionlog.json", request_header, response_header) # IMS revalidation request request_IMS_header = { "headers": "GET / HTTP/1.1\r\nUID: IMS\r\nIf-Modified-Since: Tue, 08 May 2018 15:49:41 GMT\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", - "body": ""} -response_IMS_header = {"headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\nCache-Control: max-age=1\r\n\r\n", - "timestamp": "1469733493.993", "body": None} + "body": "" +} +response_IMS_header = { + "headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\nCache-Control: max-age=1\r\n\r\n", + "timestamp": "1469733493.993", + "body": None +} server.addResponse("sessionlog.json", request_IMS_header, response_IMS_header) # EtagFill -request_etagfill_header = {"headers": "GET /etag HTTP/1.1\r\nHost: www.example.com\r\nUID: EtagFill\r\n\r\n", - "timestamp": "1469733493.993", "body": None} +request_etagfill_header = { + "headers": "GET /etag HTTP/1.1\r\nHost: www.example.com\r\nUID: EtagFill\r\n\r\n", + "timestamp": "1469733493.993", + "body": None +} response_etagfill_header = { "headers": "HTTP/1.1 200 OK\r\nETag: myetag\r\nConnection: close\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", - "body": "xxx"} + "body": "xxx" +} server.addResponse("sessionlog.json", request_etagfill_header, response_etagfill_header) # INM revalidation -request_INM_header = {"headers": "GET /etag HTTP/1.1\r\nUID: INM\r\nIf-None-Match: myetag\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": None} +request_INM_header = { + "headers": "GET /etag HTTP/1.1\r\nUID: INM\r\nIf-None-Match: myetag\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": None +} response_INM_header = { "headers": "HTTP/1.1 304 Not Modified\r\nConnection: close\r\nETag: myetag\r\nCache-Control: max-age=1\r\n\r\n", "timestamp": "1469733493.993", - "body": None} + "body": None +} server.addResponse("sessionlog.json", request_INM_header, response_INM_header) # object changed to 0 byte -request_noBody_header = {"headers": "GET / HTTP/1.1\r\nUID: noBody\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_noBody_header = { + "headers": "GET / HTTP/1.1\r\nUID: noBody\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} 
response_noBody_header = { "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 0\r\nCache-Control: max-age=3\r\n\r\n", "timestamp": "1469733493.993", - "body": ""} + "body": "" +} server.addResponse("sessionlog.json", request_noBody_header, response_noBody_header) # etag object now is a 404. Yeah, 404s don't usually have Cache-Control, but, ATS's default is to cache 404s for a while. -request_etagfill_header = {"headers": "GET /etag HTTP/1.1\r\nHost: www.example.com\r\nUID: EtagError\r\n\r\n", - "timestamp": "1469733493.993", "body": None} +request_etagfill_header = { + "headers": "GET /etag HTTP/1.1\r\nHost: www.example.com\r\nUID: EtagError\r\n\r\n", + "timestamp": "1469733493.993", + "body": None +} response_etagfill_header = { "headers": "HTTP/1.1 404 Not Found\r\nConnection: close\r\nContent-Length: 0\r\nCache-Control: max-age=3\r\n\r\n", "timestamp": "1469733493.993", - "body": ""} + "body": "" +} server.addResponse("sessionlog.json", request_etagfill_header, response_etagfill_header) # ATS Configuration ts = Test.MakeATSProcess("ts", enable_tls=True) ts.Disk.plugin_config.AddLine('xdebug.so') ts.addDefaultSSLFiles() -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.response_via_str': 3, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.response_via_str': 3, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) default_304_host = 'www.default304.test' regex_remap_conf_file = "maps.reg" -ts.Disk.remap_config.AddLines([ - f'map https://{default_304_host}/ http://127.0.0.1:{server.Variables.Port}/ ' - f'@plugin=regex_remap.so @pparam={regex_remap_conf_file} @pparam=no-query-string @pparam=host', - - f'map http://{default_304_host}/ http://127.0.0.1:{server.Variables.Port}/ ' - f'@plugin=regex_remap.so @pparam={regex_remap_conf_file} @pparam=no-query-string @pparam=host', - - f'map / http://127.0.0.1:{server.Variables.Port}', -]) - -ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine( - f'//.*/ http://127.0.0.1:{server.Variables.Port} @status=304' -) +ts.Disk.remap_config.AddLines( + [ + f'map https://{default_304_host}/ http://127.0.0.1:{server.Variables.Port}/ ' + f'@plugin=regex_remap.so @pparam={regex_remap_conf_file} @pparam=no-query-string @pparam=host', + f'map http://{default_304_host}/ http://127.0.0.1:{server.Variables.Port}/ ' + f'@plugin=regex_remap.so @pparam={regex_remap_conf_file} @pparam=no-query-string @pparam=host', + f'map / http://127.0.0.1:{server.Variables.Port}', + ]) + +ts.Disk.MakeConfigFile(regex_remap_conf_file).AddLine(f'//.*/ http://127.0.0.1:{server.Variables.Port} @status=304') # Test 0 - Fill a 3 byte object with Last-Modified time into cache. 
tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/domain-blacklist-30x.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/domain-blacklist-30x.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/domain-blacklist-30x.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/domain-blacklist-30x.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -34,20 +34,21 @@ REDIRECT_0_HOST = 'www.redirect0.test' PASSTHRU_HOST = 'www.passthrough.test' -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'header_rewrite|dbg_header_rewrite', - 'proxy.config.body_factory.enable_logging': 1, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'header_rewrite|dbg_header_rewrite', + 'proxy.config.body_factory.enable_logging': 1, + }) -ts.Disk.remap_config.AddLine("""\ +ts.Disk.remap_config.AddLine( + """\ regex_map http://{0}/ http://{0}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_301.conf regex_map http://{1}/ http://{1}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_302.conf regex_map http://{2}/ http://{2}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_307.conf regex_map http://{3}/ http://{3}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_308.conf regex_map http://{4}/ http://{4}/ @plugin=header_rewrite.so @pparam=header_rewrite_rules_0.conf -""".format(REDIRECT_301_HOST, REDIRECT_302_HOST, REDIRECT_307_HOST, REDIRECT_308_HOST, REDIRECT_0_HOST) -) +""".format(REDIRECT_301_HOST, REDIRECT_302_HOST, REDIRECT_307_HOST, REDIRECT_308_HOST, REDIRECT_0_HOST)) for x in (0, 301, 302, 307, 308): ts.Disk.MakeConfigFile("header_rewrite_rules_{0}.conf".format(x)).AddLine("""\ @@ -73,7 +74,6 @@ redirect302tr.Processes.Default.ReturnCode = 0 redirect302tr.Processes.Default.Streams.stdout = "redirect302_get.gold" - redirect307tr = Test.AddTestRun(f"Test domain {REDIRECT_307_HOST}") redirect302tr.StillRunningBefore = ts redirect307tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/field_name_space.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/field_name_space.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/field_name_space.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/field_name_space.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,19 +28,15 @@ server = Test.MakeOriginServer("server") testName = "field_name_space" -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = { "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nFoo : 123\r\nFoo: 456\r\n", "timestamp": "1469733493.993", - "body": "xxx"} + "body": "xxx" +} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Test spaces at the end of the field name and before the : tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/forwarded.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/forwarded.test.py --- 
trafficserver-9.2.3+ds/tests/gold_tests/headers/forwarded.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/forwarded.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,42 +33,50 @@ server = Test.MakeOriginServer("server", options={'--load': os.path.join(Test.TestDirectory, 'forwarded-observer.py')}) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.no-oride.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.no-oride.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-none.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-none.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-for.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-for.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-ip.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-ip.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-unknown.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-unknown.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionlog.json", request_header, response_header) request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-server-name.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-server-name.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-uuid.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-by-uuid.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-proto.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-proto.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-host.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.forwarded-host.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) request_header = { - "headers": "GET / HTTP/1.1\r\nHost: 
www.forwarded-connection-compact.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-compact.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionlog.json", request_header, response_header) request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-std.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-std.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionlog.json", request_header, response_header) request_header = { - "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-full.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} + "headers": "GET / HTTP/1.1\r\nHost: www.forwarded-connection-full.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionlog.json", request_header, response_header) # Set up to check the output after the tests have run. @@ -81,21 +89,18 @@ ts.addDefaultSSLFiles() - ts.Disk.records_config.update({ - # 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request. - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir) - }) + ts.Disk.records_config.update( + { + # 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request. + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir) + }) + + ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') - ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) - - ts.Disk.remap_config.AddLine( - 'map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port) - ) + ts.Disk.remap_config.AddLine('map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Disable the cache to make sure each request is forwarded to the origin @@ -106,48 +111,37 @@ ts.Disk.remap_config.AddLine( 'map http://www.forwarded-none.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=none' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=none') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-for.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=for' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=for') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-by-ip.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=ip' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=ip') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-by-unknown.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=unknown' -) + ' @plugin=conf_remap.so 
@pparam=proxy.config.http.insert_forwarded=by=unknown') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-by-server-name.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=serverName' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=serverName') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-by-uuid.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=uuid' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=by=uuid') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-proto.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=proto' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=proto') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-host.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=host' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=host') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-connection-compact.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=compact' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=compact') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-connection-std.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=std' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=std') ts.Disk.remap_config.AddLine( 'map http://www.forwarded-connection-full.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=full' -) + ' @plugin=conf_remap.so @pparam=proxy.config.http.insert_forwarded=connection=full') # Basic HTTP 1.1 -- No Forwarded by default tr = Test.AddTestRun() @@ -157,8 +151,7 @@ tr.Processes.Default.StartBefore(Test.Processes.ts) # tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts.Variables.port) -) + 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 @@ -166,8 +159,7 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://{}'.format(ts.Variables.port, host) - ) + 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://{}'.format(ts.Variables.port, host)) tr.Processes.Default.ReturnCode = 0 @@ -196,13 +188,13 @@ baselineTsSetup(ts2) -ts2.Disk.records_config.update({ - 'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request. - 'proxy.config.http.insert_forwarded': 'by=uuid'}) - -ts2.Disk.remap_config.AddLine( - 'map https://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts2.Disk.records_config.update( + { + 'proxy.config.url_remap.pristine_host_hdr': 1, # Retain Host header in original incoming client request. 
+ 'proxy.config.http.insert_forwarded': 'by=uuid' + }) + +ts2.Disk.remap_config.AddLine('map https://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Forwarded header with UUID of 2nd ATS. tr = Test.AddTestRun() @@ -210,16 +202,14 @@ tr.Processes.Default.StartBefore(Test.Processes.ts2) # tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port) -) + 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)) tr.Processes.Default.ReturnCode = 0 # Call traffic_ctrl to set insert_forwarded tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'traffic_ctl --debug config set proxy.config.http.insert_forwarded' + - ' "for|by=ip|by=unknown|by=servername|by=uuid|proto|host|connection=compact|connection=std|connection=full"' -) + ' "for|by=ip|by=unknown|by=servername|by=uuid|proto|host|connection=compact|connection=std|connection=full"') tr.Processes.Default.ForceUseShell = False tr.Processes.Default.Env = ts2.Env tr.Processes.Default.ReturnCode = 0 @@ -229,52 +219,45 @@ # Delay to give traffic_ctl config change time to take effect. tr.DelayStart = 15 tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port) -) + 'curl --verbose --ipv4 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)) tr.Processes.Default.ReturnCode = 0 # HTTP 1.0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.0 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port) -) + 'curl --verbose --ipv4 --http1.0 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.port)) tr.Processes.Default.ReturnCode = 0 # HTTP 1.0 -- Forwarded headers already present tr = Test.AddTestRun() tr.Processes.Default.Command = ( "curl --verbose -H 'forwarded:for=0.6.6.6' -H 'forwarded:for=_argh' --ipv4 --http1.0" + - " --proxy localhost:{} http://www.no-oride.com".format(ts2.Variables.port) -) + " --proxy localhost:{} http://www.no-oride.com".format(ts2.Variables.port)) tr.Processes.Default.ReturnCode = 0 # HTTP 2 tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'curl --verbose --ipv4 --http2 --insecure --header "Host: www.no-oride.com"' + - ' https://localhost:{}'.format(ts2.Variables.ssl_port) -) + ' https://localhost:{}'.format(ts2.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 # TLS tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}' - .format(ts2.Variables.ssl_port) -) + 'curl --verbose --ipv4 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}'.format( + ts2.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 # IPv6 tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl --verbose --ipv6 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.portv6) -) + 'curl --verbose --ipv6 --http1.1 --proxy localhost:{} http://www.no-oride.com'.format(ts2.Variables.portv6)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'curl --verbose --ipv6 --http1.1 --insecure --header "Host: www.no-oride.com" https://localhost:{}'.format( - ts2.Variables.ssl_portv6) -) + ts2.Variables.ssl_portv6)) tr.Processes.Default.ReturnCode = 0 diff -Nru 
trafficserver-9.2.3+ds/tests/gold_tests/headers/general-connection-failure-502.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/general-connection-failure-502.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/general-connection-failure-502.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/general-connection-failure-502.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,9 +28,7 @@ HOST = 'www.connectfail502.test' server = Test.MakeOriginServer("server", ssl=False) # Reserves a port across autest. -ts.Disk.remap_config.AddLine( - 'map http://{host} http://{ip}:{uport}'.format(host=HOST, ip='127.0.0.1', uport=server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://{host} http://{ip}:{uport}'.format(host=HOST, ip='127.0.0.1', uport=server.Variables.Port)) Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir, 'tcp_client.py')) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/good_request_after_bad.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/good_request_after_bad.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/good_request_after_bad.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/good_request_after_bad.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,39 +25,36 @@ Test.ContinueOnFail = True ts = Test.MakeATSProcess("ts", enable_cache=True) Test.ContinueOnFail = True -ts.Disk.records_config.update({'proxy.config.diags.debug.tags': 'http', - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.http.strict_uri_parsing': 1 - }) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.http.strict_uri_parsing': 1 + }) ts2 = Test.MakeATSProcess("ts2", enable_cache=True) -ts2.Disk.records_config.update({'proxy.config.diags.debug.tags': 'http', - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.http.strict_uri_parsing': 2 - }) - +ts2.Disk.records_config.update( + { + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.http.strict_uri_parsing': 2 + }) server = Test.MakeOriginServer("server") request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1000\r\n\r\n", + "headers": + "HTTP/1.1 200 OK\r\nConnection: close\r\nLast-Modified: Tue, 08 May 2018 15:49:41 GMT\r\nCache-Control: max-age=1000\r\n\r\n", "timestamp": "1469733493.993", - "body": "xxx"} + "body": "xxx" +} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map /bob<> http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts2.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts2.Disk.remap_config.AddLine( - 'map /bob<> http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map /bob<> http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts2.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts2.Disk.remap_config.AddLine('map /bob<> http://127.0.0.1:{0}'.format(server.Variables.Port)) trace_out = 
Test.Disk.File("trace_curl.txt") @@ -110,7 +107,6 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stdout = 'gold/bad_good_request_header.gold' - # TRACE request with a body tr = Test.AddTestRun("Trace request with a body") tr.Processes.Default.Command = 'printf "TRACE /foo HTTP/1.1\r\nHost: bob\r\nContent-length:2\r\n\r\nokGET / HTTP/1.1\r\nHost: boa\r\n\r\n" | nc 127.0.0.1 {}'.format( @@ -136,8 +132,7 @@ tr.Processes.Default.Command = 'curl -v --http1.1 -X TRACE -k http://127.0.0.1:{}/bar'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.All = Testers.ContainsExpression( - r"HTTP/1.1 501 Unsupported method \('TRACE'\)", - "microserver does not support TRACE") + r"HTTP/1.1 501 Unsupported method \('TRACE'\)", "microserver does not support TRACE") # Methods are case sensitive. Verify that "gET" is not confused with "GET". tr = Test.AddTestRun("mixed case method") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/hsts.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/hsts.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/hsts.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/hsts.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,30 +36,26 @@ # ATS Configuration ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.hsts_max_age': 300, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.hsts_max_age': 300, + }) -ts.Disk.remap_config.AddLine( - 'map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Test 1 - 200 Response tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.Command = ( - 'curl -s -D - --verbose --ipv4 --http1.1 --insecure --header "Host: {0}" https://localhost:{1}' - .format('www.example.com', ts.Variables.ssl_port) -) + 'curl -s -D - --verbose --ipv4 --http1.1 --insecure --header "Host: {0}" https://localhost:{1}'.format( + 'www.example.com', ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stdout = "hsts.200.gold" tr.StillRunningAfter = ts @@ -67,9 +63,8 @@ # Test 2 - 404 Not Found on Accelerator tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl -s -D - --verbose --ipv4 --http1.1 --insecure --header "Host: {0}" https://localhost:{1}' - .format('bad_host', ts.Variables.ssl_port) -) + 'curl -s -D - --verbose --ipv4 --http1.1 --insecure --header "Host: {0}" https://localhost:{1}'.format( + 'bad_host', ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stdout = "hsts.404.gold" tr.StillRunningAfter = server diff -Nru 
trafficserver-9.2.3+ds/tests/gold_tests/headers/http408.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/http408.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/http408.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/http408.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,9 +36,7 @@ response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.remap_config.AddLine( - 'map http://{0} http://127.0.0.1:{1}'.format(HTTP_408_HOST, server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://{0} http://127.0.0.1:{1}'.format(HTTP_408_HOST, server.Variables.Port)) TIMEOUT = 2 ts.Disk.records_config.update({ diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/invalid_range_header.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/invalid_range_header.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/invalid_range_header.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/invalid_range_header.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,23 +36,21 @@ def setupTS(self): self.ts = Test.MakeATSProcess("ts1") - self.ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.cache.range.write': 1, - 'proxy.config.http.cache.required_headers': 0, - 'proxy.config.http.insert_age_in_response': 0}) - self.ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{self.server.Variables.http_port}/", - ) + self.ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.cache.http': 1, + 'proxy.config.http.cache.range.write': 1, + 'proxy.config.http.cache.required_headers': 0, + 'proxy.config.http.insert_age_in_response': 0 + }) + self.ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{self.server.Variables.http_port}/",) def runTraffic(self): tr = Test.AddTestRun() tr.AddVerifierClientProcess( - "client1", - self.invalidRangeRequestReplayFile, - http_ports=[self.ts.Variables.port], - other_args='--thread-limit 1') + "client1", self.invalidRangeRequestReplayFile, http_ports=[self.ts.Variables.port], other_args='--thread-limit 1') tr.Processes.Default.StartBefore(self.server) tr.Processes.Default.StartBefore(self.ts) tr.StillRunningAfter = self.server @@ -60,11 +58,9 @@ # verification tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - r"Received an HTTP/1 416 response for key 2", - "Verify that client receives a 416 response") + r"Received an HTTP/1 416 response for key 2", "Verify that client receives a 416 response") tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( - r"x-responseheader: failed_response", - "Verify that the response came from the server") + r"x-responseheader: failed_response", "Verify that the response came from the server") def run(self): self.runTraffic() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/normalize_ae.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/normalize_ae.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/normalize_ae.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/normalize_ae.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ Test normalizations of the Accept-Encoding header 
field. ''' -Test.SkipUnless( - Condition.HasATSFeature('TS_HAS_BROTLI') -) +Test.SkipUnless(Condition.HasATSFeature('TS_HAS_BROTLI')) Test.ContinueOnFail = True @@ -54,25 +52,19 @@ # 'proxy.config.diags.debug.enabled': 1, }) - ts.Disk.remap_config.AddLine( - 'map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port) - ) + ts.Disk.remap_config.AddLine('map http://www.no-oride.com http://127.0.0.1:{0}'.format(server.Variables.Port)) ts.Disk.remap_config.AddLine( 'map http://www.ae-0.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=0' - ) + ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=0') ts.Disk.remap_config.AddLine( 'map http://www.ae-1.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=1' - ) + ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=1') ts.Disk.remap_config.AddLine( 'map http://www.ae-2.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=2' - ) + ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=2') ts.Disk.remap_config.AddLine( 'map http://www.ae-3.com http://127.0.0.1:{0}'.format(server.Variables.Port) + - ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=3' - ) + ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=3') baselineTsSetup(ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/syntax.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/syntax.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/syntax.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/syntax.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,9 +33,7 @@ response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Test 0 - 200 Response tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/headers/via.test.py trafficserver-9.2.4+ds/tests/gold_tests/headers/via.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/headers/via.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/headers/via.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,10 +24,7 @@ Check VIA header for protocol stack data. ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2'), - Condition.HasCurlFeature('IPv6') -) +Test.SkipUnless(Condition.HasCurlFeature('http2'), Condition.HasCurlFeature('IPv6')) Test.ContinueOnFail = True # Define default ATS @@ -44,23 +41,19 @@ # These should be promoted rather than other tests like this reaching around. 
ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.http.insert_request_via_str': 4, - 'proxy.config.http.insert_response_via_str': 4, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) +ts.Disk.records_config.update( + { + 'proxy.config.http.insert_request_via_str': 4, + 'proxy.config.http.insert_response_via_str': 4, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -) + 'map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Set up to check the output after the tests have run. via_log_id = Test.Disk.File("via.log") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/ip_allow/ip_allow.test.py trafficserver-9.2.4+ds/tests/gold_tests/ip_allow/ip_allow.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/ip_allow/ip_allow.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/ip_allow/ip_allow.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,83 +24,82 @@ Test.ContinueOnFail = True # Define default ATS -ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True, - enable_tls=True, enable_cache=False) +ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True, enable_tls=True, enable_cache=False) server = Test.MakeOriginServer("server", ssl=True) testName = "" request = { - "headers": - "GET /get HTTP/1.1\r\n" - "Host: www.example.com:80\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""} + "headers": "GET /get HTTP/1.1\r\n" + "Host: www.example.com:80\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Content-Length: 3\r\n" - "Connection: close\r\n\r\n", - "timestamp": - "1469733493.993", "body": "xxx"} + "headers": "HTTP/1.1 200 OK\r\n" + "Content-Length: 3\r\n" + "Connection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" +} server.addResponse("sessionlog.json", request, response) # The following shouldn't come to the server, but in the event that there is a # bug in ip_allow and they are sent through, have them return a 200 OK. This # will fail the match with the gold file which expects a 403. 
request = { - "headers": - "CONNECT www.example.com:80/connect HTTP/1.1\r\n" - "Host: www.example.com:80\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""} + "headers": "CONNECT www.example.com:80/connect HTTP/1.1\r\n" + "Host: www.example.com:80\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Content-Length: 3\r\n" - "Connection: close\r\n\r\n", - "timestamp": - "1469733493.993", "body": "xxx"} + "headers": "HTTP/1.1 200 OK\r\n" + "Content-Length: 3\r\n" + "Connection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" +} server.addResponse("sessionlog.json", request, response) request = { - "headers": - "PUSH www.example.com:80/h2_push HTTP/2\r\n" - "Host: www.example.com:80\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""} + "headers": "PUSH www.example.com:80/h2_push HTTP/2\r\n" + "Host: www.example.com:80\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response = { - "headers": - "HTTP/2 200 OK\r\n" - "Content-Length: 3\r\n" - "Connection: close\r\n\r\n", - "timestamp": - "1469733493.993", "body": "xxx"} + "headers": "HTTP/2 200 OK\r\n" + "Content-Length: 3\r\n" + "Connection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" +} server.addResponse("sessionlog.json", request, response) # Configure TLS for Traffic Server for HTTP/2. ts.addDefaultSSLFiles() -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ip-allow', - 'proxy.config.http.push_method_enabled': 1, - 'proxy.config.http.connect_ports': '{0}'.format(server.Variables.SSL_Port), - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.http2.active_timeout_in': 3, - 'proxy.config.http2.max_concurrent_streams_in': 65535, -}) - -format_string = ('%-% % % % %/% % ' - '% % %/% % %<{Y-RID}pqh> ' - '%<{Y-YPCS}pqh> %<{Host}cqh> %<{CHAD}pqh> ' - 'sftover=%<{x-safet-overlimit-rules}cqh> sftmat=%<{x-safet-matched-rules}cqh> ' - 'sftcls=%<{x-safet-classification}cqh> ' - 'sftbadclf=%<{x-safet-bad-classifiers}cqh> yra=%<{Y-RA}cqh> scheme=%') +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ip-allow', + 'proxy.config.http.push_method_enabled': 1, + 'proxy.config.http.connect_ports': '{0}'.format(server.Variables.SSL_Port), + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.http2.active_timeout_in': 3, + 'proxy.config.http2.max_concurrent_streams_in': 65535, + }) + +format_string = ( + '%-% % % % %/% % ' + '% % %/% % %<{Y-RID}pqh> ' + '%<{Y-YPCS}pqh> %<{Host}cqh> %<{CHAD}pqh> ' + 'sftover=%<{x-safet-overlimit-rules}cqh> sftmat=%<{x-safet-matched-rules}cqh> ' + 'sftcls=%<{x-safet-classification}cqh> ' + 'sftbadclf=%<{x-safet-bad-classifiers}cqh> yra=%<{Y-RA}cqh> scheme=%') ts.Disk.logging_yaml.AddLines( ''' logging: @@ -110,12 +109,9 @@ logs: - filename: squid.log format: custom -'''.format(format_string).split("\n") -) 
+'''.format(format_string).split("\n")) -ts.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port) -) +ts.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) # Note that CONNECT is not in the allowed list. ts.Disk.ip_allow_yaml.AddLines( @@ -129,15 +125,12 @@ action: allow methods: [GET, HEAD, POST ] -'''.split("\n") -) +'''.split("\n")) ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Line 1 denial for 'CONNECT' from 127.0.0.1", - "The CONNECT request should be denied by ip_allow") + "Line 1 denial for 'CONNECT' from 127.0.0.1", "The CONNECT request should be denied by ip_allow") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Line 1 denial for 'PUSH' from 127.0.0.1", - "The PUSH request should be denied by ip_allow") + "Line 1 denial for 'PUSH' from 127.0.0.1", "The PUSH request should be denied by ip_allow") # # TEST 1: Perform a GET request. Should be allowed because GET is in the allowlist. @@ -146,8 +139,8 @@ tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.SSL_Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) -tr.Processes.Default.Command = ('curl --verbose -H "Host: www.example.com" http://localhost:{ts_port}/get'. - format(ts_port=ts.Variables.port)) +tr.Processes.Default.Command = ( + 'curl --verbose -H "Host: www.example.com" http://localhost:{ts_port}/get'.format(ts_port=ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stderr = 'gold/200.gold' tr.StillRunningAfter = ts @@ -158,8 +151,8 @@ # not in the allowlist. # tr = Test.AddTestRun() -tr.Processes.Default.Command = ('curl --verbose -X CONNECT -H "Host: localhost" http://localhost:{ts_port}/connect'. - format(ts_port=ts.Variables.port)) +tr.Processes.Default.Command = ( + 'curl --verbose -X CONNECT -H "Host: localhost" http://localhost:{ts_port}/connect'.format(ts_port=ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stderr = 'gold/403.gold' tr.StillRunningAfter = ts @@ -170,8 +163,9 @@ # PUSH is not in the allowlist. # tr = Test.AddTestRun() -tr.Processes.Default.Command = ('curl --http2 --verbose -k -X PUSH -H "Host: localhost" https://localhost:{ts_port}/h2_push'. 
- format(ts_port=ts.Variables.ssl_port)) +tr.Processes.Default.Command = ( + 'curl --http2 --verbose -k -X PUSH -H "Host: localhost" https://localhost:{ts_port}/h2_push'.format( + ts_port=ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stderr = 'gold/403_h2.gold' tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/all_headers.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/all_headers.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/all_headers.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/all_headers.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,8 +32,11 @@ server = Test.MakeOriginServer("server") request_header = {"headers": "GET / HTTP/1.1\r\nHost: does.not.matter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-control: max-age=85000\r\n\r\n", - "timestamp": "1469733493.993", "body": "xxx"} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-control: max-age=85000\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" +} server.addResponse("sessionlog.json", request_header, response_header) ts.Disk.records_config.update({ @@ -41,9 +44,7 @@ 'proxy.config.diags.debug.tags': 'http|dns', }) -ts.Disk.remap_config.AddLine( - 'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port)) # Mix in a numeric log field. Hopefull this will detect any binary alignment problems. # @@ -56,8 +57,7 @@ logs: - filename: test_all_headers format: custom -'''.split("\n") -) +'''.split("\n")) # Configure comparison of "sanitized" log file with gold file at end of test. # @@ -80,16 +80,14 @@ tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.Command = ( - 'curl "http://127.0.0.1:{0}" --user-agent "007" --verbose '.format(ts.Variables.port) + reallyLong() -) + 'curl "http://127.0.0.1:{0}" --user-agent "007" --verbose '.format(ts.Variables.port) + reallyLong()) tr.Processes.Default.ReturnCode = 0 # Repeat same curl, will be answered from the ATS cache. # tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl "http://127.0.0.1:{0}" --user-agent "007" --verbose '.format(ts.Variables.port) + reallyLong() -) + 'curl "http://127.0.0.1:{0}" --user-agent "007" --verbose '.format(ts.Variables.port) + reallyLong()) tr.Processes.Default.ReturnCode = 0 # Delay to allow TS to flush report to disk, then "sanitize" generated log. 
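The autest hunks shown in this part of the diff are predominantly layout changes, consistent with an automated formatter pass: dictionary literals that were packed onto one or two lines are re-wrapped with one key per line, and multi-line AddLine()/Command string concatenations are collapsed onto single lines. As a quick stand-alone illustration (not part of the test suite or the package), the compact literal below is copied from the hunks above, and the expanded form mirrors the one-key-per-line style the diff introduces; the two evaluate to exactly the same Python object:

    # Hypothetical stand-alone check, not shipped in the package: the compact
    # literal is taken from the hunks above, the expanded one only changes layout.
    old_style = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
    new_style = {
        "headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
        "timestamp": "1469733493.993",
        "body": ""
    }
    assert old_style == new_style  # layout-only change; the data is identical

Because the re-wrapping is purely cosmetic, the behaviour of these gold tests is unchanged between 9.2.3 and 9.2.4.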
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/custom-log.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/custom-log.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/custom-log.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/custom-log.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,17 +23,13 @@ ''' # this test depends on Linux specific behavior regarding loopback addresses -Test.SkipUnless( - Condition.IsPlatform("linux") -) +Test.SkipUnless(Condition.IsPlatform("linux")) # Define default ATS ts = Test.MakeATSProcess("ts") # setup some config file for this server -ts.Disk.remap_config.AddLine( - 'map / http://www.linkedin.com/ @action=deny' -) +ts.Disk.remap_config.AddLine('map / http://www.linkedin.com/ @action=deny') ts.Disk.logging_yaml.AddLines( ''' @@ -44,62 +40,51 @@ logs: - filename: test_log_field format: custom -'''.split("\n") -) +'''.split("\n")) # ######################################################################### # at the end of the different test run a custom log file should exist # Because of this we expect the testruns to pass the real test is if the # customlog file exists and passes the format check -Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'test_log_field.log'), - exists=True, content='gold/custom.gold') +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'test_log_field.log'), exists=True, content='gold/custom.gold') # first test is a miss for default tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(Test.Processes.ts) tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.1.1.1:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.1.1.1:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.2.2.2:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.2.2.2:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.3.3.3:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.3.3.3:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.3.0.1:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.3.0.1:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.43.2.1:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.43.2.1:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.213.213.132:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.213.213.132:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.123.32.243:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.123.32.243:{0}" 
--verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'test_log_field.log') -) + os.path.join(ts.Variables.LOGDIR, 'test_log_field.log')) test_run.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log-debug-client-ip.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log-debug-client-ip.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log-debug-client-ip.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log-debug-client-ip.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,30 +27,25 @@ server = Test.MakeVerifierServerProcess("server", replay_file) nameserver = Test.MakeDNServer("dns", default='127.0.0.1') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 2, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.diags.debug.client_ip': '127.0.0.1', - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", -}) -ts.Disk.remap_config.AddLine( - 'map / http://localhost:{}/'.format(server.Variables.http_port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 2, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.diags.debug.client_ip': '127.0.0.1', + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + }) +ts.Disk.remap_config.AddLine('map / http://localhost:{}/'.format(server.Variables.http_port)) # Verify that the various aspects of the expected debug output for the # transaction are logged. ts.Disk.traffic_out.Content = Testers.ContainsExpression( - r"\+ Incoming Request \+", - "Make sure the client request information is present.") + r"\+ Incoming Request \+", "Make sure the client request information is present.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - r"\+ Proxy's Request after hooks \+", - "Make sure the proxy request information is present.") + r"\+ Proxy's Request after hooks \+", "Make sure the proxy request information is present.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - r"\+ Incoming O.S. Response \+", - "Make sure the server's response information is present.") + r"\+ Incoming O.S. 
Response \+", "Make sure the server's response information is present.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - r"\+ Proxy's Response 2 \+", - "Make sure the proxy response information is present.") + r"\+ Proxy's Response 2 \+", "Make sure the proxy response information is present.") tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log-field-json.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log-field-json.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log-field-json.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log-field-json.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,35 +28,43 @@ request_header = {'timestamp': 100, "headers": "GET /test-1 HTTP/1.1\r\nHost: test-1\r\n\r\n", "body": ""} response_header = { 'timestamp': 100, - "headers": "HTTP/1.1 200 OK\r\nTest: 1\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", - "body": "Test 1"} + "headers": + "HTTP/1.1 200 OK\r\nTest: 1\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", + "body": "Test 1" +} server.addResponse("sessionlog.json", request_header, response_header) -server.addResponse("sessionlog.json", - {'timestamp': 101, - "headers": "GET /test-2 HTTP/1.1\r\nHost: test-2\r\n\r\n", - "body": ""}, - {'timestamp': 101, - "headers": "HTTP/1.1 200 OK\r\nTest: 2\r\nContent-Type: application/jason\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", - "body": "Test 2"}) -server.addResponse("sessionlog.json", - {'timestamp': 102, - "headers": "GET /test-3 HTTP/1.1\r\nHost: test-3\r\n\r\n", - "body": ""}, - {'timestamp': 102, - "headers": "HTTP/1.1 200 OK\r\nTest: 3\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", - "body": "Test 3"}) +server.addResponse( + "sessionlog.json", { + 'timestamp': 101, + "headers": "GET /test-2 HTTP/1.1\r\nHost: test-2\r\n\r\n", + "body": "" + }, { + 'timestamp': 101, + "headers": + "HTTP/1.1 200 OK\r\nTest: 2\r\nContent-Type: application/jason\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", + "body": "Test 2" + }) +server.addResponse( + "sessionlog.json", { + 'timestamp': 102, + "headers": "GET /test-3 HTTP/1.1\r\nHost: test-3\r\n\r\n", + "body": "" + }, { + 'timestamp': 102, + "headers": "HTTP/1.1 200 OK\r\nTest: 3\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", + "body": "Test 3" + }) nameserver = Test.MakeDNServer("dns", default='127.0.0.1') -ts.Disk.records_config.update({ - 'proxy.config.net.connections_throttle': 100, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.net.connections_throttle': 100, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # setup some config file for this server -ts.Disk.remap_config.AddLine( - 'map / http://localhost:{}/'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://localhost:{}/'.format(server.Variables.Port)) ts.Disk.logging_yaml.AddLines( ''' @@ -68,15 +76,13 @@ logs: - filename: field-json-test format: custom -'''.split("\n") -) +'''.split("\n")) # ######################################################################### # at the end of the different test run a custom log file should exist # Because of this we expect the 
testruns to pass the real test is if the # customlog file exists and passes the format check -Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'field-json-test.log'), - exists=True, content='gold/field-json-test.gold') +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'field-json-test.log'), exists=True, content='gold/field-json-test.gold') # first test is a miss for default tr = Test.AddTestRun() @@ -86,17 +92,17 @@ # Delay on readiness of our ssl ports tr.Processes.Default.StartBefore(Test.Processes.ts) -tr.Processes.Default.Command = 'curl --verbose --header "Host: test-1" --header "Foo: ab\td/ef" http://localhost:{0}/test-1' .format( +tr.Processes.Default.Command = 'curl --verbose --header "Host: test-1" --header "Foo: ab\td/ef" http://localhost:{0}/test-1'.format( ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl --verbose --header "Host: test-2" --header "Foo: ab\x1fd/ef" http://localhost:{0}/test-2' .format( +tr.Processes.Default.Command = 'curl --verbose --header "Host: test-2" --header "Foo: ab\x1fd/ef" http://localhost:{0}/test-2'.format( ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl --verbose --header "Host: test-3" --header "Foo: abc\x7fde" http://localhost:{0}/test-3' .format( +tr.Processes.Default.Command = 'curl --verbose --header "Host: test-3" --header "Foo: abc\x7fde" http://localhost:{0}/test-3'.format( ts.Variables.port) tr.Processes.Default.ReturnCode = 0 @@ -104,6 +110,5 @@ test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'field-json-test.log') -) + os.path.join(ts.Variables.LOGDIR, 'field-json-test.log')) test_run.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log-field.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log-field.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log-field.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log-field.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,35 +28,43 @@ request_header = {'timestamp': 100, "headers": "GET /test-1 HTTP/1.1\r\nHost: test-1\r\n\r\n", "body": ""} response_header = { 'timestamp': 100, - "headers": "HTTP/1.1 200 OK\r\nTest: 1\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", - "body": "Test 1"} + "headers": + "HTTP/1.1 200 OK\r\nTest: 1\r\nContent-Type: application/json\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", + "body": "Test 1" +} server.addResponse("sessionlog.json", request_header, response_header) -server.addResponse("sessionlog.json", - {'timestamp': 101, - "headers": "GET /test-2 HTTP/1.1\r\nHost: test-2\r\n\r\n", - "body": ""}, - {'timestamp': 101, - "headers": "HTTP/1.1 200 OK\r\nTest: 2\r\nContent-Type: application/jason\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", - "body": "Test 2"}) -server.addResponse("sessionlog.json", - {'timestamp': 102, - "headers": "GET /test-3 HTTP/1.1\r\nHost: test-3\r\n\r\n", - "body": ""}, - {'timestamp': 102, - "headers": "HTTP/1.1 200 OK\r\nTest: 3\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", - "body": "Test 3"}) +server.addResponse( + "sessionlog.json", { + 'timestamp': 101, + "headers": "GET /test-2 HTTP/1.1\r\nHost: test-2\r\n\r\n", + "body": "" + }, { + 'timestamp': 101, + "headers": 
+ "HTTP/1.1 200 OK\r\nTest: 2\r\nContent-Type: application/jason\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", + "body": "Test 2" + }) +server.addResponse( + "sessionlog.json", { + 'timestamp': 102, + "headers": "GET /test-3 HTTP/1.1\r\nHost: test-3\r\n\r\n", + "body": "" + }, { + 'timestamp': 102, + "headers": "HTTP/1.1 200 OK\r\nTest: 3\r\nConnection: close\r\nContent-Type: application/json\r\n\r\n", + "body": "Test 3" + }) nameserver = Test.MakeDNServer("dns", default='127.0.0.1') -ts.Disk.records_config.update({ - 'proxy.config.net.connections_throttle': 100, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.net.connections_throttle': 100, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # setup some config file for this server -ts.Disk.remap_config.AddLine( - 'map / http://localhost:{}/'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://localhost:{}/'.format(server.Variables.Port)) ts.Disk.logging_yaml.AddLines( ''' @@ -67,15 +75,13 @@ logs: - filename: field-test format: custom -'''.split("\n") -) +'''.split("\n")) # ######################################################################### # at the end of the different test run a custom log file should exist # Because of this we expect the testruns to pass the real test is if the # customlog file exists and passes the format check -Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'field-test.log'), - exists=True, content='gold/field-test.gold') +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'field-test.log'), exists=True, content='gold/field-test.gold') # first test is a miss for default tr = Test.AddTestRun() @@ -85,24 +91,19 @@ # Delay on readiness of our ssl ports tr.Processes.Default.StartBefore(Test.Processes.ts) -tr.Processes.Default.Command = 'curl --verbose --header "Host: test-1" http://localhost:{0}/test-1' .format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl --verbose --header "Host: test-1" http://localhost:{0}/test-1'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl --verbose --header "Host: test-2" http://localhost:{0}/test-2' .format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl --verbose --header "Host: test-2" http://localhost:{0}/test-2'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl --verbose --header "Host: test-3" http://localhost:{0}/test-3' .format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl --verbose --header "Host: test-3" http://localhost:{0}/test-3'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. 
test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'field-test.log') -) + os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + os.path.join(ts.Variables.LOGDIR, 'field-test.log')) test_run.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log-filenames.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log-filenames.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log-filenames.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log-filenames.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,11 +33,7 @@ __ts_counter = 1 # The default log names for the various system logs. - default_log_data = { - 'diags': 'diags.log', - 'error': 'error.log', - 'manager': 'manager.log' - } + default_log_data = {'diags': 'diags.log', 'error': 'error.log', 'manager': 'manager.log'} def __init__(self, description, log_data=default_log_data): ''' Handle initialization tasks common across the tests. @@ -66,27 +62,29 @@ ''' self._ts_name = f"ts{LogFilenamesTest.__ts_counter}" LogFilenamesTest.__ts_counter += 1 - self.ts = Test.MakeATSProcess(self._ts_name, command="traffic_manager", - use_traffic_out=False, log_data=log_data) - self.ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'log', - 'proxy.config.log.periodic_tasks_interval': 1, - }) + self.ts = Test.MakeATSProcess(self._ts_name, command="traffic_manager", use_traffic_out=False, log_data=log_data) + self.ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'log', + 'proxy.config.log.periodic_tasks_interval': 1, + }) # Intentionally retrieve a port that is closed, that is no server is # listening on it. We will use this to attempt talking with a # non-existent server, which will result in an error log entry. ports.get_port(self.ts, 'closed_port') - self.ts.Disk.remap_config.AddLines([ - f'map /server/down http://127.0.0.1:{self.ts.Variables.closed_port}', - 'map / https://trafficserver.apache.org @action=deny', - ]) + self.ts.Disk.remap_config.AddLines( + [ + f'map /server/down http://127.0.0.1:{self.ts.Variables.closed_port}', + 'map / https://trafficserver.apache.org @action=deny', + ]) # The following log is configured so that we can wait upon it being # written so we know that ATS is done writing logs. self.sentinel_log_filename = "sentinel" - self.ts.Disk.logging_yaml.AddLine(f''' + self.ts.Disk.logging_yaml.AddLine( + f''' logging: formats: - name: url_and_return_code @@ -96,9 +94,7 @@ format: url_and_return_code ''') - self.sentinel_log_path = os.path.join( - self.ts.Variables.LOGDIR, - f"{self.sentinel_log_filename}.log") + self.sentinel_log_path = os.path.join(self.ts.Variables.LOGDIR, f"{self.sentinel_log_filename}.log") return self.ts @@ -124,8 +120,7 @@ tr = Test.AddTestRun(f'Run traffic for: {description}') tr.Processes.Default.Command = ( f'curl http://127.0.0.1:{self.ts.Variables.port}/some/path --verbose --next ' - f'http://127.0.0.1:{self.ts.Variables.port}/server/down --verbose' - ) + f'http://127.0.0.1:{self.ts.Variables.port}/server/down --verbose') tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(self.ts) @@ -140,7 +135,8 @@ The path to the configured custom log file. 
""" self.custom_log_filename = custom_log_filename - self.ts.Disk.logging_yaml.AddLine(f''' + self.ts.Disk.logging_yaml.AddLine( + f''' - filename: {custom_log_filename} format: url_and_return_code ''') @@ -152,9 +148,7 @@ else: self.ts.Disk.custom_log = self.ts.Streams.stderr else: - self.custom_log_path = os.path.join( - self.ts.Variables.LOGDIR, - f"{custom_log_filename}.log") + self.custom_log_path = os.path.join(self.ts.Variables.LOGDIR, f"{custom_log_filename}.log") self.ts.Disk.File(self.custom_log_path, id="custom_log") return self.custom_log_path @@ -164,23 +158,19 @@ ''' manager_path = self.ts.Disk.manager_log.AbsPath self.ts.Disk.manager_log.Content += Testers.ContainsExpression( - "Launching ts process", - f"{manager_path} should contain traffic_manager log messages") + "Launching ts process", f"{manager_path} should contain traffic_manager log messages") diags_path = self.ts.Disk.diags_log.AbsPath self.ts.Disk.diags_log.Content += Testers.ContainsExpression( - "Traffic Server is fully initialized", - f"{diags_path} should contain traffic_server diag messages") + "Traffic Server is fully initialized", f"{diags_path} should contain traffic_server diag messages") error_log_path = self.ts.Disk.error_log.AbsPath self.ts.Disk.error_log.Content += Testers.ContainsExpression( - "CONNECT: attempt fail", - f"{error_log_path} should contain connection error messages") + "CONNECT: attempt fail", f"{error_log_path} should contain connection error messages") custom_log_path = self.ts.Disk.custom_log.AbsPath self.ts.Disk.custom_log.Content += Testers.ContainsExpression( - "https://trafficserver.apache.org/some/path: 403", - f"{custom_log_path} should contain the custom transaction logs") + "https://trafficserver.apache.org/some/path: 403", f"{custom_log_path} should contain the custom transaction logs") class DefaultNamedTest(LogFilenamesTest): @@ -206,19 +196,16 @@ ''' def __init__(self): - log_data = { - 'diags': 'my_diags.log', - 'error': 'my_error.log', - 'manager': 'my_manager.log' - } + log_data = {'diags': 'my_diags.log', 'error': 'my_error.log', 'manager': 'my_manager.log'} super().__init__('specify log filename configuration', log_data) # Configure custom names for manager.log, etc. - self.ts.Disk.records_config.update({ - 'proxy.node.config.manager_log_filename': 'my_manager.log', - 'proxy.config.diags.logfile.filename': 'my_diags.log', - 'proxy.config.error.logfile.filename': 'my_error.log', - }) + self.ts.Disk.records_config.update( + { + 'proxy.node.config.manager_log_filename': 'my_manager.log', + 'proxy.config.diags.logfile.filename': 'my_diags.log', + 'proxy.config.error.logfile.filename': 'my_error.log', + }) # For these tests, more important than the listening port is the # existence of the log files. In particular, it can take a few seconds @@ -236,19 +223,16 @@ def __init__(self): - log_data = { - 'diags': 'stdout', - 'error': 'stdout', - 'manager': 'stdout' - } + log_data = {'diags': 'stdout', 'error': 'stdout', 'manager': 'stdout'} super().__init__('specify logs to go to stdout', log_data) # Configure custom names for manager.log, etc. 
- self.ts.Disk.records_config.update({ - 'proxy.node.config.manager_log_filename': 'stdout', - 'proxy.config.diags.logfile.filename': 'stdout', - 'proxy.config.error.logfile.filename': 'stdout', - }) + self.ts.Disk.records_config.update( + { + 'proxy.node.config.manager_log_filename': 'stdout', + 'proxy.config.diags.logfile.filename': 'stdout', + 'proxy.config.error.logfile.filename': 'stdout', + }) self.configure_named_custom_log('stdout') @@ -265,19 +249,16 @@ def __init__(self): - log_data = { - 'diags': 'stderr', - 'error': 'stderr', - 'manager': 'stderr' - } + log_data = {'diags': 'stderr', 'error': 'stderr', 'manager': 'stderr'} super().__init__('specify logs to go to stderr', log_data) # Configure custom names for manager.log, etc. - self.ts.Disk.records_config.update({ - 'proxy.node.config.manager_log_filename': 'stderr', - 'proxy.config.diags.logfile.filename': 'stderr', - 'proxy.config.error.logfile.filename': 'stderr', - }) + self.ts.Disk.records_config.update( + { + 'proxy.node.config.manager_log_filename': 'stderr', + 'proxy.config.diags.logfile.filename': 'stderr', + 'proxy.config.error.logfile.filename': 'stderr', + }) self.configure_named_custom_log('stderr') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log-filter.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log-filter.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log-filter.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log-filter.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,18 +27,16 @@ server = Test.MakeVerifierServerProcess("server", replay_file) nameserver = Test.MakeDNServer("dns", default='127.0.0.1') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'log', - - 'proxy.config.net.connections_throttle': 100, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'log', + 'proxy.config.net.connections_throttle': 100, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # setup some config file for this server -ts.Disk.remap_config.AddLine( - 'map / http://localhost:{}/'.format(server.Variables.http_port) -) +ts.Disk.remap_config.AddLine('map / http://localhost:{}/'.format(server.Variables.http_port)) ts.Disk.logging_yaml.AddLines( ''' @@ -72,15 +70,13 @@ filters: - queryparamescaper_cquuc - not_localhost -'''.split("\n") -) +'''.split("\n")) # ######################################################################### # at the end of the different test run a custom log file should exist # Because of this we expect the testruns to pass the real test is if the # customlog file exists and passes the format check -Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'filter-test.log'), - exists=True, content='gold/filter-test.gold') +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'filter-test.log'), exists=True, content='gold/filter-test.gold') tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -91,15 +87,12 @@ # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. 
test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'filter-test.log') -) + os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + os.path.join(ts.Variables.LOGDIR, 'filter-test.log')) test_run.Processes.Default.ReturnCode = 0 # We already waited for the above, so we don't have to wait for this one. test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 1 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'should-not-be-written.log') -) + os.path.join(ts.Variables.LOGDIR, 'should-not-be-written.log')) test_run.Processes.Default.ReturnCode = 1 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log_pipe.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log_pipe.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log_pipe.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log_pipe.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,9 +22,7 @@ Test.Summary = ''' Test custom log file format ''' -Test.SkipUnless( - Condition.HasATSFeature('TS_HAS_PIPE_BUFFER_SIZE_CONFIG') -) +Test.SkipUnless(Condition.HasATSFeature('TS_HAS_PIPE_BUFFER_SIZE_CONFIG')) ts_counter = 1 @@ -37,18 +35,17 @@ ts = Test.MakeATSProcess("ts{}".format(ts_counter)) ts_counter += 1 - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'log-file', - 'proxy.config.log.max_secs_per_buffer': 1, - }) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'log-file', + 'proxy.config.log.max_secs_per_buffer': 1, + }) # Since we're only verifying logs and not traffic, we don't need an origin # server. The following will simply deny the requests and emit a log # message. 
- ts.Disk.remap_config.AddLine( - 'map / http://www.linkedin.com/ @action=deny' - ) + ts.Disk.remap_config.AddLine('map / http://www.linkedin.com/ @action=deny') ts.Disk.logging_yaml.AddLines(logging_config) @@ -70,25 +67,20 @@ - filename: '{}' mode: ascii_pipe format: custom -'''.format(pipe_name).split("\n") -) +'''.format(pipe_name).split("\n")) pipe_path = os.path.join(ts.Variables.LOGDIR, pipe_name) ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Created named pipe .*{}".format(pipe_name), - "Verify that the named pipe was created") + "Created named pipe .*{}".format(pipe_name), "Verify that the named pipe was created") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "no readers for pipe .*{}".format(pipe_name), - "Verify that no readers for the pipe was detected.") + "no readers for pipe .*{}".format(pipe_name), "Verify that no readers for the pipe was detected.") ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "New buffer size for pipe".format(pipe_name), - "Verify that the default pipe size was used.") + "New buffer size for pipe".format(pipe_name), "Verify that the default pipe size was used.") -curl = tr.Processes.Process("client_request", 'curl "http://127.0.0.1:{0}" --verbose'.format( - ts.Variables.port)) +curl = tr.Processes.Process("client_request", 'curl "http://127.0.0.1:{0}" --verbose'.format(ts.Variables.port)) reader_output = os.path.join(ts.Variables.LOGDIR, "reader_output") pipe_reader = tr.Processes.Process("pipe_reader", 'cat {} | tee {}'.format(pipe_path, reader_output)) @@ -111,7 +103,6 @@ curl.StartBefore(pipe_reader) pipe_reader.StartBefore(ts) - # # Test 2: Change the log's buffer size. # @@ -131,22 +122,18 @@ mode: ascii_pipe format: custom pipe_buffer_size: {} - '''.format(pipe_name, pipe_size).split("\n") -) + '''.format(pipe_name, pipe_size).split("\n")) pipe_path = os.path.join(ts.Variables.LOGDIR, pipe_name) ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Created named pipe .*{}".format(pipe_name), - "Verify that the named pipe was created") + "Created named pipe .*{}".format(pipe_name), "Verify that the named pipe was created") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "no readers for pipe .*{}".format(pipe_name), - "Verify that no readers for the pipe was detected.") + "no readers for pipe .*{}".format(pipe_name), "Verify that no readers for the pipe was detected.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Previous buffer size for pipe .*{}".format(pipe_name), - "Verify that the named pipe's size was adjusted") + "Previous buffer size for pipe .*{}".format(pipe_name), "Verify that the named pipe's size was adjusted") # See fcntl: # "Attempts to set the pipe capacity below the page size @@ -157,20 +144,14 @@ # pipe_buffer_is_larger_than.py helper script to verify that the pipe grew in # size. 
ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "New buffer size for pipe.*{}".format(pipe_name), - "Verify that the named pipe's size was adjusted") + "New buffer size for pipe.*{}".format(pipe_name), "Verify that the named pipe's size was adjusted") buffer_verifier = "pipe_buffer_is_larger_than.py" tr.Setup.Copy(buffer_verifier) -verify_buffer_size = tr.Processes.Process( - "verify_buffer_size", - f"{sys.executable} {buffer_verifier} {pipe_path} {pipe_size}") +verify_buffer_size = tr.Processes.Process("verify_buffer_size", f"{sys.executable} {buffer_verifier} {pipe_path} {pipe_size}") verify_buffer_size.Return = 0 -verify_buffer_size.Streams.All += Testers.ContainsExpression( - "Success", - "The buffer size verifier should report success.") +verify_buffer_size.Streams.All += Testers.ContainsExpression("Success", "The buffer size verifier should report success.") -curl = tr.Processes.Process("client_request", 'curl "http://127.0.0.1:{0}" --verbose'.format( - ts.Variables.port)) +curl = tr.Processes.Process("client_request", 'curl "http://127.0.0.1:{0}" --verbose'.format(ts.Variables.port)) reader_output = os.path.join(ts.Variables.LOGDIR, "reader_output") pipe_reader = tr.Processes.Process("pipe_reader", 'cat {} | tee {}'.format(pipe_path, reader_output)) @@ -187,7 +168,6 @@ tr.Processes.Default.Command = "echo 'Default place holder for process ordering.'" tr.Processes.Default.Return = 0 - # Process ordering. tr.Processes.Default.StartBefore(verify_buffer_size) verify_buffer_size.StartBefore(wait_for_log) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/log_retention.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/log_retention.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/log_retention.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/log_retention.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -90,13 +90,19 @@ return cls.__server server = Test.MakeOriginServer("server") - request_header = {"headers": "GET / HTTP/1.1\r\n" - "Host: does.not.matter\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} - response_header = {"headers": "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n\r\n", - "timestamp": "1469733493.993", "body": "xxx"} + request_header = { + "headers": "GET / HTTP/1.1\r\n" + "Host: does.not.matter\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } + response_header = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" + } server.addResponse("sessionlog.json", request_header, response_header) cls.__server = server return cls.__server @@ -117,31 +123,26 @@ self.ts.Disk.records_config.update(combined_records_config) self.ts.Disk.remap_config.AddLine( - 'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format( - self.ts.Variables.port, self.server.Variables.Port) - ) + 'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(self.ts.Variables.port, self.server.Variables.Port)) return self.ts def get_curl_command(self): """ Generate the appropriate single curl command. """ - return 'curl "http://127.0.0.1:{0}" --verbose'.format( - self.ts.Variables.port) + return 'curl "http://127.0.0.1:{0}" --verbose'.format(self.ts.Variables.port) def get_command_to_rotate_once(self): """ Generate the set of curl commands to trigger a log rotate. 
""" - return 'for i in {{1..2500}}; do curl "http://127.0.0.1:{0}" --verbose; done'.format( - self.ts.Variables.port) + return 'for i in {{1..2500}}; do curl "http://127.0.0.1:{0}" --verbose; done'.format(self.ts.Variables.port) def get_command_to_rotate_thrice(self): """ Generate the set of curl commands to trigger a log rotate. """ - return 'for i in {{1..7500}}; do curl "http://127.0.0.1:{0}" --verbose; done'.format( - self.ts.Variables.port) + return 'for i in {{1..7500}}; do curl "http://127.0.0.1:{0}" --verbose; done'.format(self.ts.Variables.port) # @@ -157,8 +158,7 @@ # Verify that setting a hostname changes the hostname used in rolled logs. 'proxy.config.log.hostname': specified_hostname, } -test = TestLogRetention(twelve_meg_log_space, - "Verify log rotation and deletion of the configured log file with no min_count.") +test = TestLogRetention(twelve_meg_log_space, "Verify log rotation and deletion of the configured log file with no min_count.") # Configure approximately 5 KB entries for a log with no specified min_count. test.ts.Disk.logging_yaml.AddLines( @@ -170,25 +170,20 @@ logs: - filename: test_deletion format: long -'''.format(prefix="0123456789" * 500).split("\n") -) +'''.format(prefix="0123456789" * 500).split("\n")) # Verify that each log type was registered for auto-deletion. test.ts.Disk.traffic_out.Content = Testers.ContainsExpression( "Registering rotated log deletion for test_deletion.log with min roll count 0", "Verify test_deletion.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for error.log with min roll count 0", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 0", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for traffic.out with min roll count 0", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 0", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for diags.log with min roll count 0", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 0", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for manager.log with min roll count 0", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 0", "Verify manager.log auto-delete configuration") # Verify test_deletion was rotated and deleted. test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( f"The rolled logfile.*test_deletion.log_{specified_hostname}.*was auto-deleted.*bytes were reclaimed", @@ -200,12 +195,10 @@ test.tr.StillRunningAfter = test.ts test.tr.StillRunningAfter = test.server - # # Test 1: Verify log deletion happens with a min_count of 1. # -test = TestLogRetention(twelve_meg_log_space, - "Verify log rotation and deletion of the configured log file with a min_count of 1.") +test = TestLogRetention(twelve_meg_log_space, "Verify log rotation and deletion of the configured log file with a min_count of 1.") # Configure approximately 5 KB entries for a log with no specified min_count. 
test.ts.Disk.logging_yaml.AddLines( @@ -218,8 +211,7 @@ - filename: test_deletion rolling_min_count: 1 format: long -'''.format(prefix="0123456789" * 500).split("\n") -) +'''.format(prefix="0123456789" * 500).split("\n")) # Verify that each log type was registered for auto-deletion. test.ts.Disk.traffic_out.Content = Testers.ContainsExpression( @@ -227,17 +219,13 @@ "Verify test_deletion.log auto-delete configuration") # Only the test_deletion should have its min_count overridden. test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for error.log with min roll count 0", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 0", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for traffic.out with min roll count 0", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 0", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for diags.log with min roll count 0", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 0", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for manager.log with min roll count 0", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 0", "Verify manager.log auto-delete configuration") # Verify test_deletion was rotated and deleted. test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( f"The rolled logfile.*test_deletion.log_{specified_hostname}.*was auto-deleted.*bytes were reclaimed", @@ -248,12 +236,10 @@ test.tr.StillRunningAfter = test.ts test.tr.StillRunningAfter = test.server - # # Test 2: Verify log deletion happens for a plugin's logs. # -test = TestLogRetention(twelve_meg_log_space, - "Verify log rotation and deletion of plugin logs.") +test = TestLogRetention(twelve_meg_log_space, "Verify log rotation and deletion of plugin logs.") Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'test_log_interface.so'), test.ts) # Verify that the plugin's logs and other core logs were registered for deletion. 
@@ -261,21 +247,16 @@ "Registering rotated log deletion for test_log_interface.log with min roll count 0", "Verify test_log_interface.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for error.log with min roll count 0", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 0", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for traffic.out with min roll count 0", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 0", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for diags.log with min roll count 0", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 0", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for manager.log with min roll count 0", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 0", "Verify manager.log auto-delete configuration") # Verify test_deletion was rotated and deleted. test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "The rolled logfile.*test_log_interface.log_.*was auto-deleted.*bytes were reclaimed", - "Verify that space was reclaimed") + "The rolled logfile.*test_log_interface.log_.*was auto-deleted.*bytes were reclaimed", "Verify that space was reclaimed") test.tr.Processes.Default.Command = test.get_command_to_rotate_once() test.tr.Processes.Default.ReturnCode = 0 @@ -291,8 +272,7 @@ 'proxy.config.log.max_space_mb_headroom': 2, 'proxy.config.log.max_space_mb_for_logs': 22, } -test = TestLogRetention(twenty_two_meg_log_space, - "Verify log deletion priority behavior.") +test = TestLogRetention(twenty_two_meg_log_space, "Verify log deletion priority behavior.") # Configure approximately 5 KB entries for a log with no specified min_count. test.ts.Disk.logging_yaml.AddLines( @@ -309,8 +289,7 @@ - filename: test_high_priority_deletion rolling_min_count: 1 format: long -'''.format(prefix="0123456789" * 500).split("\n") -) +'''.format(prefix="0123456789" * 500).split("\n")) # Verify that each log type was registered for auto-deletion. test.ts.Disk.traffic_out.Content = Testers.ContainsExpression( @@ -321,17 +300,13 @@ "Verify test_high_priority_deletion.log auto-delete configuration") # Only the test_deletion should have its min_count overridden. 
test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for error.log with min roll count 0", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 0", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for traffic.out with min roll count 0", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 0", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for diags.log with min roll count 0", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 0", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for manager.log with min roll count 0", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 0", "Verify manager.log auto-delete configuration") # Verify test_deletion was rotated and deleted. test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( "The rolled logfile.*test_low_priority_deletion.log_.*was auto-deleted.*bytes were reclaimed", @@ -358,27 +333,21 @@ 'proxy.config.output.logfile.rolling_min_count': 4, 'proxy.config.diags.logfile.rolling_min_count': 5, } -test = TestLogRetention(various_min_count_overrides, - "Verify that the various min_count configurations behave as expected") +test = TestLogRetention(various_min_count_overrides, "Verify that the various min_count configurations behave as expected") # Only the test_deletion should have its min_count overridden. test.ts.Disk.traffic_out.Content = Testers.ContainsExpression( - "Registering rotated log deletion for error.log with min roll count 3", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 3", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for traffic.out with min roll count 4", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 4", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for diags.log with min roll count 5", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 5", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for manager.log with min roll count 5", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 5", "Verify manager.log auto-delete configuration") # In case a future log is added, make sure the developer doesn't forget to # set the min count per configuration. 
test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "Registering .* with min roll count 0", - "Verify nothing has a default min roll count of 0 per configuration") + "Registering .* with min roll count 0", "Verify nothing has a default min roll count of 0 per configuration") # This test doesn't require a log rotation. We just verify that the logs communicate # the appropriate min_count values above. @@ -387,18 +356,17 @@ test.tr.StillRunningAfter = test.ts test.tr.StillRunningAfter = test.server - # # Test 5: Verify log deletion does not happen when it is disabled. # auto_delete_disabled = twelve_meg_log_space.copy() -auto_delete_disabled.update({ - 'proxy.config.log.auto_delete_rolled_files': 0, - # Verify that setting a hostname changes the hostname used in rolled logs. - 'proxy.config.log.hostname': 'my_hostname', -}) -test = TestLogRetention(auto_delete_disabled, - "Verify log deletion does not happen when auto-delet is disabled.") +auto_delete_disabled.update( + { + 'proxy.config.log.auto_delete_rolled_files': 0, + # Verify that setting a hostname changes the hostname used in rolled logs. + 'proxy.config.log.hostname': 'my_hostname', + }) +test = TestLogRetention(auto_delete_disabled, "Verify log deletion does not happen when auto-delet is disabled.") # Configure approximately 5 KB entries for a log with no specified min_count. test.ts.Disk.logging_yaml.AddLines( @@ -411,8 +379,7 @@ - filename: test_deletion rolling_min_count: 1 format: long -'''.format(prefix="0123456789" * 500).split("\n") -) +'''.format(prefix="0123456789" * 500).split("\n")) # Verify that each log type was registered for auto-deletion. test.ts.Disk.traffic_out.Content = Testers.ExcludesExpression( @@ -420,21 +387,16 @@ "Verify test_deletion.log auto-delete configuration") # Only the test_deletion should have its min_count overridden. test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "Registering rotated log deletion for error.log with min roll count 0", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 0", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "Registering rotated log deletion for traffic.out with min roll count 0", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 0", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "Registering rotated log deletion for diags.log with min roll count 0", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 0", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "Registering rotated log deletion for manager.log with min roll count 0", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 0", "Verify manager.log auto-delete configuration") # Verify test_deletion was not deleted. 
test.ts.Disk.traffic_out.Content += Testers.ExcludesExpression( - "The rolled logfile.*test_deletion.log_.*was auto-deleted.*bytes were reclaimed", - "Verify that space was reclaimed") + "The rolled logfile.*test_deletion.log_.*was auto-deleted.*bytes were reclaimed", "Verify that space was reclaimed") test.tr.Processes.Default.Command = test.get_command_to_rotate_once() test.tr.Processes.Default.ReturnCode = 0 @@ -455,8 +417,7 @@ # This is the configuration under test. 'proxy.config.log.rolling_max_count': 2, } -test = TestLogRetention(max_roll_count_of_2, - "Verify max_roll_count is respected.") +test = TestLogRetention(max_roll_count_of_2, "Verify max_roll_count is respected.") # Configure approximately 5 KB entries for a log with no specified min_count. test.ts.Disk.logging_yaml.AddLines( @@ -468,13 +429,11 @@ logs: - filename: test_deletion format: long -'''.format(prefix="0123456789" * 500).split("\n") -) +'''.format(prefix="0123456789" * 500).split("\n")) # Verify that trim happened for the rolled file. test.ts.Disk.traffic_out.Content = Testers.ContainsExpression( - "rolled logfile.*test_deletion.log.*old.* was auto-deleted", - "Verify test_deletion.log was trimmed") + "rolled logfile.*test_deletion.log.*old.* was auto-deleted", "Verify test_deletion.log was trimmed") test.tr.Processes.Default.Command = test.get_command_to_rotate_thrice() test.tr.Processes.Default.ReturnCode = 0 @@ -484,8 +443,7 @@ # # Test 7: Verify log deletion happens after a config reload. # -test = TestLogRetention(twelve_meg_log_space, - "Verify log rotation and deletion after a config reload.") +test = TestLogRetention(twelve_meg_log_space, "Verify log rotation and deletion after a config reload.") test.ts.Disk.logging_yaml.AddLines( ''' @@ -496,29 +454,23 @@ logs: - filename: test_deletion format: long -'''.format(prefix="0123456789" * 500).split("\n") -) +'''.format(prefix="0123456789" * 500).split("\n")) # Verify that the plugin's logs and other core logs were registered for deletion. test.ts.Disk.traffic_out.Content = Testers.ContainsExpression( "Registering rotated log deletion for test_deletion.log with min roll count 0", "Verify test_deletion.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for error.log with min roll count 0", - "Verify error.log auto-delete configuration") + "Registering rotated log deletion for error.log with min roll count 0", "Verify error.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for traffic.out with min roll count 0", - "Verify traffic.out auto-delete configuration") + "Registering rotated log deletion for traffic.out with min roll count 0", "Verify traffic.out auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for diags.log with min roll count 0", - "Verify diags.log auto-delete configuration") + "Registering rotated log deletion for diags.log with min roll count 0", "Verify diags.log auto-delete configuration") test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Registering rotated log deletion for manager.log with min roll count 0", - "Verify manager.log auto-delete configuration") + "Registering rotated log deletion for manager.log with min roll count 0", "Verify manager.log auto-delete configuration") # Verify test_deletion was rotated and deleted. 
test.ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "The rolled logfile.*test_deletion.log_.*was auto-deleted.*bytes were reclaimed", - "Verify that space was reclaimed") + "The rolled logfile.*test_deletion.log_.*was auto-deleted.*bytes were reclaimed", "Verify that space was reclaimed") # Touch logging.yaml so the config reload applies to logging objects. test.tr.Processes.Default.Command = "touch " + test.ts.Disk.logging_yaml.AbsRunTimePath diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/new_log_flds.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/new_log_flds.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/new_log_flds.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/new_log_flds.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ Test new log fields ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) # ---- # Setup httpbin Origin Server @@ -39,27 +37,22 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - # 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) +ts.Disk.records_config.update( + { + # 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) -ts.Disk.remap_config.AddLine( - 'map http://127.0.0.1:{0} http://127.0.0.1:{1}/ip'.format(ts.Variables.port, httpbin.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://127.0.0.1:{0} http://127.0.0.1:{1}/ip'.format(ts.Variables.port, httpbin.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map https://127.0.0.1:{0} http://127.0.0.1:{1}/ip'.format(ts.Variables.ssl_port, httpbin.Variables.Port) -) + 'map https://127.0.0.1:{0} http://127.0.0.1:{1}/ip'.format(ts.Variables.ssl_port, httpbin.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map https://reallyreallyreallyreallylong.com http://127.0.0.1:{1}/ip'.format(ts.Variables.ssl_port, httpbin.Variables.Port) -) + 'map https://reallyreallyreallyreallylong.com http://127.0.0.1:{1}/ip'.format(ts.Variables.ssl_port, httpbin.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.logging_yaml.AddLines( ''' @@ -70,40 +63,33 @@ logs: - filename: test_new_log_flds format: custom -'''.split("\n") -) +'''.split("\n")) tr = Test.AddTestRun() # Delay on readiness of ssl port tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.StartBefore(httpbin, ready=When.PortOpen(httpbin.Variables.Port)) # -tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" "http://127.0.0.1:{0}" --http1.1 --verbose'.format( - ts.Variables.port) 
+tr.Processes.Default.Command = 'curl "http://127.0.0.1:{0}" "http://127.0.0.1:{0}" --http1.1 --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl "https://127.0.0.1:{0}" "https://127.0.0.1:{0}" --http2 --insecure --verbose'.format( - ts.Variables.ssl_port) -) + 'curl "https://127.0.0.1:{0}" "https://127.0.0.1:{0}" --http2 --insecure --verbose'.format(ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'curl "https://reallyreallyreallyreallylong.com:{0}" --http2 --insecure --verbose' + - ' --resolve reallyreallyreallyreallylong.com:{0}:127.0.0.1' -).format(ts.Variables.ssl_port) + ' --resolve reallyreallyreallyreallylong.com:{0}:127.0.0.1').format(ts.Variables.ssl_port) tr.Processes.Default.ReturnCode = 0 # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. @@ -111,8 +97,7 @@ test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'test_new_log_flds.log') -) + os.path.join(ts.Variables.LOGDIR, 'test_new_log_flds.log')) test_run.Processes.Default.ReturnCode = 0 # Validate generated log. diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/new_log_flds_observer.py trafficserver-9.2.4+ds/tests/gold_tests/logging/new_log_flds_observer.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/new_log_flds_observer.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/new_log_flds_observer.py 2024-04-03 15:38:30.000000000 +0000 @@ -47,14 +47,8 @@ # Validate contents of report. # -if (ccid[0] != ccid[1] and - ccid[1] != ccid[2] and - ccid[2] == ccid[3] and - ctid[2] != ctid[3] and - ccid[3] != ccid[4] and - ccid[4] == ccid[5] and - ctid[4] != ctid[5] and - ccid[5] != ccid[6]): +if (ccid[0] != ccid[1] and ccid[1] != ccid[2] and ccid[2] == ccid[3] and ctid[2] != ctid[3] and ccid[3] != ccid[4] and + ccid[4] == ccid[5] and ctid[4] != ctid[5] and ccid[5] != ccid[6]): exit(code=0) # Failure exit if report was not valid. diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/pipe_buffer_is_larger_than.py trafficserver-9.2.4+ds/tests/gold_tests/logging/pipe_buffer_is_larger_than.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/pipe_buffer_is_larger_than.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/pipe_buffer_is_larger_than.py 2024-04-03 15:38:30.000000000 +0000 @@ -26,16 +26,11 @@ def parse_args(): - parser = parser = argparse.ArgumentParser( - description='Verify that a FIFO has a buffer of at least a certain size') + parser = parser = argparse.ArgumentParser(description='Verify that a FIFO has a buffer of at least a certain size') - parser.add_argument( - 'pipe_name', - help='The pipe name upon which to verify the size is large enough.') - - parser.add_argument( - 'minimum_buffer_size', - help='The minimu buffer size for the pipe to expect.') + parser.add_argument('pipe_name', help='The pipe name upon which to verify the size is large enough.') + + parser.add_argument('minimum_buffer_size', help='The minimu buffer size for the pipe to expect.') return parser.parse_args() @@ -46,14 +41,10 @@ buffer_size = fcntl.fcntl(fifo_fd, F_GETPIPE_SZ) if buffer_size >= int(minimum_buffer_size): - print("Success. Size is: {} which is larger than: {}".format( - buffer_size, - minimum_buffer_size)) + print("Success. 
Size is: {} which is larger than: {}".format(buffer_size, minimum_buffer_size)) return 0 else: - print("Fail. Size is: {} which is smaller than: {}".format( - buffer_size, - minimum_buffer_size)) + print("Fail. Size is: {} which is smaller than: {}".format(buffer_size, minimum_buffer_size)) return 1 except Exception as e: print("Unable to open fifo, error: {}".format(str(e))) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/pqsi-pqsp.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/pqsi-pqsp.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/pqsi-pqsp.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/pqsi-pqsp.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -26,18 +26,16 @@ server = Test.MakeOriginServer("server") request_header = { - "headers": - "GET /test HTTP/1.1\r\n" - "Host: whatever\r\n" - "\r\n", + "headers": "GET /test HTTP/1.1\r\n" + "Host: whatever\r\n" + "\r\n", "body": "", 'timestamp': "1469733493.993", } response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "body": "body\n", 'timestamp': "1469733493.993", } @@ -45,15 +43,14 @@ nameserver = Test.MakeDNServer("dns", default='127.0.0.1') -ts.Disk.records_config.update({ - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.cache.required_headers': 0, -}) -ts.Disk.remap_config.AddLine( - 'map / http://localhost:{}/'.format(server.Variables.Port) -) +ts.Disk.records_config.update( + { + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.http.cache.http': 1, + 'proxy.config.http.cache.required_headers': 0, + }) +ts.Disk.remap_config.AddLine('map / http://localhost:{}/'.format(server.Variables.Port)) ts.Disk.logging_yaml.AddLines( ''' @@ -64,8 +61,7 @@ logs: - filename: field-test format: custom -'''.split("\n") -) +'''.split("\n")) tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -84,9 +80,7 @@ # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + log_filespec -) +tr.Processes.Default.Command = (os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + log_filespec) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/sigusr2.test.py trafficserver-9.2.4+ds/tests/gold_tests/logging/sigusr2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/sigusr2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/sigusr2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,7 +20,6 @@ import os import sys - TRAFFIC_MANAGER_PID_SCRIPT = 'ts_process_handler.py' @@ -40,16 +39,17 @@ self._ts_name = "sigusr2_ts{}".format(Sigusr2Test.__ts_counter) Sigusr2Test.__ts_counter += 1 self.ts = Test.MakeATSProcess(self._ts_name, command="traffic_manager") - self.ts.Disk.records_config.update({ - 'proxy.config.http.wait_for_cache': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'log', - 'proxy.config.log.periodic_tasks_interval': 1, - - # All log rotation should be handled externally. 
- 'proxy.config.log.rolling_enabled': 0, - 'proxy.config.log.auto_delete_rolled_files': 0, - }) + self.ts.Disk.records_config.update( + { + 'proxy.config.http.wait_for_cache': 1, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'log', + 'proxy.config.log.periodic_tasks_interval': 1, + + # All log rotation should be handled externally. + 'proxy.config.log.rolling_enabled': 0, + 'proxy.config.log.auto_delete_rolled_files': 0, + }) # For this test, more important than the listening port is the existence of the # log files. In particular, it can take a few seconds for traffic_manager to @@ -67,10 +67,9 @@ self.ts.Disk.File(self.rotated_manager_log, id="manager_log_old") self.ts.Disk.remap_config.AddLine( - 'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format( - self.ts.Variables.port, self.server.Variables.Port) - ) - self.ts.Disk.logging_yaml.AddLine(''' + 'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(self.ts.Variables.port, self.server.Variables.Port)) + self.ts.Disk.logging_yaml.AddLine( + ''' logging: formats: - name: has_path @@ -93,13 +92,19 @@ server = Test.MakeOriginServer("server") Sigusr2Test.__server = server for path in ['/first', '/second', '/third']: - request_header = {"headers": "GET {} HTTP/1.1\r\n" - "Host: does.not.matter\r\n\r\n".format(path), - "timestamp": "1469733493.993", "body": ""} - response_header = {"headers": "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n\r\n", - "timestamp": "1469733493.993", "body": "xxx"} + request_header = { + "headers": "GET {} HTTP/1.1\r\n" + "Host: does.not.matter\r\n\r\n".format(path), + "timestamp": "1469733493.993", + "body": "" + } + response_header = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n\r\n", + "timestamp": "1469733493.993", + "body": "xxx" + } server.addResponse("sessionlog.json", request_header, response_header) return server @@ -127,10 +132,9 @@ diags_test = Sigusr2Test() # Configure our rotation processes. -rotate_diags_log = tr1.Processes.Process("rotate_diags_log", "mv {} {}".format( - diags_test.diags_log, diags_test.rotated_diags_log)) -rotate_manager_log = tr1.Processes.Process("rotate_manager_log", "mv {} {}".format( - diags_test.manager_log, diags_test.rotated_manager_log)) +rotate_diags_log = tr1.Processes.Process("rotate_diags_log", "mv {} {}".format(diags_test.diags_log, diags_test.rotated_diags_log)) +rotate_manager_log = tr1.Processes.Process( + "rotate_manager_log", "mv {} {}".format(diags_test.manager_log, diags_test.rotated_manager_log)) # Configure the signaling of SIGUSR2 to traffic_manager. tr1.Processes.Default.Command = diags_test.get_sigusr2_signal_command() @@ -147,23 +151,19 @@ # manager.log should have been rotated. Check for the expected content in the # old file and the newly created file. diags_test.ts.Disk.manager_log_old.Content += Testers.ContainsExpression( - "received SIGUSR2, rotating the logs", - "manager.log_old should explain that SIGUSR2 was passed to it") + "received SIGUSR2, rotating the logs", "manager.log_old should explain that SIGUSR2 was passed to it") diags_test.ts.Disk.manager_log.Content += Testers.ContainsExpression( - "Reseated manager.log", - "The new manager.log should indicate the newly opened manager.log") + "Reseated manager.log", "The new manager.log should indicate the newly opened manager.log") # diags.log should have been rotated. The old one had the reference to traffic # server running, this new one shouldn't. 
But it should indicate that the new # diags.log was opened. diags_test.ts.Disk.diags_log.Content += Testers.ExcludesExpression( - "traffic server running", - "The new diags.log should not reference the running traffic server") + "traffic server running", "The new diags.log should not reference the running traffic server") diags_test.ts.Disk.diags_log.Content += Testers.ContainsExpression( - "Reseated diags.log", - "The new diags.log should indicate the newly opened diags.log") + "Reseated diags.log", "The new diags.log should indicate the newly opened diags.log") # # Test 2: Verify SIGUSR2 isn't needed for rotated configured logs. @@ -172,8 +172,7 @@ configured_test = Sigusr2Test() first_curl = tr2.Processes.Process( - "first_curl", - 'curl "http://127.0.0.1:{0}/first" --verbose'.format(configured_test.ts.Variables.port)) + "first_curl", 'curl "http://127.0.0.1:{0}/first" --verbose'.format(configured_test.ts.Variables.port)) # Note that for each of these processes, aside from the final Default one, they # are all treated like long-running servers to AuTest. Thus the long sleeps # only allow us to wait until the logs get populated with the desired content, @@ -183,12 +182,11 @@ first_curl_ready.StartupTimeout = 30 first_curl_ready.Ready = When.FileContains(configured_test.configured_log, "/first") -rotate_log = tr2.Processes.Process("rotate_log_file", "mv {} {}".format( - configured_test.configured_log, configured_test.rotated_configured_log)) +rotate_log = tr2.Processes.Process( + "rotate_log_file", "mv {} {}".format(configured_test.configured_log, configured_test.rotated_configured_log)) second_curl = tr2.Processes.Process( - "second_curl", - 'curl "http://127.0.0.1:{0}/second" --verbose'.format(configured_test.ts.Variables.port)) + "second_curl", 'curl "http://127.0.0.1:{0}/second" --verbose'.format(configured_test.ts.Variables.port)) second_curl_ready = tr2.Processes.Process("second_curl_ready", 'sleep 30') # In the autest environment, it can take more than 10 seconds for the log file to be created. @@ -201,8 +199,7 @@ send_pkill_ready.Ready = When.FileExists(configured_test.configured_log) third_curl = tr2.Processes.Process( - "third_curl", - 'curl "http://127.0.0.1:{0}/third" --verbose'.format(configured_test.ts.Variables.port)) + "third_curl", 'curl "http://127.0.0.1:{0}/third" --verbose'.format(configured_test.ts.Variables.port)) third_curl_ready = tr2.Processes.Process("third_curl_ready", 'sleep 30') # In the autest environment, it can take more than 10 seconds for the log file to be created. third_curl_ready.StartupTimeout = 30 @@ -232,21 +229,15 @@ # Verify that the logs are in the correct files. 
configured_test.ts.Disk.configured_log.Content += Testers.ExcludesExpression( - "/first", - "The new test_rotation.log should not have the first GET retrieval in it.") + "/first", "The new test_rotation.log should not have the first GET retrieval in it.") configured_test.ts.Disk.configured_log.Content += Testers.ExcludesExpression( - "/second", - "The new test_rotation.log should not have the second GET retrieval in it.") + "/second", "The new test_rotation.log should not have the second GET retrieval in it.") configured_test.ts.Disk.configured_log.Content += Testers.ContainsExpression( - "/third", - "The new test_rotation.log should have the third GET retrieval in it.") + "/third", "The new test_rotation.log should have the third GET retrieval in it.") configured_test.ts.Disk.configured_log_old.Content += Testers.ContainsExpression( - "/first", - "test_rotation.log_old should have the first GET retrieval in it.") + "/first", "test_rotation.log_old should have the first GET retrieval in it.") configured_test.ts.Disk.configured_log_old.Content += Testers.ContainsExpression( - "/second", - "test_rotation.log_old should have the second GET retrieval in it.") + "/second", "test_rotation.log_old should have the second GET retrieval in it.") configured_test.ts.Disk.configured_log_old.Content += Testers.ExcludesExpression( - "/third", - "test_rotation.log_old should not have the third GET retrieval in it.") + "/third", "test_rotation.log_old should not have the third GET retrieval in it.") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/logging/ts_process_handler.py trafficserver-9.2.4+ds/tests/gold_tests/logging/ts_process_handler.py --- trafficserver-9.2.3+ds/tests/gold_tests/logging/ts_process_handler.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/logging/ts_process_handler.py 2024-04-03 15:38:30.000000000 +0000 @@ -79,18 +79,13 @@ def parse_args(): - parser = argparse.ArgumentParser( - description='Interact with a Traffic Server process') + parser = argparse.ArgumentParser(description='Interact with a Traffic Server process') parser.add_argument( - 'ts_identifier', - help='An identifier in the command line for the desired ' + 'ts_identifier', help='An identifier in the command line for the desired ' 'Traffic Server process.') + parser.add_argument('--signal', help='Send the given signal to the process.') parser.add_argument( - '--signal', - help='Send the given signal to the process.') - parser.add_argument( - '--parent', action="store_true", default=False, - help='Interact with the parent process of the Traffic Server process') + '--parent', action="store_true", default=False, help='Interact with the parent process of the Traffic Server process') return parser.parse_args() @@ -100,8 +95,7 @@ try: process = get_desired_process(args.ts_identifier, args.parent) except GetPidError as e: - print(traceback.format_exception(None, e, e.__traceback__), - file=sys.stderr, flush=True) + print(traceback.format_exception(None, e, e.__traceback__), file=sys.stderr, flush=True) return 1 if args.signal: diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/next_hop/strategies_ch/strategies_ch.test.py trafficserver-9.2.4+ds/tests/gold_tests/next_hop/strategies_ch/strategies_ch.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/next_hop/strategies_ch/strategies_ch.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/next_hop/strategies_ch/strategies_ch.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,11 +24,10 @@ # server = 
Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -52,30 +51,30 @@ ts_nh = [] for i in range(num_nh): ts = Test.MakeATSProcess(f"ts_nh{i}", use_traffic_out=False, command=f"traffic_server 2>nh_trace{i}.log") - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_nh.append(ts) ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. - 'proxy.config.http.cache.http': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. + 'proxy.config.http.cache.http': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 0, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -90,17 +89,19 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://next_hop{i}:{ts_nh[i].Variables.port}") s.AddLine(f" weight: 1.0") -s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: false", - " parent_is_proxy: true", - " ignore_self_detect: true", - " groups:", - " - *g1", - " scheme: http"]) +s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: path", + " go_direct: false", + " parent_is_proxy: true", + " ignore_self_detect: true", + " groups:", + " - *g1", + " scheme: http", + ]) # Fallover not currently tested. 
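For readability, the document those AddLine/AddLines calls write into strategies.yaml is spelled out below as a plain Python string. The key/value lines are taken from the strings above; the exact indentation is approximated, and *g1 aliases the host group built by the preceding per-host loop.

# Reading aid only: the strategy document assembled above (indentation approximated).
strategy_doc = "\n".join(
    [
        "strategies:",
        "  - strategy: the-strategy",
        "    policy: consistent_hash",
        "    hash_key: path",
        "    go_direct: false",
        "    parent_is_proxy: true",
        "    ignore_self_detect: true",
        "    groups:",
        "      - *g1",
        "    scheme: http",
    ])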
# @@ -111,11 +112,9 @@ # " response_codes:", # " - 404", # " health_check:", -# " - passive"]) +# " - passive",]) -ts.Disk.remap_config.AddLine( - "map http://dummy.com http://not_used @strategy=the-strategy" -) +ts.Disk.remap_config.AddLine("map http://dummy.com http://not_used @strategy=the-strategy") tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -128,9 +127,7 @@ for i in range(num_objects): tr = Test.AddTestRun() - tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}' - ) + tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/next_hop/strategies_ch2/strategies_ch2.test.py trafficserver-9.2.4+ds/tests/gold_tests/next_hop/strategies_ch2/strategies_ch2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/next_hop/strategies_ch2/strategies_ch2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/next_hop/strategies_ch2/strategies_ch2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,11 +24,10 @@ # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -52,30 +51,30 @@ ts_nh = [] for i in range(num_nh): ts = Test.MakeATSProcess(f"ts_nh{i}") - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_nh.append(ts) ts = Test.MakeATSProcess("ts", use_traffic_out=False, command="traffic_server 2> trace.log") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. - 'proxy.config.http.cache.http': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. 
+ 'proxy.config.http.cache.http': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 0, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -90,18 +89,19 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://next_hop{i}:{ts_nh[i].Variables.port}") s.AddLine(f" weight: 1.0") -s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: false", - " parent_is_proxy: true", - " ignore_self_detect: true", - " groups:", - " - *g1", - " scheme: http" -]) +s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: path", + " go_direct: false", + " parent_is_proxy: true", + " ignore_self_detect: true", + " groups:", + " - *g1", + " scheme: http", + ]) # Use default fallover config. # @@ -112,12 +112,10 @@ # " response_codes:", # " - 404", # " health_check:", -# " - passive" +# " - passive", # ]) -ts.Disk.remap_config.AddLine( - "map http://dummy.com http://not_used @strategy=the-strategy" -) +ts.Disk.remap_config.AddLine("map http://dummy.com http://not_used @strategy=the-strategy") tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -131,9 +129,7 @@ for i in range(num_objects): tr = Test.AddTestRun() - tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}' - ) + tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 @@ -145,15 +141,11 @@ for i in range(num_objects): tr = Test.AddTestRun() - tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}' - ) + tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - "grep -F PARENT_SPECIFIED trace.log | sed 's/^.*(next_hop) [^ ]* //' | sed 's/[.][0-9]*$$//'" -) +tr.Processes.Default.Command = ("grep -F PARENT_SPECIFIED trace.log | sed 's/^.*(next_hop) [^ ]* //' | sed 's/[.][0-9]*$$//'") tr.Processes.Default.Streams.stdout = "trace.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/next_hop/strategies_stale/strategies_stale.test.py trafficserver-9.2.4+ds/tests/gold_tests/next_hop/strategies_stale/strategies_stale.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/next_hop/strategies_stale/strategies_stale.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/next_hop/strategies_stale/strategies_stale.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,11 +24,10 @@ # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=2\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=2\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -48,29 +47,29 @@ # Define next hop trafficserver instances. 
# ts_nh = Test.MakeATSProcess(f"ts_nh0", use_traffic_out=False, command=f"traffic_server 2>nh_trace.log") -ts_nh.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", -}) -ts_nh.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" -) +ts_nh.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) +ts_nh.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. + 'proxy.config.http.cache.http': 1, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 0, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -84,17 +83,19 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://next_hop0:{ts_nh.Variables.port}") s.AddLine(f" weight: 1.0") -s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: false", - " parent_is_proxy: true", - " ignore_self_detect: true", - " groups:", - " - *g1", - " scheme: http"]) +s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: path", + " go_direct: false", + " parent_is_proxy: true", + " ignore_self_detect: true", + " groups:", + " - *g1", + " scheme: http", + ]) # Fallover not currently tested. 
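The commented-out lines that follow keep a failover block disabled (the same keys appear, also commented, in the peering tests later in this diff). If retries were exercised here, the stanza would be appended to the same AddLines list, roughly as sketched below; the indentation and placement are assumed rather than taken from a working configuration.

# Sketch only: the retry stanza this test leaves commented out, as it might be
# appended to the strategy entry above (values copied from the commented lines,
# indentation assumed).
s.AddLines(
    [
        "    failover:",
        "      max_simple_retries: 2",
        "      response_codes:",
        "        - 404",
        "      health_check:",
        "        - passive",
    ])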
# @@ -105,11 +106,9 @@ # " response_codes:", # " - 404", # " health_check:", -# " - passive"]) +# " - passive",]) -ts.Disk.remap_config.AddLine( - "map http://dummy.com http://not_used @strategy=the-strategy" -) +ts.Disk.remap_config.AddLine("map http://dummy.com http://not_used @strategy=the-strategy") tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -120,9 +119,7 @@ tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj0' -) +tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj0') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 @@ -133,9 +130,7 @@ # Request should come back as 200 tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj0' -) +tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj0') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/next_hop/zzz_strategies_peer/zzz_strategies_peer.test.py trafficserver-9.2.4+ds/tests/gold_tests/next_hop/zzz_strategies_peer/zzz_strategies_peer.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/next_hop/zzz_strategies_peer/zzz_strategies_peer.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/next_hop/zzz_strategies_peer/zzz_strategies_peer.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,11 +27,10 @@ # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -56,15 +55,14 @@ for i in range(num_upstream): ts = Test.MakeATSProcess(f"ts_upstream{i}") dns.addRecords(records={f"ts_upstream{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_upstream.append(ts) # Define peer trafficserver instances. @@ -78,18 +76,19 @@ ts = ts_peer[i] dns.addRecords(records={f"ts_peer{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb|cachekey', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. 
- 'proxy.config.http.cache.http': 1, - 'proxy.config.http.cache.required_headers': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 1, - }) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb|cachekey', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. + 'proxy.config.http.cache.http': 1, + 'proxy.config.http.cache.required_headers': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 1, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -112,34 +111,36 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://ts_upstream{j}:{ts_upstream[j].Variables.port}") s.AddLine(f" weight: 1.0") - s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: cache_key", - " go_direct: false", - " parent_is_proxy: true", - " cache_peer_result: false", - " ignore_self_detect: false", - " groups:", - " - *peer_group", - " - *peer_upstream", - " scheme: http", - " failover:", - " ring_mode: peering_ring", - f" self: ts_peer{i}", - #" max_simple_retries: 2", - #" response_codes:", - #" - 404", - #" health_check:", - #" - passive", - ]) + s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: cache_key", + " go_direct: false", + " parent_is_proxy: true", + " cache_peer_result: false", + " ignore_self_detect: false", + " groups:", + " - *peer_group", + " - *peer_upstream", + " scheme: http", + " failover:", + " ring_mode: peering_ring", + f" self: ts_peer{i}", + #" max_simple_retries: 2", + #" response_codes:", + #" - 404", + #" health_check:", + #" - passive", + ]) suffix = " @strategy=the-strategy @plugin=cachekey.so @pparam=--uri-type=remap @pparam=--capture-prefix=/(.*):(.*)/$1/" - ts.Disk.remap_config.AddLines([ - "map http://dummy.com http://not_used" + suffix, - "map http://not_used http://also_not_used" + suffix, - ]) + ts.Disk.remap_config.AddLines( + [ + "map http://dummy.com http://not_used" + suffix, + "map http://not_used http://also_not_used" + suffix, + ]) tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -154,8 +155,7 @@ for i in range(num_object): tr = Test.AddTestRun() tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts_peer[i % num_peer].Variables.port} http://dummy.com/obj{i}' - ) + f'curl --verbose --proxy 127.0.0.1:{ts_peer[i % num_peer].Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 @@ -163,15 +163,13 @@ tr = Test.AddTestRun() # num_peer must not be a multiple of 3 tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts_peer[(i * 3) % num_peer].Variables.port} http://dummy.com/obj{i}' - ) + f'curl --verbose --proxy 127.0.0.1:{ts_peer[(i * 3) % num_peer].Variables.port} http://dummy.com/obj{i}') 
tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( "grep -e '^+++' -e '^[A-Z].*TTP/' -e '^.alts. --' -e 'PARENT_SPECIFIED' trace_peer*.log" - " | sed 's/^.*(next_hop) [^ ]* //' | sed 's/[.][0-9]*$$//'" -) + " | sed 's/^.*(next_hop) [^ ]* //' | sed 's/[.][0-9]*$$//'") tr.Processes.Default.Streams.stdout = "trace.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/next_hop/zzz_strategies_peer2/zzz_strategies_peer2.test.py trafficserver-9.2.4+ds/tests/gold_tests/next_hop/zzz_strategies_peer2/zzz_strategies_peer2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/next_hop/zzz_strategies_peer2/zzz_strategies_peer2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/next_hop/zzz_strategies_peer2/zzz_strategies_peer2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,11 +27,10 @@ # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -56,15 +55,14 @@ for i in range(num_upstream): ts = Test.MakeATSProcess(f"ts_upstream{i}") dns.addRecords(records={f"ts_upstream{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_upstream.append(ts) # Define peer trafficserver instances. @@ -78,18 +76,19 @@ ts = ts_peer[i] dns.addRecords(records={f"ts_peer{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.cache.required_headers': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 0, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 1, - }) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. 
+ 'proxy.config.http.cache.http': 1, + 'proxy.config.http.cache.required_headers': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 0, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 1, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -103,27 +102,28 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://ts_peer{j}:{ts_peer[j].Variables.port}") s.AddLine(f" weight: 1.0") - s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: true", - " parent_is_proxy: true", - " cache_peer_result: false", - " ignore_self_detect: false", - " groups:", - " - *peer_group", - " scheme: http", - " failover:", - " ring_mode: peering_ring", - f" self: ts_peer{i}", - #" max_simple_retries: 2", - #" response_codes:", - #" - 404", - #" health_check:", - #" - passive", - ]) + s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: path", + " go_direct: true", + " parent_is_proxy: true", + " cache_peer_result: false", + " ignore_self_detect: false", + " groups:", + " - *peer_group", + " scheme: http", + " failover:", + " ring_mode: peering_ring", + f" self: ts_peer{i}", + #" max_simple_retries: 2", + #" response_codes:", + #" - 404", + #" health_check:", + #" - passive", + ]) for i in range(num_upstream): prefix = f"http://ts_upstream{i}:{ts_upstream[i].Variables.port}/" @@ -163,7 +163,6 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( "grep -e '^+++' -e '^[A-Z].*TTP/' -e '^.alts. --' -e 'PARENT_SPECIFIED' trace_peer*.log" - " | sed 's/^.*(next_hop) [^ ]* //' | sed 's/[.][0-9]*$$//' " + normalize_ports -) + " | sed 's/^.*(next_hop) [^ ]* //' | sed 's/[.][0-9]*$$//' " + normalize_ports) tr.Processes.Default.Streams.stdout = "trace.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/null_transform/null_transform.test.py trafficserver-9.2.4+ds/tests/gold_tests/null_transform/null_transform.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/null_transform/null_transform.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/null_transform/null_transform.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,4 +1,3 @@ - ''' ''' # Licensed to the Apache Software Foundation (ASF) under one @@ -17,14 +16,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- Test.Summary = ''' Test a basic null transform plugin ''' -Test.SkipUnless( - Condition.PluginExists('null_transform.so') -) +Test.SkipUnless(Condition.PluginExists('null_transform.so')) Test.ContinueOnFail = True @@ -33,25 +29,18 @@ server = Test.MakeOriginServer("server") Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": "" - } +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # Expected response from origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - - "body": "This is expected response."} +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "This is expected response." +} # Add response the server dictionary server.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'null_transform' -}) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'null_transform'}) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Load plugin Test.PrepareInstalledPlugin('null_transform.so', ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/origin_connection/per_server_connection_max.test.py trafficserver-9.2.4+ds/tests/gold_tests/origin_connection/per_server_connection_max.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/origin_connection/per_server_connection_max.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/origin_connection/per_server_connection_max.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- Test.Summary = __doc__ @@ -44,31 +43,26 @@ def _configure_trafficserver(self) -> None: """Configure Traffic Server to be used in the test.""" self._ts = Test.MakeATSProcess("ts") - self._ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{self._server.Variables.http_port}' - ) - self._ts.Disk.records_config.update({ - 'proxy.config.dns.nameservers': f"127.0.0.1:{self._nameserver.Variables.Port}", - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.per_server.connection.max': self._origin_max_connections, - }) + self._ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{self._server.Variables.http_port}') + self._ts.Disk.records_config.update( + { + 'proxy.config.dns.nameservers': f"127.0.0.1:{self._nameserver.Variables.Port}", + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.per_server.connection.max': self._origin_max_connections, + }) self._ts.Disk.diags_log.Content += Testers.ContainsExpression( f'WARNING:.*too many connections:.*limit={self._origin_max_connections}', 'Verify the user is warned about the connection limit being hit.') def run(self) -> None: """Configure the TestRun.""" - tr = Test.AddTestRun( - 'Verify we enforce proxy.config.http.per_server.connection.max') + tr = Test.AddTestRun('Verify we enforce proxy.config.http.per_server.connection.max') tr.Processes.Default.StartBefore(self._nameserver) tr.Processes.Default.StartBefore(self._server) tr.Processes.Default.StartBefore(self._ts) - tr.AddVerifierClientProcess( - 'client', - self._replay_file, - http_ports=[self._ts.Variables.port]) + tr.AddVerifierClientProcess('client', self._replay_file, http_ports=[self._ts.Variables.port]) PerServerConnectionMaxTest().run() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/parent_proxy/parent-retry.test.py trafficserver-9.2.4+ds/tests/gold_tests/parent_proxy/parent-retry.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/parent_proxy/parent-retry.test.py 1970-01-01 00:00:00.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/parent_proxy/parent-retry.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -0,0 +1,47 @@ +""" +Test parent_retry config settings +""" +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +Test.testName = "Test parent_retry settings" +Test.ContinueOnFail = True + + +class ParentRetryTest: + """ + Test loading parent.config with parent_retry setting enabled + """ + ts_parent_hostname = "localhost:8081" + + def __init__(self): + """Initialize the test.""" + self._configure_ts_child() + + def _configure_ts_child(self): + self.ts_child = Test.MakeATSProcess("ts_child") + self.ts_child.Disk.parent_config.AddLine( + f'dest_domain=. 
method=get parent="{self.ts_parent_hostname}" parent_retry=unavailable_server_retry unavailable_server_retry_responses="502,503"' + ) + + def run(self): + tr = Test.AddTestRun() + tr.Processes.Default.StartBefore(self.ts_child) + tr.Processes.Default.Command = f'curl "{self.ts_child.Variables.port}" --verbose' + tr.StillRunningAfter = self.ts_child + + +ParentRetryTest().run() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/CppDelayTransformation/CppDelayTransformation.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/CppDelayTransformation/CppDelayTransformation.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/CppDelayTransformation/CppDelayTransformation.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/CppDelayTransformation/CppDelayTransformation.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,38 +16,30 @@ import os - Test.Summary = ''' Test CPPAPI example plugin DelayTransformation ''' -Test.SkipUnless( - Condition.PluginExists("DelayTransformationPlugin.so") -) +Test.SkipUnless(Condition.PluginExists("DelayTransformationPlugin.so")) server = Test.MakeOriginServer("server") resp_body = "1234567890" "1234567890" "1234567890" "1234567890" "1234567890" "\n" server.addResponse( - "sessionlog.json", - { - "headers": - "GET / HTTP/1.1\r\n" - "Host: does_not_matter" - "\r\n", + "sessionlog.json", { + "headers": "GET / HTTP/1.1\r\n" + "Host: does_not_matter" + "\r\n", "timestamp": "1469733493.993", - }, - { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - f"Content-Length: {len(resp_body)}\r\n" - "\r\n", + }, { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + f"Content-Length: {len(resp_body)}\r\n" + "\r\n", "timestamp": "1469733493.993", "body": resp_body - } -) + }) ts = Test.MakeATSProcess("ts") @@ -58,9 +50,7 @@ 'proxy.config.diags.debug.tags': 'delay_transformation', }) -ts.Disk.remap_config.AddLine( - f'map http://xyz/ http://127.0.0.1:{server.Variables.Port}/' -) +ts.Disk.remap_config.AddLine(f'map http://xyz/ http://127.0.0.1:{server.Variables.Port}/') tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -40,46 +40,29 @@ server = Test.MakeOriginServer("server", lookup_key="{%uuid}") # default root -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: none\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: none\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) body = "lets go surfin now" -req_full = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "uuid: 
full\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_full = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Cache-Control: max-age=500\r\n" + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_full = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "uuid: full\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_full = { + "headers": "HTTP/1.1 200 OK\r\n" + "Cache-Control: max-age=500\r\n" + "Connection: close\r\n" + 'Etag: "path"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_full, res_full) @@ -88,148 +71,119 @@ inner_str = "7-15" -req_inner = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes={}\r\n".format(inner_str) + - "uuid: inner\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_inner = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=500\r\n" + - "Content-Range: bytes {0}/{1}\r\n".format(inner_str, bodylen) + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": body[7:15] - } +req_inner = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "Range: bytes={}\r\n".format(inner_str) + + "uuid: inner\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_inner = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=500\r\n" + + "Content-Range: bytes {0}/{1}\r\n".format(inner_str, bodylen) + "Connection: close\r\n" + 'Etag: "path"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": body[7:15] +} server.addResponse("sessionlog.json", req_inner, res_inner) frange_str = "0-" -req_frange = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes={}\r\n".format(frange_str) + - "uuid: frange\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_frange = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=500\r\n" + - "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_frange = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "Range: bytes={}\r\n".format(frange_str) + + "uuid: frange\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_frange = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=500\r\n" + + "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + "Connection: close\r\n" + 'Etag: "path"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_frange, res_frange) last_str = "-5" -req_last = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes={}\r\n".format(last_str) + - "uuid: last\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_last = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=200\r\n" + - "Content-Range: bytes {0}-{1}/{1}\r\n".format(bodylen - 5, bodylen) + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - 
"\r\n", - "timestamp": "1469733493.993", - "body": body[-5:] - } +req_last = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "Range: bytes={}\r\n".format(last_str) + + "uuid: last\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_last = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=200\r\n" + + "Content-Range: bytes {0}-{1}/{1}\r\n".format(bodylen - 5, bodylen) + "Connection: close\r\n" + 'Etag: "path"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": body[-5:] +} server.addResponse("sessionlog.json", req_last, res_last) pselect_str = "1-10" -req_pselect = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: parentselect\r\n" + - "Accept: */*\r\n" + - "Range: bytes={}\r\n".format(pselect_str) + - "uuid: pselect\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_pselect = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=200\r\n" + - "Content-Range: bytes {}/19\r\n".format(pselect_str) + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": body[1:10] - } +req_pselect = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: parentselect\r\n" + "Accept: */*\r\n" + "Range: bytes={}\r\n".format(pselect_str) + + "uuid: pselect\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_pselect = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=200\r\n" + + "Content-Range: bytes {}/19\r\n".format(pselect_str) + "Connection: close\r\n" + 'Etag: "path"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": body[1:10] +} server.addResponse("sessionlog.json", req_pselect, res_pselect) -req_psd = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: psd\r\n" + - "Accept: */*\r\n" + - "Range: bytes={}\r\n".format(pselect_str) + - "uuid: pselect\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_psd = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: psd\r\n" + "Accept: */*\r\n" + "Range: bytes={}\r\n".format(pselect_str) + + "uuid: pselect\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} server.addResponse("sessionlog.json", req_psd, res_pselect) # cache range requests plugin remap ts.Setup.CopyAs('reason.conf', Test.RunDirectory) -ts.Disk.remap_config.AddLines([ - 'map http://www.example.com http://127.0.0.1:{}'.format(server.Variables.Port) + - ' @plugin=header_rewrite.so @pparam={}/reason.conf @plugin=cache_range_requests.so'.format(Test.RunDirectory), - - # parent select cache key option - 'map http://parentselect http://127.0.0.1:{}'.format(server.Variables.Port) + - ' @plugin=cache_range_requests.so @pparam=--ps-cachekey', - - # deprecated - 'map http://psd http://127.0.0.1:{}'.format(server.Variables.Port) + - ' @plugin=cache_range_requests.so @pparam=ps_mode:cache_key_url', -]) +ts.Disk.remap_config.AddLines( + [ + 'map http://www.example.com http://127.0.0.1:{}'.format(server.Variables.Port) + + ' @plugin=header_rewrite.so @pparam={}/reason.conf @plugin=cache_range_requests.so'.format(Test.RunDirectory), + + # parent select cache key option + 'map http://parentselect http://127.0.0.1:{}'.format(server.Variables.Port) + + ' @plugin=cache_range_requests.so @pparam=--ps-cachekey', + + # deprecated + 'map http://psd http://127.0.0.1:{}'.format(server.Variables.Port) + + ' @plugin=cache_range_requests.so @pparam=ps_mode:cache_key_url', + 
]) # cache debug ts.Disk.plugin_config.AddLine('xdebug.so') # minimal configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cache_range_requests|http', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cache_range_requests|http', + }) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{} -H "x-debug: x-cache"'.format(ts.Variables.port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cache_complete_responses.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cache_complete_responses.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cache_complete_responses.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cache_complete_responses.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -61,144 +61,105 @@ server = Test.MakeOriginServer("server", lookup_key="{%UID}") # default root -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: none\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: none\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) -small_req = {"headers": - "GET /obj HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "UID: SMALL\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -small_resp = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Cache-Control: max-age=1\r\n" + - "Connection: close\r\n" + - 'Etag: "772102f4-56f4bc1e6d417"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": small_body - } - -small_reval_req = {"headers": - "GET /obj HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "UID: SMALL-INM\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -small_reval_resp = {"headers": - "HTTP/1.1 304 Not Modified\r\n" + - "Cache-Control: max-age=10\r\n" + - "Connection: close\r\n" + - 'Etag: "772102f4-56f4bc1e6d417"\r\n' + - "\r\n", - "timestamp": "1469733493.993" - } - -slice_req = {"headers": - "GET /slice HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Range: bytes=0-4194303\r\n" + - "Accept: */*\r\n" + - "UID: SLICE\r\n" - "\r\n", - "timestamp": "1469733493.993", - } - -slice_resp = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Cache-Control: max-age=1\r\n" + - "Content-Range: bytes 0-{}/{}\r\n".format(slice_body_len - 1, slice_body_len * 2) + "\r\n" + - "Content-Length: {}\r\n".format(slice_body_len) + "\r\n" + - "Connection: close\r\n" + - 'Etag: "872104f4-d6bcaa1e6f979"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": slice_body - } - -slice_reval_req = {"headers": - "GET /slice HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "UID: SLICE-INM\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -slice_reval_resp = {"headers": - "HTTP/1.1 304 Not Modified\r\n" + - "Cache-Control: 
max-age=10\r\n" + - "Connection: close\r\n" + - 'Etag: "872104f4-d6bcaa1e6f979"\r\n' + - "\r\n", - "timestamp": "1469733493.993" - } - -naieve_req = {"headers": - "GET /naieve/obj HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "UID: NAIEVE\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -naieve_resp = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Cache-Control: max-age=1\r\n" + - "Connection: close\r\n" + - 'Etag: "cad04ff4-56f4bc197ceda"\r\n' + +small_req = { + "headers": "GET /obj HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "UID: SMALL\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +small_resp = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Cache-Control: max-age=1\r\n" + "Connection: close\r\n" + 'Etag: "772102f4-56f4bc1e6d417"\r\n' + + "\r\n", + "timestamp": "1469733493.993", + "body": small_body +} + +small_reval_req = { + "headers": "GET /obj HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "UID: SMALL-INM\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +small_reval_resp = { + "headers": + "HTTP/1.1 304 Not Modified\r\n" + "Cache-Control: max-age=10\r\n" + "Connection: close\r\n" + + 'Etag: "772102f4-56f4bc1e6d417"\r\n' + "\r\n", + "timestamp": "1469733493.993" +} + +slice_req = { + "headers": + "GET /slice HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Range: bytes=0-4194303\r\n" + "Accept: */*\r\n" + + "UID: SLICE\r\n" + "\r\n", + "timestamp": "1469733493.993", +} + +slice_resp = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Cache-Control: max-age=1\r\n" + + "Content-Range: bytes 0-{}/{}\r\n".format(slice_body_len - 1, slice_body_len * 2) + "\r\n" + + "Content-Length: {}\r\n".format(slice_body_len) + "\r\n" + "Connection: close\r\n" + 'Etag: "872104f4-d6bcaa1e6f979"\r\n' + + "\r\n", + "timestamp": "1469733493.993", + "body": slice_body +} + +slice_reval_req = { + "headers": "GET /slice HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "UID: SLICE-INM\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +slice_reval_resp = { + "headers": + "HTTP/1.1 304 Not Modified\r\n" + "Cache-Control: max-age=10\r\n" + "Connection: close\r\n" + + 'Etag: "872104f4-d6bcaa1e6f979"\r\n' + "\r\n", + "timestamp": "1469733493.993" +} + +naieve_req = { + "headers": "GET /naieve/obj HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "UID: NAIEVE\r\n" "\r\n", - "timestamp": "1469733493.993", - "body": small_body - } - -naieve_reval_req = {"headers": - "GET /naieve/obj HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "UID: NAIEVE-INM\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -naieve_reval_resp = {"headers": - "HTTP/1.1 304 Not Modified\r\n" + - "Cache-Control: max-age=10\r\n" + - "Connection: close\r\n" + - 'Etag: "cad04ff4-56f4bc197ceda"\r\n' + - "\r\n", - "timestamp": "1469733493.993" - } + "timestamp": "1469733493.993", + "body": "" +} + +naieve_resp = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Cache-Control: max-age=1\r\n" + "Connection: close\r\n" + 'Etag: "cad04ff4-56f4bc197ceda"\r\n' + + "\r\n", + "timestamp": "1469733493.993", + "body": small_body +} +naieve_reval_req = { + "headers": "GET /naieve/obj HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "UID: NAIEVE-INM\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +naieve_reval_resp = { + "headers": + "HTTP/1.1 304 Not Modified\r\n" + "Cache-Control: max-age=10\r\n" + "Connection: close\r\n" + + 'Etag: 
"cad04ff4-56f4bc197ceda"\r\n' + "\r\n", + "timestamp": "1469733493.993" +} server.addResponse("sessionlog.json", small_req, small_resp) server.addResponse("sessionlog.json", small_reval_req, small_reval_resp) @@ -210,27 +171,29 @@ # remap with the cache range requests plugin only # this is a "naieve" configuration due to the lack of range normalization performed at remap time by slice # this config should only be used if ranges have been reliably normalized by the requestor (either the client itself or a cache) -ts.Disk.remap_config.AddLines([ - f'map http://example.com/naieve http://127.0.0.1:{server.Variables.Port}/naieve \\' + - ' @plugin=cache_range_requests.so @pparam=--cache-complete-responses', -]) +ts.Disk.remap_config.AddLines( + [ + f'map http://example.com/naieve http://127.0.0.1:{server.Variables.Port}/naieve \\' + + ' @plugin=cache_range_requests.so @pparam=--cache-complete-responses', + ]) # remap with slice, cachekey, and the cache range requests plugin to ensure range normalization and cache keys are correct -ts.Disk.remap_config.AddLines([ - f'map http://example.com http://127.0.0.1:{server.Variables.Port} \\' + - ' @plugin=slice.so @pparam=--blockbytes=4m \\', - ' @plugin=cachekey.so @pparam=--key-type=cache_key @pparam=--include-headers=Range @pparam=--remove-all-params=true \\', - ' @plugin=cache_range_requests.so @pparam=--no-modify-cachekey @pparam=--cache-complete-responses', -]) +ts.Disk.remap_config.AddLines( + [ + f'map http://example.com http://127.0.0.1:{server.Variables.Port} \\' + ' @plugin=slice.so @pparam=--blockbytes=4m \\', + ' @plugin=cachekey.so @pparam=--key-type=cache_key @pparam=--include-headers=Range @pparam=--remove-all-params=true \\', + ' @plugin=cache_range_requests.so @pparam=--no-modify-cachekey @pparam=--cache-complete-responses', + ]) # cache debug ts.Disk.plugin_config.AddLine('xdebug.so') # enable debug -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cachekey|cache_range_requests|slice', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cachekey|cache_range_requests|slice', + }) # base cURL command curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{} -H "x-debug: x-cache, x-cache-key"'.format(ts.Variables.port) @@ -250,8 +213,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: miss, none", "expected cache miss") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # 1 Test - Fetch /obj with a different range but less than 4MB @@ -263,8 +225,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh, none", "expected cache hit") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # 2 Test - Revalidate /obj with a different range but less than 4MB @@ -277,8 +238,7 @@ ps.Streams.stdout.Content = 
Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-stale, none", "expected cache hit stale") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # 3 Test - Fetch /obj with a different range but less than 4MB @@ -290,8 +250,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh, none", "expected cache hit-fresh") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # Test round 2: repeat, but ensure we have 206s and matching Content-Range @@ -304,12 +263,10 @@ ps.ReturnCode = 0 ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 Partial Content") ps.Streams.stdout.Content = Testers.ContainsExpression( - "Content-Range: bytes 0-5000/8388608", - "expected Content-Range: bytes 0-5000/8388608") + "Content-Range: bytes 0-5000/8388608", "expected Content-Range: bytes 0-5000/8388608") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: miss, none", "expected cache miss") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # 5 Test - Fetch /slice with a different range but less than 4MB @@ -319,12 +276,10 @@ ps.ReturnCode = 0 ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 Partial Content") ps.Streams.stdout.Content = Testers.ContainsExpression( - "Content-Range: bytes 5001-5999/8388608", - "expected Content-Range: bytes 5001-5999/8388608") + "Content-Range: bytes 5001-5999/8388608", "expected Content-Range: bytes 5001-5999/8388608") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh, none", "expected cache hit") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # 6 Test - Revalidate /slice with a different range but less than 4MB @@ -335,12 +290,10 @@ ps.ReturnCode = 0 ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 Partial Content") ps.Streams.stdout.Content = Testers.ContainsExpression( - "Content-Range: bytes 0-403/8388608", - "expected Content-Range: bytes 0-403/8388608") + "Content-Range: bytes 0-403/8388608", "expected Content-Range: bytes 0-403/8388608") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-stale, none", "expected cache hit stale") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # 7 Test - 
Fetch /slice with a different range but less than 4MB @@ -350,12 +303,10 @@ ps.ReturnCode = 0 ps.Streams.stdout.Content = Testers.ContainsExpression("206 Partial Content", "expected 206 Partial Content") ps.Streams.stdout.Content = Testers.ContainsExpression( - "Content-Range: bytes 0-3999/8388608", - "expected Content-Range: bytes 0-3999/8388608") + "Content-Range: bytes 0-3999/8388608", "expected Content-Range: bytes 0-3999/8388608") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh, none", "expected cache hit-fresh") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", - "expected cache key with bytes 0-4194303") + "X-Cache-Key: /.*?/Range:bytes=0-4194303/slice", "expected cache key with bytes 0-4194303") tr.StillRunningAfter = ts # Test round 3: test behavior of the cache range requests plugin when caching complete ranges *without* the slice and cachekey plugins @@ -372,8 +323,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: miss", "expected cache miss") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", - "expected cache key with bytes 0-5000") + "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", "expected cache key with bytes 0-5000") tr.StillRunningAfter = ts # 9 Test - Fetch /naieve/obj with the same Range header @@ -385,8 +335,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh", "expected cache hit") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", - "expected cache key with bytes 0-5000") + "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", "expected cache key with bytes 0-5000") tr.StillRunningAfter = ts # 10 Test - Revalidate /naieve/obj with the same Range header @@ -399,8 +348,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-stale", "expected cache hit stale") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", - "expected cache key with bytes 0-5000") + "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", "expected cache key with bytes 0-5000") tr.StillRunningAfter = ts # 11 Test - Fetch /naieve/obj with the same Range header @@ -412,8 +360,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh", "expected cache hit-fresh") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", - "expected cache key with bytes 0-5000") + "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", "expected cache key with bytes 0-5000") tr.StillRunningAfter = ts # 12 Test - Fetch /naieve/obj with a *different* Range header; note the cache key changes and is a miss for the same object @@ -425,8 +372,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: miss", "expected cache miss") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: 
http://.*?/naieve/obj-bytes=444-777", - "expected cache key with bytes 444-777") + "X-Cache-Key: http://.*?/naieve/obj-bytes=444-777", "expected cache key with bytes 444-777") tr.StillRunningAfter = ts # 13 Test - Fetch /naieve/obj with the prior Range header; now a cache hit but we've effectively cached /naieve/obj twice @@ -439,8 +385,7 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit", "expected cache hit-fresh") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: http://.*?/naieve/obj-bytes=444-777", - "expected cache key with bytes 444-777") + "X-Cache-Key: http://.*?/naieve/obj-bytes=444-777", "expected cache key with bytes 444-777") tr.StillRunningAfter = ts # 14 Test - Fetch /naieve/obj with the original Range header (0-5000); still a cache hit @@ -452,6 +397,5 @@ ps.Streams.stdout.Content = Testers.ExcludesExpression("Content-Range:", "expected no Content-Range header") ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: hit-fresh", "expected cache hit-fresh") ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", - "expected cache key with bytes 0-5000") + "X-Cache-Key: http://.*?/naieve/obj-bytes=0-5000", "expected cache key with bytes 0-5000") tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -40,22 +40,13 @@ server = Test.MakeOriginServer("server", lookup_key="{%uuid}") # default root -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: none\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: none\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) @@ -63,94 +54,71 @@ bodylen = len(body) # this request should work -req_full = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "uuid: full\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_full = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - 'Etag: "foo"\r\n' + - "Cache-Control: public, max-age=500\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_full = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "uuid: full\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_full = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + 'Etag: "foo"\r\n' + + "Cache-Control: public, max-age=500\r\n" + "Connection: 
close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_full, res_full) # this request should work -req_good = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes=0-\r\n" + - "uuid: range_full\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_good = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - 'Etag: "foo"\r\n' + - "Cache-Control: public, max-age=500\r\n" + - "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_good = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "Range: bytes=0-\r\n" + + "uuid: range_full\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_good = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + 'Etag: "foo"\r\n' + + "Cache-Control: public, max-age=500\r\n" + "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + "Connection: close\r\n" + + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_good, res_good) # this request should fail with a cache_range_requests asset -req_fail = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.fail.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes=0-\r\n" + - "uuid: range_fail\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_fail = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - 'Etag: "foo"\r\n' + - "Cache-Control: public, max-age=500\r\n" + - "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_fail = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.fail.com\r\n" + "Accept: */*\r\n" + "Range: bytes=0-\r\n" + "uuid: range_fail\r\n" + + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_fail = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + 'Etag: "foo"\r\n' + + "Cache-Control: public, max-age=500\r\n" + "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + "Connection: close\r\n" + + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_fail, res_fail) # cache range requests plugin remap, working config ts.Disk.remap_config.AddLine( 'map http://www.example.com http://127.0.0.1:{}'.format(server.Variables.Port) + - ' @plugin=cachekey.so @pparam=--include-headers=Range' + - ' @plugin=cache_range_requests.so @pparam=--no-modify-cachekey', -) + ' @plugin=cachekey.so @pparam=--include-headers=Range' + ' @plugin=cache_range_requests.so @pparam=--no-modify-cachekey',) # improperly configured cache_range_requests with cachekey ts.Disk.remap_config.AddLine( - 'map http://www.fail.com http://127.0.0.1:{}'.format(server.Variables.Port) + - ' @plugin=cachekey.so @pparam=--static-prefix=foo' - ' @plugin=cache_range_requests.so', -) + 'map http://www.fail.com http://127.0.0.1:{}'.format(server.Variables.Port) + ' @plugin=cachekey.so @pparam=--static-prefix=foo' + ' @plugin=cache_range_requests.so',) # cache debug ts.Disk.plugin_config.AddLine('xdebug.so') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey_global.test.py 
trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey_global.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey_global.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_cachekey_global.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -40,22 +40,13 @@ server = Test.MakeOriginServer("server", lookup_key="{%uuid}") # default root -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: none\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: none\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) @@ -63,103 +54,82 @@ bodylen = len(body) # this request should work -req_full = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "uuid: full\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_full = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - 'Etag: "foo"\r\n' + - "Cache-Control: public, max-age=500\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_full = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "uuid: full\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_full = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + 'Etag: "foo"\r\n' + + "Cache-Control: public, max-age=500\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_full, res_full) # this request should work -req_good = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes=0-\r\n" + - "uuid: range_full\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_good = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - 'Etag: "foo"\r\n' + - "Cache-Control: public, max-age=500\r\n" + - "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_good = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "Range: bytes=0-\r\n" + + "uuid: range_full\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_good = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + 'Etag: "foo"\r\n' + + "Cache-Control: public, max-age=500\r\n" + "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + "Connection: close\r\n" + + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_good, res_good) # this request should fail with a cache_range_requests asset -req_fail = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.fail.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes=0-\r\n" + - "uuid: range_fail\r\n" + - "\r\n", - "timestamp": "1469733493.993", 
- "body": "" - } - -res_fail = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - 'Etag: "foo"\r\n' + - "Cache-Control: public, max-age=500\r\n" + - "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_fail = { + "headers": + "GET /path HTTP/1.1\r\n" + "Host: www.fail.com\r\n" + "Accept: */*\r\n" + "Range: bytes=0-\r\n" + "uuid: range_fail\r\n" + + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_fail = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + 'Etag: "foo"\r\n' + + "Cache-Control: public, max-age=500\r\n" + "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + "Connection: close\r\n" + + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_fail, res_fail) # cache range requests plugin remap, working config -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{}'.format(server.Variables.Port)) # improperly configured cache_range_requests with cachekey -ts.Disk.remap_config.AddLine( - 'map http://www.fail.com http://127.0.0.1:{}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map http://www.fail.com http://127.0.0.1:{}'.format(server.Variables.Port)) # cache debug -ts.Disk.plugin_config.AddLines([ - 'cachekey.so --include-headers=Range --static-prefix=foo', - 'cache_range_requests.so --no-modify-cachekey', - 'xdebug.so', -]) +ts.Disk.plugin_config.AddLines( + [ + 'cachekey.so --include-headers=Range --static-prefix=foo', + 'cache_range_requests.so --no-modify-cachekey', + 'xdebug.so', + ]) # minimal configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cachekey|cache_range_requests', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cachekey|cache_range_requests', + }) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{} -H "x-debug: x-cache-key"'.format(ts.Variables.port) @@ -171,6 +141,5 @@ ps.Command = curl_and_args + ' http://www.example.com/path -r0- -H "uuid: full"' ps.ReturnCode = 0 ps.Streams.stdout.Content = Testers.ContainsExpression( - "X-Cache-Key: /foo/Range:bytes=0-/path", - "expected cachekey style range request in cachekey") + "X-Cache-Key: /foo/Range:bytes=0-/path", "expected cachekey style range request in cachekey") tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_ims.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_ims.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_ims.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cache_range_requests/cache_range_requests_ims.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -41,60 +41,43 @@ server = Test.MakeOriginServer("server") # default root -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: none\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = { + "headers": 
"GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: none\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) body = "lets go surfin now" bodylen = len(body) -req_full = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "Accept: */*\r\n" + - "Range: bytes=0-\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } - -res_full = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=500\r\n" + - "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + - "Connection: close\r\n" + - 'Etag: "772102f4-56f4bc1e6d417"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": body - } +req_full = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Accept: */*\r\n" + "Range: bytes=0-\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +res_full = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=500\r\n" + + "Content-Range: bytes 0-{0}/{0}\r\n".format(bodylen) + "Connection: close\r\n" + 'Etag: "772102f4-56f4bc1e6d417"\r\n' + + "\r\n", + "timestamp": "1469733493.993", + "body": body +} server.addResponse("sessionlog.json", req_full, res_full) # cache range requests plugin remap -ts.Disk.remap_config.AddLines([ - f'map http://ims http://127.0.0.1:{server.Variables.Port}' + - ' @plugin=cache_range_requests.so @pparam=--consider-ims', - f'map http://imsheader http://127.0.0.1:{server.Variables.Port}' + - ' @plugin=cache_range_requests.so @pparam=--consider-ims' + - ' @pparam=--ims-header=CrrIms', -]) +ts.Disk.remap_config.AddLines( + [ + f'map http://ims http://127.0.0.1:{server.Variables.Port}' + ' @plugin=cache_range_requests.so @pparam=--consider-ims', + f'map http://imsheader http://127.0.0.1:{server.Variables.Port}' + + ' @plugin=cache_range_requests.so @pparam=--consider-ims' + ' @pparam=--ims-header=CrrIms', + ]) # cache debug ts.Disk.plugin_config.AddLine('xdebug.so') @@ -117,7 +100,6 @@ ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: miss", "expected cache miss for load") tr.StillRunningAfter = ts - # test inner range # 1 Test - Fetch range into cache tr = Test.AddTestRun("0- cache hit check") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cert_update/cert_update.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cert_update/cert_update.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cert_update/cert_update.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cert_update/cert_update.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,13 +25,11 @@ Test.SkipUnless( Condition.HasProgram("openssl", "Openssl need to be installed on system for this test to work"), - Condition.PluginExists('cert_update.so') -) + Condition.PluginExists('cert_update.so')) # Set up origin server server = Test.MakeOriginServer("server") -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", 
request_header, response_header) @@ -47,24 +45,24 @@ # reserve port, attach it to 'ts' so it is released later ports.get_port(ts, 's_server_port') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cert_update', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem' -) - -ts.Disk.remap_config.AddLines([ - 'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port), - 'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port) -]) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cert_update', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server1.pem ssl_key_name=server1.pem') + +ts.Disk.remap_config.AddLines( + [ + 'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port), + 'map https://foo.com https://127.0.0.1:{0}'.format(ts.Variables.s_server_port), + ]) ts.Disk.sni_yaml.AddLines([ 'sni:', @@ -81,8 +79,7 @@ tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.Command = ( - 'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port) -) + 'curl --verbose --insecure --ipv4 --resolve bar.com:{0}:127.0.0.1 https://bar.com:{0}'.format(ts.Variables.ssl_port)) tr.Processes.Default.Streams.stderr = "gold/server-cert-pre.gold" tr.Processes.Default.ReturnCode = 0 tr.StillRunningAfter = server @@ -91,8 +88,7 @@ tr = Test.AddTestRun("Server-Cert-Update") tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = ( - '{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(ts.Variables.BINDIR, ts.Variables.SSLDir) -) + '{0}/traffic_ctl plugin msg cert_update.server {1}/server2.pem'.format(ts.Variables.BINDIR, ts.Variables.SSLDir)) ts.Disk.traffic_out.Content = "gold/update.gold" ts.StillRunningAfter = server @@ -109,10 +105,8 @@ # s_server should see client (Traffic Server) as alice.com tr = Test.AddTestRun("Client-Cert-Pre") s_server = tr.Processes.Process( - "s_server", - "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format( - ts.Variables.SSLDir, - ts.Variables.s_server_port)) + "s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format( + ts.Variables.SSLDir, ts.Variables.s_server_port)) s_server.Ready = When.PortReady(ts.Variables.s_server_port) tr.Command = 'curl --verbose --insecure --ipv4 --header "Host: foo.com" https://localhost:{}'.format(ts.Variables.ssl_port) tr.Processes.Default.StartBefore(s_server) @@ -125,8 +119,7 @@ tr.Processes.Default.Env = ts.Env tr.Processes.Default.Command = ( 'mv {0}/client2.pem 
{0}/client1.pem && {1}/traffic_ctl plugin msg cert_update.client {0}/client1.pem'.format( - ts.Variables.SSLDir, ts.Variables.BINDIR) -) + ts.Variables.SSLDir, ts.Variables.BINDIR)) ts.Disk.traffic_out.Content = "gold/update.gold" ts.StillRunningAfter = server @@ -134,10 +127,8 @@ # after use traffic_ctl to update client cert, s_server should see client (Traffic Server) as bob.com tr = Test.AddTestRun("Client-Cert-After") s_server = tr.Processes.Process( - "s_server", - "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format( - ts.Variables.SSLDir, - ts.Variables.s_server_port)) + "s_server", "openssl s_server -www -key {0}/server1.pem -cert {0}/server1.pem -accept {1} -Verify 1 -msg".format( + ts.Variables.SSLDir, ts.Variables.s_server_port)) s_server.Ready = When.PortReady(ts.Variables.s_server_port) tr.Processes.Default.Env = ts.Env # Move client2.pem to replace client1.pem since cert path matters in client context mapping diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/client_context_dump/client_context_dump.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/client_context_dump/client_context_dump.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/client_context_dump/client_context_dump.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/client_context_dump/client_context_dump.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - Test.Summary = ''' Test client_context_dump plugin ''' @@ -31,26 +30,26 @@ ts.addSSLfile("ssl/one.com.pem") ts.addSSLfile("ssl/two.com.pem") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'client_context_dump', - 'proxy.config.ssl.server.cert.path': '{}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.path': '{}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.private_key.path': '{}'.format(ts.Variables.SSLDir), -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=one.com.pem ssl_key_name=one.com.pem' -) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: "*one.com"', - ' client_cert: "one.com.pem"', - '- fqdn: "*two.com"', - ' client_cert: "two.com.pem"' -]) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'client_context_dump', + 'proxy.config.ssl.server.cert.path': '{}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.path': '{}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.private_key.path': '{}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=one.com.pem ssl_key_name=one.com.pem') + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: "*one.com"', + ' client_cert: "one.com.pem"', + '- fqdn: "*two.com"', + ' client_cert: "two.com.pem"', + ]) # Set up plugin Test.PrepareInstalledPlugin('client_context_dump.so', ts) @@ -72,7 +71,5 @@ # Client contexts test tr = Test.AddTestRun() tr.Processes.Default.Env = ts.Env -tr.Processes.Default.Command = ( - '{0}/traffic_ctl plugin msg client_context_dump.t 1'.format(ts.Variables.BINDIR) -) +tr.Processes.Default.Command = ('{0}/traffic_ctl plugin msg client_context_dump.t 
1'.format(ts.Variables.BINDIR)) tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/combo_handler/combo_handler.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/combo_handler/combo_handler.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/combo_handler/combo_handler.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/combo_handler/combo_handler.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,9 +25,7 @@ # Skip if plugin not present. # -Test.SkipUnless( - Condition.PluginExists('combo_handler.so'), -) +Test.SkipUnless(Condition.PluginExists('combo_handler.so'),) # Function to generate a unique data file path (in the top level of the test's run directory), put data (in string 'data') into # the file, and return the file name. @@ -43,6 +41,7 @@ f.write(data) return file_path + # Function to return command (string) to run tcp_client.py tool. 'host' 'port', and 'file_path' are the parameters to tcp_client. # @@ -50,6 +49,7 @@ def tcp_client_cmd(host, port, file_path): return f"{sys.executable} {Test.Variables.AtsTestToolsDir}/tcp_client.py {host} {port} {file_path}" + # Function to return command (string) to run tcp_client.py tool. 'host' and 'port' are the first two parameters to tcp_client. # 'data' is the data to put in the data file input to tcp_client. # @@ -64,19 +64,15 @@ def add_server_obj(content_type, path): request_header = { - "headers": "GET " + path + " HTTP/1.1\r\n" + - "Host: just.any.thing\r\n\r\n", + "headers": "GET " + path + " HTTP/1.1\r\n" + "Host: just.any.thing\r\n\r\n", "timestamp": "1469733493.993", "body": "" } response_header = { - "headers": "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "359670651"\r\n' + - "Cache-Control: public, max-age=31536000\r\n" + - "Accept-Ranges: bytes\r\n" + - "Content-Type: " + content_type + "\r\n" + - "\r\n", + "headers": + "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "359670651"\r\n' + + "Cache-Control: public, max-age=31536000\r\n" + "Accept-Ranges: bytes\r\n" + "Content-Type: " + content_type + "\r\n" + + "\r\n", "timestamp": "1469733493.993", "body": "Content for " + path + "\n" } @@ -97,15 +93,9 @@ ts.Disk.plugin_config.AddLine("combo_handler.so - - - ctwl.txt") -ts.Disk.remap_config.AddLine( - 'map http://xyz/ http://127.0.0.1/ @plugin=combo_handler.so' -) -ts.Disk.remap_config.AddLine( - f'map http://localhost/127.0.0.1/ http://127.0.0.1:{server.Variables.Port}/' -) -ts.Disk.remap_config.AddLine( - f'map http://localhost/sub/ http://127.0.0.1:{server.Variables.Port}/sub/' -) +ts.Disk.remap_config.AddLine('map http://xyz/ http://127.0.0.1/ @plugin=combo_handler.so') +ts.Disk.remap_config.AddLine(f'map http://localhost/127.0.0.1/ http://127.0.0.1:{server.Variables.Port}/') +ts.Disk.remap_config.AddLine(f'map http://localhost/sub/ http://127.0.0.1:{server.Variables.Port}/sub/') # Configure the combo_handler's configuration file. 
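The combo_handler runs just below drive Traffic Server with hand-written HTTP/1.1 requests piped through the test suite's tcp_client.py tool rather than curl, so the gold files can compare the raw byte stream. A minimal sketch of that pattern, assuming the autest Test global available in these files; write_request_file is an illustrative stand-in for the unique-data-file helper this test defines:

import os
import sys

# Put the raw request into the run directory and return its path
# (stand-in for the unique-path helper defined above).
def write_request_file(name, data):
    file_path = os.path.join(Test.RunDirectory, name)
    with open(file_path, 'w') as f:
        f.write(data)
    return file_path

# Compose the tcp_client.py command line, as tcp_client_cmd() does above.
def tcp_client_cmd(host, port, file_path):
    return f"{sys.executable} {Test.Variables.AtsTestToolsDir}/tcp_client.py {host} {port} {file_path}"

# A combo request for three objects, the second resolved through the 'sub' prefix.
raw_request = ("GET /admin/v1/combo?obj1&sub:obj2&obj3 HTTP/1.1\n"
               "Host: xyz\n"
               "Connection: close\n"
               "\n")
request_file = write_request_file("combo_request.txt", raw_request)
# tr.Processes.Default.Command = tcp_client_cmd("127.0.0.1", ts.Variables.port, request_file)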
ts.Setup.Copy("ctwl.txt", ts.Variables.CONFIGDIR) @@ -117,23 +107,17 @@ tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = tcp_client("127.0.0.1", ts.Variables.port, - "GET /admin/v1/combo?obj1&sub:obj2&obj3 HTTP/1.1\n" + - "Host: xyz\n" + - "Connection: close\n" + - "\n" - ) +tr.Processes.Default.Command = tcp_client( + "127.0.0.1", ts.Variables.port, + "GET /admin/v1/combo?obj1&sub:obj2&obj3 HTTP/1.1\n" + "Host: xyz\n" + "Connection: close\n" + "\n") tr.Processes.Default.ReturnCode = 0 f = tr.Disk.File("_output/1-tr-Default/stream.all.txt") f.Content = "combo_handler_files/tr1.gold" tr = Test.AddTestRun() -tr.Processes.Default.Command = tcp_client("127.0.0.1", ts.Variables.port, - "GET /admin/v1/combo?obj1&sub:obj2&obj4 HTTP/1.1\n" + - "Host: xyz\n" + - "Connection: close\n" + - "\n" - ) +tr.Processes.Default.Command = tcp_client( + "127.0.0.1", ts.Variables.port, + "GET /admin/v1/combo?obj1&sub:obj2&obj4 HTTP/1.1\n" + "Host: xyz\n" + "Connection: close\n" + "\n") tr.Processes.Default.ReturnCode = 0 f = tr.Disk.File("_output/2-tr-Default/stream.all.txt") f.Content = "combo_handler_files/tr2.gold" diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/compress/compress.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/compress/compress.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/compress/compress.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/compress/compress.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,10 +25,7 @@ # Skip if plugins not present. # Test.SkipUnless( - Condition.PluginExists('compress.so'), - Condition.PluginExists('conf_remap.so'), - Condition.HasATSFeature('TS_HAS_BROTLI') -) + Condition.PluginExists('compress.so'), Condition.PluginExists('conf_remap.so'), Condition.HasATSFeature('TS_HAS_BROTLI')) server = Test.MakeOriginServer("server", options={'--load': '{}/compress_observer.py'.format(Test.TestDirectory)}) @@ -47,28 +44,28 @@ # expected response from the origin server response_header = { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n" + - 'Etag: "359670651"\r\n' + - "Cache-Control: public, max-age=31536000\r\n" + - "Accept-Ranges: bytes\r\n" + - "Content-Type: text/javascript\r\n" + - "\r\n", + "headers": + "HTTP/1.1 200 OK\r\nConnection: close\r\n" + 'Etag: "359670651"\r\n' + "Cache-Control: public, max-age=31536000\r\n" + + "Accept-Ranges: bytes\r\n" + "Content-Type: text/javascript\r\n" + "\r\n", "timestamp": "1469733493.993", "body": body } for i in range(3): # add request/response to the server dictionary request_header = { - "headers": "GET /obj{} HTTP/1.1\r\nHost: just.any.thing\r\n\r\n".format(i), "timestamp": "1469733493.993", "body": "" + "headers": "GET /obj{} HTTP/1.1\r\nHost: just.any.thing\r\n\r\n".format(i), + "timestamp": "1469733493.993", + "body": "" } server.addResponse("sessionfile.log", request_header, response_header) - # post for the origin server post_request_header = { - "headers": "POST /obj3 HTTP/1.1\r\nHost: just.any.thing\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n", + "headers": + "POST /obj3 HTTP/1.1\r\nHost: just.any.thing\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 11\r\n\r\n", "timestamp": "1469733493.993", - "body": "knock knock"} + "body": "knock knock" +} server.addResponse("sessionfile.log", post_request_header, response_header) @@ -77,8 +74,7 @@ "curl --verbose --proxy http://127.0.0.1:{}".format(ts.Variables.port) + " --header 
'X-Ats-Compress-Test: {}/{}'".format(idx, encodingList) + " --header 'Accept-Encoding: {0}' 'http://ae-{1}/obj{1}'".format(encodingList, idx) + - " 2>> compress_long.log ; printf '\n===\n' >> compress_long.log" - ) + " 2>> compress_long.log ; printf '\n===\n' >> compress_long.log") def curl_post(ts, idx, encodingList): @@ -86,8 +82,7 @@ "curl --verbose -d 'knock knock' --proxy http://127.0.0.1:{}".format(ts.Variables.port) + " --header 'X-Ats-Compress-Test: {}/{}'".format(idx, encodingList) + " --header 'Accept-Encoding: {0}' 'http://ae-{1}/obj{1}'".format(encodingList, idx) + - " 2>> compress_long.log ; printf '\n===\n' >> compress_long.log" - ) + " 2>> compress_long.log ; printf '\n===\n' >> compress_long.log") waitForServer = True @@ -96,33 +91,30 @@ ts = Test.MakeATSProcess("ts", enable_cache=False) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'compress', - 'proxy.config.http.normalize_ae': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'compress', + 'proxy.config.http.normalize_ae': 0, + }) ts.Setup.Copy("compress.config") ts.Setup.Copy("compress2.config") ts.Disk.remap_config.AddLine( 'map http://ae-0/ http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory) -) + ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory)) ts.Disk.remap_config.AddLine( 'map http://ae-1/ http://127.0.0.1:{}/'.format(server.Variables.Port) + ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=1' + - ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory) -) + ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory)) ts.Disk.remap_config.AddLine( 'map http://ae-2/ http://127.0.0.1:{}/'.format(server.Variables.Port) + ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=2' + - ' @plugin=compress.so @pparam={}/compress2.config'.format(Test.RunDirectory) -) + ' @plugin=compress.so @pparam={}/compress2.config'.format(Test.RunDirectory)) ts.Disk.remap_config.AddLine( 'map http://ae-3/ http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory) -) + ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory)) for i in range(3): @@ -199,8 +191,8 @@ tr = Test.AddTestRun() tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( - r"tr -d '\r' < compress_long.log | sed 's/\(..*\)\([<>]\)/\1\n\2/' | {0}/greplog.sh > compress_short.log" -).format(Test.TestDirectory) + r"tr -d '\r' < compress_long.log | sed 's/\(..*\)\([<>]\)/\1\n\2/' | {0}/greplog.sh > compress_short.log").format( + Test.TestDirectory) f = tr.Disk.File("compress_short.log") f.Content = "compress.gold" diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/bucketcookie.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/bucketcookie.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/bucketcookie.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/bucketcookie.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
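The compress test just above maps several ae-N hostnames to the same origin so each Accept-Encoding normalization mode can be exercised side by side: conf_remap.so overrides proxy.config.http.normalize_ae for a single rule, and compress.so then applies the compress.config copied into the run directory. A condensed sketch of that wiring (the full test also adds ae-2 with a second config file and ae-3 without conf_remap), assuming the server and ts objects defined there:

# compress.so compresses responses per the copied compress.config; prefixing
# conf_remap.so lets one remap rule override proxy.config.http.normalize_ae
# without touching the global records.config value.
origin = 'http://127.0.0.1:{}/'.format(server.Variables.Port)
ts.Disk.remap_config.AddLine(
    'map http://ae-0/ ' + origin +
    ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory))
ts.Disk.remap_config.AddLine(
    'map http://ae-1/ ' + origin +
    ' @plugin=conf_remap.so @pparam=proxy.config.http.normalize_ae=1' +
    ' @plugin=compress.so @pparam={}/compress.config'.format(Test.RunDirectory))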
import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/bucketconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/bucketconfig.txt') # Cookie value in bucket tr = Test.AddTestRun("cookie value in bucket") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/collapseslashes.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/collapseslashes.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/collapseslashes.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/collapseslashes.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
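The bucketcookie hunk above follows the skeleton shared by the rest of the cookie_remap tests in this series: two canned origins (the second only reachable when the rule picks the alternative mapping), a remap rule whose target should never be hit because cookie_remap chooses the real destination from its config file, and plugin debug tags in records.config. A trimmed sketch of that skeleton, assuming the autest globals used in these files; the dictionary names are illustrative:

# Primary origin, reached when the cookie rule matches.
server = Test.MakeOriginServer("server", ip='127.0.0.10')
matching_req = {
    "headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
    "timestamp": "1469733493.993",
    "body": ""
}
ok_resp = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionfile.log", matching_req, ok_resp)

# The remap target is a placeholder; cookie_remap rewrites the request
# according to the per-test config shipped under config/.
ts.Disk.remap_config.AddLine(
    'map http://www.example.com/magic http://shouldnothit.com'
    ' @plugin=cookie_remap.so @pparam=config/bucketconfig.txt')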
import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # and verify it collapsed the double // server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /i/like/cheetos?.done=http://finance.yahoo.com HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /i/like/cheetos?.done=http://finance.yahoo.com HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -44,10 +48,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) @@ -55,8 +60,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/collapseconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/collapseconfig.txt') tr = Test.AddTestRun("collapse consecutive forward slashes") tr.Processes.Default.Command = ''' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/connector.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/connector.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/connector.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/connector.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
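The cache_range_requests and slice hunks near the top of this section all verify cache behaviour the same way: the client sends an x-debug request header, the xdebug plugin (loaded via plugin.config in those tests) echoes the computed cache key and cache status back as response headers, and the run asserts on them. A minimal sketch of that check, assuming the Test and Testers globals; the URL and range are illustrative:

# xdebug reflects the cache result (X-Cache) and the cache key (X-Cache-Key),
# which the range and cache-key plugins may have rewritten.
curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{} -H "x-debug: x-cache-key"'.format(ts.Variables.port)

tr = Test.AddTestRun("range request lands on the block-aligned cache key")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://www.example.com/obj -r 0-5000'
ps.ReturnCode = 0
ps.Streams.stdout.Content = Testers.ContainsExpression("X-Cache: miss", "expected cache miss")
ps.Streams.stdout.Content = Testers.ContainsExpression(
    "X-Cache-Key: /.*?/Range:bytes=0-4194303/obj", "expected block-aligned cache key")
tr.StillRunningAfter = ts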
import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/connectorconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/connectorconfig.txt') # Positive test case that remaps because all connected operations pass tr = Test.AddTestRun("cookie value matches") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/existscookie.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/existscookie.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/existscookie.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/existscookie.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookieexists HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookieexists HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookiedoesntexist HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookiedoesntexist HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/existsconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/existsconfig.txt') # Positive test case that remaps because cookie exists tr = Test.AddTestRun("cookie fpbeta exists") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/matchcookie.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/matchcookie.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/matchcookie.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/matchcookie.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
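The cookie_remap tests in this stretch, existscookie included, keep their rules in a template under the test's config/ directory and patch in the dynamically allocated origin ports before Traffic Server starts; config_path and the config1 disk file are set up in parts of these files not shown in the hunks. A short sketch of that substitution step:

# The template uses $PORT/$ALTPORT placeholders because autest assigns the
# origin ports at run time; substitute them, then let the framework write
# the finished config next to the other Traffic Server configuration.
with open(config_path, 'r') as config_file:
    config1 = config_file.read()

config1 = config1.replace("$PORT", str(server.Variables.Port))
config1 = config1.replace("$ALTPORT", str(server2.Variables.Port))

ts.Disk.config1.WriteOn(config1)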
import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookiematches?a=1&b=2&c=3 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookiematches?a=1&b=2&c=3 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookiedoesntmatch?a=1&b=2&c=3 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookiedoesntmatch?a=1&b=2&c=3 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/matchconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/matchconfig.txt') # Positive test case that remaps because cookie matches tr = Test.AddTestRun("cookie value matches") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/matchuri.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/matchuri.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/matchuri.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/matchuri.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/matchuriconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/matchuriconfig.txt') # Positive test case, URI matches rule tr = Test.AddTestRun("URI value matches") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/matrixparams.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/matrixparams.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/matrixparams.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/matrixparams.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
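Further back in this section, cache_range_requests_ims.test.py sets up two remap rules against the same origin: one relying on the plugin's default If-Modified-Since handling and one routing the date through a custom CrrIms header. A sketch of those rules as they appear in that hunk, assuming the same server variable:

ts.Disk.remap_config.AddLines([
    # --consider-ims: per the option name, let an If-Modified-Since request
    # influence whether the cached range is revalidated against the origin.
    f'map http://ims http://127.0.0.1:{server.Variables.Port}'
    ' @plugin=cache_range_requests.so @pparam=--consider-ims',
    # --ims-header: carry that date in a named request header (CrrIms) instead.
    f'map http://imsheader http://127.0.0.1:{server.Variables.Port}'
    ' @plugin=cache_range_requests.so @pparam=--consider-ims @pparam=--ims-header=CrrIms',
])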
import os + Test.Summary = ''' ''' @@ -32,26 +33,38 @@ # That's why I am not adding any canned request/response server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /eighth/magic;matrix=1/eighth HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /eighth/magic;matrix=1/eighth HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) -request_header_2 = {"headers": "GET /eighth/magic;matrix=1/eighth?hello=10 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header_2 = { + "headers": "GET /eighth/magic;matrix=1/eighth?hello=10 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header_2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header_2, response_header_2) -request_header_3 = {"headers": "GET /tenth/magic/tenth;matrix=2 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header_3 = { + "headers": "GET /tenth/magic/tenth;matrix=2 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header_3 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header_3, response_header_3) -request_header_4 = {"headers": "GET /tenth/magic/tenth;matrix=2?query=10 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header_4 = { + "headers": "GET /tenth/magic/tenth;matrix=2?query=10 HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header_4 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header_4, response_header_4) @@ -59,7 +72,8 @@ request_header_5 = { "headers": "GET /eleventh/magic;matrix=4/eleventh;matrix=2?query=true HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", - "body": ""} + "body": "" +} response_header_5 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header_5, response_header_5) @@ -69,10 +83,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) @@ -80,17 +95,13 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/eighth http://shouldnothit.com/eighth @plugin=cookie_remap.so @pparam=config/matrixconfig.txt' -) + 'map http://www.example.com/eighth http://shouldnothit.com/eighth @plugin=cookie_remap.so @pparam=config/matrixconfig.txt') ts.Disk.remap_config.AddLine( - 'map http://www.example.com/ninth http://shouldnothit.com/ninth 
@plugin=cookie_remap.so @pparam=config/matrixconfig.txt' -) + 'map http://www.example.com/ninth http://shouldnothit.com/ninth @plugin=cookie_remap.so @pparam=config/matrixconfig.txt') ts.Disk.remap_config.AddLine( - 'map http://www.example.com/tenth http://shouldnothit.com/tenth @plugin=cookie_remap.so @pparam=config/matrixconfig.txt' -) + 'map http://www.example.com/tenth http://shouldnothit.com/tenth @plugin=cookie_remap.so @pparam=config/matrixconfig.txt') ts.Disk.remap_config.AddLine( - 'map http://www.example.com/eleventh http://shouldnothit.com/eleventh @plugin=cookie_remap.so @pparam=config/matrixconfig.txt' -) + 'map http://www.example.com/eleventh http://shouldnothit.com/eleventh @plugin=cookie_remap.so @pparam=config/matrixconfig.txt') tr = Test.AddTestRun("path is substituted") tr.Processes.Default.Command = ''' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/notexistscookie.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/notexistscookie.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/notexistscookie.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/notexistscookie.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookiedoesntexist HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookiedoesntexist HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookieexists HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookieexists HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/notexistsconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/notexistsconfig.txt') # Positive test case that remaps because cookie doesn't exist tr = Test.AddTestRun("cookie fpbeta doesn't exist") diff -Nru 
trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/pcollapseslashes.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/pcollapseslashes.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/pcollapseslashes.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/pcollapseslashes.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # and verify it collapsed the double // server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /i/like/cheetos?.done=http://finance.yahoo.com HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /i/like/cheetos?.done=http://finance.yahoo.com HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -44,10 +48,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) @@ -55,8 +60,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/orig_path http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/collapseconfig.txt' -) + 'map http://www.example.com/orig_path http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/collapseconfig.txt') tr = Test.AddTestRun("collapse consecutive forward slashes") tr.Processes.Default.Command = ''' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/psubstitute.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/psubstitute.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/psubstitute.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/psubstitute.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
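The matrixparams and psubstitute hunks around here prime the origin with the exact substituted URLs the plugin is expected to produce, matrix parameters and URL-encoded queries included, so a 200 from the mapped server indicates the rewrite came out as intended. A small sketch of one such entry, taken from the psubstitute hunk with illustrative dictionary names:

# Only a request whose path and query match this canned entry gets a 200,
# which is how the run distinguishes one substitution result from another.
expected_req = {
    "headers": "GET /photos/search?query=magic HTTP/1.1\r\nHost: www.example.com\r\n\r\n",
    "timestamp": "1469733493.993",
    "body": ""
}
ok_resp = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionfile.log", expected_req, ok_resp)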
import os + Test.Summary = ''' ''' @@ -29,28 +30,39 @@ server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /photos/search?query=magic HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /photos/search?query=magic HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) -request_header = {"headers": "GET /photos/search?query=/theunmatchedpath HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /photos/search?query=/theunmatchedpath HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) -request_header = {"headers": "GET /photos/search/magic/foobar HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /photos/search/magic/foobar HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) request_header = { - "headers": "GET /photos/search/cr_substitutions?query=%28http%3A%2F%2Fwww%2Eexample%2Ecom%2Fmagic HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "headers": + "GET /photos/search/cr_substitutions?query=%28http%3A%2F%2Fwww%2Eexample%2Ecom%2Fmagic HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", - "body": ""} + "body": "" +} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) @@ -60,10 +72,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) @@ -71,8 +84,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com/not-used @plugin=cookie_remap.so @pparam=config/substituteconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com/not-used @plugin=cookie_remap.so @pparam=config/substituteconfig.txt') tr = Test.AddTestRun("Substitute $ppath in the dest query") tr.Processes.Default.Command = ''' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/regexcookie.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/regexcookie.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/regexcookie.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/regexcookie.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations 
under the License. import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /regexmatches?cookies=oreos-chipsahoy-icecream HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /regexmatches?cookies=oreos-chipsahoy-icecream HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /regexdoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /regexdoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/regexconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/regexconfig.txt') # Positive test case that remaps because cookie regex matches tr = Test.AddTestRun("cookie regex matches") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/setstatus.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/setstatus.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/setstatus.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/setstatus.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
import os + Test.Summary = ''' ''' @@ -32,17 +33,17 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/statusconfig.txt", id="config1") ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/statusconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com @plugin=cookie_remap.so @pparam=config/statusconfig.txt') # Plugin sets the HTTP status because first rule matches tr = Test.AddTestRun("Sets the status to 205") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/subcookie.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/subcookie.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/subcookie.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/subcookie.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. import os + Test.Summary = ''' ''' @@ -31,8 +32,11 @@ # second server is run during second test server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /cookiematches HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -40,8 +44,11 @@ server.addResponse("sessionfile.log", request_header, response_header) server2 = Test.MakeOriginServer("server2", ip='127.0.0.11') -request_header2 = {"headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = { + "headers": "GET /cookiedoesntmatch HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # expected response from the origin server response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,10 +60,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) config1 = config1.replace("$ALTPORT", str(server2.Variables.Port)) @@ -65,8 +73,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/subcookie.txt' -) + 'map http://www.example.com http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/subcookie.txt') # Positive test case that remaps because all connected operations pass tr = Test.AddTestRun("cookie value matches") diff -Nru 
trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/substitute.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/substitute.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cookie_remap/substitute.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cookie_remap/substitute.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. import os + Test.Summary = ''' ''' @@ -29,20 +30,29 @@ server = Test.MakeOriginServer("server", ip='127.0.0.10') -request_header = {"headers": "GET /photos/search?query=magic HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = { + "headers": "GET /photos/search?query=magic HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) -request_header_2 = {"headers": "GET /photos/search?query=/theunmatchedpath HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header_2 = { + "headers": "GET /photos/search?query=/theunmatchedpath HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header_2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header_2, response_header_2) -request_header_3 = {"headers": "GET /photos/search/magic/foobar HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header_3 = { + "headers": "GET /photos/search/magic/foobar HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} response_header_3 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header_3, response_header_3) @@ -52,10 +62,11 @@ with open(config_path, 'r') as config_file: config1 = config_file.read() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'cookie_remap.*|http.*|dns.*', + }) config1 = config1.replace("$PORT", str(server.Variables.Port)) @@ -63,8 +74,7 @@ ts.Disk.config1.WriteOn(config1) ts.Disk.remap_config.AddLine( - 'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/substituteconfig.txt' -) + 'map http://www.example.com/magic http://shouldnothit.com/magic @plugin=cookie_remap.so @pparam=config/substituteconfig.txt') tr = Test.AddTestRun("Substitute $path in the dest query") tr.Processes.Default.Command = ''' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cppapi/cppapi.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cppapi/cppapi.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/cppapi/cppapi.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/cppapi/cppapi.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ import os - Test.Summary = ''' Execute plugin with cppapi tests. 
''' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/esi/esi.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/esi/esi.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/esi/esi.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/esi/esi.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ Test the ESI plugin. ''' -Test.SkipUnless( - Condition.PluginExists('esi.so'), -) +Test.SkipUnless(Condition.PluginExists('esi.so'),) class EsiTest(): @@ -33,16 +31,12 @@ A class that encapsulates the configuration and execution of a set of EPI test cases. """ - """ static: The same server Process is used across all tests. """ _server = None - """ static: A counter to keep the ATS process names unique across tests. """ _ts_counter = 0 - """ static: A counter to keep any output file names unique across tests. """ _output_counter = 0 - """ The ATS process for this set of test cases. """ _ts = None @@ -69,10 +63,9 @@ # See: # doc/admin-guide/plugins/esi.en.rst request_header = { - "headers": ( - "GET /esi.php HTTP/1.1\r\n" - "Host: www.example.com\r\n" - "Content-Length: 0\r\n\r\n"), + "headers": ("GET /esi.php HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Content-Length: 0\r\n\r\n"), "timestamp": "1469733493.993", "body": "" } @@ -84,23 +77,23 @@ ''' response_header = { - "headers": ( - "HTTP/1.1 200 OK\r\n" - "Content-Type: text/html\r\n" - "X-Esi: 1\r\n" - "Connection: close\r\n" - "Content-Length: {}\r\n" - "Cache-Control: max-age=300\r\n" - "\r\n".format(len(esi_body))), + "headers": + ( + "HTTP/1.1 200 OK\r\n" + "Content-Type: text/html\r\n" + "X-Esi: 1\r\n" + "Connection: close\r\n" + "Content-Length: {}\r\n" + "Cache-Control: max-age=300\r\n" + "\r\n".format(len(esi_body))), "timestamp": "1469733493.993", "body": esi_body } server.addResponse("sessionfile.log", request_header, response_header) request_header = { - "headers": ( - "GET /date.php HTTP/1.1\r\n" - "Host: www.example.com\r\n" - "Content-Length: 0\r\n\r\n"), + "headers": ("GET /date.php HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Content-Length: 0\r\n\r\n"), "timestamp": "1469733493.993", "body": "" } @@ -110,34 +103,35 @@ ?> ''' response_header = { - "headers": ( - "HTTP/1.1 200 OK\r\n" - "Content-Type: text/html\r\n" - "Connection: close\r\n" - "Content-Length: {}\r\n" - "Cache-Control: max-age=300\r\n" - "\r\n".format(len(date_body))), + "headers": + ( + "HTTP/1.1 200 OK\r\n" + "Content-Type: text/html\r\n" + "Connection: close\r\n" + "Content-Length: {}\r\n" + "Cache-Control: max-age=300\r\n" + "\r\n".format(len(date_body))), "timestamp": "1469733493.993", "body": date_body } server.addResponse("sessionfile.log", request_header, response_header) # Verify correct functionality with an empty body. 
request_header = { - "headers": ( - "GET /expect_empty_body HTTP/1.1\r\n" - "Host: www.example.com\r\n" - "Content-Length: 0\r\n\r\n"), + "headers": ("GET /expect_empty_body HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "Content-Length: 0\r\n\r\n"), "timestamp": "1469733493.993", "body": "" } response_header = { - "headers": ( - "HTTP/1.1 200 OK\r\n" - "X-ESI: On\r\n" - "Content-Length: 0\r\n" - "Connection: close\r\n" - "Content-Type: text/html; charset=UTF-8\r\n" - "\r\n"), + "headers": + ( + "HTTP/1.1 200 OK\r\n" + "X-ESI: On\r\n" + "Content-Length: 0\r\n" + "Connection: close\r\n" + "Content-Type: text/html; charset=UTF-8\r\n" + "\r\n"), "timestamp": "1469733493.993", "body": "" } @@ -165,9 +159,7 @@ 'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'http|plugin_esi', }) - ts.Disk.remap_config.AddLine( - 'map http://www.example.com/ http://127.0.0.1:{0}'.format(EsiTest._server.Variables.Port) - ) + ts.Disk.remap_config.AddLine('map http://www.example.com/ http://127.0.0.1:{0}'.format(EsiTest._server.Variables.Port)) ts.Disk.plugin_config.AddLine(plugin_config) # Create a run to start the ATS process. @@ -206,9 +198,7 @@ # Test 3: Verify the ESI plugin can gzip a response when the client accepts it. tr = Test.AddTestRun("Verify the ESI plugin can gzip a response") EsiTest._output_counter += 1 - unzipped_body_file = os.path.join( - tr.RunDirectory, - "non_empty_curl_output_{}".format(EsiTest._output_counter)) + unzipped_body_file = os.path.join(tr.RunDirectory, "non_empty_curl_output_{}".format(EsiTest._output_counter)) gzipped_body_file = unzipped_body_file + ".gz" tr.Processes.Default.Command = \ ('curl http://127.0.0.1:{0}/esi.php -H"Host: www.example.com" ' @@ -233,9 +223,7 @@ # Test 4: Verify correct handling of a gzipped empty response body. tr = Test.AddTestRun("Verify we can handle an empty response.") EsiTest._output_counter += 1 - empty_body_file = os.path.join( - tr.RunDirectory, - "empty_curl_output_{}".format(EsiTest._output_counter)) + empty_body_file = os.path.join(tr.RunDirectory, "empty_curl_output_{}".format(EsiTest._output_counter)) gzipped_empty_body = empty_body_file + ".gz" tr.Processes.Default.Command = \ ('curl http://127.0.0.1:{0}/expect_empty_body -H"Host: www.example.com" ' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/esi/esi_304.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/esi/esi_304.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/esi/esi_304.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/esi/esi_304.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ Test the ESI plugin when origin returns 304 response. ''' -Test.SkipUnless( - Condition.PluginExists('esi.so'), -) +Test.SkipUnless(Condition.PluginExists('esi.so'),) class EsiTest(): @@ -33,16 +31,12 @@ A class that encapsulates the configuration and execution of a set of ESI test cases. """ - """ static: The same server Process is used across all tests. """ _server = None - """ static: A counter to keep the ATS process names unique across tests. """ _ts_counter = 0 - """ static: A counter to keep any output file names unique across tests. """ _output_counter = 0 - """ The ATS process for this set of test cases. """ _ts = None @@ -68,10 +62,7 @@ # Generate the set of ESI responses. 
request_header = { "headers": - "GET /esi_etag.php HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: first\r\n" + - "Content-Length: 0\r\n\r\n", + "GET /esi_etag.php HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: first\r\n" + "Content-Length: 0\r\n\r\n", "timestamp": "1469733493.993", "body": "" } @@ -83,14 +74,8 @@ ''' response_header = { "headers": - "HTTP/1.1 200 OK\r\n" + - "X-Esi: 1\r\n" + - "Cache-Control: public, max-age=0\r\n" + - 'Etag: "esi_304_test"\r\n' + - "Content-Type: text/html\r\n" + - "Connection: close\r\n" + - "Content-Length: {}\r\n".format(len(esi_body)) + - "\r\n", + "HTTP/1.1 200 OK\r\n" + "X-Esi: 1\r\n" + "Cache-Control: public, max-age=0\r\n" + 'Etag: "esi_304_test"\r\n' + + "Content-Type: text/html\r\n" + "Connection: close\r\n" + "Content-Length: {}\r\n".format(len(esi_body)) + "\r\n", "timestamp": "1469733493.993", "body": esi_body } @@ -98,32 +83,22 @@ request_header = { "headers": - "GET /esi_etag.php HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: second\r\n" + - 'If-None-Match: "esi_304_test"\r\n' + - "Content-Length: 0\r\n\r\n", + "GET /esi_etag.php HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: second\r\n" + + 'If-None-Match: "esi_304_test"\r\n' + "Content-Length: 0\r\n\r\n", "timestamp": "1469733493.993", "body": "" } response_header = { "headers": - "HTTP/1.1 304 Not Modified\r\n" + - "Content-Type: text/html\r\n" + - "Connection: close\r\n" + - "Content-Length: 0\r\n" + - "\r\n", + "HTTP/1.1 304 Not Modified\r\n" + "Content-Type: text/html\r\n" + "Connection: close\r\n" + + "Content-Length: 0\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "" } server.addResponse("sessionfile.log", request_header, response_header) request_header = { - "headers": - "GET /date.php HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: date\r\n" + - "Content-Length: 0\r\n\r\n", + "headers": "GET /date.php HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: date\r\n" + "Content-Length: 0\r\n\r\n", "timestamp": "1469733493.993", "body": "" } @@ -132,11 +107,8 @@ ''' response_header = { "headers": - "HTTP/1.1 200 OK\r\n" + - "Content-Type: text/html\r\n" + - "Connection: close\r\n" + - "Content-Length: {}\r\n".format(len(date_body)) + - "\r\n", + "HTTP/1.1 200 OK\r\n" + "Content-Type: text/html\r\n" + "Connection: close\r\n" + + "Content-Length: {}\r\n".format(len(date_body)) + "\r\n", "timestamp": "1469733493.993", "body": date_body } @@ -164,9 +136,7 @@ 'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'http|plugin_esi', }) - ts.Disk.remap_config.AddLine( - 'map http://www.example.com/ http://127.0.0.1:{0}'.format(EsiTest._server.Variables.Port) - ) + ts.Disk.remap_config.AddLine('map http://www.example.com/ http://127.0.0.1:{0}'.format(EsiTest._server.Variables.Port)) ts.Disk.plugin_config.AddLine(plugin_config) # Create a run to start the ATS process. 
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -38,15 +38,9 @@ }) # The following rule changes the status code returned from origin server to 303 ts.Setup.CopyAs('rules/rule.conf', Test.RunDirectory) -ts.Disk.plugin_config.AddLine( - 'header_rewrite.so {0}/rule.conf'.format(Test.RunDirectory) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.plugin_config.AddLine('header_rewrite.so {0}/rule.conf'.format(Test.RunDirectory)) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port)) # call localhost straight tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_cache.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_cache.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_cache.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_cache.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,16 +23,13 @@ Test.testName = "CACHE" # Request from client -request_header = {"headers": - "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # Expected response from the origin server -response_header = {"headers": - "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: max-age=10,public\r\n\r\n", - "timestamp": "1469733493.993", - "body": "CACHED"} - +response_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nCache-Control: max-age=10,public\r\n\r\n", + "timestamp": "1469733493.993", + "body": "CACHED" +} # add request/response server.addResponse("sessionlog.log", request_header, response_header) @@ -45,15 +42,9 @@ # (ie. "hit-fresh", "hit-stale", "miss", "none") ts.Setup.CopyAs('rules/rule_add_cache_result_header.conf', Test.RunDirectory) -ts.Disk.plugin_config.AddLine( - 'header_rewrite.so {0}/rule_add_cache_result_header.conf'.format(Test.RunDirectory) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.plugin_config.AddLine('header_rewrite.so {0}/rule_add_cache_result_header.conf'.format(Test.RunDirectory)) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) # Commands to get the following response headers # 1. 
miss (empty cache) @@ -64,8 +55,7 @@ 'curl -s -v -H "Host: www.example.com" http://127.0.0.1:{0};' 'curl -v -H "Host: www.example.com" http://127.0.0.1:{0};' 'sleep 15; curl -s -v -H "Host: www.example.com" http://127.0.0.1:{0};' - 'curl -s -v -H "Host: www.example.com" http://127.0.0.1:{0}' -) + 'curl -s -v -H "Host: www.example.com" http://127.0.0.1:{0}') # Test Case tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_method.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_method.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_method.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_method.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -35,20 +35,17 @@ session_file = "sessionfile.log" server.addResponse(session_file, request_get, response) server.addResponse(session_file, request_delete, response) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'header.*', - 'proxy.config.http.insert_response_via_str': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'header.*', + 'proxy.config.http.insert_response_via_str': 0, + }) # The following rule inserts a via header if the request method is a GET or DELETE conf_name = "rule_cond_method.conf" ts.Setup.CopyAs('rules/{0}'.format(conf_name), Test.RunDirectory) -ts.Disk.plugin_config.AddLine( - 'header_rewrite.so {0}/{1}'.format(Test.RunDirectory, conf_name) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.plugin_config.AddLine('header_rewrite.so {0}/{1}'.format(Test.RunDirectory, conf_name)) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) # Test method in READ_REQUEST_HDR_HOOK. expected_output = "gold/header_rewrite_cond_method.gold" diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_ssn_txn_count.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_ssn_txn_count.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_ssn_txn_count.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_cond_ssn_txn_count.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,31 +23,43 @@ Test.testName = "SSN-TXN-COUNT" # Test SSN-TXN-COUNT condition. 
-request_header_hello = {"headers": - "GET /hello HTTP/1.1\r\nHost: www.example.com\r\nContent-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} -response_header_hello = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" - "Content-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} - -request_header_world = {"headers": "GET /world HTTP/1.1\r\nContent-Length: 0\r\n" - "Host: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": "a\r\na\r\na\r\n\r\n"} -response_header_world = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" - "Connection: close\r\nContent-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header_hello = { + "headers": "GET /hello HTTP/1.1\r\nHost: www.example.com\r\nContent-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header_hello = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" + "Content-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} + +request_header_world = { + "headers": "GET /world HTTP/1.1\r\nContent-Length: 0\r\n" + "Host: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "a\r\na\r\na\r\n\r\n" +} +response_header_world = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" + "Connection: close\r\nContent-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" +} # add request/response server.addResponse("sessionlog.log", request_header_hello, response_header_hello) server.addResponse("sessionlog.log", request_header_world, response_header_world) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'header.*', - 'proxy.config.http.auth_server_session_private': 1, - 'proxy.config.http.server_session_sharing.pool': 'global', - 'proxy.config.http.server_session_sharing.match': 'both', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'header.*', + 'proxy.config.http.auth_server_session_private': 1, + 'proxy.config.http.server_session_sharing.pool': 'global', + 'proxy.config.http.server_session_sharing.match': 'both', + }) # In case we need this in the future, just remove the comments. # ts.Disk.logging_yaml.AddLines( @@ -78,8 +90,7 @@ # I have to force last one with close connection header, this is also reflected in the response ^. # if I do not do this, then the microserver will fail to close and when shutting down the process will # fail with -9. 
- 'curl -v -H\'Host: www.example.com\' -H\'Connection: close\' http://127.0.0.1:{0}/world' -) + 'curl -v -H\'Host: www.example.com\' -H\'Connection: close\' http://127.0.0.1:{0}/world') tr = Test.AddTestRun("Add connection close header when ssn-txn-count > 2") tr.Processes.Default.Command = curlRequest.format(ts.Variables.port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_l_value.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_l_value.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_l_value.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_l_value.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -42,15 +42,9 @@ # The following rule adds X-First and X-Last headers ts.Setup.CopyAs('rules/rule_l_value.conf', Test.RunDirectory) -ts.Disk.plugin_config.AddLine( - 'header_rewrite.so {0}/rule_l_value.conf'.format(Test.RunDirectory) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.plugin_config.AddLine('header_rewrite.so {0}/rule_l_value.conf'.format(Test.RunDirectory)) +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port)) # [L] test tr = Test.AddTestRun("Header Rewrite End [L]") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/header_rewrite/header_rewrite_url.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -45,24 +45,20 @@ # This configuration makes use of CLIENT-URL in conditions. ts.Disk.remap_config.AddLine( 'map http://www.example.com/from_path/ https://127.0.0.1:{0}/to_path/ ' - '@plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format( - server.Variables.Port, Test.RunDirectory)) + '@plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format(server.Variables.Port, Test.RunDirectory)) ts.Disk.remap_config.AddLine( 'map http://www.example.com:8080/from_path/ https://127.0.0.1:{0}/to_path/ ' - '@plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format( - server.Variables.Port, Test.RunDirectory)) + '@plugin=header_rewrite.so @pparam={1}/rule_client.conf'.format(server.Variables.Port, Test.RunDirectory)) # This configuration makes use of TO-URL in a set-redirect operator. ts.Disk.remap_config.AddLine( 'map http://no_path.com http://no_path.com?name=brian/ ' - '@plugin=header_rewrite.so @pparam={0}/set_redirect.conf'.format( - Test.RunDirectory)) + '@plugin=header_rewrite.so @pparam={0}/set_redirect.conf'.format(Test.RunDirectory)) # Test CLIENT-URL. 
tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'curl --proxy 127.0.0.1:{0} "http://www.example.com/from_path/hello?=foo=bar" ' - '-H "Proxy-Connection: keep-alive" --verbose'.format( - ts.Variables.port)) + '-H "Proxy-Connection: keep-alive" --verbose'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) @@ -72,8 +68,7 @@ # Test TO-URL in a set-redirect operator. tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl --head 127.0.0.1:{0} -H "Host: no_path.com" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl --head 127.0.0.1:{0} -H "Host: no_path.com" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stderr = "gold/set-redirect.gold" tr.StillRunningAfter = server diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_debug_tags.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_debug_tags.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_debug_tags.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_debug_tags.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,17 +22,13 @@ Test lua is_debug_tag_set functionality ''' -Test.SkipUnless( - Condition.PluginExists('tslua.so'), -) +Test.SkipUnless(Condition.PluginExists('tslua.so'),) Test.ContinueOnFail = False # Define default ATS ts = Test.MakeATSProcess("ts", command="traffic_manager") -ts.Disk.remap_config.AddLine( - 'map http://test http://127.0.0.1/ @plugin=tslua.so @pparam=tags.lua' -) +ts.Disk.remap_config.AddLine('map http://test http://127.0.0.1/ @plugin=tslua.so @pparam=tags.lua') # Configure the tslua's configuration file. ts.Setup.Copy("tags.lua", ts.Variables.CONFIGDIR) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_header_table.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_header_table.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_header_table.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_header_table.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,17 +20,13 @@ Test lua header table functionality ''' -Test.SkipUnless( - Condition.PluginExists('tslua.so'), -) +Test.SkipUnless(Condition.PluginExists('tslua.so'),) Test.ContinueOnFail = True # Define default ATS ts = Test.MakeATSProcess("ts") -ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1 @plugin=tslua.so @pparam=header_table.lua" -) +ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1 @plugin=tslua.so @pparam=header_table.lua") # Configure the tslua's configuration file. 
ts.Setup.Copy("header_table.lua", ts.Variables.CONFIGDIR) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_states_stats.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_states_stats.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_states_stats.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_states_stats.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,9 +20,7 @@ Test lua states and stats functionality ''' -Test.SkipUnless( - Condition.PluginExists('tslua.so'), -) +Test.SkipUnless(Condition.PluginExists('tslua.so'),) Test.ContinueOnFail = True # Define default ATS @@ -38,27 +36,27 @@ Test.Setup.Copy("lifecycle_stats.sh") # test to ensure origin server works -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.remap_config.AddLines({ - 'map / http://127.0.0.1:{}/'.format(server.Variables.Port), - 'map http://hello http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=tslua.so @pparam={}/hello.lua'.format(Test.RunDirectory) -}) +ts.Disk.remap_config.AddLines( + { + 'map / http://127.0.0.1:{}/'.format(server.Variables.Port), + 'map http://hello http://127.0.0.1:{}/'.format(server.Variables.Port) + + ' @plugin=tslua.so @pparam={}/hello.lua'.format(Test.RunDirectory) + }) ts.Disk.plugin_config.AddLine('tslua.so {}/global.lua'.format(Test.RunDirectory)) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ts_lua', - 'proxy.config.plugin.lua.max_states': 4, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ts_lua', + 'proxy.config.plugin.lua.max_states': 4, + }) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{} '.format(ts.Variables.port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_watermark.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_watermark.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/lua/lua_watermark.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/lua/lua_watermark.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,9 +22,7 @@ Test lua functionality ''' -Test.SkipUnless( - Condition.PluginExists('tslua.so'), -) +Test.SkipUnless(Condition.PluginExists('tslua.so'),) Test.ContinueOnFail = True # Define default ATS @@ -32,27 +30,19 @@ server = Test.MakeOriginServer("server") Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 
OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=tslua.so @pparam=watermark.lua' -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{}/'.format(server.Variables.Port) + ' @plugin=tslua.so @pparam=watermark.lua') # Configure the tslua's configuration file. ts.Setup.Copy("watermark.lua", ts.Variables.CONFIGDIR) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ts_lua' -}) +ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'ts_lua'}) # Test for watermark debug output ts.Disk.traffic_out.Content = Testers.ContainsExpression(r"WMbytes\(31337\)", "Upstream watermark should be properly set") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/money_trace/money_trace.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/money_trace/money_trace.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/money_trace/money_trace.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/money_trace/money_trace.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -14,15 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import os + Test.Summary = ''' Test money_trace remap ''' # Test description: -Test.SkipUnless( - Condition.PluginExists('money_trace.so'), -) +Test.SkipUnless(Condition.PluginExists('money_trace.so'),) Test.ContinueOnFail = False Test.testName = "money_trace remap" @@ -32,41 +31,26 @@ # configure origin server server = Test.MakeOriginServer("server") -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + "Host: origin\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = {"headers": "GET / HTTP/1.1\r\n" + "Host: origin\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) -req_hdr = {"headers": - "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -res_hdr = {"headers": - "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_hdr = {"headers": "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} +res_hdr = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_hdr, res_hdr) -ts.Disk.remap_config.AddLines([ - f"map http://none/ http://127.0.0.1:{server.Variables.Port}", - f"map http://basic/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so", - f"map http://header/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--header=mt", - f"map http://pregen/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--pregen-header=@pregen", - f"map http://pgh/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--header=mt @pparam=--pregen-header=@pregen", - 
f"map http://create/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--create-if-none=true", - f"map http://cheader/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--create-if-none=true @pparam=--header=mt", - f"map http://cpregen/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--create-if-none=true @pparam=--pregen-header=@pregen", - f"map http://passthru/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--passthru=true", -]) +ts.Disk.remap_config.AddLines( + [ + f"map http://none/ http://127.0.0.1:{server.Variables.Port}", + f"map http://basic/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so", + f"map http://header/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--header=mt", + f"map http://pregen/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--pregen-header=@pregen", + f"map http://pgh/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--header=mt @pparam=--pregen-header=@pregen", + f"map http://create/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--create-if-none=true", + f"map http://cheader/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--create-if-none=true @pparam=--header=mt", + f"map http://cpregen/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--create-if-none=true @pparam=--pregen-header=@pregen", + f"map http://passthru/ http://127.0.0.1:{server.Variables.Port} @plugin=money_trace.so @pparam=--passthru=true", + ]) # minimal configuration ts.Disk.records_config.update({ @@ -83,11 +67,9 @@ logs: - filename: remap format: custom -'''.split("\n") -) +'''.split("\n")) -Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'remap.log'), - exists=True, content='gold/remap-log.gold') +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'remap.log'), exists=True, content='gold/remap-log.gold') curl_and_args = f"curl -s -D /dev/stdout -o /dev/stderr -x 127.0.0.1:{ts.Variables.port}" @@ -220,6 +202,4 @@ tr = Test.AddTestRun() ps = tr.Processes.Default ps.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'remap.log') -) + os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + os.path.join(ts.Variables.LOGDIR, 'remap.log')) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/money_trace/money_trace_global.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/money_trace/money_trace_global.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/money_trace/money_trace_global.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/money_trace/money_trace_global.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -15,6 +15,7 @@ # limitations under the License. 
import os + Test.Summary = ''' Test money_trace global ''' @@ -23,8 +24,7 @@ Test.SkipUnless( # Condition.PluginExists('xdebug.so'), - Condition.PluginExists('money_trace.so'), -) + Condition.PluginExists('money_trace.so'),) Test.ContinueOnFail = False Test.testName = "money_trace global" @@ -34,28 +34,12 @@ # configure origin server server = Test.MakeOriginServer("server") -req_chk = {"headers": - "GET / HTTP/1.1\r\n" + "Host: origin\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -res_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_chk = {"headers": "GET / HTTP/1.1\r\n" + "Host: origin\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} +res_chk = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_chk, res_chk) -req_hdr = {"headers": - "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -res_hdr = {"headers": - "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", - "timestamp": "1469733493.993", - "body": "" - } +req_hdr = {"headers": "GET /path HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} +res_hdr = {"headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", req_hdr, res_hdr) ts.Disk.remap_config.AddLines([ @@ -80,11 +64,9 @@ logs: - filename: global format: custom -'''.split("\n") -) +'''.split("\n")) -Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'global.log'), - exists=True, content='gold/global-log.gold') +Test.Disk.File(os.path.join(ts.Variables.LOGDIR, 'global.log'), exists=True, content='gold/global-log.gold') curl_and_args = f"curl -s -D /dev/stdout -o /dev/stderr -x 127.0.0.1:{ts.Variables.port}" # -H 'X-Debug: Probe' " @@ -120,7 +102,5 @@ tr = Test.AddTestRun() ps = tr.Processes.Default ps.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'global.log') -) + os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + os.path.join(ts.Variables.LOGDIR, 'global.log')) #ps.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/multiplexer/multiplexer.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/multiplexer/multiplexer.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/multiplexer/multiplexer.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/multiplexer/multiplexer.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,9 +22,7 @@ Test the Multiplexer plugin. 
''' -Test.SkipUnless( - Condition.PluginExists('multiplexer.so') -) +Test.SkipUnless(Condition.PluginExists('multiplexer.so')) class MultiplexerTestBase: @@ -46,91 +44,74 @@ def setupServers(self): counter = MultiplexerTestBase.server_counter MultiplexerTestBase.server_counter += 1 - self.server_origin = Test.MakeVerifierServerProcess( - f"server_origin_{counter}", self.replay_file) - self.server_http = Test.MakeVerifierServerProcess( - f"server_http_{counter}", self.multiplexed_host_replay_file) - self.server_https = Test.MakeVerifierServerProcess( - f"server_https_{counter}", self.multiplexed_host_replay_file) + self.server_origin = Test.MakeVerifierServerProcess(f"server_origin_{counter}", self.replay_file) + self.server_http = Test.MakeVerifierServerProcess(f"server_http_{counter}", self.multiplexed_host_replay_file) + self.server_https = Test.MakeVerifierServerProcess(f"server_https_{counter}", self.multiplexed_host_replay_file) # The origin should never receive "X-Multiplexer: copy" self.server_origin.Streams.All += Testers.ExcludesExpression( - 'X-Multiplexer: copy', - 'Verify the original server target never receives a "copy".') + 'X-Multiplexer: copy', 'Verify the original server target never receives a "copy".') # Nor should the multiplexed hosts receive an "original" X-Multiplexer value. self.server_http.Streams.All += Testers.ExcludesExpression( - 'X-Multiplexer: original', - 'Verify the HTTP multiplexed host does not receive an "original".') + 'X-Multiplexer: original', 'Verify the HTTP multiplexed host does not receive an "original".') self.server_https.Streams.All += Testers.ExcludesExpression( - 'X-Multiplexer: original', - 'Verify the HTTPS multiplexed host does not receive an "original".') + 'X-Multiplexer: original', 'Verify the HTTPS multiplexed host does not receive an "original".') # In addition, the original server should always receive the POST and # PUT requests. self.server_origin.Streams.All += Testers.ContainsExpression( - 'uuid: POST', - "Verify the client's original target received the POST transaction.") + 'uuid: POST', "Verify the client's original target received the POST transaction.") self.server_origin.Streams.All += Testers.ContainsExpression( - 'uuid: PUT', - "Verify the client's original target received the PUT transaction.") + 'uuid: PUT', "Verify the client's original target received the PUT transaction.") # Under all configurations, the GET request should be multiplexed. 
self.server_origin.Streams.All += Testers.ContainsExpression( - 'X-Multiplexer: original', - 'Verify the client\'s original target received the "original" request.') + 'X-Multiplexer: original', 'Verify the client\'s original target received the "original" request.') self.server_origin.Streams.All += Testers.ContainsExpression( - 'uuid: GET', - "Verify the client's original target received the GET request.") + 'uuid: GET', "Verify the client's original target received the GET request.") self.server_http.Streams.All += Testers.ContainsExpression( - 'X-Multiplexer: copy', - 'Verify the HTTP server received a "copy" of the request.') - self.server_http.Streams.All += Testers.ContainsExpression( - 'uuid: GET', - "Verify the HTTP server received the GET request.") + 'X-Multiplexer: copy', 'Verify the HTTP server received a "copy" of the request.') + self.server_http.Streams.All += Testers.ContainsExpression('uuid: GET', "Verify the HTTP server received the GET request.") self.server_https.Streams.All += Testers.ContainsExpression( - 'X-Multiplexer: copy', - 'Verify the HTTPS server received a "copy" of the request.') + 'X-Multiplexer: copy', 'Verify the HTTPS server received a "copy" of the request.') self.server_https.Streams.All += Testers.ContainsExpression( - 'uuid: GET', - "Verify the HTTPS server received the GET request.") + 'uuid: GET', "Verify the HTTPS server received the GET request.") # Verify that the HTTPS server receives a TLS connection. self.server_https.Streams.All += Testers.ContainsExpression( - 'Finished accept using TLSSession', - "Verify the HTTPS was indeed used by the HTTPS server.") + 'Finished accept using TLSSession', "Verify the HTTPS was indeed used by the HTTPS server.") def setupTS(self, skip_post): counter = MultiplexerTestBase.ts_counter MultiplexerTestBase.ts_counter += 1 self.ts = Test.MakeATSProcess(f"ts_{counter}", enable_tls=True, enable_cache=False) self.ts.addDefaultSSLFiles() - self.ts.Disk.records_config.update({ - "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', - - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'multiplexer', - }) - self.ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) + self.ts.Disk.records_config.update( + { + "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'multiplexer', + }) + self.ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') skip_remap_param = '' if skip_post: skip_remap_param = ' @pparam=proxy.config.multiplexer.skip_post_put=1' - self.ts.Disk.remap_config.AddLines([ - f'map https://origin.server.com https://127.0.0.1:{self.server_origin.Variables.https_port} ' - f'@plugin=multiplexer.so @pparam=nontls.server.com @pparam=tls.server.com' - f'{skip_remap_param}', - - # Now create remap entries for the multiplexed hosts: one that - # verifies HTTP, and another that verifies HTTPS. 
- f'map http://nontls.server.com http://127.0.0.1:{self.server_http.Variables.http_port}', - f'map http://tls.server.com https://127.0.0.1:{self.server_https.Variables.https_port}', - ]) + self.ts.Disk.remap_config.AddLines( + [ + f'map https://origin.server.com https://127.0.0.1:{self.server_origin.Variables.https_port} ' + f'@plugin=multiplexer.so @pparam=nontls.server.com @pparam=tls.server.com' + f'{skip_remap_param}', + + # Now create remap entries for the multiplexed hosts: one that + # verifies HTTP, and another that verifies HTTPS. + f'map http://nontls.server.com http://127.0.0.1:{self.server_http.Variables.http_port}', + f'map http://tls.server.com https://127.0.0.1:{self.server_https.Variables.https_port}', + ]) def run(self): tr = Test.AddTestRun() @@ -141,10 +122,7 @@ counter = MultiplexerTestBase.client_counter MultiplexerTestBase.client_counter += 1 - tr.AddVerifierClientProcess( - f"client_{counter}", - self.replay_file, - https_ports=[self.ts.Variables.ssl_port]) + tr.AddVerifierClientProcess(f"client_{counter}", self.replay_file, https_ports=[self.ts.Variables.ssl_port]) class MultiplexerTest(MultiplexerTestBase): @@ -156,10 +134,7 @@ multiplexed_host_replay_file = os.path.join("replays", "multiplexer_copy.replay.yaml") def __init__(self): - super().__init__( - MultiplexerTest.replay_file, - MultiplexerTest.multiplexed_host_replay_file, - skip_post=False) + super().__init__(MultiplexerTest.replay_file, MultiplexerTest.multiplexed_host_replay_file, skip_post=False) def setupServers(self): super().setupServers() @@ -167,19 +142,14 @@ # Both of the multiplexed hosts should receive the POST because skip_post # is disabled. self.server_http.Streams.All += Testers.ContainsExpression( - 'uuid: POST', - "Verify the HTTP server received the POST request.") + 'uuid: POST', "Verify the HTTP server received the POST request.") self.server_https.Streams.All += Testers.ContainsExpression( - 'uuid: POST', - "Verify the HTTPS server received the POST request.") + 'uuid: POST', "Verify the HTTPS server received the POST request.") # Same with PUT - self.server_http.Streams.All += Testers.ContainsExpression( - 'uuid: PUT', - "Verify the HTTP server received the PUT request.") + self.server_http.Streams.All += Testers.ContainsExpression('uuid: PUT', "Verify the HTTP server received the PUT request.") self.server_https.Streams.All += Testers.ContainsExpression( - 'uuid: PUT', - "Verify the HTTPS server received the PUT request.") + 'uuid: PUT', "Verify the HTTPS server received the PUT request.") class MultiplexerSkipPostTest(MultiplexerTestBase): @@ -191,10 +161,7 @@ multiplexed_host_replay_file = os.path.join("replays", "multiplexer_copy_skip_post.replay.yaml") def __init__(self): - super().__init__( - MultiplexerSkipPostTest.replay_file, - MultiplexerSkipPostTest.multiplexed_host_replay_file, - skip_post=True) + super().__init__(MultiplexerSkipPostTest.replay_file, MultiplexerSkipPostTest.multiplexed_host_replay_file, skip_post=True) def setupServers(self): super().setupServers() @@ -202,19 +169,15 @@ # Neither of the multiplexed hosts should receive the POST because skip_post # is enabled. 
self.server_http.Streams.All += Testers.ExcludesExpression( - 'uuid: POST', - "Verify the HTTP server did not receive the POST request.") + 'uuid: POST', "Verify the HTTP server did not receive the POST request.") self.server_https.Streams.All += Testers.ExcludesExpression( - 'uuid: POST', - "Verify the HTTPS server did not receive the POST request.") + 'uuid: POST', "Verify the HTTPS server did not receive the POST request.") # Same with PUT. self.server_http.Streams.All += Testers.ExcludesExpression( - 'uuid: PUT', - "Verify the HTTP server did not receive the PUT request.") + 'uuid: PUT', "Verify the HTTP server did not receive the PUT request.") self.server_https.Streams.All += Testers.ExcludesExpression( - 'uuid: PUT', - "Verify the HTTPS server did not receive the PUT request.") + 'uuid: PUT', "Verify the HTTPS server did not receive the PUT request.") MultiplexerTest().run() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,20 +20,17 @@ Basic parent_select plugin test ''' -Test.SkipUnless( - Condition.PluginExists('parent_select.so'), -) +Test.SkipUnless(Condition.PluginExists('parent_select.so'),) Test.ContinueOnFail = False # Define and populate MicroServer. # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -57,30 +54,30 @@ ts_nh = [] for i in range(num_nh): ts = Test.MakeATSProcess(f"ts_nh{i}", use_traffic_out=False, command=f"traffic_server 2>nh_trace{i}.log") - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_nh.append(ts) ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. 
- 'proxy.config.http.cache.http': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. + 'proxy.config.http.cache.http': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 0, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -95,17 +92,11 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://next_hop{i}:{ts_nh[i].Variables.port}") s.AddLine(f" weight: 1.0") -s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: false", - " parent_is_proxy: true", - " ignore_self_detect: true", - " groups:", - " - *g1", - " scheme: http"]) +s.AddLines( + [ + "strategies:", " - strategy: the-strategy", " policy: consistent_hash", " hash_key: path", " go_direct: false", + " parent_is_proxy: true", " ignore_self_detect: true", " groups:", " - *g1", " scheme: http" + ]) # Fallover not currently tested. # @@ -119,8 +110,7 @@ # " - passive"]) ts.Disk.remap_config.AddLine( - "map http://dummy.com http://not_used @plugin=parent_select.so @pparam=" + - ts.Variables.CONFIGDIR + + "map http://dummy.com http://not_used @plugin=parent_select.so @pparam=" + ts.Variables.CONFIGDIR + "/strategies.yaml @pparam=the-strategy") tr = Test.AddTestRun() @@ -134,9 +124,7 @@ for i in range(num_objects): tr = Test.AddTestRun() - tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}' - ) + tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select_optional_scheme_matching.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select_optional_scheme_matching.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select_optional_scheme_matching.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select_optional_scheme_matching.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -20,20 +20,17 @@ Basic parent_select plugin test ''' -Test.SkipUnless( - Condition.PluginExists('parent_select.so'), -) +Test.SkipUnless(Condition.PluginExists('parent_select.so'),) Test.ContinueOnFail = False # Define and populate MicroServer. 
# server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -57,30 +54,30 @@ ts_nh = [] for i in range(num_nh): ts = Test.MakeATSProcess(f"ts_nh{i}", use_traffic_out=False, command=f"traffic_server 2>nh_trace{i}.log") - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_nh.append(ts) ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. - 'proxy.config.http.cache.http': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. + 'proxy.config.http.cache.http': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 0, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -94,20 +91,14 @@ # The health check URL does not seem to be used currently. 
# s.AddLine(f" health_check_url: http://next_hop{i}:{ts_nh[i].Variables.port}") s.AddLine(f" weight: 1.0") -s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: false", - " parent_is_proxy: true", - " ignore_self_detect: true", - " groups:", - " - *g1"]) +s.AddLines( + [ + "strategies:", " - strategy: the-strategy", " policy: consistent_hash", " hash_key: path", " go_direct: false", + " parent_is_proxy: true", " ignore_self_detect: true", " groups:", " - *g1" + ]) ts.Disk.remap_config.AddLine( - "map http://dummy.com http://not_used @plugin=parent_select.so @pparam=" + - ts.Variables.CONFIGDIR + + "map http://dummy.com http://not_used @plugin=parent_select.so @pparam=" + ts.Variables.CONFIGDIR + "/strategies.yaml @pparam=the-strategy") tr = Test.AddTestRun() @@ -121,9 +112,7 @@ for i in range(num_objects): tr = Test.AddTestRun() - tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}' - ) + tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "body.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,11 +27,10 @@ # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -56,15 +55,14 @@ for i in range(num_upstream): ts = Test.MakeATSProcess(f"ts_upstream{i}") dns.addRecords(records={f"ts_upstream{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_upstream.append(ts) # Define peer trafficserver instances. @@ -78,18 +76,19 @@ ts = ts_peer[i] dns.addRecords(records={f"ts_peer{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb|cachekey|pparent_select', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. 
- 'proxy.config.http.cache.http': 1, - 'proxy.config.http.cache.required_headers': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 1, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 1, - }) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb|cachekey|pparent_select', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. + 'proxy.config.http.cache.http': 1, + 'proxy.config.http.cache.required_headers': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 1, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 1, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -112,34 +111,36 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://ts_upstream{j}:{ts_upstream[j].Variables.port}") s.AddLine(f" weight: 1.0") - s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: cache_key", - " go_direct: false", - " parent_is_proxy: true", - " cache_peer_result: false", - " ignore_self_detect: false", - " groups:", - " - *peer_group", - " - *peer_upstream", - " scheme: http", - " failover:", - " ring_mode: peering_ring", - f" self: ts_peer{i}", - #" max_simple_retries: 2", - #" response_codes:", - #" - 404", - #" health_check:", - #" - passive", - ]) + s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: cache_key", + " go_direct: false", + " parent_is_proxy: true", + " cache_peer_result: false", + " ignore_self_detect: false", + " groups:", + " - *peer_group", + " - *peer_upstream", + " scheme: http", + " failover:", + " ring_mode: peering_ring", + f" self: ts_peer{i}", + #" max_simple_retries: 2", + #" response_codes:", + #" - 404", + #" health_check:", + #" - passive", + ]) suffix = f" @plugin=parent_select.so @pparam={ts.Variables.CONFIGDIR}/strategies.yaml @pparam=the-strategy @plugin=cachekey.so @pparam=--uri-type=remap @pparam=--capture-prefix=/(.*):(.*)/$1/" - ts.Disk.remap_config.AddLines([ - "map http://dummy.com http://not_used" + suffix, - "map http://not_used http://also_not_used" + suffix, - ]) + ts.Disk.remap_config.AddLines( + [ + "map http://dummy.com http://not_used" + suffix, + "map http://not_used http://also_not_used" + suffix, + ]) tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -154,8 +155,7 @@ for i in range(num_object): tr = Test.AddTestRun() tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts_peer[i % num_peer].Variables.port} http://dummy.com/obj{i}' - ) + f'curl --verbose --proxy 127.0.0.1:{ts_peer[i % num_peer].Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "peer.body.gold" tr.Processes.Default.ReturnCode = 0 @@ -163,15 +163,13 @@ tr = Test.AddTestRun() # num_peer must not be a multiple of 3 tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts_peer[(i * 3) % num_peer].Variables.port} http://dummy.com/obj{i}' - ) + f'curl --verbose --proxy 
127.0.0.1:{ts_peer[(i * 3) % num_peer].Variables.port} http://dummy.com/obj{i}') tr.Processes.Default.Streams.stdout = "peer.body.gold" tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( "grep -e '^+++' -e '^[A-Z].*TTP/' -e '^.alts. --' -e 'PARENT_SPECIFIED' trace_peer*.log" - " | sed 's/^.*(pparent_select) [^ ]* //' | sed 's/[.][0-9]*$$//'" -) + " | sed 's/^.*(pparent_select) [^ ]* //' | sed 's/[.][0-9]*$$//'") tr.Processes.Default.Streams.stdout = "peer.trace.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer2.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/parent_select/parent_select_peer2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,11 +27,10 @@ # server = Test.MakeOriginServer("server") response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": "This is the body.\n" } @@ -56,15 +55,14 @@ for i in range(num_upstream): ts = Test.MakeATSProcess(f"ts_upstream{i}") dns.addRecords(records={f"ts_upstream{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", - }) - ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{server.Variables.Port}" - ) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) + ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{server.Variables.Port}") ts_upstream.append(ts) # Define peer trafficserver instances. @@ -78,18 +76,19 @@ ts = ts_peer[i] dns.addRecords(records={f"ts_peer{i}": ["127.0.0.1"]}) - ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb|pparent_select', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. - 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.cache.required_headers': 0, - 'proxy.config.http.uncacheable_requests_bypass_parent': 0, - 'proxy.config.http.no_dns_just_forward_to_parent': 0, - 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, - 'proxy.config.http.parent_proxy.self_detect': 1, - }) + ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent|next_hop|host_statuses|hostdb|pparent_select', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", # Only nameservers if resolv_conf NULL. + 'proxy.config.dns.resolv_conf': "NULL", # This defaults to /etc/resvolv.conf (OS namesevers) if not NULL. 
+ 'proxy.config.http.cache.http': 1, + 'proxy.config.http.cache.required_headers': 0, + 'proxy.config.http.uncacheable_requests_bypass_parent': 0, + 'proxy.config.http.no_dns_just_forward_to_parent': 0, + 'proxy.config.http.parent_proxy.mark_down_hostdb': 0, + 'proxy.config.http.parent_proxy.self_detect': 1, + }) ts.Disk.File(ts.Variables.CONFIGDIR + "/strategies.yaml", id="strategies", typename="ats:config") s = ts.Disk.strategies @@ -103,33 +102,33 @@ # The health check URL does not seem to be used currently. # s.AddLine(f" health_check_url: http://ts_peer{j}:{ts_peer[j].Variables.port}") s.AddLine(f" weight: 1.0") - s.AddLines([ - "strategies:", - " - strategy: the-strategy", - " policy: consistent_hash", - " hash_key: path", - " go_direct: true", - " parent_is_proxy: true", - " cache_peer_result: false", - " ignore_self_detect: false", - " groups:", - " - *peer_group", - " scheme: http", - " failover:", - " ring_mode: peering_ring", - f" self: ts_peer{i}", - #" max_simple_retries: 2", - #" response_codes:", - #" - 404", - #" health_check:", - #" - passive", - ]) + s.AddLines( + [ + "strategies:", + " - strategy: the-strategy", + " policy: consistent_hash", + " hash_key: path", + " go_direct: true", + " parent_is_proxy: true", + " cache_peer_result: false", + " ignore_self_detect: false", + " groups:", + " - *peer_group", + " scheme: http", + " failover:", + " ring_mode: peering_ring", + f" self: ts_peer{i}", + #" max_simple_retries: 2", + #" response_codes:", + #" - 404", + #" health_check:", + #" - passive", + ]) for i in range(num_upstream): prefix = f"http://ts_upstream{i}:{ts_upstream[i].Variables.port}/" ts.Disk.remap_config.AddLine( - f"map {prefix} {prefix} @plugin=parent_select.so @pparam=" + - ts.Variables.CONFIGDIR + + f"map {prefix} {prefix} @plugin=parent_select.so @pparam=" + ts.Variables.CONFIGDIR + "/strategies.yaml @pparam=the-strategy") tr = Test.AddTestRun() @@ -166,7 +165,6 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( "grep -e '^+++' -e '^[A-Z].*TTP/' -e '^.alts. --' -e 'PARENT_SPECIFIED' trace_peer*.log" - " | sed 's/^.*(pparent_select) [^ ]* //' | sed 's/[.][0-9]*$$//' " + normalize_ports -) + " | sed 's/^.*(pparent_select) [^ ]* //' | sed 's/[.][0-9]*$$//' " + normalize_ports) tr.Processes.Default.Streams.stdout = "peer2.trace.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/prefetch_simple/prefetch_simple.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/prefetch_simple/prefetch_simple.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/prefetch_simple/prefetch_simple.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/prefetch_simple/prefetch_simple.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,15 +27,14 @@ f"GET /texts/demo-{i + 1}.txt HTTP/1.1\r\n" "Host: does.not.matter\r\n" # But cannot be omitted. 
"\r\n", - "timestamp": "1469733493.993", - "body": "" + "timestamp": "1469733493.993", + "body": "" } response_header = { - "headers": - "HTTP/1.1 200 OK\r\n" - "Connection: close\r\n" - "Cache-control: max-age=85000\r\n" - "\r\n", + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-control: max-age=85000\r\n" + "\r\n", "timestamp": "1469733493.993", "body": f"This is the body for demo-{i + 1}.txt.\n" } @@ -44,21 +43,17 @@ dns = Test.MakeDNServer("dns") ts = Test.MakeATSProcess("ts", use_traffic_out=False, command="traffic_server 2> trace.log") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|prefetch', - 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", - 'proxy.config.dns.resolv_conf': "NULL", -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|prefetch', + 'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}", + 'proxy.config.dns.resolv_conf': "NULL", + }) ts.Disk.remap_config.AddLine( - f"map http://domain.in http://127.0.0.1:{server.Variables.Port}" + - " @plugin=cachekey.so @pparam=--remove-all-params=true" - " @plugin=prefetch.so" + - " @pparam=--front=true" + - " @pparam=--fetch-policy=simple" + - r" @pparam=--fetch-path-pattern=/(.*-)(\d+)(.*)/$1{$2+1}$3/" + - " @pparam=--fetch-count=3" -) + f"map http://domain.in http://127.0.0.1:{server.Variables.Port}" + " @plugin=cachekey.so @pparam=--remove-all-params=true" + " @plugin=prefetch.so" + " @pparam=--front=true" + " @pparam=--fetch-policy=simple" + + r" @pparam=--fetch-path-pattern=/(.*-)(\d+)(.*)/$1{$2+1}$3/" + " @pparam=--fetch-count=3") tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) @@ -68,14 +63,10 @@ tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://domain.in/texts/demo-1.txt' -) +tr.Processes.Default.Command = (f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://domain.in/texts/demo-1.txt') tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - "grep 'GET http://domain.in' trace.log" -) +tr.Processes.Default.Command = ("grep 'GET http://domain.in' trace.log") tr.Streams.stdout = "prefetch_simple.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_remap/regex_remap.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_remap/regex_remap.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_remap/regex_remap.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_remap/regex_remap.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ import os import time import json + Test.Summary = ''' Test regex_remap ''' @@ -31,9 +32,7 @@ # If the rule disappears from regex_revalidate.conf its still loaded!! # A rule's expiry can't be changed after the fact! 
-Test.SkipUnless( - Condition.PluginExists('regex_remap.so'), -) +Test.SkipUnless(Condition.PluginExists('regex_remap.so'),) Test.ContinueOnFail = False # configure origin server @@ -56,36 +55,37 @@ regex_remap2_conf_path = os.path.join(ts.Variables.CONFIGDIR, 'regex_remap2.conf') curl_and_args = 'curl -s -D - -v --proxy localhost:{} '.format(ts.Variables.port) -ts.Disk.File(regex_remap_conf_path, typename="ats:config").AddLines([ - "# regex_remap configuration\n" - "^/alpha/bravo/[?]((?!action=(newsfeed|calendar|contacts|notepad)).)*$ https://redirect.com/ @status=301\n" -]) - -ts.Disk.File(regex_remap2_conf_path, typename="ats:config").AddLines([ - "# 2nd regex_remap configuration\n" - "^/alpha/bravo/[?]((?!action=(newsfeed|calendar|contacts|notepad)).)*$ " + - f"http://localhost:{server.Variables.Port}\n" -]) +ts.Disk.File( + regex_remap_conf_path, typename="ats:config").AddLines( + [ + "# regex_remap configuration\n" + "^/alpha/bravo/[?]((?!action=(newsfeed|calendar|contacts|notepad)).)*$ https://redirect.com/ @status=301\n" + ]) + +ts.Disk.File( + regex_remap2_conf_path, typename="ats:config").AddLines( + [ + "# 2nd regex_remap configuration\n" + "^/alpha/bravo/[?]((?!action=(newsfeed|calendar|contacts|notepad)).)*$ " + f"http://localhost:{server.Variables.Port}\n" + ]) ts.Disk.remap_config.AddLine( - "map http://example.one/ http://localhost:{}/ @plugin=regex_remap.so @pparam=regex_remap.conf\n".format(server.Variables.Port) -) + "map http://example.one/ http://localhost:{}/ @plugin=regex_remap.so @pparam=regex_remap.conf\n".format(server.Variables.Port)) ts.Disk.remap_config.AddLine( "map http://example.two/ http://localhost:{}/ ".format(server.Variables.Port) + - "@plugin=regex_remap.so @pparam=regex_remap.conf @pparam=pristine\n" -) + "@plugin=regex_remap.so @pparam=regex_remap.conf @pparam=pristine\n") ts.Disk.remap_config.AddLine( "map http://example.three/ http://wrong.com/ ".format(server.Variables.Port) + - "@plugin=regex_remap.so @pparam=regex_remap2.conf @pparam=pristine\n" -) + "@plugin=regex_remap.so @pparam=regex_remap2.conf @pparam=pristine\n") # minimal configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|regex_remap', - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|regex_remap', + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # 0 Test - Load cache (miss) (path1) tr = Test.AddTestRun("smoke test") @@ -101,10 +101,8 @@ # 1 Test - Match and redirect tr = Test.AddTestRun("pristine test") tr.Processes.Default.Command = ( - curl_and_args + - "'http://example.two/alpha/bravo/?action=newsfed;param0001=00003E;param0002=00004E;param0003=00005E'" + - f" | grep -e '^HTTP/' -e '^Location' | sed 's/{server.Variables.Port}/SERVER_PORT/'" -) + curl_and_args + "'http://example.two/alpha/bravo/?action=newsfed;param0001=00003E;param0002=00004E;param0003=00005E'" + + f" | grep -e '^HTTP/' -e '^Location' | sed 's/{server.Variables.Port}/SERVER_PORT/'") tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stdout = "gold/regex_remap_redirect.gold" tr.StillRunningAfter = ts @@ -114,8 +112,7 @@ tr.Processes.Default.Command = ( curl_and_args + '--header "uuid: {}" '.format(creq["headers"]["fields"][1][1]) + " 
'http://example.three/alpha/bravo/?action=newsfed;param0001=00003E;param0002=00004E;param0003=00005E'" + - " | grep -e '^HTTP/' -e '^Content-Length'" -) + " | grep -e '^HTTP/' -e '^Content-Length'") tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stdout = "gold/regex_remap_simple.gold" tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,6 +18,7 @@ import os import time + Test.Summary = ''' Test a basic regex_revalidate ''' @@ -32,10 +33,7 @@ # If the rule disappears from regex_revalidate.conf its still loaded!! # A rule's expiry can't be changed after the fact! -Test.SkipUnless( - Condition.PluginExists('regex_revalidate.so'), - Condition.PluginExists('xdebug.so') -) +Test.SkipUnless(Condition.PluginExists('regex_revalidate.so'), Condition.PluginExists('xdebug.so')) Test.ContinueOnFail = False # configure origin server @@ -48,76 +46,56 @@ Test.Setup.Copy("metrics.sh") # default root -request_header_0 = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_0 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "Cache-Control: max-age=300\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "xxx", - } +request_header_0 = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_0 = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-Control: max-age=300\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "xxx", +} # cache item path1 -request_header_1 = {"headers": - "GET /path1 HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -response_header_1 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "path1"\r\n' + - "Cache-Control: max-age=600,public\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "abc" - } +request_header_1 = { + "headers": "GET /path1 HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header_1 = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "path1"\r\n' + "Cache-Control: max-age=600,public\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "abc" +} # cache item path1a -request_header_2 = {"headers": - "GET /path1a HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -response_header_2 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "path1a"\r\n' + - "Cache-Control: max-age=600,public\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "cde" - } +request_header_2 = { + "headers": "GET /path1a HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header_2 = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "path1a"\r\n' + "Cache-Control: max-age=600,public\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "cde" +} # cache item 
path2a -request_header_3 = {"headers": - "GET /path2a HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -response_header_3 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "path2a"\r\n' + - "Cache-Control: max-age=900,public\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "efg" - } +request_header_3 = { + "headers": "GET /path2a HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header_3 = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "path2a"\r\n' + "Cache-Control: max-age=900,public\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "efg" +} server.addResponse("sessionlog.json", request_header_0, response_header_0) server.addResponse("sessionlog.json", request_header_1, response_header_1) @@ -126,9 +104,7 @@ # Configure ATS server ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.plugin_config.AddLine( - 'regex_revalidate.so -d -c regex_revalidate.conf' -) +ts.Disk.plugin_config.AddLine('regex_revalidate.so -d -c regex_revalidate.conf') regex_revalidate_conf_path = os.path.join(ts.Variables.CONFIGDIR, 'regex_revalidate.conf') curl_and_args = 'curl -s -D - -v -H "x-debug: x-cache" -H "Host: www.example.com"' @@ -136,22 +112,22 @@ path1_rule = 'path1 {}\n'.format(int(time.time()) + 600) # Define first revision for when trafficserver starts -ts.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([ - "# Empty\n" -]) - -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{}'.format(server.Variables.Port) -) +ts.Disk.File( + regex_revalidate_conf_path, typename="ats:config").AddLines([ + "# Empty\n", + ]) + +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{}'.format(server.Variables.Port)) # minimal configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|regex_revalidate', - # 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.http.insert_age_in_response': 0, - 'proxy.config.http.response_via_str': 3, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|regex_revalidate', + # 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.response_via_str': 3, + }) # 0 Test - Load cache (miss) (path1) tr = Test.AddTestRun("Cache miss path1") @@ -189,9 +165,10 @@ # the old is greater than the granularity of the time stamp used. (The config file write # happens after the delay.) tr.DelayStart = 1 -tr.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([ - path1_rule -]) +tr.Disk.File( + regex_revalidate_conf_path, typename="ats:config").AddLines([ + path1_rule, + ]) tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = 'traffic_ctl config reload' @@ -222,10 +199,11 @@ # the old is greater than the granularity of the time stamp used. (The config file write # happens after the delay.) 
tr.DelayStart = 1 -tr.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([ - path1_rule, - 'path2 {}\n'.format(int(time.time()) + 700) -]) +tr.Disk.File( + regex_revalidate_conf_path, typename="ats:config").AddLines([ + path1_rule, + 'path2 {}\n'.format(int(time.time()) + 700), + ]) tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = 'traffic_ctl config reload' @@ -259,10 +237,11 @@ # the old is greater than the granularity of the time stamp used. (The config file write # happens after the delay.) tr.DelayStart = 1 -tr.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLines([ - path1_rule, - 'path2 {}\n'.format(int(time.time()) - 100), -]) +tr.Disk.File( + regex_revalidate_conf_path, typename="ats:config").AddLines([ + path1_rule, + 'path2 {}\n'.format(int(time.time()) - 100), + ]) tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = 'traffic_ctl config reload' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_miss.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_miss.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_miss.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_miss.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,6 +18,7 @@ import os import time + Test.Summary = ''' regex_revalidate plugin test, MISS (refetch) functionality ''' @@ -26,10 +27,7 @@ # If MISS tag encountered, should load rule as refetch instead of IMS. # If rule switched from MISS to IMS or vice versa, rule should reset. -Test.SkipUnless( - Condition.PluginExists('regex_revalidate.so'), - Condition.PluginExists('xdebug.so') -) +Test.SkipUnless(Condition.PluginExists('regex_revalidate.so'), Condition.PluginExists('xdebug.so')) Test.ContinueOnFail = False # configure origin server @@ -42,50 +40,37 @@ Test.Setup.Copy("metrics_miss.sh") # default root -request_header_0 = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_0 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "Cache-Control: max-age=300\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "xxx", - } +request_header_0 = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_0 = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-Control: max-age=300\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "xxx", +} # cache item path1 -request_header_1 = {"headers": - "GET /path1 HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } -response_header_1 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "path1"\r\n' + - "Cache-Control: max-age=600,public\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "abc" - } - +request_header_1 = { + "headers": "GET /path1 HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" +} +response_header_1 = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "path1"\r\n' + "Cache-Control: max-age=600,public\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "abc" +} server.addResponse("sessionlog.json", 
request_header_0, response_header_0) server.addResponse("sessionlog.json", request_header_1, response_header_1) # Configure ATS server ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.plugin_config.AddLine( - 'regex_revalidate.so -d -c regex_revalidate.conf -l revalidate.log' -) +ts.Disk.plugin_config.AddLine('regex_revalidate.so -d -c regex_revalidate.conf -l revalidate.log') regex_revalidate_conf_path = os.path.join(ts.Variables.CONFIGDIR, 'regex_revalidate.conf') #curl_and_args = 'curl -s -D - -v -H "x-debug: x-cache" -H "Host: www.example.com"' @@ -93,23 +78,20 @@ path1_rule = 'path1 {}'.format(int(time.time()) + 600) # Define first revision for when trafficserver starts -ts.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLine( - "# Empty" -) - -ts.Disk.remap_config.AddLine( - 'map http://ats/ http://127.0.0.1:{}'.format(server.Variables.Port) -) +ts.Disk.File(regex_revalidate_conf_path, typename="ats:config").AddLine("# Empty") + +ts.Disk.remap_config.AddLine('map http://ats/ http://127.0.0.1:{}'.format(server.Variables.Port)) # minimal configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'regex_revalidate', - 'proxy.config.http.insert_age_in_response': 0, - 'proxy.config.http.response_via_str': 3, - 'proxy.config.http.cache.http': 1, - 'proxy.config.http.wait_for_cache': 1, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'regex_revalidate', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.response_via_str': 3, + 'proxy.config.http.cache.http': 1, + 'proxy.config.http.wait_for_cache': 1, + }) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x http://127.0.0.1:{}'.format(ts.Variables.port) + ' -H "x-debug: x-cache"' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_state.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_state.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_state.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/regex_revalidate/regex_revalidate_state.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,6 +18,7 @@ import os import time + Test.Summary = ''' regex_revalidate plugin test, reload epoch state on ats start ''' @@ -26,9 +27,7 @@ # Ensures that that the regex revalidate config file is loaded, # then epoch times from the state file are properly merged. 
-Test.SkipUnless( - Condition.PluginExists('regex_revalidate.so') -) +Test.SkipUnless(Condition.PluginExists('regex_revalidate.so')) Test.ContinueOnFail = False # configure origin server @@ -41,22 +40,17 @@ testName = "regex_revalidate_state" # default root -request_header_0 = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_0 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "Cache-Control: max-age=300\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "xxx", - } +request_header_0 = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_0 = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-Control: max-age=300\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "xxx", +} server.addResponse("sessionlog.json", request_header_0, response_header_0) @@ -64,9 +58,7 @@ reval_state_path = os.path.join(Test.Variables.RUNTIMEDIR, 'reval.state') # Configure ATS server -ts.Disk.plugin_config.AddLine( - f"regex_revalidate.so -d -c reval.conf -l reval.log -f {reval_state_path}" -) +ts.Disk.plugin_config.AddLine(f"regex_revalidate.so -d -c reval.conf -l reval.log -f {reval_state_path}") sep = ' ' @@ -84,10 +76,12 @@ # Create gold files gold_path_good = reval_state_path + ".good" -ts.Disk.File(gold_path_good, typename="ats:config").AddLines([ - sep.join([path0_regex, "``", path0_expiry, path0_type]), - sep.join([path1_regex, path1_epoch, path1_expiry, path1_type]), -]) +ts.Disk.File( + gold_path_good, typename="ats:config").AddLines( + [ + sep.join([path0_regex, "``", path0_expiry, path0_type]), + sep.join([path1_regex, path1_epoch, path1_expiry, path1_type]), + ]) # It seems there's no API for negative gold file matching ''' @@ -99,29 +93,31 @@ ''' # Create a state file, second line will be discarded and not merged -ts.Disk.File(reval_state_path, typename="ats:config").AddLines([ - sep.join([path1_regex, path1_epoch, path1_expiry, path1_type]), - sep.join(["dummy", path1_epoch, path1_expiry, path1_type]), -]) +ts.Disk.File( + reval_state_path, typename="ats:config").AddLines( + [ + sep.join([path1_regex, path1_epoch, path1_expiry, path1_type]), + sep.join(["dummy", path1_epoch, path1_expiry, path1_type]), + ]) # Write out reval.conf file -ts.Disk.File(reval_conf_path, typename="ats:config").AddLines([ - path0_rule, path1_rule, -]) +ts.Disk.File( + reval_conf_path, typename="ats:config").AddLines([ + path0_rule, + path1_rule, + ]) ts.chownForATSProcess(reval_state_path) -ts.Disk.remap_config.AddLine( - f"map http://ats/ http://127.0.0.1:{server.Variables.Port}" -) +ts.Disk.remap_config.AddLine(f"map http://ats/ http://127.0.0.1:{server.Variables.Port}") # minimal configuration -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'regex_revalidate', - 'proxy.config.http.wait_for_cache': 1, -}) - +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'regex_revalidate', + 'proxy.config.http.wait_for_cache': 1, + }) # This TestRun creates the state file so it exists when the ts process's Setup # logic is run so that it can be chowned at that point. 
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,28 +24,23 @@ Test.Setup.Copy("metrics.sh") -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: one\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: one\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True) ts.Disk.plugin_config.AddLine('remap_stats.so') -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "map http://two http://127.0.0.1:{0}".format(server.Variables.Port) -) - -ts.Disk.records_config.update({ - 'proxy.config.http.transaction_active_timeout_out': 2, - 'proxy.config.http.transaction_no_activity_timeout_out': 2, - 'proxy.config.http.connect_attempts_timeout': 2, -}) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("map http://two http://127.0.0.1:{0}".format(server.Variables.Port)) + +ts.Disk.records_config.update( + { + 'proxy.config.http.transaction_active_timeout_out': 2, + 'proxy.config.http.transaction_no_activity_timeout_out': 2, + 'proxy.config.http.connect_attempts_timeout': 2, + }) # 0 Test - Curl host One tr = Test.AddTestRun("curl host one") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats_post.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats_post.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats_post.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/remap_stats/remap_stats_post.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,28 +24,23 @@ Test.Setup.Copy("metrics_post.sh") -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: one\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: one\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True) ts.Disk.plugin_config.AddLine('remap_stats.so --post-remap-host') -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "map http://two http://127.0.0.1:{0}".format(server.Variables.Port) -) - -ts.Disk.records_config.update({ - 'proxy.config.http.transaction_active_timeout_out': 2, - 
'proxy.config.http.transaction_no_activity_timeout_out': 2, - 'proxy.config.http.connect_attempts_timeout': 2, -}) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("map http://two http://127.0.0.1:{0}".format(server.Variables.Port)) + +ts.Disk.records_config.update( + { + 'proxy.config.http.transaction_active_timeout_out': 2, + 'proxy.config.http.transaction_no_activity_timeout_out': 2, + 'proxy.config.http.connect_attempts_timeout': 2, + }) # 0 Test - Curl host One tr = Test.AddTestRun("curl host one") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/s3_auth/s3_auth_config.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/s3_auth/s3_auth_config.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/s3_auth/s3_auth_config.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/s3_auth/s3_auth_config.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -26,16 +26,13 @@ request_header = { "headers": "GET /s3-bucket HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", - "x-amz-security-token": "hkMsi6/bfHyBKrSeM/H0hoXeyx8z1yZ/mJ0c+B/TqYx=tTJDjnQWtul38Z9iVJjeH1HB4VT2c=2o3yE3o=I9kmFs/lJDR85qWjB8e5asY/WbjyRpbAzmDipQpboIcYnUYg55bxrQFidV/q8gZa5A9MpR3n=op1C0lWjeBqcEJxpevNZxteSQTQfeGsi98Cdf+On=/SINVlKrNhMnmMsDOLMGx1YYt9d4UsRg1jtVrwxL4Vd/F7aHCZySAXKv+1rkhACR023wpa3dhp+xirGJxSO9LWwvcrTdM4xJo4RS8B40tGENOJ1NKixUJxwN/6og58Oft/u==uleR89Ja=7zszK2H7tX3DqmEYNvNDYQh/7VBRe5otghQtPwJzWpXAGk+Vme4hPPM5K6axH2LxipXzRiIV=oxNs0upKNu1FvuzbCQmkQdKQVmXl0344vngngrgN7wkEfrYtmKwICmpAS0cbW9jdSClgziVo4NaFc/hsIfok=4UA3hVtxIdw74lFNXD0RR7HKXkFPLIn85M7peOZsqMUCfO4gxr7KCfabszQQf0YcP/mt79XK50=WrSJG7oUyn+clUySPhlegqHAfT9a50uSK5WiQmOnGNGLF4wDO10sqKN1xRgQbYHPtwL+Ye0EMisvmYA3==kScorTSGaQWyibSWXAvxq9+IVGBYShVJ6S7DmTT=u/2d/fGEge+Xmbxlftza=cxJ=Md=k1Q71Lp6Boa56d7wtYRpK6tXHJ9I/2r7rN1E4OtwkFqb7SfWV3UXwyUrXyaaNPTIbqnAHnbgUGtuU6pgICpfREiIxVqvKBf6ErbxHRmMmAuYKxk5E9Mn6nnbxR4WTniweKYeDv2w39zge/tss+36Moeuio9d2eoyRFqXhq=rUGtDwX3fzXV0wV+dUojxOYQ57GQDl7+68PwHPcX794OIXuGOxBk83lNIYIcYz3Vc7qnGy6tFTz7f6S9+EZuSGN7TY5VKkT2eWye46DebrDF9Nwzs/FVpTzbPD/KGDIBtFIbazglhKoWe9txqb1QW8vFNNVOEhYa+cViO3g8ZmY1wG960US2zsnX5Eg8Q5a4h3+sxaJSJ4ONiXZWJuAgKRQzcrszu+M5C0ZVoCOv1goEgfNJeSm/yFc/3rx8wmeWLIJFtq65B7zF72HRKq1nthHAguaxXr20nguHpKkDpNBDVa=WwuJsbeGI", + "x-amz-security-token": + 
"hkMsi6/bfHyBKrSeM/H0hoXeyx8z1yZ/mJ0c+B/TqYx=tTJDjnQWtul38Z9iVJjeH1HB4VT2c=2o3yE3o=I9kmFs/lJDR85qWjB8e5asY/WbjyRpbAzmDipQpboIcYnUYg55bxrQFidV/q8gZa5A9MpR3n=op1C0lWjeBqcEJxpevNZxteSQTQfeGsi98Cdf+On=/SINVlKrNhMnmMsDOLMGx1YYt9d4UsRg1jtVrwxL4Vd/F7aHCZySAXKv+1rkhACR023wpa3dhp+xirGJxSO9LWwvcrTdM4xJo4RS8B40tGENOJ1NKixUJxwN/6og58Oft/u==uleR89Ja=7zszK2H7tX3DqmEYNvNDYQh/7VBRe5otghQtPwJzWpXAGk+Vme4hPPM5K6axH2LxipXzRiIV=oxNs0upKNu1FvuzbCQmkQdKQVmXl0344vngngrgN7wkEfrYtmKwICmpAS0cbW9jdSClgziVo4NaFc/hsIfok=4UA3hVtxIdw74lFNXD0RR7HKXkFPLIn85M7peOZsqMUCfO4gxr7KCfabszQQf0YcP/mt79XK50=WrSJG7oUyn+clUySPhlegqHAfT9a50uSK5WiQmOnGNGLF4wDO10sqKN1xRgQbYHPtwL+Ye0EMisvmYA3==kScorTSGaQWyibSWXAvxq9+IVGBYShVJ6S7DmTT=u/2d/fGEge+Xmbxlftza=cxJ=Md=k1Q71Lp6Boa56d7wtYRpK6tXHJ9I/2r7rN1E4OtwkFqb7SfWV3UXwyUrXyaaNPTIbqnAHnbgUGtuU6pgICpfREiIxVqvKBf6ErbxHRmMmAuYKxk5E9Mn6nnbxR4WTniweKYeDv2w39zge/tss+36Moeuio9d2eoyRFqXhq=rUGtDwX3fzXV0wV+dUojxOYQ57GQDl7+68PwHPcX794OIXuGOxBk83lNIYIcYz3Vc7qnGy6tFTz7f6S9+EZuSGN7TY5VKkT2eWye46DebrDF9Nwzs/FVpTzbPD/KGDIBtFIbazglhKoWe9txqb1QW8vFNNVOEhYa+cViO3g8ZmY1wG960US2zsnX5Eg8Q5a4h3+sxaJSJ4ONiXZWJuAgKRQzcrszu+M5C0ZVoCOv1goEgfNJeSm/yFc/3rx8wmeWLIJFtq65B7zF72HRKq1nthHAguaxXr20nguHpKkDpNBDVa=WwuJsbeGI", "body": "" } # desired response form the origin server -response_header = { - "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "success!" -} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "success!"} # add request/response server.addResponse("sessionlog.log", request_header, response_header) @@ -50,8 +47,7 @@ ts.Disk.remap_config.AddLine( f'map http://www.example.com http://127.0.0.1:{server.Variables.Port} \ @plugin=s3_auth.so \ - @pparam=--config @pparam={Test.RunDirectory}/v4-parse-test.test_input' -) + @pparam=--config @pparam={Test.RunDirectory}/v4-parse-test.test_input') # Test Case tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/server_push_preload/server_push_preload.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/server_push_preload/server_push_preload.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/server_push_preload/server_push_preload.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/server_push_preload/server_push_preload.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,8 +22,7 @@ Test.SkipUnless( Condition.PluginExists('server_push_preload.so'), - Condition.HasProgram("nghttp", - "Nghttp need to be installed on system for this test to work"), + Condition.HasProgram("nghttp", "Nghttp need to be installed on system for this test to work"), ) Test.testName = "server_push_preload" Test.ContinueOnFail = True @@ -34,22 +33,36 @@ microserver = Test.MakeOriginServer("microserver") # index.html -microserver.addResponse("sessionfile.log", - {"headers": "GET /index.html HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nLink: ; rel=preload; as=style; nopush\r\nLink: ; rel=preload; as=script\r\n\r\n", - "body": "\r\n\r\n\r\n\r\n\r\n\r\nServer Push Preload Test\r\n\r\n\r\n"}) +microserver.addResponse( + "sessionfile.log", { + "headers": "GET /index.html HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "body": "" + }, { + "headers": + "HTTP/1.1 200 OK\r\nConnection: close\r\nLink: ; rel=preload; as=style; nopush\r\nLink: ; rel=preload; as=script\r\n\r\n", + "body": + "\r\n\r\n\r\n\r\n\r\n\r\nServer Push Preload 
Test\r\n\r\n\r\n" + }) # /app/style.css -microserver.addResponse("sessionfile.log", - {"headers": "GET /app/style.css HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "body": "body { font-weight: bold; }\r\n"}) +microserver.addResponse( + "sessionfile.log", { + "headers": "GET /app/style.css HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "body": "body { font-weight: bold; }\r\n" + }) # /app/script.js -microserver.addResponse("sessionfile.log", - {"headers": "GET /app/script.js HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "body": "function do_nothing() { return; }\r\n"}) +microserver.addResponse( + "sessionfile.log", { + "headers": "GET /app/script.js HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "body": "" + }, { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "body": "function do_nothing() { return; }\r\n" + }) # ---- # Setup ATS @@ -58,22 +71,18 @@ ts.addDefaultSSLFiles() -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}/ @plugin=server_push_preload.so'.format( - microserver.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/ @plugin=server_push_preload.so'.format(microserver.Variables.Port)) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http2|server_push_preload', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http2.active_timeout_in': 3, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http2|server_push_preload', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http2.active_timeout_in': 3, + }) # ---- # Test Cases @@ -81,11 +90,9 @@ # Test Case 0: Server Push by Link header tr = Test.AddTestRun() -tr.Processes.Default.Command = "nghttp -vs --no-dep 'https://127.0.0.1:{0}/index.html'".format( - ts.Variables.ssl_port) +tr.Processes.Default.Command = "nghttp -vs --no-dep 'https://127.0.0.1:{0}/index.html'".format(ts.Variables.ssl_port) tr.Processes.Default.ReturnCode = 0 -tr.Processes.Default.StartBefore( - microserver, ready=When.PortOpen(microserver.Variables.Port)) +tr.Processes.Default.StartBefore(microserver, ready=When.PortOpen(microserver.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.Streams.stdout = "gold/server_push_preload_0_stdout.gold" tr.StillRunningAfter = microserver diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,9 +25,7 @@ # Reload remap rule with slice plugin # Request content through the slice plugin -Test.SkipUnless( - 
Condition.PluginExists('slice.so'), -) +Test.SkipUnless(Condition.PluginExists('slice.so'),) Test.ContinueOnFail = False # configure origin server @@ -37,59 +35,49 @@ ts = Test.MakeATSProcess("ts", command="traffic_server") # default root -request_header_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: ats\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } +request_header_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: ats\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_chk = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} server.addResponse("sessionlog.json", request_header_chk, response_header_chk) block_bytes = 7 body = "lets go surfin now" -request_header = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: origin\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body, - } +request_header = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: origin\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "path"\r\n' + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body, +} server.addResponse("sessionlog.json", request_header, response_header) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x http://127.0.0.1:{}'.format(ts.Variables.port) # set up whole asset fetch into cache -ts.Disk.remap_config.AddLines([ - f'map http://preload/ http://127.0.0.1:{server.Variables.Port}', - f'map http://slice_only/ http://127.0.0.1:{server.Variables.Port}', - f'map http://slice/ http://127.0.0.1:{server.Variables.Port}' + - f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes}', - f'map http://slicehdr/ http://127.0.0.1:{server.Variables.Port}' + - f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes}' + - ' @pparam=--skip-header=SkipSlice', -]) +ts.Disk.remap_config.AddLines( + [ + f'map http://preload/ http://127.0.0.1:{server.Variables.Port}', + f'map http://slice_only/ http://127.0.0.1:{server.Variables.Port}', + f'map http://slice/ http://127.0.0.1:{server.Variables.Port}' + + f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes}', + f'map http://slicehdr/ http://127.0.0.1:{server.Variables.Port}' + + f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes}' + ' @pparam=--skip-header=SkipSlice', + ]) ts.Disk.records_config.update({ 'proxy.config.diags.debug.enabled': 0, diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_error.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_error.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_error.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_error.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,9 +25,7 @@ # Reload remap rule with slice plugin # Request content through the slice plugin -Test.SkipUnless( - Condition.PluginExists('slice.so'), -) +Test.SkipUnless(Condition.PluginExists('slice.so'),) Test.ContinueOnFail = False # configure origin server @@ -39,22 +37,17 @@ 
body = "the quick brown fox" # len 19 # default root -request_header_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes=0-\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_chk = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body, - } +request_header_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes=0-\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_chk = { + "headers": "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body, +} server.addResponse("sessionlog.json", request_header_chk, response_header_chk) @@ -68,217 +61,174 @@ # Mismatch etag -request_header_etag0 = {"headers": - "GET /etag HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range0) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_etag0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - 'Etag: "etag0"\r\n' + - "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body0, - } +request_header_etag0 = { + "headers": + "GET /etag HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range0) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_etag0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + 'Etag: "etag0"\r\n' + + "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body0, +} server.addResponse("sessionlog.json", request_header_etag0, response_header_etag0) -request_header_etag1 = {"headers": - "GET /etag HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range1) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_etag1 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - 'Etag: "etag1"\r\n' + - "Content-Range: bytes {}/{}\r\n".format(range1, len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body1, - } +request_header_etag1 = { + "headers": + "GET /etag HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range1) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_etag1 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + 'Etag: "etag1"\r\n' + + "Content-Range: bytes {}/{}\r\n".format(range1, len(body)) + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body1, +} server.addResponse("sessionlog.json", request_header_etag1, response_header_etag1) # mismatch Last-Modified -request_header_lm0 = {"headers": - "GET /lastmodified HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range0) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_lm0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - "Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" + - "Content-Range: bytes {}/{}\r\n".format(range0, 
len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body0, - } +request_header_lm0 = { + "headers": + "GET /lastmodified HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range0) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_lm0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + "Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" + + "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body0, +} server.addResponse("sessionlog.json", request_header_lm0, response_header_lm0) -request_header_lm1 = {"headers": - "GET /lastmodified HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range1) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_lm1 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - "Last-Modified: Tue, 08 Apr 2019 18:00:00 GMT\r\n" + - "Content-Range: bytes {}/{}\r\n".format(range1, len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body1, - } +request_header_lm1 = { + "headers": + "GET /lastmodified HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range1) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_lm1 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + "Last-Modified: Tue, 08 Apr 2019 18:00:00 GMT\r\n" + + "Content-Range: bytes {}/{}\r\n".format(range1, len(body)) + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body1, +} server.addResponse("sessionlog.json", request_header_lm1, response_header_lm1) # non 206 slice block -request_header_n206_0 = {"headers": - "GET /non206 HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range0) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_n206_0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - 'Etag: "etag"\r\n' + - "Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" + - "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body0, - } +request_header_n206_0 = { + "headers": + "GET /non206 HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range0) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_n206_0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + 'Etag: "etag"\r\n' + + "Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" + "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body0, +} server.addResponse("sessionlog.json", request_header_n206_0, response_header_n206_0) -request_header_n206_1 = {"headers": - "GET /non206 HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range1) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_n206_1 = {"headers": - "HTTP/1.1 502 Bad Gateway\r\n" + - "Connection: close\r\n" + - "\r\n", 
- "timestamp": "1469733493.993", - "body": body1, - } +request_header_n206_1 = { + "headers": + "GET /non206 HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range1) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_n206_1 = { + "headers": "HTTP/1.1 502 Bad Gateway\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body1, +} server.addResponse("sessionlog.json", request_header_n206_1, response_header_n206_1) # mismatch content-range -request_header_crr0 = {"headers": - "GET /crr HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range0) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_crr0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - "Etag: crr\r\n" + - "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body0, - } +request_header_crr0 = { + "headers": + "GET /crr HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range0) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_crr0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + "Etag: crr\r\n" + + "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body0, +} server.addResponse("sessionlog.json", request_header_crr0, response_header_crr0) -request_header_crr1 = {"headers": - "GET /crr HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range1) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_crr1 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - "Etag: crr\r\n" + - "Content-Range: bytes {}/{}\r\n".format(range1, len(body) - 1) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body1, - } +request_header_crr1 = { + "headers": + "GET /crr HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range1) + + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_crr1 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + "Etag: crr\r\n" + + "Content-Range: bytes {}/{}\r\n".format(range1, + len(body) - 1) + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body1, +} server.addResponse("sessionlog.json", request_header_crr1, response_header_crr1) # 404 internal block -request_header_internal404_0 = {"headers": - "GET /internal404 HTTP/1.1\r\n" + - "Host: ats\r\n" + - "Range: bytes={}\r\n".format(range0) + - "X-Slicer-Info: full content request\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_internal404_0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - 'Etag: "etag"\r\n' + - "Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" + - "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + - "Cache-Control: max-age=500\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body0, - } +request_header_internal404_0 = { + "headers": + "GET /internal404 HTTP/1.1\r\n" + "Host: ats\r\n" + "Range: bytes={}\r\n".format(range0) 
+ + "X-Slicer-Info: full content request\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_internal404_0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + 'Etag: "etag"\r\n' + + "Last-Modified: Tue, 08 May 2018 15:49:41 GMT\r\n" + "Content-Range: bytes {}/{}\r\n".format(range0, len(body)) + + "Cache-Control: max-age=500\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body0, +} server.addResponse("sessionlog.json", request_header_internal404_0, response_header_internal404_0) @@ -286,9 +236,7 @@ # set up whole asset fetch into cache ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{}'.format(server.Variables.Port) + - ' @plugin=slice.so @pparam=--blockbytes-test={}'.format(blockbytes) -) + 'map / http://127.0.0.1:{}'.format(server.Variables.Port) + ' @plugin=slice.so @pparam=--blockbytes-test={}'.format(blockbytes)) # minimal configuration ts.Disk.records_config.update({ @@ -300,8 +248,8 @@ # taken from the slice plug code ts.Disk.diags_log.Content = Testers.ContainsExpression('reason="Mismatch block Etag', "Mismatch block etag") ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Mismatch block Last-Modified', "Mismatch block Last-Modified") -ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="Mismatch/Bad block Content-Range', - "Mismatch/Bad block Content-Range") +ts.Disk.diags_log.Content += Testers.ContainsExpression( + 'reason="Mismatch/Bad block Content-Range', "Mismatch/Bad block Content-Range") ts.Disk.diags_log.Content += Testers.ContainsExpression('reason="404 internal block response', "404 internal block response") # 0 Test - Etag mismatch test diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_prefetch.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_prefetch.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_prefetch.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_prefetch.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -43,22 +43,17 @@ body = "lets go surfin now" bodylen = len(body) -request_header = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: origin\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "Cache-Control: public, max-age=5\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body, - } +request_header = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: origin\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "Cache-Control: public, max-age=5\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body, +} server.addResponse("sessionlog.json", request_header, response_header) @@ -67,56 +62,52 @@ for i in range(bodylen // block_bytes + 1): b0 = i * block_bytes b1 = b0 + block_bytes - 1 - req_header = {"headers": - "GET /path HTTP/1.1\r\n" + - "Host: *\r\n" + - "Accept: */*\r\n" + - f"Range: bytes={b0}-{b1}\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "" - } + req_header = { + "headers": "GET /path HTTP/1.1\r\n" + "Host: *\r\n" + "Accept: */*\r\n" + f"Range: bytes={b0}-{b1}\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "" + } if (b1 > bodylen - 1): b1 = bodylen - 1 - resp_header = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: public, 
max-age=5\r\n" + - f"Content-Range: bytes {b0}-{b1}/{bodylen}\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body[b0:b1 + 1] - } + resp_header = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: public, max-age=5\r\n" + + f"Content-Range: bytes {b0}-{b1}/{bodylen}\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body[b0:b1 + 1] + } server.addResponse("sessionlog.json", req_header, resp_header) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x http://127.0.0.1:{} -H "x-debug: x-cache"'.format(ts.Variables.port) -ts.Disk.remap_config.AddLines([ - f'map http://sliceprefetchbytes1/ http://127.0.0.1:{server.Variables.Port}' + - f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes_1} @pparam=--prefetch-count=1 \\' + - ' @plugin=cache_range_requests.so', - f'map http://sliceprefetchbytes2/ http://127.0.0.1:{server.Variables.Port}' + - f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes_2} @pparam=--prefetch-count=3 \\' + - ' @plugin=cache_range_requests.so', -]) +ts.Disk.remap_config.AddLines( + [ + f'map http://sliceprefetchbytes1/ http://127.0.0.1:{server.Variables.Port}' + + f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes_1} @pparam=--prefetch-count=1 \\' + + ' @plugin=cache_range_requests.so', + f'map http://sliceprefetchbytes2/ http://127.0.0.1:{server.Variables.Port}' + + f' @plugin=slice.so @pparam=--blockbytes-test={block_bytes_2} @pparam=--prefetch-count=3 \\' + + ' @plugin=cache_range_requests.so', + ]) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.logging_yaml.AddLines([ - 'logging:', - ' formats:', - ' - name: cache', - ' format: "%<{Content-Range}psh> %<{X-Cache}psh>"', - ' logs:', - ' - filename: cache', - ' format: cache', - ' mode: ascii', -]) - -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'slice|cache_range_requests|xdebug', -}) +ts.Disk.logging_yaml.AddLines( + [ + 'logging:', + ' formats:', + ' - name: cache', + ' format: "%<{Content-Range}psh> %<{X-Cache}psh>"', + ' logs:', + ' - filename: cache', + ' format: cache', + ' mode: ascii', + ]) + +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'slice|cache_range_requests|xdebug', + }) # 0 Test - Full object slice (miss) with only block 14-20 prefetched in background, block bytes= 7 tr = Test.AddTestRun("Full object slice: first block is miss, only block 14-20 prefetched") @@ -185,9 +176,6 @@ cache_file = os.path.join(ts.Variables.LOGDIR, 'cache.log') # Wait for log file to appear, then wait one extra second to make sure TS is done writing it. 
test_run = Test.AddTestRun("Checking debug logs for background fetches") -test_run.Processes.Default.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - cache_file -) +test_run.Processes.Default.Command = (os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + cache_file) ts.Disk.File(cache_file).Content = "gold/slice_prefetch.gold" test_run.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_purge.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_purge.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_purge.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_purge.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -43,30 +43,30 @@ self._ts = Test.MakeATSProcess(f"ts_{self._num}", enable_cache=True) if (self._ref_block): - self._ts.Disk.remap_config.AddLines([ - f"map /ref/block http://127.0.0.1:{self._server.Variables.http_port} \ + self._ts.Disk.remap_config.AddLines( + [ + f"map /ref/block http://127.0.0.1:{self._server.Variables.http_port} \ @plugin=slice.so @pparam=--blockbytes-test=10 \ @plugin=cache_range_requests.so", - ]) + ]) else: - self._ts.Disk.remap_config.AddLines([ - f"map /ref/block http://127.0.0.1:{self._server.Variables.http_port} \ + self._ts.Disk.remap_config.AddLines( + [ + f"map /ref/block http://127.0.0.1:{self._server.Variables.http_port} \ @plugin=slice.so @pparam=--blockbytes-test=10 @pparam=--ref-relative \ @plugin=cache_range_requests.so", - ]) + ]) - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|slice|cache_range_requests', - }) + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|slice|cache_range_requests', + }) def slice_purge(self): tr = Test.AddTestRun() - tr.AddVerifierClientProcess( - f"client_{self._num}", - SlicePurgeRequestTest.replay_file, - http_ports=[self._ts.Variables.port]) + tr.AddVerifierClientProcess(f"client_{self._num}", SlicePurgeRequestTest.replay_file, http_ports=[self._ts.Variables.port]) tr.Processes.Default.StartBefore(self._server) tr.Processes.Default.StartBefore(self._ts) @@ -74,10 +74,7 @@ def slice_purge_ref(self): tr = Test.AddTestRun() - tr.AddVerifierClientProcess( - "client_ref", - SlicePurgeRequestTest.replay_ref_file, - http_ports=[self._ts.Variables.port]) + tr.AddVerifierClientProcess("client_ref", SlicePurgeRequestTest.replay_ref_file, http_ports=[self._ts.Variables.port]) tr.Processes.Default.StartBefore(self._server) tr.Processes.Default.StartBefore(self._ts) @@ -85,10 +82,7 @@ def slice_purge_no_ref(self): tr = Test.AddTestRun() - tr.AddVerifierClientProcess( - "client_no_ref", - SlicePurgeRequestTest.replay_no_ref_file, - http_ports=[self._ts.Variables.port]) + tr.AddVerifierClientProcess("client_no_ref", SlicePurgeRequestTest.replay_no_ref_file, http_ports=[self._ts.Variables.port]) tr.Processes.Default.StartBefore(self._server) tr.Processes.Default.StartBefore(self._ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_regex.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_regex.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_regex.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_regex.test.py 2024-04-03 15:38:30.000000000 +0000 @@ 
-25,9 +25,7 @@ # Reload remap rule with slice plugin # Request content through the slice plugin -Test.SkipUnless( - Condition.PluginExists('slice.so'), -) +Test.SkipUnless(Condition.PluginExists('slice.so'),) Test.ContinueOnFail = False # configure origin server @@ -37,67 +35,53 @@ ts = Test.MakeATSProcess("ts", command="traffic_server", enable_cache=False) # default root -request_header_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } +request_header_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_chk = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} server.addResponse("sessionlog.json", request_header_chk, response_header_chk) body = "lets go surfin now" -request_header_txt = {"headers": - "GET /slice.txt HTTP/1.1\r\n" + - "Host: slice\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_txt = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "Cache-Control: max-age=500\r\n" + - "X-Info: notsliced\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body, - } +request_header_txt = { + "headers": "GET /slice.txt HTTP/1.1\r\n" + "Host: slice\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_txt = { + "headers": + "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + 'Etag: "path"\r\n' + "Cache-Control: max-age=500\r\n" + + "X-Info: notsliced\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body, +} server.addResponse("sessionlog.json", request_header_txt, response_header_txt) -request_header_mp4 = {"headers": - "GET /slice.mp4 HTTP/1.1\r\n" + - "Host: sliced\r\n" + - "Range: bytes=0-99\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -response_header_mp4 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Connection: close\r\n" + - 'Etag: "path"\r\n' + - "Content-Range: bytes 0-{}/{}\r\n".format(len(body) - 1, len(body)) + - "Cache-Control: max-age=500\r\n" + - "X-Info: sliced\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": body, - } +request_header_mp4 = { + "headers": "GET /slice.mp4 HTTP/1.1\r\n" + "Host: sliced\r\n" + "Range: bytes=0-99\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +response_header_mp4 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Connection: close\r\n" + 'Etag: "path"\r\n' + + "Content-Range: bytes 0-{}/{}\r\n".format(len(body) - 1, len(body)) + "Cache-Control: max-age=500\r\n" + + "X-Info: sliced\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": body, +} server.addResponse("sessionlog.json", request_header_mp4, response_header_mp4) @@ -106,28 +90,25 @@ block_bytes = 100 # set up whole asset fetch into cache -ts.Disk.remap_config.AddLines([ - 'map http://exclude/ http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=slice.so' + - ' @pparam=--blockbytes-test={}'.format(block_bytes) + - ' @pparam=--exclude-regex=\\.txt' - ' @pparam=--remap-host=sliced', - 'map http://include/ http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=slice.so' + - ' @pparam=--blockbytes-test={}'.format(block_bytes) + - ' @pparam=--include-regex=\\.mp4' - ' 
@pparam=--remap-host=sliced', - 'map http://sliced/ http://127.0.0.1:{}/'.format(server.Variables.Port), -]) - +ts.Disk.remap_config.AddLines( + [ + 'map http://exclude/ http://127.0.0.1:{}/'.format(server.Variables.Port) + ' @plugin=slice.so' + + ' @pparam=--blockbytes-test={}'.format(block_bytes) + ' @pparam=--exclude-regex=\\.txt' + ' @pparam=--remap-host=sliced', + 'map http://include/ http://127.0.0.1:{}/'.format(server.Variables.Port) + ' @plugin=slice.so' + + ' @pparam=--blockbytes-test={}'.format(block_bytes) + ' @pparam=--include-regex=\\.mp4' + ' @pparam=--remap-host=sliced', + 'map http://sliced/ http://127.0.0.1:{}/'.format(server.Variables.Port), + ]) # minimal configuration -ts.Disk.records_config.update({ - # 'proxy.config.diags.debug.enabled': 1, - # 'proxy.config.diags.debug.tags': 'slice', - 'proxy.config.http.insert_age_in_response': 0, - 'proxy.config.http.response_via_str': 0, -}) +ts.Disk.records_config.update( + { + # 'proxy.config.diags.debug.enabled': 1, + # 'proxy.config.diags.debug.tags': 'slice', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.http.response_via_str': 0, + }) # 0 Test - Exclude: ensure txt passes through tr = Test.AddTestRun("Exclude - asset passed through") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_rm_range.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_rm_range.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_rm_range.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_rm_range.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -38,27 +38,26 @@ """Configure Traffic Server.""" self._ts = Test.MakeATSProcess("ts", enable_cache=False) - self._ts.Disk.remap_config.AddLines([ - f"map /no/range http://127.0.0.1:{self._server.Variables.http_port} \ + self._ts.Disk.remap_config.AddLines( + [ + f"map /no/range http://127.0.0.1:{self._server.Variables.http_port} \ @plugin=slice.so @pparam=--blockbytes-test=10 @pparam=--strip-range-for-head \ @plugin=cache_range_requests.so", - f"map /with/range http://127.0.0.1:{self._server.Variables.http_port} \ + f"map /with/range http://127.0.0.1:{self._server.Variables.http_port} \ @plugin=slice.so @pparam=--blockbytes-test=10 \ @plugin=cache_range_requests.so", - ]) + ]) - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|slice|cache_range_requests', - }) + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|slice|cache_range_requests', + }) def _test_head_request_range_header(self): tr = Test.AddTestRun() - tr.AddVerifierClientProcess( - "client", - SliceStripRangeForHeadRequestTest.replay_file, - http_ports=[self._ts.Variables.port]) + tr.AddVerifierClientProcess("client", SliceStripRangeForHeadRequestTest.replay_file, http_ports=[self._ts.Variables.port]) tr.Processes.Default.StartBefore(self._server) tr.Processes.Default.StartBefore(self._ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_selfhealing.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_selfhealing.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/slice/slice_selfhealing.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/slice/slice_selfhealing.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,15 +27,14 @@ # string representation of a date according to 
RFC 1123 (HTTP/1.1). weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()] month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month - 1] - return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, dt.day, month, - dt.year, dt.hour, dt.minute, dt.second) + return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (weekday, dt.day, month, dt.year, dt.hour, dt.minute, dt.second) + # Test description: # Preload the cache with the entire asset to be range requested. # Reload remap rule with slice plugin # Request content through the slice plugin - Test.SkipUnless( Condition.PluginExists('slice.so'), Condition.PluginExists('cache_range_requests.so'), @@ -50,118 +49,92 @@ ts = Test.MakeATSProcess("ts", command="traffic_server") # default root -req_header_chk = {"headers": - "GET / HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: none\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_chk = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Connection: close\r\n" + - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } +req_header_chk = { + "headers": "GET / HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: none\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_chk = { + "headers": "HTTP/1.1 200 OK\r\n" + "Connection: close\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} server.addResponse("sessionlog.json", req_header_chk, res_header_chk) # set up slice plugin with remap host into cache_range_requests -ts.Disk.remap_config.AddLines([ - f'map http://slice/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=slice.so @pparam=--blockbytes-test=3 @pparam=--remap-host=crr', - f'map http://crr/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=cache_range_requests.so @pparam=--consider-ims', - f'map http://slicehdr/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=slice.so @pparam=--blockbytes-test=3' + - ' @pparam=--remap-host=crrhdr @pparam=--crr-ims-header=crr-foo', - f'map http://crrhdr/ http://127.0.0.1:{server.Variables.Port}/' - ' @plugin=cache_range_requests.so @pparam=--ims-header=crr-foo', -]) +ts.Disk.remap_config.AddLines( + [ + f'map http://slice/ http://127.0.0.1:{server.Variables.Port}/' + + ' @plugin=slice.so @pparam=--blockbytes-test=3 @pparam=--remap-host=crr', + f'map http://crr/ http://127.0.0.1:{server.Variables.Port}/' + ' @plugin=cache_range_requests.so @pparam=--consider-ims', + f'map http://slicehdr/ http://127.0.0.1:{server.Variables.Port}/' + ' @plugin=slice.so @pparam=--blockbytes-test=3' + + ' @pparam=--remap-host=crrhdr @pparam=--crr-ims-header=crr-foo', + f'map http://crrhdr/ http://127.0.0.1:{server.Variables.Port}/' + ' @plugin=cache_range_requests.so @pparam=--ims-header=crr-foo', + ]) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'cache_range_requests|slice', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'cache_range_requests|slice', + }) curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{}'.format(ts.Variables.port) + ' -H "x-debug: x-cache"' # Test case: 2nd slice out of date (refetch and continue) -req_header_2ndold1 = {"headers": - "GET /second HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: etagold-1\r\n" + - "Range: bytes=3-5\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - 
-res_header_2ndold1 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 3-4/5\r\n" + - 'Etag: "etagold"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "aa" - } +req_header_2ndold1 = { + "headers": "GET /second HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: etagold-1\r\n" + "Range: bytes=3-5\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_2ndold1 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 3-4/5\r\n" + 'Etag: "etagold"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "aa" +} server.addResponse("sessionlog.json", req_header_2ndold1, res_header_2ndold1) -req_header_2ndnew0 = {"headers": - "GET /second HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: etagnew-0\r\n" + - "Range: bytes=0-2\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_2ndnew0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 0-2/5\r\n" + - 'Etag: "etagnew"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "bbb" - } +req_header_2ndnew0 = { + "headers": "GET /second HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: etagnew-0\r\n" + "Range: bytes=0-2\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_2ndnew0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 0-2/5\r\n" + 'Etag: "etagnew"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "bbb" +} server.addResponse("sessionlog.json", req_header_2ndnew0, res_header_2ndnew0) -req_header_2ndnew1 = {"headers": - "GET /second HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: etagnew-1\r\n" + - "Range: bytes=3-5\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_2ndnew1 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 3-4/5\r\n" + - 'Etag: "etagnew"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "bb" - } +req_header_2ndnew1 = { + "headers": "GET /second HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: etagnew-1\r\n" + "Range: bytes=3-5\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_2ndnew1 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 3-4/5\r\n" + 'Etag: "etagnew"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "bb" +} server.addResponse("sessionlog.json", req_header_2ndnew1, res_header_2ndnew1) @@ -205,75 +178,54 @@ # Test case: reference slice out of date (abort connection, heal reference) -req_header_refold0 = {"headers": - "GET /reference HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: etagold-0\r\n" + - "Range: bytes=0-2\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_refold0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 0-2/5\r\n" + - 'Etag: 
"etagold"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "aaa" - } +req_header_refold0 = { + "headers": "GET /reference HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: etagold-0\r\n" + "Range: bytes=0-2\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_refold0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 0-2/5\r\n" + 'Etag: "etagold"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "aaa" +} server.addResponse("sessionlog.json", req_header_refold0, res_header_refold0) -req_header_refnew0 = {"headers": - "GET /reference HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: etagnew-0\r\n" + - "Range: bytes=0-2\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_refnew0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 0-2/5\r\n" + - 'Etag: "etagnew"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "bbb" - } +req_header_refnew0 = { + "headers": "GET /reference HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: etagnew-0\r\n" + "Range: bytes=0-2\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_refnew0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 0-2/5\r\n" + 'Etag: "etagnew"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "bbb" +} server.addResponse("sessionlog.json", req_header_refnew0, res_header_refnew0) -req_header_refnew1 = {"headers": - "GET /reference HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: etagnew-1\r\n" + - "Range: bytes=3-5\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_refnew1 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 3-4/5\r\n" + - 'Etag: "etagnew"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "bb" - } +req_header_refnew1 = { + "headers": "GET /reference HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: etagnew-1\r\n" + "Range: bytes=3-5\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_refnew1 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 3-4/5\r\n" + 'Etag: "etagnew"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "bb" +} server.addResponse("sessionlog.json", req_header_refnew1, res_header_refnew1) @@ -317,25 +269,18 @@ # Request results in 200, not 206 (server not support range requests) -req_header_200 = {"headers": - "GET /code200 HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: code200\r\n" + - "Range: bytes=3-5\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_200 = {"headers": - "HTTP/1.1 200 OK\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - 'Etag: "etag"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "ccccc" - } +req_header_200 = { + "headers": "GET /code200 HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: code200\r\n" + "Range: bytes=3-5\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_200 = { + 
"headers": "HTTP/1.1 200 OK\r\n" + "Cache-Control: max-age=5000\r\n" + "Connection: close\r\n" + 'Etag: "etag"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "ccccc" +} server.addResponse("sessionlog.json", req_header_200, res_header_200) @@ -351,27 +296,20 @@ # Test for asset gone # Preload -req_header_assetgone0 = {"headers": - "GET /assetgone HTTP/1.1\r\n" + - "Host: www.example.com\r\n" + - "uuid: assetgone-0\r\n" + - "Range: bytes=0-2\r\n" - "\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header_assetgone0 = {"headers": - "HTTP/1.1 206 Partial Content\r\n" + - "Accept-Ranges: bytes\r\n" + - "Cache-Control: max-age=5000\r\n" + - "Connection: close\r\n" + - "Content-Range: bytes 0-2/5\r\n" + - 'Etag: "etag"\r\n' + - "\r\n", - "timestamp": "1469733493.993", - "body": "aaa" - } +req_header_assetgone0 = { + "headers": "GET /assetgone HTTP/1.1\r\n" + "Host: www.example.com\r\n" + "uuid: assetgone-0\r\n" + "Range: bytes=0-2\r\n" + "\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header_assetgone0 = { + "headers": + "HTTP/1.1 206 Partial Content\r\n" + "Accept-Ranges: bytes\r\n" + "Cache-Control: max-age=5000\r\n" + + "Connection: close\r\n" + "Content-Range: bytes 0-2/5\r\n" + 'Etag: "etag"\r\n' + "\r\n", + "timestamp": "1469733493.993", + "body": "aaa" +} server.addResponse("sessionlog.json", req_header_assetgone0, res_header_assetgone0) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/sslheaders/sslheaders.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -30,8 +30,7 @@ server = Test.MakeOriginServer("server", options={'--load': Test.TestDirectory + '/observer.py'}) -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) @@ -42,37 +41,30 @@ ts.addDefaultSSLFiles() # ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. 
- 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.server_ports': ( - 'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl' - .format(ts.Variables.port, ts.Variables.ssl_port)), -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - 'map http://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) - -ts.Disk.plugin_config.AddLine( - 'sslheaders.so SSL-Client-ID=client.subject' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.server_ports': + ( + 'ipv4:{0} ipv4:{1}:proto=http2;http:ssl ipv6:{0} ipv6:{1}:proto=http2;http:ssl'.format( + ts.Variables.port, ts.Variables.ssl_port)), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine('map http://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map https://bar.com http://127.0.0.1:{0}'.format(server.Variables.Port)) + +ts.Disk.plugin_config.AddLine('sslheaders.so SSL-Client-ID=client.subject') tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.Command = ( 'curl -H "SSL-Client-ID: My Fake Client ID" --verbose --ipv4 --insecure --header "Host: bar.com"' + - ' https://localhost:{}'.format(ts.Variables.ssl_port) -) + ' https://localhost:{}'.format(ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/stek_share/stek_share.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/stek_share/stek_share.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/stek_share/stek_share.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/stek_share/stek_share.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -40,16 +40,8 @@ key_path = os.path.join(Test.RunDirectory, 'self_signed.key') server_list_path = os.path.join(Test.RunDirectory, 'server_list.yaml') -request_header1 = { - 'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': '' -} -response_header1 = { - 'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': 'curl test' -} +request_header1 = {'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', 'timestamp': '1469733493.993', 'body': ''} +response_header1 = {'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '1469733493.993', 'body': 'curl test'} server.addResponse('sessionlog.json', request_header1, response_header1) stek_share_conf_path_1 = os.path.join(ts1.Variables.CONFIGDIR, 'stek_share_conf.yaml') @@ -64,127 +56,202 @@ ts4.Disk.File(stek_share_conf_path_4, id="stek_share_conf_4", typename="ats:config") ts5.Disk.File(stek_share_conf_path_5, id="stek_share_conf_5", typename="ats:config") -ts1.Disk.stek_share_conf_1.AddLines([ 
- 'server_id: 1', - 'address: 127.0.0.1', - 'port: 10001', - 'asio_thread_pool_size: 4', - 'heart_beat_interval: 100', - 'election_timeout_lower_bound: 200', - 'election_timeout_upper_bound: 400', - 'reserved_log_items: 5', - 'snapshot_distance: 5', - 'client_req_timeout: 3000', # this is in milliseconds - 'key_update_interval: 3600', # this is in seconds - 'server_list_file: {0}'.format(server_list_path), - 'root_cert_file: {0}'.format(cert_path), - 'server_cert_file: {0}'.format(cert_path), - 'server_key_file: {0}'.format(key_path), - 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share' -]) - -ts2.Disk.stek_share_conf_2.AddLines([ - 'server_id: 2', - 'address: 127.0.0.1', - 'port: 10002', - 'asio_thread_pool_size: 4', - 'heart_beat_interval: 100', - 'election_timeout_lower_bound: 200', - 'election_timeout_upper_bound: 400', - 'reserved_log_items: 5', - 'snapshot_distance: 5', - 'client_req_timeout: 3000', # this is in milliseconds - 'key_update_interval: 3600', # this is in seconds - 'server_list_file: {0}'.format(server_list_path), - 'root_cert_file: {0}'.format(cert_path), - 'server_cert_file: {0}'.format(cert_path), - 'server_key_file: {0}'.format(key_path), - 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share' -]) - -ts3.Disk.stek_share_conf_3.AddLines([ - 'server_id: 3', - 'address: 127.0.0.1', - 'port: 10003', - 'asio_thread_pool_size: 4', - 'heart_beat_interval: 100', - 'election_timeout_lower_bound: 200', - 'election_timeout_upper_bound: 400', - 'reserved_log_items: 5', - 'snapshot_distance: 5', - 'client_req_timeout: 3000', # this is in milliseconds - 'key_update_interval: 3600', # this is in seconds - 'server_list_file: {0}'.format(server_list_path), - 'root_cert_file: {0}'.format(cert_path), - 'server_cert_file: {0}'.format(cert_path), - 'server_key_file: {0}'.format(key_path), - 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share' -]) - -ts4.Disk.stek_share_conf_4.AddLines([ - 'server_id: 4', - 'address: 127.0.0.1', - 'port: 10004', - 'asio_thread_pool_size: 4', - 'heart_beat_interval: 100', - 'election_timeout_lower_bound: 200', - 'election_timeout_upper_bound: 400', - 'reserved_log_items: 5', - 'snapshot_distance: 5', - 'client_req_timeout: 3000', # this is in milliseconds - 'key_update_interval: 3600', # this is in seconds - 'server_list_file: {0}'.format(server_list_path), - 'root_cert_file: {0}'.format(cert_path), - 'server_cert_file: {0}'.format(cert_path), - 'server_key_file: {0}'.format(key_path), - 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share' -]) - -ts5.Disk.stek_share_conf_5.AddLines([ - 'server_id: 5', - 'address: 127.0.0.1', - 'port: 10005', - 'asio_thread_pool_size: 4', - 'heart_beat_interval: 100', - 'election_timeout_lower_bound: 200', - 'election_timeout_upper_bound: 400', - 'reserved_log_items: 5', - 'snapshot_distance: 5', - 'client_req_timeout: 3000', # this is in milliseconds - 'key_update_interval: 3600', # this is in seconds - 'server_list_file: {0}'.format(server_list_path), - 'root_cert_file: {0}'.format(cert_path), - 'server_cert_file: {0}'.format(cert_path), - 'server_key_file: {0}'.format(key_path), - 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share' -]) - -ts1.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 
'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite': - 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'}) +ts1.Disk.stek_share_conf_1.AddLines( + [ + 'server_id: 1', + 'address: 127.0.0.1', + 'port: 10001', + 'asio_thread_pool_size: 4', + 'heart_beat_interval: 100', + 'election_timeout_lower_bound: 200', + 'election_timeout_upper_bound: 400', + 'reserved_log_items: 5', + 'snapshot_distance: 5', + 'client_req_timeout: 3000', # this is in milliseconds + 'key_update_interval: 3600', # this is in seconds + 'server_list_file: {0}'.format(server_list_path), + 'root_cert_file: {0}'.format(cert_path), + 'server_cert_file: {0}'.format(cert_path), + 'server_key_file: {0}'.format(key_path), + 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share', + ]) + +ts2.Disk.stek_share_conf_2.AddLines( + [ + 'server_id: 2', + 'address: 127.0.0.1', + 'port: 10002', + 'asio_thread_pool_size: 4', + 'heart_beat_interval: 100', + 'election_timeout_lower_bound: 200', + 'election_timeout_upper_bound: 400', + 'reserved_log_items: 5', + 'snapshot_distance: 5', + 'client_req_timeout: 3000', # this is in milliseconds + 'key_update_interval: 3600', # this is in seconds + 'server_list_file: {0}'.format(server_list_path), + 'root_cert_file: {0}'.format(cert_path), + 'server_cert_file: {0}'.format(cert_path), + 'server_key_file: {0}'.format(key_path), + 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share', + ]) + +ts3.Disk.stek_share_conf_3.AddLines( + [ + 'server_id: 3', + 'address: 127.0.0.1', + 'port: 10003', + 'asio_thread_pool_size: 4', + 'heart_beat_interval: 100', + 'election_timeout_lower_bound: 200', + 'election_timeout_upper_bound: 400', + 'reserved_log_items: 5', + 'snapshot_distance: 5', + 'client_req_timeout: 3000', # this is in milliseconds + 'key_update_interval: 3600', # this is in seconds + 'server_list_file: {0}'.format(server_list_path), + 'root_cert_file: {0}'.format(cert_path), + 'server_cert_file: {0}'.format(cert_path), + 'server_key_file: {0}'.format(key_path), + 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share', + ]) + +ts4.Disk.stek_share_conf_4.AddLines( + [ + 'server_id: 4', + 'address: 127.0.0.1', + 'port: 10004', + 'asio_thread_pool_size: 4', + 'heart_beat_interval: 100', + 'election_timeout_lower_bound: 200', + 'election_timeout_upper_bound: 400', + 'reserved_log_items: 5', + 'snapshot_distance: 5', + 'client_req_timeout: 3000', # this is in milliseconds + 'key_update_interval: 3600', # this is in seconds + 'server_list_file: {0}'.format(server_list_path), + 'root_cert_file: {0}'.format(cert_path), + 'server_cert_file: {0}'.format(cert_path), + 'server_key_file: {0}'.format(key_path), + 'cert_verify_str: 
/C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share', + ]) + +ts5.Disk.stek_share_conf_5.AddLines( + [ + 'server_id: 5', + 'address: 127.0.0.1', + 'port: 10005', + 'asio_thread_pool_size: 4', + 'heart_beat_interval: 100', + 'election_timeout_lower_bound: 200', + 'election_timeout_upper_bound: 400', + 'reserved_log_items: 5', + 'snapshot_distance: 5', + 'client_req_timeout: 3000', # this is in milliseconds + 'key_update_interval: 3600', # this is in seconds + 'server_list_file: {0}'.format(server_list_path), + 'root_cert_file: {0}'.format(cert_path), + 'server_cert_file: {0}'.format(cert_path), + 'server_key_file: {0}'.format(key_path), + 'cert_verify_str: /C=US/ST=IL/O=Yahoo/OU=Edge/CN=stek-share', + ]) + +ts1.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'stek_share', + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.limit': 4, + 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 1024, + 'proxy.config.ssl.session_cache.timeout': 7200, + 'proxy.config.ssl.session_cache.num_buckets': 16, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' + }) ts1.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_1)) ts1.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key') ts1.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts2.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite': - 
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'}) +ts2.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'stek_share', + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.limit': 4, + 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 1024, + 'proxy.config.ssl.session_cache.timeout': 7200, + 'proxy.config.ssl.session_cache.num_buckets': 16, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' + }) ts2.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_2)) ts2.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key') ts2.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts3.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite': - 
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'}) +ts3.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'stek_share', + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.limit': 4, + 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 1024, + 'proxy.config.ssl.session_cache.timeout': 7200, + 'proxy.config.ssl.session_cache.num_buckets': 16, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' + }) ts3.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_3)) ts3.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key') ts3.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts4.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite': - 
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'}) +ts4.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'stek_share', + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.limit': 4, + 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 1024, + 'proxy.config.ssl.session_cache.timeout': 7200, + 'proxy.config.ssl.session_cache.num_buckets': 16, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' + }) ts4.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_4)) ts4.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key') ts4.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts5.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'stek_share', 'proxy.config.exec_thread.autoconfig': 0, 'proxy.config.exec_thread.limit': 4, 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), 'proxy.config.ssl.session_cache': 2, 'proxy.config.ssl.session_cache.size': 1024, 'proxy.config.ssl.session_cache.timeout': 7200, 'proxy.config.ssl.session_cache.num_buckets': 16, 'proxy.config.ssl.server.session_ticket.enable': 1, 'proxy.config.ssl.server.cipher_suite': - 
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA'}) +ts5.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'stek_share', + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.limit': 4, + 'proxy.config.ssl.server.cert.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(Test.RunDirectory), + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 1024, + 'proxy.config.ssl.session_cache.timeout': 7200, + 'proxy.config.ssl.session_cache.num_buckets': 16, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' + }) ts5.Disk.plugin_config.AddLine('stek_share.so {0}'.format(stek_share_conf_path_5)) ts5.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=self_signed.crt ssl_key_name=self_signed.key') ts5.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/test_hooks/hook_add.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/test_hooks/hook_add.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/test_hooks/hook_add.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/test_hooks/hook_add.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ import os - Test.Summary = ''' Test adding hooks ''' @@ -25,33 +24,29 @@ server = Test.MakeOriginServer("server") -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=False, enable_cache=False) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.tags': 'test', - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.url_remap.remap_required': 0, -}) 
+ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.tags': 'test', + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.url_remap.remap_required': 0, + }) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'hook_add_plugin.so'), ts) -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) tr = Test.AddTestRun() # Probe server port to check if ready. tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) # -tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --header "Host: one" http://localhost:{0}/argh'.format(ts.Variables.port) -) +tr.Processes.Default.Command = ('curl --verbose --ipv4 --header "Host: one" http://localhost:{0}/argh'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 # Look at the debug output from the plugin diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/test_hooks/ssn_start_delay_hook.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/test_hooks/ssn_start_delay_hook.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/test_hooks/ssn_start_delay_hook.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/test_hooks/ssn_start_delay_hook.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ import os - Test.Summary = ''' Test adding hooks, and rescheduling the ssn start hook from a non-net thread ''' @@ -25,25 +24,23 @@ server = Test.MakeOriginServer("server") -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=False) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.tags': 'test', - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.http.cache.http': 0, - 'proxy.config.url_remap.remap_required': 0, -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.tags': 'test', + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.http.cache.http': 0, + 'proxy.config.url_remap.remap_required': 0, + }) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'hook_add_plugin.so'), ts, '-delay') -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) tr = Test.AddTestRun() # Probe server port to check if ready. @@ -51,9 +48,7 @@ # Probe TS cleartext port to check if ready (probing TLS port causes spurious VCONN hook triggers). 
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.port)) # -tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --header "Host: one" http://localhost:{0}/argh'.format(ts.Variables.port) -) +tr.Processes.Default.Command = ('curl --verbose --ipv4 --header "Host: one" http://localhost:{0}/argh'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 # Look at the debug output from the plugin diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/test_hooks/test_hooks.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/test_hooks/test_hooks.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/test_hooks/test_hooks.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/test_hooks/test_hooks.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,20 +16,16 @@ import os - Test.Summary = ''' Test TS API Hooks. ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2'), -) +Test.SkipUnless(Condition.HasCurlFeature('http2'),) Test.ContinueOnFail = True server = Test.MakeOriginServer("server") -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) @@ -43,48 +39,39 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.remap_required': 0, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http|test_hooks', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.remap_required': 0, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http|test_hooks', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'test_hooks.so'), ts) -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "map https://one http://127.0.0.1:{0}".format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("map https://one http://127.0.0.1:{0}".format(server.Variables.Port)) tr = Test.AddTestRun() # Probe server port to check if ready. 
tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) # -tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --header "Host: one" http://localhost:{0}/argh'.format(ts.Variables.port) -) +tr.Processes.Default.Command = ('curl --verbose --ipv4 --header "Host: one" http://localhost:{0}/argh'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http2 --insecure --header "Host: one" https://localhost:{0}/argh'.format(ts.Variables.ssl_port) -) + 'curl --verbose --ipv4 --http2 --insecure --header "Host: one" https://localhost:{0}/argh'.format(ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --http1.1 --insecure --header "Host: one" https://localhost:{0}/argh'.format(ts.Variables.ssl_port) -) + 'curl --verbose --ipv4 --http1.1 --insecure --header "Host: one" https://localhost:{0}/argh'.format(ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 # The probing of the ATS port to detect when ATS is ready may be seen by ATS as a VCONN start/close, so filter out these diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,18 +24,13 @@ Verify traffic_dump functionality. ''' -Test.SkipUnless( - Condition.PluginExists('traffic_dump.so'), -) +Test.SkipUnless(Condition.PluginExists('traffic_dump.so'),) schema_path = os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json') # Configure the origin server. replay_file = "replay/traffic_dump.yaml" -server = Test.MakeVerifierServerProcess( - "server", replay_file, - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") - +server = Test.MakeVerifierServerProcess("server", replay_file, ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") # Define ATS and configure it. 
ts = Test.MakeATSProcess("ts", command='traffic_manager', enable_tls=True) @@ -48,39 +43,36 @@ ts.Setup.Copy("ssl/signed-foo.pem") ts.Setup.Copy("ssl/signed-foo.key") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'traffic_dump|http', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.http.host_sni_policy': 2, - 'proxy.config.ssl.TLSv1_3': 0, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - - 'proxy.config.http.connect_ports': f"{server.Variables.http_port}", -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLines([ - f'map https://www.client_only_tls.com/ http://127.0.0.1:{server.Variables.http_port}', - f'map https://www.tls.com/ https://127.0.0.1:{server.Variables.https_port}', - f'map http://www.connect_target.com/ http://127.0.0.1:{server.Variables.http_port}', - f'map / http://127.0.0.1:{server.Variables.http_port}', -]) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'traffic_dump|http', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.http.host_sni_policy': 2, + 'proxy.config.ssl.TLSv1_3': 0, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.http.connect_ports': f"{server.Variables.http_port}", + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLines( + [ + f'map https://www.client_only_tls.com/ http://127.0.0.1:{server.Variables.http_port}', + f'map https://www.tls.com/ https://127.0.0.1:{server.Variables.https_port}', + f'map http://www.connect_target.com/ http://127.0.0.1:{server.Variables.http_port}', + f'map / http://127.0.0.1:{server.Variables.http_port}', + ]) # Configure traffic_dump. ts.Disk.plugin_config.AddLine( f'traffic_dump.so --logdir {replay_dir} --sample 1 --limit 1000000000 ' - '--sensitive-fields "cookie,set-cookie,x-request-1,x-request-2"' -) + '--sensitive-fields "cookie,set-cookie,x-request-1,x-request-2"') # Configure logging of transactions. This is helpful for the cache test below. ts.Disk.logging_yaml.AddLines( ''' @@ -95,20 +87,15 @@ # Set up trafficserver expectations. 
ts.Disk.diags_log.Content = Testers.ContainsExpression( - "loading plugin.*traffic_dump.so", - "Verify the traffic_dump plugin got loaded.") + "loading plugin.*traffic_dump.so", "Verify the traffic_dump plugin got loaded.") ts.Disk.traffic_out.Content = Testers.ContainsExpression( - f"Initialized with log directory: {replay_dir}", - "Verify traffic_dump initialized with the configured directory.") + f"Initialized with log directory: {replay_dir}", "Verify traffic_dump initialized with the configured directory.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( "Initialized with sample pool size of 1 bytes and disk limit of 1000000000 bytes", "Verify traffic_dump initialized with the configured disk limit.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Finish a session with log file of.*bytes", - "Verify traffic_dump sees the end of sessions and accounts for it.") -ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Dumping body bytes: false", - "Verify that dumping body bytes is enabled.") + "Finish a session with log file of.*bytes", "Verify traffic_dump sees the end of sessions and accounts for it.") +ts.Disk.traffic_out.Content += Testers.ContainsExpression("Dumping body bytes: false", "Verify that dumping body bytes is enabled.") # Set up the json replay file expectations. replay_file_session_1 = os.path.join(replay_dir, "127", "0000000000000000") @@ -151,9 +138,12 @@ # Run our test traffic. tr = Test.AddTestRun("Run the test traffic.") tr.AddVerifierClientProcess( - "client", replay_file, http_ports=[ts.Variables.port], + "client", + replay_file, + http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem", + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem", other_args='--thread-limit 1') tr.Processes.Default.StartBefore(server) @@ -348,9 +338,12 @@ tr = Test.AddTestRun("Run some more test traffic with the restricted disk limit.") tr.AddVerifierClientProcess( - "client-2", replay_file, http_ports=[ts.Variables.port], + "client-2", + replay_file, + http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem", + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem", other_args='--keys 1') # Since the limit is zero, we should not see any new replay file created. @@ -377,9 +370,12 @@ tr = Test.AddTestRun("Run some more test traffic with no disk limit.") tr.AddVerifierClientProcess( - "client-3", replay_file, http_ports=[ts.Variables.port], + "client-3", + replay_file, + http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem", + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem", other_args='--keys 1') # Since the limit is zero, we should not see any new replay file created. @@ -409,9 +405,12 @@ tr = Test.AddTestRun("Run test traffic with newly restricted disk limit.") tr.AddVerifierClientProcess( - "client-4", replay_file, http_ports=[ts.Variables.port], + "client-4", + replay_file, + http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem", + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem", other_args='--keys 1') # Since the limit is zero, we should not see any new replay file created. 
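The gold-test hunks above and below are almost entirely re-wrapping of the AuTest scripts: the same records_config keys, plugin_config lines, and verifier client/server calls are kept, only split across lines differently. For readers unfamiliar with the framework, here is a minimal sketch of the recurring setup pattern these tests share; the replay file name, debug tag, and plugin arguments are placeholders chosen for illustration, not values taken from any single test in this diff:

    # Illustrative AuTest fragment (not from the diff): configure ATS, load a
    # plugin, remap traffic to the origin, then drive traffic with a verifier client.
    server = Test.MakeVerifierServerProcess("server", "replay.yaml")
    ts = Test.MakeATSProcess("ts", enable_tls=True)

    ts.Disk.records_config.update(
        {
            'proxy.config.diags.debug.enabled': 1,
            'proxy.config.diags.debug.tags': 'traffic_dump',
        })
    ts.Disk.plugin_config.AddLine(f'traffic_dump.so --logdir {Test.RunDirectory} --sample 1')
    ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.http_port}')

    tr = Test.AddTestRun("Run the test traffic.")
    tr.AddVerifierClientProcess("client", "replay.yaml", http_ports=[ts.Variables.port])
    tr.Processes.Default.StartBefore(server)
    tr.Processes.Default.StartBefore(ts)

Each test then asserts on ts.Disk.traffic_out.Content or ts.Disk.diags_log.Content with Testers.ContainsExpression, exactly as the reformatted expectation blocks in the surrounding hunks show.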
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_http3.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_http3.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_http3.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_http3.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,18 +28,13 @@ Condition.PluginExists('traffic_dump.so'), Condition.HasATSFeature('TS_USE_QUIC'), ) -Test.SkipIf( - Condition.true("Skip this test until the TS_EVENT_HTTP_SSN are supported for QUIC connections."), -) +Test.SkipIf(Condition.true("Skip this test until the TS_EVENT_HTTP_SSN are supported for QUIC connections."),) schema_path = os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json') # Configure the origin server. replay_file = "replay/http3.yaml" -server = Test.MakeVerifierServerProcess( - "server", replay_file, - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") - +server = Test.MakeVerifierServerProcess("server", replay_file, ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") # Define ATS and configure it. ts = Test.MakeATSProcess("ts", enable_tls=True, enable_quic=True) @@ -50,41 +45,31 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'traffic_dump|quic', - 'proxy.config.http.insert_age_in_response': 0, - - 'proxy.config.quic.qlog_dir': qlog_dir, - - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.http.host_sni_policy': 2, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - f'map https://www.client_only_tls.com/ http://127.0.0.1:{server.Variables.http_port}' -) -ts.Disk.remap_config.AddLine( - f'map https://www.tls.com/ https://127.0.0.1:{server.Variables.https_port}' -) -ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{server.Variables.http_port}' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'traffic_dump|quic', + 'proxy.config.http.insert_age_in_response': 0, + 'proxy.config.quic.qlog_dir': qlog_dir, + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.http.host_sni_policy': 2, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine(f'map https://www.client_only_tls.com/ http://127.0.0.1:{server.Variables.http_port}') +ts.Disk.remap_config.AddLine(f'map https://www.tls.com/ https://127.0.0.1:{server.Variables.https_port}') +ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.http_port}') # Configure traffic_dump. 
ts.Disk.plugin_config.AddLine( f'traffic_dump.so --logdir {ts_log_dir} --sample 1 --limit 1000000000 ' - '--sensitive-fields "cookie,set-cookie,x-request-1,x-request-2"' -) + '--sensitive-fields "cookie,set-cookie,x-request-1,x-request-2"') # Configure logging of transactions. This is helpful for the cache test below. ts.Disk.logging_yaml.AddLines( ''' @@ -99,17 +84,14 @@ # Set up trafficserver expectations. ts.Disk.diags_log.Content = Testers.ContainsExpression( - "loading plugin.*traffic_dump.so", - "Verify the traffic_dump plugin got loaded.") + "loading plugin.*traffic_dump.so", "Verify the traffic_dump plugin got loaded.") ts.Disk.traffic_out.Content = Testers.ContainsExpression( - f"Initialized with log directory: {ts_log_dir}", - "Verify traffic_dump initialized with the configured directory.") + f"Initialized with log directory: {ts_log_dir}", "Verify traffic_dump initialized with the configured directory.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( "Initialized with sample pool size of 1 bytes and disk limit of 1000000000 bytes", "Verify traffic_dump initialized with the configured disk limit.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Finish a session with log file of.*bytes", - "Verify traffic_dump sees the end of sessions and accounts for it.") + "Finish a session with log file of.*bytes", "Verify traffic_dump sees the end of sessions and accounts for it.") # Set up the json replay file expectations. replay_file_session_1 = os.path.join(ts_log_dir, "127", "0000000000000000") @@ -119,10 +101,13 @@ # are run in serial. tr = Test.AddTestRun("Run the test traffic.") tr.AddVerifierClientProcess( - "client", replay_file, http_ports=[ts.Variables.port], + "client", + replay_file, + http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port], http3_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem", + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem", other_args='--thread-limit 1') tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_ip_filter.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_ip_filter.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_ip_filter.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_ip_filter.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,9 +24,7 @@ Verify traffic_dump IP filter functionality. ''' -Test.SkipUnless( - Condition.PluginExists('traffic_dump.so'), -) +Test.SkipUnless(Condition.PluginExists('traffic_dump.so'),) # Configure the origin server. replay_file = "replay/traffic_dump.yaml" @@ -51,9 +49,7 @@ 'proxy.config.diags.debug.enabled': 1, 'proxy.config.diags.debug.tags': 'traffic_dump', }) - ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{server.Variables.http_port}' - ) + ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.http_port}') # Configure traffic_dump as specified. 
ts.Disk.plugin_config.AddLine(plugin_command.format(replay_dir)) @@ -72,15 +68,10 @@ # tr = Test.AddTestRun("Verify that -4 matches 127.0.0.1 as expected") ts1, ts1_replay_file = get_common_ats_process( - "ts1", - 'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 -4 127.0.0.1', - replay_exists=True) + "ts1", 'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 -4 127.0.0.1', replay_exists=True) ts1.Disk.traffic_out.Content += Testers.ContainsExpression( - "Filtering to only dump connections with ip: 127.0.0.1", - "Verify the IP filter status message.") -tr.AddVerifierClientProcess( - "client0", replay_file, http_ports=[ts1.Variables.port], - other_args='--keys 1') + "Filtering to only dump connections with ip: 127.0.0.1", "Verify the IP filter status message.") +tr.AddVerifierClientProcess("client0", replay_file, http_ports=[ts1.Variables.port], other_args='--keys 1') tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts1) @@ -100,15 +91,10 @@ # tr = Test.AddTestRun("Verify that -4 filters out our non-matching IP as expected") ts2, ts2_replay_file = get_common_ats_process( - "ts2", - 'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 -4 1.2.3.4', - replay_exists=False) + "ts2", 'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 -4 1.2.3.4', replay_exists=False) ts2.Disk.traffic_out.Content += Testers.ContainsExpression( - "Filtering to only dump connections with ip: 1.2.3.4", - "Verify the IP filter status message.") -tr.AddVerifierClientProcess( - "client1", replay_file, http_ports=[ts2.Variables.port], - other_args='--keys 1') + "Filtering to only dump connections with ip: 1.2.3.4", "Verify the IP filter status message.") +tr.AddVerifierClientProcess("client1", replay_file, http_ports=[ts2.Variables.port], other_args='--keys 1') tr.Processes.Default.StartBefore(ts2) tr.StillRunningAfter = server @@ -120,15 +106,10 @@ tr = Test.AddTestRun("Verify that -4 detects an invalid IP string") invalid_ip = "this_is_not_a_valid_ip_string" ts3, ts3_replay_file = get_common_ats_process( - "ts3", - 'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 -4 ' + invalid_ip, - replay_exists=False) + "ts3", 'traffic_dump.so --logdir {0} --sample 1 --limit 1000000000 -4 ' + invalid_ip, replay_exists=False) ts3.Disk.diags_log.Content = Testers.ContainsExpression( - f"Problems parsing IP filter address argument: {invalid_ip}", - "Verify traffic_dump detects an invalid IPv4 address.") -tr.AddVerifierClientProcess( - "client2", replay_file, http_ports=[ts3.Variables.port], - other_args='--keys 1') + f"Problems parsing IP filter address argument: {invalid_ip}", "Verify traffic_dump detects an invalid IPv4 address.") +tr.AddVerifierClientProcess("client2", replay_file, http_ports=[ts3.Variables.port], other_args='--keys 1') tr.Processes.Default.StartBefore(ts3) tr.StillRunningAfter = server diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_response_body.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_response_body.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_response_body.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_response_body.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,9 +24,7 @@ Verify traffic_dump response body functionality. 
''' -Test.SkipUnless( - Condition.PluginExists('traffic_dump.so'), -) +Test.SkipUnless(Condition.PluginExists('traffic_dump.so'),) # Configure the origin server. replay_file = "replay/response_body.yaml" @@ -40,32 +38,28 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'traffic_dump', - - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.http.host_sni_policy': 2, - 'proxy.config.ssl.TLSv1_3': 0, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'traffic_dump', + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.http.host_sni_policy': 2, + 'proxy.config.ssl.TLSv1_3': 0, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLines([ - f'map / http://127.0.0.1:{server.Variables.http_port}' + f'map / http://127.0.0.1:{server.Variables.http_port}', ]) # Configure traffic_dump to dump body bytes (-b). -ts.Disk.plugin_config.AddLine( - f'traffic_dump.so --logdir {replay_dir} --sample 1 --limit 1000000000 -b' -) +ts.Disk.plugin_config.AddLine(f'traffic_dump.so --logdir {replay_dir} --sample 1 --limit 1000000000 -b') ts_dump_0 = os.path.join(replay_dir, "127", "0000000000000000") ts.Disk.File(ts_dump_0, exists=True) @@ -79,16 +73,17 @@ ts_dump_3 = os.path.join(replay_dir, "127", "0000000000000003") ts.Disk.File(ts_dump_3, exists=True) -ts.Disk.traffic_out.Content = Testers.ContainsExpression( - "Dumping body bytes: true", - "Verify that dumping body bytes is enabled.") +ts.Disk.traffic_out.Content = Testers.ContainsExpression("Dumping body bytes: true", "Verify that dumping body bytes is enabled.") # Run our test traffic. tr = Test.AddTestRun("Run the test traffic.") tr.AddVerifierClientProcess( - "client", replay_file, http_ports=[ts.Variables.port], + "client", + replay_file, + http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem", + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem", other_args='--thread-limit 1') tr.Processes.Default.StartBefore(server) @@ -96,7 +91,6 @@ tr.StillRunningAfter = server tr.StillRunningAfter = ts - # Common verification variables. 
verify_replay = "verify_replay.py" schema = os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_sni_filter.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_sni_filter.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_sni_filter.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/traffic_dump_sni_filter.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,15 +24,12 @@ Verify traffic_dump functionality. ''' -Test.SkipUnless( - Condition.PluginExists('traffic_dump.so'), -) +Test.SkipUnless(Condition.PluginExists('traffic_dump.so'),) schema_path = os.path.join(Test.Variables.AtsTestToolsDir, 'lib', 'replay_schema.json') replay_file = "replay/various_sni.yaml" server = Test.MakeVerifierServerProcess( - "server-various-sni", replay_file, - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") + "server-various-sni", replay_file, ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") # Define ATS and configure ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) @@ -42,27 +39,23 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'traffic_dump', - - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.http.host_sni_policy': 2, - 'proxy.config.ssl.TLSv1_3': 0, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - f'map / https://127.0.0.1:{server.Variables.https_port}' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'traffic_dump', + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.http.host_sni_policy': 2, + 'proxy.config.ssl.TLSv1_3': 0, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine(f'map / https://127.0.0.1:{server.Variables.https_port}') ts.Disk.sni_yaml.AddLines([ 'sni:', @@ -73,19 +66,15 @@ # Configure traffic_dump's SNI filter to only dump connections with SNI bob.com. sni_filter = "bob.com" -ts.Disk.plugin_config.AddLine( - f'traffic_dump.so --logdir {replay_dir} --sample 1 ' - f'--sni-filter "{sni_filter}"' -) +ts.Disk.plugin_config.AddLine(f'traffic_dump.so --logdir {replay_dir} --sample 1 ' + f'--sni-filter "{sni_filter}"') # Set up trafficserver expectations. 
ts.Disk.traffic_out.Content += Testers.ContainsExpression( - f"Filtering to only dump connections with SNI: {sni_filter}", - "Verify filtering for the expected SNI.") + f"Filtering to only dump connections with SNI: {sni_filter}", "Verify filtering for the expected SNI.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( - "Ignore HTTPS session with non-filtered SNI: dave", - "Verify that the non-desired SNI session was filtered out.") + "Ignore HTTPS session with non-filtered SNI: dave", "Verify that the non-desired SNI session was filtered out.") ts.Disk.traffic_out.Content += Testers.ContainsExpression( "Initialized with sample pool size of 1 bytes and unlimited disk utilization", @@ -111,8 +100,11 @@ # across both. server_port = server.Variables.http_port tr.AddVerifierClientProcess( - "client-various-sni", replay_file, https_ports=[ts.Variables.ssl_port], - ssl_cert="ssl/server_combined.pem", ca_cert="ssl/signer.pem") + "client-various-sni", + replay_file, + https_ports=[ts.Variables.ssl_port], + ssl_cert="ssl/server_combined.pem", + ca_cert="ssl/signer.pem") tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) tr.StillRunningAfter = server diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/verify_replay.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/verify_replay.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/traffic_dump/verify_replay.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/traffic_dump/verify_replay.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - """ Verify that a given JSON replay file fulfills basic expectations. """ @@ -128,8 +127,7 @@ return False if size != client_request_size: - print("Mismatched client-request request size. Expected: {}, received: {}".format( - client_request_size, size)) + print("Mismatched client-request request size. Expected: {}, received: {}".format(client_request_size, size)) return False return True @@ -180,8 +178,9 @@ return True else: host_name = "client" if is_client else "server" - print('Unexpected protocol {} stack. Expected: "{}", found: "{}".'.format( - host_name, expected_protocols, dumped_protocols)) + print( + 'Unexpected protocol {} stack. 
Expected: "{}", found: "{}".'.format( + host_name, expected_protocols, dumped_protocols)) return False except KeyError: print("Could not find {} protocol stack node in the replay file.".format(host_name)) @@ -225,8 +224,7 @@ elif expected_value.lower() == "true": expected_value = True else: - raise ValueError("Cannot convert expected value to a boolean: {}, found: {}".format( - expected_value, found_value)) + raise ValueError("Cannot convert expected value to a boolean: {}, found: {}".format(expected_value, found_value)) if found_value == 'true': found_value = True elif found_value == 'false': @@ -238,12 +236,10 @@ elif isinstance(found_value, int): value_matches = found_value == int(expected_value) else: - raise ValueError("Cannot determine type of found value: {}".format( - found_value)) + raise ValueError("Cannot determine type of found value: {}".format(found_value)) if not value_matches: - print('Mismatched value for "{}", expected "{}", received "{}"'.format( - expected_key, expected_value, found_value)) + print('Mismatched value for "{}", expected "{}", received "{}"'.format(expected_key, expected_value, found_value)) return False return True @@ -256,8 +252,7 @@ return False if not verify_tls_features(expected_tls_features, tls_features): - print('Failed to verify client tls features in "{}"'.format( - expected_tls_features)) + print('Failed to verify client tls features in "{}"'.format(expected_tls_features)) return False return True @@ -270,8 +265,7 @@ return False if not verify_tls_features(expected_tls_features, tls_features): - print('Failed to verify server tls features in "{}"'.format( - expected_tls_features)) + print('Failed to verify server tls features in "{}"'.format(expected_tls_features)) return False return True @@ -284,8 +278,7 @@ return False if expected_client_http_version != found_version: - print('Expected client version of "{}", but found "{}"'.format( - expected_client_http_version, found_version)) + print('Expected client version of "{}", but found "{}"'.format(expected_client_http_version, found_version)) return False return True @@ -314,8 +307,9 @@ return False if int(proxy_request_body_size) != len(expected_body_bytes): - print("Expected the proxy-request content size to be '{0}' but got '{1}'".format( - len(expected_body_bytes), proxy_request_body_size)) + print( + "Expected the proxy-request content size to be '{0}' but got '{1}'".format( + len(expected_body_bytes), proxy_request_body_size)) return False return True @@ -345,8 +339,9 @@ return False if int(server_response_body_size) != len(expected_body_bytes): - print("Expected the server-response content size to be '{0}' but got '{1}'".format( - len(expected_body_bytes), server_response_body_size)) + print( + "Expected the server-response content size to be '{0}' but got '{1}'".format( + len(expected_body_bytes), server_response_body_size)) return False return True @@ -354,36 +349,21 @@ def parse_args(): parser = argparse.ArgumentParser() - parser.add_argument("schema_file", - type=argparse.FileType('r'), - help="The schema in which to validate the replay file.") - parser.add_argument("replay_file", - type=argparse.FileType('r'), - help="The replay file to validate.") - parser.add_argument("--request-target", - help="The request target ('url' element) to expect in the replay file.") - parser.add_argument("--client-request-size", - type=int, - help="The expected size value in the client-request node.") - parser.add_argument("--sensitive-fields", - action="append", - help="The fields that are considered 
sensitive and replaced with insensitive values.") - parser.add_argument("--client-protocols", - help="The comma-separated sequence of protocols to expect for the client connection.") - parser.add_argument("--server-protocols", - help="The comma-separated sequence of protocols to expect for the server connection.") - parser.add_argument("--client-tls-features", - help="The TLS values to expect for the client connection.") - parser.add_argument("--server-tls-features", - help="The TLS values to expect for the server connection.") - parser.add_argument("--client-http-version", - help="The client HTTP version to expect") - parser.add_argument("--request_body", - type=str, - help="Verify that the client request has the specified body bytes.") - parser.add_argument("--response_body", - type=str, - help="Verify that the proxy response has the specified body bytes.") + parser.add_argument("schema_file", type=argparse.FileType('r'), help="The schema in which to validate the replay file.") + parser.add_argument("replay_file", type=argparse.FileType('r'), help="The replay file to validate.") + parser.add_argument("--request-target", help="The request target ('url' element) to expect in the replay file.") + parser.add_argument("--client-request-size", type=int, help="The expected size value in the client-request node.") + parser.add_argument( + "--sensitive-fields", + action="append", + help="The fields that are considered sensitive and replaced with insensitive values.") + parser.add_argument("--client-protocols", help="The comma-separated sequence of protocols to expect for the client connection.") + parser.add_argument("--server-protocols", help="The comma-separated sequence of protocols to expect for the server connection.") + parser.add_argument("--client-tls-features", help="The TLS values to expect for the client connection.") + parser.add_argument("--server-tls-features", help="The TLS values to expect for the server connection.") + parser.add_argument("--client-http-version", help="The client HTTP version to expect") + parser.add_argument("--request_body", type=str, help="Verify that the client request has the specified body bytes.") + parser.add_argument("--response_body", type=str, help="Verify that the proxy response has the specified body bytes.") return parser.parse_args() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/transform/transaction_data_sink.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/transform/transaction_data_sink.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/transform/transaction_data_sink.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/transform/transaction_data_sink.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,30 +22,26 @@ Verify transaction data sink. 
''' -Test.SkipUnless( - Condition.PluginExists('txn_data_sink.so'), -) +Test.SkipUnless(Condition.PluginExists('txn_data_sink.so'),) replay_file = "transaction-with-body.replays.yaml" server = Test.MakeVerifierServerProcess("server", replay_file) nameserver = Test.MakeDNServer("dns", default='127.0.0.1') ts = Test.MakeATSProcess("ts", enable_cache=False) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'txn_data_sink', - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", -}) -ts.Disk.remap_config.AddLine( - f'map / http://localhost:{server.Variables.http_port}/' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'txn_data_sink', + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + }) +ts.Disk.remap_config.AddLine(f'map / http://localhost:{server.Variables.http_port}/') ts.Disk.plugin_config.AddLine('txn_data_sink.so') # Verify that the various aspects of the expected debug output for the # transaction are logged. ts.Disk.traffic_out.Content = Testers.ContainsExpression( - '"http1.1_response_body"', - "The response body should be printed by the plugin.") + '"http1.1_response_body"', "The response body should be printed by the plugin.") tr = Test.AddTestRun() tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/tsapi/tsapi.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/tsapi/tsapi.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/tsapi/tsapi.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/tsapi/tsapi.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,27 +16,22 @@ import os - Test.Summary = ''' Test TS API. ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2'), -) +Test.SkipUnless(Condition.HasCurlFeature('http2'),) Test.ContinueOnFail = True plugin_name = "test_tsapi" server = Test.MakeOriginServer("server") -request_header = { - "headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "112233"} server.addResponse("sessionlog.json", request_header, response_header) -request_header = { - "headers": "GET /xYz HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /xYz HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "445566"} server.addResponse("sessionlog.json", request_header, response_header) @@ -50,28 +45,25 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. 
- 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': f'http|{plugin_name}', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.remap_required': 1, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': f'http|{plugin_name}', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') rp = os.path.join(Test.Variables.AtsBuildGoldTestsDir, 'pluginTest', 'tsapi', '.libs', f'{plugin_name}.so') ts.Setup.Copy(rp, ts.Env['PROXY_CONFIG_PLUGIN_PLUGIN_DIR']) ts.Disk.remap_config.AddLine( - "map http://myhost.test http://127.0.0.1:{0} @plugin={1} @plugin={1}".format(server.Variables.Port, f"{plugin_name}.so") -) + "map http://myhost.test http://127.0.0.1:{0} @plugin={1} @plugin={1}".format(server.Variables.Port, f"{plugin_name}.so")) ts.Disk.remap_config.AddLine( - "map https://myhost.test:123 http://127.0.0.1:{0} @plugin={1} @plugin={1}".format(server.Variables.Port, f"{plugin_name}.so") -) + "map https://myhost.test:123 http://127.0.0.1:{0} @plugin={1} @plugin={1}".format(server.Variables.Port, f"{plugin_name}.so")) # For some reason, without this delay, traffic_server cannot reliably open the cleartext port for listening without an # error. 
@@ -85,22 +77,17 @@ tr.Processes.Default.StartBefore(server, ready=When.PortOpen(server.Variables.Port)) tr.Processes.Default.StartBefore(Test.Processes.ts) # -tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --header "Host: mYhOsT.teSt" hTtP://loCalhOst:{}/'.format(ts.Variables.port) -) +tr.Processes.Default.Command = ('curl --verbose --ipv4 --header "Host: mYhOsT.teSt" hTtP://loCalhOst:{}/'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() -tr.Processes.Default.Command = ( - 'curl --verbose --ipv4 --proxy localhost:{} http://mYhOsT.teSt/xYz'.format(ts.Variables.port) -) +tr.Processes.Default.Command = ('curl --verbose --ipv4 --proxy localhost:{} http://mYhOsT.teSt/xYz'.format(ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'curl --verbose --ipv4 --http2 --insecure --header ' + - '"Host: myhost.test:123" HttPs://LocalHost:{}/'.format(ts.Variables.ssl_port) -) + '"Host: myhost.test:123" HttPs://LocalHost:{}/'.format(ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/uri_signing/uri_signing.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/uri_signing/uri_signing.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/uri_signing/uri_signing.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/uri_signing/uri_signing.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,43 +28,45 @@ server = Test.MakeOriginServer("server") # Default origin test -req_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", - "body": "", - } -res_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "", - } +req_header = { + "headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "", +} +res_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "", +} server.addResponse("sessionfile.log", req_header, res_header) # Test case for normal -req_header = {"headers": - "GET /someasset.ts HTTP/1.1\r\nHost: somehost\r\n\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header = {"headers": - "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "somebody", - } +req_header = { + "headers": "GET /someasset.ts HTTP/1.1\r\nHost: somehost\r\n\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "somebody", +} server.addResponse("sessionfile.log", req_header, res_header) # Test case for crossdomain -req_header = {"headers": - "GET /crossdomain.xml HTTP/1.1\r\nHost: somehost\r\n\r\n", - "timestamp": "1469733493.993", - "body": "", - } - -res_header = {"headers": - "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", - "body": "", - } +req_header = { + "headers": "GET /crossdomain.xml HTTP/1.1\r\nHost: somehost\r\n\r\n", + "timestamp": "1469733493.993", + "body": "", +} + +res_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "", +} server.addResponse("sessionfile.log", req_header, res_header) @@ -74,17 +76,17 @@ ts = Test.MakeATSProcess("ts", enable_cache=False) #ts = Test.MakeATSProcess("ts", 
"traffic_server_valgrind.sh") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'uri_signing|http', - # 'proxy.config.diags.debug.tags': 'uri_signing', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'uri_signing|http', + # 'proxy.config.diags.debug.tags': 'uri_signing', + }) # Use unchanged incoming URL. ts.Disk.remap_config.AddLine( 'map http://somehost/ http://127.0.0.1:{}/'.format(server.Variables.Port) + - ' @plugin=uri_signing.so @pparam={}/config.json'.format(Test.RunDirectory) -) + ' @plugin=uri_signing.so @pparam={}/config.json'.format(Test.RunDirectory)) # Install configuration ts.Setup.CopyAs('config.json', Test.RunDirectory) @@ -204,7 +206,6 @@ tr.StillRunningAfter = server tr.StillRunningAfter = ts - # 12 - Check missing iss from the payload tr = Test.AddTestRun("Missing iss field in the payload") ps = tr.Processes.Default @@ -212,7 +213,6 @@ ps.ReturnCode = 0 ps.Streams.stderr = "gold/403.gold" ts.Disk.traffic_out.Content = Testers.ContainsExpression( - "Initial JWT Failure: iss is missing, must be present", - "should fail the validation") + "Initial JWT Failure: iss is missing, must be present", "should fail the validation") tr.StillRunningAfter = server tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/url_sig/url_sig.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/url_sig/url_sig.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/url_sig/url_sig.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/url_sig/url_sig.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,7 +36,9 @@ server = Test.MakeOriginServer("server") request_header = { - "headers": "GET /foo/abcde/qrstuvwxyz HTTP/1.1\r\nHost: just.any.thing\r\n\r\n", "timestamp": "1469733493.993", "body": "" + "headers": "GET /foo/abcde/qrstuvwxyz HTTP/1.1\r\nHost: just.any.thing\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" } # expected response from the origin server response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -49,54 +51,45 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - # 'proxy.config.diags.debug.enabled': 1, - # 'proxy.config.diags.debug.tags': 'http|url_sig', - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + # 'proxy.config.diags.debug.enabled': 1, + # 'proxy.config.diags.debug.tags': 'http|url_sig', + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Use unchanged incoming URL. 
# ts.Setup.Copy("url_sig.config", ts.Variables.CONFIGDIR) ts.Disk.remap_config.AddLine( - f'map http://one.two.three/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=url_sig.so @pparam=url_sig.config' -) + f'map http://one.two.three/ http://127.0.0.1:{server.Variables.Port}/' + ' @plugin=url_sig.so @pparam=url_sig.config') # Use unchanged incoming HTTPS URL. # ts.Disk.remap_config.AddLine( - f'map https://one.two.three/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=url_sig.so @pparam=url_sig.config' -) + f'map https://one.two.three/ http://127.0.0.1:{server.Variables.Port}/' + ' @plugin=url_sig.so @pparam=url_sig.config') # Use pristine URL, incoming URL unchanged. # ts.Disk.remap_config.AddLine( f'map http://four.five.six/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=url_sig.so @pparam=url_sig.config @pparam=pristineurl' -) + ' @plugin=url_sig.so @pparam=url_sig.config @pparam=pristineurl') # Use pristine URL, incoming URL changed. # ts.Disk.remap_config.AddLine( f'map http://seven.eight.nine/ http://127.0.0.1:{server.Variables.Port}' + - ' @plugin=url_sig.so @pparam=url_sig.config @pparam=PristineUrl' -) + ' @plugin=url_sig.so @pparam=url_sig.config @pparam=PristineUrl') # Use config with all settings set # ts.Setup.Copy("url_sig.all.config", ts.Variables.CONFIGDIR) ts.Disk.remap_config.AddLine( - f'map http://ten.eleven.twelve/ http://127.0.0.1:{server.Variables.Port}/' + - ' @plugin=url_sig.so @pparam=url_sig.all.config' -) + f'map http://ten.eleven.twelve/ http://127.0.0.1:{server.Variables.Port}/' + ' @plugin=url_sig.so @pparam=url_sig.all.config') # Validation failure tests. @@ -110,9 +103,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.2&E=33046620008&A=2&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.2&E=33046620008&A=2&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + LogTee) # With client / MD5 / P=010 / URL pristine / URL altered -- Expired. # @@ -120,9 +111,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=1&A=2&K=13&P=010&S=f237aad1fa010234d7bf8108a0e36387'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=1&A=2&K=13&P=010&S=f237aad1fa010234d7bf8108a0e36387'" + LogTee) # With client / No algorithm / P=101 / URL pristine / URL altered. # @@ -130,9 +119,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + LogTee) # With client / Bad algorithm / P=101 / URL pristine / URL altered. # @@ -140,9 +127,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=3&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=3&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + LogTee) # With client / MD5 / No parts / URL pristine / URL altered. 
# @@ -150,9 +135,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&S=d1f352d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&S=d1f352d4f1d931ad2f441013402d93f8'" + LogTee) # With client / MD5 / P=10 (bad) / URL pristine / URL altered. # @@ -160,9 +143,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=10&S=d1f352d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=10&S=d1f352d4f1d931ad2f441013402d93f8'" + LogTee) # With client / MD5 / P=101 / URL pristine / URL altered -- No signature. # @@ -170,9 +151,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101'" + LogTee) # With client / MD5 / P=101 / URL pristine / URL altered -- Bad signature. # @@ -180,9 +159,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101&S=d1f452d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101&S=d1f452d4f1d931ad2f441013402d93f8'" + LogTee) # With client / MD5 / P=101 / URL pristine / URL altered -- Spurious &. # @@ -190,9 +167,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8#'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8#'" + LogTee) # Success tests. @@ -202,9 +177,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://four.five.six/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046618556&A=1&K=15&P=1&S=f4103561a23adab7723a89b9831d77e0afb61d92'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046618556&A=1&K=15&P=1&S=f4103561a23adab7723a89b9831d77e0afb61d92'" + LogTee) # No client / MD5 / P=1 / URL pristine / URL altered. # @@ -212,9 +185,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?E=33046618586&A=2&K=0&P=1&S=0364efa28afe345544596705b92d20ac'" + - LogTee -) + "foo/abcde/qrstuvwxyz?E=33046618586&A=2&K=0&P=1&S=0364efa28afe345544596705b92d20ac'" + LogTee) # With client / MD5 / P=010 / URL pristine / URL altered. 
# @@ -222,9 +193,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046619717&A=2&K=13&P=010&S=f237aad1fa010234d7bf8108a0e36387'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046619717&A=2&K=13&P=010&S=f237aad1fa010234d7bf8108a0e36387'" + LogTee) # With client / MD5 / P=101 / URL pristine / URL altered. # @@ -232,9 +201,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://seven.eight.nine/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101&S=d1f352d4f1d931ad2f441013402d93f8'" + LogTee) def sign(payload, key): @@ -252,9 +219,7 @@ tr = Test.AddTestRun() tr.Processes.Default.ReturnCode = 0 -tr.Processes.Default.Command = ( - f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} '{url}'" + LogTee -) +tr.Processes.Default.Command = (f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} '{url}'" + LogTee) # No client / SHA1 / P=1 / URL not pristine / URL not altered -- HTTPS. # @@ -265,9 +230,8 @@ tr = Test.AddTestRun() tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( - f"curl --verbose --http1.1 --insecure --header 'Host: one.two.three' '{url}'" + - LogTee + " ; grep -F -e '< HTTP' -e Authorization {0}/url_sig_long.log > {0}/url_sig_short.log ".format(ts.RunDirectory) -) + f"curl --verbose --http1.1 --insecure --header 'Host: one.two.three' '{url}'" + LogTee + + " ; grep -F -e '< HTTP' -e Authorization {0}/url_sig_long.log > {0}/url_sig_short.log ".format(ts.RunDirectory)) # With client / MD5 / P=101 / URL pristine / URL altered. 
# uses url_type pristine in config @@ -275,9 +239,7 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Command = ( f"curl --verbose --proxy http://127.0.0.1:{ts.Variables.port} 'http://ten.eleven.twelve/" + - "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101&S=586ef8e808caeeea025c525c89ff2638'" + - LogTee -) + "foo/abcde/qrstuvwxyz?C=127.0.0.1&E=33046620008&A=2&K=13&P=101&S=586ef8e808caeeea025c525c89ff2638'" + LogTee) # Overriding the built in ERROR check since we expect some ERROR messages ts.Disk.diags_log.Content = Testers.ContainsExpression("ERROR", "Some tests are failure tests") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/xdebug/x_cache_info/x_cache_info.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/xdebug/x_cache_info/x_cache_info.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/xdebug/x_cache_info/x_cache_info.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/xdebug/x_cache_info/x_cache_info.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,30 +23,24 @@ server = Test.MakeOriginServer("server") started = False -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.url_remap.remap_required': 0, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http' -}) +ts.Disk.records_config.update( + { + 'proxy.config.url_remap.remap_required': 0, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http' + }) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "map http://two http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "regex_map http://three[0-9]+ http://127.0.0.1:{0}".format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("map http://two http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("regex_map http://three[0-9]+ http://127.0.0.1:{0}".format(server.Variables.Port)) Test.Setup.Copy(f'{Test.Variables.AtsTestToolsDir}') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/xdebug/x_effective_url/x_effective_url.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/xdebug/x_effective_url/x_effective_url.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/xdebug/x_effective_url/x_effective_url.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/xdebug/x_effective_url/x_effective_url.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,11 @@ server = Test.MakeOriginServer("server") -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: 
doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -request_header_two = { - "headers": "GET /two/argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +request_header_two = {"headers": "GET /two/argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header_two, response_header) ts = Test.MakeATSProcess("ts") @@ -41,21 +38,14 @@ ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "map http://two http://127.0.0.1:{0}".format(server.Variables.Port) + "/two" -) -ts.Disk.remap_config.AddLine( - "regex_map http://three[0-9]+ http://127.0.0.1:{0}".format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("map http://two http://127.0.0.1:{0}".format(server.Variables.Port) + "/two") +ts.Disk.remap_config.AddLine("regex_map http://three[0-9]+ http://127.0.0.1:{0}".format(server.Variables.Port)) tr = Test.AddTestRun() tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.StartBefore(Test.Processes.server) -tr.Processes.Default.Command = "cp {}/tcp_client.py {}/tcp_client.py".format( - Test.Variables.AtsTestToolsDir, Test.RunDirectory) +tr.Processes.Default.Command = "cp {}/tcp_client.py {}/tcp_client.py".format(Test.Variables.AtsTestToolsDir, Test.RunDirectory) tr.Processes.Default.ReturnCode = 0 @@ -64,8 +54,7 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( f"( {sys.executable} {Test.RunDirectory}/tcp_client.py 127.0.0.1 {ts.Variables.port} {Test.TestDirectory}/{msgFile}.in" - f" ; echo '======' ) | sed 's/:{server.Variables.Port}/:SERVER_PORT/' >> {Test.RunDirectory}/out.log 2>&1 " - ) + f" ; echo '======' ) | sed 's/:{server.Variables.Port}/:SERVER_PORT/' >> {Test.RunDirectory}/out.log 2>&1 ") tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/xdebug/x_remap/x_remap.test.py trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/xdebug/x_remap/x_remap.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/pluginTest/xdebug/x_remap/x_remap.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/pluginTest/xdebug/x_remap/x_remap.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,38 +22,30 @@ server = Test.MakeOriginServer("server", options={'--load': (Test.TestDirectory + '/x_remap-observer.py')}) -request_header = { - "headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET /argh HTTP/1.1\r\nHost: doesnotmatter\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.url_remap.remap_required': 0, - 'proxy.config.diags.debug.enabled': 0, - # 'proxy.config.diags.debug.tags': 
'http|xdebug' - # 'proxy.config.diags.debug.tags': 'xdebug' -}) +ts.Disk.records_config.update( + { + 'proxy.config.url_remap.remap_required': 0, + 'proxy.config.diags.debug.enabled': 0, + # 'proxy.config.diags.debug.tags': 'http|xdebug' + # 'proxy.config.diags.debug.tags': 'xdebug' + }) ts.Disk.plugin_config.AddLine('xdebug.so') -ts.Disk.remap_config.AddLine( - "map http://one http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "map http://two http://127.0.0.1:{0}".format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - "regex_map http://three[0-9]+ http://127.0.0.1:{0}".format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine("map http://one http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("map http://two http://127.0.0.1:{0}".format(server.Variables.Port)) +ts.Disk.remap_config.AddLine("regex_map http://three[0-9]+ http://127.0.0.1:{0}".format(server.Variables.Port)) tr = Test.AddTestRun() tr.Processes.Default.StartBefore(Test.Processes.ts) tr.Processes.Default.StartBefore(Test.Processes.server) -tr.Processes.Default.Command = "cp {}/tcp_client.py {}/tcp_client.py".format( - Test.Variables.AtsTestToolsDir, Test.RunDirectory) +tr.Processes.Default.Command = "cp {}/tcp_client.py {}/tcp_client.py".format(Test.Variables.AtsTestToolsDir, Test.RunDirectory) tr.Processes.Default.ReturnCode = 0 @@ -62,8 +54,7 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( f"( {sys.executable} {Test.RunDirectory}/tcp_client.py 127.0.0.1 {ts.Variables.port} {Test.TestDirectory}/{msgFile}.in" - f" ; echo '======' ) | sed 's/:{server.Variables.Port}/:SERVER_PORT/' >> {Test.RunDirectory}/out.log 2>&1 " - ) + f" ; echo '======' ) | sed 's/:{server.Variables.Port}/:SERVER_PORT/' >> {Test.RunDirectory}/out.log 2>&1 ") tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/post/post-continue.test.py trafficserver-9.2.4+ds/tests/gold_tests/post/post-continue.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/post/post-continue.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/post/post-continue.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,9 +25,7 @@ Test the Expect header in post ''' # Require HTTP/2 enabled Curl -Test.SkipUnless( - Condition.HasCurlFeature('http2'), -) +Test.SkipUnless(Condition.HasCurlFeature('http2'),) Test.ContinueOnFail = True # ---- @@ -47,32 +45,25 @@ ts.addDefaultSSLFiles() ts2.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - -}) -ts2.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port) -) -ts2.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts2.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.send_100_continue_response': 1 -}) 
+ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + }) +ts2.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port)) +ts2.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts2.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.send_100_continue_response': 1 + }) big_post_body = "0123456789" * 131070 big_post_body_file = open(os.path.join(Test.RunDirectory, "big_post_body"), "w") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/post/post-early-return.test.py trafficserver-9.2.4+ds/tests/gold_tests/post/post-early-return.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/post/post-early-return.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/post/post-early-return.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,19 +36,16 @@ # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(Test.Variables.upstream_port) -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.diags.debug.enabled': 0, - # 'proxy.config.http2.initial_window_size_in': 2*16384, # Make a ludacrisly small window - 'proxy.config.diags.debug.tags': 'http', -}) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(Test.Variables.upstream_port)) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.diags.debug.enabled': 0, + # 'proxy.config.http2.initial_window_size_in': 2*16384, # Make a ludacrisly small window + 'proxy.config.diags.debug.tags': 'http', + }) big_post_body = "0123456789" * 231070 big_post_body_file = open(os.path.join(Test.RunDirectory, "big_post_body"), "w") @@ -66,8 +63,8 @@ test_run.Processes.Default.ReturnCode = 0 test_run = Test.AddTestRun("http1.1 Post with large body early return") -test_run.Processes.Default.Command = '(nc -o output2 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"; sleep 1\' -l 127.0.0.1 {} & ) ; sleep 1 ; curl -H "Expect:" -v -o /dev/null --http1.1 -d @big_post_body -k https://127.0.0.1:{}/post'.format(Test.Variables.upstream_port, - ts.Variables.ssl_port) +test_run.Processes.Default.Command = '(nc -o output2 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"; sleep 1\' -l 127.0.0.1 {} & ) ; sleep 1 ; curl -H "Expect:" -v -o /dev/null --http1.1 -d @big_post_body -k 
https://127.0.0.1:{}/post'.format( + Test.Variables.upstream_port, ts.Variables.ssl_port) test_run.Processes.Default.Streams.All = Testers.ContainsExpression("HTTP/1.1 420 Be Calm", "Receive the early response") test_run.StillRunningAfter = ts test_run.Processes.Default.ReturnCode = 0 @@ -95,19 +92,19 @@ client_out3.Content += Testers.ContainsExpression("Connection: close", "ATS marks the client connection to close") test_run = Test.AddTestRun("http1.1 Post with paused body") -test_run.Processes.Default.Command = '(nc -o output3 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"; sleep 1\' -l 127.0.0.1 {} & ) ; sleep 1 ; nc -o clientout --sh-exec \' printf \"POST /post HTTP/1.1\r\nHost: bob\r\nContent-Length: 20\r\n\r\n1234567890\"; sleep 4; printf \"0123456789\"\' 127.0.0.1 {}'.format(Test.Variables.upstream_port, - ts.Variables.port) +test_run.Processes.Default.Command = '(nc -o output3 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"; sleep 1\' -l 127.0.0.1 {} & ) ; sleep 1 ; nc -o clientout --sh-exec \' printf \"POST /post HTTP/1.1\r\nHost: bob\r\nContent-Length: 20\r\n\r\n1234567890\"; sleep 4; printf \"0123456789\"\' 127.0.0.1 {}'.format( + Test.Variables.upstream_port, ts.Variables.port) test_run.StillRunningAfter = ts test_run.Processes.Default.ReturnCode = 0 test_run = Test.AddTestRun("http1.1 Post with delayed and paused body") -test_run.Processes.Default.Command = '(nc -o output3 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"; sleep 1\' -l 127.0.0.1 {} & ) ; sleep 1 ; nc -o clientout3 --sh-exec \' printf \"POST /post HTTP/1.1\r\nHost: bob\r\nContent-Length: 20\r\n\r\n\"; sleep 1; printf \"1234567890\"; sleep 4; printf \"0123456789\"\' 127.0.0.1 {}'.format(Test.Variables.upstream_port, - ts.Variables.port) +test_run.Processes.Default.Command = '(nc -o output3 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"; sleep 1\' -l 127.0.0.1 {} & ) ; sleep 1 ; nc -o clientout3 --sh-exec \' printf \"POST /post HTTP/1.1\r\nHost: bob\r\nContent-Length: 20\r\n\r\n\"; sleep 1; printf \"1234567890\"; sleep 4; printf \"0123456789\"\' 127.0.0.1 {}'.format( + Test.Variables.upstream_port, ts.Variables.port) test_run.StillRunningAfter = ts test_run.Processes.Default.ReturnCode = 0 test_run = Test.AddTestRun("http1.1 Post with paused body and no delay on server") -test_run.Processes.Default.Command = '(nc -o output4 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"\' -l 127.0.0.1 {} & ) ; sleep 1 ; nc -o clientout2 --sh-exec \' printf \"POST /post HTTP/1.1\r\nHost: bob\r\nContent-Length: 20\r\n\r\n1234567890\"; sleep 4; printf \"0123456789\"\' 127.0.0.1 {}'.format(Test.Variables.upstream_port, - ts.Variables.port) +test_run.Processes.Default.Command = '(nc -o output4 --sh-exec \'printf \"HTTP/1.1 420 Be Calm\r\nContent-Length: 0\r\n\r\n\"\' -l 127.0.0.1 {} & ) ; sleep 1 ; nc -o clientout2 --sh-exec \' printf \"POST /post HTTP/1.1\r\nHost: bob\r\nContent-Length: 20\r\n\r\n1234567890\"; sleep 4; printf \"0123456789\"\' 127.0.0.1 {}'.format( + Test.Variables.upstream_port, ts.Variables.port) test_run.StillRunningAfter = ts test_run.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/post_slow_server/post_slow_server.test.py trafficserver-9.2.4+ds/tests/gold_tests/post_slow_server/post_slow_server.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/post_slow_server/post_slow_server.test.py 2023-10-09 20:36:24.000000000 +0000 +++ 
trafficserver-9.2.4+ds/tests/gold_tests/post_slow_server/post_slow_server.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,43 +24,35 @@ # Because of the 2 minute delay, we don't want to run this test in CI checks. Comment out this line to run it. Test.SkipIf(Condition.true("Test takes too long to run it in CI.")) -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) ts = Test.MakeATSProcess("ts", enable_tls=True, enable_cache=False) ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.transaction_no_activity_timeout_out': 150, - 'proxy.config.http2.no_activity_timeout_in': 150, -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.proxy_name': 'Poxy_Proxy', # This will be the server name. + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.transaction_no_activity_timeout_out': 150, + 'proxy.config.http2.no_activity_timeout_in': 150, + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') Test.GetTcpPort("server_port") -ts.Disk.remap_config.AddLine( - 'map https://localhost http://localhost:{}'.format(Test.Variables.server_port) -) - -server = Test.Processes.Process( - "server", "bash -c '" + Test.TestDirectory + "/server.sh {}'".format(Test.Variables.server_port) -) +ts.Disk.remap_config.AddLine('map https://localhost http://localhost:{}'.format(Test.Variables.server_port)) + +server = Test.Processes.Process("server", "bash -c '" + Test.TestDirectory + "/server.sh {}'".format(Test.Variables.server_port)) tr = Test.AddTestRun() tr.Processes.Default.Command = ( 'curl --request POST --verbose --ipv4 --http2 --insecure --header "Content-Length: 0"' + - " --header 'Host: localhost' https://localhost:{}/xyz >curl.log 2>curl.err".format(ts.Variables.ssl_port) -) + " --header 'Host: localhost' https://localhost:{}/xyz >curl.log 2>curl.err".format(ts.Variables.ssl_port)) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.StartBefore(server) tr.Processes.Default.StartBefore(ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/proxy_protocol/proxy_protocol.test.py trafficserver-9.2.4+ds/tests/gold_tests/proxy_protocol/proxy_protocol.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/proxy_protocol/proxy_protocol.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/proxy_protocol/proxy_protocol.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,13 +19,12 @@ import sys Test.Summary = 'Test PROXY Protocol' -Test.SkipUnless( - Condition.HasCurlOption("--haproxy-protocol") -) +Test.SkipUnless(Condition.HasCurlOption("--haproxy-protocol")) Test.ContinueOnFail = True class ProxyProtocolTest: + def __init__(self): self.setupOriginServer() self.setupTS() @@ -43,18 +42,18 @@ self.ts.addDefaultSSLFiles() self.ts.Disk.ssl_multicert_config.AddLine("dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key") - 
self.ts.Disk.remap_config.AddLine( - f"map / http://127.0.0.1:{self.httpbin.Variables.Port}/") + self.ts.Disk.remap_config.AddLine(f"map / http://127.0.0.1:{self.httpbin.Variables.Port}/") - self.ts.Disk.records_config.update({ - "proxy.config.http.server_ports": f"{self.ts.Variables.port}:pp {self.ts.Variables.ssl_port}:ssl:pp", - "proxy.config.http.proxy_protocol_allowlist": "127.0.0.1", - "proxy.config.http.insert_forwarded": "for|proto", - "proxy.config.ssl.server.cert.path": f"{self.ts.Variables.SSLDir}", - "proxy.config.ssl.server.private_key.path": f"{self.ts.Variables.SSLDir}", - "proxy.config.diags.debug.enabled": 1, - "proxy.config.diags.debug.tags": "proxyprotocol", - }) + self.ts.Disk.records_config.update( + { + "proxy.config.http.server_ports": f"{self.ts.Variables.port}:pp {self.ts.Variables.ssl_port}:ssl:pp", + "proxy.config.http.proxy_protocol_allowlist": "127.0.0.1", + "proxy.config.http.insert_forwarded": "for|proto", + "proxy.config.ssl.server.cert.path": f"{self.ts.Variables.SSLDir}", + "proxy.config.ssl.server.private_key.path": f"{self.ts.Variables.SSLDir}", + "proxy.config.diags.debug.enabled": 1, + "proxy.config.diags.debug.tags": "proxyprotocol", + }) def addTestCase0(self): """ diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale.test.py trafficserver-9.2.4+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - Test.testName = "proxy_serve_stale" Test.ContinueOnFail = True @@ -34,41 +33,32 @@ self._configure_ts() def _configure_server(self): - self.server = Test.MakeVerifierServerProcess( - "server", - self.single_transaction_replay) - self.nameserver = Test.MakeDNServer( - "dns", - default='127.0.0.1') + self.server = Test.MakeVerifierServerProcess("server", self.single_transaction_replay) + self.nameserver = Test.MakeDNServer("dns", default='127.0.0.1') def _configure_ts(self): self.ts_child = Test.MakeATSProcess("ts_child") # Config child proxy to route to parent proxy - self.ts_child.Disk.records_config.update({ - 'proxy.config.http.push_method_enabled': 1, - 'proxy.config.http.parent_proxy.fail_threshold': 2, - 'proxy.config.http.parent_proxy.total_connect_attempts': 1, - 'proxy.config.http.cache.max_stale_age': 10, - 'proxy.config.http.parent_proxy.self_detect': 0, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|parent_proxy', - 'proxy.config.dns.nameservers': f"127.0.0.1:{self.nameserver.Variables.Port}", - }) + self.ts_child.Disk.records_config.update( + { + 'proxy.config.http.push_method_enabled': 1, + 'proxy.config.http.parent_proxy.fail_threshold': 2, + 'proxy.config.http.parent_proxy.total_connect_attempts': 1, + 'proxy.config.http.cache.max_stale_age': 10, + 'proxy.config.http.parent_proxy.self_detect': 0, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|parent_proxy', + 'proxy.config.dns.nameservers': f"127.0.0.1:{self.nameserver.Variables.Port}", + }) self.ts_child.Disk.parent_config.AddLine( - f'dest_domain=. 
parent="{self.ts_parent_hostname}" round_robin=consistent_hash go_direct=false' - ) - self.ts_child.Disk.remap_config.AddLine( - f'map / http://localhost:{self.server.Variables.http_port}' - ) + f'dest_domain=. parent="{self.ts_parent_hostname}" round_robin=consistent_hash go_direct=false') + self.ts_child.Disk.remap_config.AddLine(f'map / http://localhost:{self.server.Variables.http_port}') def run(self): """Run the test cases.""" tr = Test.AddTestRun() - tr.AddVerifierClientProcess( - 'client', - self.single_transaction_replay, - http_ports=[self.ts_child.Variables.port]) + tr.AddVerifierClientProcess('client', self.single_transaction_replay, http_ports=[self.ts_child.Variables.port]) tr.Processes.Default.ReturnCode = 0 tr.StillRunningAfter = self.ts_child tr.Processes.Default.StartBefore(self.server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py trafficserver-9.2.4+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/proxy_protocol/proxy_serve_stale_dns_fail.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,33 +27,28 @@ Test.testName = "STALE" # Config child proxy to route to parent proxy -ts_child.Disk.records_config.update({ - 'proxy.config.http.push_method_enabled': 1, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.http.cache.max_stale_age': 10, - 'proxy.config.http.parent_proxy.self_detect': 0, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", -}) +ts_child.Disk.records_config.update( + { + 'proxy.config.http.push_method_enabled': 1, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.http.cache.max_stale_age': 10, + 'proxy.config.http.parent_proxy.self_detect': 0, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + }) ts_child.Disk.parent_config.AddLine( - f'dest_domain=. parent=localhost:{ts_parent.Variables.port} round_robin=consistent_hash go_direct=false' -) -ts_child.Disk.remap_config.AddLine( - f'map http://localhost:{ts_child.Variables.port} {server_name}' -) + f'dest_domain=. 
parent=localhost:{ts_parent.Variables.port} round_robin=consistent_hash go_direct=false') +ts_child.Disk.remap_config.AddLine(f'map http://localhost:{ts_child.Variables.port} {server_name}') # Configure parent proxy -ts_parent.Disk.records_config.update({ - 'proxy.config.http.push_method_enabled': 1, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.http.cache.max_stale_age': 10, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", -}) -ts_parent.Disk.remap_config.AddLine( - f'map http://localhost:{ts_parent.Variables.port} {server_name}' -) -ts_parent.Disk.remap_config.AddLine( - f'map {server_name} {server_name}' -) +ts_parent.Disk.records_config.update( + { + 'proxy.config.http.push_method_enabled': 1, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.http.cache.max_stale_age': 10, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + }) +ts_parent.Disk.remap_config.AddLine(f'map http://localhost:{ts_parent.Variables.port} {server_name}') +ts_parent.Disk.remap_config.AddLine(f'map {server_name} {server_name}') # Configure nameserver nameserver.addRecords(records={"localhost": ["127.0.0.1"]}) @@ -62,7 +57,6 @@ stale_5 = "HTTP/1.1 200 OK\nServer: ATS/10.0.0\nAccept-Ranges: bytes\nContent-Length: 6\nCache-Control: public, max-age=5\n\nCACHED" stale_10 = "HTTP/1.1 200 OK\nServer: ATS/10.0.0\nAccept-Ranges: bytes\nContent-Length: 6\nCache-Control: public, max-age=10\n\nCACHED" - # Testing scenarios child_curl_request = ( # Test child serving stale with failed DNS OS lookup @@ -73,8 +67,7 @@ # Test parent serving stale with failed DNS OS lookup f'curl -X PUSH -d "{stale_5}" "http://localhost:{ts_parent.Variables.port}";' f'sleep 7; curl -s -v http://localhost:{ts_parent.Variables.port};' - f'sleep 15; curl -s -v http://localhost:{ts_parent.Variables.port};' -) + f'sleep 15; curl -s -v http://localhost:{ts_parent.Variables.port};') # Test case for when parent server is down but child proxy can serve cache object tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/redirect/number_of_redirects.test.py trafficserver-9.2.4+ds/tests/gold_tests/redirect/number_of_redirects.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/redirect/number_of_redirects.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/redirect/number_of_redirects.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,6 +18,7 @@ # limitations under the License. 
import os + Test.Summary = ''' Test redirection/location & number of redirects(number_of_redirections config) ''' @@ -46,13 +47,11 @@ self._srv2 = Test.MakeVerifierServerProcess( f"srv2_{self._numberOfRedirections}", "replay/redirect_srv2_replay.yaml", - context={ - "vs_http_port": self._srv3.Variables.http_port}) + context={"vs_http_port": self._srv3.Variables.http_port}) self._srv1 = Test.MakeVerifierServerProcess( f"srv1_{self._numberOfRedirections}", "replay/redirect_srv1_replay.yaml", - context={ - "vs_http_port": self._srv2.Variables.http_port}) + context={"vs_http_port": self._srv2.Variables.http_port}) def setup_dns(self): self._dns = Test.MakeDNServer(f"dns_{self._numberOfRedirections}") @@ -61,20 +60,22 @@ self._dns.addRecords(records={"c.test": ["127.0.0.1"]}) def add_config(self): - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|redirect|http_redirect', - 'proxy.config.http.number_of_redirections': self._numberOfRedirections, - 'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns.Variables.Port}', - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.url_remap.remap_required': 0, # need this so the domain gets a chance to be evaluated through DNS - 'proxy.config.http.redirect.actions': 'self:follow', # redirects to self are not followed by default - }) - self._ts.Disk.remap_config.AddLines([ - 'map a.test/ping http://a.test:{0}/'.format(self._srv1.Variables.http_port), - 'map b.test/pong http://b.test:{0}/'.format(self._srv2.Variables.http_port), - 'map c.test/pang http://c.test:{0}/'.format(self._srv3.Variables.http_port), - ]) + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|redirect|http_redirect', + 'proxy.config.http.number_of_redirections': self._numberOfRedirections, + 'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns.Variables.Port}', + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.url_remap.remap_required': 0, # need this so the domain gets a chance to be evaluated through DNS + 'proxy.config.http.redirect.actions': 'self:follow', # redirects to self are not followed by default + }) + self._ts.Disk.remap_config.AddLines( + [ + 'map a.test/ping http://a.test:{0}/'.format(self._srv1.Variables.http_port), + 'map b.test/pong http://b.test:{0}/'.format(self._srv2.Variables.http_port), + 'map c.test/pang http://c.test:{0}/'.format(self._srv3.Variables.http_port), + ]) def run(self): self._tr.Processes.Default.StartBefore(self._srv1) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect.test.py trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -31,15 +31,16 @@ dest_serv = Test.MakeOriginServer("dest_server") dns = Test.MakeDNServer("dns") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|redirect', - 'proxy.config.http.number_of_redirections': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.url_remap.remap_required': 0, # need this so the domain gets a chance to be evaluated through DNS - 'proxy.config.http.redirect.actions': 'self:follow', # redirects to self are not followed by default -}) 
+ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|redirect', + 'proxy.config.http.number_of_redirections': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.url_remap.remap_required': 0, # need this so the domain gets a chance to be evaluated through DNS + 'proxy.config.http.redirect.actions': 'self:follow', # redirects to self are not followed by default + }) ts.Disk.logging_yaml.AddLines( ''' @@ -50,14 +51,16 @@ logs: - filename: the_log format: custom -'''.split("\n") -) +'''.split("\n")) Test.Setup.Copy(os.path.join(Test.Variables.AtsTestToolsDir, 'tcp_client.py')) redirect_request_header = {"headers": "GET /redirect HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""} -redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirectDest\r\n\r\n".format( - dest_serv.Variables.Port), "timestamp": "5678", "body": ""} +redirect_response_header = { + "headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirectDest\r\n\r\n".format(dest_serv.Variables.Port), + "timestamp": "5678", + "body": "" +} redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) dest_request_header = {"headers": "GET /redirectDest HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "11", "body": ""} @@ -84,7 +87,6 @@ tr.Processes.Default.Streams.stdout = "gold/redirect.gold" tr.Processes.Default.ReturnCode = 0 - redirect_request_header = {"headers": "GET /redirect-relative-path HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""} redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: /redirect\r\n\r\n", "timestamp": "5678", "body": ""} redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) @@ -106,9 +108,11 @@ tr.Processes.Default.Streams.stdout = "gold/redirect.gold" tr.Processes.Default.ReturnCode = 0 - redirect_request_header = { - "headers": "GET /redirect-relative-path-no-leading-slash HTTP/1.1\r\nHost: *\r\n\r\n", "timestamp": "5678", "body": ""} + "headers": "GET /redirect-relative-path-no-leading-slash HTTP/1.1\r\nHost: *\r\n\r\n", + "timestamp": "5678", + "body": "" +} redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: redirect\r\n\r\n", "timestamp": "5678", "body": ""} redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) @@ -116,7 +120,8 @@ command_path = os.path.join(data_path, tr.Name) with open(command_path, 'w') as f: f.write( - 'GET /redirect-relative-path-no-leading-slash HTTP/1.1\r\nHost: iwillredirect.test:{port}\r\n\r\n'.format(port=redirect_serv.Variables.Port)) + 'GET /redirect-relative-path-no-leading-slash HTTP/1.1\r\nHost: iwillredirect.test:{port}\r\n\r\n'.format( + port=redirect_serv.Variables.Port)) tr.Processes.Default.Command = f"{sys.executable} tcp_client.py 127.0.0.1 {ts.Variables.port} {command_path} | egrep -v '^(Date: |Server: ATS/)'" tr.StillRunningAfter = ts tr.StillRunningAfter = redirect_serv @@ -125,7 +130,6 @@ tr.Processes.Default.Streams.stdout = "gold/redirect.gold" tr.Processes.Default.ReturnCode = 0 - for status, phrase in sorted({ 301: 'Moved Permanently', 302: 'Found', @@ -137,25 +141,25 @@ redirect_request_header = { "headers": ("GET /redirect{0} HTTP/1.1\r\n" - "Host: *\r\n\r\n"). 
- format(status), + "Host: *\r\n\r\n").format(status), "timestamp": "5678", - "body": ""} + "body": "" + } redirect_response_header = { "headers": ("HTTP/1.1 {0} {1}\r\n" "Connection: close\r\n" - "Location: /redirect\r\n\r\n"). - format(status, phrase), + "Location: /redirect\r\n\r\n").format(status, phrase), "timestamp": "5678", - "body": ""} + "body": "" + } redirect_serv.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) tr = Test.AddTestRun("FollowsRedirect{0}".format(status)) command_path = os.path.join(data_path, tr.Name) with open(command_path, 'w') as f: - f.write(('GET /redirect{0} HTTP/1.1\r\n' - 'Host: iwillredirect.test:{1}\r\n\r\n'). - format(status, redirect_serv.Variables.Port)) + f.write( + ('GET /redirect{0} HTTP/1.1\r\n' + 'Host: iwillredirect.test:{1}\r\n\r\n').format(status, redirect_serv.Variables.Port)) tr.Processes.Default.Command = f"{sys.executable} tcp_client.py 127.0.0.1 {ts.Variables.port} {command_path} | egrep -v '^(Date: |Server: ATS/)'" tr.StillRunningAfter = ts tr.StillRunningAfter = redirect_serv @@ -168,10 +172,7 @@ tr = Test.AddTestRun("wait_for_log") tr.Processes.Default.Command = ( - './wait_for_log.sh {} {}'.format( - os.path.join(ts.Variables.LOGDIR, 'the_log.log'), redirect_serv.Variables.Port - ) -) + './wait_for_log.sh {} {}'.format(os.path.join(ts.Variables.LOGDIR, 'the_log.log'), redirect_serv.Variables.Port)) tr.Processes.Default.Streams.stdout = "gold/redirect_log.gold" tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect_actions.test.py trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect_actions.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect_actions.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect_actions.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -38,19 +38,18 @@ host = socket.gethostname() ipv4addrs = set() try: - ipv4addrs = set([ip for - (family, _, _, _, (ip, *_)) in - socket.getaddrinfo(host, port=None) if - socket.AF_INET == family]) + ipv4addrs = set([ip for (family, _, _, _, (ip, *_)) in socket.getaddrinfo(host, port=None) if socket.AF_INET == family]) except socket.gaierror: pass ipv6addrs = set() try: - ipv6addrs = set(["[{0}]".format(ip.split('%')[0]) for - (family, _, _, _, (ip, *_)) in - socket.getaddrinfo(host, port=None) if - socket.AF_INET6 == family and 'fe80' != ip[0:4]]) # Skip link-local addresses. + ipv6addrs = set( + [ + "[{0}]".format(ip.split('%')[0]) + for (family, _, _, _, (ip, *_)) in socket.getaddrinfo(host, port=None) + if socket.AF_INET6 == family and 'fe80' != ip[0:4] + ]) # Skip link-local addresses. except socket.gaierror: pass @@ -62,12 +61,14 @@ 'headers': ('GET / HTTP/1.1\r\n' 'Host: *\r\n\r\n'), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' +} response_header = { 'headers': ('HTTP/1.1 204 No Content\r\n' 'Connection: close\r\n\r\n'), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' +} origin.addResponse('sessionfile.log', request_header, response_header) # Map scenarios to trafficserver processes. 
@@ -112,17 +113,18 @@ if config not in trafficservers: trafficservers[config] = Test.MakeATSProcess('ts_{0}'.format(normConfig), enable_cache=False) - trafficservers[config].Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|redirect', - 'proxy.config.http.number_of_redirections': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.url_remap.remap_required': 0, - 'proxy.config.http.redirect.actions': config, - 'proxy.config.http.connect_attempts_timeout': 5, - 'proxy.config.http.connect_attempts_max_retries': 0, - }) + trafficservers[config].Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|redirect', + 'proxy.config.http.number_of_redirections': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.url_remap.remap_required': 0, + 'proxy.config.http.redirect.actions': config, + 'proxy.config.http.connect_attempts_timeout': 5, + 'proxy.config.http.connect_attempts_max_retries': 0, + }) tr.Processes.Default.StartBefore(trafficservers[config]) else: tr.StillRunningAfter = trafficservers[config] @@ -136,26 +138,27 @@ # A GET request parameterized on the config and on the target. request_header = { 'headers': ('GET /redirect?config={0}&target={1} HTTP/1.1\r\n' - 'Host: *\r\n\r\n'). - format(normConfig, normRedirectTarget), + 'Host: *\r\n\r\n').format(normConfig, normRedirectTarget), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' + } # Returns a redirect to the test domain for the given target & the port number for the TS of the given config. response_header = { - 'headers': ('HTTP/1.1 307 Temporary Redirect\r\n' - 'Location: http://{0}:{1}/\r\n' - 'Connection: close\r\n\r\n'). - format(testDomain, origin.Variables.Port), + 'headers': + ('HTTP/1.1 307 Temporary Redirect\r\n' + 'Location: http://{0}:{1}/\r\n' + 'Connection: close\r\n\r\n').format(testDomain, origin.Variables.Port), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' + } origin.addResponse('sessionfile.log', request_header, response_header) # Generate the request data file. command_path = os.path.join(data_path, tr.Name) with open(command_path, 'w') as f: - f.write(('GET /redirect?config={0}&target={1} HTTP/1.1\r\n' - 'Host: iwillredirect.test:{2}\r\n\r\n'). - format(normConfig, normRedirectTarget, origin.Variables.Port)) + f.write( + ('GET /redirect?config={0}&target={1} HTTP/1.1\r\n' + 'Host: iwillredirect.test:{2}\r\n\r\n').format(normConfig, normRedirectTarget, origin.Variables.Port)) # Set the command with the appropriate URL. port = trafficservers[config].Variables.port dir_path = os.path.join(data_dirname, tr.Name) @@ -204,7 +207,6 @@ AddressE.Self: ActionE.Return, AddressE.Default: ActionE.Reject, }, - { # Follow to loopback, but alternately reject/return others, flipped from the previous scenario. AddressE.Private: ActionE.Return, @@ -215,19 +217,16 @@ AddressE.Self: ActionE.Reject, AddressE.Default: ActionE.Return, }, - { # Return loopback, but reject everything else. AddressE.Loopback: ActionE.Return, AddressE.Default: ActionE.Reject, }, - { # Reject loopback, but return everything else. AddressE.Loopback: ActionE.Reject, AddressE.Default: ActionE.Return, }, - { # Return everything. 
AddressE.Default: ActionE.Return, diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect_post.test.py trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect_post.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect_post.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect_post.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ # TODO figure out how to use this MAX_REDIRECT = 99 -Test.SkipUnless( - Condition.HasProgram("truncate", "truncate need to be installed on system for this test to work") -) +Test.SkipUnless(Condition.HasProgram("truncate", "truncate need to be installed on system for this test to work")) Test.ContinueOnFail = True @@ -34,25 +32,41 @@ redirect_serv2 = Test.MakeOriginServer("re_server2") dest_serv = Test.MakeOriginServer("dest_server") -ts.Disk.records_config.update({ - 'proxy.config.http.number_of_redirections': MAX_REDIRECT, - 'proxy.config.http.post_copy_size': 919430601, - 'proxy.config.http.redirect.actions': 'self:follow', # redirects to self are not followed by default - # 'proxy.config.diags.debug.enabled': 1, -}) +ts.Disk.records_config.update( + { + 'proxy.config.http.number_of_redirections': MAX_REDIRECT, + 'proxy.config.http.post_copy_size': 919430601, + 'proxy.config.http.redirect.actions': 'self:follow', # redirects to self are not followed by default + # 'proxy.config.diags.debug.enabled': 1, + }) redirect_request_header = { - "headers": "POST /redirect1 HTTP/1.1\r\nHost: *\r\nContent-Length: 52428800\r\n\r\n", "timestamp": "5678", "body": ""} -redirect_response_header = {"headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirect2\r\n\r\n".format( - redirect_serv2.Variables.Port), "timestamp": "5678", "body": ""} + "headers": "POST /redirect1 HTTP/1.1\r\nHost: *\r\nContent-Length: 52428800\r\n\r\n", + "timestamp": "5678", + "body": "" +} +redirect_response_header = { + "headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirect2\r\n\r\n".format(redirect_serv2.Variables.Port), + "timestamp": "5678", + "body": "" +} redirect_request_header2 = { - "headers": "POST /redirect2 HTTP/1.1\r\nHost: *\r\nContent-Length: 52428800\r\n\r\n", "timestamp": "5678", "body": ""} -redirect_response_header2 = {"headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirectDest\r\n\r\n".format( - dest_serv.Variables.Port), "timestamp": "5678", "body": ""} + "headers": "POST /redirect2 HTTP/1.1\r\nHost: *\r\nContent-Length: 52428800\r\n\r\n", + "timestamp": "5678", + "body": "" +} +redirect_response_header2 = { + "headers": "HTTP/1.1 302 Found\r\nLocation: http://127.0.0.1:{0}/redirectDest\r\n\r\n".format(dest_serv.Variables.Port), + "timestamp": "5678", + "body": "" +} dest_request_header = { - "headers": "POST /redirectDest HTTP/1.1\r\nHost: *\r\nContent-Length: 52428800\r\n\r\n", "timestamp": "11", "body": ""} + "headers": "POST /redirectDest HTTP/1.1\r\nHost: *\r\nContent-Length: 52428800\r\n\r\n", + "timestamp": "11", + "body": "" +} dest_response_header = {"headers": "HTTP/1.1 204 No Content\r\n\r\n", "timestamp": "22", "body": ""} redirect_serv1.addResponse("sessionfile.log", redirect_request_header, redirect_response_header) @@ -60,8 +74,7 @@ dest_serv.addResponse("sessionfile.log", dest_request_header, dest_response_header) ts.Disk.remap_config.AddLine( - 'map http://127.0.0.1:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, redirect_serv1.Variables.Port) -) + 'map http://127.0.0.1:{0} 
http://127.0.0.1:{1}'.format(ts.Variables.port, redirect_serv1.Variables.Port)) tr = Test.AddTestRun() tr.Processes.Default.Command = 'touch largefile.txt && truncate largefile.txt -s 50M && curl -H "Expect: " -i http://127.0.0.1:{0}/redirect1 -F "filename=@./largefile.txt" && rm -f largefile.txt'.format( diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect_stale.test.py trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect_stale.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/redirect/redirect_stale.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/redirect/redirect_stale.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,39 +29,44 @@ 'headers': ('GET /obj HTTP/1.1\r\n' 'Host: *\r\n\r\n'), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' +} response_header = { 'headers': ('HTTP/1.1 302 Found\r\n' 'Location: http://127.0.0.1:{}/obj2\r\n\r\n'.format(server.Variables.Port)), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' +} server.addResponse('sessionfile.log', request_header, response_header) request_header = { 'headers': ('GET /obj2 HTTP/1.1\r\n' 'Host: *\r\n\r\n'), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' +} response_header = { 'headers': ('HTTP/1.1 200 OK\r\n' 'X-Obj: obj2\r\n' 'Cache-Control: max-age=2\r\n' 'Content-Length: 0\r\n\r\n'), 'timestamp': ArbitraryTimestamp, - 'body': ''} + 'body': '' +} server.addResponse('sessionfile.log', request_header, response_header) ts = Test.MakeATSProcess("ts") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|cache|redirect', - 'proxy.config.http.cache.required_headers': 0, # Only Content-Length header required for caching. - 'proxy.config.http.push_method_enabled': 1, - 'proxy.config.url_remap.remap_required': 0, - 'proxy.config.http.redirect.actions': 'routable:follow,loopback:follow,self:follow', - 'proxy.config.http.number_of_redirections': 1 -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|cache|redirect', + 'proxy.config.http.cache.required_headers': 0, # Only Content-Length header required for caching. + 'proxy.config.http.push_method_enabled': 1, + 'proxy.config.url_remap.remap_required': 0, + 'proxy.config.http.redirect.actions': 'routable:follow,loopback:follow,self:follow', + 'proxy.config.http.number_of_redirections': 1 + }) # Set up to check the output after the tests have run. # @@ -73,8 +78,7 @@ tr.Processes.Default.StartBefore(server) tr.Processes.Default.Command = ( r"printf 'GET /obj HTTP/1.1\r\nHost: 127.0.0.1:{}\r\n\r\n' | nc localhost {} >> log.txt".format( - server.Variables.Port, ts.Variables.port) -) + server.Variables.Port, ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 # Wait for the response in cache to become stale, then GET it again. @@ -82,14 +86,12 @@ tr = Test.AddTestRun() tr.Processes.Default.Command = ( r"sleep 4 ; printf 'GET /obj HTTP/1.1\r\nHost: 127.0.0.1:{}\r\n\r\n' | nc localhost {} >> log.txt".format( - server.Variables.Port, ts.Variables.port) -) + server.Variables.Port, ts.Variables.port)) tr.Processes.Default.ReturnCode = 0 # Filter out inconsistent content in test output. 
# tr = Test.AddTestRun() tr.Processes.Default.Command = ( - r"grep -v -e '^Date: ' -e '^Age: ' -e '^Connection: ' -e '^Server: ATS/' log.txt | tr -d '\r'> log2.txt" -) + r"grep -v -e '^Date: ' -e '^Age: ' -e '^Connection: ' -e '^Server: ATS/' log.txt | tr -d '\r'> log2.txt") tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/remap/conf_remap_float.test.py trafficserver-9.2.4+ds/tests/gold_tests/remap/conf_remap_float.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/remap/conf_remap_float.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/remap/conf_remap_float.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - Test.Summary = ''' Test command: traffic_ctl config describe proxy.config.http.background_fill_completed_threshold (YTSATS-3309) ''' @@ -22,9 +21,8 @@ ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True) -ts.Disk.MakeConfigFile('conf_remap.config').AddLines([ - 'CONFIG proxy.config.http.background_fill_completed_threshold FLOAT 0.500000' -]) +ts.Disk.MakeConfigFile('conf_remap.config').AddLines( + ['CONFIG proxy.config.http.background_fill_completed_threshold FLOAT 0.500000']) ts.Disk.remap_config.AddLine( f"map http://cdn.example.com/ http://origin.example.com/ @plugin=conf_remap.so @pparam={Test.RunDirectory}/ts/config/conf_remap.config" diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/remap/regex_map.test.py trafficserver-9.2.4+ds/tests/gold_tests/remap/regex_map.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/remap/regex_map.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/remap/regex_map.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,30 +27,27 @@ dns = Test.MakeDNServer("dns", default='127.0.0.1') Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: zero.one.two.three.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: zero.one.two.three.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap', - 'proxy.config.http.referer_filter': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap', + 'proxy.config.http.referer_filter': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL' + }) ts.Disk.remap_config.AddLine( r'regex_map ' r'http://(.*)?one\.two\.three\.com/ ' - r'http://$1reactivate.four.five.six.com:{}/'.format(server.Variables.Port) -) + r'http://$1reactivate.four.five.six.com:{}/'.format(server.Variables.Port)) ts.Disk.remap_config.AddLine( r'regex_map ' r'https://\b(?!(.*one|two|three|four|five|six)).+\b\.seven\.eight\.nine\.com/blah12345.html ' - 
r'https://www.example.com:{}/one/two/three/blah12345.html'.format(server.Variables.Port) -) + r'https://www.example.com:{}/one/two/three/blah12345.html'.format(server.Variables.Port)) tr = Test.AddTestRun() tr.Processes.Default.Command = 'curl -H"Host: zero.one.two.three.com" http://127.0.0.1:{0}/ --verbose'.format(ts.Variables.port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_http.test.py trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_http.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_http.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_http.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,51 +28,38 @@ dns = Test.MakeDNServer("dns") Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} -request_header2 = {"headers": "GET /test HTTP/1.1\r\nHost: www.testexample.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header2 = {"headers": "GET /test HTTP/1.1\r\nHost: www.testexample.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header2 = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) server2.addResponse("sessionfile.log", request_header2, response_header2) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap', - 'proxy.config.http.referer_filter': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL' -}) - -ts.Disk.remap_config.AddLine( - 'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map_with_recv_port http://www.example2.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap', + 'proxy.config.http.referer_filter': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL' + }) + +ts.Disk.remap_config.AddLine('map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine( + 'map_with_recv_port http://www.example2.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.port, server.Variables.Port)) +ts.Disk.remap_config.AddLine('map http://www.example.com:8080 http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('redirect http://test3.com http://httpbin.org'.format(server.Variables.Port)) ts.Disk.remap_config.AddLine( - 
'redirect http://test3.com http://httpbin.org'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map_with_referer http://test4.com http://127.0.0.1:{0} http://httpbin.org (.*[.])?persia[.]com'.format(server.Variables.Port) -) -ts.Disk.remap_config.AddLine( - 'map http://testDNS.com http://audrey.hepburn.com:{0}'.format(server.Variables.Port) -) + 'map_with_referer http://test4.com http://127.0.0.1:{0} http://httpbin.org (.*[.])?persia[.]com'.format(server.Variables.Port)) +ts.Disk.remap_config.AddLine('map http://testDNS.com http://audrey.hepburn.com:{0}'.format(server.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map http://www.testexample.com http://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.url_remap.pristine_host_hdr=1'.format( - server2.Variables.Port)) + 'map http://www.testexample.com http://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.url_remap.pristine_host_hdr=1' + .format(server2.Variables.Port)) dns.addRecords(records={"audrey.hepburn.com.": ["127.0.0.1"]}) dns.addRecords(records={"whatever.com.": ["127.0.0.1"]}) @@ -151,8 +138,7 @@ # DNS test tr = Test.AddTestRun() -tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://testDNS.com" --verbose'.format( - ts.Variables.port) +tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://testDNS.com" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stderr = "gold/remap-DNS-200.gold" diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_https.test.py trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_https.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_https.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_https.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,44 +28,35 @@ # **testname is required** testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # desired response form the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) server2.addResponse("sessionlog.json", request_header, response_header) # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'lm|ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # enable ssl port - 'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port), - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'lm|ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # enable ssl port + 'proxy.config.http.server_ports': '{0} {1}:proto=http2;http:ssl'.format(ts.Variables.port, ts.Variables.ssl_port), + 
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) +ts.Disk.remap_config.AddLine('map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map https://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port) -) + 'map https://www.example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( - 'map https://www.example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -) + 'map_with_recv_port https://www.example3.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( - 'map_with_recv_port https://www.example3.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -) -ts.Disk.remap_config.AddLine( - 'map https://www.anotherexample.com https://127.0.0.1:{0}'.format(server2.Variables.SSL_Port, ts.Variables.ssl_port) -) - + 'map https://www.anotherexample.com https://127.0.0.1:{0}'.format(server2.Variables.SSL_Port, ts.Variables.ssl_port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # call localhost straight tr = Test.AddTestRun() @@ -79,7 +70,6 @@ tr.StillRunningAfter = server tr.StillRunningAfter = ts - # www.example.com host tr = Test.AddTestRun() tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.example.com" --verbose'.format( @@ -87,7 +77,6 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stderr = "gold/remap-https-200.gold" - # www.example.com:80 host tr = Test.AddTestRun() tr.Processes.Default.Command = 'curl --http1.1 -k https://127.0.0.1:{0} -H "Host: www.example.com:443" --verbose'.format( diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_ip_resolve.test.py trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_ip_resolve.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_ip_resolve.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_ip_resolve.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,4 +1,3 @@ - ''' ''' # Licensed to the Apache Software Foundation (ASF) under one @@ -30,32 +29,29 @@ dns = Test.MakeDNServer("dns") Test.testName = "" -request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # expected response from the origin server -response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} +response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} # add response to the server dictionary server.addResponse("sessionfile.log", request_header, response_header) server_v6.addResponse("sessionfile.log", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap', - 'proxy.config.http.referer_filter': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.hostdb.ip_resolve': 'ipv4' -}) - +ts.Disk.records_config.update( + { + 
'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http.*|dns|conf_remap', + 'proxy.config.http.referer_filter': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.hostdb.ip_resolve': 'ipv4' + }) ts.Disk.remap_config.AddLine( - 'map http://testDNS.com http://test.ipv4.only.com:{0} @plugin=conf_remap.so @pparam=proxy.config.hostdb.ip_resolve=ipv6;ipv4;client'.format( - server.Variables.Port)) + 'map http://testDNS.com http://test.ipv4.only.com:{0} @plugin=conf_remap.so @pparam=proxy.config.hostdb.ip_resolve=ipv6;ipv4;client' + .format(server.Variables.Port)) ts.Disk.remap_config.AddLine( - 'map http://testDNS2.com http://test.ipv6.only.com:{0} @plugin=conf_remap.so @pparam=proxy.config.hostdb.ip_resolve=ipv6;only'.format( - server_v6.Variables.Port)) - + 'map http://testDNS2.com http://test.ipv6.only.com:{0} @plugin=conf_remap.so @pparam=proxy.config.hostdb.ip_resolve=ipv6;only' + .format(server_v6.Variables.Port)) dns.addRecords(records={"test.ipv4.only.com.": ["127.0.0.1"]}) dns.addRecords(records={"test.ipv6.only.com": ["127.0.0.1", "::1"]}) @@ -69,7 +65,6 @@ tr.Processes.Default.Streams.stderr = "gold/remap-DNS-200.gold" tr.StillRunningAfter = server - tr = Test.AddTestRun() tr.Processes.Default.Command = 'curl --proxy 127.0.0.1:{0} "http://testDNS2.com" --verbose'.format(ts.Variables.port) tr.Processes.Default.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_ws.test.py trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_ws.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/remap/remap_ws.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/remap/remap_ws.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -26,28 +26,32 @@ server = Test.MakeOriginServer("server") testName = "Test WebSocket Remaps" -request_header = {"headers": "GET /chat HTTP/1.1\r\nHost: www.example.com\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n\r\n", - "body": None} +request_header = { + "headers": "GET /chat HTTP/1.1\r\nHost: www.example.com\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n\r\n", + "body": None +} response_header = { - "headers": "HTTP/1.1 101 OK\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n", - "body": None} + "headers": + "HTTP/1.1 101 OK\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n", + "body": None +} server.addResponse("sessionlog.json", request_header, response_header) ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) - -ts.Disk.remap_config.AddLines([ - 'map ws://www.example.com:{1} ws://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.port), - 'map wss://www.example.com:{1} ws://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -]) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.remap_config.AddLines( + [ + 'map ws://www.example.com:{1} ws://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.port), + 'map wss://www.example.com:{1} 
ws://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port), + ]) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # wss mapping tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_init.test.py trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_init.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_init.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_init.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,8 +22,8 @@ Test for init of runroot from traffic_layout. ''' Test.ContinueOnFail = True -Test.SkipUnless(Test.Variables.BINDIR.startswith(Test.Variables.PREFIX), - "need to guarantee bin path starts with prefix for runroot") +Test.SkipUnless( + Test.Variables.BINDIR.startswith(Test.Variables.PREFIX), "need to guarantee bin path starts with prefix for runroot") # init from pass in path path1 = os.path.join(Test.RunDirectory, "runroot1") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_manager.test.py trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_manager.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_manager.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_manager.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -50,5 +50,5 @@ p = tr.Processes.Default p.Command = "$ATS_BIN/traffic_manager --run-root=" + rr_file p.RunningEvent.Connect(Testers.Lambda(lambda ev: StopProcess(ev, 10))) -p.Streams.All = Testers.ContainsExpression("traffic_server: using root directory '" + - runroot_path + "'", "check if the right runroot is passed down") +p.Streams.All = Testers.ContainsExpression( + "traffic_server: using root directory '" + runroot_path + "'", "check if the right runroot is passed down") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_use.test.py trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_use.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_use.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_use.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,8 +22,8 @@ Test for using of runroot from traffic_layout. ''' Test.ContinueOnFail = True -Test.SkipUnless(Test.Variables.BINDIR.startswith(Test.Variables.PREFIX), - "need to guarantee bin path starts with prefix for runroot") +Test.SkipUnless( + Test.Variables.BINDIR.startswith(Test.Variables.PREFIX), "need to guarantee bin path starts with prefix for runroot") # create two runroot for testing path = os.path.join(Test.RunDirectory, "runroot") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_verify.test.py trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_verify.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/runroot/runroot_verify.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/runroot/runroot_verify.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -40,7 +40,6 @@ # given a custom setup this might work.. 
or it might not logsuffix = logdir - # create runroot path = os.path.join(Test.RunDirectory, "runroot") tr = Test.AddTestRun("Create runroot") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/session_sharing/session_match.test.py trafficserver-9.2.4+ds/tests/gold_tests/session_sharing/session_match.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/session_sharing/session_match.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/session_sharing/session_match.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,42 +32,58 @@ def setupOriginServer(self): self._server = Test.MakeOriginServer("server{counter}".format(counter=self._MyTestCount)) - request_header = {"headers": - "GET /one HTTP/1.1\r\nHost: www.example.com\r\nContent-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} - response_header = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" - "Content-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} + request_header = { + "headers": "GET /one HTTP/1.1\r\nHost: www.example.com\r\nContent-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } + response_header = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" + "Content-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } self._server.addResponse("sessionlog.json", request_header, response_header) - request_header2 = {"headers": "GET /two HTTP/1.1\r\nContent-Length: 0\r\n" - "Host: www.example.com\r\n\r\n", - "timestamp": "1469733493.993", "body": "a\r\na\r\na\r\n\r\n"} - response_header2 = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" - "Content-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} + request_header2 = { + "headers": "GET /two HTTP/1.1\r\nContent-Length: 0\r\n" + "Host: www.example.com\r\n\r\n", + "timestamp": "1469733493.993", + "body": "a\r\na\r\na\r\n\r\n" + } + response_header2 = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" + "Content-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } self._server.addResponse("sessionlog.json", request_header2, response_header2) - request_header3 = {"headers": "GET /three HTTP/1.1\r\nContent-Length: 0\r\n" - "Host: www.example.com\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": "a\r\na\r\na\r\n\r\n"} - response_header3 = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" - "Connection: close\r\nContent-Length: 0\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} + request_header3 = { + "headers": "GET /three HTTP/1.1\r\nContent-Length: 0\r\n" + "Host: www.example.com\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "a\r\na\r\na\r\n\r\n" + } + response_header3 = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\n" + "Connection: close\r\nContent-Length: 0\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } self._server.addResponse("sessionlog.json", request_header3, response_header3) def setupTS(self): self._ts = Test.MakeATSProcess("ts{counter}".format(counter=self._MyTestCount)) - self._ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(self._server.Variables.Port) - ) - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.auth_server_session_private': 1, - 'proxy.config.http.server_session_sharing.pool': 'global', - 'proxy.config.http.server_session_sharing.match': self._sharingMatchValue, - }) + 
self._ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(self._server.Variables.Port)) + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.auth_server_session_private': 1, + 'proxy.config.http.server_session_sharing.pool': 'global', + 'proxy.config.http.server_session_sharing.match': self._sharingMatchValue, + }) def _runTraffic(self): self._tr.Processes.Default.Command = ( @@ -83,32 +99,24 @@ def runAndExpectSharing(self): self._runTraffic() self._ts.Disk.traffic_out.Content = Testers.ContainsExpression( - "global pool search successful", - "Verify that sessions got shared") + "global pool search successful", "Verify that sessions got shared") def runAndExpectNoSharing(self): self._runTraffic() self._ts.Disk.traffic_out.Content = Testers.ExcludesExpression( - "global pool search successful", - "Verify that sessions did not get shared") + "global pool search successful", "Verify that sessions did not get shared") -sessionMatchTest = SessionMatchTest( - TestSummary='Test that session sharing works with host matching', - sharingMatchValue='host') +sessionMatchTest = SessionMatchTest(TestSummary='Test that session sharing works with host matching', sharingMatchValue='host') sessionMatchTest.runAndExpectSharing() -sessionMatchTest = SessionMatchTest( - TestSummary='Test that session sharing works with ip matching', - sharingMatchValue='ip') +sessionMatchTest = SessionMatchTest(TestSummary='Test that session sharing works with ip matching', sharingMatchValue='ip') sessionMatchTest.runAndExpectSharing() sessionMatchTest = SessionMatchTest( - TestSummary='Test that session sharing works with matching both ip and host', - sharingMatchValue='both') + TestSummary='Test that session sharing works with matching both ip and host', sharingMatchValue='both') sessionMatchTest.runAndExpectSharing() sessionMatchTest = SessionMatchTest( - TestSummary='Test that session sharing is disabled when matching is set to none', - sharingMatchValue='none') + TestSummary='Test that session sharing is disabled when matching is set to none', sharingMatchValue='none') sessionMatchTest.runAndExpectNoSharing() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/shutdown/emergency.test.py trafficserver-9.2.4+ds/tests/gold_tests/shutdown/emergency.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/shutdown/emergency.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/shutdown/emergency.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import os Test.Summary = 'Test TSEmergency API' @@ -27,15 +26,16 @@ Test.testName = 'Emergency Shutdown Test' -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 16, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'TSEmergency_test' -}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 16, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'TSEmergency_test' + }) # Load plugin Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'emergency_shutdown.so'), ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/shutdown/fatal.test.py trafficserver-9.2.4+ds/tests/gold_tests/shutdown/fatal.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/shutdown/fatal.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/shutdown/fatal.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,7 +16,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - import os Test.Summary = 'Test TSFatal API' @@ -27,15 +26,16 @@ Test.testName = 'Fatal Shutdown Test' -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 16, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'TSFatal_test' -}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 16, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'TSFatal_test' + }) # Load plugin Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'fatal_shutdown.so'), ts) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/slow_post/http_utils.py trafficserver-9.2.4+ds/tests/gold_tests/slow_post/http_utils.py --- trafficserver-9.2.3+ds/tests/gold_tests/slow_post/http_utils.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/slow_post/http_utils.py 2024-04-03 15:38:30.000000000 +0000 @@ -66,10 +66,7 @@ return content_length_value - (len(read_bytes) - end_of_headers) -def drain_socket( - sock: socket.socket, - previously_read_data: bytes, - num_bytes_to_drain: int) -> None: +def drain_socket(sock: socket.socket, previously_read_data: bytes, num_bytes_to_drain: int) -> None: """Read the rest of the request. :param sock: The socket to drain. diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/slow_post/quick_server.py trafficserver-9.2.4+ds/tests/gold_tests/slow_post/quick_server.py --- trafficserver-9.2.3+ds/tests/gold_tests/slow_post/quick_server.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/slow_post/quick_server.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,9 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from http_utils import (wait_for_headers_complete, - determine_outstanding_bytes_to_read, - drain_socket) +from http_utils import (wait_for_headers_complete, determine_outstanding_bytes_to_read, drain_socket) import argparse import socket @@ -29,22 +27,11 @@ def parse_args() -> argparse.Namespace: """Parse command line arguments.""" parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("address", help="Address to listen on") + parser.add_argument("port", type=int, default=8080, help="The port to listen on") + parser.add_argument('--drain-request', action='store_true', help="Drain the entire request before closing the connection") parser.add_argument( - "address", - help="Address to listen on") - parser.add_argument( - "port", - type=int, - default=8080, - help="The port to listen on") - parser.add_argument( - '--drain-request', - action='store_true', - help="Drain the entire request before closing the connection") - parser.add_argument( - '--abort-response-headers', - action='store_true', - help="Abort the response in the midst of sending the response headers") + '--abort-response-headers', action='store_true', help="Abort the response in the midst of sending the response headers") return parser.parse_args() @@ -80,11 +67,9 @@ if abort_early: response = "HTTP/1." else: - response = ( - r"HTTP/1.1 200 OK\r\n" - r"Content-Length: 0\r\n" - r"\r\n" - ) + response = (r"HTTP/1.1 200 OK\r\n" + r"Content-Length: 0\r\n" + r"\r\n") print(f'Sending:\n{response}') sock.sendall(response.encode("utf-8")) @@ -116,8 +101,7 @@ break if args.drain_request: - num_bytes_to_drain = determine_outstanding_bytes_to_read( - read_bytes) + num_bytes_to_drain = determine_outstanding_bytes_to_read(read_bytes) print(f'Read {len(read_bytes)} bytes. ' f'Draining {num_bytes_to_drain} bytes.') drain_socket(sock, read_bytes, num_bytes_to_drain) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/slow_post/quick_server.test.py trafficserver-9.2.4+ds/tests/gold_tests/slow_post/quick_server.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/slow_post/quick_server.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/slow_post/quick_server.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,7 +19,6 @@ from ports import get_port import sys - Test.Summary = __doc__ @@ -51,9 +50,7 @@ :param tr: The test run to associate with the DNS process with. 
""" - self._dns = tr.MakeDNServer( - f'dns-{QuickServerTest._dns_counter}', - default='127.0.0.1') + self._dns = tr.MakeDNServer(f'dns-{QuickServerTest._dns_counter}', default='127.0.0.1') QuickServerTest._dns_counter += 1 def _configure_server(self, tr: 'TestRun'): @@ -83,15 +80,14 @@ """ self._ts = tr.MakeATSProcess(f'ts-{QuickServerTest._ts_counter}') QuickServerTest._ts_counter += 1 - self._ts.Disk.remap_config.AddLine( - f'map / http://quick.server.com:{self._server.Variables.http_port}' - ) - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http|dns|hostdb', - 'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns.Variables.Port}', - 'proxy.config.dns.resolv_conf': 'NULL', - }) + self._ts.Disk.remap_config.AddLine(f'map / http://quick.server.com:{self._server.Variables.http_port}') + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http|dns|hostdb', + 'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns.Variables.Port}', + 'proxy.config.dns.resolv_conf': 'NULL', + }) def run(self): """Run the test.""" @@ -106,11 +102,9 @@ tr.Setup.CopyAs(self._slow_post_client, Test.RunDirectory) tr.Setup.CopyAs(self._quick_server, Test.RunDirectory) - client_command = ( - f'{sys.executable} {self._slow_post_client} ' - '127.0.0.1 ' - f'{self._ts.Variables.port} ' - ) + client_command = (f'{sys.executable} {self._slow_post_client} ' + '127.0.0.1 ' + f'{self._ts.Variables.port} ') if not self._should_abort_request: client_command += '--finish-request ' tr.Processes.Default.Command = client_command @@ -125,8 +119,5 @@ for abort_request in [True, False]: for drain_request in [True, False]: for abort_response_headers in [True, False]: - test = QuickServerTest( - abort_request, - drain_request, - abort_response_headers) + test = QuickServerTest(abort_request, drain_request, abort_response_headers) test.run() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/slow_post/slow_post.test.py trafficserver-9.2.4+ds/tests/gold_tests/slow_post/slow_post.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/slow_post/slow_post.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/slow_post/slow_post.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,12 +18,11 @@ import sys -Test.SkipUnless( - Condition.PluginExists('request_buffer.so') -) +Test.SkipUnless(Condition.PluginExists('request_buffer.so')) class SlowPostAttack: + def __init__(cls): Test.Summary = 'Test how ATS handles the slow-post attack' cls._origin_max_connections = 3 @@ -35,29 +34,35 @@ def setupOriginServer(self): self._server = Test.MakeOriginServer("server") request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} - response_header = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": ""} + response_header = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } self._server.addResponse("sessionlog.json", request_header, response_header) request_header2 = { "headers": "POST / HTTP/1.1\r\nTransfer-Encoding: chunked\r\nHost: www.example.com\r\nConnection: keep-alive\r\n\r\n", "timestamp": "1469733493.993", - "body": "a\r\na\r\na\r\n\r\n"} - response_header2 = {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", - "timestamp": 
"1469733493.993", "body": ""} + "body": "a\r\na\r\na\r\n\r\n" + } + response_header2 = { + "headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + } self._server.addResponse("sessionlog.json", request_header2, response_header2) def setupTS(self): self._ts = Test.MakeATSProcess("ts", select_ports=True) - self._ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(self._server.Variables.Port) - ) + self._ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(self._server.Variables.Port)) # This plugin can enable request buffer for POST. Test.PrepareInstalledPlugin('request_buffer.so', self._ts) - self._ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.http.per_server.connection.max': self._origin_max_connections, - }) + self._ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.http.per_server.connection.max': self._origin_max_connections, + }) def run(self): tr = Test.AddTestRun() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/slow_post/slow_post_client.py trafficserver-9.2.4+ds/tests/gold_tests/slow_post/slow_post_client.py --- trafficserver-9.2.3+ds/tests/gold_tests/slow_post/slow_post_client.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/slow_post/slow_post_client.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,9 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from http_utils import (wait_for_headers_complete, - determine_outstanding_bytes_to_read, - drain_socket) +from http_utils import (wait_for_headers_complete, determine_outstanding_bytes_to_read, drain_socket) import argparse import socket @@ -29,24 +27,15 @@ def parse_args() -> argparse.Namespace: """Parse the command line arguments.""" parser = argparse.ArgumentParser() + parser.add_argument("proxy_address", help="Address of the proxy to connect to.") + parser.add_argument("proxy_port", type=int, help="The port of the proxy to connect to.") parser.add_argument( - "proxy_address", - help="Address of the proxy to connect to.") - parser.add_argument( - "proxy_port", - type=int, - help="The port of the proxy to connect to.") - parser.add_argument( - '-s', '--server-hostname', + '-s', + '--server-hostname', dest="server_hostname", default="some.server.com", help="The hostname of the server to connect to.") - parser.add_argument( - "-t", "--send_time", - dest="send_time", - type=int, - default=3, - help="The number of seconds to send the POST.") + parser.add_argument("-t", "--send_time", dest="send_time", type=int, default=3, help="The number of seconds to send the POST.") parser.add_argument( '--finish-request', dest="finish_request", @@ -69,11 +58,7 @@ return sock -def send_slow_post( - sock: socket.socket, - server_hostname: str, - send_time: int, - finish_request: bool) -> None: +def send_slow_post(sock: socket.socket, server_hostname: str, send_time: int, finish_request: bool) -> None: """Send a slow POST request. :param sock: The socket to send the request on. @@ -84,11 +69,8 @@ """ # Send the POST request. 
host_header = f'Host: {server_hostname}\r\n'.encode() - request = ( - b"POST / HTTP/1.1\r\n" - + host_header + - b"Transfer-Encoding: chunked\r\n" - b"\r\n") + request = (b"POST / HTTP/1.1\r\n" + host_header + b"Transfer-Encoding: chunked\r\n" + b"\r\n") sock.sendall(request) print('Sent request headers:') print(request.decode()) @@ -128,11 +110,7 @@ print(args) with open_connection(args.proxy_address, args.proxy_port) as sock: - send_slow_post( - sock, - args.server_hostname, - args.send_time, - args.finish_request) + send_slow_post(sock, args.server_hostname, args.send_time, args.finish_request) if args.finish_request: drain_response(sock) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/slow_post/slow_post_clients.py trafficserver-9.2.4+ds/tests/gold_tests/slow_post/slow_post_clients.py --- trafficserver-9.2.3+ds/tests/gold_tests/slow_post/slow_post_clients.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/slow_post/slow_post_clients.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - ''' ''' # Licensed to the Apache Software Foundation (ASF) under one @@ -31,7 +30,7 @@ def slow_post(port, slow_time): - requests.post('http://127.0.0.1:{0}/'.format(port, ), data=gen(slow_time)) + requests.post('http://127.0.0.1:{0}/'.format(port,), data=gen(slow_time)) def makerequest(port, connection_limit): @@ -47,12 +46,8 @@ def main(): parser = argparse.ArgumentParser() - parser.add_argument("--port", "-p", - type=int, - help="Port to use") - parser.add_argument("--connectionlimit", "-c", - type=int, - help="connection limit") + parser.add_argument("--port", "-p", type=int, help="Port to use") + parser.add_argument("--connectionlimit", "-c", type=int, help="connection limit") args = parser.parse_args() makerequest(args.port, args.connectionlimit) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/thread_config/check_threads.py trafficserver-9.2.4+ds/tests/gold_tests/thread_config/check_threads.py --- trafficserver-9.2.3+ds/tests/gold_tests/thread_config/check_threads.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/thread_config/check_threads.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - ''' ''' # Licensed to the Apache Software Foundation (ASF) under one @@ -111,14 +110,13 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('-p', '--ts-path', type=str, dest='ts_path', help='path to traffic_server binary', required=True) - parser.add_argument('-e', '--etnet-threads', type=int, dest='etnet_threads', - help='expected number of ET_NET threads', required=True) - parser.add_argument('-a', '--accept-threads', type=int, dest='accept_threads', - help='expected number of ACCEPT threads', required=True) - parser.add_argument('-t', '--task-threads', type=int, dest='task_threads', - help='expected number of TASK threads', required=True) - parser.add_argument('-c', '--aio-threads', type=int, dest='aio_threads', - help='expected number of AIO threads', required=True) + parser.add_argument( + '-e', '--etnet-threads', type=int, dest='etnet_threads', help='expected number of ET_NET threads', required=True) + parser.add_argument( + '-a', '--accept-threads', type=int, dest='accept_threads', help='expected number of ACCEPT threads', required=True) + parser.add_argument( + '-t', '--task-threads', type=int, dest='task_threads', help='expected number of TASK threads', required=True) + parser.add_argument('-c', '--aio-threads', type=int, dest='aio_threads', help='expected number 
of AIO threads', required=True) args = parser.parse_args() exit(count_threads(args.ts_path, args.etnet_threads, args.accept_threads, args.task_threads, args.aio_threads)) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/thread_config/thread_config.test.py trafficserver-9.2.4+ds/tests/gold_tests/thread_config/thread_config.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/thread_config/thread_config.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/thread_config/thread_config.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,15 +22,17 @@ Test.ContinueOnFail = True ts = Test.MakeATSProcess('ts-1_exec-0_accept-1_task-1_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 1, - 'proxy.config.accept_threads': 0, - 'proxy.config.task_threads': 1, - 'proxy.config.cache.threads_per_disk': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 1, + 'proxy.config.accept_threads': 0, + 'proxy.config.task_threads': 1, + 'proxy.config.cache.threads_per_disk': 1, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) ts.Setup.CopyAs('check_threads.py', Test.RunDirectory) tr = Test.AddTestRun() @@ -40,15 +42,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-1_exec-1_accept-2_task-8_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 1, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.cache.threads_per_disk': 8, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 1, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.cache.threads_per_disk': 8, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -57,15 +61,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-1_exec-10_accept-10_task-32_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 1, - 'proxy.config.accept_threads': 10, - 'proxy.config.task_threads': 10, - 'proxy.config.cache.threads_per_disk': 32, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 1, + 'proxy.config.accept_threads': 10, + 'proxy.config.task_threads': 10, + 'proxy.config.cache.threads_per_disk': 32, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ 
-74,15 +80,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-2_exec-0_accept-1_task-1_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 2, - 'proxy.config.accept_threads': 0, - 'proxy.config.task_threads': 1, - 'proxy.config.cache.threads_per_disk': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 2, + 'proxy.config.accept_threads': 0, + 'proxy.config.task_threads': 1, + 'proxy.config.cache.threads_per_disk': 1, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -91,15 +99,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-2_exec-1_accept-2_task-8_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 2, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.cache.threads_per_disk': 8, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 2, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.cache.threads_per_disk': 8, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -108,15 +118,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-2_exec-10_accept-10_task-32_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 2, - 'proxy.config.accept_threads': 10, - 'proxy.config.task_threads': 10, - 'proxy.config.cache.threads_per_disk': 32, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 2, + 'proxy.config.accept_threads': 10, + 'proxy.config.task_threads': 10, + 'proxy.config.cache.threads_per_disk': 32, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -125,15 +137,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-32_exec-0_accept-1_task-1_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 0, - 'proxy.config.task_threads': 1, - 'proxy.config.cache.threads_per_disk': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 
'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 0, + 'proxy.config.task_threads': 1, + 'proxy.config.cache.threads_per_disk': 1, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -142,15 +156,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-32_exec-1_accept-2_task-8_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.cache.threads_per_disk': 8, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.cache.threads_per_disk': 8, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -159,15 +175,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-32_exec-10_accept-10_task-32_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 32, - 'proxy.config.accept_threads': 10, - 'proxy.config.task_threads': 10, - 'proxy.config.cache.threads_per_disk': 32, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 32, + 'proxy.config.accept_threads': 10, + 'proxy.config.task_threads': 10, + 'proxy.config.cache.threads_per_disk': 32, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -176,15 +194,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-100_exec-0_accept-1_task-1_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 100, - 'proxy.config.accept_threads': 0, - 'proxy.config.task_threads': 1, - 'proxy.config.cache.threads_per_disk': 1, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 100, + 'proxy.config.accept_threads': 0, + 'proxy.config.task_threads': 1, + 'proxy.config.cache.threads_per_disk': 1, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -193,15 +213,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-100_exec-1_accept-2_task-8_aio') 
-ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 100, - 'proxy.config.accept_threads': 1, - 'proxy.config.task_threads': 2, - 'proxy.config.cache.threads_per_disk': 8, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 100, + 'proxy.config.accept_threads': 1, + 'proxy.config.task_threads': 2, + 'proxy.config.cache.threads_per_disk': 8, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] @@ -210,15 +232,17 @@ tr.Processes.Default.StartBefore(ts) ts = Test.MakeATSProcess('ts-100_exec-10_accept-10_task-32_aio') -ts.Disk.records_config.update({ - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.5, - 'proxy.config.exec_thread.limit': 100, - 'proxy.config.accept_threads': 10, - 'proxy.config.task_threads': 10, - 'proxy.config.cache.threads_per_disk': 32, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'}) +ts.Disk.records_config.update( + { + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.5, + 'proxy.config.exec_thread.limit': 100, + 'proxy.config.accept_threads': 10, + 'proxy.config.task_threads': 10, + 'proxy.config.cache.threads_per_disk': 32, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start' + }) tr = Test.AddTestRun() TS_ROOT = ts.Env['TS_ROOT'] diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/timeout/accept_timeout.test.py trafficserver-9.2.4+ds/tests/gold_tests/timeout/accept_timeout.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/timeout/accept_timeout.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/timeout/accept_timeout.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,28 +19,25 @@ Test.Summary = 'Testing ATS inactivity timeout' Test.SkipUnless( - Condition.HasCurlFeature('http2'), - Condition.HasProgram("telnet", "Need telnet to shutdown when server shuts down tcp"), - Condition.HasProgram("nc", "Need nc to send data to server") -) + Condition.HasCurlFeature('http2'), Condition.HasProgram("telnet", "Need telnet to shutdown when server shuts down tcp"), + Condition.HasProgram("nc", "Need nc to send data to server")) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) ts.addSSLfile("../tls/ssl/server.pem") ts.addSSLfile("../tls/ssl/server.key") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.transaction_no_activity_timeout_in': 6, - 'proxy.config.http.accept_no_activity_timeout': 2, - 'proxy.config.net.default_inactivity_timeout': 10, - 'proxy.config.net.defer_accept': 0 # Must turn off defer accept to test the raw TCP case -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.transaction_no_activity_timeout_in': 6, + 'proxy.config.http.accept_no_activity_timeout': 2, + 'proxy.config.net.default_inactivity_timeout': 10, + 'proxy.config.net.defer_accept': 0 # Must turn off defer accept to test the raw TCP case + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # case 1 TLS with no data tr = Test.AddTestRun("tr") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/timeout/active_timeout.test.py trafficserver-9.2.4+ds/tests/gold_tests/timeout/active_timeout.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/timeout/active_timeout.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/timeout/active_timeout.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,9 +18,7 @@ Test.Summary = 'Testing ATS active timeout' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) if Condition.HasATSFeature('TS_USE_QUIC') and Condition.HasCurlFeature('http3'): ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True, enable_quic=True) @@ -36,19 +34,17 @@ ts.addSSLfile("../tls/ssl/server.pem") ts.addSSLfile("../tls/ssl/server.key") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.http.transaction_active_timeout_out': 2, -}) - -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.remap_required': 1, + 'proxy.config.http.transaction_active_timeout_out': 2, + }) + +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') tr = Test.AddTestRun("tr") tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/timeout/conn_timeout.test.py trafficserver-9.2.4+ds/tests/gold_tests/timeout/conn_timeout.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/timeout/conn_timeout.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/timeout/conn_timeout.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -30,14 +30,15 @@ server4 = Test.MakeOriginServer("server4") -ts.Disk.records_config.update({ - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.http.connect_attempts_timeout': 2, - 'proxy.config.http.connect_attempts_max_retries': 0, - 'proxy.config.http.transaction_no_activity_timeout_out': 5, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http', -}) +ts.Disk.records_config.update( + { + 'proxy.config.url_remap.remap_required': 1, + 'proxy.config.http.connect_attempts_timeout': 2, + 'proxy.config.http.connect_attempts_max_retries': 0, + 'proxy.config.http.transaction_no_activity_timeout_out': 5, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http', + }) ts.Disk.remap_config.AddLine('map /blocked 
http://10.1.1.1:{0}'.format(Test.Variables.blocked_upstream_port)) ts.Disk.remap_config.AddLine('map /not-blocked http://10.1.1.1:{0}'.format(Test.Variables.upstream_port)) @@ -52,9 +53,7 @@ - mode: ascii format: testformat filename: squid -'''.split("\n") -) - +'''.split("\n")) # Set up the network name space. Requires privilege tr = Test.AddTestRun("tr-ns-setup") @@ -79,7 +78,6 @@ tr.Processes.Default.Streams.All = Testers.ContainsExpression( "HTTP/1.1 502 internal error - server connection terminated", "Connect failed") - # Should not catch the connect timeout. Even though the first bytes are not sent until after the 2 second connect timeout # But before the no-activity timeout tr = Test.AddTestRun("tr-delayed") @@ -89,7 +87,6 @@ tr.Processes.Default.TimeOut = 7 tr.Processes.Default.Streams.All = Testers.ContainsExpression("HTTP/1.1 200", "Connect succeeded") - # cleanup the network namespace and virtual network tr = Test.AddTestRun("tr-cleanup") tr.Processes.Default.Command = 'sudo ip netns del testserver; sudo ip link del veth0 type veth peer name veth1' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/timeout/inactive_client_timeout.test.py trafficserver-9.2.4+ds/tests/gold_tests/timeout/inactive_client_timeout.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/timeout/inactive_client_timeout.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/timeout/inactive_client_timeout.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -27,24 +27,24 @@ ts.addSSLfile("../tls/ssl/server.pem") ts.addSSLfile("../tls/ssl/server.key") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.http.transaction_no_activity_timeout_in': 2, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.remap_required': 1, + 'proxy.config.http.transaction_no_activity_timeout_in': 2, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) -ts.Disk.remap_config.AddLines([ - 'map https://www.tls.com/ https://127.0.0.1:{0}'.format(server.Variables.https_port), - 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port), -]) +ts.Disk.remap_config.AddLines( + [ + 'map https://www.tls.com/ https://127.0.0.1:{0}'.format(server.Variables.https_port), + 'map / http://127.0.0.1:{0}'.format(server.Variables.http_port), + ]) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # # Test 1: Verify that server delay does not trigger client activity timeout. @@ -57,8 +57,6 @@ # get applied after the request is sent. In other words, a slow to respond server should not # trigger the client inactivity timeout. 
tr = Test.AddTestRun("Verify that server delay does not trigger client activity timeout.") -tr.AddVerifierClientProcess( - "client", replay_file, http_ports=[ts.Variables.port], - https_ports=[ts.Variables.ssl_port]) +tr.AddVerifierClientProcess("client", replay_file, http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port]) tr.Processes.Default.StartBefore(ts) tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/timeout/inactive_timeout.test.py trafficserver-9.2.4+ds/tests/gold_tests/timeout/inactive_timeout.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/timeout/inactive_timeout.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/timeout/inactive_timeout.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -18,9 +18,7 @@ Test.Summary = 'Testing ATS inactivity timeout' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server", delay=8) @@ -33,19 +31,17 @@ ts.addSSLfile("../tls/ssl/server.pem") ts.addSSLfile("../tls/ssl/server.key") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.http.transaction_no_activity_timeout_out': 2, -}) - -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.remap_required': 1, + 'proxy.config.http.transaction_no_activity_timeout_out': 2, + }) + +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') tr = Test.AddTestRun("tr") tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/timeout/tls_conn_timeout.test.py trafficserver-9.2.4+ds/tests/gold_tests/timeout/tls_conn_timeout.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/timeout/tls_conn_timeout.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/timeout/tls_conn_timeout.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -40,16 +40,17 @@ delay_get_ttfb = Test.Processes.Process( "delay get ttfb", './ssl-delay-server {0} 0 6 server.pem'.format(Test.Variables.get_block_ttfb_port)) -ts.Disk.records_config.update({ - 'proxy.config.url_remap.remap_required': 1, - 'proxy.config.http.connect_attempts_timeout': 1, - 'proxy.config.http.post_connect_attempts_timeout': 1, - 'proxy.config.http.connect_attempts_max_retries': 1, - 'proxy.config.http.transaction_no_activity_timeout_out': 4, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'http|ssl', - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) +ts.Disk.records_config.update( + { + 'proxy.config.url_remap.remap_required': 1, + 'proxy.config.http.connect_attempts_timeout': 1, + 'proxy.config.http.post_connect_attempts_timeout': 1, + 'proxy.config.http.connect_attempts_max_retries': 1, + 
'proxy.config.http.transaction_no_activity_timeout_out': 4, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'http|ssl', + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) ts.Disk.remap_config.AddLine('map /connect_blocked https://127.0.0.1:{0}'.format(Test.Variables.block_connect_port)) ts.Disk.remap_config.AddLine('map /ttfb_blocked https://127.0.0.1:{0}'.format(Test.Variables.block_ttfb_port)) @@ -69,8 +70,7 @@ tr.Processes.Default.StartBefore(delay_post_connect, ready=When.PortOpen(Test.Variables.block_connect_port)) tr.Processes.Default.Command = 'curl -H"Connection:close" -d "bob" -i http://127.0.0.1:{0}/connect_blocked --tlsv1.2'.format( ts.Variables.port) -tr.Processes.Default.Streams.All = Testers.ContainsExpression( - "HTTP/1.1 502 connect failed", "Connect failed") +tr.Processes.Default.Streams.All = Testers.ContainsExpression("HTTP/1.1 502 connect failed", "Connect failed") tr.Processes.Default.ReturnCode = 0 tr.StillRunningAfter = delay_post_connect tr.StillRunningAfter = Test.Processes.ts @@ -93,8 +93,7 @@ tr.Processes.Default.StartBefore(delay_get_connect, ready=When.PortOpen(Test.Variables.get_block_connect_port)) tr.Processes.Default.Command = 'curl -H"Connection:close" -i http://127.0.0.1:{0}/get_connect_blocked --tlsv1.2'.format( ts.Variables.port) -tr.Processes.Default.Streams.All = Testers.ContainsExpression( - "HTTP/1.1 502 connect failed", "Connect failed") +tr.Processes.Default.Streams.All = Testers.ContainsExpression("HTTP/1.1 502 connect failed", "Connect failed") tr.Processes.Default.ReturnCode = 0 tr.StillRunningAfter = delay_get_connect @@ -114,7 +113,6 @@ delay_post_ttfb.Streams.All = Testers.ContainsExpression("Accept try", "Should appear one time") delay_post_ttfb.Streams.All += Testers.ContainsExpression("TTFB delay", "Should reach the TTFB delay logic") - delay_get_connect.Streams.All = Testers.ContainsExpression( "Accept try", "Should appear at least two times (may be an extra one due to port ready test)") delay_get_connect.Streams.All += Testers.ExcludesExpression("TTFB delay", "Should not reach the TTFB delay logic") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/h2_early_decode.py trafficserver-9.2.4+ds/tests/gold_tests/tls/h2_early_decode.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/h2_early_decode.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/h2_early_decode.py 2024-04-03 15:38:30.000000000 +0000 @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - ''' A simple tool to decode http2 frames for 0-rtt testing. 
''' @@ -102,6 +101,7 @@ class Http2Frame: + def __init__(self, length, frame_type, flags, stream_id): self.length = length self.frame_type = frame_type @@ -162,8 +162,7 @@ error_code = int(self.payload[4:8].hex(), 16) debug_data = self.payload[8:].hex() return '\nLast Stream ID = 0x{0:08x}\nError Code = 0x{1:08x}\nDebug Data = {2}'.format( - last_stream_id, error_code, debug_data - ) + last_stream_id, error_code, debug_data) else: return '\nError: Frame type mismatch: {0}'.format(Http2FrameDefs.FRAME_TYPES[self.frame_type]) @@ -192,8 +191,7 @@ def print(self): output = 'Length: {0}\nType: {1}\nFlags: {2}\nStream ID: {3}\nPayload: {4}\n'.format( - self.length, Http2FrameDefs.FRAME_TYPES[self.frame_type], self.flags, self.stream_id, self.print_payload() - ) + self.length, Http2FrameDefs.FRAME_TYPES[self.frame_type], self.flags, self.stream_id, self.print_payload()) if self.decode_error is not None: output += self.decode_error + '\n' return output @@ -203,13 +201,13 @@ class Decoder: + def read_frame_header(self, data): frame = Http2Frame( length=int(data[0:3].hex(), 16), frame_type=int(data[3:4].hex(), 16), flags=int(data[4:5].hex(), 16), - stream_id=int(data[5:9].hex(), 16) & Http2FrameDefs.RESERVE_BIT_MASK - ) + stream_id=int(data[5:9].hex(), 16) & Http2FrameDefs.RESERVE_BIT_MASK) return frame def decode(self, data): diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/h2_early_gen.py trafficserver-9.2.4+ds/tests/gold_tests/tls/h2_early_gen.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/h2_early_gen.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/h2_early_gen.py 2024-04-03 15:38:30.000000000 +0000 @@ -15,7 +15,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - ''' A simple tool to generate some raw http2 frames for 0-rtt testing. 
''' @@ -91,8 +90,7 @@ frame_type=TYPE_SETTINGS_FRAME, frame_flags=1 if ack else 0, frame_stream_id=0, - frame_payload=payload - ) + frame_payload=payload) return frame @@ -102,12 +100,7 @@ payload = bytes.fromhex(payload) frame = make_frame( - frame_length=len(payload), - frame_type=TYPE_WINDOW_UPDATE_FRAME, - frame_flags=0, - frame_stream_id=0, - frame_payload=payload - ) + frame_length=len(payload), frame_type=TYPE_WINDOW_UPDATE_FRAME, frame_flags=0, frame_stream_id=0, frame_payload=payload) return frame @@ -126,12 +119,7 @@ else: headers.append((':path', '/early_post')) - headers.extend([ - (':scheme', 'https'), - (':authority', '127.0.0.1'), - ('host', '127.0.0.1'), - ('accept', '*/*') - ]) + headers.extend([(':scheme', 'https'), (':authority', '127.0.0.1'), ('host', '127.0.0.1'), ('accept', '*/*')]) headers_encoded = encode_payload(headers) @@ -140,8 +128,7 @@ frame_type=TYPE_HEADERS_FRAME, frame_flags=HEADERS_FLAG_END_STREAM | HEADERS_FLAG_END_HEADERS, frame_stream_id=stream_id, - frame_payload=headers_encoded - ) + frame_payload=headers_encoded) return frame @@ -149,10 +136,7 @@ def make_h2_req(test): h2_req = H2_PREFACE if test == 'get' or test == 'post': - frames = [ - make_settins_frame(ack=True), - make_headers_frame(test) - ] + frames = [make_settins_frame(ack=True), make_headers_frame(test)] for frame in frames: h2_req += frame elif test == 'multi1': diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/ssl_multicert_loader.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/ssl_multicert_loader.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/ssl_multicert_loader.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/ssl_multicert_loader.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -28,20 +28,17 @@ response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': f'{ts.Variables.SSLDir}', - 'proxy.config.ssl.server.private_key.path': f'{ts.Variables.SSLDir}', -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': f'{ts.Variables.SSLDir}', + 'proxy.config.ssl.server.private_key.path': f'{ts.Variables.SSLDir}', + }) ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{server.Variables.Port}' -) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.Port}') + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') tr = Test.AddTestRun("ensure we can connect for SNI $sni_domain") tr.Processes.Default.StartBefore(Test.Processes.ts) @@ -53,16 +50,16 @@ tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response") tr.Processes.Default.Streams.stderr = Testers.IncludesExpression(f"CN={sni_domain}", "Check response") - tr2 = Test.AddTestRun("Update config files") # Update the configs sslcertpath = ts.Disk.ssl_multicert_config.AbsPath tr2.Disk.File(sslcertpath, id="ssl_multicert_config", typename="ats:config") -tr2.Disk.ssl_multicert_config.AddLines([ - 'ssl_cert_name=server_does_not_exist.pem ssl_key_name=server_does_not_exist.key', - 'dest_ip=* ssl_cert_name=server.pem_doesnotexist ssl_key_name=server.key', -]) +tr2.Disk.ssl_multicert_config.AddLines( + [ + 
'ssl_cert_name=server_does_not_exist.pem ssl_key_name=server_does_not_exist.key', + 'dest_ip=* ssl_cert_name=server.pem_doesnotexist ssl_key_name=server.key', + ]) tr2.StillRunningAfter = ts tr2.StillRunningAfter = server tr2.Processes.Default.Command = 'echo Updated configs' @@ -89,7 +86,6 @@ tr3.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response") tr3.Processes.Default.Streams.stderr = Testers.IncludesExpression(f"CN={sni_domain}", "Check response") - ########################################################################## # Ensure ATS fails/exits when non-existent cert is specified # Also, not explicitly setting proxy.config.ssl.server.multicert.exit_on_load_fail @@ -108,6 +104,5 @@ ts2.ReturnCode = 2 ts2.Ready = 0 # Need this to be 0 because we are testing shutdown, this is to make autest not think ats went away for a bad reason. ts.Disk.traffic_out.Content = Testers.ExcludesExpression( - 'Traffic Server is fully initialized', - 'process should fail when invalid certificate specified') + 'Traffic Server is fully initialized', 'process should fail when invalid certificate specified') ts2.Disk.diags_log.Content = Testers.IncludesExpression('FATAL: failed to load SSL certificate file', 'check diags.log"') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/test-0rtt-s_client.py trafficserver-9.2.4+ds/tests/gold_tests/tls/test-0rtt-s_client.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/test-0rtt-s_client.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/test-0rtt-s_client.py 2024-04-03 15:38:30.000000000 +0000 @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - ''' ''' # Licensed to the Apache Software Foundation (ASF) under one @@ -35,18 +34,19 @@ s_client_cmd_1 = shlex.split( 'openssl s_client -connect 127.0.0.1:{0} -tls1_3 -quiet -sess_out {1}'.format(ats_port, sess_file_path)) s_client_cmd_2 = shlex.split( - 'openssl s_client -connect 127.0.0.1:{0} -tls1_3 -quiet -sess_in {1} -early_data {2}'.format(ats_port, sess_file_path, early_data_file_path)) + 'openssl s_client -connect 127.0.0.1:{0} -tls1_3 -quiet -sess_in {1} -early_data {2}'.format( + ats_port, sess_file_path, early_data_file_path)) - create_sess_proc = subprocess.Popen(s_client_cmd_1, env=os.environ.copy( - ), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + create_sess_proc = subprocess.Popen( + s_client_cmd_1, env=os.environ.copy(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) try: output = create_sess_proc.communicate(timeout=1)[0] except subprocess.TimeoutExpired: create_sess_proc.kill() output = create_sess_proc.communicate()[0] - reuse_sess_proc = subprocess.Popen(s_client_cmd_2, env=os.environ.copy( - ), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + reuse_sess_proc = subprocess.Popen( + s_client_cmd_2, env=os.environ.copy(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) try: output = reuse_sess_proc.communicate(timeout=1)[0] except subprocess.TimeoutExpired: diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -48,30 +48,32 @@ post_body = "{0}0".format(post_body) # Add info the origin server responses -server.addResponse("sessionlog.json", - {"headers": header_string, - 
"timestamp": "1469733493.993", - "body": post_body}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 2\r\n\r\n", - "timestamp": "1469733493.993", - "body": "ok"}) +server.addResponse( + "sessionlog.json", { + "headers": header_string, + "timestamp": "1469733493.993", + "body": post_body + }, { + "headers": + "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 2\r\n\r\n", + "timestamp": "1469733493.993", + "body": "ok" + }) # add ssl materials like key, certificates for the server ts.addDefaultSSLFiles() -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - }) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + }) tr = Test.AddTestRun("Run-Test") tr.Command = './ssl-post 127.0.0.1 40 {0} {1}'.format(header_count, ts.Variables.ssl_port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_0rtt_server.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_0rtt_server.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_0rtt_server.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_0rtt_server.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -30,21 +30,9 @@ ts = Test.MakeATSProcess('ts', enable_tls=True) server = Test.MakeOriginServer('server') -request_header1 = { - 'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': '' -} -response_header1 = { - 'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': 'curl test' -} -request_header2 = { - 'headers': 'GET /early_get HTTP/1.1\r\nHost: www.example.com\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': '' -} +request_header1 = {'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', 'timestamp': '1469733493.993', 'body': ''} +response_header1 = {'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '1469733493.993', 'body': 'curl test'} +request_header2 = {'headers': 'GET /early_get HTTP/1.1\r\nHost: www.example.com\r\n\r\n', 'timestamp': '1469733493.993', 'body': ''} response_header2 = { 'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '1469733493.993', @@ -109,30 +97,28 @@ ts.Setup.Copy('early_h2_multi1.txt') ts.Setup.Copy('early_h2_multi2.txt') -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'http', - 'proxy.config.exec_thread.autoconfig': 0, - 'proxy.config.exec_thread.limit': 8, - 'proxy.config.ssl.server.cert.path': 
'{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 512000, - 'proxy.config.ssl.session_cache.timeout': 7200, - 'proxy.config.ssl.session_cache.num_buckets': 32768, - 'proxy.config.ssl.server.session_ticket.enable': 1, - 'proxy.config.ssl.server.max_early_data': 16384, - 'proxy.config.ssl.server.allow_early_data_params': 0, - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'http', + 'proxy.config.exec_thread.autoconfig': 0, + 'proxy.config.exec_thread.limit': 8, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 512000, + 'proxy.config.ssl.session_cache.timeout': 7200, + 'proxy.config.ssl.session_cache.num_buckets': 32768, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.server.max_early_data': 16384, + 'proxy.config.ssl.server.allow_early_data_params': 0, + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA' + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) tr = Test.AddTestRun('Basic Curl Test') tr.Processes.Default.Command = 'curl https://127.0.0.1:{0} -k'.format(ts.Variables.ssl_port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_bad_alpn.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_bad_alpn.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_bad_alpn.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_bad_alpn.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -34,16 +34,15 @@ 
ts.addSSLfile("ssl/server.key") # Make sure the TS server certs are different from the origin certs -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) tr = Test.AddTestRun("alpn banana") tr.Processes.Default.Command = "openssl s_client -ign_eof -alpn=banana -connect 127.0.0.1:{}".format(ts.Variables.ssl_port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_cert_selection.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_cert_selection.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_cert_selection.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_cert_selection.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -39,26 +39,27 @@ ts.addSSLfile("ssl/signer.key") ts.addSSLfile("ssl/combo.pem") -ts.Disk.remap_config.AddLine( - 'map / https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map / https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLines([ - 'dest_ip=127.0.0.1 ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key', - 'ssl_cert_name=signed2-bar.pem ssl_key_name=signed-bar.key', - 'dest_ip=* ssl_cert_name=combo.pem' -]) +ts.Disk.ssl_multicert_config.AddLines( + [ + 'dest_ip=127.0.0.1 ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key', + 'ssl_cert_name=signed2-bar.pem ssl_key_name=signed-bar.key', + 'dest_ip=* ssl_cert_name=combo.pem', + ]) # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_cert_selection_reload.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_cert_selection_reload.test.py --- 
trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_cert_selection_reload.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_cert_selection_reload.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -37,25 +37,26 @@ ts.addSSLfile("ssl/signer.key") ts.addSSLfile("ssl/combo.pem") -ts.Disk.remap_config.AddLine( - 'map /stuff https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map /stuff https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLines([ - 'ssl_cert_name=signed-bar.pem ssl_key_name=signed-bar.key', - 'dest_ip=* ssl_cert_name=combo.pem' -]) +ts.Disk.ssl_multicert_config.AddLines( + [ + 'ssl_cert_name=signed-bar.pem ssl_key_name=signed-bar.key', + 'dest_ip=* ssl_cert_name=combo.pem', + ]) # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.diags.debug.tags': 'ssl|http|lm', - 'proxy.config.diags.debug.enabled': 1 -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.diags.debug.tags': 'ssl|http|lm', + 'proxy.config.diags.debug.enabled': 1 + }) # Should receive a bar.com cert issued by first signer tr = Test.AddTestRun("bar.com cert signer1") @@ -80,8 +81,7 @@ tr.StillRunningAfter = server tr.StillRunningAfter = ts tr.Processes.Default.Streams.All = Testers.ContainsExpression( - "unable to get local issuer certificate", - "Server certificate not issued by expected signer") + "unable to get local issuer certificate", "Server certificate not issued by expected signer") # Pause a little to ensure mtime will be updated tr = Test.AddTestRun("Pause a little to ensure mtime will be different") @@ -106,17 +106,15 @@ tr = Test.AddTestRun("Try with signer 1 again") # Wait for the reload to complete -tr.Processes.Default.StartBefore(server3, ready=When.FileContains( - ts.Disk.diags_log.Name, 'ssl_multicert.config finished loading', 2)) +tr.Processes.Default.StartBefore( + server3, ready=When.FileContains(ts.Disk.diags_log.Name, 'ssl_multicert.config finished loading', 2)) tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = "curl -v --cacert ./signer.pem --resolve 'bar.com:{0}:127.0.0.1' https://bar.com:{0}/random".format( ts.Variables.ssl_port) tr.ReturnCode = 60 tr.Processes.Default.Streams.All = Testers.ContainsExpression( - "unable to get local issuer certificate", - "Server certificate not issued by expected signer") - + "unable to get local issuer certificate", "Server certificate not issued by expected signer") tr = Test.AddTestRun("Try with signer 2 again") tr.Processes.Default.Command = "curl -v --cacert ./signer2.pem --resolve 'bar.com:{0}:127.0.0.1' https://bar.com:{0}/random".format( diff -Nru 
trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_dual_cert_selection.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_dual_cert_selection.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_dual_cert_selection.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_dual_cert_selection.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -46,28 +46,29 @@ ts.addSSLfile("ssl/server.pem") ts.addSSLfile("ssl/server.key") -ts.Disk.remap_config.AddLine( - 'map / https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map / https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLines([ - 'ssl_cert_name=signed-foo-ec.pem,signed-foo.pem ssl_key_name=signed-foo-ec.key,signed-foo.key', - 'ssl_cert_name=signed-san-ec.pem,signed-san.pem ssl_key_name=signed-san-ec.key,signed-san.key', - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -]) +ts.Disk.ssl_multicert_config.AddLines( + [ + 'ssl_cert_name=signed-foo-ec.pem,signed-foo.pem ssl_key_name=signed-foo-ec.key,signed-foo.key', + 'ssl_cert_name=signed-san-ec.pem,signed-san.pem ssl_key_name=signed-san-ec.key,signed-san.key', + 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key', + ]) # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.diags.debug.enabled': 1 -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.diags.debug.enabled': 1 + }) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_dual_cert_selection2.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_dual_cert_selection2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_check_dual_cert_selection2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_check_dual_cert_selection2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -47,28 +47,29 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") -ts.Disk.remap_config.AddLine( - 'map / https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map / https://foo.com:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLines([ - 
'ssl_cert_name=combined-ec.pem,combined.pem', - 'ssl_cert_name=signed-foo-ec.pem,signed-foo.pem', - 'dest_ip=* ssl_cert_name=signed-san-ec.pem,signed-san.pem' -]) +ts.Disk.ssl_multicert_config.AddLines( + [ + 'ssl_cert_name=combined-ec.pem,combined.pem', + 'ssl_cert_name=signed-foo-ec.pem,signed-foo.pem', + 'dest_ip=* ssl_cert_name=signed-san-ec.pem,signed-san.pem', + ]) # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '/tmp', # Faulty key path should not matter, since there are no key files - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.diags.debug.enabled': 0 -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '/tmp', # Faulty key path should not matter, since there are no key files + 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.diags.debug.enabled': 0 + }) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) @@ -98,11 +99,11 @@ with open(os.path.join(Test.TestDirectory, 'ssl', 'combined-ec.pem'), 'r') as myfile: file_string = myfile.read() cert_end = file_string.find("END CERTIFICATE-----") - combo_ec_string = re.escape(file_string[0: cert_end]) + combo_ec_string = re.escape(file_string[0:cert_end]) with open(os.path.join(Test.TestDirectory, 'ssl', 'combined.pem'), 'r') as myfile: file_string = myfile.read() cert_end = file_string.find("END CERTIFICATE-----") - combo_rsa_string = re.escape(file_string[0: cert_end]) + combo_rsa_string = re.escape(file_string[0:cert_end]) # Should receive a EC cert since ATS cipher list prefers EC tr = Test.AddTestRun("Default for foo should return EC cert") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_cert.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_cert.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_cert.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_cert.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -25,18 +25,24 @@ cafile = "{0}/signer.pem".format(Test.RunDirectory) cafile2 = "{0}/signer2.pem".format(Test.RunDirectory) # --clientverify: "" empty string because microserver does store_true for argparse, but options is a dictionary -server = Test.MakeOriginServer("server", - ssl=True, - options={"--clientCA": cafile, - "--clientverify": ""}, - clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), - clientkey="{0}/signed-foo.key".format(Test.RunDirectory)) -server2 = Test.MakeOriginServer("server2", - ssl=True, - options={"--clientCA": cafile2, - "--clientverify": ""}, - 
clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), - clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) +server = Test.MakeOriginServer( + "server", + ssl=True, + options={ + "--clientCA": cafile, + "--clientverify": "" + }, + clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), + clientkey="{0}/signed-foo.key".format(Test.RunDirectory)) +server2 = Test.MakeOriginServer( + "server2", + ssl=True, + options={ + "--clientCA": cafile2, + "--clientverify": "" + }, + clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), + clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) server3 = Test.MakeOriginServer("server3") server4 = Test.MakeOriginServer("server4") server.Setup.Copy("ssl/signer.pem") @@ -70,39 +76,30 @@ ts.addSSLfile("ssl/signed2-bar.pem") ts.addSSLfile("ssl/signed-bar.key") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_verify_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.filename': 'signed-foo.pem', - 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.private_key.filename': 'signed-foo.key', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - 'map /case1 https://127.0.0.1:{0}/'.format(server.Variables.SSL_Port) -) -ts.Disk.remap_config.AddLine( - 'map /case2 https://127.0.0.1:{0}/'.format(server2.Variables.SSL_Port) -) - -ts.Disk.sni_yaml.AddLine( - 'sni:') -ts.Disk.sni_yaml.AddLine( - '- fqdn: bar.com') -ts.Disk.sni_yaml.AddLine( - ' client_cert: {0}/signed2-bar.pem'.format(ts.Variables.SSLDir)) -ts.Disk.sni_yaml.AddLine( - ' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir)) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_verify_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.filename': 'signed-foo.pem', + 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.private_key.filename': 'signed-foo.key', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine('map /case1 https://127.0.0.1:{0}/'.format(server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map /case2 https://127.0.0.1:{0}/'.format(server2.Variables.SSL_Port)) + +ts.Disk.sni_yaml.AddLine('sni:') +ts.Disk.sni_yaml.AddLine('- fqdn: bar.com') +ts.Disk.sni_yaml.AddLine(' client_cert: {0}/signed2-bar.pem'.format(ts.Variables.SSLDir)) +ts.Disk.sni_yaml.AddLine(' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir)) ts.Disk.logging_yaml.AddLines( ''' @@ -114,8 +111,7 @@ - mode: ascii format: testformat 
filename: squid -'''.split("\n") -) +'''.split("\n")) # Should succeed tr = Test.AddTestRun("Connect with first client cert to first server") @@ -161,26 +157,23 @@ snipath = ts.Disk.sni_yaml.AbsPath recordspath = ts.Disk.records_config.AbsPath tr2.Disk.File(snipath, id="sni_yaml", typename="ats:config"), -tr2.Disk.sni_yaml.AddLine( - 'sni:') -tr2.Disk.sni_yaml.AddLine( - '- fqdn: bar.com') -tr2.Disk.sni_yaml.AddLine( - ' client_cert: {0}/signed-bar.pem'.format(ts.Variables.SSLDir)) -tr2.Disk.sni_yaml.AddLine( - ' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir)) +tr2.Disk.sni_yaml.AddLine('sni:') +tr2.Disk.sni_yaml.AddLine('- fqdn: bar.com') +tr2.Disk.sni_yaml.AddLine(' client_cert: {0}/signed-bar.pem'.format(ts.Variables.SSLDir)) +tr2.Disk.sni_yaml.AddLine(' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir)) # recreate the records.config with the cert filename changed tr2.Disk.File(recordspath, id="records_config", typename="ats:config:records"), -tr2.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.filename': 'signed2-foo.pem', - 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.private_key.filename': 'signed-foo.key', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) +tr2.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.filename': 'signed2-foo.pem', + 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.private_key.filename': 'signed-foo.key', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) tr2.StillRunningAfter = ts tr2.StillRunningAfter = server tr2.StillRunningAfter = server2 @@ -198,7 +191,6 @@ tr2reload.Processes.Default.Env = ts.Env tr2reload.Processes.Default.ReturnCode = 0 - # Should succeed tr3bar = Test.AddTestRun("Make request with other bar cert to first server") # Wait for the reload to complete @@ -238,7 +230,6 @@ tr3fail.Processes.Default.ReturnCode = 0 tr3fail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response") - # Test the case of updating certificate contents without changing file name. 
trupdate = Test.AddTestRun("Update client cert file in place") trupdate.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_cert2.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_cert2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_cert2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_cert2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,18 +24,24 @@ ts = Test.MakeATSProcess("ts", command="traffic_server", select_ports=True) cafile = "{0}/signer.pem".format(Test.RunDirectory) cafile2 = "{0}/signer2.pem".format(Test.RunDirectory) -server = Test.MakeOriginServer("server", - ssl=True, - options={"--clientCA": cafile, - "--clientverify": ""}, - clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), - clientkey="{0}/signed-foo.key".format(Test.RunDirectory)) -server2 = Test.MakeOriginServer("server2", - ssl=True, - options={"--clientCA": cafile2, - "--clientverify": ""}, - clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), - clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) +server = Test.MakeOriginServer( + "server", + ssl=True, + options={ + "--clientCA": cafile, + "--clientverify": "" + }, + clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), + clientkey="{0}/signed-foo.key".format(Test.RunDirectory)) +server2 = Test.MakeOriginServer( + "server2", + ssl=True, + options={ + "--clientCA": cafile2, + "--clientverify": "" + }, + clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), + clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) server4 = Test.MakeOriginServer("server4") server.Setup.Copy("ssl/signer.pem") server.Setup.Copy("ssl/signer2.pem") @@ -69,41 +75,37 @@ ts.addSSLfile("ssl/signed2-bar.pem") ts.addSSLfile("ssl/signed-bar.key") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.remap_config.AddLine( - 'map /case1 https://127.0.0.1:{0}/'.format(server.Variables.SSL_Port) -) -ts.Disk.remap_config.AddLine( - 'map /case2 https://127.0.0.1:{0}/'.format(server2.Variables.SSL_Port) -) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bob.bar.com', - ' client_cert: signed-bar.pem', - ' client_key: signed-bar.key', - '- fqdn: bob.*.com', - ' client_cert: {0}/combo-signed-foo.pem'.format(ts.Variables.SSLDir), - '- fqdn: "*bar.com"', - ' client_cert: {0}/signed2-bar.pem'.format(ts.Variables.SSLDir), - ' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir), - '- fqdn: "foo.com"', - ' client_cert: {0}/signed2-foo.pem'.format(ts.Variables.SSLDir), - ' client_key: {0}/signed-foo.key'.format(ts.Variables.SSLDir), -]) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 
'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts.Disk.remap_config.AddLine('map /case1 https://127.0.0.1:{0}/'.format(server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map /case2 https://127.0.0.1:{0}/'.format(server2.Variables.SSL_Port)) + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bob.bar.com', + ' client_cert: signed-bar.pem', + ' client_key: signed-bar.key', + '- fqdn: bob.*.com', + ' client_cert: {0}/combo-signed-foo.pem'.format(ts.Variables.SSLDir), + '- fqdn: "*bar.com"', + ' client_cert: {0}/signed2-bar.pem'.format(ts.Variables.SSLDir), + ' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir), + '- fqdn: "foo.com"', + ' client_cert: {0}/signed2-foo.pem'.format(ts.Variables.SSLDir), + ' client_key: {0}/signed-foo.key'.format(ts.Variables.SSLDir), + ]) ts.Disk.logging_yaml.AddLines( ''' @@ -115,8 +117,7 @@ - mode: ascii format: testformat filename: squid -'''.split("\n") -) +'''.split("\n")) # Should succeed tr = Test.AddTestRun("bob.bar.com to server 1") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_cert_override.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_cert_override.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_cert_override.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_cert_override.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,18 +24,24 @@ ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True) cafile = "{0}/signer.pem".format(Test.RunDirectory) cafile2 = "{0}/signer2.pem".format(Test.RunDirectory) -server = Test.MakeOriginServer("server", - ssl=True, - options={"--clientCA": cafile, - "--clientverify": ""}, - clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), - clientkey="{0}/signed-foo.key".format(Test.RunDirectory)) -server2 = Test.MakeOriginServer("server2", - ssl=True, - options={"--clientCA": cafile2, - "--clientverify": ""}, - clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), - clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) +server = Test.MakeOriginServer( + "server", + ssl=True, + options={ + "--clientCA": cafile, + "--clientverify": "" + }, + clientcert="{0}/signed-foo.pem".format(Test.RunDirectory), + clientkey="{0}/signed-foo.key".format(Test.RunDirectory)) +server2 = Test.MakeOriginServer( + "server2", + ssl=True, + options={ + "--clientCA": cafile2, + "--clientverify": "" + }, + clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory), + clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) server.Setup.Copy("ssl/signer.pem") server.Setup.Copy("ssl/signer2.pem") server.Setup.Copy("ssl/signed-foo.pem") @@ -67,44 +73,35 @@ ts.addSSLfile("ssl/signed2-bar.pem") ts.addSSLfile("ssl/signed-bar.key") -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.cert.filename': 'signed-foo.pem', - 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.private_key.filename': 'signed-foo.key', - 
'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.cert.filename': 'signed-foo.pem', + 'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.private_key.filename': 'signed-foo.key', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map /case1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format( - server.Variables.SSL_Port, - "signed-foo.pem", - "signed-foo.key")) + 'map /case1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}' + .format(server.Variables.SSL_Port, "signed-foo.pem", "signed-foo.key")) ts.Disk.remap_config.AddLine( - 'map /badcase1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format( - server.Variables.SSL_Port, - "signed2-foo.pem", - "signed-foo.key")) + 'map /badcase1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}' + .format(server.Variables.SSL_Port, "signed2-foo.pem", "signed-foo.key")) ts.Disk.remap_config.AddLine( - 'map /case2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format( - server2.Variables.SSL_Port, - "signed2-foo.pem", - "signed-foo.key")) + 'map /case2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}' + .format(server2.Variables.SSL_Port, "signed2-foo.pem", "signed-foo.key")) ts.Disk.remap_config.AddLine( - 'map /badcase2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}'.format( - server2.Variables.SSL_Port, - "signed-foo.pem", - "signed-foo.key")) + 'map /badcase2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.cert.filename={1} plugin=conf_remap.so @pparam=proxy.config.ssl.client.private_key.filename={2}' + .format(server2.Variables.SSL_Port, "signed-foo.pem", "signed-foo.key")) # Should succeed tr = Test.AddTestRun("Connect with correct client cert to first server") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_verify.test.py 
trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_verify.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_verify.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_verify.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - Test.Summary = ''' Test various options for requiring certificate from client for mutual authentication TLS ''' @@ -39,37 +38,35 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.certification_level': 2, - 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.TLSv1_3': 0 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.certification_level': 2, + 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.TLSv1_3': 0 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Just map everything through to origin. This test is concentrating on the user-agent side -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}/'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) # Scenario 1: Default no client cert required. cert required for bar.com -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bob.bar.com', - ' verify_client: NONE', - '- fqdn: "bob.com"', - ' verify_client: STRICT', - '- fqdn: bob.*.com', - ' verify_client: NONE', - '- fqdn: "*bar.com"', - ' verify_client: STRICT', -]) +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bob.bar.com', + ' verify_client: NONE', + '- fqdn: "bob.com"', + ' verify_client: STRICT', + '- fqdn: bob.*.com', + ' verify_client: NONE', + '- fqdn: "*bar.com"', + ' verify_client: STRICT', + ]) ts.Disk.logging_yaml.AddLines( ''' @@ -81,8 +78,7 @@ - mode: ascii format: testformat filename: squid -'''.split("\n") -) +'''.split("\n")) # to foo.com w/o client cert. Should fail tr = Test.AddTestRun("Connect to foo.com without cert") @@ -196,7 +192,6 @@ ts.Variables.ssl_port) tr.Processes.Default.ReturnCode = 35 - # Test that the fqdn's match completely. bob.com should require client certificate. 
bob.com.com should not tr = Test.AddTestRun("Connect to bob.com without cert, should fail") tr.StillRunningAfter = ts diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_verify2.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_verify2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_verify2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_verify2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -37,35 +37,33 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.certification_level': 0, - 'proxy.config.ssl.CA.cert.path': '', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir) -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.certification_level': 0, + 'proxy.config.ssl.CA.cert.path': '', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir) + }) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Just map everything through to origin. This test is concentrating on the user-agent side -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}/'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) # Scenario 1: Default no client cert required. cert required for bar.com -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bob.bar.com', - ' verify_client: STRICT', - '- fqdn: bob.*.com', - ' verify_client: STRICT', - '- fqdn: "*bar.com"', - ' verify_client: NONE', -]) +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bob.bar.com', + ' verify_client: STRICT', + '- fqdn: bob.*.com', + ' verify_client: STRICT', + '- fqdn: "*bar.com"', + ' verify_client: NONE', + ]) # to foo.com w/o client cert. Should succeed tr = Test.AddTestRun("Connect to foo.com without cert") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_verify3.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_verify3.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_verify3.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_verify3.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,7 +17,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - Test.Summary = ''' Test per SNI server name selection of CA certs for validating cert sent by client. 
''' @@ -39,42 +38,38 @@ ts.Setup.Copy("ssl/aaa-ca.pem", ts.Variables.SSLDir) ts.Setup.Copy("ssl/ccc-ca.pem", ts.Variables.SSLDir) -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.client.certification_level': 2, - 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/aaa-ca.pem', - 'proxy.config.ssl.TLSv1_3': 0 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'ssl_cert_name=bbb-signed.pem ssl_key_name=bbb-signed.key' -) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.client.certification_level': 2, + 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/aaa-ca.pem', + 'proxy.config.ssl.TLSv1_3': 0 + }) + +ts.Disk.ssl_multicert_config.AddLine('ssl_cert_name=bbb-signed.pem ssl_key_name=bbb-signed.key') +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Just map everything through to origin. This test is concentrating on the user-agent side. -ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{server.Variables.Port}/' -) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bbb.com', - ' verify_client: STRICT', - ' verify_client_ca_certs: bbb-ca.pem', - '- fqdn: bbb-signed', - ' verify_client: STRICT', - ' verify_client_ca_certs: bbb-ca.pem', - '- fqdn: ccc.com', - ' verify_client: STRICT', - ' verify_client_ca_certs:', - f' file: {ts.Variables.SSLDir}/ccc-ca.pem' -]) +ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.Port}/') + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bbb.com', + ' verify_client: STRICT', + ' verify_client_ca_certs: bbb-ca.pem', + '- fqdn: bbb-signed', + ' verify_client: STRICT', + ' verify_client_ca_certs: bbb-ca.pem', + '- fqdn: ccc.com', + ' verify_client: STRICT', + ' verify_client_ca_certs:', + f' file: {ts.Variables.SSLDir}/ccc-ca.pem', + ]) # Success test runs. 
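# Illustrative sketch (not taken from this patch): the success runs that follow all use the
# same autest pattern -- present a client cert signed by the CA that sni.yaml selects for that
# fqdn, resolve the name to loopback, and expect curl to exit 0 (mismatched CAs exit 35).
# The run name and the bbb.com/bbb-signed pairing here are assumptions for illustration only;
# Test, ts and Testers are the usual autest globals available in a .test.py file.
tr = Test.AddTestRun("bbb.com accepts a client cert signed by bbb-ca")
tr.Processes.Default.Command = (
    "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'bbb.com:{0}:127.0.0.1'"
    " https://bbb.com:{0}/xyz").format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/bbb-signed")
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ExcludesExpression("error", "Check response")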
@@ -84,9 +79,8 @@ tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = ( - "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'aaa.com:{0}:127.0.0.1'" + - " https://aaa.com:{0}/xyz" -).format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/aaa-signed") + "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'aaa.com:{0}:127.0.0.1'" + " https://aaa.com:{0}/xyz").format( + ts.Variables.ssl_port, Test.TestDirectory + "/ssl/aaa-signed") tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.All = Testers.ExcludesExpression("error", "Check response") @@ -95,8 +89,7 @@ tr.StillRunningAfter = server tr.Processes.Default.Command = ( "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'bbb-signed:{0}:127.0.0.1'" + - " https://bbb-signed:{0}/xyz" -).format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/bbb-signed") + " https://bbb-signed:{0}/xyz").format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/bbb-signed") tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.All = Testers.ExcludesExpression("error", "Check response") @@ -104,9 +97,8 @@ tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = ( - "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'ccc.com:{0}:127.0.0.1'" + - " https://ccc.com:{0}/xyz" -).format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/ccc-signed") + "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'ccc.com:{0}:127.0.0.1'" + " https://ccc.com:{0}/xyz").format( + ts.Variables.ssl_port, Test.TestDirectory + "/ssl/ccc-signed") tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.All = Testers.ExcludesExpression("error", "Check response") @@ -116,25 +108,22 @@ tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = ( - "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'aaa.com:{0}:127.0.0.1'" + - " https://aaa.com:{0}/xyz" -).format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/bbb-signed") + "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'aaa.com:{0}:127.0.0.1'" + " https://aaa.com:{0}/xyz").format( + ts.Variables.ssl_port, Test.TestDirectory + "/ssl/bbb-signed") tr.Processes.Default.ReturnCode = 35 tr = Test.AddTestRun() tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = ( - "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'bbb.com:{0}:127.0.0.1'" + - " https://bbb.com:{0}/xyz" -).format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/ccc-signed") + "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'bbb.com:{0}:127.0.0.1'" + " https://bbb.com:{0}/xyz").format( + ts.Variables.ssl_port, Test.TestDirectory + "/ssl/ccc-signed") tr.Processes.Default.ReturnCode = 35 tr = Test.AddTestRun() tr.StillRunningAfter = ts tr.StillRunningAfter = server tr.Processes.Default.Command = ( - "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'ccc.com:{0}:127.0.0.1'" + - " https://ccc.com:{0}/xyz" -).format(ts.Variables.ssl_port, Test.TestDirectory + "/ssl/aaa-signed") + "curl -v -k --tls-max 1.2 --cert {1}.pem --key {1}.key --resolve 'ccc.com:{0}:127.0.0.1'" + " https://ccc.com:{0}/xyz").format( + ts.Variables.ssl_port, Test.TestDirectory + "/ssl/aaa-signed") tr.Processes.Default.ReturnCode = 35 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_versions.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_versions.test.py --- 
trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_client_versions.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_client_versions.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ # By default only offer TLSv1_2 # for special domain foo.com only offer TLSv1 and TLSv1_1 -Test.SkipUnless( - Condition.HasOpenSSLVersion("1.1.1") -) +Test.SkipUnless(Condition.HasOpenSSLVersion("1.1.1")) # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) @@ -42,25 +40,24 @@ # Need no remap rules. Everything should be processed by sni # Make sure the TS server certs are different from the origin certs -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') cipher_suite = 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2' if Condition.HasOpenSSLVersion("3.0.0"): cipher_suite += ":@SECLEVEL=0" -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': cipher_suite, - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.TLSv1': 0, - 'proxy.config.ssl.TLSv1_1': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.TLSv1_2': 1 -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': cipher_suite, + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.TLSv1': 0, + 'proxy.config.ssl.TLSv1_1': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.TLSv1_2': 1 + }) # foo.com should only offer the older TLS protocols # bar.com should terminate. @@ -68,7 +65,7 @@ ts.Disk.sni_yaml.AddLines([ 'sni:', '- fqdn: foo.com', - ' valid_tls_versions_in: [ TLSv1, TLSv1_1 ]' + ' valid_tls_versions_in: [ TLSv1, TLSv1_1 ]', ]) # Target foo.com for TLSv1_2. Should fail diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_engine.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_engine.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_engine.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_engine.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -16,10 +16,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import os - # Someday should add client cert to origin to exercise the # engine interface on the other side @@ -40,55 +38,58 @@ ts.Setup.Copy(os.path.join(Test.Variables.AtsTestPluginsDir, 'async_engine.so'), Test.RunDirectory) # Add info the origin server responses -server.addResponse("sessionlog.json", - {"headers": "GET / HTTP/1.1\r\nuuid: basic\r\n\r\n", - "timestamp": "1469733493.993", - "body": ""}, - {"headers": "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 2\r\n\r\n", - "timestamp": "1469733493.993", - "body": "ok"}) +server.addResponse( + "sessionlog.json", { + "headers": "GET / HTTP/1.1\r\nuuid: basic\r\n\r\n", + "timestamp": "1469733493.993", + "body": "" + }, { + "headers": + "HTTP/1.1 200 OK\r\nServer: microserver\r\nConnection: close\r\nCache-Control: max-age=3600\r\nContent-Length: 2\r\n\r\n", + "timestamp": "1469733493.993", + "body": "ok" + }) # add ssl materials like key, certificates for the server ts.addSSLfile("ssl/server.pem") ts.addSSLfile("ssl/server.key") -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.engine.conf_file': '{0}/ts/config/load_engine.cnf'.format(Test.RunDirectory), - 'proxy.config.ssl.async.handshake.enabled': 1, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'ssl|http' -}) - -ts.Disk.MakeConfigFile('load_engine.cnf').AddLines([ - 'openssl_conf = openssl_init', - '', - '[openssl_init]', - '', - 'engines = engine_section', - '', - '[engine_section]', - '', - 'async = async_section', - '', - '[async_section]', - '', - 'dynamic_path = {0}/async_engine.so'.format(Test.RunDirectory), - '', - 'engine_id = async-test', - '', - 'default_algorithms = RSA', - '', - 'init = 1']) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.engine.conf_file': '{0}/ts/config/load_engine.cnf'.format(Test.RunDirectory), + 'proxy.config.ssl.async.handshake.enabled': 1, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'ssl|http' + }) + +ts.Disk.MakeConfigFile('load_engine.cnf').AddLines( + [ + 'openssl_conf = openssl_init', + '', + '[openssl_init]', + '', + 'engines = engine_section', + '', + '[engine_section]', + '', + 'async = async_section', + '', + '[async_section]', + '', + 'dynamic_path = {0}/async_engine.so'.format(Test.RunDirectory), + '', + 'engine_id = async-test', + '', + 'default_algorithms = RSA', + '', + 'init = 1', + ]) # Make a basic request. 
Hopefully it goes through tr = Test.AddTestRun("Run-Test") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_forward_nonhttp.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_forward_nonhttp.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_forward_nonhttp.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_forward_nonhttp.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -38,28 +38,27 @@ # Need no remap rules. Everything should be processed by sni # Make sure the TS server certs are different from the origin certs -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.connect_ports': '{0} {1}'.format(ts.Variables.ssl_port, ts.Variables.s_client_port), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.connect_ports': '{0} {1}'.format(ts.Variables.ssl_port, ts.Variables.s_client_port), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # foo.com should not terminate. Just tunnel to server_foo # bar.com should terminate. 
Forward its tcp stream to server_bar ts.Disk.sni_yaml.AddLines([ "sni:", "- fqdn: bar.com", - " forward_route: localhost:{0}".format(ts.Variables.s_client_port) + " forward_route: localhost:{0}".format(ts.Variables.s_client_port), ]) tr = Test.AddTestRun("forward-non-http") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_hooks_client_verify.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_hooks_client_verify.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_hooks_client_verify.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_hooks_client_verify.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,30 +36,26 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - # Test looks for debug output from the plugin - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_client_verify_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + # Test looks for debug output from the plugin + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_client_verify_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://foo.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port) -) + 'map https://foo.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( - 'map https://bar.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port) -) + 'map https://bar.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( - 'map https://random.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port) -) + 'map https://random.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)) ts.Disk.sni_yaml.AddLines([ 'sni:', @@ -85,7 +81,6 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.all = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded") - tr2 = Test.AddTestRun("request bad name") tr2.StillRunningAfter = ts tr2.StillRunningAfter = server diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_hooks_verify.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_hooks_verify.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_hooks_verify.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_hooks_verify.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,39 +33,32 @@ ts.addSSLfile("ssl/server.pem") ts.addSSLfile("ssl/server.key") 
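# Illustrative sketch (not taken from this patch): the hook tests above exercise each remapped
# hostname by pinning it to loopback with curl --resolve, so the SNI and Host header reach the
# proxy unmodified. This run is a hypothetical example of that pattern; Test, ts, server and
# Testers are the autest globals assumed to be in scope.
tr = Test.AddTestRun("request good name")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl -v -k --resolve 'foo.com:{0}:127.0.0.1' https://foo.com:{0}/".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.all = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded")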
-ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_verify_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', - 'proxy.config.ssl.client.verify.server.properties': 'NONE', - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_verify_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', + 'proxy.config.ssl.client.verify.server.properties': 'NONE', + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://foo.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port) -) + 'map https://foo.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( - 'map https://bar.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port) -) + 'map https://bar.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( - 'map https://random.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port) -) + 'map https://random.com:{1}/ https://127.0.0.1:{0}'.format(server.Variables.SSL_Port, ts.Variables.ssl_port)) -ts.Disk.sni_yaml.AddLine( - 'sni:') -ts.Disk.sni_yaml.AddLine( - '- fqdn: bar.com') -ts.Disk.sni_yaml.AddLine( - ' verify_server_policy: PERMISSIVE') +ts.Disk.sni_yaml.AddLine('sni:') +ts.Disk.sni_yaml.AddLine('- fqdn: bar.com') +ts.Disk.sni_yaml.AddLine(' verify_server_policy: PERMISSIVE') -Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_verify_test.so'), - ts, '-count=2 -bad=random.com -bad=bar.com') +Test.PrepareTestPlugin( + os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_verify_test.so'), ts, '-count=2 -bad=random.com -bad=bar.com') tr = Test.AddTestRun("request good name") tr.Processes.Default.StartBefore(server) @@ -76,7 +69,6 @@ tr.Processes.Default.ReturnCode = 0 tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have failed") - tr2 = Test.AddTestRun("request bad name") tr2.StillRunningAfter = ts tr2.StillRunningAfter = server diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_keepalive.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_keepalive.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_keepalive.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_keepalive.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,9 +23,7 @@ Verify that the client-side keep alive is honored for TLS and different versions of HTTP ''' -Test.SkipUnless( - Condition.HasCurlFeature('http2') -) +Test.SkipUnless(Condition.HasCurlFeature('http2')) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") @@ -37,21 +35,19 @@ 
ts.addSSLfile("ssl/server.pem") ts.addSSLfile("ssl/server.key") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.TLSv1_3': 0, - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.log.max_secs_per_buffer': 1 -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.TLSv1_3': 0, + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.log.max_secs_per_buffer': 1 + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) ts.Disk.logging_yaml.AddLines( ''' @@ -63,8 +59,7 @@ - mode: ascii format: testformat filename: squid -'''.split("\n") -) +'''.split("\n")) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-preaccept=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_ocsp.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_ocsp.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_ocsp.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_ocsp.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
import os + Test.Summary = ''' Test tls server prefetched OCSP responses ''' @@ -40,25 +41,23 @@ ts.addSSLfile("ssl/ocsp_response.der") ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.ocsp.pem ssl_key_name=server.ocsp.key ssl_ocsp_name=ocsp_response.der' -) + 'dest_ip=* ssl_cert_name=server.ocsp.pem ssl_key_name=server.ocsp.key ssl_ocsp_name=ocsp_response.der') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.cert_chain.filename': 'ca.ocsp.pem', - # enable prefetched OCSP responses - 'proxy.config.ssl.ocsp.response.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.ocsp.enabled': 1, - 'proxy.config.exec_thread.autoconfig.scale': 1.0 -}) - +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.cert_chain.filename': 'ca.ocsp.pem', + # enable prefetched OCSP responses + 'proxy.config.ssl.ocsp.response.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.ocsp.enabled': 1, + 'proxy.config.exec_thread.autoconfig.scale': 1.0 + }) tr = Test.AddTestRun("Check OCSP response using curl") tr.Processes.Default.StartBefore(server) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_origin_session_reuse.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_origin_session_reuse.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_origin_session_reuse.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_origin_session_reuse.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. 
import re + Test.Summary = ''' Test tls origin session reuse ''' @@ -29,16 +30,8 @@ server = Test.MakeOriginServer("server") # Add info the origin server responses -request_header = { - 'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': '' -} -response_header = { - 'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', - 'timestamp': '1469733493.993', - 'body': 'curl test' -} +request_header = {'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n', 'timestamp': '1469733493.993', 'body': ''} +response_header = {'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '1469733493.993', 'body': 'curl test'} server.addResponse("sessionlog.json", request_header, response_header) # add ssl materials like key, certificates for the server @@ -51,101 +44,92 @@ ts4.addSSLfile("ssl/server.pem") ts4.addSSLfile("ssl/server.key") -ts1.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts2.Disk.remap_config.AddLines([ - 'map /reuse_session https://127.0.0.1:{0}'.format(ts1.Variables.ssl_port), - 'map /remove_oldest https://127.0.1.1:{0}'.format(ts1.Variables.ssl_port) -]) -ts3.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts4.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(ts3.Variables.ssl_port) -) - -ts1.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts2.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts3.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts4.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts1.Disk.records_config.update({ - 'proxy.config.http.cache.http': 0, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts1.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts1.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 1, - 'proxy.config.ssl.origin_session_cache': 1, - 'proxy.config.ssl.origin_session_cache.size': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) -ts2.Disk.records_config.update({ - 'proxy.config.http.cache.http': 0, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl.origin_session_cache', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 1, - 'proxy.config.ssl.origin_session_cache': 1, - 'proxy.config.ssl.origin_session_cache.size': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) 
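# Illustrative sketch (not taken from this patch): only a few keys distinguish the four ATS
# instances configured in this test; the rest of each records_config block is shared TLS and
# session-cache boilerplate. "ts_example" is a hypothetical instance name used only to show
# the origin-session-cache knobs in isolation; Test is the usual autest global.
ts_example = Test.MakeATSProcess("ts_example", select_ports=True, enable_tls=True)
ts_example.Disk.records_config.update(
    {
        'proxy.config.ssl.origin_session_cache': 1,                   # ts4 sets this to 0 to cover the disabled path
        'proxy.config.ssl.origin_session_cache.size': 1,              # a one-entry cache so /remove_oldest can exercise eviction
        'proxy.config.diags.debug.tags': 'ssl.origin_session_cache',  # debug tag enabled on ts2/ts4 in the blocks below
    })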
-ts3.Disk.records_config.update({ - 'proxy.config.http.cache.http': 0, - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts3.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts3.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 1, - 'proxy.config.ssl.origin_session_cache': 1, - 'proxy.config.ssl.origin_session_cache.size': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) -ts4.Disk.records_config.update({ - 'proxy.config.http.cache.http': 0, - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl.origin_session_cache', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts4.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts4.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 1, - 'proxy.config.ssl.origin_session_cache': 0, - 'proxy.config.ssl.origin_session_cache.size': 1, - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', -}) +ts1.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts2.Disk.remap_config.AddLines( + [ + 'map /reuse_session https://127.0.0.1:{0}'.format(ts1.Variables.ssl_port), + 'map /remove_oldest https://127.0.1.1:{0}'.format(ts1.Variables.ssl_port), + ]) +ts3.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts4.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(ts3.Variables.ssl_port)) + +ts1.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts2.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts3.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts4.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts1.Disk.records_config.update( + { + 'proxy.config.http.cache.http': 0, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts1.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts1.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.origin_session_cache': 1, + 'proxy.config.ssl.origin_session_cache.size': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) +ts2.Disk.records_config.update( + { + 'proxy.config.http.cache.http': 0, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 
'ssl.origin_session_cache', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.origin_session_cache': 1, + 'proxy.config.ssl.origin_session_cache.size': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) +ts3.Disk.records_config.update( + { + 'proxy.config.http.cache.http': 0, + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts3.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts3.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.origin_session_cache': 1, + 'proxy.config.ssl.origin_session_cache.size': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) +ts4.Disk.records_config.update( + { + 'proxy.config.http.cache.http': 0, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl.origin_session_cache', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts4.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts4.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 1, + 'proxy.config.ssl.origin_session_cache': 0, + 'proxy.config.ssl.origin_session_cache.size': 1, + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) tr = Test.AddTestRun('new session then reuse') tr.Processes.Default.Command = 'curl https://127.0.0.1:{0}/reuse_session -k && curl https://127.0.0.1:{0}/reuse_session -k'.format( diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_partial_blind_tunnel.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_partial_blind_tunnel.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_partial_blind_tunnel.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_partial_blind_tunnel.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -36,29 +36,29 @@ # Need no remap rules. 
Everything should be processed by sni # Make sure the TS server certs are different from the origin certs -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.connect_ports': '{0} {1}'.format(ts.Variables.ssl_port, server_bar.Variables.SSL_Port), - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.connect_ports': '{0} {1}'.format(ts.Variables.ssl_port, server_bar.Variables.SSL_Port), + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # foo.com should terminate. and reconnect via TLS upstream to bar.com -ts.Disk.sni_yaml.AddLines([ - "sni:", - "- fqdn: 'foo.com'", - " partial_blind_route: 'localhost:{0}'".format(server_bar.Variables.SSL_Port), -]) +ts.Disk.sni_yaml.AddLines( + [ + "sni:", + "- fqdn: 'foo.com'", + " partial_blind_route: 'localhost:{0}'".format(server_bar.Variables.SSL_Port), + ]) tr = Test.AddTestRun("Partial Blind Route") tr.Processes.Default.Command = "curl --http1.1 -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format( @@ -70,7 +70,7 @@ tr.StillRunningAfter = ts tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded") -tr.Processes.Default.Streams.All += Testers.ExcludesExpression("Not Found on Accelerato", - "Should not try to remap on Traffic Server") +tr.Processes.Default.Streams.All += Testers.ExcludesExpression( + "Not Found on Accelerato", "Should not try to remap on Traffic Server") tr.Processes.Default.Streams.All += Testers.ContainsExpression("HTTP/1.1 200 OK", "Should get a successful response") tr.Processes.Default.Streams.All += Testers.ContainsExpression("ok bar", "Body is expected") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_session_key_logging.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_session_key_logging.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_session_key_logging.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_session_key_logging.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,7 +19,6 @@ import os - Test.Summary = ''' Test TLS secrets logging. 
''' @@ -40,8 +39,7 @@ def setupOriginServer(self): server_name = f"server_{TlsKeyloggingTest.server_counter}" TlsKeyloggingTest.server_counter += 1 - self.server = Test.MakeVerifierServerProcess( - server_name, TlsKeyloggingTest.replay_file) + self.server = Test.MakeVerifierServerProcess(server_name, TlsKeyloggingTest.replay_file) def setupTS(self, enable_secrets_logging): ts_name = f"ts_{TlsKeyloggingTest.ts_counter}" @@ -49,20 +47,16 @@ self.ts = Test.MakeATSProcess(ts_name, enable_tls=True, enable_cache=False) self.ts.addDefaultSSLFiles() - self.ts.Disk.records_config.update({ - "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', - "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', - - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_keylog' - }) - self.ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' - ) - self.ts.Disk.remap_config.AddLine( - f'map / https://127.0.0.1:{self.server.Variables.https_port}' - ) + self.ts.Disk.records_config.update( + { + "proxy.config.ssl.server.cert.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.server.private_key.path": f'{self.ts.Variables.SSLDir}', + "proxy.config.ssl.client.verify.server.policy": 'PERMISSIVE', + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_keylog' + }) + self.ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + self.ts.Disk.remap_config.AddLine(f'map / https://127.0.0.1:{self.server.Variables.https_port}') keylog_file = os.path.join(self.ts.Variables.LOGDIR, "tls_secrets.txt") @@ -76,8 +70,7 @@ }) self.ts.Disk.diags_log.Content += Testers.ContainsExpression( - f"Opened {keylog_file} for TLS key logging", - "Verify the user was notified of TLS secrets logging.") + f"Opened {keylog_file} for TLS key logging", "Verify the user was notified of TLS secrets logging.") self.ts.Disk.File(keylog_file, id="keylog", exists=True) # It would be nice to verify the content of certain lines in the # keylog file, but the content is dependent upon the particular TLS @@ -93,10 +86,7 @@ client_name = f"client_{TlsKeyloggingTest.client_counter}" TlsKeyloggingTest.client_counter += 1 - tr.AddVerifierClientProcess( - client_name, - TlsKeyloggingTest.replay_file, - https_ports=[self.ts.Variables.ssl_port]) + tr.AddVerifierClientProcess(client_name, TlsKeyloggingTest.replay_file, https_ports=[self.ts.Variables.ssl_port]) TlsKeyloggingTest(enable_secrets_logging=False).run() diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_session_reuse.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_session_reuse.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_session_reuse.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_session_reuse.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -29,7 +29,6 @@ ts3 = Test.MakeATSProcess("ts3", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") - # Add info the origin server responses request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -43,65 +42,59 @@ ts3.addSSLfile("ssl/server.pem") ts3.addSSLfile("ssl/server.key") -ts1.Disk.remap_config.AddLine( - 'map / 
http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts2.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts3.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) - -ts1.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts2.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts3.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts1.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts1.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts1.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 0, -}) -ts2.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 2, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 1, -}) -ts3.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts3.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts3.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': 
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.session_cache': 0, - 'proxy.config.ssl.session_cache.size': 4096, - 'proxy.config.ssl.session_cache.num_buckets': 256, - 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, - 'proxy.config.ssl.session_cache.timeout': 0, - 'proxy.config.ssl.session_cache.auto_clear': 1, - 'proxy.config.ssl.server.session_ticket.enable': 1, -}) +ts1.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts2.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts3.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) + +ts1.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts2.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts3.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') + +ts1.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts1.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts1.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 0, + }) +ts2.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': + 
'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 2, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 1, + }) +ts3.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts3.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts3.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.session_cache': 0, + 'proxy.config.ssl.session_cache.size': 4096, + 'proxy.config.ssl.session_cache.num_buckets': 256, + 'proxy.config.ssl.session_cache.skip_cache_on_bucket_contention': 0, + 'proxy.config.ssl.session_cache.timeout': 0, + 'proxy.config.ssl.session_cache.auto_clear': 1, + 'proxy.config.ssl.server.session_ticket.enable': 1, + }) def check_session(ev, test): diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_sni_host_policy.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_sni_host_policy.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_sni_host_policy.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_sni_host_policy.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -19,7 +19,6 @@ import os - Test.Summary = ''' Test exercising host and SNI mismatch controls ''' @@ -40,38 +39,36 @@ ts.addSSLfile("ssl/server.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.http.host_sni_policy': 2, - 'proxy.config.ssl.TLSv1_3': 0, - 'proxy.config.diags.debug.enabled': 1, - 
'proxy.config.diags.debug.tags': 'ssl', -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.http.host_sni_policy': 2, + 'proxy.config.ssl.TLSv1_3': 0, + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Just map everything through to origin. This test is concentrating on the user-agent side -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}/'.format(server.Variables.Port) -) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}/'.format(server.Variables.Port)) # Scenario 1: Default no client cert required. cert required for bar.com. # Make boblite and bob mixed case to verify that we can match hostnames case # insensitively. -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: boBliTe', - ' verify_client: STRICT', - ' host_sni_policy: PERMISSIVE', - '- fqdn: bOb', - ' verify_client: STRICT', -]) +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: boBliTe', + ' verify_client: STRICT', + ' host_sni_policy: PERMISSIVE', + '- fqdn: bOb', + ' verify_client: STRICT', + ]) # case 1 # sni=Bob and host=dave. Do not provide client cert. This should match fqdn bOb which has @@ -174,16 +171,14 @@ # Wait for the error.log to appaer. test_run = Test.AddTestRun() test_run.Processes.Default.Command = ( - os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + - os.path.join(ts.Variables.LOGDIR, 'error.log') -) + os.path.join(Test.Variables.AtsTestToolsDir, 'condwait') + ' 60 1 -f ' + os.path.join(ts.Variables.LOGDIR, 'error.log')) ts.Disk.diags_log.Content += Testers.ContainsExpression( "WARNING: SNI/hostname mismatch sni=dave host=bob action=terminate", "Should have warning on mismatch") ts.Disk.diags_log.Content += Testers.ContainsExpression( "WARNING: SNI/hostname mismatch sni=ellen host=Boblite action=continue", "Should have warning on mismatch") -ts.Disk.diags_log.Content += Testers.ExcludesExpression("WARNING: SNI/hostname mismatch sni=ellen host=fran", - "Should not have warning on mismatch with non-policy host") +ts.Disk.diags_log.Content += Testers.ExcludesExpression( + "WARNING: SNI/hostname mismatch sni=ellen host=fran", "Should not have warning on mismatch with non-policy host") test_run.Processes.Default.ReturnCode = 0 ts.Disk.error_log.Content += Testers.ContainsExpression( diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_sni_yaml_reload.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_sni_yaml_reload.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_sni_yaml_reload.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_sni_yaml_reload.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- Test.Summary = ''' Test reloading sni.yaml behaves as expected ''' @@ -29,33 +28,26 @@ response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} server.addResponse("sessionlog.json", request_header, response_header) -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, - 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, - 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl|http', - 'proxy.config.diags.output.debug': 'L', -}) - +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': ts.Variables.SSLDir, + 'proxy.config.ssl.server.private_key.path': ts.Variables.SSLDir, + 'proxy.config.ssl.CA.cert.filename': f'{ts.Variables.SSLDir}/signer.pem', + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl|http', + 'proxy.config.diags.output.debug': 'L', + }) ts.addDefaultSSLFiles() ts.addSSLfile("ssl/signed-foo.pem") ts.addSSLfile("ssl/signed-foo.key") ts.addSSLfile("ssl/signer.pem") -ts.Disk.remap_config.AddLine( - f'map / http://127.0.0.1:{server.Variables.Port}' -) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.remap_config.AddLine(f'map / http://127.0.0.1:{server.Variables.Port}') +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.sni_yaml.AddLines( f""" @@ -65,8 +57,7 @@ client_cert: {ts.Variables.SSLDir}/signed-foo.pem client_key: {ts.Variables.SSLDir}/signed-foo.key verify_client: STRICT - """.split('\n') -) + """.split('\n')) tr = Test.AddTestRun(f'ensure we can connect for SNI {sni_domain}') tr.Setup.Copy("ssl/signed-foo.pem") @@ -81,8 +72,7 @@ tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Verify curl could successfully connect") tr.Processes.Default.Streams.stderr = Testers.IncludesExpression(f"CN={sni_domain}", f"Verify curl used the {sni_domain} SNI") ts.Disk.diags_log.Content = Testers.IncludesExpression( - "SSL negotiation finished successfully", - "Verify that the TLS handshake was successful") + "SSL negotiation finished successfully", "Verify that the TLS handshake was successful") # This config reload should fail because it references non-existent TLS key files trupd = Test.AddTestRun("Update config file") @@ -97,8 +87,7 @@ client_cert: {ts.Variables.SSLDir}/signed-notexist.pem client_key: {ts.Variables.SSLDir}/signed-notexist.key verify_client: STRICT - """.split('\n') -) + """.split('\n')) trupd.StillRunningAfter = ts trupd.StillRunningAfter = server @@ -106,7 +95,6 @@ trupd.Processes.Default.Env = ts.Env trupd.Processes.Default.ReturnCode = 0 - tr2reload = Test.AddTestRun("Reload config") tr2reload.StillRunningAfter = ts tr2reload.StillRunningAfter = server @@ -114,8 +102,7 @@ tr2reload.Processes.Default.Env = ts.Env tr2reload.Processes.Default.ReturnCode = 0 ts.Disk.diags_log.Content = Testers.ContainsExpression( - 'sni.yaml failed to load', - 'reload should result in failure to load sni.yaml') + 'sni.yaml failed to load', 'reload should result in failure to load sni.yaml') tr3 = Test.AddTestRun(f"Make request again for {sni_domain} that should still 
work") # Wait for the reload to complete diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_ticket.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_ticket.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_ticket.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_ticket.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -17,6 +17,7 @@ # limitations under the License. import re + Test.Summary = ''' Test tls tickets ''' @@ -26,7 +27,6 @@ ts2 = Test.MakeATSProcess("ts2", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") - # Add info the origin server responses request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -38,35 +38,28 @@ ts2.addSSLfile("ssl/server.pem") ts2.addSSLfile("ssl/server.key") -ts.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) -ts2.Disk.remap_config.AddLine( - 'map / http://127.0.0.1:{0}'.format(server.Variables.Port) -) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) -ts2.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) - -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.server.session_ticket.enable': '1', - 'proxy.config.ssl.server.ticket_key.filename': '../../file.ticket' -}) -ts2.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), - 'proxy.config.ssl.server.session_ticket.enable': '1', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.server.ticket_key.filename': '../../file.ticket' -}) +ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) +ts2.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts2.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.server.session_ticket.enable': '1', + 'proxy.config.ssl.server.ticket_key.filename': '../../file.ticket' + }) +ts2.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts2.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts2.Variables.SSLDir), + 'proxy.config.ssl.server.session_ticket.enable': '1', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.server.ticket_key.filename': '../../file.ticket' + }) tr = Test.AddTestRun("Create ticket") tr.Setup.Copy('file.ticket') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_tunnel.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_tunnel.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_tunnel.test.py 2023-10-09 
20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_tunnel.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -50,40 +50,41 @@ # Need no remap rules. Everything should be processed by sni # Make sure the TS server certs are different from the origin certs -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.connect_ports': '{0} {1} {2}'.format(ts.Variables.ssl_port, - server_foo.Variables.SSL_Port, - server_bar.Variables.SSL_Port), - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL'}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.connect_ports': + '{0} {1} {2}'.format(ts.Variables.ssl_port, server_foo.Variables.SSL_Port, server_bar.Variables.SSL_Port), + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL' + }) # foo.com should not terminate. Just tunnel to server_foo # bar.com should terminate. 
Forward its tcp stream to server_bar # empty SNI should tunnel to server_bar -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: foo.com', - " tunnel_route: localhost:{0}".format(server_foo.Variables.SSL_Port), - "- fqdn: bob.*.com", - " tunnel_route: localhost:{0}".format(server_foo.Variables.SSL_Port), - "- fqdn: '*.match.com'", - " tunnel_route: $1.testmatch:{0}".format(server_foo.Variables.SSL_Port), - "- fqdn: '*.ok.*.com'", - " tunnel_route: $2.example.$1:{0}".format(server_foo.Variables.SSL_Port), - "- fqdn: ''", # No SNI sent - " tunnel_route: localhost:{0}".format(server_bar.Variables.SSL_Port) -]) +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: foo.com', + " tunnel_route: localhost:{0}".format(server_foo.Variables.SSL_Port), + "- fqdn: bob.*.com", + " tunnel_route: localhost:{0}".format(server_foo.Variables.SSL_Port), + "- fqdn: '*.match.com'", + " tunnel_route: $1.testmatch:{0}".format(server_foo.Variables.SSL_Port), + "- fqdn: '*.ok.*.com'", + " tunnel_route: $2.example.$1:{0}".format(server_foo.Variables.SSL_Port), + "- fqdn: ''", # No SNI sent + " tunnel_route: localhost:{0}".format(server_bar.Variables.SSL_Port) + ]) tr = Test.AddTestRun("foo.com Tunnel-test") tr.Processes.Default.Command = "curl -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format(ts.Variables.ssl_port) @@ -133,7 +134,6 @@ tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server") tr.Processes.Default.Streams.All += Testers.ContainsExpression("bar ok", "Should get a response from bar") - tr = Test.AddTestRun("one.match.com Tunnel-test") tr.Processes.Default.Command = "curl -vvv --resolve 'one.match.com:{0}:127.0.0.1' -k https://one.match.com:{0}".format( ts.Variables.ssl_port) @@ -147,7 +147,6 @@ tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server") tr.Processes.Default.Streams.All += Testers.ContainsExpression("foo ok", "Should get a response from tm") - tr = Test.AddTestRun("one.ok.two.com Tunnel-test") tr.Processes.Default.Command = "curl -vvv --resolve 'one.ok.two.com:{0}:127.0.0.1' -k https:/one.ok.two.com:{0}".format( ts.Variables.ssl_port) @@ -161,7 +160,6 @@ tr.Processes.Default.Streams.All += Testers.ExcludesExpression("ATS", "Do not terminate on Traffic Server") tr.Processes.Default.Streams.All += Testers.ContainsExpression("foo ok", "Should get a response from tm") - # Update sni file and reload tr = Test.AddTestRun("Update config files") # Update the SNI config diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_tunnel_forward.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_tunnel_forward.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_tunnel_forward.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_tunnel_forward.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,8 +32,11 @@ request_random_header = {"headers": "GET / HTTP/1.1\r\nHost: random.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} response_foo_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok foo"} response_bar_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": "ok bar"} -response_random_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", - "timestamp": "1469733493.993", "body": "ok random"} +response_random_header = { + "headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", + "timestamp": 
"1469733493.993", + "body": "ok random" +} server_foo.addResponse("sessionlog_foo.json", request_foo_header, response_foo_header) server_bar.addResponse("sessionlog_bar.json", request_bar_header, response_bar_header) server_random.addResponse("sessionlog_random.json", request_random_header, response_random_header) @@ -51,36 +54,37 @@ # Need no remap rules. Everything should be processed by sni # Make sure the TS server certs are different from the origin certs -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=signed-foo.pem ssl_key_name=signed-foo.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.http.connect_ports': '{0} {1} {2} {3}'.format(ts.Variables.ssl_port, - server_foo.Variables.SSL_Port, - server_bar.Variables.Port, - server_random.Variables.Port), - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", - 'proxy.config.dns.resolv_conf': 'NULL'}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.http.connect_ports': + '{0} {1} {2} {3}'.format( + ts.Variables.ssl_port, server_foo.Variables.SSL_Port, server_bar.Variables.Port, server_random.Variables.Port), + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}", + 'proxy.config.dns.resolv_conf': 'NULL' + }) # foo.com should not terminate. Just tunnel to server_foo # bar.com should terminate. 
Forward its tcp stream to server_bar -ts.Disk.sni_yaml.AddLines([ - "sni:", - "- fqdn: 'foo.com'", - " tunnel_route: 'localhost:{0}'".format(server_foo.Variables.SSL_Port), - "- fqdn: 'bar.com'", - " forward_route: 'localhost:{0}'".format(server_bar.Variables.Port), - "- fqdn: ''", # default case - " forward_route: 'localhost:{0}'".format(server_random.Variables.Port), -]) +ts.Disk.sni_yaml.AddLines( + [ + "sni:", + "- fqdn: 'foo.com'", + " tunnel_route: 'localhost:{0}'".format(server_foo.Variables.SSL_Port), + "- fqdn: 'bar.com'", + " forward_route: 'localhost:{0}'".format(server_bar.Variables.Port), + "- fqdn: ''", # default case + " forward_route: 'localhost:{0}'".format(server_random.Variables.Port), + ]) tr = Test.AddTestRun("Tunnel-test") tr.Processes.Default.Command = "curl -v --resolve 'foo.com:{0}:127.0.0.1' -k https://foo.com:{0}".format(ts.Variables.ssl_port) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,18 +22,27 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) -server_wild = Test.MakeOriginServer("server_wild", - ssl=True, - options={"--key": "{0}/signed-wild.key".format(Test.RunDirectory), - "--cert": "{0}/signed-wild.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) +server_wild = Test.MakeOriginServer( + "server_wild", + ssl=True, + options={ + "--key": "{0}/signed-wild.key".format(Test.RunDirectory), + "--cert": "{0}/signed-wild.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -59,51 +68,44 @@ ts.addSSLfile("ssl/signed-wild.key") ts.addSSLfile("ssl/signed-wild.pem") -ts.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://foo.wild.com/ https://127.0.0.1:{0}'.format(server_wild.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://foo_bar.wild.com/ 
https://127.0.0.1:{0}'.format(server_wild.Variables.SSL_Port)) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://foo.wild.com/ https://127.0.0.1:{0}'.format(server_wild.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://foo_bar.wild.com/ https://127.0.0.1:{0}'.format(server_wild.Variables.SSL_Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.ssl.client.verify.server.properties': 'SIGNATURE', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bar.com', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: ALL', - '- fqdn: "*.wild.com"', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: ALL', - '- fqdn: bad_bar.com', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: ALL' -]) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.ssl.client.verify.server.properties': 'SIGNATURE', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bar.com', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: ALL', + '- fqdn: "*.wild.com"', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: ALL', + '- fqdn: bad_bar.com', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: ALL', + ]) tr = Test.AddTestRun("Permissive-Test") tr.Setup.Copy("ssl/signed-foo.key") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify2.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, 
enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -52,53 +58,38 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") -ts.Disk.remap_config.AddLine( - 'map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.remap_config.AddLine('map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) - -ts.Disk.sni_yaml.AddLine( - 'sni:') -ts.Disk.sni_yaml.AddLine( - '- fqdn: bar.com') -ts.Disk.sni_yaml.AddLine( - ' verify_server_policy: PERMISSIVE') -ts.Disk.sni_yaml.AddLine( - ' verify_server_properties: SIGNATURE') -ts.Disk.sni_yaml.AddLine( - '- fqdn: bad_bar.com') -ts.Disk.sni_yaml.AddLine( - ' verify_server_policy: PERMISSIVE') -ts.Disk.sni_yaml.AddLine( - ' verify_server_properties: SIGNATURE') -ts.Disk.sni_yaml.AddLine( - '- fqdn: random.com') 
-ts.Disk.sni_yaml.AddLine( - ' verify_server_policy: DISABLED') +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) + +ts.Disk.sni_yaml.AddLine('sni:') +ts.Disk.sni_yaml.AddLine('- fqdn: bar.com') +ts.Disk.sni_yaml.AddLine(' verify_server_policy: PERMISSIVE') +ts.Disk.sni_yaml.AddLine(' verify_server_properties: SIGNATURE') +ts.Disk.sni_yaml.AddLine('- fqdn: bad_bar.com') +ts.Disk.sni_yaml.AddLine(' verify_server_policy: PERMISSIVE') +ts.Disk.sni_yaml.AddLine(' verify_server_properties: SIGNATURE') +ts.Disk.sni_yaml.AddLine('- fqdn: random.com') +ts.Disk.sni_yaml.AddLine(' verify_server_policy: DISABLED') tr = Test.AddTestRun("default-enforce") tr.Setup.Copy("ssl/signed-foo.key") @@ -150,7 +141,6 @@ tr6.StillRunningAfter = server tr6.StillRunningAfter = ts - # No name checking for the sig-only permissive override for bad_bar ts.Disk.diags_log.Content += Testers.ExcludesExpression( r"WARNING: SNI \(bad_bar.com\) not in certificate", "bad_bar name checked should be skipped.") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify3.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify3.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify3.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify3.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -60,38 +66,37 @@ 'map https://bar.com:{1}/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port, ts.Variables.ssl_port)) ts.Disk.remap_config.AddLine( 'map https://bob.bar.com:{1}/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port, ts.Variables.ssl_port)) -ts.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* 
ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bob.bar.com', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: ALL', - '- fqdn: bob.*.com', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: SIGNATURE', - "- fqdn: '*bar.com'", - ' verify_server_policy: DISABLED', -]) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bob.bar.com', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: ALL', + '- fqdn: bob.*.com', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: SIGNATURE', + "- fqdn: '*bar.com'", + ' verify_server_policy: DISABLED', + ]) tr = Test.AddTestRun("foo.com Permissive-Test") tr.Setup.Copy("ssl/signed-foo.key") @@ -116,7 +121,6 @@ tr.StillRunningAfter = ts tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded") - tr2 = Test.AddTestRun("bob.bar.com Override-enforcing-Test") tr2.Processes.Default.Command = "curl -v -k --resolve 'bob.bar.com:{0}:127.0.0.1' https://bob.bar.com:{0}/".format( ts.Variables.ssl_port) @@ -141,7 +145,6 @@ tr3.StillRunningAfter = server tr3.StillRunningAfter = ts - # Over riding the built in ERROR check since we expect tr3 to fail ts.Disk.diags_log.Content = Testers.ContainsExpression( r"WARNING: SNI \(bob.bar.com\) not in certificate", "Make sure bob.bar name checked failed.") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify4.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify4.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify4.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify4.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -23,14 +23,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", command="traffic_manager", enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) +server_foo = 
Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -53,37 +59,32 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") -ts.Disk.remap_config.AddLine( - 'map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.remap_config.AddLine('map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'ssl' -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': + 
'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'ssl' + }) tr = Test.AddTestRun("default-enforce-bad-sig") tr.Setup.Copy("ssl/signed-foo.key") @@ -104,20 +105,22 @@ recordspath = ts.Disk.records_config.AbsPath # recreate the records.config with the cert filename changed tr2.Disk.File(recordspath, id="records_config", typename="ats:config:records"), -tr2.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'ssl' -}) +tr2.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'ssl' + }) tr2.StillRunningAfter = ts tr2.StillRunningAfter = server tr2.Processes.Default.Command = 'echo Updated configs' @@ -146,20 +149,22 @@ recordspath = ts.Disk.records_config.AbsPath # recreate the records.config with the cert filename changed tr2.Disk.File(recordspath, id="records_config", typename="ats:config:records"), -tr2.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.diags.debug.enabled': 0, - 'proxy.config.diags.debug.tags': 'ssl' -}) +tr2.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.cipher_suite': + 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2', + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.diags.debug.enabled': 0, + 'proxy.config.diags.debug.tags': 'ssl' + }) tr2.StillRunningAfter = ts tr2.StillRunningAfter = server tr2.Processes.Default.Command = 'echo Updated configs to ENFORCED' diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_base.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_base.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_base.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_base.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -52,42 +58,37 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") -ts.Disk.remap_config.AddLine( - 'map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 
'map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.remap_config.AddLine('map / https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_foo.com/ https://127.0.0.1:{0}'.format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map https://bad_bar.com/ https://127.0.0.1:{0}'.format(server_bar.Variables.SSL_Port)) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.client.sni_policy': 'host' -}) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bar.com', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: ALL', - '- fqdn: bad_bar.com', - ' verify_server_policy: ENFORCED', - ' verify_server_properties: ALL' -]) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.client.sni_policy': 'host' + }) + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bar.com', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: ALL', + '- fqdn: bad_bar.com', + ' verify_server_policy: ENFORCED', + ' verify_server_properties: ALL', + ]) tr = Test.AddTestRun("Permissive-Test") tr.Setup.Copy("ssl/signed-foo.key") @@ -111,7 +112,6 @@ tr.StillRunningAfter = ts tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded") - tr2 = Test.AddTestRun("Override-enforcing-Test") tr2.Processes.Default.Command = "curl -v -k -H \"host: bar.com\" https://127.0.0.1:{0}".format(ts.Variables.ssl_port) tr2.ReturnCode = 0 diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_ca_override.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_ca_override.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_ca_override.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_ca_override.test.py 
2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True) -server1 = Test.MakeOriginServer("server1", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server2 = Test.MakeOriginServer("server2", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed2-foo.pem".format(Test.RunDirectory)}) +server1 = Test.MakeOriginServer( + "server1", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server2 = Test.MakeOriginServer( + "server2", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed2-foo.pem".format(Test.RunDirectory) + }) request_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} request_bad_foo_header = {"headers": "GET / HTTP/1.1\r\nHost: bad_foo.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""} @@ -54,38 +60,33 @@ ts.Disk.remap_config.AddLine( 'map /case1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.CA.cert.filename={1}/{2}'.format( - server1.Variables.SSL_Port, ts.Variables.SSLDir, "signer.pem") -) + server1.Variables.SSL_Port, ts.Variables.SSLDir, "signer.pem")) ts.Disk.remap_config.AddLine( 'map /badcase1 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.CA.cert.filename={1}/{2}'.format( - server1.Variables.SSL_Port, ts.Variables.SSLDir, "signer2.pem") -) + server1.Variables.SSL_Port, ts.Variables.SSLDir, "signer2.pem")) ts.Disk.remap_config.AddLine( 'map /case2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.CA.cert.filename={1}/{2}'.format( - server2.Variables.SSL_Port, ts.Variables.SSLDir, "signer2.pem") -) + server2.Variables.SSL_Port, ts.Variables.SSLDir, "signer2.pem")) ts.Disk.remap_config.AddLine( 'map /badcase2 https://127.0.0.1:{0}/ @plugin=conf_remap.so @pparam=proxy.config.ssl.client.CA.cert.filename={1}/{2}'.format( - server2.Variables.SSL_Port, ts.Variables.SSLDir, "signer.pem") -) + server2.Variables.SSL_Port, ts.Variables.SSLDir, "signer.pem")) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', - 'proxy.config.ssl.client.verify.server.properties': 'SIGNATURE', - 'proxy.config.ssl.client.CA.cert.path': '/tmp', - 'proxy.config.ssl.client.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.url_remap.pristine_host_hdr': 1 -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', + 'proxy.config.ssl.client.verify.server.properties': 
'SIGNATURE', + 'proxy.config.ssl.client.CA.cert.path': '/tmp', + 'proxy.config.ssl.client.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.url_remap.pristine_host_hdr': 1 + }) # Should succeed tr = Test.AddTestRun("Use correct ca bundle for server 1") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_not_pristine.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_not_pristine.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_not_pristine.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_not_pristine.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,10 +22,13 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) dns = Test.MakeDNServer("dns") @@ -50,25 +53,24 @@ ts.Disk.remap_config.AddLine( 'map https://foo.com:{0}/ https://bar.com:{1}'.format(ts.Variables.ssl_port, server_foo.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.url_remap.pristine_host_hdr': 0, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'ENFORCED', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.url_remap.pristine_host_hdr': 0, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.dns.resolv_conf': 'NULL' + }) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) @@ -98,5 +100,5 @@ # Over riding the built in ERROR check since we expect tr3 to fail ts.Disk.diags_log.Content = Testers.ExcludesExpression("verification failed", "Make sure the signatures didn't fail") -ts.Disk.diags_log.Content += 
Testers.ContainsExpression(r"WARNING: SNI \(bar.com\) not in certificate", - "Make sure bad_bar name checked failed.") +ts.Disk.diags_log.Content += Testers.ContainsExpression( + r"WARNING: SNI \(bar.com\) not in certificate", "Make sure bad_bar name checked failed.") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_override.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_override.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_override.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_override.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) dns = Test.MakeDNServer("dns") @@ -54,73 +60,68 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") +ts.Disk.remap_config.AddLine('map http://foo.com/basictobar https://bar.com:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map http://foo.com/basic https://foo.com:{0}'.format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/basictobar https://bar.com:{0}'.format(server_bar.Variables.SSL_Port)) + 'map http://foo.com/override https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map http://bar.com/basic https://bar.com:{0}'.format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/basic https://foo.com:{0}'.format(server_foo.Variables.SSL_Port)) + 'map http://bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/override https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map http://bad_bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/basic https://bar.com:{0}'.format(server_foo.Variables.SSL_Port)) + 'map http://bar.com/overridesignature https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so 
@pparam=proxy.config.ssl.client.verify.server.policy=DISABLED'.format( - server_foo.Variables.SSL_Port)) + 'map http://bar.com/overridenone https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NONE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bad_bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED'.format( - server_foo.Variables.SSL_Port)) + 'map http://bar.com/overrideenforced https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map /basic https://random.com:{0}'.format(server.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overridesignature https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map /overrideenforce https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overridenone https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NONE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map /overridename https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME' + .format(server.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overrideenforced https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map /snipolicyfooremap https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /basic https://random.com:{0}'.format(server.Variables.SSL_Port)) + 'map /snipolicyfoohost https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /overrideenforce https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server.Variables.SSL_Port)) + 'map /snipolicybarremap https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /overridename https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME'.format( - server.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map 
/snipolicyfooremap https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format( - server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map /snipolicyfoohost https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format( - server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map /snipolicybarremap https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format( - server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map /snipolicybarhost https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format( - server_bar.Variables.SSL_Port)) + 'map /snipolicybarhost https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host' + .format(server_bar.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.client.sni_policy': 'remap', -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': 
'127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.client.sni_policy': 'remap', + }) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_override_base.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_override_base.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_override_base.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_override_base.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -22,14 +22,20 @@ # Define default ATS ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory)}) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-bar.key".format(Test.RunDirectory), - "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory)}) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory) + }) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-bar.key".format(Test.RunDirectory), + "--cert": "{0}/signed-bar.pem".format(Test.RunDirectory) + }) server = Test.MakeOriginServer("server", ssl=True) dns = Test.MakeDNServer("dns") @@ -54,67 +60,63 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") +ts.Disk.remap_config.AddLine('map http://foo.com/basic https://foo.com:{0}'.format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/basic https://foo.com:{0}'.format(server_foo.Variables.SSL_Port)) + 'map http://foo.com/override https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map http://bar.com/basic https://bar.com:{0}'.format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/override https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map http://bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/basic https://bar.com:{0}'.format(server_foo.Variables.SSL_Port)) + 'map http://bar.com/overridesignature https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overridedisabled https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=DISABLED'.format( - server_foo.Variables.SSL_Port)) + 'map http://bar.com/overrideenforced https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map /basic 
https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overridesignature https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map /overrideenforce https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://bar.com/overrideenforced https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) + 'map /overridename https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME' + .format(server.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /basic https://127.0.0.1:{0}'.format(server.Variables.SSL_Port)) + 'map /snipolicyfooremap https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /overrideenforce https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server.Variables.SSL_Port)) + 'map /snipolicyfoohost https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /overridename https://127.0.0.1:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME'.format( - server.Variables.SSL_Port)) + 'map /snipolicyfooservername https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=server_name' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /snipolicyfooremap https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format( - server_bar.Variables.SSL_Port)) + 'map /snipolicybarremap https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /snipolicyfoohost https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format( - server_bar.Variables.SSL_Port)) + 'map /snipolicybarhost https://bar.com:{0} @plugin=conf_remap.so 
@pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host' + .format(server_bar.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map /snipolicyfooservername https://foo.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=server_name'.format( - server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map /snipolicybarremap https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=remap'.format( - server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map /snipolicybarhost https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=host'.format( - server_bar.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map /snipolicybarservername https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=server_name'.format( - server_bar.Variables.SSL_Port)) + 'map /snipolicybarservername https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=NAME @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED @plugin=conf_remap.so @pparam=proxy.config.ssl.client.sni_policy=server_name' + .format(server_bar.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # Case 1, global config policy=permissive properties=signature # override for foo.com policy=enforced properties=all -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL' -}) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.dns.nameservers': 
'127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL' + }) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) @@ -234,7 +236,6 @@ tr.StillRunningAfter = ts tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could not connect", "Curl attempt should succeed") - # Over riding the built in ERROR check since we expect some cases to fail # checks on random.com should fail with message only diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_override_sni.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_override_sni.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls/tls_verify_override_sni.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls/tls_verify_override_sni.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,22 +24,28 @@ ts = Test.MakeATSProcess("ts", select_ports=True) cafile = "{0}/signer.pem".format(Test.RunDirectory) -server_foo = Test.MakeOriginServer("server_foo", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory), - "--clientCA": cafile, - "--clientverify": ""}, - clientcert="{0}/signed-bar.pem".format(Test.RunDirectory), - clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) -server_bar = Test.MakeOriginServer("server_bar", - ssl=True, - options={"--key": "{0}/signed-foo.key".format(Test.RunDirectory), - "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory), - "--clientCA": cafile, - "--clientverify": ""}, - clientcert="{0}/signed-bar.pem".format(Test.RunDirectory), - clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) +server_foo = Test.MakeOriginServer( + "server_foo", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory), + "--clientCA": cafile, + "--clientverify": "" + }, + clientcert="{0}/signed-bar.pem".format(Test.RunDirectory), + clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) +server_bar = Test.MakeOriginServer( + "server_bar", + ssl=True, + options={ + "--key": "{0}/signed-foo.key".format(Test.RunDirectory), + "--cert": "{0}/signed-foo.pem".format(Test.RunDirectory), + "--clientCA": cafile, + "--clientverify": "" + }, + clientcert="{0}/signed-bar.pem".format(Test.RunDirectory), + clientkey="{0}/signed-bar.key".format(Test.RunDirectory)) dns = Test.MakeDNServer("dns") @@ -68,45 +74,43 @@ ts.addSSLfile("ssl/signer.pem") ts.addSSLfile("ssl/signer.key") +ts.Disk.remap_config.AddLine('map http://foo.com/defaultbar https://bar.com:{0}'.format(server_bar.Variables.SSL_Port)) +ts.Disk.remap_config.AddLine('map http://foo.com/default https://foo.com:{0}'.format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/defaultbar https://bar.com:{0}'.format(server_bar.Variables.SSL_Port)) + 'map http://foo.com/overridepolicy https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED' + .format(server_foo.Variables.SSL_Port)) ts.Disk.remap_config.AddLine( - 'map http://foo.com/default https://foo.com:{0}'.format(server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map http://foo.com/overridepolicy https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.policy=ENFORCED'.format( - server_foo.Variables.SSL_Port)) -ts.Disk.remap_config.AddLine( - 'map http://foo.com/overrideproperties https://bar.com:{0} 
@plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE'.format( - server_foo.Variables.SSL_Port)) + 'map http://foo.com/overrideproperties https://bar.com:{0} @plugin=conf_remap.so @pparam=proxy.config.ssl.client.verify.server.properties=SIGNATURE' + .format(server_foo.Variables.SSL_Port)) -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') # global config policy=permissive properties=all -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - # set global policy - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - 'proxy.config.ssl.client.verify.server.properties': 'ALL', - 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', - 'proxy.config.url_remap.pristine_host_hdr': 1, - 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), - 'proxy.config.dns.resolv_conf': 'NULL', - 'proxy.config.exec_thread.autoconfig.scale': 1.0, - 'proxy.config.ssl.client.sni_policy': 'remap' -}) - -ts.Disk.sni_yaml.AddLines([ - 'sni:', - '- fqdn: bar.com', - ' client_cert: "{0}/signed-foo.pem"'.format(ts.Variables.SSLDir), - ' client_key: "{0}/signed-foo.key"'.format(ts.Variables.SSLDir), -]) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + # set global policy + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + 'proxy.config.ssl.client.verify.server.properties': 'ALL', + 'proxy.config.ssl.client.CA.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.CA.cert.filename': 'signer.pem', + 'proxy.config.url_remap.pristine_host_hdr': 1, + 'proxy.config.dns.nameservers': '127.0.0.1:{0}'.format(dns.Variables.Port), + 'proxy.config.dns.resolv_conf': 'NULL', + 'proxy.config.exec_thread.autoconfig.scale': 1.0, + 'proxy.config.ssl.client.sni_policy': 'remap' + }) + +ts.Disk.sni_yaml.AddLines( + [ + 'sni:', + '- fqdn: bar.com', + ' client_cert: "{0}/signed-foo.pem"'.format(ts.Variables.SSLDir), + ' client_key: "{0}/signed-foo.key"'.format(ts.Variables.SSLDir), + ]) dns.addRecords(records={"foo.com.": ["127.0.0.1"]}) dns.addRecords(records={"bar.com.": ["127.0.0.1"]}) @@ -142,7 +146,6 @@ tr2.StillRunningAfter = ts tr2.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Curl attempt should have succeeded") - # Over riding the built in ERROR check since we expect some cases to fail ts.Disk.diags_log.Content = Testers.ContainsExpression( r"WARNING: SNI \(bar.com\) not in certificate. 
Action=Continue server=bar.com", "Warning for mismatch name not enforcing") diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,21 +33,19 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({ - 'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.TLSv1_3': 0, -}) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.TLSv1_3': 0, + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-preaccept=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks10.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks10.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks10.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks10.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,19 +32,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-cert=1 -i=2') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks11.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks11.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks11.test.py 2023-10-09 20:36:24.000000000 +0000 +++ 
trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks11.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,19 +33,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-d=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks12.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks12.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks12.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks12.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,19 +32,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-p=2 -d=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks13.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks13.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks13.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks13.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,19 +32,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - 
-ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port) -) + 'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-out_start=1 -out_close=2') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks14.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks14.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks14.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks14.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,20 +32,19 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE', + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port) -) + 'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-out_start_delay=2') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks15.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks15.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks15.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks15.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -32,19 +32,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': 
'{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port) -) + 'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-close=2 -out_close=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks16.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks16.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks16.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks16.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,9 +24,7 @@ Test different combinations of TLS handshake hooks to ensure they are applied consistently. ''' -Test.SkipUnless( - Condition.HasOpenSSLVersion("1.1.1") -) +Test.SkipUnless(Condition.HasOpenSSLVersion("1.1.1")) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") @@ -37,19 +35,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -) + 'map https://example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-client_hello_imm=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks17.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks17.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks17.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks17.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,9 +24,7 @@ Test different combinations of TLS handshake hooks to ensure they are applied consistently. 
''' -Test.SkipUnless( - Condition.HasOpenSSLVersion("1.1.1"), -) +Test.SkipUnless(Condition.HasOpenSSLVersion("1.1.1"),) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") @@ -37,19 +35,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -) + 'map https://example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-client_hello=1 -close=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks18.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks18.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks18.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks18.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -24,9 +24,7 @@ Test different combinations of TLS handshake hooks to ensure they are applied consistently. 
''' -Test.SkipUnless( - Condition.HasOpenSSLVersion("1.1.1"), -) +Test.SkipUnless(Condition.HasOpenSSLVersion("1.1.1"),) ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True) server = Test.MakeOriginServer("server") @@ -37,19 +35,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port) -) + 'map https://example.com:{1} http://127.0.0.1:{0}'.format(server.Variables.Port, ts.Variables.ssl_port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-client_hello=2 -close=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks2.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks2.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks2.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks2.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,19 +33,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), - 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), - }) - -ts.Disk.ssl_multicert_config.AddLine( - 'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key' -) +ts.Disk.records_config.update( + { + 'proxy.config.diags.debug.enabled': 1, + 'proxy.config.diags.debug.tags': 'ssl_hook_test', + 'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir), + 'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir), + }) + +ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key') ts.Disk.remap_config.AddLine( - 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port) -) + 'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)) Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-sni=1') diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks3.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks3.test.py --- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks3.test.py 2023-10-09 20:36:24.000000000 +0000 +++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks3.test.py 2024-04-03 15:38:30.000000000 +0000 @@ -33,19 +33,18 @@ ts.addDefaultSSLFiles() -ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1, - 'proxy.config.diags.debug.tags': 'ssl_hook_test', - 'proxy.config.ssl.server.cert.path': 
'{0}'.format(ts.Variables.SSLDir),
-                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
-                               })
-
-ts.Disk.ssl_multicert_config.AddLine(
-    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
-)
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'ssl_hook_test',
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    })
+
+ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
 
 ts.Disk.remap_config.AddLine(
-    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
-)
+    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port))
 
 Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-cert=1')
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks4.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks4.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks4.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks4.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -33,19 +33,18 @@
 ts.addDefaultSSLFiles()
 
-ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
-                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
-                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
-                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
-                               })
-
-ts.Disk.ssl_multicert_config.AddLine(
-    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
-)
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'ssl_hook_test',
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    })
+
+ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
 
 ts.Disk.remap_config.AddLine(
-    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
-)
+    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port))
 
 Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-cert=1 -sni=1 -preaccept=1')
@@ -65,10 +64,12 @@
 ts.Disk.traffic_out.Content = Testers.ContainsExpression(
     r"\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(snistring), "SNI message appears only once", reflags=re.S | re.M)
 # the preaccept may get triggered twice because the test framework creates a TCP connection before handing off to traffic_server
-ts.Disk.traffic_out.Content += Testers.ContainsExpression(r"\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(
-    preacceptstring), "Pre accept message appears only once or twice", reflags=re.S | re.M)
-ts.Disk.traffic_out.Content += Testers.ContainsExpression(r"\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(certstring),
-                                                          "Cert message appears only once", reflags=re.S | re.M)
+ts.Disk.traffic_out.Content += Testers.ContainsExpression(
+    r"\A(?:(?!{0}).)*{0}.*({0})?(?!.*{0}).*\Z".format(preacceptstring),
+    "Pre accept message appears only once or twice",
+    reflags=re.S | re.M)
+ts.Disk.traffic_out.Content += Testers.ContainsExpression(
+    r"\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(certstring), "Cert message appears only once", reflags=re.S | re.M)
 
 tr.Processes.Default.TimeOut = 15
 tr.TimeOut = 15
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks6.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks6.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks6.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks6.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -33,19 +33,18 @@
 ts.addDefaultSSLFiles()
 
-ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
-                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
-                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
-                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
-                               })
-
-ts.Disk.ssl_multicert_config.AddLine(
-    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
-)
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'ssl_hook_test',
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    })
+
+ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
 
 ts.Disk.remap_config.AddLine(
-    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
-)
+    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port))
 
 Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-preaccept=2')
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks7.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks7.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks7.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks7.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -33,19 +33,18 @@
 ts.addDefaultSSLFiles()
 
-ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
-                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
-                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
-                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
-                               })
-
-ts.Disk.ssl_multicert_config.AddLine(
-    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
-)
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'ssl_hook_test',
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    })
+
+ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
 
 ts.Disk.remap_config.AddLine(
-    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
-)
+    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port))
 
 Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-sni=2')
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks8.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks8.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks8.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks8.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -33,19 +33,18 @@
 ts.addDefaultSSLFiles()
 
-ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
-                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
-                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
-                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
-                               })
-
-ts.Disk.ssl_multicert_config.AddLine(
-    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
-)
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'ssl_hook_test',
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    })
+
+ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
 
 ts.Disk.remap_config.AddLine(
-    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
-)
+    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port))
 
 Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-cert=2')
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks9.test.py trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks9.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/tls_hooks/tls_hooks9.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/tls_hooks/tls_hooks9.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -33,19 +33,18 @@
 ts.addDefaultSSLFiles()
 
-ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
-                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
-                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
-                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
-                               })
-
-ts.Disk.ssl_multicert_config.AddLine(
-    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
-)
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'ssl_hook_test',
+        'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
+        'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
+    })
+
+ts.Disk.ssl_multicert_config.AddLine('dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key')
 
 ts.Disk.remap_config.AddLine(
-    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port)
-)
+    'map https://example.com:{0} http://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.Port))
 
 Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-i=1')
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/traffic_ctl/remap_inc/remap_inc.test.py trafficserver-9.2.4+ds/tests/gold_tests/traffic_ctl/remap_inc/remap_inc.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/traffic_ctl/remap_inc/remap_inc.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/traffic_ctl/remap_inc/remap_inc.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 import os
+
 Test.Summary = '''
 Test traffic_ctl config reload with remap.config .include directive
 '''
@@ -29,35 +30,26 @@
 ts.Disk.File(ts.Variables.CONFIGDIR + "/test.inc", id="test_cfg", typename="ats:config")
 
 ts.Disk.test_cfg.AddLine(
-    "map http://example.two/ http://yada.com/ " +
-    "@plugin=conf_remap.so @pparam=proxy.config.url_remap.pristine_host_hdr=1"
-)
-
-ts.Disk.remap_config.AddLine(
-    "map http://example.one/ http://yada.com/"
-)
-ts.Disk.remap_config.AddLine(
-    ".include test.inc"
-)
-ts.Disk.remap_config.AddLine(
-    "map http://example.three/ http://yada.com/"
-)
+    "map http://example.two/ http://yada.com/ " "@plugin=conf_remap.so @pparam=proxy.config.url_remap.pristine_host_hdr=1")
+
+ts.Disk.remap_config.AddLine("map http://example.one/ http://yada.com/")
+ts.Disk.remap_config.AddLine(".include test.inc")
+ts.Disk.remap_config.AddLine("map http://example.three/ http://yada.com/")
 
 # minimal configuration
-ts.Disk.records_config.update({
-    'proxy.config.diags.debug.enabled': 1,
-    'proxy.config.diags.debug.tags': 'regex_remap|url_rewrite|plugin_factory',
-    'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}",
-})
+ts.Disk.records_config.update(
+    {
+        'proxy.config.diags.debug.enabled': 1,
+        'proxy.config.diags.debug.tags': 'regex_remap|url_rewrite|plugin_factory',
+        'proxy.config.dns.nameservers': f"127.0.0.1:{nameserver.Variables.Port}",
+    })
 
 tr = Test.AddTestRun("Start TS, then update test.inc")
 tr.Processes.Default.StartBefore(Test.Processes.ts)
 tr.Processes.Default.StartBefore(nameserver)
 
 test_inc_path = ts.Variables.CONFIGDIR + "/test.inc"
 tr.Processes.Default.Command = (
-    f"rm -f {test_inc_path} ; " +
-    f"echo 'map http://example.four/ http://localhost/ @plugin=generator.so' > {test_inc_path}"
-)
+    f"rm -f {test_inc_path} ; "
+    f"echo 'map http://example.four/ http://localhost/ @plugin=generator.so' > {test_inc_path}")
 tr.Processes.Default.ReturnCode = 0
 tr.StillRunningAfter = ts
@@ -74,12 +66,9 @@
 tr.StillRunningAfter = ts
 
 tr = Test.AddTestRun("Get response from generator")
-tr.Processes.Default.Command = (
-    f'test $$(curl --proxy 127.0.0.1:{ts.Variables.port} http://example.four/nocache/5 | wc -c) == 5'
-)
+tr.Processes.Default.Command = (f'test $$(curl --proxy 127.0.0.1:{ts.Variables.port} http://example.four/nocache/5 | wc -c) == 5')
 tr.Processes.Default.ReturnCode = 0
 tr.StillRunningAfter = ts
 
 ts.Disk.manager_log.Content += Testers.ExcludesExpression(
-    "needs restart",
-    "Ensure that extra msg reported in issue #7530 does not reappear")
+    "needs restart", "Ensure that extra msg reported in issue #7530 does not reappear")
diff -Nru trafficserver-9.2.3+ds/tests/gold_tests/url/uri.test.py trafficserver-9.2.4+ds/tests/gold_tests/url/uri.test.py
--- trafficserver-9.2.3+ds/tests/gold_tests/url/uri.test.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/gold_tests/url/uri.test.py 2024-04-03 15:38:30.000000000 +0000
@@ -28,14 +28,8 @@
     'proxy.config.diags.debug.enabled': 1,
     'proxy.config.diags.debug.tags': 'http|cache|url',
 })
-ts.Disk.remap_config.AddLine(
-    'map / http://127.0.0.1:{0}'.format(server.Variables.http_port)
-)
+ts.Disk.remap_config.AddLine('map / http://127.0.0.1:{0}'.format(server.Variables.http_port))
 
 tr = Test.AddTestRun("Verify correct URI parsing behavior.")
 tr.Processes.Default.StartBefore(server)
 tr.Processes.Default.StartBefore(ts)
-tr.AddVerifierClientProcess(
-    "client",
-    replay_file,
-    http_ports=[ts.Variables.port],
-    other_args='--thread-limit 1')
+tr.AddVerifierClientProcess("client", replay_file, http_ports=[ts.Variables.port], other_args='--thread-limit 1')
diff -Nru trafficserver-9.2.3+ds/tests/tools/plugins/test_log_interface.cc trafficserver-9.2.4+ds/tests/tools/plugins/test_log_interface.cc
--- trafficserver-9.2.3+ds/tests/tools/plugins/test_log_interface.cc 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/tools/plugins/test_log_interface.cc 2024-04-03 15:38:30.000000000 +0000
@@ -64,11 +64,11 @@
     return 0;
 
   default:
-    return 0;
+    TSError("[%s] global_handler: unexpected event: %d\n", plugin_name, event);
+    break;
   }
 
   TSHttpSsnReenable(session, TS_EVENT_HTTP_CONTINUE);
-
   return 0;
 }
diff -Nru trafficserver-9.2.3+ds/tests/tools/tcp_client.py trafficserver-9.2.4+ds/tests/tools/tcp_client.py
--- trafficserver-9.2.3+ds/tests/tools/tcp_client.py 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tests/tools/tcp_client.py 2024-04-03 15:38:30.000000000 +0000
@@ -53,13 +53,12 @@
 
 
 def main(argv):
-    parser = argparse.ArgumentParser(description=DESCRIPTION,
-                                     formatter_class=argparse.RawDescriptionHelpFormatter)
+    parser = argparse.ArgumentParser(description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter)
     parser.add_argument('host', help='the target host')
     parser.add_argument('port', type=int, help='the target port')
     parser.add_argument('file', help='the file with content to be sent')
-    parser.add_argument('--delay-after-send', metavar='SECONDS', type=int,
-                        help='after send, delay in seconds before half-close', default=0)
+    parser.add_argument(
+        '--delay-after-send', metavar='SECONDS', type=int, help='after send, delay in seconds before half-close', default=0)
     args = parser.parse_args()
 
     data = ''
diff -Nru trafficserver-9.2.3+ds/tools/git/pre-commit trafficserver-9.2.4+ds/tools/git/pre-commit
--- trafficserver-9.2.3+ds/tools/git/pre-commit 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tools/git/pre-commit 2024-04-03 15:38:30.000000000 +0000
@@ -42,10 +42,10 @@
     exit 1
 fi
 
-source "$GIT_TOP/tools/autopep8.sh"
-if [ ! -d ${AUTOPEP8_VENV} ]
+source "$GIT_TOP/tools/yapf.sh"
+if [ ! -d ${YAPF_VENV} ]
 then
-    echo "Run \"make autopep8\""
+    echo "Run \"make yaf\""
     exit 1
 fi
 source ${AUTOPEP8_VENV}/bin/activate
@@ -53,26 +53,27 @@
 
 # Where to store the patch
 clang_patch_file=$(mktemp -t clang-format.XXXXXXXXXX)
-autopep8_patch_file=$(mktemp -t autopep8.XXXXXXXXXX)
-trap "rm -f $clang_patch_file $autopep8_patch_file" 0 1 2 3 5 15
+yapf_patch_file=$(mktemp -t yapf.XXXXXXXXXX)
+trap "rm -f $clang_patch_file $yapf_patch_file" 0 1 2 3 5 15
 
 # Loop over all files that are changed, and produce a diff file
+source ${YAPF_VENV}/bin/activate
+REPO_ROOT=$(cd $(dirname $0) && git rev-parse --show-toplevel)
+YAPF_CONFIG=${REPO_ROOT}/.style.yapf
 git diff-index --cached --diff-filter=ACMR --name-only HEAD | grep -vE "lib/yamlcpp" | while read file; do
     case "$file" in
     *.cc | *.c | *.h | *.h.in)
         ${FORMAT} "$file" | diff -u "$file" - >> "$clang_patch_file"
         ;;
     # Keep this list of Python extensions the same with the list of
-    # extensions searched for in the toosl/autopep8.sh script.
+    # extensions searched for in the toosl/yapf.sh script.
     *.py | *.cli.ext | *.test.ext)
-        autopep8 \
-            --ignore-local-config \
-            --exclude ${GIT_TOP}/lib/yamlcpp \
-            --max-line-length 132 \
-            --aggressive \
-            --aggressive \
+        yapf \
+            --style ${YAPF_CONFIG} \
+            --parallel \
             --diff \
-            "$file" >> "$autopep8_patch_file"
+            "$file" >>"$yapf_patch_file"
+        ;;
     esac
 done
@@ -87,19 +88,18 @@
     echo
 fi
 
-
-if [ -s "$autopep8_patch_file" ] ; then
-    echo "The commit is not accepted because autopep8 reports issues with it."
+if [ -s "$yapf_patch_file" ]; then
+    echo "The commit is not accepted because yapf reports issues with it."
     echo "The easiest way to fix this is to run:"
     echo
-    echo "    $ make autopep8"
+    echo "    $ make yapf"
     exit 1
 else
-    echo "This commit complies with the current autopep8 formatting rules."
+    echo "This commit complies with the current yapf formatting rules."
     echo
 fi
 
 # Cleanup before exit
 deactivate
-rm -f "$clang_patch_file" "$autopep8_patch_file"
+rm -f "$clang_patch_file" "$yapf_patch_file"
 
 exit 0
diff -Nru trafficserver-9.2.3+ds/tools/package/trafficserver.spec trafficserver-9.2.4+ds/tools/package/trafficserver.spec
--- trafficserver-9.2.3+ds/tools/package/trafficserver.spec 2023-10-09 20:36:24.000000000 +0000
+++ trafficserver-9.2.4+ds/tools/package/trafficserver.spec 2024-04-03 15:38:30.000000000 +0000
@@ -26,7 +26,7 @@
 Summary: Apache Traffic Server, a reverse, forward and transparent HTTP proxy cache
 Name: trafficserver
-Version: 9.2.3
+Version: 9.2.4
 Release: %{release}%{?dist}
 License: Apache Software License 2.0 (AL2)
 Group: System Environment/Daemons
diff -Nru trafficserver-9.2.3+ds/tools/yapf.sh trafficserver-9.2.4+ds/tools/yapf.sh
--- trafficserver-9.2.3+ds/tools/yapf.sh 1970-01-01 00:00:00.000000000 +0000
+++ trafficserver-9.2.4+ds/tools/yapf.sh 2024-04-03 15:38:30.000000000 +0000
@@ -0,0 +1,109 @@
+#! /usr/bin/env bash
+#
+# Simple wrapper to run yapf on a directory.
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Update these VERSION variables with the new desired yapf tag when a new
+# yapf version is desired.
+# See:
+# https://github.com/google/yapf/tags
+YAPF_VERSION="v0.32.0"
+VERSION="yapf 0.32.0"
+
+function main() {
+    # check for python3
+    python3 - << _END_
+import sys
+
+if sys.version_info.major < 3 or sys.version_info.minor < 8:
+    exit(1)
+_END_
+
+    if [ $? = 1 ]; then
+        echo "Python 3.8 or newer is not installed/enabled."
+        exit 1
+    fi
+
+    set -e # exit on error
+
+    if ! type virtualenv >/dev/null 2>/dev/null
+    then
+        pip install -q virtualenv
+    fi
+
+    REPO_ROOT=$(cd $(dirname $0) && git rev-parse --show-toplevel)
+    YAPF_VENV=${YAPF_VENV:-${REPO_ROOT}/.git/fmt/yapf_${YAPF_VERSION}_venv}
+    if [ ! -e ${YAPF_VENV} ]
+    then
+        python3 -m virtualenv ${YAPF_VENV}
+    fi
+    source ${YAPF_VENV}/bin/activate
+
+    pip install -q --upgrade pip
+    pip install -q "yapf==${YAPF_VERSION}"
+
+    ver=$(yapf --version 2>&1)
+    if [ "$ver" != "$VERSION" ]
+    then
+        echo "Wrong version of yapf!"
+        echo "Expected: \"${VERSION}\", got: \"${ver}\""
+        exit 1
+    fi
+
+    DIR=${@:-.}
+
+    # Only run yapf on tracked files. This saves time and possibly avoids
+    # formatting files the user doesn't want formatted.
+    tmp_dir=$(mktemp -d -t tracked-git-files.XXXXXXXXXX)
+    files=${tmp_dir}/git_files.txt
+    files_filtered=${tmp_dir}/git_files_filtered.txt
+    git ls-tree -r HEAD --name-only ${DIR} | grep -vE "lib/yamlcpp" > ${files}
+    # Add to the above any newly added staged files.
+    git diff --cached --name-only --diff-filter=A >> ${files}
+    # Keep this list of Python extensions the same with the list of
+    # extensions searched for in the tools/git/pre-commit hook.
+    grep -E '\.py$|\.cli.ext$|\.test.ext$' ${files} > ${files_filtered}
+    # Prepend the filenames with "./" to make the modified file output consistent
+    # with the clang-format target output.
+    sed -i'.bak' 's:^:\./:' ${files_filtered}
+    rm -f ${files_filtered}.bak
+
+    # Efficiently retrieving modification timestamps in a platform
+    # independent way is challenging. We use find's -newer argument, which
+    # seems to be broadly supported. The following file is created and has a
+    # timestamp just before running yapf. Any file with a timestamp
+    # after this we assume was modified by yapf.
+    start_time_file=${tmp_dir}/format_start.$$
+    touch ${start_time_file}
+    YAPF_CONFIG=${REPO_ROOT}/.style.yapf
+    yapf \
+        --style ${YAPF_CONFIG} \
+        --parallel \
+        --in-place \
+        $(cat ${files_filtered})
+    find $(cat ${files_filtered}) -newer ${start_time_file}
+
+    rm -rf ${tmp_dir}
+    deactivate
+}
+
+if [[ "$(basename -- "$0")" == 'yapf.sh' ]]; then
+    main "$@"
+else
+    YAPF_VENV=${YAPF_VENV:-$(git rev-parse --show-toplevel)/.git/fmt/yapf_${YAPF_VERSION}_venv}
+fi
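
Note on the tls_hooks assertions above: each test pins a debug message to exactly one occurrence in traffic.out with the tempered-dot pattern r"\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z" (the preaccept variant merely tolerates a second hit). A minimal standalone sketch of that check outside the autest framework follows; the helper name, the re.escape() call and the sample log strings are illustrative additions, not part of the test suite.

import re


def appears_exactly_once(needle: str, haystack: str) -> bool:
    # Consume characters that do not start an occurrence of `needle`, match one
    # occurrence, then require that no further occurrence appears before the end.
    pattern = r"\A(?:(?!{0}).)*{0}(?!.*{0}).*\Z".format(re.escape(needle))
    # re.S lets '.' cross newlines, as in the gold tests; re.M is passed only to
    # mirror the reflags used there.
    return re.search(pattern, haystack, re.S | re.M) is not None


# Hypothetical log excerpts, not actual traffic.out content.
log_once = "startup\nCert callback message\nshutdown\n"
log_twice = "Cert callback message\nCert callback message\n"

assert appears_exactly_once("Cert callback message", log_once)
assert not appears_exactly_once("Cert callback message", log_twice)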
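
On the new yapf tooling (tools/yapf.sh above and the rewritten tools/git/pre-commit): both run yapf v0.32.0 against the .style.yapf config; the hook collects `yapf --diff` output and rejects the commit when it is non-empty, while tools/yapf.sh rewrites files in place. Roughly the same check can be driven from Python through yapf's FormatCode API. The sketch below is illustrative only: it assumes yapf==0.32.0 is installed and .style.yapf is reachable from the working directory, and is_yapf_clean is not a helper that exists in the tree.

import sys

from yapf.yapflib.yapf_api import FormatCode


def is_yapf_clean(path: str, style: str = '.style.yapf') -> bool:
    # FormatCode returns (formatted_source, changed); `changed` is True when the
    # input does not already match the configured style, which is what
    # `yapf --style .style.yapf --diff <file>` reports as a non-empty diff.
    with open(path) as f:
        source = f.read()
    _, changed = FormatCode(source, style_config=style)
    return not changed


if __name__ == '__main__':
    # Usage: python3 check_yapf.py some_test.py  (exit 0 if already formatted)
    sys.exit(0 if is_yapf_clean(sys.argv[1]) else 1)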