Version in base suite: 9.0.43-2~deb11u6 Base version: tomcat9_9.0.43-2~deb11u6 Target version: tomcat9_9.0.43-2~deb11u7 Base file: /srv/ftp-master.debian.org/ftp/pool/main/t/tomcat9/tomcat9_9.0.43-2~deb11u6.dsc Target file: /srv/ftp-master.debian.org/policy/pool/main/t/tomcat9/tomcat9_9.0.43-2~deb11u7.dsc changelog | 32 ++++ patches/CVE-2023-24998.patch | 203 +++++++++++++++++++++++++++++ patches/CVE-2023-28709.patch | 21 +++ patches/CVE-2023-41080.patch | 17 ++ patches/CVE-2023-42795.patch | 218 +++++++++++++++++++++++++++++++ patches/CVE-2023-44487.patch | 294 +++++++++++++++++++++++++++++++++++++++++++ patches/CVE-2023-45648.patch | 82 +++++++++++ patches/series | 6 8 files changed, 873 insertions(+) diff -Nru tomcat9-9.0.43/debian/changelog tomcat9-9.0.43/debian/changelog --- tomcat9-9.0.43/debian/changelog 2023-04-05 15:47:16.000000000 +0000 +++ tomcat9-9.0.43/debian/changelog 2023-10-10 16:20:19.000000000 +0000 @@ -1,3 +1,35 @@ +tomcat9 (9.0.43-2~deb11u7) bullseye-security; urgency=high + + * Fix CVE-2023-45648: Request smuggling. Tomcat did not correctly parse HTTP + trailer headers. A specially crafted, invalid trailer header could cause + Tomcat to treat a single request as multiple requests leading to the + possibility of request smuggling when behind a reverse proxy. + * Fix CVE-2023-44487: DoS caused by HTTP/2 frame overhead (Rapid Reset Attack) + * Fix CVE-2023-42795: Information Disclosure. When recycling various internal + objects, including the request and the response, prior to re-use by the next + request/response, an error could cause Tomcat to skip some parts of the + recycling process leading to information leaking from the current + request/response to the next. + * Fix CVE-2023-41080: Open redirect. If the ROOT (default) web application + is configured to use FORM authentication then it is possible that a + specially crafted URL could be used to trigger a redirect to an URL of + the attackers choice. + * Fix CVE-2023-28709: Denial of Service. 
If non-default HTTP connector + settings were used such that the maxParameterCount could be reached using + query string parameters and a request was submitted that supplied exactly + maxParameterCount parameters in the query string, the limit for uploaded + request parts could be bypassed with the potential for a denial of service + to occur. + * Fix CVE-2023-24998: Denial of service. Tomcat uses a packaged renamed copy + of Apache Commons FileUpload to provide the file upload functionality + defined in the Jakarta Servlet specification. Apache Tomcat was, therefore, + also vulnerable to the Commons FileUpload vulnerability CVE-2023-24998 as + there was no limit to the number of request parts processed. This resulted + in the possibility of an attacker triggering a DoS with a malicious upload + or series of uploads. + + -- Emmanuel Bourg Tue, 10 Oct 2023 18:20:19 +0200 + tomcat9 (9.0.43-2~deb11u6) bullseye-security; urgency=high * Team upload. diff -Nru tomcat9-9.0.43/debian/patches/CVE-2023-24998.patch tomcat9-9.0.43/debian/patches/CVE-2023-24998.patch --- tomcat9-9.0.43/debian/patches/CVE-2023-24998.patch 1970-01-01 00:00:00.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/CVE-2023-24998.patch 2023-10-10 15:45:20.000000000 +0000 @@ -0,0 +1,203 @@ +Description: Update packaged renamed fork of Commons File Upload +Origin: upstream, https://github.com/apache/tomcat/commit/cf77cc545de0488fb89e24294151504a7432df74 +--- a/java/org/apache/catalina/connector/Request.java ++++ b/java/org/apache/catalina/connector/Request.java +@@ -2862,8 +2862,9 @@ + } + } + ++ int maxParameterCount = getConnector().getMaxParameterCount(); + Parameters parameters = coyoteRequest.getParameters(); +- parameters.setLimit(getConnector().getMaxParameterCount()); ++ parameters.setLimit(maxParameterCount); + + boolean success = false; + try { +@@ -2915,6 +2916,13 @@ + upload.setFileItemFactory(factory); + upload.setFileSizeMax(mce.getMaxFileSize()); + 
upload.setSizeMax(mce.getMaxRequestSize()); ++ if (maxParameterCount > -1) { ++ // There is a limit. The limit for parts needs to be reduced by ++ // the number of parameters we have already parsed. ++ // Must be under the limit else parsing parameters would have ++ // triggered an exception. ++ upload.setFileCountMax(maxParameterCount - parameters.size()); ++ } + + parts = new ArrayList<>(); + try { +--- a/java/org/apache/tomcat/util/http/Parameters.java ++++ b/java/org/apache/tomcat/util/http/Parameters.java +@@ -125,6 +125,11 @@ + } + + ++ public int size() { ++ return parameterCount; ++ } ++ ++ + public void recycle() { + parameterCount = 0; + paramHashValues.clear(); +--- a/java/org/apache/tomcat/util/http/fileupload/FileUploadBase.java ++++ b/java/org/apache/tomcat/util/http/fileupload/FileUploadBase.java +@@ -25,6 +25,7 @@ + import java.util.Map; + import java.util.Objects; + ++import org.apache.tomcat.util.http.fileupload.impl.FileCountLimitExceededException; + import org.apache.tomcat.util.http.fileupload.impl.FileItemIteratorImpl; + import org.apache.tomcat.util.http.fileupload.impl.FileItemStreamImpl; + import org.apache.tomcat.util.http.fileupload.impl.FileUploadIOException; +@@ -133,6 +134,12 @@ + private long fileSizeMax = -1; + + /** ++ * The maximum permitted number of files that may be uploaded in a single ++ * request. A value of -1 indicates no maximum. ++ */ ++ private long fileCountMax = -1; ++ ++ /** + * The content encoding to use when reading part headers. + */ + private String headerEncoding; +@@ -209,6 +216,24 @@ + } + + /** ++ * Returns the maximum number of files allowed in a single request. ++ * ++ * @return The maximum number of files allowed in a single request. ++ */ ++ public long getFileCountMax() { ++ return fileCountMax; ++ } ++ ++ /** ++ * Sets the maximum number of files allowed per request. ++ * ++ * @param fileCountMax The new limit. {@code -1} means no limit. 
++ */ ++ public void setFileCountMax(long fileCountMax) { ++ this.fileCountMax = fileCountMax; ++ } ++ ++ /** + * Retrieves the character encoding used when reading the headers of an + * individual part. When not specified, or {@code null}, the request + * encoding is used. If that is also not specified, or {@code null}, +@@ -281,6 +306,10 @@ + final FileItemFactory fileItemFactory = Objects.requireNonNull(getFileItemFactory(), "No FileItemFactory has been set."); + final byte[] buffer = new byte[Streams.DEFAULT_BUFFER_SIZE]; + while (iter.hasNext()) { ++ if (items.size() == fileCountMax) { ++ // The next item will exceed the limit. ++ throw new FileCountLimitExceededException(ATTACHMENT, getFileCountMax()); ++ } + final FileItemStream item = iter.next(); + // Don't use getName() here to prevent an InvalidFileNameException. + final String fileName = ((FileItemStreamImpl) item).getName(); +--- /dev/null ++++ b/java/org/apache/tomcat/util/http/fileupload/impl/FileCountLimitExceededException.java +@@ -0,0 +1,50 @@ ++/* ++ * Licensed to the Apache Software Foundation (ASF) under one or more ++ * contributor license agreements. See the NOTICE file distributed with ++ * this work for additional information regarding copyright ownership. ++ * The ASF licenses this file to You under the Apache License, Version 2.0 ++ * (the "License"); you may not use this file except in compliance with ++ * the License. You may obtain a copy of the License at ++ * ++ * http://www.apache.org/licenses/LICENSE-2.0 ++ * ++ * Unless required by applicable law or agreed to in writing, software ++ * distributed under the License is distributed on an "AS IS" BASIS, ++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ * See the License for the specific language governing permissions and ++ * limitations under the License. 
++ */ ++package org.apache.tomcat.util.http.fileupload.impl; ++ ++import org.apache.tomcat.util.http.fileupload.FileUploadException; ++ ++/** ++ * This exception is thrown if a request contains more files than the specified ++ * limit. ++ */ ++public class FileCountLimitExceededException extends FileUploadException { ++ ++ private static final long serialVersionUID = 2408766352570556046L; ++ ++ private final long limit; ++ ++ /** ++ * Creates a new instance. ++ * ++ * @param message The detail message ++ * @param limit The limit that was exceeded ++ */ ++ public FileCountLimitExceededException(final String message, final long limit) { ++ super(message); ++ this.limit = limit; ++ } ++ ++ /** ++ * Retrieves the limit that was exceeded. ++ * ++ * @return The limit that was exceeded by the request ++ */ ++ public long getLimit() { ++ return limit; ++ } ++} +--- a/webapps/docs/config/ajp.xml ++++ b/webapps/docs/config/ajp.xml +@@ -136,12 +136,15 @@ + + + +-

The maximum number of parameter and value pairs (GET plus POST) which +- will be automatically parsed by the container. Parameter and value pairs +- beyond this limit will be ignored. A value of less than 0 means no limit. +- If not specified, a default of 10000 is used. Note that +- FailedRequestFilter filter can be +- used to reject requests that hit the limit.

++

The maximum total number of request parameters (including uploaded ++ files) obtained from the query string and, for POST requests, the request ++ body if the content type is ++ application/x-www-form-urlencoded or ++ multipart/form-data. Request parameters beyond this limit ++ will be ignored. A value of less than 0 means no limit. If not specified, ++ a default of 10000 is used. Note that FailedRequestFilter ++ filter can be used to reject requests that ++ exceed the limit.

+
+ + +--- a/webapps/docs/config/http.xml ++++ b/webapps/docs/config/http.xml +@@ -153,12 +153,15 @@ + + + +-

The maximum number of parameter and value pairs (GET plus POST) which +- will be automatically parsed by the container. Parameter and value pairs +- beyond this limit will be ignored. A value of less than 0 means no limit. +- If not specified, a default of 10000 is used. Note that +- FailedRequestFilter filter can be +- used to reject requests that hit the limit.

++

The maximum total number of request parameters (including uploaded ++ files) obtained from the query string and, for POST requests, the request ++ body if the content type is ++ application/x-www-form-urlencoded or ++ multipart/form-data. Request parameters beyond this limit ++ will be ignored. A value of less than 0 means no limit. If not specified, ++ a default of 10000 is used. Note that FailedRequestFilter ++ filter can be used to reject requests that ++ exceed the limit.

+
+ + diff -Nru tomcat9-9.0.43/debian/patches/CVE-2023-28709.patch tomcat9-9.0.43/debian/patches/CVE-2023-28709.patch --- tomcat9-9.0.43/debian/patches/CVE-2023-28709.patch 1970-01-01 00:00:00.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/CVE-2023-28709.patch 2023-10-10 15:56:44.000000000 +0000 @@ -0,0 +1,21 @@ +Description: Fix parameter counting logic +Origin: upstream, https://github.com/apache/tomcat/commit/fbd81421629afe8b8a3922d59020cde81caea861 +--- a/java/org/apache/tomcat/util/http/Parameters.java ++++ b/java/org/apache/tomcat/util/http/Parameters.java +@@ -206,14 +206,14 @@ + return; + } + +- parameterCount ++; +- if (limit > -1 && parameterCount > limit) { ++ if (limit > -1 && parameterCount >= limit) { + // Processing this parameter will push us over the limit. ISE is + // what Request.parseParts() uses for requests that are too big + setParseFailedReason(FailReason.TOO_MANY_PARAMETERS); + throw new IllegalStateException(sm.getString( + "parameters.maxCountFail", Integer.valueOf(limit))); + } ++ parameterCount++; + + ArrayList values = paramHashValues.get(key); + if (values == null) { diff -Nru tomcat9-9.0.43/debian/patches/CVE-2023-41080.patch tomcat9-9.0.43/debian/patches/CVE-2023-41080.patch --- tomcat9-9.0.43/debian/patches/CVE-2023-41080.patch 1970-01-01 00:00:00.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/CVE-2023-41080.patch 2023-10-10 15:11:15.000000000 +0000 @@ -0,0 +1,17 @@ +Description: Avoid protocol relative redirects +Origin: upstream, https://github.com/apache/tomcat/commit/77c0ce2d169efa248b64b992e547aad549ec906b.patch +--- a/java/org/apache/catalina/authenticator/FormAuthenticator.java ++++ b/java/org/apache/catalina/authenticator/FormAuthenticator.java +@@ -721,6 +721,12 @@ + sb.append('?'); + sb.append(saved.getQueryString()); + } ++ ++ // Avoid protocol relative redirects ++ while (sb.length() > 1 && sb.charAt(1) == '/') { ++ sb.deleteCharAt(0); ++ } ++ + return sb.toString(); + } + } diff -Nru 
tomcat9-9.0.43/debian/patches/CVE-2023-42795.patch tomcat9-9.0.43/debian/patches/CVE-2023-42795.patch --- tomcat9-9.0.43/debian/patches/CVE-2023-42795.patch 1970-01-01 00:00:00.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/CVE-2023-42795.patch 2023-10-10 16:02:07.000000000 +0000 @@ -0,0 +1,218 @@ +Description: Improve handling of failures during recycle() methods +Origin: upstream, https://github.com/apache/tomcat/commit/44d05d75d696ca10ce251e4e370511e38f20ae75 +--- a/java/org/apache/catalina/connector/LocalStrings.properties ++++ b/java/org/apache/catalina/connector/LocalStrings.properties +@@ -45,6 +45,7 @@ + coyoteRequest.authenticate.ise=Cannot call authenticate() after the response has been committed + coyoteRequest.changeSessionId=Cannot change session ID. There is no session associated with this request. + coyoteRequest.chunkedPostTooLarge=Parameters were not parsed because the size of the posted data was too big. Because this request was a chunked request, it could not be processed further. Use the maxPostSize attribute of the connector to resolve this if the application should accept large POSTs. 
++coyoteRequest.deletePartFailed=Failed to deleted temporary file used for part [{0}] + coyoteRequest.filterAsyncSupportUnknown=Unable to determine if any filters do not support async processing + coyoteRequest.getContextPath.ise=Unable to find match between the canonical context path [{0}] and the URI presented by the user agent [{1}] + coyoteRequest.getInputStream.ise=getReader() has already been called for this request +--- a/java/org/apache/catalina/connector/Request.java ++++ b/java/org/apache/catalina/connector/Request.java +@@ -482,8 +482,9 @@ + for (Part part: parts) { + try { + part.delete(); +- } catch (IOException ignored) { +- // ApplicationPart.delete() never throws an IOEx ++ } catch (Throwable t) { ++ ExceptionUtils.handleThrowable(t); ++ log.warn(sm.getString("coyoteRequest.deletePartFailed", part.getName()), t); + } + } + parts = null; +@@ -536,8 +537,8 @@ + asyncSupported = null; + if (asyncContext!=null) { + asyncContext.recycle(); ++ asyncContext = null; + } +- asyncContext = null; + } + + +--- a/java/org/apache/catalina/core/ApplicationHttpRequest.java ++++ b/java/org/apache/catalina/core/ApplicationHttpRequest.java +@@ -50,6 +50,7 @@ + import org.apache.catalina.util.ParameterMap; + import org.apache.catalina.util.RequestUtil; + import org.apache.catalina.util.URLEncoder; ++import org.apache.tomcat.util.ExceptionUtils; + import org.apache.tomcat.util.buf.B2CConverter; + import org.apache.tomcat.util.buf.MessageBytes; + import org.apache.tomcat.util.http.Parameters; +@@ -645,7 +646,12 @@ + */ + public void recycle() { + if (session != null) { +- session.endAccess(); ++ try { ++ session.endAccess(); ++ } catch (Throwable t) { ++ ExceptionUtils.handleThrowable(t); ++ context.getLogger().warn(sm.getString("applicationHttpRequest.sessionEndAccessFail"), t); ++ } + } + } + +--- a/java/org/apache/catalina/core/LocalStrings.properties ++++ b/java/org/apache/catalina/core/LocalStrings.properties +@@ -59,6 +59,7 @@ + 
applicationFilterRegistration.nullInitParams=Unable to set initialisation parameters for filter due to null name and/or value. Name [{0}], Value [{1}] + + applicationHttpRequest.fragmentInDispatchPath=The fragment in dispatch path [{0}] has been removed ++applicationHttpRequest.sessionEndAccessFail=Exception triggered ending access to session while recycling request + + applicationPushBuilder.methodInvalid=The HTTP method for a push request must be both cacheable and safe but [{0}] is not + applicationPushBuilder.methodNotToken=HTTP methods must be tokens but [{0}] contains a non-token character +--- a/java/org/apache/catalina/core/LocalStrings_cs.properties ++++ b/java/org/apache/catalina/core/LocalStrings_cs.properties +@@ -24,6 +24,8 @@ + + applicationFilterRegistration.nullInitParams=Není možné nastavit inicializační parametry pro filtr kvůli hodnotě null ve jménu či hodnotě. Jméno [{0}], Hodnota [{1}] + ++applicationHttpRequest.sessionEndAccessFail=Výjimka vyvolala ukončení přístupu k session během recykllování dotazu ++ + aprListener.initializingFIPS=Inicializace FIPS módu... + + containerBase.backgroundProcess.cluster=Výjimka při zpracování procesu na pozadí v clusteru [{0}] +--- a/java/org/apache/catalina/core/LocalStrings_es.properties ++++ b/java/org/apache/catalina/core/LocalStrings_es.properties +@@ -50,6 +50,8 @@ + applicationFilterRegistration.nullInitParam=No puedo poner el parámetro de inicialización para el filtro debido a un nombre nulo y/o valor. Nombre [{0}], Valor [{1}] + applicationFilterRegistration.nullInitParams=No puedo poner los parámetros de inicialización para el filtro debido a un nombre nulo y/o valor. 
Nombre [{0}], Valor [{1}] + ++applicationHttpRequest.sessionEndAccessFail=Excepción disparada acabando acceso a sesión mientras se reciclaba el requerimiento ++ + applicationServletRegistration.setServletSecurity.iae=Se ha especificado restricción Null para el servlet [{0}] desplegado en el contexto con el nombre [{1}] + applicationServletRegistration.setServletSecurity.ise=No se pueden añadir restricciones de seguridad al servlet [{0}] desplegado en el contexto con el nombre [{1}] ya que el contexto ya ha sido inicializado. + +--- a/java/org/apache/catalina/core/LocalStrings_fr.properties ++++ b/java/org/apache/catalina/core/LocalStrings_fr.properties +@@ -59,6 +59,7 @@ + applicationFilterRegistration.nullInitParams=Impossible de fixer les paramètres d''initialisation du filtre, à cause d''un nom ou d''une valeur nulle, nom [{0}], valeur [{1}] + + applicationHttpRequest.fragmentInDispatchPath=Le fragment dans le chemin de dispatch [{0}] a été enlevé ++applicationHttpRequest.sessionEndAccessFail=Exception lancée durant l'arrêt de l'accès à la session durant le recyclage de la requête + + applicationPushBuilder.methodInvalid=La méthode HTTP pour une requête push doit être à la fois être sans danger et pouvoir être mise en cache, mais [{0}] ne correspond pas + applicationPushBuilder.methodNotToken=Les méthodes HTTP doivent être des "token", mais [{0}] contient un caractère invalide dans un token. 
+--- a/java/org/apache/catalina/core/LocalStrings_ja.properties ++++ b/java/org/apache/catalina/core/LocalStrings_ja.properties +@@ -59,6 +59,7 @@ + applicationFilterRegistration.nullInitParams=キー [{0}] または値 [{1}] のいずれかが null のためフィルターの初期化パラメータを設定できませんでした。 + + applicationHttpRequest.fragmentInDispatchPath=ディスパッチパス [{0}] 中のフラグメントは除去されました ++applicationHttpRequest.sessionEndAccessFail=リクエストの再利用中に行ったセッションへのアクセス終了処理で例外が送出されました。 + + applicationPushBuilder.methodInvalid=プッシュリクエストの HTTP メソッドはキャッシュ可能、かつ、安全でなければなりません。[{0}] は指定できません。 + applicationPushBuilder.methodNotToken=HTTP メソッド [{0}] にトークンとして利用できない文字が含まれています。 +--- a/java/org/apache/catalina/core/LocalStrings_ko.properties ++++ b/java/org/apache/catalina/core/LocalStrings_ko.properties +@@ -59,6 +59,7 @@ + applicationFilterRegistration.nullInitParams=널인 이름 또는 값 때문에, 필터의 초기화 파라미터를 설정할 수 없습니다. 이름: [{0}], 값: [{1}] + + applicationHttpRequest.fragmentInDispatchPath=디스패치 경로 [{0}](으)로부터 URI fragment를 제거했습니다. ++applicationHttpRequest.sessionEndAccessFail=요청을 참조 해제하는 과정에서, 세션에 대한 접근을 종료시키려 개시하는 중 예외 발생 + + applicationPushBuilder.methodInvalid=PUSH 요청을 위한 HTTP 메소드는 반드시 캐시 가능하고 안전해야 하는데, [{0}]은(는) 그렇지 않습니다. + applicationPushBuilder.methodNotToken=HTTP 메소드들은 토큰들이어야 하지만, [{0}]은(는) 토큰이 아닌 문자를 포함하고 있습니다. 
+--- a/java/org/apache/catalina/core/LocalStrings_zh_CN.properties ++++ b/java/org/apache/catalina/core/LocalStrings_zh_CN.properties +@@ -60,6 +60,7 @@ + applicationFilterRegistration.nullInitParams=由于name和(或)value为null,无法为过滤器设置初始化参数。name为 [{0}],value为 [{1}] + + applicationHttpRequest.fragmentInDispatchPath=调度路径[{0}]中的片段已被删除 ++applicationHttpRequest.sessionEndAccessFail=在回收请求时,异常触发了对会话的结束访问。 + + applicationPushBuilder.methodInvalid=推送请求的HTTP方法必须既可缓存又安全,但是[{0}]不是 + applicationPushBuilder.methodNotToken=HTTP方法必须是令牌(token),但 [{0}] 包含非令牌字符 +--- a/java/org/apache/tomcat/util/buf/B2CConverter.java ++++ b/java/org/apache/tomcat/util/buf/B2CConverter.java +@@ -27,6 +27,9 @@ + import java.nio.charset.StandardCharsets; + import java.util.Locale; + ++import org.apache.juli.logging.Log; ++import org.apache.juli.logging.LogFactory; ++import org.apache.tomcat.util.ExceptionUtils; + import org.apache.tomcat.util.res.StringManager; + + /** +@@ -34,6 +37,7 @@ + */ + public class B2CConverter { + ++ private static final Log log = LogFactory.getLog(B2CConverter.class); + private static final StringManager sm = StringManager.getManager(B2CConverter.class); + + private static final CharsetCache charsetCache = new CharsetCache(); +@@ -106,7 +110,12 @@ + * Reset the decoder state. + */ + public void recycle() { +- decoder.reset(); ++ try { ++ decoder.reset(); ++ } catch (Throwable t) { ++ ExceptionUtils.handleThrowable(t); ++ log.warn(sm.getString("b2cConverter.decoderResetFail", decoder.charset()), t); ++ } + leftovers.position(0); + } + +--- a/java/org/apache/tomcat/util/buf/C2BConverter.java ++++ b/java/org/apache/tomcat/util/buf/C2BConverter.java +@@ -24,11 +24,19 @@ + import java.nio.charset.CoderResult; + import java.nio.charset.CodingErrorAction; + ++import org.apache.juli.logging.Log; ++import org.apache.juli.logging.LogFactory; ++import org.apache.tomcat.util.ExceptionUtils; ++import org.apache.tomcat.util.res.StringManager; ++ + /** + * NIO based character encoder. 
+ */ + public final class C2BConverter { + ++ private static final Log log = LogFactory.getLog(C2BConverter.class); ++ private static final StringManager sm = StringManager.getManager(C2BConverter.class); ++ + private final CharsetEncoder encoder; + private ByteBuffer bb = null; + private CharBuffer cb = null; +@@ -50,7 +58,12 @@ + * Reset the encoder state. + */ + public void recycle() { +- encoder.reset(); ++ try { ++ encoder.reset(); ++ } catch (Throwable t) { ++ ExceptionUtils.handleThrowable(t); ++ log.warn(sm.getString("c2bConverter.encoderResetFail", encoder.charset()), t); ++ } + leftovers.position(0); + } + +--- a/java/org/apache/tomcat/util/buf/LocalStrings.properties ++++ b/java/org/apache/tomcat/util/buf/LocalStrings.properties +@@ -16,10 +16,13 @@ + asn1Parser.lengthInvalid=Invalid length [{0}] bytes reported when the input data length is [{1}] bytes + asn1Parser.tagMismatch=Expected to find value [{0}] but found value [{1}] + ++b2cConverter.decoderResetFail=Failed to reset instance of decoder for character set [{0}] + b2cConverter.unknownEncoding=The character encoding [{0}] is not supported + + byteBufferUtils.cleaner=Cannot use direct ByteBuffer cleaner, memory leaking may occur + ++c2bConverter.encoderResetFail=Failed to reset instance of encoder for character set [{0}] ++ + chunk.overflow=Buffer overflow and no sink is set, limit [{0}] and buffer length [{1}] + + encodedSolidusHandling.invalid=The value [{0}] is not recognised diff -Nru tomcat9-9.0.43/debian/patches/CVE-2023-44487.patch tomcat9-9.0.43/debian/patches/CVE-2023-44487.patch --- tomcat9-9.0.43/debian/patches/CVE-2023-44487.patch 1970-01-01 00:00:00.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/CVE-2023-44487.patch 2023-10-10 15:06:59.000000000 +0000 @@ -0,0 +1,294 @@ +Description: Improvements to HTTP/2 overhead protection. 
+Origin: backport, https://github.com/apache/tomcat/commit/30cae120a61f075b1712f2e8da4daa23f1135c83 + https://github.com/apache/tomcat/commit/94480483910f2d19561e88fb194d7b415bb527da + https://github.com/apache/tomcat/commit/3f0efca913b09fa3a3d9c246cc29045ac8a2befe + https://github.com/apache/tomcat/commit/6d1a9fd6642387969e4410b9989c85856b74917a +--- a/java/org/apache/coyote/http2/Http2Protocol.java ++++ b/java/org/apache/coyote/http2/Http2Protocol.java +@@ -64,8 +64,14 @@ + // Maximum amount of streams which can be concurrently executed over + // a single connection + static final int DEFAULT_MAX_CONCURRENT_STREAM_EXECUTION = 20; +- +- static final int DEFAULT_OVERHEAD_COUNT_FACTOR = 1; ++ // Default factor used when adjusting overhead count for overhead frames ++ static final int DEFAULT_OVERHEAD_COUNT_FACTOR = 10; ++ // Default factor used when adjusting overhead count for reset frames ++ static final int DEFAULT_OVERHEAD_RESET_FACTOR = 50; ++ // Not currently configurable. This makes the practical limit for ++ // overheadCountFactor to be ~20. The exact limit will vary with traffic ++ // patterns. 
++ static final int DEFAULT_OVERHEAD_REDUCTION_FACTOR = -20; + static final int DEFAULT_OVERHEAD_CONTINUATION_THRESHOLD = 1024; + static final int DEFAULT_OVERHEAD_DATA_THRESHOLD = 1024; + static final int DEFAULT_OVERHEAD_WINDOW_UPDATE_THRESHOLD = 1024; +@@ -96,6 +102,7 @@ + private int maxTrailerCount = Constants.DEFAULT_MAX_TRAILER_COUNT; + private int maxTrailerSize = Constants.DEFAULT_MAX_TRAILER_SIZE; + private int overheadCountFactor = DEFAULT_OVERHEAD_COUNT_FACTOR; ++ private int overheadResetFactor = DEFAULT_OVERHEAD_RESET_FACTOR; + private int overheadContinuationThreshold = DEFAULT_OVERHEAD_CONTINUATION_THRESHOLD; + private int overheadDataThreshold = DEFAULT_OVERHEAD_DATA_THRESHOLD; + private int overheadWindowUpdateThreshold = DEFAULT_OVERHEAD_WINDOW_UPDATE_THRESHOLD; +@@ -343,6 +350,20 @@ + } + + ++ public int getOverheadResetFactor() { ++ return overheadResetFactor; ++ } ++ ++ ++ public void setOverheadResetFactor(int overheadResetFactor) { ++ if (overheadResetFactor < 0) { ++ this.overheadResetFactor = 0; ++ } else { ++ this.overheadResetFactor = overheadResetFactor; ++ } ++ } ++ ++ + public int getOverheadContinuationThreshold() { + return overheadContinuationThreshold; + } +--- a/java/org/apache/coyote/http2/Http2UpgradeHandler.java ++++ b/java/org/apache/coyote/http2/Http2UpgradeHandler.java +@@ -349,7 +349,7 @@ + stream.close(se); + } + } finally { +- if (overheadCount.get() > 0) { ++ if (isOverheadLimitExceeded()) { + throw new ConnectionException( + sm.getString("upgradeHandler.tooMuchOverhead", connectionId), + Http2Error.ENHANCE_YOUR_CALM); +@@ -750,7 +750,7 @@ + Integer.toString(len))); + } + +- reduceOverheadCount(); ++ reduceOverheadCount(FrameType.DATA); + + // Need to check this now since sending end of stream will change this. 
+ boolean writeable = stream.canWrite(); +@@ -1362,13 +1362,54 @@ + } + + +- private void reduceOverheadCount() { +- overheadCount.decrementAndGet(); ++ private void reduceOverheadCount(FrameType frameType) { ++ // A non-overhead frame reduces the overhead count by ++ // Http2Protocol.DEFAULT_OVERHEAD_REDUCTION_FACTOR. A simple browser ++ // request is likely to have one non-overhead frame (HEADERS) and one ++ // overhead frame (REPRIORITISE). With the default settings the overhead ++ // count will remain unchanged for each simple request. ++ // Requests and responses with bodies will create additional ++ // non-overhead frames, further reducing the overhead count. ++ updateOverheadCount(frameType, Http2Protocol.DEFAULT_OVERHEAD_REDUCTION_FACTOR); + } + + +- private void increaseOverheadCount() { +- overheadCount.addAndGet(getProtocol().getOverheadCountFactor()); ++ private void increaseOverheadCount(FrameType frameType) { ++ // An overhead frame increases the overhead count by ++ // overheadCountFactor. By default, this means an overhead frame ++ // increases the overhead count by 1. A simple browser request is likely ++ // to have one non-overhead frame (HEADERS) and one overhead frame ++ // (REPRIORITISE). With the default settings the overhead count will ++ // remain unchanged for each simple request. ++ updateOverheadCount(frameType, getProtocol().getOverheadCountFactor()); ++ } ++ ++ ++ private void increaseOverheadCount(FrameType frameType, int increment) { ++ // Overhead frames that indicate inefficient (and potentially malicious) ++ // use of small frames trigger an increase that is inversely ++ // proportional to size. The default threshold for all three potential ++ // areas for abuse (HEADERS, DATA, WINDOW_UPDATE) is 1024 bytes. Frames ++ // with sizes smaller than this will trigger an increase of ++ // threshold/size. 
++ // DATA and WINDOW_UPDATE take an average over the last two non-final ++ // frames to allow for client buffering schemes that can result in some ++ // small DATA payloads. ++ updateOverheadCount(frameType, increment); ++ } ++ ++ ++ private void updateOverheadCount(FrameType frameType, int increment) { ++ long newOverheadCount = overheadCount.addAndGet(increment); ++ if (log.isDebugEnabled()) { ++ log.debug(sm.getString("upgradeHandler.overheadChange", ++ connectionId, getIdAsString(), frameType.name(), Long.valueOf(newOverheadCount))); ++ } ++ } ++ ++ ++ boolean isOverheadLimitExceeded() { ++ return overheadCount.get() > 0; + } + + +@@ -1427,7 +1468,7 @@ + @Override + public ByteBuffer startRequestBodyFrame(int streamId, int payloadSize, boolean endOfStream) throws Http2Exception { + // DATA frames reduce the overhead count ... +- reduceOverheadCount(); ++ reduceOverheadCount(FrameType.DATA); + + // .. but lots of small payloads are inefficient so that will increase + // the overhead count unless it is the final DATA frame where small +@@ -1446,7 +1487,7 @@ + average = 1; + } + if (average < overheadThreshold) { +- overheadCount.addAndGet(overheadThreshold / average); ++ increaseOverheadCount(FrameType.DATA, overheadThreshold / average); + } + } + +@@ -1521,7 +1562,7 @@ + log.debug(sm.getString("upgradeHandler.noNewStreams", + connectionId, Integer.toString(streamId))); + } +- reduceOverheadCount(); ++ reduceOverheadCount(FrameType.HEADERS); + // Stateless so a static can be used to save on GC + return HEADER_SINK; + } +@@ -1549,7 +1590,7 @@ + getConnectionId(), Integer.valueOf(streamId)), Http2Error.PROTOCOL_ERROR); + } + +- increaseOverheadCount(); ++ increaseOverheadCount(FrameType.PRIORITY); + + AbstractNonZeroStream abstractNonZeroStream = getStreamMayBeClosed(streamId, false); + if (abstractNonZeroStream == null) { +@@ -1575,9 +1616,9 @@ + if (payloadSize < overheadThreshold) { + if (payloadSize == 0) { + // Avoid division by zero +- 
overheadCount.addAndGet(overheadThreshold); ++ increaseOverheadCount(FrameType.HEADERS, overheadThreshold); + } else { +- overheadCount.addAndGet(overheadThreshold / payloadSize); ++ increaseOverheadCount(FrameType.HEADERS, overheadThreshold / payloadSize); + } + } + } +@@ -1596,13 +1637,13 @@ + if (localSettings.getMaxConcurrentStreams() < activeRemoteStreamCount.incrementAndGet()) { + setConnectionTimeoutForStreamCount(activeRemoteStreamCount.decrementAndGet()); + // Ignoring maxConcurrentStreams increases the overhead count +- increaseOverheadCount(); ++ increaseOverheadCount(FrameType.HEADERS); + throw new StreamException(sm.getString("upgradeHandler.tooManyRemoteStreams", + Long.toString(localSettings.getMaxConcurrentStreams())), + Http2Error.REFUSED_STREAM, streamId); + } + // Valid new stream reduces the overhead count +- reduceOverheadCount(); ++ reduceOverheadCount(FrameType.HEADERS); + + processStreamOnContainerThread(stream); + } +@@ -1624,6 +1665,7 @@ + log.debug(sm.getString("upgradeHandler.reset.receive", getConnectionId(), Integer.toString(streamId), + Long.toString(errorCode))); + } ++ increaseOverheadCount(FrameType.RST); + AbstractNonZeroStream abstractNonZeroStream = getStreamMayBeClosed(streamId, true); + abstractNonZeroStream.checkState(FrameType.RST); + if (abstractNonZeroStream instanceof Stream) { +@@ -1640,7 +1682,7 @@ + @Override + public void setting(Setting setting, long value) throws ConnectionException { + +- increaseOverheadCount(); ++ increaseOverheadCount(FrameType.SETTINGS); + + // Possible with empty settings frame + if (setting == null) { +@@ -1689,7 +1731,7 @@ + @Override + public void pingReceive(byte[] payload, boolean ack) throws IOException { + if (!ack) { +- increaseOverheadCount(); ++ increaseOverheadCount(FrameType.PING); + } + pingManager.receivePing(payload, ack); + } +@@ -1725,7 +1767,7 @@ + // Check for small increments which are inefficient + if (average < overheadThreshold) { + // The smaller the increment, the 
larger the overhead +- overheadCount.addAndGet(overheadThreshold / average); ++ increaseOverheadCount(FrameType.WINDOW_UPDATE, overheadThreshold / average); + } + + incrementWindowSize(increment); +@@ -1739,7 +1781,7 @@ + BacklogTracker tracker = backLogStreams.get(stream); + if (tracker == null || increment < tracker.getRemainingReservation()) { + // The smaller the increment, the larger the overhead +- overheadCount.addAndGet(overheadThreshold / average); ++ increaseOverheadCount(FrameType.WINDOW_UPDATE, overheadThreshold / average); + } + } + +--- a/java/org/apache/coyote/http2/LocalStrings.properties ++++ b/java/org/apache/coyote/http2/LocalStrings.properties +@@ -126,6 +126,7 @@ + upgradeHandler.ioerror=Connection [{0}] + upgradeHandler.noAllocation=Connection [{0}], Stream [{1}], Timeout waiting for allocation + upgradeHandler.noNewStreams=Connection [{0}], Stream [{1}], Stream ignored as no new streams are permitted on this connection ++upgradeHandler.overheadChange=Connection [{0}], Stream [{1}], Frame type [{2}] resulted in new overhead count of [{3}] + upgradeHandler.pause.entry=Connection [{0}] Pausing + upgradeHandler.pingFailed=Connection [{0}] Failed to send ping to client + upgradeHandler.prefaceReceived=Connection [{0}], Connection preface received from client +--- a/java/org/apache/coyote/http2/Http2AsyncParser.java ++++ b/java/org/apache/coyote/http2/Http2AsyncParser.java +@@ -276,6 +276,7 @@ + readUnknownFrame(streamId, frameType, flags, payloadSize, payload); + } + } ++ if (!upgradeHandler.isOverheadLimitExceeded()) { + // See if there is a new 9 byte header and continue parsing if possible + if (payload.remaining() >= 9) { + int position = payload.position(); +@@ -299,12 +300,14 @@ + } + } + } ++ } + } while (continueParsing); + } catch (RuntimeException | IOException | Http2Exception e) { + error = e; +- } +- if (payload.hasRemaining()) { +- socketWrapper.unRead(payload); ++ } finally { ++ if (payload.hasRemaining()) { ++ 
socketWrapper.unRead(payload); ++ } + } + } + if (state == CompletionState.DONE) { +--- a/webapps/docs/config/http2.xml ++++ b/webapps/docs/config/http2.xml +@@ -230,6 +230,13 @@ + used.

+
+ ++ ++

The amount by which the overhead count (see ++ overheadCountFactor) will be increased for each reset ++ frame received. If not specified, a default value of 50 will ++ be used. A value of less than zero will be treated as zero.

++
++ + +

The threshold below which the average payload size of the current and + previous non-final DATA frames will trigger an increase in diff -Nru tomcat9-9.0.43/debian/patches/CVE-2023-45648.patch tomcat9-9.0.43/debian/patches/CVE-2023-45648.patch --- tomcat9-9.0.43/debian/patches/CVE-2023-45648.patch 1970-01-01 00:00:00.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/CVE-2023-45648.patch 2023-10-10 16:06:05.000000000 +0000 @@ -0,0 +1,82 @@ +Description: Align processing of trailer headers with standard processing +Origin: upstream, https://github.com/apache/tomcat/commit/59583245639d8c42ae0009f4a4a70464d3ea70a0 +--- a/java/org/apache/coyote/http11/Http11InputBuffer.java ++++ b/java/org/apache/coyote/http11/Http11InputBuffer.java +@@ -818,6 +818,12 @@ + */ + private HeaderParseStatus parseHeader() throws IOException { + ++ /* ++ * Implementation note: Any changes to this method probably need to be echoed in ++ * ChunkedInputFilter.parseHeader(). Why not use a common implementation? In short, this code uses non-blocking ++ * reads whereas ChunkedInputFilter using blocking reads. The code is just different enough that a common ++ * implementation wasn't viewed as practical. 
++ */ + while (headerParsePos == HeaderParsePosition.HEADER_START) { + + // Read new bytes if needed +@@ -950,7 +956,7 @@ + } else if (prevChr == Constants.CR) { + // Invalid value - also need to delete header + return skipLine(true); +- } else if (chr != Constants.HT && HttpParser.isControl(chr)) { ++ } else if (HttpParser.isControl(chr) && chr != Constants.HT) { + // Invalid value - also need to delete header + return skipLine(true); + } else if (chr == Constants.SP || chr == Constants.HT) { +--- a/java/org/apache/coyote/http11/filters/ChunkedInputFilter.java ++++ b/java/org/apache/coyote/http11/filters/ChunkedInputFilter.java +@@ -30,6 +30,7 @@ + import org.apache.coyote.http11.InputFilter; + import org.apache.tomcat.util.buf.ByteChunk; + import org.apache.tomcat.util.buf.HexUtils; ++import org.apache.tomcat.util.http.parser.HttpParser; + import org.apache.tomcat.util.net.ApplicationBufferHandler; + import org.apache.tomcat.util.res.StringManager; + +@@ -443,6 +444,13 @@ + + private boolean parseHeader() throws IOException { + ++ /* ++ * Implementation note: Any changes to this method probably need to be echoed in ++ * Http11InputBuffer.parseHeader(). Why not use a common implementation? In short, this code uses blocking ++ * reads whereas Http11InputBuffer using non-blocking reads. The code is just different enough that a common ++ * implementation wasn't viewed as practical. 
++ */ ++ + Map headers = request.getTrailerFields(); + + byte chr = 0; +@@ -489,6 +497,9 @@ + + if (chr == Constants.COLON) { + colon = true; ++ } else if (!HttpParser.isToken(chr)) { ++ // Non-token characters are illegal in header names ++ throw new IOException(sm.getString("chunkedInputFilter.invalidTrailerHeaderName")); + } else { + trailingHeaders.append(chr); + } +@@ -550,7 +561,9 @@ + if (chr == Constants.CR || chr == Constants.LF) { + parseCRLF(true); + eol = true; +- } else if (chr == Constants.SP) { ++ } else if (HttpParser.isControl(chr) && chr != Constants.HT) { ++ throw new IOException(sm.getString("chunkedInputFilter.invalidTrailerHeaderValue")); ++ } else if (chr == Constants.SP || chr == Constants.HT) { + trailingHeaders.append(chr); + } else { + trailingHeaders.append(chr); +--- a/java/org/apache/coyote/http11/filters/LocalStrings.properties ++++ b/java/org/apache/coyote/http11/filters/LocalStrings.properties +@@ -21,6 +21,8 @@ + chunkedInputFilter.invalidCrlfNoCR=Invalid end of line sequence (No CR before LF) + chunkedInputFilter.invalidCrlfNoData=Invalid end of line sequence (no data available to read) + chunkedInputFilter.invalidHeader=Invalid chunk header ++chunkedInputFilter.invalidTrailerHeaderName=Invalid trailer header name (non-token character in name) ++chunkedInputFilter.invalidTrailerHeaderValue=Invalid trailer header value (control character in value) + chunkedInputFilter.maxExtension=maxExtensionSize exceeded + chunkedInputFilter.maxTrailer=maxTrailerSize exceeded + diff -Nru tomcat9-9.0.43/debian/patches/series tomcat9-9.0.43/debian/patches/series --- tomcat9-9.0.43/debian/patches/series 2023-04-05 06:23:08.000000000 +0000 +++ tomcat9-9.0.43/debian/patches/series 2023-10-10 16:05:01.000000000 +0000 @@ -20,4 +20,10 @@ CVE-2021-43980.patch CVE-2022-42252.patch CVE-2022-45143.patch +CVE-2023-24998.patch CVE-2023-28708.patch +CVE-2023-28709.patch +CVE-2023-41080.patch +CVE-2023-42795.patch +CVE-2023-44487.patch +CVE-2023-45648.patch