Version in base suite: 2.30.0-4
Base version: swift_2.30.0-4
Target version: swift_2.30.1-0+deb12u1
Base file: /srv/ftp-master.debian.org/ftp/pool/main/s/swift/swift_2.30.0-4.dsc
Target file: /srv/ftp-master.debian.org/policy/pool/main/s/swift/swift_2.30.1-0+deb12u1.dsc

 .gitreview                                                            |    1
 .mailmap                                                              |    1
 .zuul.yaml                                                            |   75 -
 AUTHORS                                                               |    3
 CHANGELOG                                                             |   13
 debian/changelog                                                      |   30
 debian/control                                                        |    4
 debian/patches/CVE-2022-47950-stable-zed.patch                        |  336 ------
 debian/patches/Change_getting_major_minor_of_blkdev.patch             |  101 ++
 debian/patches/bug-2119646-swift.patch                                |  124 ++
 debian/patches/drive-full-checker.patch                               |  494 ++++++++++
 debian/patches/kms_keymaster-allow-specifying-barbican_endpoint.patch |   51 +
 debian/patches/series                                                 |    6
 debian/patches/swift-recon-only-query-object-servers-once.patch       |   21
 debian/rules                                                          |   12
 debian/swift.install                                                  |    2
 debian/tests/unittests                                                |    7
 py2-constraints.txt                                                   |    1
 releasenotes/notes/2_30_1_release-856dd70ec466aa74.yaml               |   13
 swift/__init__.py                                                     |   10
 swift/common/http_protocol.py                                         |  320 ++++++
 swift/common/middleware/s3api/etree.py                                |    2
 swift/common/wsgi.py                                                  |  234 ----
 test/functional/__init__.py                                           |    3
 test/functional/s3api/test_xxe_injection.py                           |  231 ++++
 test/unit/common/middleware/s3api/test_multi_delete.py                |   40
 test/unit/common/test_http_protocol.py                                |  412 ++++++++
 test/unit/common/test_wsgi.py                                         |  335 ------
 test/unit/helpers.py                                                  |    2
 test/unit/proxy/test_server.py                                        |    3
 tools/playbooks/common/install_dependencies.yaml                      |   20
 tools/playbooks/dsvm/pre.yaml                                         |    8
 tools/playbooks/multinode_setup/common_config.yaml                    |    4
 tools/playbooks/multinode_setup/make_rings.yaml                       |    8
 tools/playbooks/multinode_setup/pre.yaml                              |    8
 tools/playbooks/multinode_setup/run.yaml                              |    2
 tools/playbooks/multinode_setup/templates/make_multinode_rings.j2     |    2
 tools/playbooks/s3api-tests/run.yaml                                  |    8
 tools/playbooks/saio_single_node_setup/setup_saio.yaml                |   14
 tools/test-setup.sh                                                   |   12
 tox.ini                                                               |    7
 41 files changed, 2004 insertions(+), 976 deletions(-)

diff -Nru swift-2.30.0/.gitreview swift-2.30.1/.gitreview
--- swift-2.30.0/.gitreview  2022-08-18 05:21:45.000000000 +0000
+++ swift-2.30.1/.gitreview  2023-01-30 23:23:08.000000000 +0000
@@ -2,3 +2,4 @@
 host=review.opendev.org
 port=29418
 project=openstack/swift.git
+defaultbranch=stable/zed
diff -Nru swift-2.30.0/.mailmap swift-2.30.1/.mailmap
--- swift-2.30.0/.mailmap  2022-08-18 05:21:45.000000000 +0000
+++ swift-2.30.1/.mailmap  2023-01-30 23:23:08.000000000 +0000
@@ -134,3 +134,4 @@
 melissaml
 Ashwin Nair indianwhocodes
 Romain de Joux
+Takashi Natsume
diff -Nru swift-2.30.0/.zuul.yaml swift-2.30.1/.zuul.yaml
--- swift-2.30.0/.zuul.yaml  2022-08-18 05:21:45.000000000 +0000
+++ swift-2.30.1/.zuul.yaml  2023-01-30 23:23:08.000000000 +0000
@@ -156,22 +156,22 @@
       python_version: 3.8
 
 - job:
-    name: swift-tox-func-py36-centos-8-stream
+    name: swift-tox-func-py39-centos-9-stream
     parent: swift-tox-func-py38
-    nodeset: centos-8-stream
+    nodeset: centos-9-stream
     vars:
-      bindep_profile: test py36
-      python_version: 3.6
+      bindep_profile: test py39
+      python_version: 3.9
 
 - job:
-    name: swift-tox-func-encryption-py36-centos-8-stream
-    parent: swift-tox-func-py36-centos-8-stream
+    name: swift-tox-func-encryption-py39-centos-9-stream
+    parent: swift-tox-func-py39-centos-9-stream
     vars:
       tox_envlist: func-encryption-py3
 
 - job:
-    name: swift-tox-func-ec-py36-centos-8-stream
-    parent: swift-tox-func-py36-centos-8-stream
+    name: swift-tox-func-ec-py39-centos-9-stream
+    parent: swift-tox-func-py39-centos-9-stream
     vars:
       tox_envlist: func-ec-py3
 
@@ -345,30 +345,36 @@
 
 - job:
     name: swift-probetests-centos-7
-    parent: unittests
+    parent: swift-probetests-centos-8-stream
     nodeset: centos-7
     description: |
       Setup a SAIO dev environment and run Swift's probe tests
       under Python 2.
-    timeout: 7200
     vars:
       bindep_profile: test py27
-    pre-run:
-      - tools/playbooks/common/install_dependencies.yaml
-      - tools/playbooks/saio_single_node_setup/setup_saio.yaml
-      - tools/playbooks/saio_single_node_setup/make_rings.yaml
-    run: tools/playbooks/probetests/run.yaml
-    post-run: tools/playbooks/probetests/post.yaml
+      ensure_pip_from_packages: False
+      ensure_pip_from_packages_with_python2: False
+      ensure_pip_from_upstream: True
+      ensure_pip_from_upstream_interpreters:
+        - python2
+      ensure_pip_from_upstream_url: "https://bootstrap.pypa.io/pip/2.7/get-pip.py"
 
 - job:
     name: swift-probetests-centos-8-stream
-    parent: swift-probetests-centos-7
+    parent: unittests
     nodeset: centos-8-stream
     description: |
       Setup a SAIO dev environment and run Swift's probe tests
       under Python 3.
+    timeout: 7200
     vars:
       bindep_profile: test py36
+    pre-run:
+      - tools/playbooks/common/install_dependencies.yaml
+      - tools/playbooks/saio_single_node_setup/setup_saio.yaml
+      - tools/playbooks/saio_single_node_setup/make_rings.yaml
+    run: tools/playbooks/probetests/run.yaml
+    post-run: tools/playbooks/probetests/post.yaml
 
 - job:
     name: swift-probetests-centos-8-stream-arm64
@@ -585,32 +591,32 @@
     vars: *swift_image_vars_py3
 
 - job:
-    name: swift-tox-func-py36-centos-8-stream-fips
-    parent: swift-tox-func-py36-centos-8-stream
+    name: swift-tox-func-py39-centos-9-stream-fips
+    parent: swift-tox-func-py39-centos-9-stream
     voting: false
     description: |
-      Functional testing on a FIPS enabled Centos 8 system
+      Functional testing on a FIPS enabled Centos 9 system
     vars:
       nslookup_target: 'opendev.org'
      enable_fips: true
 
 - job:
-    name: swift-tox-func-encryption-py36-centos-8-stream-fips
-    parent: swift-tox-func-encryption-py36-centos-8-stream
+    name: swift-tox-func-encryption-py39-centos-9-stream-fips
+    parent: swift-tox-func-encryption-py39-centos-9-stream
     voting: false
     description: |
       Functional encryption testing on a FIPS enabled
-      Centos 8 system
+      Centos 9 system
     vars:
       nslookup_target: 'opendev.org'
       enable_fips: true
 
 - job:
-    name: swift-tox-func-ec-py36-centos-8-stream-fips
-    parent: swift-tox-func-ec-py36-centos-8-stream
+    name: swift-tox-func-ec-py39-centos-9-stream-fips
+    parent: swift-tox-func-ec-py39-centos-9-stream
     voting: false
     description: |
-      Functional EC testing on a FIPS enabled Centos 8 system
+      Functional EC testing on a FIPS enabled Centos 9 system
     vars:
       nslookup_target: 'opendev.org'
       enable_fips: true
@@ -628,6 +634,8 @@
         - swift-tox-func-py38-arm64
 
 - project:
+    vars:
+      ensure_tox_version: '<4'
     templates:
       - publish-openstack-docs-pti
       - periodic-stable-jobs
@@ -637,14 +645,14 @@
       - swift-jobs-arm64
     check:
       jobs:
-        - swift-tox-func-py36-centos-8-stream-fips:
+        - swift-tox-func-py39-centos-9-stream-fips:
            irrelevant-files: &functest-irrelevant-files
              - ^(api-ref|doc|releasenotes)/.*$
              - ^test/(cors|probe|s3api)/.*$
              - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
-        - swift-tox-func-encryption-py36-centos-8-stream-fips:
+        - swift-tox-func-encryption-py39-centos-9-stream-fips:
            irrelevant-files: *functest-irrelevant-files
-        - swift-tox-func-ec-py36-centos-8-stream-fips:
+        - swift-tox-func-ec-py39-centos-9-stream-fips:
            irrelevant-files: *functest-irrelevant-files
        - swift-build-image:
            irrelevant-files: &docker-irrelevant-files
@@ -724,6 +732,7 @@
             - ^doc/(requirements.txt|(saio|s3api|source)/.*)$
        - swift-multinode-rolling-upgrade:
            irrelevant-files: *functest-irrelevant-files
+           voting: false
        - tempest-integrated-object-storage:
            irrelevant-files: &tempest-irrelevant-files
              - ^(api-ref|doc|releasenotes)/.*$
@@ -766,8 +775,6 @@
            irrelevant-files: *unittest-irrelevant-files
        - openstack-tox-pep8:
            irrelevant-files: *pep8-irrelevant-files
-       - swift-multinode-rolling-upgrade:
-           irrelevant-files: *functest-irrelevant-files
        - tempest-integrated-object-storage:
            irrelevant-files: *tempest-irrelevant-files
        - tempest-ipv6-only:
@@ -783,9 +790,9 @@
          - swift-tox-func-encryption-py27-centos-7
          - swift-tox-func-ec-py27-centos-7
          - swift-tox-py36-centos-8-stream
-         - swift-tox-func-py36-centos-8-stream
-         - swift-tox-func-encryption-py36-centos-8-stream
-         - swift-tox-func-ec-py36-centos-8-stream
+         - swift-tox-func-py39-centos-9-stream
+         - swift-tox-func-encryption-py39-centos-9-stream
+         - swift-tox-func-ec-py39-centos-9-stream
          - swift-multinode-rolling-upgrade-rocky
          - swift-multinode-rolling-upgrade-stein
          - swift-multinode-rolling-upgrade-train
diff -Nru swift-2.30.0/AUTHORS swift-2.30.1/AUTHORS
--- swift-2.30.0/AUTHORS  2022-08-18 05:21:45.000000000 +0000
+++ swift-2.30.1/AUTHORS  2023-01-30 23:23:08.000000000 +0000
@@ -40,6 +40,7 @@
 Ade Lee (alee@redhat.com)
 Adrian Smith (adrian_f_smith@dell.com)
 Adrien Pensart (adrien.pensart@corp.ovh.com)
+afariasa (afariasa@redhat.com)
 Akihiro Motoki (amotoki@gmail.com)
 Akihito Takai (takaiak@nttdata.co.jp)
 Alex Gaynor (alex.gaynor@gmail.com)
@@ -399,7 +400,7 @@
 Steven Lang (Steven.Lang@hgst.com)
 Sushil Kumar (sushil.kumar2@globallogic.com)
 Takashi Kajinami (tkajinam@redhat.com)
-Takashi Natsume (natsume.takashi@lab.ntt.co.jp)
+Takashi Natsume (takanattie@gmail.com)
 TheSriram (sriram@klusterkloud.com)
 Thiago da Silva (thiagodasilva@gmail.com)
 Thibault Person (thibault.person@ovhcloud.com)
diff -Nru swift-2.30.0/CHANGELOG swift-2.30.1/CHANGELOG
--- swift-2.30.0/CHANGELOG  2022-08-18 05:21:45.000000000 +0000
+++ swift-2.30.1/CHANGELOG  2023-01-30 23:23:08.000000000 +0000
@@ -1,4 +1,15 @@
-swift (2.30.0)
+swift (2.30.1, zed stable backports)
+
+    * Fixed a security issue in how `s3api` handles XML parsing that allowed
+      authenticated S3 clients to read arbitrary files from proxy servers.
+      Refer to CVE-2022-47950 for more information.
+
+    * Fixed a path-rewriting bug introduced in Python 3.7.14, 3.8.14, 3.9.14,
+      and 3.10.6 that could cause some `domain_remap` requests to be routed to
+      the wrong object.
+
+
+swift (2.30.0, OpenStack Zed)
 
     * Sharding improvements
 
diff -Nru swift-2.30.0/debian/changelog swift-2.30.1/debian/changelog
--- swift-2.30.0/debian/changelog  2023-01-19 14:43:31.000000000 +0000
+++ swift-2.30.1/debian/changelog  2025-11-11 08:06:52.000000000 +0000
@@ -1,3 +1,33 @@
+swift (2.30.1-0+deb12u1) bookworm-security; urgency=medium
+
+  [ Thomas Goirand ]
+  * New upstream release.
+  * Removed CVE-2022-47950-stable-zed.patch, applied upstream.
+  * Add swift-recon-only-query-object-servers-once.patch.
+  * Add drive-full-checker.patch.
+  * Blacklist tests:
+    - test_get_conns_hostname6
+    - test_get_conns_v6
+    - test_get_conns_v6_default
+  * Add kms_keymaster-allow-specifying-barbican_endpoint.patch.
+  * kay reported a vulnerability in Keystone's ec2tokens and s3tokens APIs.
+    By sending those endpoints a valid AWS Signature (e.g., from a presigned
+    S3 URL), an unauthenticated attacker may obtain Keystone authorization
+    (ec2tokens can yield a fully scoped token; s3tokens can reveal scope
+    accepted by some services), resulting in unauthorized access and
+    privilege escalation. Deployments where /v3/ec2tokens or /v3/s3tokens
+    are reachable by unauthenticated clients (e.g., exposed on a public API)
+    are affected. Add bug-2119646-swift.patch, which offers Swift-side
+    compatibility with the Keystone fix.
+  * Blacklist non-deterministic tests:
+    - test_delete_partition_ssync_with_cleanup_failure
+    - test_cleanup_ondisk_files_commit_window
+
+  [ Philippe SÉRAPHIN ]
+  * Add Change_getting_major_minor_of_blkdev.patch.
+
+ -- Thomas Goirand  Tue, 11 Nov 2025 09:06:52 +0100
+
 swift (2.30.0-4) unstable; urgency=high
 
   * CVE-2022-47950: Arbitrary file access through custom S3 XML entities.
diff -Nru swift-2.30.0/debian/control swift-2.30.1/debian/control
--- swift-2.30.0/debian/control  2023-01-19 14:43:31.000000000 +0000
+++ swift-2.30.1/debian/control  2025-11-11 08:06:52.000000000 +0000
@@ -39,6 +39,8 @@
                python3-openstackdocstheme (>= 1.11.0),
                python3-os-api-ref (>= 1.0.0),
                python3-os-testr (>= 0.8.0),
+               python3-oslo.config,
+               python3-oslo.log,
                python3-oslosphinx,
                python3-pastedeploy,
                python3-pyeclib,
@@ -67,6 +69,8 @@
          python3-greenlet,
          python3-netifaces,
          python3-openssl,
+         python3-oslo.config,
+         python3-oslo.log,
          python3-pastedeploy,
          python3-pyeclib,
          python3-six,
diff -Nru swift-2.30.0/debian/patches/CVE-2022-47950-stable-zed.patch swift-2.30.1/debian/patches/CVE-2022-47950-stable-zed.patch
--- swift-2.30.0/debian/patches/CVE-2022-47950-stable-zed.patch  2023-01-19 14:43:31.000000000 +0000
+++ swift-2.30.1/debian/patches/CVE-2022-47950-stable-zed.patch  1970-01-01 00:00:00.000000000 +0000
@@ -1,336 +0,0 @@
-From f4a8d309efd70e2bca3b3bc86e6373418d93a030 Mon Sep 17 00:00:00 2001
-From: Aymeric Ducroquetz
-Date: Tue, 25 Oct 2022 22:07:53 +0200
-Subject: [PATCH] s3api: Prevent XXE injections
-
-Previously, clients could use XML external entities (XXEs) to read
-arbitrary files from proxy-servers and inject the content into the
-request. Since many S3 APIs reflect request content back to the user,
-this could be used to extract any secrets that the swift user could
-read, such as tempauth credentials, keymaster secrets, etc.
-
-Now, disable entity resolution -- any unknown entities will be replaced
-with an empty string. Without resolving the entities, the request is
-still processed.
-
-[CVE-2022-47950]
-
-Closes-Bug: #1998625
-Co-Authored-By: Romain de Joux
-Change-Id: I84494123cfc85e234098c554ecd3e77981f8a096
----
- swift/common/middleware/s3api/etree.py        |   2 +-
- test/functional/s3api/test_xxe_injection.py   | 229 ++++++++++++++++++
- .../middleware/s3api/test_multi_delete.py     |  40 +++
- 3 files changed, 270 insertions(+), 1 deletion(-)
- create mode 100644 test/functional/s3api/test_xxe_injection.py
-
-diff --git a/swift/common/middleware/s3api/etree.py b/swift/common/middleware/s3api/etree.py
-index 987b84a14..e16b75340 100644
---- a/swift/common/middleware/s3api/etree.py
-+++ b/swift/common/middleware/s3api/etree.py
-@@ -130,7 +130,7 @@ class _Element(lxml.etree.ElementBase):
- 
- 
- parser_lookup = lxml.etree.ElementDefaultClassLookup(element=_Element)
--parser = lxml.etree.XMLParser()
-+parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True)
- parser.set_element_class_lookup(parser_lookup)
- 
- Element = parser.makeelement
-diff --git a/test/functional/s3api/test_xxe_injection.py b/test/functional/s3api/test_xxe_injection.py
-new file mode 100644
-index 000000000..ae15e548c
---- /dev/null
-+++ b/test/functional/s3api/test_xxe_injection.py
-@@ -0,0 +1,229 @@
-+#!/usr/bin/env python
-+# Copyright (c) 2022 OpenStack Foundation
-+#
-+# Licensed under the Apache License, Version 2.0 (the "License");
-+# you may not use this file except in compliance with the License.
-+# You may obtain a copy of the License at
-+#
-+#    http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS,
-+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-+# implied.
-+# See the License for the specific language governing permissions and
-+# limitations under the License.
-+
-+import base64
-+import requests
-+
-+import botocore
-+
-+from swift.common.utils import md5
-+
-+import test.functional as tf
-+from test.functional.s3api import S3ApiBaseBoto3
-+
-+
-+class TestS3ApiXxeInjection(S3ApiBaseBoto3):
-+
-+    def setUp(self):
-+        super(TestS3ApiXxeInjection, self).setUp()
-+        self.bucket = 'test-s3api-xxe-injection'
-+
-+    def _create_bucket(self, **kwargs):
-+        resp = self.conn.create_bucket(Bucket=self.bucket, **kwargs)
-+        response_metadata = resp.pop('ResponseMetadata', {})
-+        self.assertEqual(200, response_metadata.get('HTTPStatusCode'))
-+
-+    @staticmethod
-+    def _clear_data(request, **_kwargs):
-+        request.data = b''
-+
-+    def _presign_url(self, method, key=None, **kwargs):
-+        params = {
-+            'Bucket': self.bucket
-+        }
-+        if key:
-+            params['Key'] = key
-+        params.update(kwargs)
-+        try:
-+            # https://github.com/boto/boto3/issues/2192
-+            self.conn.meta.events.register(
-+                'before-sign.s3.*', self._clear_data)
-+            return self.conn.generate_presigned_url(
-+                method, Params=params, ExpiresIn=60)
-+        finally:
-+            self.conn.meta.events.unregister(
-+                'before-sign.s3.*', self._clear_data)
-+
-+    def test_put_bucket_acl(self):
-+        if not tf.cluster_info['s3api'].get('s3_acl'):
-+            self.skipTest('s3_acl must be enabled')
-+
-+        self._create_bucket()
-+
-+        url = self._presign_url('put_bucket_acl')
-+        resp = requests.put(url, data="""
-+ ]>
-+
-+
-+ test:tester
-+ test:tester
-+
-+
-+
-+
-+ name&xxe;
-+ id&xxe;
-+
-+ WRITE
-+
-+
-+
-+""")  # noqa: E501
-+        self.assertEqual(200, resp.status_code)
-+        self.assertNotIn(b'xxe', resp.content)
-+        self.assertNotIn(b'[swift-hash]', resp.content)
-+
-+        acl = self.conn.get_bucket_acl(Bucket=self.bucket)
-+        response_metadata = acl.pop('ResponseMetadata', {})
-+        self.assertEqual(200, response_metadata.get('HTTPStatusCode'))
-+        self.assertDictEqual({
-+            'Owner': {
-+                'DisplayName': 'test:tester',
-+                'ID': 'test:tester'
-+            },
-+            'Grants': [
-+                {
-+                    'Grantee': {
-+                        'DisplayName': 'id',
-+                        'ID': 'id',
-+                        'Type': 'CanonicalUser'
-+                    },
-+                    'Permission': 'WRITE'
-+                }
-+            ]
-+        }, acl)
-+
-+    def test_create_bucket(self):
-+        url = self._presign_url('create_bucket')
-+        resp = requests.put(url, data="""
-+ ]>
-+
-+ &xxe;
-+
-+""")  # noqa: E501
-+        self.assertEqual(400, resp.status_code)
-+        self.assertNotIn(b'xxe', resp.content)
-+        self.assertNotIn(b'[swift-hash]', resp.content)
-+
-+        self.assertRaisesRegex(
-+            botocore.exceptions.ClientError, 'Not Found',
-+            self.conn.head_bucket, Bucket=self.bucket)
-+
-+    def test_delete_objects(self):
-+        self._create_bucket()
-+
-+        url = self._presign_url(
-+            'delete_objects',
-+            Delete={
-+                'Objects': [
-+                    {
-+                        'Key': 'string',
-+                        'VersionId': 'string'
-+                    }
-+                ]
-+            })
-+        body = """
-+ ]>
-+
-+
-+ &xxe;
-+
-+
-+"""
-+        body = body.encode('utf-8')
-+        content_md5 = (
-+            base64.b64encode(md5(body, usedforsecurity=False).digest()))
-+        resp = requests.post(
-+            url, headers={'Content-MD5': content_md5}, data=body)
-+        self.assertEqual(400, resp.status_code)
-+        self.assertNotIn(b'xxe', resp.content)
-+        self.assertNotIn(b'[swift-hash]', resp.content)
-+
-+    def test_complete_multipart_upload(self):
-+        self._create_bucket()
-+
-+        resp = self.conn.create_multipart_upload(
-+            Bucket=self.bucket, Key='test')
-+        response_metadata = resp.pop('ResponseMetadata', {})
-+        self.assertEqual(200, response_metadata.get('HTTPStatusCode'))
-+        uploadid = resp.get('UploadId')
-+
-+        try:
-+            url = self._presign_url(
-+                'complete_multipart_upload',
-+                Key='key',
-+                MultipartUpload={
-+                    'Parts': [
-+                        {
-+                            'ETag': 'string',
-+                            'PartNumber': 1
-+                        }
-+                    ],
-+                },
-+                UploadId=uploadid)
-+            resp = requests.post(url, data="""
-+ ]>
-+
-+
-+ "{uploadid}"
-+ &xxe;
-+
-+
-+""")  # noqa: E501
-+            self.assertEqual(404, resp.status_code)
-+            self.assertNotIn(b'xxe', resp.content)
-+            self.assertNotIn(b'[swift-hash]', resp.content)
-+
-+            resp = requests.post(url, data="""
-+ ]>
-+
-+
-+ "&xxe;"
-+ 1
-+
-+
-+""")  # noqa: E501
-+            self.assertEqual(404, resp.status_code)
-+            self.assertNotIn(b'xxe', resp.content)
-+            self.assertNotIn(b'[swift-hash]', resp.content)
-+        finally:
-+            resp = self.conn.abort_multipart_upload(
-+                Bucket=self.bucket, Key='test', UploadId=uploadid)
-+            response_metadata = resp.pop('ResponseMetadata', {})
-+            self.assertEqual(204, response_metadata.get('HTTPStatusCode'))
-+
-+    def test_put_bucket_versioning(self):
-+        self._create_bucket()
-+
-+        url = self._presign_url(
-+            'put_bucket_versioning',
-+            VersioningConfiguration={
-+                'Status': 'Enabled'
-+            })
-+        resp = requests.put(url, data="""
-+ ]>
-+
-+ &xxe;
-+
-+""")  # noqa: E501
-+        self.assertEqual(400, resp.status_code)
-+        self.assertNotIn(b'xxe', resp.content)
-+        self.assertNotIn(b'[swift-hash]', resp.content)
-+
-+        versioning = self.conn.get_bucket_versioning(Bucket=self.bucket)
-+        response_metadata = versioning.pop('ResponseMetadata', {})
-+        self.assertEqual(200, response_metadata.get('HTTPStatusCode'))
-+        self.assertDictEqual({}, versioning)
-diff --git a/test/unit/common/middleware/s3api/test_multi_delete.py b/test/unit/common/middleware/s3api/test_multi_delete.py
-index 0cfe438de..d40b48f2d 100644
---- a/test/unit/common/middleware/s3api/test_multi_delete.py
-+++ b/test/unit/common/middleware/s3api/test_multi_delete.py
-@@ -523,6 +523,7 @@ class TestS3ApiMultiDelete(S3ApiTestCase):
-                            body=body)
-         status, headers, body = self.call_s3api(req)
-         self.assertEqual(status.split()[0], '200')
-+        self.assertIn(b'Key1Server Error', body)
- 
-     def _test_object_multi_DELETE(self, account):
-         self.keys = ['Key1', 'Key2']
-@@ -580,6 +581,45 @@ class TestS3ApiMultiDelete(S3ApiTestCase):
-         elem = fromstring(body)
-         self.assertEqual(len(elem.findall('Deleted')), len(self.keys))
- 
-+    def test_object_multi_DELETE_with_system_entity(self):
-+        self.keys = ['Key1', 'Key2']
-+        self.swift.register(
-+            'DELETE', '/v1/AUTH_test/bucket/%s' % self.keys[0],
-+            swob.HTTPNotFound, {}, None)
-+        self.swift.register(
-+            'DELETE', '/v1/AUTH_test/bucket/%s' % self.keys[1],
-+            swob.HTTPNoContent, {}, None)
-+
-+        elem = Element('Delete')
-+        for key in self.keys:
-+            obj = SubElement(elem, 'Object')
-+            SubElement(obj, 'Key').text = key
-+        body = tostring(elem, use_s3ns=False)
-+        body = body.replace(
-+            b'?>\n',
-+            b'?>\n ]>\n',
-+        ).replace(b'>Key1<', b'>Key1&ent;<')
-+        content_md5 = (
-+            base64.b64encode(md5(body, usedforsecurity=False).digest())
-+            .strip())
-+
-+        req = Request.blank('/bucket?delete',
-+                            environ={'REQUEST_METHOD': 'POST'},
-+                            headers={
-+                                'Authorization': 'AWS test:full_control:hmac',
-+                                'Date': self.get_date_header(),
-+                                'Content-MD5': content_md5},
-+                            body=body)
-+        req.date = datetime.now()
-+        req.content_type = 'text/plain'
-+
-+        status, headers, body = self.call_s3api(req)
-+        self.assertEqual(status, '200 OK', body)
-+        self.assertIn(b'Key2', body)
-+        self.assertNotIn(b'root:/root', body)
-+        self.assertIn(b'Key1', body)
-+
-     def _test_no_body(self, use_content_length=False,
-                       use_transfer_encoding=False, string_to_md5=b''):
-         content_md5 = (base64.b64encode(
---
--2.38.1
-
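[Note: the one-line etree.py hunk above is the entire functional fix for CVE-2022-47950: lxml's default parser resolves external entities, so a DOCTYPE pointing at a file:// URL gets inlined into the request and reflected back. A minimal standalone sketch of the same hardening, assuming only that lxml is installed; the payload and file path below are illustrative, not Swift's:

    # Sketch: why resolve_entities=False neutralizes XXE payloads.
    import lxml.etree

    payload = (b'<?xml version="1.0"?>\n'
               b'<!DOCTYPE d [<!ENTITY xxe SYSTEM "file:///etc/hostname">]>\n'
               b'<Delete><Object><Key>&xxe;</Key></Object></Delete>')

    unsafe = lxml.etree.XMLParser()  # default: entities are resolved
    hardened = lxml.etree.XMLParser(resolve_entities=False, no_network=True)

    # Default parser: the referenced file's contents end up in the document.
    print(lxml.etree.fromstring(payload, unsafe).findtext('.//Key'))
    # Hardened parser: the entity is left unresolved (empty text), but the
    # request is still parsed and processed.
    print(lxml.etree.fromstring(payload, hardened).findtext('.//Key'))]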
diff -Nru swift-2.30.0/debian/patches/Change_getting_major_minor_of_blkdev.patch swift-2.30.1/debian/patches/Change_getting_major_minor_of_blkdev.patch
--- swift-2.30.0/debian/patches/Change_getting_major_minor_of_blkdev.patch  1970-01-01 00:00:00.000000000 +0000
+++ swift-2.30.1/debian/patches/Change_getting_major_minor_of_blkdev.patch  2025-11-11 08:06:52.000000000 +0000
@@ -0,0 +1,101 @@
+Description: Change getting major:minor of blkdev
+ Replace the method for determining major:minor of a block device,
+ because stat can't detect major:minor in some cases.
+From: Philippe SERAPHIN
+Date: Thu, 22 Jun 2023 06:45:26 +0200
+Origin: Upstream, https://review.opendev.org/c/openstack/swift/+/887021
+Change-Id: Idcc7cd7a41e225d1052c03ba846dff02851758f8
+Last-Update: 2023-06-29
+
+Index: swift/bin/swift-drive-audit
+===================================================================
+--- swift.orig/bin/swift-drive-audit
++++ swift/bin/swift-drive-audit
+@@ -32,6 +32,27 @@ from swift.common.utils import backward,
+ 
+ def get_devices(device_dir, logger):
+     devices = []
++    majmin_devices = {}
++
++    # List /dev/block
++    # Using os.scandir on recent versions of python, else os.listdir
++    if 'scandir' in dir(os):
++        with os.scandir("/dev/block") as it:
++            for ent in it:
++                if ent.is_symlink():
++                    dev_name = os.path.basename(os.readlink(ent.path))
++                    majmin = os.path.basename(ent.path).split(':')
++                    majmin_devices[dev_name] = {'major': majmin[0],
++                                                'minor': majmin[1]}
++    else:
++        for ent in os.listdir("/dev/block"):
++            ent_path = os.path.join("/dev/block", ent)
++            if os.path.islink(ent_path):
++                dev_name = os.path.basename(os.readlink(ent_path))
++                majmin = os.path.basename(ent_path).split(':')
++                majmin_devices[dev_name] = {'major': majmin[0],
++                                            'minor': majmin[1]}
++
+     for line in open('/proc/mounts').readlines():
+         data = line.strip().split()
+         block_device = data[0]
+@@ -40,15 +61,25 @@ def get_devices(device_dir, logger):
+         device = {}
+         device['mount_point'] = mount_point
+         device['block_device'] = block_device
+-        try:
+-            device_num = os.stat(block_device).st_rdev
+-        except OSError:
+-            # If we can't stat the device, then something weird is going on
+-            logger.error("Error: Could not stat %s!" %
+-                         block_device)
+-            continue
+-        device['major'] = str(os.major(device_num))
+-        device['minor'] = str(os.minor(device_num))
++        dev_name = os.path.basename(block_device)
++        if dev_name in majmin_devices:
++            # If symlink is in /dev/block
++            device['major'] = majmin_devices[dev_name]['major']
++            device['minor'] = majmin_devices[dev_name]['minor']
++        else:
++            # Else we try to stat block_device
++            try:
++                device_num = os.stat(block_device).st_rdev
++            except OSError:
++                # If we can't stat the device,
++                # then something weird is going on
++                logger.error(
++                    'Could not determine major:minor numbers for %s '
++                    '(mounted at %s)! Skipping...',
++                    block_device, mount_point)
++                continue
++            device['major'] = str(os.major(device_num))
++            device['minor'] = str(os.minor(device_num))
+         devices.append(device)
+     for line in open('/proc/partitions').readlines()[2:]:
+         major, minor, blocks, kernel_device = line.strip().split()
+@@ -84,7 +115,7 @@ def get_errors(error_re, log_file_patter
+     # track of the year and month in case the year recently
+     # ticked over
+     year = now_time.year
+-    prev_entry_month = now_time.strftime('%b')
++    prev_ent_month = now_time.strftime('%b')
+     errors = {}
+ 
+     reached_old_logs = False
+@@ -106,11 +137,11 @@ def get_errors(error_re, log_file_patter
+                 break
+             # Solves the problem with year change - kern.log does not
+             # keep track of the year.
+-            log_time_entry = line.split()[:3]
+-            if log_time_entry[0] == 'Dec' and prev_entry_month == 'Jan':
++            log_time_ent = line.split()[:3]
++            if log_time_ent[0] == 'Dec' and prev_ent_month == 'Jan':
+                 year -= 1
+-            prev_entry_month = log_time_entry[0]
+-            log_time_string = '%d %s' % (year, ' '.join(log_time_entry))
++            prev_ent_month = log_time_ent[0]
++            log_time_string = '%d %s' % (year, ' '.join(log_time_ent))
+             try:
+                 log_time = datetime.datetime.strptime(
+                     log_time_string, '%Y %b %d %H:%M:%S')
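[Note: the /dev/block approach the patch adopts can be exercised on its own; every entry in that directory is a MAJ:MIN-named symlink to the device node, so no stat() of the device file is needed. A short sketch (Python 3; the output format is illustrative):

    # Sketch: build a device-name -> (major, minor) map from /dev/block.
    import os

    def majmin_devices():
        devices = {}
        with os.scandir('/dev/block') as entries:
            for ent in entries:
                if ent.is_symlink():
                    # Entry names are 'MAJ:MIN'; targets point at ../sda1 etc.
                    dev_name = os.path.basename(os.readlink(ent.path))
                    major, minor = ent.name.split(':')
                    devices[dev_name] = (major, minor)
        return devices

    for name, (major, minor) in sorted(majmin_devices().items()):
        print('%s -> %s:%s' % (name, major, minor))]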
" ++ "s3tokens API calls will be unauthenticated " ++ "and secret caching will be unavailable.", ++ exc_info=True) ++ ++ if self._secret_cache_duration and self.keystoneclient: ++ self._logger.info("Caching s3tokens for %s seconds", ++ self._secret_cache_duration) ++ else: ++ self._secret_cache_duration = 0 + + def _deny_request(self, code): + error_cls, message = { +@@ -225,6 +239,16 @@ class S3Token(object): + + def _json_request(self, creds_json): + headers = {'Content-Type': 'application/json'} ++ ++ # Add service authentication headers if configured ++ if self.keystoneclient: ++ try: ++ headers.update( ++ self.keystoneclient.session.get_auth_headers()) ++ except Exception: ++ self._logger.warning("Failed to get service token", ++ exc_info=True) ++ + try: + response = requests.post(self._request_uri, + headers=headers, data=creds_json, +Index: swift/test/unit/common/middleware/s3api/test_s3token.py +=================================================================== +--- swift.orig/test/unit/common/middleware/s3api/test_s3token.py ++++ swift/test/unit/common/middleware/s3api/test_s3token.py +@@ -576,6 +576,9 @@ class S3TokenMiddlewareTestGood(S3TokenM + cache.get.return_value = None + + keystone_client = MOCK_KEYSTONE.return_value ++ keystone_client.session.get_auth_headers.return_value = { ++ 'X-Auth-Token': 'bearer token', ++ } + keystone_client.ec2.get.return_value = mock.Mock(secret='secret') + + MOCK_REQUEST.return_value = TestResponse({ +@@ -602,6 +605,18 @@ class S3TokenMiddlewareTestGood(S3TokenM + } + + self.assertTrue(MOCK_REQUEST.called) ++ self.assertEqual(MOCK_REQUEST.mock_calls, [ ++ mock.call('http://example.com/s3tokens', headers={ ++ 'Content-Type': 'application/json', ++ 'X-Auth-Token': 'bearer token', ++ }, data=json.dumps({ ++ "credentials": { ++ "access": "access", ++ "token": "dG9rZW4=", ++ "signature": "signature", ++ } ++ }), verify=None, timeout=10.0) ++ ]) + tenant = GOOD_RESPONSE_V2['access']['token']['tenant'] + expected_cache = (expected_headers, tenant, 'secret') + cache.set.assert_called_once_with('s3secret/access', expected_cache, diff -Nru swift-2.30.0/debian/patches/drive-full-checker.patch swift-2.30.1/debian/patches/drive-full-checker.patch --- swift-2.30.0/debian/patches/drive-full-checker.patch 1970-01-01 00:00:00.000000000 +0000 +++ swift-2.30.1/debian/patches/drive-full-checker.patch 2025-11-11 08:06:52.000000000 +0000 @@ -0,0 +1,494 @@ +Description: drive-full-checker + The admin documentation provides a documentation on how to "prevent[ing] + disk full scenarios" over here: + https://docs.openstack.org/swift/latest/admin_guide.html#preventing-disk-full-scenarios + . + Even if the doc provides an actual example, this example is written in + Python 2, and its implementation is incomplete. + . + This patch intend to fill the gap, and allow administrator to use an + official implementation of a new "swift-drive-full-checker" tool from + /usr/bin directly. Once done, we intend to also patch puppet-swift to + use this new tool. +Author: Thomas Goirand +Forwarded: https://review.opendev.org/c/openstack/swift/+/907523 +Last-Update: 2024-02-11 + +Index: swift/releasenotes/notes/disk-full-checker-d4850f2fb479bb36.yaml +=================================================================== +--- /dev/null ++++ swift/releasenotes/notes/disk-full-checker-d4850f2fb479bb36.yaml +@@ -0,0 +1,14 @@ ++--- ++features: ++ - | ++ A new /usr/bin/swift-drive-full-checker utility is now provided by Swift. 
diff -Nru swift-2.30.0/debian/patches/drive-full-checker.patch swift-2.30.1/debian/patches/drive-full-checker.patch
--- swift-2.30.0/debian/patches/drive-full-checker.patch  1970-01-01 00:00:00.000000000 +0000
+++ swift-2.30.1/debian/patches/drive-full-checker.patch  2025-11-11 08:06:52.000000000 +0000
@@ -0,0 +1,494 @@
+Description: drive-full-checker
+ The admin documentation provides documentation on how to "prevent[ing]
+ disk full scenarios" over here:
+ https://docs.openstack.org/swift/latest/admin_guide.html#preventing-disk-full-scenarios
+ .
+ Even though the doc provides an actual example, that example is written
+ in Python 2, and its implementation is incomplete.
+ .
+ This patch intends to fill the gap, and allows administrators to use an
+ official implementation of a new "swift-drive-full-checker" tool from
+ /usr/bin directly. Once done, we intend to also patch puppet-swift to
+ use this new tool.
+Author: Thomas Goirand
+Forwarded: https://review.opendev.org/c/openstack/swift/+/907523
+Last-Update: 2024-02-11
+
+Index: swift/releasenotes/notes/disk-full-checker-d4850f2fb479bb36.yaml
+===================================================================
+--- /dev/null
++++ swift/releasenotes/notes/disk-full-checker-d4850f2fb479bb36.yaml
+@@ -0,0 +1,14 @@
++---
++features:
++  - |
++    A new /usr/bin/swift-drive-full-checker utility is now provided by Swift.
++    This tool watches partitions in /srv/node (or wherever you configured it)
++    for drive-full scenarios. If a drive has less than the configured amount
++    of space available, swift-drive-full-checker will amend the matching
++    entry in rsyncd.conf and set it with `max connections = -1`, so that
++    rsync will gracefully refuse incoming connections, and the sending
++    replicator process will re-attempt duplicating data when space becomes
++    available. Typically, swift-drive-full-checker will be called from a
++    cron job every 5 minutes (at least), with the parameters defining the
++    amount of data reserved for each type of data, and the number of
++    connections allowed if the data partition is not full.
+Index: swift/setup.cfg
+===================================================================
+--- swift.orig/setup.cfg
++++ swift/setup.cfg
+@@ -84,6 +84,7 @@ keystone =
+ console_scripts =
+     swift-manage-shard-ranges = swift.cli.manage_shard_ranges:main
+     swift-container-deleter = swift.cli.container_deleter:main
++    swift-drive-full-checker = swift.cli.drive_full_checker:main
+ 
+ paste.app_factory =
+     proxy = swift.proxy.server:app_factory
+Index: swift/swift/cli/drive_full_checker.py
+===================================================================
+--- /dev/null
++++ swift/swift/cli/drive_full_checker.py
+@@ -0,0 +1,207 @@
++# Copyright (c) 2024, Thomas Goirand
++# Copyright (c) 2024, Philippe Serafin
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++#    http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++# implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# This script checks whether a mounted device is full, and disables
++# its matching rsync module if that is the case. The disk
++# full limit is the first argument.
++
++import argparse
++import configparser
++import io
++import os
++import shutil
++import sys
++
++from six.moves.configparser import ConfigParser
++
++from swift.common.utils import config_true_value, ismount, get_logger
++
++GiB = 1024 * 1024 * 1024
++
++
++# Params for this function:
++# logger: ref to the logger
++# cp: config parser object containing the rsyncd.conf representation
++# srvnode_dir: name of the drive we're inspecting (for example: sdb)
++# free: bytes available in the current srvnode_dir that we're inspecting
++# sec_name: name of the rsyncd.conf section we may need to patch
++# rs: bytes reserved space in srvnode_dir
++# mc: "normal" max connections (ie: when partition isn't full)
++#     for the given dir entry
++def _patch_rsyncdconf_entry(logger, cp, srvnode_dir, free, sec_name, rs, mc):
++    # Calculate section name (ie: replace '{}' by drive name if present)
++    if '{}' in sec_name:
++        search_str = sec_name.format(srvnode_dir).strip('"')
++    else:
++        search_str = sec_name.strip('"')
++
++    # If referenced in the rsyncd.conf
++    # In old setup (Python 2), calling config_parser['something']
++    # raises an exception if the something section is not present
++    # in the config file. Which is why we must do try/except.
++    # I believe this try/except can be removed on more recent
++    # Python3 based setups.
++    try:
++        if cp[search_str]:
++            # If partition is full (ie: current_free_space < reserved_space),
++            # set 'max connections' to -1 to disable rsync
++            if free < rs:
++                cm = -1
++            else:
++                cm = mc
++
++            if int(cp[search_str]['max connections']) != cm:
++                if cm == -1:
++                    logger.info('Disabling ' + search_str)
++                else:
++                    logger.info('Enabling ' + search_str)
++                cp[search_str]['max connections'] = str(cm)
++    except KeyError:
++        pass
++
++    return cp
++
++
++def configure_rsyncd_conf(account_rs, account_mc, account_secname,
++                          container_rs, container_mc, container_secname,
++                          object_rs, object_mc, object_secname,
++                          storage_p, rsyncd_p, logger, sf=None):
++    # Load the rsyncd.conf file, adding
++    # a fake global section.
++    fake_section = '[fake_section_to_please_configobj]\n'
++    source_file = sf if sf else rsyncd_p
++    try:
++        with open(source_file, 'r') as f:
++            file_content = fake_section + f.read()
++    except Exception as err:
++        print("Unexpected error reading {}: {}".format(source_file, err))
++        return 1
++
++    cp = configparser.RawConfigParser()
++    if sys.version_info[0] == 2:
++        cp.read_string(file_content.decode('unicode-escape'))
++    else:
++        cp.read_string(file_content)
++
++    # For all dirs in /srv/node
++    for srvnode_dir in os.listdir(storage_p):
++        dirpath = os.path.join(storage_p, srvnode_dir)
++        # If the dir is mounted
++        if ismount(dirpath):
++            # Get free space of the partition
++            # shutil.disk_usage can be mocked in tests.
++            if sys.version_info[0] == 2:
++                space = os.statvfs(dirpath)
++                free = (space.f_bsize * space.f_bavail)
++            else:
++                free = shutil.disk_usage(dirpath).free
++
++            # Patch all 3 types of rsync module (a+c+o)
++            cp = _patch_rsyncdconf_entry(logger, cp, srvnode_dir, free,
++                                         account_secname, account_rs,
++                                         account_mc)
++
++            cp = _patch_rsyncdconf_entry(logger, cp, srvnode_dir, free,
++                                         container_secname, container_rs,
++                                         container_mc)
++
++            cp = _patch_rsyncdconf_entry(logger, cp, srvnode_dir, free,
++                                         object_secname, object_rs,
++                                         object_mc)
++
++    # Prepare our rsyncd.conf file before writing
++    iow = io.StringIO()
++    cp.write(iow)
++    file_out = iow.getvalue().replace(fake_section, '')
++
++    try:
++        with open(rsyncd_p, 'w') as f:
++            f.write(file_out)
++    except Exception as err:
++        logger.error("Unexpected error {}".format(err))
++        return 1
++
++
++def main():
++    # Cli OPT parsing
++    parser = argparse.ArgumentParser(prog='swift-drive-full-checker',
++                                     description='Check if the drives of a '
++                                                 'swift node are full, and '
++                                                 'switches /etc/rsyncd.conf '
++                                                 '"max connections" '
++                                                 'accordingly.',
++                                     epilog='(c) 2024, Thomas Goirand, '
++                                            'Philippe Serafin & Infomaniak '
++                                            'Networks.')
++    parser.add_argument('-c', '--config-file',
++                        default='/etc/swift/drive-full-checker.conf',
++                        help='Path to the drive-full-checker.conf. Default to '
++                             '/etc/swift/drive-full-checker.conf')
++    parser.add_argument('-s', '--source-file',
++                        default='/etc/rsyncd.conf',
++                        help='Path to the source file. Default to '
++                             '/etc/rsyncd.conf')
++    args = parser.parse_args()
++
++    # disk-full-checker config file parsing
++    c = ConfigParser()
++    if not c.read(args.config_file):
++        print("Unable to read config file %s" % args.config_file)
++        sys.exit(1)
++
++    CONF = dict(c.items('drive-full-checker'))
++    device_dir = CONF.get('device_dir', '/srv/node')
++    rsyncd_conf_path = CONF.get('rsyncd_conf_path', '/etc/rsyncd.conf')
++
++    account_max_connections = int(CONF.get('account_max_connections', 8))
++    account_reserved_space = int(CONF.get('account_reserved_space', 100)) * GiB
++    account_rsyncd_section_name = CONF.get('account_rsyncd_section_name',
++                                           ' account_{} ')
++
++    container_max_connections = int(CONF.get('container_max_connections', 8))
++    container_reserved_space = (int(CONF.get('container_reserved_space', 100))
++                                * GiB)
++    container_rsyncd_section_name = CONF.get('container_rsyncd_section_name',
++                                             ' container_{} ')
++
++    object_max_connections = int(CONF.get('object_max_connections', 8))
++    object_reserved_space = int(CONF.get('object_reserved_space', 100)) * GiB
++    object_rsyncd_section_name = CONF.get('object_rsyncd_section_name',
++                                          ' object_{} ')
++
++    # logging facility setup
++    log_to_console = config_true_value(CONF.get('log_to_console', False))
++    CONF['log_name'] = CONF.get('log_name', 'drive-full-checker')
++    logger = get_logger(CONF, log_to_console=log_to_console,
++                        log_route='drive-full-checker')
++
++    return configure_rsyncd_conf(account_reserved_space,
++                                 account_max_connections,
++                                 account_rsyncd_section_name,
++                                 container_reserved_space,
++                                 container_max_connections,
++                                 container_rsyncd_section_name,
++                                 object_reserved_space,
++                                 object_max_connections,
++                                 object_rsyncd_section_name,
++                                 device_dir,
++                                 rsyncd_conf_path,
++                                 logger,
++                                 args.source_file)
++
++
++if __name__ == "__main__":
++    sys.exit(main())
+Index: swift/test/unit/cli/test_drive_full_checker.py
+===================================================================
+--- /dev/null
++++ swift/test/unit/cli/test_drive_full_checker.py
+@@ -0,0 +1,174 @@
++# Copyright (c) 2024, Thomas Goirand
++#
++# Licensed under the Apache License, Version 2.0 (the "License"); you may not
++# use this file except in compliance with the License. You may obtain a copy
++# of the License at
++#
++#    http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++
++import collections
++import filecmp
++import mock
++import os
++import tempfile
++import unittest
++import shutil
++import sys
++
++from test.debug_logger import debug_logger
++from swift.cli import drive_full_checker
++
++GiB = 1024 * 1024 * 1024
++
++if sys.version_info[0] == 2:
++    # os.statvfs is removed from Python >= 3
++    freespace_func = 'os.statvfs'
++else:
++    # shutil.disk_usage doesn't exist in Python <= 2
++    freespace_func = 'shutil.disk_usage'
++
++
++class TestDriveFullChecker(unittest.TestCase):
++    def setUp(self):
++        self.logger = debug_logger()
++
++    def _write_rsyncd_conf(self, path, max_conn):
++        rsyncdconf = """pid file = /var/run/rsyncd.pid
++uid = nobody
++gid = nobody
++use chroot = no
++log format = %t %a %m %f %b
++syslog facility = local3
++timeout = 300
++address = 192.168.100.2
++
++[ account_sdb ]
++path = /srv/node
++read only = false
++write only = no
++list = yes
++uid = swift
++gid = swift
++incoming chmod = Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r
++outgoing chmod = Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r
++max connections = {max_conn}
++timeout = 0
++lock file = /var/lock/account_sdb.lock
++
++[ container_sdb ]
++path = /srv/node
++read only = false
++write only = no
++list = yes
++uid = swift
++gid = swift
++incoming chmod = Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r
++outgoing chmod = Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r
++max connections = {max_conn}
++timeout = 0
++lock file = /var/lock/container_sdb.lock
++
++[ object_sdb ]
++path = /srv/node
++read only = false
++write only = no
++list = yes
++uid = swift
++gid = swift
++incoming chmod = Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r
++outgoing chmod = Du=rwx,g=rx,o=rx,Fu=rw,g=r,o=r
++max connections = {max_conn}
++timeout = 0
++lock file = /var/lock/object_sdb.lock
++
++"""
++        f = os.open(path, os.O_RDWR | os.O_CREAT)
++        if sys.version_info[0] == 2:
++            os.write(f, rsyncdconf.format(max_conn=max_conn))
++        else:
++            os.write(f, bytes(rsyncdconf.format(max_conn=max_conn), 'utf-8'))
++        os.close(f)
++
++    @mock.patch.object(drive_full_checker, 'ismount', return_value=True)
++    @mock.patch(freespace_func)
++    def test_drive_full(self, mock_freespace_func, os_path_ismount):
++        # Create a temp folder to run our tests
++        tmpdirname = tempfile.mkdtemp()
++
++        storagepath = tmpdirname + '/srvnode'
++        os.mkdir(storagepath)
++        os.mkdir(storagepath + '/sdb')
++
++        rsyncdpath = tmpdirname + "/rsyncd.conf"
++
++        # Write a first rsyncd.conf with 8 connections for a,c,o
++        self._write_rsyncd_conf(rsyncdpath, 8)
++
++        if sys.version_info[0] == 2:
++            retval = collections.namedtuple('statvfs_result',
++                                            'f_bsize f_bavail')
++            mock_freespace_func.return_value = retval(10, 10)
++        else:
++            retval = collections.namedtuple('usage', 'total used free')
++            # This says: 10 bytes remaining
++            mock_freespace_func.return_value = retval(10, 10, 10)
++
++        drive_full_checker.configure_rsyncd_conf(10 * GiB, 8, ' account_{} ',
++                                                 10 * GiB, 8, ' container_{} ',
++                                                 10 * GiB, 8, ' object_{} ',
++                                                 storagepath, rsyncdpath,
++                                                 self.logger)
++
++        should_be_rsyncdconf = tmpdirname + "/rsyncd_should_be.conf"
++        self._write_rsyncd_conf(should_be_rsyncdconf, -1)
++
++        # Assert that rsyncd.conf and rsyncd_should_be.conf are the same
++        self.assertTrue(filecmp.cmp(rsyncdpath, should_be_rsyncdconf))
++
++        shutil.rmtree(tmpdirname)
++
++    @mock.patch.object(drive_full_checker, 'ismount', return_value=True)
++    @mock.patch(freespace_func)
++    def test_drive_with_space(self, mock_freespace_func, os_path_ismount):
++        # Create a temp folder to run our tests
++        tmpdirname = tempfile.mkdtemp()
++
++        storagepath = tmpdirname + '/srvnode'
++        os.mkdir(storagepath)
++        os.mkdir(storagepath + '/sdb')
++
++        rsyncdpath = tmpdirname + "/rsyncd.conf"
++
++        # Write a first rsyncd.conf with 8 connections for a,c,o
++        self._write_rsyncd_conf(rsyncdpath, -1)
++
++        if sys.version_info[0] == 2:
++            retval = collections.namedtuple('statvfs_result',
++                                            'f_bsize f_bavail')
++            mock_freespace_func.return_value = retval(10 * GiB, 10 * GiB)
++        else:
++            retval = collections.namedtuple('usage', 'total used free')
++            # This says: 10 GiB remaining
++            mock_freespace_func.return_value = retval(10 * GiB,
++                                                      10 * GiB,
++                                                      10 * GiB)
++
++        drive_full_checker.configure_rsyncd_conf(10, 8, ' account_{} ',
++                                                 10, 8, ' container_{} ',
++                                                 10, 8, ' object_{} ',
++                                                 storagepath, rsyncdpath,
++                                                 self.logger)
++
++        should_be_rsyncdconf = tmpdirname + "/rsyncd_should_be.conf"
++        self._write_rsyncd_conf(should_be_rsyncdconf, 8)
++
++        # Assert that rsyncd.conf and rsyncd_should_be.conf are the same
++        self.assertTrue(filecmp.cmp(rsyncdpath, should_be_rsyncdconf))
++
++        shutil.rmtree(tmpdirname)
+Index: swift/etc/drive-full-checker.conf-sample
+===================================================================
+--- /dev/null
++++ swift/etc/drive-full-checker.conf-sample
+@@ -0,0 +1,51 @@
++[drive-full-checker]
++# Mount point of your storage. (string value)
++# device_dir = /srv/node
++#
++# Path to the rsyncd.conf file to manage. (string value)
++# rsyncd_conf_path = /etc/rsyncd.conf
++#
++# You can specify default log routing here if you want:
++# log_name = drive-full-checker
++# log_facility = LOG_LOCAL0
++# log_level = INFO
++# log_address = /dev/log
++# The following caps the length of log lines to the value given; no limit if
++# set to 0, the default.
++# log_max_line_length = 0
++#
++# By default, drive-full-checker logs only to syslog. Setting this option True
++# makes drive-full-checker log to console in addition to syslog.
++# log_to_console = False
++#
++
++# Max connections to the Account rsync backend. (integer value)
++#account_max_connections = 8
++
++# Account server reserved space in GiB. (integer value)
++#account_reserved_space = 100
++
++# Account section name in the rsyncd.conf file. The "{}" sign will be
++# replaced by the drive name. If not using per drive sections, simply
++# write "account". (string value)
++#account_rsyncd_section_name = " account_{} "
++
++# Max connections to the Container rsync backend. (integer value)
++#container_max_connections = 8
++
++# Container server reserved space in GiB. (integer value)
++#container_reserved_space = 100
++
++# Container section name in the rsyncd.conf file. The "{}" sign will be
++# replaced by the drive name. If not using per drive sections, simply
++# write "container". (string value)
++#container_rsyncd_section_name = " container_{} "
++
++# Max connections to the Object rsync backend. (integer value)
++#object_max_connections = 8
++
++# Object server reserved space in GiB. (integer value)
++#object_reserved_space = 100
++
++# Object section name in the rsyncd.conf file. The "{}" sign will be
++# replaced by the drive name. If not using per drive sections, simply
++# write "object". (string value)
++#object_rsyncd_section_name = " object_{} "
++
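[Note: stripped of option handling, the whole mechanism above is small: parse rsyncd.conf (prepending a throwaway section header, since rsyncd.conf opens with global options that configparser rejects), compare a mounted drive's free space against the reserve, and rewrite `max connections`. A condensed sketch of that core; paths, section name and threshold are illustrative:

    # Sketch: disable an rsync module when its drive runs low on space.
    import configparser
    import io
    import shutil

    RESERVED = 100 * 1024 ** 3  # 100 GiB

    def toggle_module(rsyncd_conf, section, mount_point, max_conn=8):
        fake = '[fake_global]\n'  # rsyncd.conf has no leading section
        with open(rsyncd_conf) as f:
            cp = configparser.RawConfigParser()
            cp.read_string(fake + f.read())
        free = shutil.disk_usage(mount_point).free
        wanted = -1 if free < RESERVED else max_conn  # -1 disables rsync
        if int(cp[section]['max connections']) != wanted:
            cp[section]['max connections'] = str(wanted)
            out = io.StringIO()
            cp.write(out)
            with open(rsyncd_conf, 'w') as f:
                f.write(out.getvalue().replace(fake, ''))

    toggle_module('/etc/rsyncd.conf', ' object_sdb ', '/srv/node/sdb')]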
diff -Nru swift-2.30.0/debian/patches/kms_keymaster-allow-specifying-barbican_endpoint.patch swift-2.30.1/debian/patches/kms_keymaster-allow-specifying-barbican_endpoint.patch
--- swift-2.30.0/debian/patches/kms_keymaster-allow-specifying-barbican_endpoint.patch  1970-01-01 00:00:00.000000000 +0000
+++ swift-2.30.1/debian/patches/kms_keymaster-allow-specifying-barbican_endpoint.patch  2025-11-11 08:06:52.000000000 +0000
@@ -0,0 +1,51 @@
+Description: kms_keymaster: allow specifying barbican_endpoint
+ Under a multi-region deployment with a single Keystone server,
+ specifying the Keystone auth credentials isn't enough. Indeed,
+ Castellan succeeds when logging in, but may use the wrong
+ Barbican endpoint (if there are 2 Barbicans deployed). This is
+ what happened to us when deploying our 2nd region.
+ .
+ The way to fix it would be to tell Castellan what region to use;
+ unfortunately, there's no such option in Castellan. Though we may
+ specify the barbican_endpoint, which is what this patch allows.
+Author: Thomas Goirand
+Date: Thu, 13 Jun 2024 11:27:55 +0200
+Change-Id: Ib7f4219ef5fdef65e9cfd5701e28b5288741783e
+Forwarded: https://review.opendev.org/c/openstack/swift/+/921927
+Last-Update: 2024-06-13
+
+diff --git a/swift/common/middleware/crypto/kms_keymaster.py b/swift/common/middleware/crypto/kms_keymaster.py
+index f9a542e..4c0b250 100644
+--- a/swift/common/middleware/crypto/kms_keymaster.py
++++ b/swift/common/middleware/crypto/kms_keymaster.py
+@@ -34,7 +34,7 @@
+                      'domain_id', 'domain_name', 'project_id',
+                      'project_domain_id', 'reauthenticate',
+                      'auth_endpoint', 'api_class', 'key_id*',
+-                     'active_root_secret_id')
++                     'barbican_endpoint', 'active_root_secret_id')
+     keymaster_conf_section = 'kms_keymaster'
+ 
+     def _get_root_secret(self, conf):
+@@ -65,10 +65,17 @@
+             project_domain_id=conf.get('project_domain_id'),
+             reauthenticate=conf.get('reauthenticate'))
+         oslo_conf = cfg.ConfigOpts()
+-        options.set_defaults(
+-            oslo_conf, auth_endpoint=conf.get('auth_endpoint'),
+-            api_class=conf.get('api_class')
+-        )
++        if conf.get('barbican_endpoint'):
++            options.set_defaults(
++                oslo_conf, auth_endpoint=conf.get('auth_endpoint'),
++                barbican_endpoint=conf.get('barbican_endpoint'),
++                api_class=conf.get('api_class')
++            )
++        else:
++            options.set_defaults(
++                oslo_conf, auth_endpoint=conf.get('auth_endpoint'),
++                api_class=conf.get('api_class')
++            )
+         options.enable_logging()
+         manager = key_manager.API(oslo_conf)
+ 
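[Note: on the Castellan side, barbican_endpoint is a regular option, so pre-seeding it bypasses the Keystone-catalog endpoint lookup that can pick the wrong region's Barbican. A sketch of what the patched keymaster effectively does; the endpoint URLs are placeholders:

    # Sketch: pin Castellan to an explicit Barbican endpoint.
    from castellan import key_manager, options
    from oslo_config import cfg

    conf = cfg.ConfigOpts()
    options.set_defaults(
        conf,
        auth_endpoint='http://keystone.example:5000/v3',
        barbican_endpoint='http://barbican.region2.example:9311',
        api_class='castellan.key_manager.barbican_key_manager'
                  '.BarbicanKeyManager')
    options.enable_logging()
    manager = key_manager.API(conf)  # talks to the region2 Barbican]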
diff -Nru swift-2.30.0/debian/patches/series swift-2.30.1/debian/patches/series
--- swift-2.30.0/debian/patches/series  2023-01-19 14:43:31.000000000 +0000
+++ swift-2.30.1/debian/patches/series  2025-11-11 08:06:52.000000000 +0000
@@ -3,4 +3,8 @@
 set-default-workers-value.patch
 Add_tempurl_path_prefix_configuration_option.patch
 Fix_DB_tests_on_py311.patch
-CVE-2022-47950-stable-zed.patch
+swift-recon-only-query-object-servers-once.patch
+Change_getting_major_minor_of_blkdev.patch
+drive-full-checker.patch
+kms_keymaster-allow-specifying-barbican_endpoint.patch
+bug-2119646-swift.patch
diff -Nru swift-2.30.0/debian/patches/swift-recon-only-query-object-servers-once.patch swift-2.30.1/debian/patches/swift-recon-only-query-object-servers-once.patch
--- swift-2.30.0/debian/patches/swift-recon-only-query-object-servers-once.patch  1970-01-01 00:00:00.000000000 +0000
+++ swift-2.30.1/debian/patches/swift-recon-only-query-object-servers-once.patch  2025-11-11 08:06:52.000000000 +0000
@@ -0,0 +1,21 @@
+Description: swift-recon: only query object servers once
+Author: Thomas Goirand
+Forwarded: no
+Last-Update: 2023-04-28
+
+--- swift-2.31.1.orig/swift/cli/recon.py
++++ swift-2.31.1/swift/cli/recon.py
+@@ -908,6 +908,13 @@ class SwiftRecon(object):
+                 recon.scout, filtered_hosts):
+             if status == 200:
+                 hostusage = []
++                # Hack: ignore all hosts where the port is an object-server
++                # port other than 6200. This ensures hosts are only added
++                # once when the "one server per port" option of
++                # swift-object is in use.
++                port = int(url.split(':')[2].split('/')[0])
++                if port > 6200 and port < 6400:
++                    continue
+                 for entry in response:
+                     if not isinstance(entry['mounted'], bool):
+                         print("-> %s/%s: Error: %s" % (url, entry['device'],
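[Note: the hack above relies on the port layout of the "servers per port" deployment style: one object node listens on 6200 plus one extra port per disk (6201, 6202, ...), and each of those ports answers the same recon query. A self-contained sketch of the filter; the addresses are made up, and the 6200-6400 window mirrors the patch's assumption:

    # Sketch: drop per-port duplicates from a list of recon URLs.
    urls = [
        'http://10.0.0.1:6200/recon/diskusage',
        'http://10.0.0.1:6201/recon/diskusage',  # same host, per-disk port
        'http://10.0.0.2:6200/recon/diskusage',
    ]

    def first_port_only(urls):
        for url in urls:
            port = int(url.split(':')[2].split('/')[0])
            if 6200 < port < 6400:  # skip the extra object-server ports
                continue
            yield url

    print(list(first_port_only(urls)))  # only the :6200 entries survive]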
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from eventlet import wsgi, websocket +import six + + +if six.PY2: + from eventlet.green import httplib as http_client +else: + from eventlet.green.http import client as http_client + + +class SwiftHttpProtocol(wsgi.HttpProtocol): + default_request_version = "HTTP/1.0" + + def __init__(self, *args, **kwargs): + # See https://github.com/eventlet/eventlet/pull/590 + self.pre_shutdown_bugfix_eventlet = not getattr( + websocket.WebSocketWSGI, '_WSGI_APP_ALWAYS_IDLE', None) + # Note this is not a new-style class, so super() won't work + wsgi.HttpProtocol.__init__(self, *args, **kwargs) + + def log_request(self, *a): + """ + Turn off logging requests by the underlying WSGI software. + """ + pass + + def log_message(self, f, *a): + """ + Redirect logging other messages by the underlying WSGI software. + """ + logger = getattr(self.server.app, 'logger', None) + if logger: + logger.error('ERROR WSGI: ' + f, *a) + else: + # eventlet<=0.17.4 doesn't have an error method, and in newer + # versions the output from error is same as info anyway + self.server.log.info('ERROR WSGI: ' + f, *a) + + class MessageClass(wsgi.HttpProtocol.MessageClass): + '''Subclass to see when the client didn't provide a Content-Type''' + # for py2: + def parsetype(self): + if self.typeheader is None: + self.typeheader = '' + wsgi.HttpProtocol.MessageClass.parsetype(self) + + # for py3: + def get_default_type(self): + '''If the client didn't provide a content type, leave it blank.''' + return '' + + def parse_request(self): + """Parse a request (inlined from cpython@7e293984). + + The request should be stored in self.raw_requestline; the results + are in self.command, self.path, self.request_version and + self.headers. + + Return True for success, False for failure; on failure, any relevant + error response has already been sent back. + + """ + self.command = None # set in case of error on the first line + self.request_version = version = self.default_request_version + self.close_connection = True + requestline = self.raw_requestline + if not six.PY2: + requestline = requestline.decode('iso-8859-1') + requestline = requestline.rstrip('\r\n') + self.requestline = requestline + # Split off \x20 explicitly (see https://bugs.python.org/issue33973) + words = requestline.split(' ') + if len(words) == 0: + return False + + if len(words) >= 3: # Enough to determine protocol version + version = words[-1] + try: + if not version.startswith('HTTP/'): + raise ValueError + base_version_number = version.split('/', 1)[1] + version_number = base_version_number.split(".") + # RFC 2145 section 3.1 says there can be only one "." and + # - major and minor numbers MUST be treated as + # separate integers; + # - HTTP/2.4 is a lower version than HTTP/2.13, which in + # turn is lower than HTTP/12.3; + # - Leading zeros MUST be ignored by recipients. 
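The RFC 2145 rules quoted in the comment above reduce to a tuple comparison; a small standalone sketch (ours, not part of the patch) of the validation parse_request performs:

    def http_version_tuple(version):
        # 'HTTP/1.1' -> (1, 1); raises ValueError on malformed versions,
        # mirroring the inlined checks above
        if not version.startswith('HTTP/'):
            raise ValueError(version)
        parts = version.split('/', 1)[1].split('.')
        if len(parts) != 2:
            raise ValueError(version)
        return int(parts[0]), int(parts[1])

    assert http_version_tuple('HTTP/1.01') == (1, 1)  # leading zeros ignored
    assert http_version_tuple('HTTP/2.4') < http_version_tuple('HTTP/2.13')
    assert http_version_tuple('HTTP/2.13') < http_version_tuple('HTTP/12.3')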
+ if len(version_number) != 2: + raise ValueError + version_number = int(version_number[0]), int(version_number[1]) + except (ValueError, IndexError): + self.send_error( + 400, + "Bad request version (%r)" % version) + return False + if version_number >= (1, 1) and \ + self.protocol_version >= "HTTP/1.1": + self.close_connection = False + if version_number >= (2, 0): + self.send_error( + 505, + "Invalid HTTP version (%s)" % base_version_number) + return False + self.request_version = version + + if not 2 <= len(words) <= 3: + self.send_error( + 400, + "Bad request syntax (%r)" % requestline) + return False + command, path = words[:2] + if len(words) == 2: + self.close_connection = True + if command != 'GET': + self.send_error( + 400, + "Bad HTTP/0.9 request type (%r)" % command) + return False + self.command, self.path = command, path + + # Examine the headers and look for a Connection directive. + if six.PY2: + self.headers = self.MessageClass(self.rfile, 0) + else: + try: + self.headers = http_client.parse_headers( + self.rfile, + _class=self.MessageClass) + except http_client.LineTooLong as err: + self.send_error( + 431, + "Line too long", + str(err)) + return False + except http_client.HTTPException as err: + self.send_error( + 431, + "Too many headers", + str(err) + ) + return False + + conntype = self.headers.get('Connection', "") + if conntype.lower() == 'close': + self.close_connection = True + elif (conntype.lower() == 'keep-alive' and + self.protocol_version >= "HTTP/1.1"): + self.close_connection = False + # Examine the headers and look for an Expect directive + expect = self.headers.get('Expect', "") + if (expect.lower() == "100-continue" and + self.protocol_version >= "HTTP/1.1" and + self.request_version >= "HTTP/1.1"): + if not self.handle_expect_100(): + return False + return True + + if not six.PY2: + def get_environ(self, *args, **kwargs): + environ = wsgi.HttpProtocol.get_environ(self, *args, **kwargs) + header_payload = self.headers.get_payload() + if isinstance(header_payload, list) and len(header_payload) == 1: + header_payload = header_payload[0].get_payload() + if header_payload: + # This shouldn't be here. We must've bumped up against + # https://bugs.python.org/issue37093 + headers_raw = list(environ['headers_raw']) + for line in header_payload.rstrip('\r\n').split('\n'): + if ':' not in line or line[:1] in ' \t': + # Well, we're no more broken than we were before... + # Should we support line folding? + # Should we 400 a bad header line? 
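The header-recovery code a few lines below rewrites each rescued header into a CGI-style environ key; a standalone sketch of that mapping (ours, for illustration):

    def wsgi_env_key(header):
        # 'X-Trans-Id' -> 'HTTP_X_TRANS_ID'; WSGI special-cases
        # Content-Length and Content-Type, which lose the HTTP_ prefix.
        key = 'HTTP_' + header.replace('-', '_').upper()
        if key in ('HTTP_CONTENT_LENGTH', 'HTTP_CONTENT_TYPE'):
            key = key[5:]
        return key

    assert wsgi_env_key('X-Object-Meta-Color') == 'HTTP_X_OBJECT_META_COLOR'
    assert wsgi_env_key('Content-Length') == 'CONTENT_LENGTH'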
+ break + header, value = line.split(':', 1) + value = value.strip(' \t\n\r') + # NB: Eventlet looks at the headers obj to figure out + # whether the client said the connection should close; + # see https://github.com/eventlet/eventlet/blob/v0.25.0/ + # eventlet/wsgi.py#L504 + self.headers.add_header(header, value) + headers_raw.append((header, value)) + wsgi_key = 'HTTP_' + header.replace('-', '_').encode( + 'latin1').upper().decode('latin1') + if wsgi_key in ('HTTP_CONTENT_LENGTH', + 'HTTP_CONTENT_TYPE'): + wsgi_key = wsgi_key[5:] + environ[wsgi_key] = value + environ['headers_raw'] = tuple(headers_raw) + # Since we parsed some more headers, check to see if they + # change how our wsgi.input should behave + te = environ.get('HTTP_TRANSFER_ENCODING', '').lower() + if te.rsplit(',', 1)[-1].strip() == 'chunked': + environ['wsgi.input'].chunked_input = True + else: + length = environ.get('CONTENT_LENGTH') + if length: + length = int(length) + environ['wsgi.input'].content_length = length + if environ.get('HTTP_EXPECT', '').lower() == '100-continue': + environ['wsgi.input'].wfile = self.wfile + environ['wsgi.input'].wfile_line = \ + b'HTTP/1.1 100 Continue\r\n' + return environ + + def _read_request_line(self): + # Note this is not a new-style class, so super() won't work + got = wsgi.HttpProtocol._read_request_line(self) + # See https://github.com/eventlet/eventlet/pull/590 + if self.pre_shutdown_bugfix_eventlet: + self.conn_state[2] = wsgi.STATE_REQUEST + return got + + def handle_one_request(self): + # Note this is not a new-style class, so super() won't work + got = wsgi.HttpProtocol.handle_one_request(self) + # See https://github.com/eventlet/eventlet/pull/590 + if self.pre_shutdown_bugfix_eventlet: + if self.conn_state[2] != wsgi.STATE_CLOSE: + self.conn_state[2] = wsgi.STATE_IDLE + return got + + +class SwiftHttpProxiedProtocol(SwiftHttpProtocol): + """ + Protocol object that speaks HTTP, including multiple requests, but with + a single PROXY line as the very first thing coming in over the socket. + This is so we can learn what the client's IP address is when Swift is + behind a TLS terminator, like hitch, that does not understand HTTP and + so cannot add X-Forwarded-For or other similar headers. + + See http://www.haproxy.org/download/1.7/doc/proxy-protocol.txt for + protocol details. + """ + def __init__(self, *a, **kw): + self.proxy_address = None + SwiftHttpProtocol.__init__(self, *a, **kw) + + def handle_error(self, connection_line): + if not six.PY2: + connection_line = connection_line.decode('latin-1') + + # No further processing will proceed on this connection under any + # circumstances. We always send the request into the superclass to + # handle any cleanup - this ensures that the request will not be + # processed. + self.rfile.close() + # We don't really have any confidence that an HTTP Error will be + # processable by the client as our transmission broken down between + # ourselves and our gateway proxy before processing the client + # protocol request. Hopefully the operator will know what to do! + msg = 'Invalid PROXY line %r' % connection_line + self.log_message(msg) + # Even assuming HTTP we don't even known what version of HTTP the + # client is sending? This entire endeavor seems questionable. 
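The handle() method below consumes a single version-1 PROXY preamble before any HTTP parsing happens; a minimal standalone parser for the same line format (our sketch, not Swift code):

    def parse_proxy_v1(line):
        # b'PROXY TCP4 <src> <dst> <srcport> <dstport>\r\n'
        parts = line.strip(b'\r\n').split(b' ')
        if parts[1].startswith(b'UNKNOWN'):
            # Unknown transport: per section 2.1 of the spec, ignore
            # everything up to the CRLF.
            return None, None
        if parts[1] in (b'TCP4', b'TCP6') and len(parts) == 6:
            client = (parts[2].decode('latin-1'), parts[4].decode('latin-1'))
            proxy = (parts[3].decode('latin-1'), parts[5].decode('latin-1'))
            return client, proxy
        raise ValueError('Invalid PROXY line %r' % line)

    print(parse_proxy_v1(b'PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n'))
    # (('192.168.0.1', '56423'), ('192.168.0.11', '443'))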
+ self.request_version = self.default_request_version + # appease http.server + self.command = 'PROXY' + self.send_error(400, msg) + + def handle(self): + """Handle multiple requests if necessary.""" + # ensure the opening line for the connection is a valid PROXY protcol + # line; this is the only IO we do on this connection before any + # additional wrapping further pollutes the raw socket. + connection_line = self.rfile.readline(self.server.url_length_limit) + + if not connection_line.startswith(b'PROXY '): + return self.handle_error(connection_line) + + proxy_parts = connection_line.strip(b'\r\n').split(b' ') + if proxy_parts[1].startswith(b'UNKNOWN'): + # "UNKNOWN", in PROXY protocol version 1, means "not + # TCP4 or TCP6". This includes completely legitimate + # things like QUIC or Unix domain sockets. The PROXY + # protocol (section 2.1) states that the receiver + # (that's us) MUST ignore anything after "UNKNOWN" and + # before the CRLF, essentially discarding the first + # line. + pass + elif proxy_parts[1] in (b'TCP4', b'TCP6') and len(proxy_parts) == 6: + if six.PY2: + self.client_address = (proxy_parts[2], proxy_parts[4]) + self.proxy_address = (proxy_parts[3], proxy_parts[5]) + else: + self.client_address = ( + proxy_parts[2].decode('latin-1'), + proxy_parts[4].decode('latin-1')) + self.proxy_address = ( + proxy_parts[3].decode('latin-1'), + proxy_parts[5].decode('latin-1')) + else: + self.handle_error(connection_line) + + return SwiftHttpProtocol.handle(self) + + def get_environ(self, *args, **kwargs): + environ = SwiftHttpProtocol.get_environ(self, *args, **kwargs) + if self.proxy_address: + environ['SERVER_ADDR'] = self.proxy_address[0] + environ['SERVER_PORT'] = self.proxy_address[1] + if self.proxy_address[1] == '443': + environ['wsgi.url_scheme'] = 'https' + environ['HTTPS'] = 'on' + return environ diff -Nru swift-2.30.0/swift/common/middleware/s3api/etree.py swift-2.30.1/swift/common/middleware/s3api/etree.py --- swift-2.30.0/swift/common/middleware/s3api/etree.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/swift/common/middleware/s3api/etree.py 2023-01-30 23:23:08.000000000 +0000 @@ -130,7 +130,7 @@ parser_lookup = lxml.etree.ElementDefaultClassLookup(element=_Element) -parser = lxml.etree.XMLParser() +parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True) parser.set_element_class_lookup(parser_lookup) Element = parser.makeelement diff -Nru swift-2.30.0/swift/common/wsgi.py swift-2.30.1/swift/common/wsgi.py --- swift-2.30.0/swift/common/wsgi.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/swift/common/wsgi.py 2023-01-30 23:23:08.000000000 +0000 @@ -28,8 +28,7 @@ import eventlet import eventlet.debug -from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout, \ - websocket +from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout from paste.deploy import loadwsgi from eventlet.green import socket, ssl, os as green_os from io import BytesIO @@ -38,9 +37,10 @@ from six import StringIO from swift.common import utils, constraints +from swift.common.http_protocol import SwiftHttpProtocol, \ + SwiftHttpProxiedProtocol from swift.common.storage_policy import BindPortsCache -from swift.common.swob import Request, wsgi_quote, wsgi_unquote, \ - wsgi_quote_plus, wsgi_unquote_plus, wsgi_to_bytes, bytes_to_wsgi +from swift.common.swob import Request, wsgi_unquote from swift.common.utils import capture_stdio, disable_fallocate, \ drop_privileges, get_logger, NullLogger, config_true_value, \ validate_configuration, get_hub, 
config_auto_int_value, \ @@ -384,232 +384,6 @@ return app_conf -class SwiftHttpProtocol(wsgi.HttpProtocol): - default_request_version = "HTTP/1.0" - - def __init__(self, *args, **kwargs): - # See https://github.com/eventlet/eventlet/pull/590 - self.pre_shutdown_bugfix_eventlet = not getattr( - websocket.WebSocketWSGI, '_WSGI_APP_ALWAYS_IDLE', None) - # Note this is not a new-style class, so super() won't work - wsgi.HttpProtocol.__init__(self, *args, **kwargs) - - def log_request(self, *a): - """ - Turn off logging requests by the underlying WSGI software. - """ - pass - - def log_message(self, f, *a): - """ - Redirect logging other messages by the underlying WSGI software. - """ - logger = getattr(self.server.app, 'logger', None) - if logger: - logger.error('ERROR WSGI: ' + f, *a) - else: - # eventlet<=0.17.4 doesn't have an error method, and in newer - # versions the output from error is same as info anyway - self.server.log.info('ERROR WSGI: ' + f, *a) - - class MessageClass(wsgi.HttpProtocol.MessageClass): - '''Subclass to see when the client didn't provide a Content-Type''' - # for py2: - def parsetype(self): - if self.typeheader is None: - self.typeheader = '' - wsgi.HttpProtocol.MessageClass.parsetype(self) - - # for py3: - def get_default_type(self): - '''If the client didn't provide a content type, leave it blank.''' - return '' - - def parse_request(self): - # Need to track the bytes-on-the-wire for S3 signatures -- eventlet - # would do it for us, but since we rewrite the path on py3, we need to - # fix it ourselves later. - self.__raw_path_info = None - - if not six.PY2: - # request lines *should* be ascii per the RFC, but historically - # we've allowed (and even have func tests that use) arbitrary - # bytes. This breaks on py3 (see https://bugs.python.org/issue33973 - # ) but the work-around is simple: munge the request line to be - # properly quoted. - if self.raw_requestline.count(b' ') >= 2: - parts = self.raw_requestline.split(b' ', 2) - path, q, query = parts[1].partition(b'?') - self.__raw_path_info = path - # unquote first, so we don't over-quote something - # that was *correctly* quoted - path = wsgi_to_bytes(wsgi_quote(wsgi_unquote( - bytes_to_wsgi(path)))) - query = b'&'.join( - sep.join([ - wsgi_to_bytes(wsgi_quote_plus(wsgi_unquote_plus( - bytes_to_wsgi(key)))), - wsgi_to_bytes(wsgi_quote_plus(wsgi_unquote_plus( - bytes_to_wsgi(val)))) - ]) - for part in query.split(b'&') - for key, sep, val in (part.partition(b'='), )) - parts[1] = path + q + query - self.raw_requestline = b' '.join(parts) - # else, mangled protocol, most likely; let base class deal with it - return wsgi.HttpProtocol.parse_request(self) - - if not six.PY2: - def get_environ(self, *args, **kwargs): - environ = wsgi.HttpProtocol.get_environ(self, *args, **kwargs) - environ['RAW_PATH_INFO'] = bytes_to_wsgi( - self.__raw_path_info) - header_payload = self.headers.get_payload() - if isinstance(header_payload, list) and len(header_payload) == 1: - header_payload = header_payload[0].get_payload() - if header_payload: - # This shouldn't be here. We must've bumped up against - # https://bugs.python.org/issue37093 - headers_raw = list(environ['headers_raw']) - for line in header_payload.rstrip('\r\n').split('\n'): - if ':' not in line or line[:1] in ' \t': - # Well, we're no more broken than we were before... - # Should we support line folding? - # Should we 400 a bad header line? 
- break - header, value = line.split(':', 1) - value = value.strip(' \t\n\r') - # NB: Eventlet looks at the headers obj to figure out - # whether the client said the connection should close; - # see https://github.com/eventlet/eventlet/blob/v0.25.0/ - # eventlet/wsgi.py#L504 - self.headers.add_header(header, value) - headers_raw.append((header, value)) - wsgi_key = 'HTTP_' + header.replace('-', '_').encode( - 'latin1').upper().decode('latin1') - if wsgi_key in ('HTTP_CONTENT_LENGTH', - 'HTTP_CONTENT_TYPE'): - wsgi_key = wsgi_key[5:] - environ[wsgi_key] = value - environ['headers_raw'] = tuple(headers_raw) - # Since we parsed some more headers, check to see if they - # change how our wsgi.input should behave - te = environ.get('HTTP_TRANSFER_ENCODING', '').lower() - if te.rsplit(',', 1)[-1].strip() == 'chunked': - environ['wsgi.input'].chunked_input = True - else: - length = environ.get('CONTENT_LENGTH') - if length: - length = int(length) - environ['wsgi.input'].content_length = length - if environ.get('HTTP_EXPECT', '').lower() == '100-continue': - environ['wsgi.input'].wfile = self.wfile - environ['wsgi.input'].wfile_line = \ - b'HTTP/1.1 100 Continue\r\n' - return environ - - def _read_request_line(self): - # Note this is not a new-style class, so super() won't work - got = wsgi.HttpProtocol._read_request_line(self) - # See https://github.com/eventlet/eventlet/pull/590 - if self.pre_shutdown_bugfix_eventlet: - self.conn_state[2] = wsgi.STATE_REQUEST - return got - - def handle_one_request(self): - # Note this is not a new-style class, so super() won't work - got = wsgi.HttpProtocol.handle_one_request(self) - # See https://github.com/eventlet/eventlet/pull/590 - if self.pre_shutdown_bugfix_eventlet: - if self.conn_state[2] != wsgi.STATE_CLOSE: - self.conn_state[2] = wsgi.STATE_IDLE - return got - - -class SwiftHttpProxiedProtocol(SwiftHttpProtocol): - """ - Protocol object that speaks HTTP, including multiple requests, but with - a single PROXY line as the very first thing coming in over the socket. - This is so we can learn what the client's IP address is when Swift is - behind a TLS terminator, like hitch, that does not understand HTTP and - so cannot add X-Forwarded-For or other similar headers. - - See http://www.haproxy.org/download/1.7/doc/proxy-protocol.txt for - protocol details. - """ - def __init__(self, *a, **kw): - self.proxy_address = None - SwiftHttpProtocol.__init__(self, *a, **kw) - - def handle_error(self, connection_line): - if not six.PY2: - connection_line = connection_line.decode('latin-1') - - # No further processing will proceed on this connection under any - # circumstances. We always send the request into the superclass to - # handle any cleanup - this ensures that the request will not be - # processed. - self.rfile.close() - # We don't really have any confidence that an HTTP Error will be - # processable by the client as our transmission broken down between - # ourselves and our gateway proxy before processing the client - # protocol request. Hopefully the operator will know what to do! - msg = 'Invalid PROXY line %r' % connection_line - self.log_message(msg) - # Even assuming HTTP we don't even known what version of HTTP the - # client is sending? This entire endeavor seems questionable. 
- self.request_version = self.default_request_version - # appease http.server - self.command = 'PROXY' - self.send_error(400, msg) - - def handle(self): - """Handle multiple requests if necessary.""" - # ensure the opening line for the connection is a valid PROXY protcol - # line; this is the only IO we do on this connection before any - # additional wrapping further pollutes the raw socket. - connection_line = self.rfile.readline(self.server.url_length_limit) - - if not connection_line.startswith(b'PROXY '): - return self.handle_error(connection_line) - - proxy_parts = connection_line.strip(b'\r\n').split(b' ') - if proxy_parts[1].startswith(b'UNKNOWN'): - # "UNKNOWN", in PROXY protocol version 1, means "not - # TCP4 or TCP6". This includes completely legitimate - # things like QUIC or Unix domain sockets. The PROXY - # protocol (section 2.1) states that the receiver - # (that's us) MUST ignore anything after "UNKNOWN" and - # before the CRLF, essentially discarding the first - # line. - pass - elif proxy_parts[1] in (b'TCP4', b'TCP6') and len(proxy_parts) == 6: - if six.PY2: - self.client_address = (proxy_parts[2], proxy_parts[4]) - self.proxy_address = (proxy_parts[3], proxy_parts[5]) - else: - self.client_address = ( - proxy_parts[2].decode('latin-1'), - proxy_parts[4].decode('latin-1')) - self.proxy_address = ( - proxy_parts[3].decode('latin-1'), - proxy_parts[5].decode('latin-1')) - else: - self.handle_error(connection_line) - - return SwiftHttpProtocol.handle(self) - - def get_environ(self, *args, **kwargs): - environ = SwiftHttpProtocol.get_environ(self, *args, **kwargs) - if self.proxy_address: - environ['SERVER_ADDR'] = self.proxy_address[0] - environ['SERVER_PORT'] = self.proxy_address[1] - if self.proxy_address[1] == '443': - environ['wsgi.url_scheme'] = 'https' - environ['HTTPS'] = 'on' - return environ - - def run_server(conf, logger, sock, global_conf=None, ready_callback=None, allow_modify_pipeline=True): # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on diff -Nru swift-2.30.0/test/functional/__init__.py swift-2.30.1/test/functional/__init__.py --- swift-2.30.0/test/functional/__init__.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/test/functional/__init__.py 2023-01-30 23:23:08.000000000 +0000 @@ -54,7 +54,8 @@ from swift.common import constraints, utils, ring, storage_policy from swift.common.ring import Ring -from swift.common.wsgi import loadapp, SwiftHttpProtocol +from swift.common.http_protocol import SwiftHttpProtocol +from swift.common.wsgi import loadapp from swift.common.utils import config_true_value, split_path from swift.account import server as account_server from swift.container import server as container_server diff -Nru swift-2.30.0/test/functional/s3api/test_xxe_injection.py swift-2.30.1/test/functional/s3api/test_xxe_injection.py --- swift-2.30.0/test/functional/s3api/test_xxe_injection.py 1970-01-01 00:00:00.000000000 +0000 +++ swift-2.30.1/test/functional/s3api/test_xxe_injection.py 2023-01-30 23:23:08.000000000 +0000 @@ -0,0 +1,231 @@ +#!/usr/bin/env python +# Copyright (c) 2022 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import requests + +import botocore + +import test.functional as tf +from test.functional.s3api import S3ApiBaseBoto3 + + +def setUpModule(): + tf.setup_package() + + +def tearDownModule(): + tf.teardown_package() + + +class TestS3ApiXxeInjection(S3ApiBaseBoto3): + + def setUp(self): + super(TestS3ApiXxeInjection, self).setUp() + self.bucket = 'test-s3api-xxe-injection' + + def _create_bucket(self, **kwargs): + resp = self.conn.create_bucket(Bucket=self.bucket, **kwargs) + response_metadata = resp.pop('ResponseMetadata', {}) + self.assertEqual(200, response_metadata.get('HTTPStatusCode')) + + @staticmethod + def _clear_data(request, **_kwargs): + request.data = b'' + + def _presign_url(self, method, key=None, **kwargs): + params = { + 'Bucket': self.bucket + } + if key: + params['Key'] = key + params.update(kwargs) + try: + # https://github.com/boto/boto3/issues/2192 + self.conn.meta.events.register( + 'before-sign.s3.*', self._clear_data) + return self.conn.generate_presigned_url( + method, Params=params, ExpiresIn=60) + finally: + self.conn.meta.events.unregister( + 'before-sign.s3.*', self._clear_data) + + def test_put_bucket_acl(self): + if not tf.cluster_info['s3api'].get('s3_acl'): + self.skipTest('s3_acl must be enabled') + + self._create_bucket() + + url = self._presign_url('put_bucket_acl') + resp = requests.put(url, data=""" + ]> + + + test:tester + test:tester + + + + + name&xxe; + id&xxe; + + WRITE + + + +""") # noqa: E501 + self.assertEqual(200, resp.status_code) + self.assertNotIn(b'xxe', resp.content) + self.assertNotIn(b'[swift-hash]', resp.content) + + acl = self.conn.get_bucket_acl(Bucket=self.bucket) + response_metadata = acl.pop('ResponseMetadata', {}) + self.assertEqual(200, response_metadata.get('HTTPStatusCode')) + self.assertDictEqual({ + 'Owner': { + 'DisplayName': 'test:tester', + 'ID': 'test:tester' + }, + 'Grants': [ + { + 'Grantee': { + 'DisplayName': 'id', + 'ID': 'id', + 'Type': 'CanonicalUser' + }, + 'Permission': 'WRITE' + } + ] + }, acl) + + def test_create_bucket(self): + url = self._presign_url('create_bucket') + resp = requests.put(url, data=""" + ]> + + &xxe; + +""") # noqa: E501 + self.assertEqual(400, resp.status_code) + self.assertNotIn(b'xxe', resp.content) + self.assertNotIn(b'[swift-hash]', resp.content) + + self.assertRaisesRegex( + botocore.exceptions.ClientError, 'Not Found', + self.conn.head_bucket, Bucket=self.bucket) + + def test_delete_objects(self): + self._create_bucket() + + url = self._presign_url( + 'delete_objects', + Delete={ + 'Objects': [ + { + 'Key': 'string', + 'VersionId': 'string' + } + ] + }) + body = """ + ]> + + + &xxe; + + +""" + body = body.encode('utf-8') + resp = requests.post(url, data=body) + self.assertEqual(400, resp.status_code, resp.content) + self.assertNotIn(b'xxe', resp.content) + self.assertNotIn(b'[swift-hash]', resp.content) + + def test_complete_multipart_upload(self): + self._create_bucket() + + resp = self.conn.create_multipart_upload( + Bucket=self.bucket, Key='test') + response_metadata = resp.pop('ResponseMetadata', {}) + self.assertEqual(200, response_metadata.get('HTTPStatusCode')) + 
uploadid = resp.get('UploadId') + + try: + url = self._presign_url( + 'complete_multipart_upload', + Key='key', + MultipartUpload={ + 'Parts': [ + { + 'ETag': 'string', + 'PartNumber': 1 + } + ], + }, + UploadId=uploadid) + resp = requests.post(url, data=""" + ]> + + + "{uploadid}" + &xxe; + + +""") # noqa: E501 + self.assertEqual(404, resp.status_code) + self.assertNotIn(b'xxe', resp.content) + self.assertNotIn(b'[swift-hash]', resp.content) + + resp = requests.post(url, data=""" + ]> + + + "&xxe;" + 1 + + +""") # noqa: E501 + self.assertEqual(404, resp.status_code) + self.assertNotIn(b'xxe', resp.content) + self.assertNotIn(b'[swift-hash]', resp.content) + finally: + resp = self.conn.abort_multipart_upload( + Bucket=self.bucket, Key='test', UploadId=uploadid) + response_metadata = resp.pop('ResponseMetadata', {}) + self.assertEqual(204, response_metadata.get('HTTPStatusCode')) + + def test_put_bucket_versioning(self): + self._create_bucket() + + url = self._presign_url( + 'put_bucket_versioning', + VersioningConfiguration={ + 'Status': 'Enabled' + }) + resp = requests.put(url, data=""" + ]> + + &xxe; + +""") # noqa: E501 + self.assertEqual(400, resp.status_code) + self.assertNotIn(b'xxe', resp.content) + self.assertNotIn(b'[swift-hash]', resp.content) + + versioning = self.conn.get_bucket_versioning(Bucket=self.bucket) + response_metadata = versioning.pop('ResponseMetadata', {}) + self.assertEqual(200, response_metadata.get('HTTPStatusCode')) + self.assertDictEqual({}, versioning) diff -Nru swift-2.30.0/test/unit/common/middleware/s3api/test_multi_delete.py swift-2.30.1/test/unit/common/middleware/s3api/test_multi_delete.py --- swift-2.30.0/test/unit/common/middleware/s3api/test_multi_delete.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/test/unit/common/middleware/s3api/test_multi_delete.py 2023-01-30 23:23:08.000000000 +0000 @@ -523,6 +523,7 @@ body=body) status, headers, body = self.call_s3api(req) self.assertEqual(status.split()[0], '200') + self.assertIn(b'Key1Server Error', body) def _test_object_multi_DELETE(self, account): self.keys = ['Key1', 'Key2'] @@ -580,6 +581,45 @@ elem = fromstring(body) self.assertEqual(len(elem.findall('Deleted')), len(self.keys)) + def test_object_multi_DELETE_with_system_entity(self): + self.keys = ['Key1', 'Key2'] + self.swift.register( + 'DELETE', '/v1/AUTH_test/bucket/%s' % self.keys[0], + swob.HTTPNotFound, {}, None) + self.swift.register( + 'DELETE', '/v1/AUTH_test/bucket/%s' % self.keys[1], + swob.HTTPNoContent, {}, None) + + elem = Element('Delete') + for key in self.keys: + obj = SubElement(elem, 'Object') + SubElement(obj, 'Key').text = key + body = tostring(elem, use_s3ns=False) + body = body.replace( + b'?>\n', + b'?>\n ]>\n', + ).replace(b'>Key1<', b'>Key1&ent;<') + content_md5 = ( + base64.b64encode(md5(body, usedforsecurity=False).digest()) + .strip()) + + req = Request.blank('/bucket?delete', + environ={'REQUEST_METHOD': 'POST'}, + headers={ + 'Authorization': 'AWS test:full_control:hmac', + 'Date': self.get_date_header(), + 'Content-MD5': content_md5}, + body=body) + req.date = datetime.now() + req.content_type = 'text/plain' + + status, headers, body = self.call_s3api(req) + self.assertEqual(status, '200 OK', body) + self.assertIn(b'Key2', body) + self.assertNotIn(b'root:/root', body) + self.assertIn(b'Key1', body) + def _test_no_body(self, use_content_length=False, use_transfer_encoding=False, string_to_md5=b''): content_md5 = (base64.b64encode( diff -Nru swift-2.30.0/test/unit/common/test_http_protocol.py 
swift-2.30.1/test/unit/common/test_http_protocol.py --- swift-2.30.0/test/unit/common/test_http_protocol.py 1970-01-01 00:00:00.000000000 +0000 +++ swift-2.30.1/test/unit/common/test_http_protocol.py 2023-01-30 23:23:08.000000000 +0000 @@ -0,0 +1,412 @@ +# Copyright (c) 2010-2022 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from argparse import Namespace +from io import BytesIO +import json +import mock +import types +import unittest +import eventlet.wsgi +import six +from swift.common import http_protocol, swob + + +class TestSwiftHttpProtocol(unittest.TestCase): + def _proto_obj(self): + # Make an object we can exercise... note the base class's __init__() + # does a bunch of work, so we just new up an object like eventlet.wsgi + # does. + proto_class = http_protocol.SwiftHttpProtocol + try: + the_obj = types.InstanceType(proto_class) + except AttributeError: + the_obj = proto_class.__new__(proto_class) + # Install some convenience mocks + the_obj.server = Namespace(app=Namespace(logger=mock.Mock()), + url_length_limit=777, + log=mock.Mock()) + the_obj.send_error = mock.Mock() + + return the_obj + + def test_swift_http_protocol_log_request(self): + proto_obj = self._proto_obj() + self.assertEqual(None, proto_obj.log_request('ignored')) + + def test_swift_http_protocol_log_message(self): + proto_obj = self._proto_obj() + + proto_obj.log_message('a%sc', 'b') + self.assertEqual([mock.call.error('ERROR WSGI: a%sc', 'b')], + proto_obj.server.app.logger.mock_calls) + + def test_swift_http_protocol_log_message_no_logger(self): + # If the app somehow had no logger attribute or it was None, don't blow + # up + proto_obj = self._proto_obj() + delattr(proto_obj.server.app, 'logger') + + proto_obj.log_message('a%sc', 'b') + self.assertEqual([mock.call.info('ERROR WSGI: a%sc', 'b')], + proto_obj.server.log.mock_calls) + + proto_obj.server.log.reset_mock() + proto_obj.server.app.logger = None + + proto_obj.log_message('a%sc', 'b') + self.assertEqual([mock.call.info('ERROR WSGI: a%sc', 'b')], + proto_obj.server.log.mock_calls) + + def test_swift_http_protocol_parse_request_no_proxy(self): + proto_obj = self._proto_obj() + proto_obj.raw_requestline = b'jimmy jam' + proto_obj.client_address = ('a', '123') + + self.assertEqual(False, proto_obj.parse_request()) + + self.assertEqual([ + mock.call(400, "Bad HTTP/0.9 request type ('jimmy')"), + ], proto_obj.send_error.mock_calls) + self.assertEqual(('a', '123'), proto_obj.client_address) + + def test_bad_request_line(self): + proto_obj = self._proto_obj() + proto_obj.raw_requestline = b'None //' + self.assertEqual(False, proto_obj.parse_request()) + + +class ProtocolTest(unittest.TestCase): + def _run_bytes_through_protocol(self, bytes_from_client, app=None): + rfile = BytesIO(bytes_from_client) + wfile = BytesIO() + + # All this fakery is needed to make the WSGI server process one + # connection, possibly with multiple requests, in the main + # greenthread. 
It doesn't hurt correctness if the function is called + # in a separate greenthread, but it makes using the debugger harder. + class FakeGreenthread(object): + def link(self, a_callable, *args): + a_callable(self, *args) + + class FakePool(object): + def spawn(self, a_callable, *args, **kwargs): + a_callable(*args, **kwargs) + return FakeGreenthread() + + def spawn_n(self, a_callable, *args, **kwargs): + a_callable(*args, **kwargs) + + def waitall(self): + pass + + addr = ('127.0.0.1', 8359) + fake_tcp_socket = mock.Mock( + setsockopt=lambda *a: None, + makefile=lambda mode, bufsize: rfile if 'r' in mode else wfile, + getsockname=lambda *a: addr + ) + fake_listen_socket = mock.Mock( + accept=mock.MagicMock( + side_effect=[[fake_tcp_socket, addr], + # KeyboardInterrupt breaks the WSGI server out of + # its infinite accept-process-close loop. + KeyboardInterrupt]), + getsockname=lambda *a: addr) + del fake_listen_socket.do_handshake + + # If we let the WSGI server close rfile/wfile then we can't access + # their contents any more. + with mock.patch.object(wfile, 'close', lambda: None), \ + mock.patch.object(rfile, 'close', lambda: None): + eventlet.wsgi.server( + fake_listen_socket, app or self.app, + protocol=self.protocol_class, + custom_pool=FakePool(), + log_output=False, # quiet the test run + ) + return wfile.getvalue() + + +class TestSwiftHttpProtocolSomeMore(ProtocolTest): + protocol_class = http_protocol.SwiftHttpProtocol + + @staticmethod + def app(env, start_response): + start_response("200 OK", []) + return [swob.wsgi_to_bytes(env['RAW_PATH_INFO'])] + + def test_simple(self): + bytes_out = self._run_bytes_through_protocol( + b"GET /someurl HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + ) + + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check + self.assertEqual(lines[-1], b'/someurl') + + def test_quoted(self): + bytes_out = self._run_bytes_through_protocol( + b"GET /some%fFpath%D8%AA HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + ) + + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check + self.assertEqual(lines[-1], b'/some%fFpath%D8%AA') + + def test_messy(self): + bytes_out = self._run_bytes_through_protocol( + b"GET /oh\xffboy%what$now%E2%80%bd HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + ) + + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual(lines[-1], b'/oh\xffboy%what$now%E2%80%bd') + + def test_bad_request(self): + bytes_out = self._run_bytes_through_protocol(( + b"ONLY-METHOD\r\n" + b"Server: example.com\r\n" + b"\r\n" + )) + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual( + lines[0], b"HTTP/1.1 400 Bad request syntax ('ONLY-METHOD')") + self.assertIn(b"Bad request syntax or unsupported method.", lines[-1]) + + def test_leading_slashes(self): + bytes_out = self._run_bytes_through_protocol(( + b"GET ///some-leading-slashes HTTP/1.0\r\n" + b"User-Agent: blah blah blah\r\n" + b"\r\n" + )) + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual(lines[-1], b'///some-leading-slashes') + + def test_request_lines(self): + def app(env, start_response): + start_response("200 OK", []) + if six.PY2: + return [json.dumps({ + 'RAW_PATH_INFO': env['RAW_PATH_INFO'].decode('latin1'), + 'QUERY_STRING': (None if 'QUERY_STRING' not in env else + env['QUERY_STRING'].decode('latin1')), + }).encode('ascii')] + return [json.dumps({ + 'RAW_PATH_INFO': 
env['RAW_PATH_INFO'], + 'QUERY_STRING': env.get('QUERY_STRING'), + }).encode('ascii')] + + def do_test(request_line, expected): + bytes_out = self._run_bytes_through_protocol( + request_line + b'\r\n\r\n', + app, + ) + print(bytes_out) + resp_body = bytes_out.partition(b'\r\n\r\n')[2] + self.assertEqual(json.loads(resp_body), expected) + + do_test(b'GET / HTTP/1.1', { + 'RAW_PATH_INFO': u'/', + 'QUERY_STRING': None, + }) + do_test(b'GET /%FF HTTP/1.1', { + 'RAW_PATH_INFO': u'/%FF', + 'QUERY_STRING': None, + }) + + do_test(b'GET /\xff HTTP/1.1', { + 'RAW_PATH_INFO': u'/\xff', + 'QUERY_STRING': None, + }) + do_test(b'PUT /Here%20Is%20A%20SnowMan:\xe2\x98\x83 HTTP/1.0', { + 'RAW_PATH_INFO': u'/Here%20Is%20A%20SnowMan:\xe2\x98\x83', + 'QUERY_STRING': None, + }) + do_test( + b'POST /?and%20it=does+nothing+to+params&' + b'PALMTREE=\xf0%9f\x8c%b4 HTTP/1.1', { + 'RAW_PATH_INFO': u'/', + 'QUERY_STRING': (u'and%20it=does+nothing+to+params' + u'&PALMTREE=\xf0%9f\x8c%b4'), + } + ) + do_test(b'GET // HTTP/1.1', { + 'RAW_PATH_INFO': u'//', + 'QUERY_STRING': None, + }) + do_test(b'GET //bar HTTP/1.1', { + 'RAW_PATH_INFO': u'//bar', + 'QUERY_STRING': None, + }) + do_test(b'GET //////baz HTTP/1.1', { + 'RAW_PATH_INFO': u'//////baz', + 'QUERY_STRING': None, + }) + + +class TestProxyProtocol(ProtocolTest): + protocol_class = http_protocol.SwiftHttpProxiedProtocol + + @staticmethod + def app(env, start_response): + start_response("200 OK", []) + body = '\r\n'.join([ + 'got addr: %s %s' % ( + env.get("REMOTE_ADDR", ""), + env.get("REMOTE_PORT", "")), + 'on addr: %s %s' % ( + env.get("SERVER_ADDR", ""), + env.get("SERVER_PORT", "")), + 'https is %s (scheme %s)' % ( + env.get("HTTPS", ""), + env.get("wsgi.url_scheme", "")), + ]) + '\r\n' + return [body.encode("utf-8")] + + def test_request_with_proxy(self): + bytes_out = self._run_bytes_through_protocol( + b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 4433\r\n" + b"GET /someurl HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + ) + + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check + self.assertEqual(lines[-3:], [ + b"got addr: 192.168.0.1 56423", + b"on addr: 192.168.0.11 4433", + b"https is (scheme http)", + ]) + + def test_request_with_proxy_https(self): + bytes_out = self._run_bytes_through_protocol( + b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n" + b"GET /someurl HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + ) + + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check + self.assertEqual(lines[-3:], [ + b"got addr: 192.168.0.1 56423", + b"on addr: 192.168.0.11 443", + b"https is on (scheme https)", + ]) + + def test_multiple_requests_with_proxy(self): + bytes_out = self._run_bytes_through_protocol( + b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n" + b"GET /someurl HTTP/1.1\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + b"GET /otherurl HTTP/1.1\r\n" + b"User-Agent: something or other\r\n" + b"Connection: close\r\n" + b"\r\n" + ) + + lines = bytes_out.split(b"\r\n") + self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check + + # the address in the PROXY line is applied to every request + addr_lines = [l for l in lines if l.startswith(b"got addr")] + self.assertEqual(addr_lines, [b"got addr: 192.168.0.1 56423"] * 2) + addr_lines = [l for l in lines if l.startswith(b"on addr")] + self.assertEqual(addr_lines, [b"on addr: 192.168.0.11 443"] * 2) + addr_lines = [l for l in lines if 
l.startswith(b"https is")] + self.assertEqual(addr_lines, [b"https is on (scheme https)"] * 2) + + def test_missing_proxy_line(self): + bytes_out = self._run_bytes_through_protocol( + # whoops, no PROXY line here + b"GET /someurl HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n" + ) + + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertIn(b"400 Invalid PROXY line", lines[0]) + + def test_malformed_proxy_lines(self): + for bad_line in [b'PROXY jojo', + b'PROXYjojo a b c d e', + b'PROXY a b c d e', # bad INET protocol and family + ]: + bytes_out = self._run_bytes_through_protocol(bad_line) + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertIn(b"400 Invalid PROXY line", lines[0]) + + def test_unknown_client_addr(self): + # For "UNKNOWN", the rest of the line before the CRLF may be omitted by + # the sender, and the receiver must ignore anything presented before + # the CRLF is found. + for unknown_line in [b'PROXY UNKNOWN', # mimimal valid unknown + b'PROXY UNKNOWNblahblah', # also valid + b'PROXY UNKNOWN a b c d']: + bytes_out = self._run_bytes_through_protocol( + unknown_line + (b"\r\n" + b"GET /someurl HTTP/1.0\r\n" + b"User-Agent: something or other\r\n" + b"\r\n") + ) + lines = [l for l in bytes_out.split(b"\r\n") if l] + self.assertIn(b"200 OK", lines[0]) + + def test_address_and_environ(self): + # Make an object we can exercise... note the base class's __init__() + # does a bunch of work, so we just new up an object like eventlet.wsgi + # does. + dummy_env = {'OTHER_ENV_KEY': 'OTHER_ENV_VALUE'} + mock_protocol = mock.Mock(get_environ=lambda s: dummy_env) + patcher = mock.patch( + 'swift.common.http_protocol.SwiftHttpProtocol', mock_protocol + ) + self.mock_super = patcher.start() + self.addCleanup(patcher.stop) + + proto_class = http_protocol.SwiftHttpProxiedProtocol + try: + proxy_obj = types.InstanceType(proto_class) + except AttributeError: + proxy_obj = proto_class.__new__(proto_class) + + # Install some convenience mocks + proxy_obj.server = Namespace(app=Namespace(logger=mock.Mock()), + url_length_limit=777, + log=mock.Mock()) + proxy_obj.send_error = mock.Mock() + + proxy_obj.rfile = BytesIO( + b'PROXY TCP4 111.111.111.111 222.222.222.222 111 222' + ) + + assert proxy_obj.handle() + + self.assertEqual(proxy_obj.client_address, ('111.111.111.111', '111')) + self.assertEqual(proxy_obj.proxy_address, ('222.222.222.222', '222')) + expected_env = { + 'SERVER_PORT': '222', + 'SERVER_ADDR': '222.222.222.222', + 'OTHER_ENV_KEY': 'OTHER_ENV_VALUE' + } + self.assertEqual(proxy_obj.get_environ(), expected_env) diff -Nru swift-2.30.0/test/unit/common/test_wsgi.py swift-2.30.1/test/unit/common/test_wsgi.py --- swift-2.30.0/test/unit/common/test_wsgi.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/test/unit/common/test_wsgi.py 2023-01-30 23:23:08.000000000 +0000 @@ -15,20 +15,16 @@ """Tests for swift.common.wsgi""" -from argparse import Namespace import errno import logging import socket import unittest import os -import types -import eventlet.wsgi from collections import defaultdict from io import BytesIO from textwrap import dedent -import six from six.moves.urllib.parse import quote import mock @@ -40,7 +36,7 @@ import swift.obj.server as obj_server import swift.container.server as container_server import swift.account.server as account_server -from swift.common.swob import Request, wsgi_to_bytes +from swift.common.swob import Request from swift.common import wsgi, utils from swift.common.storage_policy import POLICIES @@ -1064,335 +1060,6 
@@ self.assertIs(newenv.get('swift.infocache'), oldenv['swift.infocache']) -class TestSwiftHttpProtocol(unittest.TestCase): - def _proto_obj(self): - # Make an object we can exercise... note the base class's __init__() - # does a bunch of work, so we just new up an object like eventlet.wsgi - # does. - proto_class = wsgi.SwiftHttpProtocol - try: - the_obj = types.InstanceType(proto_class) - except AttributeError: - the_obj = proto_class.__new__(proto_class) - # Install some convenience mocks - the_obj.server = Namespace(app=Namespace(logger=mock.Mock()), - url_length_limit=777, - log=mock.Mock()) - the_obj.send_error = mock.Mock() - - return the_obj - - def test_swift_http_protocol_log_request(self): - proto_obj = self._proto_obj() - self.assertEqual(None, proto_obj.log_request('ignored')) - - def test_swift_http_protocol_log_message(self): - proto_obj = self._proto_obj() - - proto_obj.log_message('a%sc', 'b') - self.assertEqual([mock.call.error('ERROR WSGI: a%sc', 'b')], - proto_obj.server.app.logger.mock_calls) - - def test_swift_http_protocol_log_message_no_logger(self): - # If the app somehow had no logger attribute or it was None, don't blow - # up - proto_obj = self._proto_obj() - delattr(proto_obj.server.app, 'logger') - - proto_obj.log_message('a%sc', 'b') - self.assertEqual([mock.call.info('ERROR WSGI: a%sc', 'b')], - proto_obj.server.log.mock_calls) - - proto_obj.server.log.reset_mock() - proto_obj.server.app.logger = None - - proto_obj.log_message('a%sc', 'b') - self.assertEqual([mock.call.info('ERROR WSGI: a%sc', 'b')], - proto_obj.server.log.mock_calls) - - def test_swift_http_protocol_parse_request_no_proxy(self): - proto_obj = self._proto_obj() - proto_obj.raw_requestline = b'jimmy jam' - proto_obj.client_address = ('a', '123') - - self.assertEqual(False, proto_obj.parse_request()) - - self.assertEqual([ - mock.call(400, "Bad HTTP/0.9 request type ('jimmy')"), - ], proto_obj.send_error.mock_calls) - self.assertEqual(('a', '123'), proto_obj.client_address) - - def test_request_line_cleanup(self): - def do_test(line_from_socket, expected_line=None): - if expected_line is None: - expected_line = line_from_socket - - proto_obj = self._proto_obj() - proto_obj.raw_requestline = line_from_socket - with mock.patch('swift.common.wsgi.wsgi.HttpProtocol') \ - as mock_super: - proto_obj.parse_request() - - self.assertEqual([mock.call.parse_request(proto_obj)], - mock_super.mock_calls) - self.assertEqual(proto_obj.raw_requestline, expected_line) - - do_test(b'GET / HTTP/1.1') - do_test(b'GET /%FF HTTP/1.1') - - if not six.PY2: - do_test(b'GET /\xff HTTP/1.1', b'GET /%FF HTTP/1.1') - do_test(b'PUT /Here%20Is%20A%20SnowMan:\xe2\x98\x83 HTTP/1.0', - b'PUT /Here%20Is%20A%20SnowMan%3A%E2%98%83 HTTP/1.0') - do_test( - b'POST /?and%20it=fixes+params&' - b'PALMTREE=\xf0%9f\x8c%b4 HTTP/1.1', - b'POST /?and+it=fixes+params&PALMTREE=%F0%9F%8C%B4 HTTP/1.1') - - -class ProtocolTest(unittest.TestCase): - def _run_bytes_through_protocol(self, bytes_from_client): - rfile = BytesIO(bytes_from_client) - wfile = BytesIO() - - # All this fakery is needed to make the WSGI server process one - # connection, possibly with multiple requests, in the main - # greenthread. It doesn't hurt correctness if the function is called - # in a separate greenthread, but it makes using the debugger harder. 
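The deleted test_request_line_cleanup above pinned the old behavior: request lines were unquoted and re-quoted before parsing, so b'GET /\xff HTTP/1.1' became b'GET /%FF HTTP/1.1'. The replacement protocol keeps the bytes exactly as received, which is what the new RAW_PATH_INFO tests in test_http_protocol.py assert; a standalone sketch (ours) of the preserved splitting:

    def split_request_line(raw_requestline):
        # Split on single spaces only and keep the target untouched:
        # no re-quoting, no collapsing of repeated leading slashes.
        words = raw_requestline.decode('iso-8859-1').rstrip('\r\n').split(' ')
        command, path, version = words
        return command, path, version

    assert split_request_line(b'GET //////baz HTTP/1.1')[1] == '//////baz'
    assert split_request_line(b'GET /%FF HTTP/1.1')[1] == '/%FF'
    assert split_request_line(b'GET /\xff HTTP/1.1')[1] == '/\xff'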
- class FakeGreenthread(object): - def link(self, a_callable, *args): - a_callable(self, *args) - - class FakePool(object): - def spawn(self, a_callable, *args, **kwargs): - a_callable(*args, **kwargs) - return FakeGreenthread() - - def spawn_n(self, a_callable, *args, **kwargs): - a_callable(*args, **kwargs) - - def waitall(self): - pass - - addr = ('127.0.0.1', 8359) - fake_tcp_socket = mock.Mock( - setsockopt=lambda *a: None, - makefile=lambda mode, bufsize: rfile if 'r' in mode else wfile, - getsockname=lambda *a: addr - ) - fake_listen_socket = mock.Mock( - accept=mock.MagicMock( - side_effect=[[fake_tcp_socket, addr], - # KeyboardInterrupt breaks the WSGI server out of - # its infinite accept-process-close loop. - KeyboardInterrupt]), - getsockname=lambda *a: addr) - del fake_listen_socket.do_handshake - - # If we let the WSGI server close rfile/wfile then we can't access - # their contents any more. - with mock.patch.object(wfile, 'close', lambda: None), \ - mock.patch.object(rfile, 'close', lambda: None): - eventlet.wsgi.server( - fake_listen_socket, self.app, - protocol=self.protocol_class, - custom_pool=FakePool(), - log_output=False, # quiet the test run - ) - return wfile.getvalue() - - -class TestSwiftHttpProtocolSomeMore(ProtocolTest): - protocol_class = wsgi.SwiftHttpProtocol - - @staticmethod - def app(env, start_response): - start_response("200 OK", []) - return [wsgi_to_bytes(env['RAW_PATH_INFO'])] - - def test_simple(self): - bytes_out = self._run_bytes_through_protocol(( - b"GET /someurl HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - )) - - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check - self.assertEqual(lines[-1], b'/someurl') - - def test_quoted(self): - bytes_out = self._run_bytes_through_protocol(( - b"GET /some%fFpath%D8%AA HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - )) - - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check - self.assertEqual(lines[-1], b'/some%fFpath%D8%AA') - - def test_messy(self): - bytes_out = self._run_bytes_through_protocol(( - b"GET /oh\xffboy%what$now%E2%80%bd HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - )) - - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertEqual(lines[-1], b'/oh\xffboy%what$now%E2%80%bd') - - -class TestProxyProtocol(ProtocolTest): - protocol_class = wsgi.SwiftHttpProxiedProtocol - - @staticmethod - def app(env, start_response): - start_response("200 OK", []) - body = '\r\n'.join([ - 'got addr: %s %s' % ( - env.get("REMOTE_ADDR", ""), - env.get("REMOTE_PORT", "")), - 'on addr: %s %s' % ( - env.get("SERVER_ADDR", ""), - env.get("SERVER_PORT", "")), - 'https is %s (scheme %s)' % ( - env.get("HTTPS", ""), - env.get("wsgi.url_scheme", "")), - ]) + '\r\n' - return [body.encode("utf-8")] - - def test_request_with_proxy(self): - bytes_out = self._run_bytes_through_protocol(( - b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 4433\r\n" - b"GET /someurl HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - )) - - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check - self.assertEqual(lines[-3:], [ - b"got addr: 192.168.0.1 56423", - b"on addr: 192.168.0.11 4433", - b"https is (scheme http)", - ]) - - def test_request_with_proxy_https(self): - bytes_out = self._run_bytes_through_protocol(( - b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n" - b"GET 
/someurl HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - )) - - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check - self.assertEqual(lines[-3:], [ - b"got addr: 192.168.0.1 56423", - b"on addr: 192.168.0.11 443", - b"https is on (scheme https)", - ]) - - def test_multiple_requests_with_proxy(self): - bytes_out = self._run_bytes_through_protocol(( - b"PROXY TCP4 192.168.0.1 192.168.0.11 56423 443\r\n" - b"GET /someurl HTTP/1.1\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - b"GET /otherurl HTTP/1.1\r\n" - b"User-Agent: something or other\r\n" - b"Connection: close\r\n" - b"\r\n" - )) - - lines = bytes_out.split(b"\r\n") - self.assertEqual(lines[0], b"HTTP/1.1 200 OK") # sanity check - - # the address in the PROXY line is applied to every request - addr_lines = [l for l in lines if l.startswith(b"got addr")] - self.assertEqual(addr_lines, [b"got addr: 192.168.0.1 56423"] * 2) - addr_lines = [l for l in lines if l.startswith(b"on addr")] - self.assertEqual(addr_lines, [b"on addr: 192.168.0.11 443"] * 2) - addr_lines = [l for l in lines if l.startswith(b"https is")] - self.assertEqual(addr_lines, [b"https is on (scheme https)"] * 2) - - def test_missing_proxy_line(self): - bytes_out = self._run_bytes_through_protocol(( - # whoops, no PROXY line here - b"GET /someurl HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n" - )) - - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertIn(b"400 Invalid PROXY line", lines[0]) - - def test_malformed_proxy_lines(self): - for bad_line in [b'PROXY jojo', - b'PROXYjojo a b c d e', - b'PROXY a b c d e', # bad INET protocol and family - ]: - bytes_out = self._run_bytes_through_protocol(bad_line) - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertIn(b"400 Invalid PROXY line", lines[0]) - - def test_unknown_client_addr(self): - # For "UNKNOWN", the rest of the line before the CRLF may be omitted by - # the sender, and the receiver must ignore anything presented before - # the CRLF is found. - for unknown_line in [b'PROXY UNKNOWN', # mimimal valid unknown - b'PROXY UNKNOWNblahblah', # also valid - b'PROXY UNKNOWN a b c d']: - bytes_out = self._run_bytes_through_protocol(( - unknown_line + (b"\r\n" - b"GET /someurl HTTP/1.0\r\n" - b"User-Agent: something or other\r\n" - b"\r\n") - )) - lines = [l for l in bytes_out.split(b"\r\n") if l] - self.assertIn(b"200 OK", lines[0]) - - def test_address_and_environ(self): - # Make an object we can exercise... note the base class's __init__() - # does a bunch of work, so we just new up an object like eventlet.wsgi - # does. 
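For reference, the environ overlay that this removed test (and its new copy in test_http_protocol.py) checks is small; a standalone sketch (ours) mirroring what SwiftHttpProxiedProtocol.get_environ adds:

    def overlay_proxy_environ(environ, proxy_address):
        # The TLS terminator's address becomes the server address; port
        # 443 additionally flips the URL scheme to https.
        environ['SERVER_ADDR'], environ['SERVER_PORT'] = proxy_address
        if proxy_address[1] == '443':
            environ['wsgi.url_scheme'] = 'https'
            environ['HTTPS'] = 'on'
        return environ

    print(overlay_proxy_environ({}, ('222.222.222.222', '222')))
    # {'SERVER_ADDR': '222.222.222.222', 'SERVER_PORT': '222'}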
- dummy_env = {'OTHER_ENV_KEY': 'OTHER_ENV_VALUE'} - mock_protocol = mock.Mock(get_environ=lambda s: dummy_env) - patcher = mock.patch( - 'swift.common.wsgi.SwiftHttpProtocol', mock_protocol - ) - self.mock_super = patcher.start() - self.addCleanup(patcher.stop) - - proto_class = wsgi.SwiftHttpProxiedProtocol - try: - proxy_obj = types.InstanceType(proto_class) - except AttributeError: - proxy_obj = proto_class.__new__(proto_class) - - # Install some convenience mocks - proxy_obj.server = Namespace(app=Namespace(logger=mock.Mock()), - url_length_limit=777, - log=mock.Mock()) - proxy_obj.send_error = mock.Mock() - - proxy_obj.rfile = BytesIO( - b'PROXY TCP4 111.111.111.111 222.222.222.222 111 222' - ) - - assert proxy_obj.handle() - - self.assertEqual(proxy_obj.client_address, ('111.111.111.111', '111')) - self.assertEqual(proxy_obj.proxy_address, ('222.222.222.222', '222')) - expected_env = { - 'SERVER_PORT': '222', - 'SERVER_ADDR': '222.222.222.222', - 'OTHER_ENV_KEY': 'OTHER_ENV_VALUE' - } - self.assertEqual(proxy_obj.get_environ(), expected_env) - - class CommonTestMixin(object): @mock.patch('swift.common.wsgi.capture_stdio') diff -Nru swift-2.30.0/test/unit/helpers.py swift-2.30.1/test/unit/helpers.py --- swift-2.30.0/test/unit/helpers.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/test/unit/helpers.py 2023-01-30 23:23:08.000000000 +0000 @@ -40,7 +40,7 @@ from swift.common.middleware import listing_formats, proxy_logging from swift.common import utils from swift.common.utils import mkdirs, normalize_timestamp, NullLogger -from swift.common.wsgi import SwiftHttpProtocol +from swift.common.http_protocol import SwiftHttpProtocol from swift.container import server as container_server from swift.obj import server as object_server from swift.proxy import server as proxy_server diff -Nru swift-2.30.0/test/unit/proxy/test_server.py swift-2.30.1/test/unit/proxy/test_server.py --- swift-2.30.0/test/unit/proxy/test_server.py 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/test/unit/proxy/test_server.py 2023-01-30 23:23:08.000000000 +0000 @@ -71,7 +71,8 @@ from swift.common.utils import hash_path, storage_directory, \ parse_content_type, parse_mime_headers, StatsdClient, \ iter_multipart_mime_documents, public, mkdirs, NullLogger, md5 -from swift.common.wsgi import loadapp, ConfigString, SwiftHttpProtocol +from swift.common.wsgi import loadapp, ConfigString +from swift.common.http_protocol import SwiftHttpProtocol from swift.proxy.controllers import base as proxy_base from swift.proxy.controllers.base import get_cache_key, cors_validation, \ get_account_info, get_container_info diff -Nru swift-2.30.0/tools/playbooks/common/install_dependencies.yaml swift-2.30.1/tools/playbooks/common/install_dependencies.yaml --- swift-2.30.0/tools/playbooks/common/install_dependencies.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/common/install_dependencies.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -35,11 +35,23 @@ - name: install python modules with pip pip: name={{ item }} state=present extra_args='--upgrade' with_items: - - crudini + # For some reason, pip on py2 isn't smart enough to prevent us + # trying to install a too-new mock?? 
+ - 'mock<4' + # crudini pulls in iniparse which can conflict with distro-installed + # packages on centos7 + - 'crudini<0.9.4' + # Order matters; install constrained versions *first*, then unconstrained - eventlet - nose - pyeclib - python-swiftclient - # For some reason, pip on py2 isn't smart enough to prevent us - # trying to install a too-new mock?? - - 'mock<4' + + - name: install PasteDeploy - CentOS 7 + pip: name={{ item }} state=present extra_args='--upgrade' + with_items: + # py2_constraints isn't obeyed by pip install swift + - 'PasteDeploy==2.1.1' + when: + - ansible_facts['distribution'] == "CentOS" + - ansible_facts['distribution_major_version'] == "7" diff -Nru swift-2.30.0/tools/playbooks/dsvm/pre.yaml swift-2.30.1/tools/playbooks/dsvm/pre.yaml --- swift-2.30.0/tools/playbooks/dsvm/pre.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/dsvm/pre.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -6,7 +6,13 @@ bindep_profile: test bindep_dir: "{{ zuul_work_dir }}" - test-setup - - ensure-tox + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" - additional-tempauth-users - additional-keystone-users - dsvm-additional-middlewares diff -Nru swift-2.30.0/tools/playbooks/multinode_setup/common_config.yaml swift-2.30.1/tools/playbooks/multinode_setup/common_config.yaml --- swift-2.30.0/tools/playbooks/multinode_setup/common_config.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/multinode_setup/common_config.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -69,7 +69,7 @@ - hosts: test-runner tasks: - name: add new env. 
variable for running tests - lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/home/{{ ansible_ssh_user}}/test.conf" + lineinfile: dest=/home/{{ ansible_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/home/{{ ansible_user}}/test.conf" - name: copy the sample configuration files for running tests - template: src=test.conf.j2 dest=/home/{{ ansible_ssh_user }}/test.conf + template: src=test.conf.j2 dest=/home/{{ ansible_user }}/test.conf diff -Nru swift-2.30.0/tools/playbooks/multinode_setup/make_rings.yaml swift-2.30.1/tools/playbooks/multinode_setup/make_rings.yaml --- swift-2.30.0/tools/playbooks/multinode_setup/make_rings.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/multinode_setup/make_rings.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -126,20 +126,20 @@ when: previous_swift_version | default(latest_swift_release.stdout) is not match("^(rocky|stein|train|ussuri)-em.*|^2\.(1?[0-9]|2[0-5])\.|^(origin/)?stable/[a-u].*") - name: create remakerings from template - template: src=make_multinode_rings.j2 dest=/home/{{ ansible_ssh_user }}/make_multinode_rings mode=0755 + template: src=make_multinode_rings.j2 dest=/home/{{ ansible_user }}/make_multinode_rings mode=0755 - name: create rings dir file: > - path=/home/{{ ansible_ssh_user }}/rings + path=/home/{{ ansible_user }}/rings state=directory - name: make rings shell: - cmd: /home/{{ ansible_ssh_user }}/make_multinode_rings + cmd: /home/{{ ansible_user }}/make_multinode_rings executable: /bin/bash - name: scp rings to all swift-cluster nodes - command: scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=5 -o ConnectionAttempts=360 /home/{{ ansible_ssh_user }}/rings/{{ item[0] }} {{ ansible_ssh_user }}@{{ hostvars[item[1]].nodepool.public_ipv4 }}:/etc/swift + command: scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=5 -o ConnectionAttempts=360 /home/{{ ansible_user }}/rings/{{ item[0] }} {{ ansible_user }}@{{ hostvars[item[1]].nodepool.public_ipv4 }}:/etc/swift with_nested: - ['account.ring.gz', 'container.ring.gz', 'object.ring.gz', 'object-1.ring.gz', 'object-2.ring.gz'] - "{{ groups['swift-cluster'] }}" diff -Nru swift-2.30.0/tools/playbooks/multinode_setup/pre.yaml swift-2.30.1/tools/playbooks/multinode_setup/pre.yaml --- swift-2.30.0/tools/playbooks/multinode_setup/pre.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/multinode_setup/pre.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -4,4 +4,10 @@ - role: bindep bindep_dir: "{{ zuul_work_dir }}" - test-setup - - ensure-tox + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. 
We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" diff -Nru swift-2.30.0/tools/playbooks/multinode_setup/run.yaml swift-2.30.1/tools/playbooks/multinode_setup/run.yaml --- swift-2.30.0/tools/playbooks/multinode_setup/run.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/multinode_setup/run.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -40,4 +40,4 @@ tox_envlist: func-py3 tox_environment: TOX_CONSTRAINTS_FILE: https://releases.openstack.org/constraints/upper/yoga - SWIFT_TEST_CONFIG_FILE: /home/{{ ansible_ssh_user }}/test.conf + SWIFT_TEST_CONFIG_FILE: /home/{{ ansible_user }}/test.conf diff -Nru swift-2.30.0/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2 swift-2.30.1/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2 --- swift-2.30.0/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/multinode_setup/templates/make_multinode_rings.j2 2023-01-30 23:23:08.000000000 +0000 @@ -2,7 +2,7 @@ set -e -cd /home/{{ ansible_ssh_user }}/rings +cd /home/{{ ansible_user }}/rings rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz diff -Nru swift-2.30.0/tools/playbooks/s3api-tests/run.yaml swift-2.30.1/tools/playbooks/s3api-tests/run.yaml --- swift-2.30.0/tools/playbooks/s3api-tests/run.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/s3api-tests/run.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -14,7 +14,13 @@ # limitations under the License. - hosts: all roles: - - ensure-tox + # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 + # released after zed was released and has some incompatible changes + # and it is ok not to fix the issues caused by tox 4.0.0 in stable + # beanches jobs. We can continue testing the stable/zed and lower + # branches with tox<4.0.0 + - role: ensure-tox + ensure_tox_version: "<4" tasks: - name: Run s3api tests include_role: diff -Nru swift-2.30.0/tools/playbooks/saio_single_node_setup/setup_saio.yaml swift-2.30.1/tools/playbooks/saio_single_node_setup/setup_saio.yaml --- swift-2.30.0/tools/playbooks/saio_single_node_setup/setup_saio.yaml 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/playbooks/saio_single_node_setup/setup_saio.yaml 2023-01-30 23:23:08.000000000 +0000 @@ -161,22 +161,22 @@ create: no - name: copy the SAIO scripts for resetting the environment - command: cp -r {{ zuul.project.src_dir }}/doc/saio/bin /home/{{ ansible_ssh_user }}/bin creates=/home/{{ ansible_ssh_user }}/bin + command: cp -r {{ zuul.project.src_dir }}/doc/saio/bin /home/{{ ansible_user }}/bin creates=/home/{{ ansible_user }}/bin - name: set the correct file mode for SAIO scripts - file: dest=/home/{{ ansible_ssh_user }}/bin mode=0777 recurse=yes + file: dest=/home/{{ ansible_user }}/bin mode=0777 recurse=yes - name: add new env. variable for loopback device - lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SAIO_BLOCK_DEVICE=/srv/swift-disk" + lineinfile: dest=/home/{{ ansible_user }}/.bashrc line="export SAIO_BLOCK_DEVICE=/srv/swift-disk" - name: remove line from resetswift - lineinfile: dest=/home/{{ ansible_ssh_user }}/bin/resetswift line="sudo find /var/log/swift -type f -exec rm -f {} \;" state=absent + lineinfile: dest=/home/{{ ansible_user }}/bin/resetswift line="sudo find /var/log/swift -type f -exec rm -f {} \;" state=absent - name: add new env. 
variable for running tests - lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf" + lineinfile: dest=/home/{{ ansible_user }}/.bashrc line="export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf" - name: make sure PATH includes the bin directory - lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="export PATH=${PATH}:/home/{{ ansible_ssh_user }}/bin" + lineinfile: dest=/home/{{ ansible_user }}/.bashrc line="export PATH=${PATH}:/home/{{ ansible_user }}/bin" - name: increase open files limit to run probe tests - lineinfile: dest=/home/{{ ansible_ssh_user }}/.bashrc line="ulimit -n 4096" + lineinfile: dest=/home/{{ ansible_user }}/.bashrc line="ulimit -n 4096" diff -Nru swift-2.30.0/tools/test-setup.sh swift-2.30.1/tools/test-setup.sh --- swift-2.30.0/tools/test-setup.sh 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tools/test-setup.sh 2023-01-30 23:23:08.000000000 +0000 @@ -25,6 +25,11 @@ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ cat /etc/*release | grep -q 'release 8' } +function is_rhel9 { + [ -f /usr/bin/dnf ] && \ + cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ + cat /etc/*release | grep -q 'release 9' +} if is_rhel7; then @@ -40,3 +45,10 @@ sudo dnf install -y centos-release-openstack-xena sudo dnf install -y liberasurecode-devel fi + +if is_rhel9; then + # Install CentOS OpenStack repos so that we have access to some extra + # packages. + sudo dnf install -y centos-release-openstack-yoga + sudo dnf install -y liberasurecode-devel +fi diff -Nru swift-2.30.0/tox.ini swift-2.30.1/tox.ini --- swift-2.30.0/tox.ini 2022-08-18 05:21:45.000000000 +0000 +++ swift-2.30.1/tox.ini 2023-01-30 23:23:08.000000000 +0000 @@ -11,7 +11,7 @@ NOSE_COVER_BRANCHES=1 NOSE_COVER_HTML_DIR={toxinidir}/cover deps = - -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/zed} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = @@ -104,8 +104,9 @@ [testenv:docs] basepython = python3 deps = - -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/zed} -r{toxinidir}/doc/requirements.txt + -r{toxinidir}/requirements.txt commands = sphinx-build -W -b html doc/source doc/build/html [testenv:api-ref] @@ -157,7 +158,7 @@ [testenv:releasenotes] basepython = python3 deps = - -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/zed} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
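
Note for reviewers tracing the test/unit/common/test_wsgi.py removal above: the deleted test exercised SwiftHttpProxiedProtocol's handling of a PROXY protocol v1 preamble, functionality that (per the file summary) now lives in swift/common/http_protocol.py with tests in test/unit/common/test_http_protocol.py. The following is a rough, hypothetical sketch of the parsing the test asserts; the helper name parse_proxy_v1 and its structure are illustrative only, not Swift's actual implementation.

    # Hypothetical sketch, not Swift's code: parse a PROXY protocol v1
    # preamble of the form
    #   PROXY TCP4 <client-ip> <proxy-ip> <client-port> <proxy-port>
    # The removed test fed b'PROXY TCP4 111.111.111.111 222.222.222.222 111 222'
    # and expected client_address=('111.111.111.111', '111') and
    # proxy_address=('222.222.222.222', '222'), with the proxy side becoming
    # SERVER_ADDR/SERVER_PORT in the WSGI environ.
    def parse_proxy_v1(line):
        parts = line.decode('latin-1').split()
        if len(parts) != 6 or parts[0] != 'PROXY' \
                or parts[1] not in ('TCP4', 'TCP6'):
            raise ValueError('invalid PROXY protocol v1 preamble: %r' % line)
        _, _, src_ip, dst_ip, src_port, dst_port = parts
        client_address = (src_ip, src_port)  # the original client
        proxy_address = (dst_ip, dst_port)   # the address the client hit
        return client_address, proxy_address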