Version in base suite: 20.11.6-1~deb11u1
Base version: dpdk_20.11.6-1~deb11u1
Target version: dpdk_20.11.7-1~deb11u1
Base file: /srv/ftp-master.debian.org/ftp/pool/main/d/dpdk/dpdk_20.11.6-1~deb11u1.dsc
Target file: /srv/ftp-master.debian.org/policy/pool/main/d/dpdk/dpdk_20.11.7-1~deb11u1.dsc

 .github/workflows/build.yml | 19
 VERSION | 2
 app/test-pmd/cmdline.c | 50 ++
 app/test-pmd/config.c | 4
 app/test-pmd/csumonly.c | 6
 app/test-pmd/meson.build | 1
 app/test-pmd/noisy_vnf.c | 2
 app/test-pmd/testpmd.c | 6
 app/test-pmd/testpmd.h | 3
 app/test/meson.build | 2
 app/test/test_common.c | 52 +-
 app/test/test_cryptodev.c | 20
 app/test/test_cryptodev_asym.c | 4
 app/test/test_cryptodev_security_pdcp_test_vectors.h | 280 ++++++-------
 app/test/test_efd_perf.c | 1
 app/test/test_event_timer_adapter.c | 2
 app/test/test_hash_perf.c | 11
 app/test/test_hash_readwrite_lf_perf.c | 1
 app/test/test_ipsec.c | 9
 app/test/test_member.c | 1
 app/test/test_member_perf.c | 1
 app/test/test_service_cores.c | 49 +-
 app/test/test_trace.c | 59 +-
 app/test/test_trace.h | 2
 config/arm/meson.build | 2
 config/meson.build | 2
 config/x86/meson.build | 2
 debian/changelog | 8
 debian/librte-vhost21.symbols | 1
 devtools/checkpatches.sh | 35 -
 doc/guides/contributing/abi_policy.rst | 2
 doc/guides/contributing/abi_versioning.rst | 2
 doc/guides/cryptodevs/armv8.rst | 2
 doc/guides/cryptodevs/bcmfs.rst | 2
 doc/guides/freebsd_gsg/build_dpdk.rst | 2
 doc/guides/howto/openwrt.rst | 2
 doc/guides/linux_gsg/build_dpdk.rst | 8
 doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst | 4
 doc/guides/nics/ark.rst | 2
 doc/guides/nics/index.rst | 8
 doc/guides/nics/mlx5.rst | 2
 doc/guides/nics/mvneta.rst | 3
 doc/guides/nics/mvpp2.rst | 3
 doc/guides/nics/virtio.rst | 2
 doc/guides/platform/bluefield.rst | 4
 doc/guides/platform/octeontx.rst | 8
 doc/guides/platform/octeontx2.rst | 4
 doc/guides/prog_guide/build-sdk-meson.rst | 39 +
 doc/guides/prog_guide/event_timer_adapter.rst | 11
 doc/guides/prog_guide/lto.rst | 2
 doc/guides/prog_guide/profile_app.rst | 2
 doc/guides/prog_guide/ring_lib.rst | 2
 doc/guides/prog_guide/trace_lib.rst | 14
 doc/guides/prog_guide/vhost_lib.rst | 6
 doc/guides/rel_notes/release_20_11.rst | 336 ++++++++++++++++
 doc/guides/sample_app_ug/vm_power_management.rst | 4
 doc/guides/testpmd_app_ug/testpmd_funcs.rst | 8
 doc/guides/tools/proc_info.rst | 16
 doc/guides/windows_gsg/build_dpdk.rst | 4
 drivers/baseband/acc100/rte_acc100_pmd.c | 112 +++--
 drivers/bus/dpaa/base/qbman/bman.h | 4
 drivers/common/iavf/iavf_adminq.c | 3
 drivers/common/mlx5/linux/meson.build | 2
 drivers/common/sfc_efx/base/ef10_nic.c | 2
 drivers/crypto/kasumi/rte_kasumi_pmd.c | 7
 drivers/crypto/qat/qat_sym_session.c | 23 -
 drivers/crypto/snow3g/rte_snow3g_pmd.c | 7
 drivers/event/dlb2/dlb2.c | 9
 drivers/event/dsw/dsw_evdev.h | 8
 drivers/event/dsw/dsw_event.c | 313 ++++++++----
 drivers/event/sw/sw_evdev.c | 4
 drivers/event/sw/sw_evdev_selftest.c | 5
 drivers/net/atlantic/atl_rxtx.c | 5
 drivers/net/axgbe/axgbe_rxtx.c | 117 +++--
 drivers/net/axgbe/axgbe_rxtx.h | 6
 drivers/net/bnxt/bnxt_ethdev.c | 5
 drivers/net/bnxt/bnxt_hwrm.c | 2
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 4
 drivers/net/bonding/rte_eth_bond_api.c | 5
 drivers/net/bonding/rte_eth_bond_pmd.c | 121 +++--
 drivers/net/dpaa/dpaa_ethdev.c | 5
 drivers/net/dpaa/dpaa_flow.c | 13
 drivers/net/dpaa/dpaa_flow.h | 5
 drivers/net/dpaa/dpaa_rxtx.c | 23 -
 drivers/net/ena/ena_ethdev.c | 10
 drivers/net/hns3/hns3_cmd.h | 9
 drivers/net/hns3/hns3_dcb.h | 3
 drivers/net/hns3/hns3_ethdev.c | 39 -
 drivers/net/hns3/hns3_ethdev.h | 15
 drivers/net/hns3/hns3_ethdev_vf.c | 31 -
 drivers/net/hns3/hns3_fdir.c | 3
 drivers/net/hns3/hns3_fdir.h | 3
 drivers/net/hns3/hns3_flow.c | 264 +++++++-----
 drivers/net/hns3/hns3_intr.c | 29 -
 drivers/net/hns3/hns3_intr.h | 4
 drivers/net/hns3/hns3_mbx.c | 8
 drivers/net/hns3/hns3_mbx.h | 4
 drivers/net/hns3/hns3_regs.h | 2
 drivers/net/hns3/hns3_rss.c | 293 ++++++++---
 drivers/net/hns3/hns3_rss.h | 8
 drivers/net/hns3/hns3_rxtx.c | 18
 drivers/net/hns3/hns3_rxtx.h | 22 -
 drivers/net/hns3/hns3_rxtx_vec_sve.c | 27 -
 drivers/net/hns3/hns3_stats.c | 43 +-
 drivers/net/hns3/hns3_stats.h | 13
 drivers/net/i40e/i40e_ethdev.c | 9
 drivers/net/i40e/i40e_vf_representor.c | 4
 drivers/net/iavf/iavf.h | 2
 drivers/net/iavf/iavf_ethdev.c | 5
 drivers/net/iavf/iavf_fdir.c | 32 +
 drivers/net/iavf/iavf_hash.c | 7
 drivers/net/iavf/iavf_rxtx.c | 54 +-
 drivers/net/iavf/iavf_rxtx.h | 2
 drivers/net/iavf/iavf_rxtx_vec_avx2.c | 118 ++++-
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 133 ++++--
 drivers/net/iavf/iavf_rxtx_vec_sse.c | 76 ++-
 drivers/net/iavf/iavf_vchnl.c | 147 ++++++-
 drivers/net/ice/base/ice_common.c | 2
 drivers/net/ice/base/ice_switch.c | 8
 drivers/net/ice/ice_ethdev.c | 4
 drivers/net/ice/ice_rxtx.c | 43 +-
 drivers/net/ice/ice_rxtx.h | 2
 drivers/net/igc/base/igc_i225.c | 14
 drivers/net/igc/base/igc_phy.c | 6
 drivers/net/ionic/ionic_dev.c | 5
 drivers/net/ionic/ionic_lif.c | 6
 drivers/net/ionic/ionic_rxtx.c | 29 -
 drivers/net/ixgbe/ixgbe_ethdev.c | 12
 drivers/net/ixgbe/ixgbe_pf.c | 8
 drivers/net/memif/rte_eth_memif.c | 8
 drivers/net/mlx4/meson.build | 2
 drivers/net/mlx4/mlx4.c | 9
 drivers/net/mlx4/mlx4_mp.c | 7
 drivers/net/mlx5/linux/mlx5_mp_os.c | 6
 drivers/net/mlx5/linux/mlx5_os.c | 11
 drivers/net/mlx5/mlx5.c | 33 +
 drivers/net/mlx5/mlx5.h | 7
 drivers/net/mlx5/mlx5_devx.c | 3
 drivers/net/mlx5/mlx5_flow.c | 54 +-
 drivers/net/mlx5/mlx5_flow.h | 3
 drivers/net/mlx5/mlx5_flow_dv.c | 77 +--
 drivers/net/mlx5/mlx5_flow_meter.c | 3
 drivers/net/mlx5/mlx5_flow_verbs.c | 23 -
 drivers/net/mlx5/mlx5_rxq.c | 5
 drivers/net/mlx5/mlx5_rxtx.c | 104 +++-
 drivers/net/mlx5/mlx5_trigger.c | 16
 drivers/net/mvneta/mvneta_rxtx.c | 4
 drivers/net/nfp/nfp_net.c | 12
 drivers/net/nfp/nfpcore/nfp_hwinfo.c | 2
 drivers/net/qede/base/ecore_init_fw_funcs.c | 2
 drivers/net/qede/base/ecore_int.c | 4
 drivers/net/qede/qede_rxtx.c | 3
 drivers/net/tap/tap_flow.c | 2
 drivers/net/tap/tap_tcmsgs.c | 18
 drivers/net/tap/tap_tcmsgs.h | 16
 drivers/net/txgbe/base/txgbe_eeprom.c | 32 -
 drivers/net/txgbe/base/txgbe_type.h | 4
 drivers/net/txgbe/txgbe_ethdev.c | 11
 drivers/net/virtio/virtio_ethdev.c | 7
 drivers/net/virtio/virtqueue.h | 4
 drivers/vdpa/ifc/ifcvf_vdpa.c | 27 +
 examples/fips_validation/main.c | 4
 examples/ipsec-secgw/ipsec-secgw.c | 21 -
 examples/ipsec-secgw/sa.c | 45 +-
 examples/l2fwd-crypto/main.c | 2
 examples/performance-thread/pthread_shim/pthread_shim.c | 9
 examples/qos_sched/cfg_file.c | 2
 examples/qos_sched/profile.cfg | 2
 examples/vhost/main.c | 7
 examples/vm_power_manager/channel_manager.c | 19
 kernel/linux/kni/meson.build | 2
 kernel/linux/meson.build | 4
 lib/librte_cryptodev/rte_cryptodev.c | 20
 lib/librte_cryptodev/rte_cryptodev_pmd.c | 4
 lib/librte_cryptodev/rte_cryptodev_pmd.h | 2
 lib/librte_eal/common/eal_common_proc.c | 17
 lib/librte_eal/common/eal_common_trace.c | 69 +--
 lib/librte_eal/common/eal_common_trace_ctf.c | 3
 lib/librte_eal/common/eal_common_trace_utils.c | 11
 lib/librte_eal/common/eal_trace.h | 3
 lib/librte_eal/common/malloc_heap.c | 2
 lib/librte_eal/common/malloc_mp.c | 2
 lib/librte_eal/common/rte_service.c | 44 +-
 lib/librte_eal/include/rte_common.h | 4
 lib/librte_eal/include/rte_memzone.h | 3
 lib/librte_eal/include/rte_uuid.h | 4
 lib/librte_eal/x86/include/rte_memcpy.h | 17
 lib/librte_eventdev/rte_event_crypto_adapter.c | 30 +
 lib/librte_eventdev/rte_event_eth_rx_adapter.h | 2
 lib/librte_eventdev/rte_event_eth_tx_adapter.c | 15
 lib/librte_graph/rte_graph_worker.h | 4
 lib/librte_gro/gro_tcp4.c | 10
 lib/librte_gro/gro_udp4.c | 6
 lib/librte_hash/rte_cuckoo_hash.c | 1
 lib/librte_ipsec/esp_outb.c | 8
 lib/librte_kni/rte_kni.h | 4
 lib/librte_mbuf/rte_mbuf.h | 3
 lib/librte_mempool/rte_mempool.h | 1
 lib/librte_net/rte_ip.h | 17
 lib/librte_node/ethdev_ctrl.c | 2
 lib/librte_pdump/rte_pdump.c | 6
 lib/librte_power/rte_power.h | 55 --
 lib/librte_ring/rte_ring.h | 15
 lib/librte_ring/rte_ring_core.h | 4
 lib/librte_ring/rte_ring_elem.h | 1
 lib/librte_sched/rte_sched.c | 2
 lib/librte_telemetry/telemetry.c | 11
 lib/librte_telemetry/telemetry_json.h | 48 ++
 lib/librte_timer/rte_timer.c | 13
 lib/librte_vhost/rte_vhost.h | 15
 lib/librte_vhost/version.map | 3
 lib/librte_vhost/vhost.c | 30 +
 lib/librte_vhost/vhost_user.c | 1
 license/README | 9
 meson.build | 2
 215 files changed, 3133 insertions(+), 1553 deletions(-)

diff -Nru dpdk-20.11.6/.github/workflows/build.yml dpdk-20.11.7/.github/workflows/build.yml
--- dpdk-20.11.6/.github/workflows/build.yml 2022-08-29 12:12:02.000000000 +0000
+++ dpdk-20.11.7/.github/workflows/build.yml 2022-12-13 10:50:22.000000000 +0000
@@ -57,18 +57,15 @@

     steps:
     - name: Checkout sources
-      uses: actions/checkout@v2
+      uses: actions/checkout@v3
     - name: Generate cache keys
       id: get_ref_keys
       run: |
-        echo -n '::set-output name=ccache::'
-        echo 'ccache-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-'$(date -u +%Y-w%W)
-        echo -n '::set-output name=libabigail::'
-        echo 'libabigail-${{ matrix.config.os }}'
-        echo -n '::set-output name=abi::'
-        echo 'abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}'
+        echo 'ccache=ccache-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-'$(date -u +%Y-w%W) >> $GITHUB_OUTPUT
+        echo 'libabigail=libabigail-${{ matrix.config.os }}' >> $GITHUB_OUTPUT
+        echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT
     - name: Retrieve ccache cache
-      uses: actions/cache@v2
+      uses: actions/cache@v3
       with:
         path: ~/.ccache
         key: ${{ steps.get_ref_keys.outputs.ccache }}-${{ github.ref }}
@@ -76,13 +73,13 @@
          ${{ steps.get_ref_keys.outputs.ccache }}-refs/heads/main
     - name: Retrieve libabigail cache
       id: libabigail-cache
-      uses: actions/cache@v2
+      uses: actions/cache@v3
       if: env.ABI_CHECKS == 'true'
       with:
         path: libabigail
         key: ${{ steps.get_ref_keys.outputs.libabigail }}
     - name: Retrieve ABI reference cache
-      uses: actions/cache@v2
+      uses: actions/cache@v3
      if: env.ABI_CHECKS == 'true'
       with:
         path: reference
@@ -119,7 +116,7 @@
       run: .ci/linux-build.sh
     - name: Upload logs on failure
       if: failure()
-      uses: actions/upload-artifact@v2
+      uses: actions/upload-artifact@v3
       with:
         name: meson-logs-${{ join(matrix.config.*, '-') }}
         path: |
diff -Nru dpdk-20.11.6/VERSION dpdk-20.11.7/VERSION
--- dpdk-20.11.6/VERSION 2022-08-29 12:12:02.000000000 +0000
+++ dpdk-20.11.7/VERSION 2022-12-13 10:50:22.000000000 +0000
@@ -1 +1 @@
-20.11.6
+20.11.7
diff -Nru dpdk-20.11.6/app/test/meson.build dpdk-20.11.7/app/test/meson.build
---
dpdk-20.11.6/app/test/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -437,7 +437,7 @@ driver_install_path), install: true) -has_hugepage = run_command('has-hugepage.sh', check: true).stdout().strip() != '0' +has_hugepage = run_command('has-hugepage.sh').stdout().strip() != '0' message('hugepage availability: @0@'.format(has_hugepage)) # some perf tests (eg: memcpy perf autotest)take very long diff -Nru dpdk-20.11.6/app/test/test_common.c dpdk-20.11.7/app/test/test_common.c --- dpdk-20.11.6/app/test/test_common.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_common.c 2022-12-13 10:50:22.000000000 +0000 @@ -25,27 +25,49 @@ #define SMALLER 0x1000U #define BIGGER 0x2000U #define PTR_DIFF BIGGER - SMALLER -#define FAIL_MACRO(x)\ - {printf(#x "() test failed!\n");\ - return -1;} uintptr_t unused = 0; + uint32_t arr[3]; RTE_SET_USED(unused); - if ((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF) != BIGGER) - FAIL_MACRO(RTE_PTR_ADD); - if ((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF) != SMALLER) - FAIL_MACRO(RTE_PTR_SUB); - if (RTE_PTR_DIFF(BIGGER, SMALLER) != PTR_DIFF) - FAIL_MACRO(RTE_PTR_DIFF); - if (RTE_MAX(SMALLER, BIGGER) != BIGGER) - FAIL_MACRO(RTE_MAX); - if (RTE_MIN(SMALLER, BIGGER) != SMALLER) - FAIL_MACRO(RTE_MIN); + RTE_TEST_ASSERT_EQUAL((uintptr_t)RTE_PTR_ADD(SMALLER, PTR_DIFF), BIGGER, + "RTE_PTR_ADD"); + RTE_TEST_ASSERT_EQUAL((uintptr_t)RTE_PTR_SUB(BIGGER, PTR_DIFF), SMALLER, + "RTE_PTR_SUB"); + RTE_TEST_ASSERT_EQUAL(RTE_PTR_DIFF(BIGGER, SMALLER), PTR_DIFF, + "RTE_PTR_DIFF"); + RTE_TEST_ASSERT_EQUAL(RTE_MAX(SMALLER, BIGGER), BIGGER, + "RTE_MAX"); + RTE_TEST_ASSERT_EQUAL(RTE_MIN(SMALLER, BIGGER), SMALLER, + "RTE_MIN"); - if (strncmp(RTE_STR(test), "test", sizeof("test"))) - FAIL_MACRO(RTE_STR); + RTE_TEST_ASSERT_EQUAL(RTE_PTR_ADD(arr + 1, sizeof(arr[0])), &arr[2], + "RTE_PTR_ADD(expr, x)"); + RTE_TEST_ASSERT_EQUAL(RTE_PTR_SUB(arr + 1, sizeof(arr[0])), &arr[0], + "RTE_PTR_SUB(expr, x)"); + RTE_TEST_ASSERT_EQUAL(RTE_PTR_ALIGN_FLOOR(arr + 2, 4), &arr[2], + "RTE_PTR_ALIGN_FLOOR(expr, x)"); + RTE_TEST_ASSERT_EQUAL(RTE_PTR_ALIGN_CEIL(arr + 2, 4), &arr[2], + "RTE_PTR_ALIGN_CEIL(expr, x)"); + RTE_TEST_ASSERT_EQUAL(RTE_PTR_ALIGN(arr + 2, 4), &arr[2], + "RTE_PTR_ALIGN(expr, x)"); + + RTE_TEST_ASSERT_EQUAL( + RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(&arr[1], 1), 4), &arr[1], + "RTE_PTR_ALIGN_FLOOR(x < y/2, y)"); + RTE_TEST_ASSERT_EQUAL( + RTE_PTR_ALIGN_FLOOR(RTE_PTR_ADD(&arr[1], 3), 4), &arr[1], + "RTE_PTR_ALIGN_FLOOR(x > y/2, y)"); + RTE_TEST_ASSERT_EQUAL( + RTE_PTR_ALIGN_CEIL(RTE_PTR_ADD(&arr[1], 3), 4), &arr[2], + "RTE_PTR_ALIGN_CEIL(x < y/2, y)"); + RTE_TEST_ASSERT_EQUAL( + RTE_PTR_ALIGN_CEIL(RTE_PTR_ADD(&arr[1], 1), 4), &arr[2], + "RTE_PTR_ALIGN_CEIL(x > y/2, y)"); + + RTE_TEST_ASSERT(strncmp(RTE_STR(test), "test", sizeof("test")) == 0, + "RTE_STR"); return 0; } diff -Nru dpdk-20.11.6/app/test/test_cryptodev.c dpdk-20.11.7/app/test/test_cryptodev.c --- dpdk-20.11.6/app/test/test_cryptodev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_cryptodev.c 2022-12-13 10:50:22.000000000 +0000 @@ -2636,6 +2636,16 @@ remaining_off -= rte_pktmbuf_data_len(sgl_buf); sgl_buf = sgl_buf->next; } + + /* The last segment should be large enough to hold full digest */ + if (sgl_buf->data_len < auth_tag_len) { + rte_pktmbuf_free(sgl_buf->next); + sgl_buf->next = NULL; + TEST_ASSERT_NOT_NULL(rte_pktmbuf_append(sgl_buf, + auth_tag_len - sgl_buf->data_len), + "No room to append auth tag"); + } + 
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, remaining_off); sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(sgl_buf, @@ -6384,8 +6394,10 @@ */ snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data); - return test_snow3g_decryption(&snow3g_test_case_7) & - test_snow3g_authentication_verify(&snow3g_hash_data); + if (test_snow3g_decryption(&snow3g_test_case_7)) + return TEST_FAILED; + + return test_snow3g_authentication_verify(&snow3g_hash_data); } static int @@ -7535,7 +7547,7 @@ rte_pktmbuf_iova(ut_params->ibuf); /* Copy AAD 18 bytes after the AAD pointer, according to the API */ memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len); - debug_hexdump(stdout, "aad:", sym_op->aead.aad.data, + debug_hexdump(stdout, "aad:", sym_op->aead.aad.data + 18, tdata->aad.len); /* Append IV at the end of the crypto operation*/ @@ -7544,7 +7556,7 @@ /* Copy IV 1 byte after the IV pointer, according to the API */ rte_memcpy(iv_ptr + 1, tdata->iv.data, tdata->iv.len); - debug_hexdump(stdout, "iv:", iv_ptr, + debug_hexdump(stdout, "iv:", iv_ptr + 1, tdata->iv.len); } else { aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16); diff -Nru dpdk-20.11.6/app/test/test_cryptodev_asym.c dpdk-20.11.7/app/test/test_cryptodev_asym.c --- dpdk-20.11.6/app/test/test_cryptodev_asym.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_cryptodev_asym.c 2022-12-13 10:50:22.000000000 +0000 @@ -209,8 +209,8 @@ status = TEST_FAILED; goto error_exit; } - debug_hexdump(stdout, "encrypted message", asym_op->rsa.message.data, - asym_op->rsa.message.length); + debug_hexdump(stdout, "encrypted message", asym_op->rsa.cipher.data, + asym_op->rsa.cipher.length); /* Use the resulted output as decryption Input vector*/ asym_op = result_op->asym; diff -Nru dpdk-20.11.6/app/test/test_cryptodev_security_pdcp_test_vectors.h dpdk-20.11.7/app/test/test_cryptodev_security_pdcp_test_vectors.h --- dpdk-20.11.6/app/test/test_cryptodev_security_pdcp_test_vectors.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_cryptodev_security_pdcp_test_vectors.h 2022-12-13 10:50:22.000000000 +0000 @@ -4145,7 +4145,7 @@ /*************** 12-bit C-plane ****************/ /* Control Plane w/NULL enc. + NULL int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4159,7 +4159,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/NULL enc. + SNOW f9 int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4173,7 +4173,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/NULL enc. + AES CMAC int. 
UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4187,7 +4187,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/NULL enc. + ZUC int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4202,7 +4202,7 @@ 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/SNOW f8 enc. + NULL int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4216,7 +4216,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/SNOW f8 enc. + SNOW f9 int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4230,7 +4230,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/SNOW f8 enc. + AES CMAC int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4244,7 +4244,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/SNOW f8 enc. + ZUC int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4259,7 +4259,7 @@ 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/AES CTR enc. + NULL int. 
UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4273,7 +4273,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/AES CTR enc. + SNOW f9 int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4287,7 +4287,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/AES CTR enc. + AES CMAC int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4301,7 +4301,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/AES CTR enc. + ZUC int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4316,7 +4316,7 @@ 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/ZUC enc. + NULL int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4330,7 +4330,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/ZUC enc. + SNOW f9 int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4344,7 +4344,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/ZUC enc. + AES CMAC int. 
UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4358,7 +4358,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/ZUC enc. + ZUC int. UL */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4391,7 +4391,7 @@ (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, /* User Plane w/NULL enc. UL for 18-bit SN*/ - (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, @@ -4425,7 +4425,7 @@ (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, /* User Plane w/SNOW enc. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4459,7 +4459,7 @@ (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, /* User Plane w/AES enc. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4493,7 +4493,7 @@ (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, /* User Plane w/ZUC enc. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4511,7 +4511,7 @@ /*************** u-plane with integrity for 12-bit SN *****/ /* User Plane w/NULL enc. + NULL int. 
UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4525,7 +4525,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/NULL enc. + SNOW f9 int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4539,7 +4539,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/NULL enc. + AES CMAC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4553,7 +4553,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/NULL enc. + ZUC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4568,7 +4568,7 @@ 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/SNOW f8 enc. + NULL int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4582,7 +4582,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4596,7 +4596,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/SNOW f8 enc. + AES CMAC int. 
UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4610,7 +4610,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/SNOW f8 enc. + ZUC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4625,7 +4625,7 @@ 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/AES CTR enc. + NULL int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4639,7 +4639,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/AES CTR enc. + SNOW f9 int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4653,7 +4653,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/AES CTR enc. + AES CMAC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4667,7 +4667,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/AES CTR enc. + ZUC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4682,7 +4682,7 @@ 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/ZUC enc. + NULL int. 
UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4696,7 +4696,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/ZUC enc. + SNOW f9 int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4710,7 +4710,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/ZUC enc. + AES CMAC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4724,7 +4724,7 @@ 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/ZUC enc. + ZUC int. UL for 12-bit SN*/ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, @@ -4740,7 +4740,7 @@ /*************** u-plane with integrity for 18-bit SN *****/ /* User Plane w/NULL enc. + NULL int. UL for 18-bit SN*/ - (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, @@ -4756,7 +4756,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/NULL enc. + SNOW f9 int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4772,7 +4772,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/NULL enc. + AES CMAC int. 
UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4788,7 +4788,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/NULL enc. + ZUC int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4804,7 +4804,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/SNOW f8 enc. + NULL int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4820,7 +4820,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4836,7 +4836,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4852,7 +4852,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/SNOW f8 enc. + ZUC int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4868,7 +4868,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/AES CTR enc. + NULL int. 
UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4884,7 +4884,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/AES CTR enc. + SNOW f9 int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4900,7 +4900,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/AES CTR enc. + AES CMAC int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4916,7 +4916,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/AES CTR enc. + ZUC int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4932,7 +4932,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/ZUC enc. + NULL int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4948,7 +4948,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/ZUC enc. + SNOW f9 int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4964,7 +4964,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/ZUC enc. + AES CMAC int. 
UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -4980,7 +4980,7 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69}, /* User Plane w/ZUC enc. + ZUC int. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, @@ -5435,7 +5435,7 @@ /************ C-plane 12-bit ****************************/ /* Control Plane w/NULL enc. + NULL int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, @@ -5451,13 +5451,13 @@ 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* Control Plane w/NULL enc. + SNOW f9 int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, - 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x74, 0xB8, 0x27, 0x96}, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x33, 0x22, 0x02, 0x10}, /* Control Plane w/NULL enc. + SNOW f9 int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, @@ -5467,13 +5467,13 @@ 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x97, 0x50, 0x3F, 0xF7}, /* Control Plane w/NULL enc. + AES CMAC int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, - 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x3F, 0x71, 0x26, 0x2E}, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x1B, 0xB0, 0x4A, 0xBF}, /* Control Plane w/NULL enc. + AES CMAC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, @@ -5483,13 +5483,13 @@ 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xE8, 0xBB, 0xE9, 0x36}, /* Control Plane w/NULL enc. + ZUC int. 
UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x00, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, - 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x54, 0xEF, 0x25, 0xC3}, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x28, 0x41, 0xAB, 0x16}, /* Control Plane w/NULL enc. + ZUC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, @@ -5500,7 +5500,7 @@ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x76, 0xD0, 0x5B, 0x2C}, /* Control Plane w/SNOW f8 enc. + NULL int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, @@ -5516,13 +5516,13 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xDC, 0x32, 0x96, 0x65}, /* Control Plane w/SNOW f8 enc. + SNOW f9 int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, - 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x66, 0xBF, 0x8B, 0x05}, + 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x21, 0x25, 0xAE, 0x83}, /* Control Plane w/SNOW f8 enc. + SNOW f9 int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, @@ -5532,13 +5532,13 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x4B, 0x62, 0xA9, 0x92}, /* Control Plane w/SNOW f8 enc. + AES CMAC int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, - 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x2D, 0x76, 0x8A, 0xBD}, + 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x09, 0xB7, 0xE6, 0x2C}, /* Control Plane w/SNOW f8 enc. + AES CMAC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, @@ -5548,13 +5548,13 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x34, 0x89, 0x7F, 0x53}, /* Control Plane w/SNOW f8 enc. + ZUC int. 
UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x00, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, - 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x46, 0xE8, 0x89, 0x50}, + 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x3A, 0x46, 0x07, 0x85}, /* Control Plane w/SNOW f8 enc. + ZUC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, @@ -5565,7 +5565,7 @@ 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xAA, 0xE2, 0xCD, 0x49}, /* Control Plane w/AES CTR enc. + NULL int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, @@ -5582,13 +5582,13 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x87, 0x7A, 0x32, 0x1B}, /* Control Plane w/AES CTR enc. + SNOW f9 int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, - 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xF2, 0x8B, 0x18, 0xAA}, + 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xB5, 0x11, 0x3D, 0x2C}, /* Control Plane w/AES CTR enc. + SNOW f9 int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, @@ -5599,13 +5599,13 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x10, 0x2A, 0x0D, 0xEC}, /* Control Plane w/AES CTR enc. + AES CMAC int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, - 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xB9, 0x42, 0x19, 0x12}, + 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0x9D, 0x83, 0x75, 0x83}, /* Control Plane w/AES CTR enc. + AES CMAC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, @@ -5615,13 +5615,13 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x6F, 0xC1, 0xDB, 0x2D}, /* Control Plane w/AES CTR enc. + ZUC int. 
UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x00, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, - 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xD2, 0xDC, 0x1A, 0xFF}, + 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xAE, 0x72, 0x94, 0x2A}, /* Control Plane w/AES CTR enc. + ZUC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, @@ -5631,7 +5631,7 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0xF1, 0xAA, 0x69, 0x37}, /* Control Plane w/ZUC enc. + NULL int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, @@ -5647,13 +5647,13 @@ 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x90, 0xF5, 0xBD, 0x56}, /* Control Plane w/ZUC enc. + SNOW f9 int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, - 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x69, 0x75, 0x1D, 0x76}, + 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x2E, 0xEF, 0x38, 0xF0}, /* Control Plane w/ZUC enc. + SNOW f9 int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, @@ -5663,13 +5663,13 @@ 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x07, 0xA5, 0x82, 0xA1}, /* Control Plane w/ZUC enc. + AES CMAC int. UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, - 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x22, 0xBC, 0x1C, 0xCE}, + 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x06, 0x7D, 0x70, 0x5F}, /* Control Plane w/ZUC enc. + AES CMAC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, @@ -5679,13 +5679,13 @@ 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x78, 0x4E, 0x54, 0x60}, /* Control Plane w/ZUC enc. + ZUC int. 
UL LONG SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x00, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, - 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x49, 0x22, 0x1F, 0x23}, + 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x35, 0x8C, 0x91, 0xF6}, /* Control Plane w/ZUC enc. + ZUC int. DL LONG SN */ (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, @@ -5714,7 +5714,7 @@ (uint8_t[]){0x8b, 0x26, 0xad, 0x9c, 0x44, 0x1f, 0x89, 0x0b, 0x38, 0xc4, 0x57, 0xa4, 0x9d, 0x42, 0x14, 0x07, 0xe8}, /* User Plane w/NULL enc. UL for 18-bit SN*/ - (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, @@ -5748,7 +5748,7 @@ (uint8_t[]){0x8b, 0x26, 0x0b, 0x50, 0xf3, 0xff, 0x37, 0xe3, 0x6b, 0xaf, 0x08, 0xd8, 0xf6, 0x1f, 0xca, 0x6f, 0xbc}, /* User Plane w/SNOW enc. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, @@ -5781,7 +5781,7 @@ (uint8_t[]){0x8b, 0x26, 0xc7, 0xf2, 0x23, 0xb3, 0xbe, 0xc0, 0xdf, 0xc5, 0xed, 0x37, 0x35, 0x7c, 0x66, 0xa3, 0xf9}, /* User Plane w/AES enc. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, @@ -5813,7 +5813,7 @@ (uint8_t[]){0x8b, 0x26, 0xa3, 0x1a, 0x1e, 0x22, 0xf7, 0x17, 0x8a, 0xb5, 0x59, 0xd8, 0x2b, 0x13, 0xdd, 0x12, 0x4e}, /* User Plane w/ZUC enc. UL for 18-bit SN*/ - (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, @@ -5829,7 +5829,7 @@ /************************* 12-bit u-plane with int ************/ /* User Plane w/NULL enc. + NULL int. 
UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, @@ -5845,13 +5845,13 @@ 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD}, /* User Plane w/NULL enc. + SNOW f9 int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, - 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x74, 0xB8, 0x27, 0x96}, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x6A, 0x4D, 0xA1, 0xE0}, /* User Plane w/NULL enc. + SNOW f9 int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, @@ -5861,13 +5861,13 @@ 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x97, 0x50, 0x3F, 0xF7}, /* User Plane w/NULL enc. + AES CMAC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, - 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x3F, 0x71, 0x26, 0x2E}, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xB4, 0x36, 0x24, 0x75}, /* User Plane w/NULL enc. + AES CMAC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, @@ -5877,13 +5877,13 @@ 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xE8, 0xBB, 0xE9, 0x36}, /* User Plane w/NULL enc. + ZUC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, + (uint8_t[]){0x80, 0x01, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, 0x47, 0x0E, 0x8F, 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, - 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x54, 0xEF, 0x25, 0xC3}, + 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x5B, 0x05, 0x40, 0x0B}, /* User Plane w/NULL enc. + ZUC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x86, 0xB8, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, @@ -5894,7 +5894,7 @@ 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0x76, 0xD0, 0x5B, 0x2C}, /* User Plane w/SNOW f8 enc. + NULL int. 
UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, @@ -5910,13 +5910,13 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xDC, 0x32, 0x96, 0x65}, /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, - 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x66, 0xBF, 0x8B, 0x05}, + 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x78, 0x4A, 0x0D, 0x73}, /* User Plane w/SNOW f8 enc. + SNOW f9 int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, @@ -5926,13 +5926,13 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x4B, 0x62, 0xA9, 0x92}, /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, - 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x2D, 0x76, 0x8A, 0xBD}, + 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0xA6, 0x31, 0x88, 0xE6}, /* User Plane w/SNOW f8 enc. + AES CMAC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, @@ -5942,13 +5942,13 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0x34, 0x89, 0x7F, 0x53}, /* User Plane w/SNOW f8 enc. + ZUC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, + (uint8_t[]){0x80, 0x01, 0xD6, 0xCC, 0xB5, 0xCE, 0x7C, 0xF8, 0xBE, 0x68, 0x2B, 0xAB, 0xC7, 0x32, 0xDA, 0x49, 0xD0, 0xC7, 0x54, 0xCA, 0x18, 0xBB, 0x05, 0x6D, 0xC5, 0x5F, 0xD3, 0xA7, 0xE6, 0xD8, 0xE1, 0xDF, 0x7C, 0x4F, 0x3C, 0x8B, 0x86, 0xC6, 0x8E, 0x24, 0xF7, 0xBC, 0x45, 0x2A, 0x2E, 0xB4, 0xF5, 0xD0, 0x39, 0x5B, 0x70, 0xB4, 0x53, 0x90, 0x98, 0x8A, 0x7C, 0x87, 0x21, 0xED, - 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x46, 0xE8, 0x89, 0x50}, + 0x76, 0x83, 0x63, 0x39, 0x2C, 0xDB, 0x49, 0x02, 0xEC, 0x98}, /* User Plane w/SNOW f8 enc. + ZUC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0xC1, 0x3A, 0x28, 0xBC, 0xEB, 0xAC, 0x49, 0xB9, 0xA1, 0xFC, 0xD6, 0x83, 0xEC, 0xA2, 0x89, 0xE6, 0x8F, 0xCA, @@ -5958,7 +5958,7 @@ 0x0F, 0xD8, 0x38, 0xE6, 0x3F, 0xD4, 0x59, 0x7A, 0x9A, 0xB7, 0xF4, 0x52, 0xC6, 0x66, 0xC2, 0x73, 0xAA, 0xE2, 0xCD, 0x49}, /* User Plane w/AES CTR enc. + NULL int. 
UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, @@ -5975,13 +5975,13 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x87, 0x7A, 0x32, 0x1B}, /* User Plane w/AES CTR enc. + SNOW f9 int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, - 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xF2, 0x8B, 0x18, 0xAA}, + 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xEC, 0x7E, 0x9E, 0xDC}, /* User Plane w/AES CTR enc. + SNOW f9 int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, @@ -5992,13 +5992,13 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x10, 0x2A, 0x0D, 0xEC}, /* User Plane w/AES CTR enc. + AES CMAC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, - 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xB9, 0x42, 0x19, 0x12}, + 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0x32, 0x05, 0x1B, 0x49}, /* User Plane w/AES CTR enc. + AES CMAC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, @@ -6008,13 +6008,13 @@ 0x7E, 0xF5, 0xBD, 0x60, 0xEB, 0x9E, 0xC2, 0xC9, 0x54, 0x65, 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0x6F, 0xC1, 0xDB, 0x2D}, /* User Plane w/AES CTR enc. + ZUC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, + (uint8_t[]){0x80, 0x01, 0x57, 0xB2, 0x7E, 0x21, 0xE7, 0xDD, 0x56, 0xCF, 0xE9, 0x97, 0x27, 0xE8, 0xA3, 0xDE, 0x4C, 0xF6, 0xD1, 0x10, 0x4A, 0x7D, 0xC0, 0xD0, 0xF7, 0x1B, 0x3E, 0x16, 0xF0, 0xA8, 0x4F, 0xBC, 0x17, 0x73, 0x9A, 0x69, 0x73, 0x6C, 0x83, 0xE5, 0x9D, 0x56, 0xBA, 0xF7, 0x08, 0x6D, 0xC5, 0x89, 0xFB, 0xAB, 0x99, 0xD1, 0x37, 0x42, 0x89, 0x8F, 0xE1, 0xAE, 0xA3, 0x22, - 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xD2, 0xDC, 0x1A, 0xFF}, + 0x60, 0x98, 0xFD, 0x79, 0x32, 0xDB, 0xDD, 0x36, 0x7F, 0x37}, /* User Plane w/AES CTR enc. + ZUC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x84, 0x3D, 0x5A, 0x2C, 0xBA, 0x02, 0xC1, 0x6C, 0x8D, 0x78, 0xB5, 0x1F, 0x51, 0x70, 0x18, 0x61, 0x92, 0x10, @@ -6025,7 +6025,7 @@ 0x7D, 0xAC, 0xB6, 0x47, 0xFF, 0x1C, 0xF1, 0xAA, 0x69, 0x37}, /* User Plane w/ZUC enc. + NULL int. 
UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, @@ -6041,13 +6041,13 @@ 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x90, 0xF5, 0xBD, 0x56}, /* User Plane w/ZUC enc. + SNOW f9 int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, - 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x69, 0x75, 0x1D, 0x76}, + 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x77, 0x80, 0x9B, 0x00}, /* User Plane w/ZUC enc. + SNOW f9 int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, @@ -6057,13 +6057,13 @@ 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x07, 0xA5, 0x82, 0xA1}, /* User Plane w/ZUC enc. + AES CMAC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, - 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x22, 0xBC, 0x1C, 0xCE}, + 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0xA9, 0xFB, 0x1E, 0x95}, /* User Plane w/ZUC enc. + AES CMAC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, @@ -6073,13 +6073,13 @@ 0x85, 0xAF, 0x0A, 0xFF, 0xAC, 0x6A, 0x00, 0x19, 0xC1, 0x51, 0x53, 0xDE, 0x78, 0x07, 0x6D, 0x10, 0x78, 0x4E, 0x54, 0x60}, /* User Plane w/ZUC enc. + ZUC int. UL for 12-bit SN */ - (uint8_t[]){0x50, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, + (uint8_t[]){0x80, 0x01, 0x47, 0x9B, 0x21, 0xD1, 0xB2, 0x99, 0x23, 0x56, 0xC5, 0xFF, 0xC2, 0xB7, 0x7D, 0x30, 0xBA, 0xFB, 0x43, 0xED, 0x79, 0xC9, 0x9D, 0x9D, 0x38, 0x35, 0xC6, 0x7B, 0xD0, 0xAA, 0x33, 0x08, 0x88, 0x72, 0x16, 0x1D, 0xF7, 0xA0, 0xD9, 0xEC, 0x73, 0x45, 0x51, 0x87, 0xFF, 0x64, 0xFB, 0x3C, 0xA6, 0xB5, 0xD0, 0x1C, 0xD6, 0x90, 0x3D, 0x40, 0x54, 0x22, 0x2F, 0x6C, - 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x49, 0x22, 0x1F, 0x23}, + 0xE4, 0xB1, 0x71, 0x15, 0x78, 0x54, 0x46, 0xC8, 0x7A, 0xEB}, /* User Plane w/ZUC enc. + ZUC int. DL for 12-bit SN */ (uint8_t[]){0xA0, 0x00, 0x3F, 0x01, 0xCE, 0xBD, 0x8A, 0x98, 0x7B, 0x26, 0xF1, 0x28, 0x74, 0xDC, 0x26, 0x2B, 0x02, 0xE8, 0x9C, 0xBC, @@ -6091,7 +6091,7 @@ /************************* 18-bit u-plane with int ************/ /* User Plane w/NULL enc. + NULL int. 
UL for 18-bit SN */ - (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, + (uint8_t[]){0x80, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, 0x64, 0xD3, 0x5E, 0xAF, 0x3F, 0x57, 0xC2, 0xE2, 0x91, 0x91, 0xA3, 0x9C, 0xE6, 0x30, 0x69, 0x70, 0x33, 0x8A, 0x15, 0xD0, 0x36, @@ -6107,13 +6107,13 @@ 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0x00, 0x00, 0x00, 0x00}, /* User Plane w/NULL enc. + SNOW f9 int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, 0x31, 0xA2, 0x76, 0xBA, 0xFC, 0x5A, 0xDB, 0xAA, 0xA3, 0x0B, 0x6A, 0xD2, 0xEE, 0xD6, 0x93, 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, - 0x91, 0x7F, 0x71, 0x17, 0x69}, + 0x91, 0x7F, 0x58, 0x24, 0x17}, /* User Plane w/NULL enc. + SNOW f9 int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, @@ -6122,12 +6122,12 @@ 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0x84, 0x45, 0xA8, 0x88}, /* User Plane w/NULL enc. + AES CMAC int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, 0x31, 0xA2, 0x76, 0xBA, 0xFC, 0x5A, 0xDB, 0xAA, 0xA3, 0x0B, 0x6A, 0xD2, 0xEE, 0xD6, 0x93, - 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0x33, 0x9B, 0x38, 0xF7}, + 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0x83, 0xB7, 0xF2, 0x0B}, /* User Plane w/NULL enc. + AES CMAC int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, @@ -6136,12 +6136,12 @@ 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0xD9, 0x0B, 0x89, 0x7F}, /* User Plane w/NULL enc. + ZUC int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, + (uint8_t[]){0x80, 0x00, 0x01, 0xB8, 0x33, 0x4F, 0x85, 0x8C, 0x2C, 0x65, 0x7D, 0x8F, 0x5D, 0x40, 0x57, 0x60, 0x52, 0x4F, 0xB9, 0xF1, 0x69, 0xE9, 0x68, 0x04, 0xFC, 0x7A, 0xBE, 0xD2, 0x5B, 0x4A, 0x21, 0x7F, 0x13, 0x52, 0x08, 0xBA, 0xBD, 0x69, 0x51, 0xC9, 0x63, 0xCF, 0x06, 0x62, 0x31, 0xA2, 0x76, 0xBA, 0xFC, 0x5A, 0xDB, 0xAA, 0xA3, 0x0B, 0x6A, 0xD2, 0xEE, 0xD6, 0x93, - 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0xB5, 0xD9, 0x5D, 0xE0}, + 0xE4, 0x1B, 0x11, 0x4F, 0xC4, 0xD7, 0xDA, 0x91, 0xAB, 0x98, 0xC0, 0x1A}, /* User Plane w/NULL enc. + ZUC int. 
DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0xF8, 0xDB, 0x2D, 0x3F, 0x23, 0x82, 0x53, 0xFD, 0x37, 0xDE, 0x88, 0x63, 0x08, 0x4F, 0xD3, 0x71, 0xFB, 0xEB, 0x35, 0xF3, @@ -6150,7 +6150,7 @@ 0xEE, 0x2C, 0x96, 0x0C, 0xD7, 0x7D, 0x70, 0x1B, 0x01, 0x7F, 0x96, 0x46, 0x53, 0xB0, 0xA4, 0x7A, 0xF9, 0xDD, 0xCC, 0x69, 0xDA, 0xE9, 0x17, 0x96}, /* User Plane w/SNOW f8 enc. + NULL int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, @@ -6165,12 +6165,12 @@ 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0x53, 0x93, 0xEF, 0x7C}, /* User Plane w/SNOW f8 enc. + SNOW f9 int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, 0xC8, 0x96, 0x7A, 0x0A, 0x25, 0x08, 0xEB, 0x41, 0x30, 0x00, 0x33, 0xC7, 0xFF, 0x33, 0x4E, - 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0x2A, 0xAB, 0x0F, 0x24}, + 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0x2A, 0x82, 0x3C, 0x5A}, /* User Plane w/SNOW f8 enc. + SNOW f9 int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x22, 0x2D, 0x15, 0xBA, 0x95, 0xAC, 0x47, 0x5A, 0xE3, 0x90, 0x82, 0xEA, 0xC2, 0x93, 0x80, 0x23, 0xE9, 0xAC, 0xEA, 0x5D, @@ -6179,12 +6179,12 @@ 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0xD7, 0xD6, 0x47, 0xF4}, /* User Plane w/SNOW f8 enc. + AES CMAC int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, 0xC8, 0x96, 0x7A, 0x0A, 0x25, 0x08, 0xEB, 0x41, 0x30, 0x00, 0x33, 0xC7, 0xFF, 0x33, 0x4E, - 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0x66, 0x41, 0x20, 0xBA}, + 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0xD6, 0x6D, 0xEA, 0x46}, /* User Plane w/SNOW f8 enc. + AES CMAC int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x22, 0x2D, 0x15, 0xBA, 0x95, 0xAC, 0x47, 0x5A, 0xE3, 0x90, 0x82, 0xEA, 0xC2, 0x93, 0x80, 0x23, 0xE9, 0xAC, 0xEA, 0x5D, @@ -6193,12 +6193,12 @@ 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0x8A, 0x98, 0x66, 0x03}, /* User Plane w/SNOW f8 enc. + ZUC int. 
UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, + (uint8_t[]){0x80, 0x00, 0x01, 0x9A, 0xAF, 0x1D, 0x21, 0x2F, 0x48, 0xB2, 0x30, 0xCF, 0xBB, 0x8A, 0x2C, 0xB7, 0x57, 0xB6, 0x27, 0x89, 0x0D, 0x91, 0x03, 0x2C, 0x2B, 0x8D, 0x29, 0x4A, 0xBD, 0x8D, 0x48, 0xD2, 0x69, 0x37, 0xB1, 0xA1, 0x97, 0x12, 0xBD, 0x0A, 0x91, 0x4D, 0xEB, 0x76, 0xC8, 0x96, 0x7A, 0x0A, 0x25, 0x08, 0xEB, 0x41, 0x30, 0x00, 0x33, 0xC7, 0xFF, 0x33, 0x4E, - 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0xE0, 0x03, 0x45, 0xAD}, + 0xC1, 0xFE, 0x5C, 0x0F, 0x15, 0xE7, 0x9F, 0x31, 0xFE, 0x42, 0xD8, 0x57}, /* User Plane w/SNOW f8 enc. + ZUC int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x22, 0x2D, 0x15, 0xBA, 0x95, 0xAC, 0x47, 0x5A, 0xE3, 0x90, 0x82, 0xEA, 0xC2, 0x93, 0x80, 0x23, 0xE9, 0xAC, 0xEA, 0x5D, @@ -6207,7 +6207,7 @@ 0xC4, 0xB0, 0xB8, 0x31, 0x50, 0x9E, 0x37, 0x15, 0x0E, 0x0D, 0x29, 0x9D, 0xB3, 0x78, 0xFB, 0x9D, 0x5C, 0x90, 0xF8, 0x80, 0x89, 0x7A, 0xF8, 0xEA}, /* User Plane w/AES CTR enc. + NULL int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, @@ -6221,12 +6221,12 @@ 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x98, 0x15, 0x68, 0x35}, /* User Plane w/AES CTR enc. + SNOW f9 int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, 0x14, 0x73, 0x76, 0xDE, 0x54, 0xA0, 0xF9, 0x4C, 0xC2, 0x8F, 0x02, 0x88, - 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0xBE, 0x17, 0x81, 0xA1}, + 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0xBE, 0x3E, 0xB2, 0xDF}, /* User Plane w/AES CTR enc. + SNOW f9 int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x01, 0x0D, 0x4B, 0x5E, 0xD3, 0xCE, 0x96, 0xE1, 0x9A, 0x9D, 0xB3, 0x01, 0xD6, 0x40, 0x50, 0x00, 0x6C, 0x63, 0xFD, 0x37, @@ -6235,12 +6235,12 @@ 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x1C, 0x50, 0xC0, 0xBD}, /* User Plane w/AES CTR enc. + AES CMAC int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, 0x14, 0x73, 0x76, 0xDE, 0x54, 0xA0, 0xF9, 0x4C, 0xC2, 0x8F, 0x02, 0x88, - 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0xF2, 0xFD, 0xAE, 0x3F}, + 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0x42, 0xD1, 0x64, 0xC3}, /* User Plane w/AES CTR enc. + AES CMAC int. 
DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x01, 0x0D, 0x4B, 0x5E, 0xD3, 0xCE, 0x96, 0xE1, 0x9A, 0x9D, 0xB3, 0x01, 0xD6, 0x40, 0x50, 0x00, 0x6C, 0x63, 0xFD, 0x37, @@ -6249,12 +6249,12 @@ 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x41, 0x1E, 0xE1, 0x4A}, /* User Plane w/AES CTR enc. + ZUC int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, + (uint8_t[]){0x80, 0x00, 0x01, 0xBF, 0x31, 0x94, 0xCF, 0x6E, 0x99, 0x84, 0x08, 0xF1, 0x90, 0xC2, 0x22, 0xD0, 0xD2, 0x3D, 0x44, 0x75, 0x7F, 0xC5, 0x0F, 0xAC, 0x7C, 0x18, 0x46, 0xA5, 0x3E, 0x2F, 0x0F, 0x26, 0x9E, 0x5A, 0x49, 0xF7, 0xCB, 0x70, 0x17, 0xBC, 0x01, 0x1D, 0xA3, 0x65, 0x0E, 0x4B, 0x53, 0x14, 0x73, 0x76, 0xDE, 0x54, 0xA0, 0xF9, 0x4C, 0xC2, 0x8F, 0x02, 0x88, - 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0x74, 0xBF, 0xCB, 0x28}, + 0x36, 0xC7, 0xC4, 0x5A, 0x57, 0x7D, 0xA1, 0x0D, 0x6A, 0xFE, 0x56, 0xD2}, /* User Plane w/AES CTR enc. + ZUC int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x01, 0x0D, 0x4B, 0x5E, 0xD3, 0xCE, 0x96, 0xE1, 0x9A, 0x9D, 0xB3, 0x01, 0xD6, 0x40, 0x50, 0x00, 0x6C, 0x63, 0xFD, 0x37, @@ -6263,7 +6263,7 @@ 0x39, 0x22, 0xB2, 0xF6, 0x5F, 0xBD, 0x58, 0xE3, 0xE0, 0xDB, 0xD5, 0x7F, 0xFB, 0x78, 0x95, 0xE1, 0x5E, 0x36, 0xF8, 0x52, 0x42, 0xFC, 0x7F, 0xA3}, /* User Plane w/ZUC enc. + NULL int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, @@ -6277,12 +6277,12 @@ 0x7D, 0x2D, 0xE0, 0x3C, 0xE3, 0x81, 0xAA, 0xEA, 0xCC, 0xD7, 0xFC, 0x46, 0x07, 0x7C, 0x8E, 0x8E, 0x0E, 0x99, 0xB8, 0x31, 0x65, 0x17, 0xF6, 0xE3}, /* User Plane w/ZUC enc. + SNOW f9 int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, 0x51, 0x54, 0x82, 0x69, 0x4C, 0x45, 0x0B, 0xFA, 0x87, 0x4D, 0x97, 0x6E, - 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0x3C, 0x13, 0x64, 0xB1}, + 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0x3C, 0x3A, 0x57, 0xCF}, /* User Plane w/ZUC enc. + SNOW f9 int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x30, 0x62, 0x48, 0xC0, 0xB1, 0xED, 0x1F, 0x13, 0x8A, 0x7A, 0x62, 0x40, 0x12, 0x35, 0x54, 0x03, 0x93, 0xBD, 0xE5, 0x88, @@ -6291,12 +6291,12 @@ 0x7D, 0x2D, 0xE0, 0x3C, 0xE3, 0x81, 0xAA, 0xEA, 0xCC, 0xD7, 0xFC, 0x46, 0x07, 0x7C, 0x8E, 0x8E, 0x0E, 0x99, 0xB8, 0x31, 0xE1, 0x52, 0x5E, 0x6B}, /* User Plane w/ZUC enc. + AES CMAC int. 
UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, 0x51, 0x54, 0x82, 0x69, 0x4C, 0x45, 0x0B, 0xFA, 0x87, 0x4D, 0x97, 0x6E, - 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0x70, 0xF9, 0x4B, 0x2F}, + 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0xC0, 0xD5, 0x81, 0xD3}, /* User Plane w/ZUC enc. + AES CMAC int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x30, 0x62, 0x48, 0xC0, 0xB1, 0xED, 0x1F, 0x13, 0x8A, 0x7A, 0x62, 0x40, 0x12, 0x35, 0x54, 0x03, 0x93, 0xBD, 0xE5, 0x88, @@ -6305,12 +6305,12 @@ 0x7D, 0x2D, 0xE0, 0x3C, 0xE3, 0x81, 0xAA, 0xEA, 0xCC, 0xD7, 0xFC, 0x46, 0x07, 0x7C, 0x8E, 0x8E, 0x0E, 0x99, 0xB8, 0x31, 0xBC, 0x1C, 0x7F, 0x9C}, /* User Plane w/ZUC enc. + ZUC int. UL for 18-bit SN */ - (uint8_t[]){0x0C, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, + (uint8_t[]){0x80, 0x00, 0x01, 0x32, 0xF9, 0x21, 0x1D, 0xBB, 0xF8, 0xE5, 0x7C, 0x74, 0xC2, 0xD7, 0xFF, 0x74, 0x59, 0x3A, 0x69, 0xD1, 0x8B, 0x65, 0x98, 0xB9, 0x3C, 0xFB, 0x63, 0xB1, 0x9E, 0xB7, 0xCA, 0x04, 0x68, 0xB9, 0xAB, 0xA2, 0x5A, 0xAF, 0x15, 0x8E, 0x71, 0xED, 0xE4, 0xFA, 0x99, 0x79, 0xF9, 0x51, 0x54, 0x82, 0x69, 0x4C, 0x45, 0x0B, 0xFA, 0x87, 0x4D, 0x97, 0x6E, - 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0xF6, 0xBB, 0x2E, 0x38}, + 0xB0, 0xC9, 0x06, 0x08, 0x6B, 0xFC, 0x4A, 0x85, 0xE8, 0xFA, 0xB3, 0xC2}, /* User Plane w/ZUC enc. + ZUC int. DL for 18-bit SN */ (uint8_t[]){0xF8, 0x00, 0x00, 0x30, 0x62, 0x48, 0xC0, 0xB1, 0xED, 0x1F, 0x13, 0x8A, 0x7A, 0x62, 0x40, 0x12, 0x35, 0x54, 0x03, 0x93, 0xBD, 0xE5, 0x88, diff -Nru dpdk-20.11.6/app/test/test_efd_perf.c dpdk-20.11.7/app/test/test_efd_perf.c --- dpdk-20.11.6/app/test/test_efd_perf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_efd_perf.c 2022-12-13 10:50:22.000000000 +0000 @@ -143,7 +143,6 @@ qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare); /* Sift through the list of keys and look for duplicates */ - int num_duplicates = 0; for (i = 0; i < KEYS_TO_ADD - 1; i++) { if (memcmp(keys[i], keys[i + 1], params->key_size) == 0) { /* This key already exists, try again */ diff -Nru dpdk-20.11.6/app/test/test_event_timer_adapter.c dpdk-20.11.7/app/test/test_event_timer_adapter.c --- dpdk-20.11.6/app/test/test_event_timer_adapter.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_event_timer_adapter.c 2022-12-13 10:50:22.000000000 +0000 @@ -764,7 +764,6 @@ { RTE_SET_USED(args); struct rte_event_timer *ev_tim = NULL; - uint64_t cancel_count = 0; uint16_t ret; while (!arm_done || rte_ring_count(timer_producer_ring) > 0) { @@ -774,7 +773,6 @@ ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1); TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer"); rte_mempool_put(eventdev_test_mempool, (void *)ev_tim); - cancel_count++; } return TEST_SUCCESS; diff -Nru dpdk-20.11.6/app/test/test_hash_perf.c dpdk-20.11.7/app/test/test_hash_perf.c --- dpdk-20.11.6/app/test/test_hash_perf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_hash_perf.c 2022-12-13 10:50:22.000000000 +0000 @@ -480,6 +480,11 @@ (const void **)keys_burst, &signatures[j * BURST_SIZE], BURST_SIZE, positions_burst); + if (ret != 0) { + printf("rte_hash_lookup_with_hash_bulk failed with %d\n", + ret); + return -1; + } 
for (k = 0; k < BURST_SIZE; k++) { if (positions_burst[k] != positions[j * @@ -492,10 +497,14 @@ } } } else { - rte_hash_lookup_bulk(h[table_index], + ret = rte_hash_lookup_bulk(h[table_index], (const void **) keys_burst, BURST_SIZE, positions_burst); + if (ret != 0) { + printf("rte_hash_lookup_bulk failed with %d\n", ret); + return -1; + } for (k = 0; k < BURST_SIZE; k++) { if (positions_burst[k] != positions[j * BURST_SIZE + k]) { printf("Key looked up in %d, should be in %d\n", diff -Nru dpdk-20.11.6/app/test/test_hash_readwrite_lf_perf.c dpdk-20.11.7/app/test/test_hash_readwrite_lf_perf.c --- dpdk-20.11.6/app/test/test_hash_readwrite_lf_perf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_hash_readwrite_lf_perf.c 2022-12-13 10:50:22.000000000 +0000 @@ -1102,7 +1102,6 @@ rte_eal_remote_launch(test_rwc_reader, (void *)(uintptr_t)read_type, enabled_core_ids[i]); - write_type = WRITE_KEY_SHIFT; pos_core = 0; /* Launch writers */ diff -Nru dpdk-20.11.6/app/test/test_ipsec.c dpdk-20.11.7/app/test/test_ipsec.c --- dpdk-20.11.6/app/test/test_ipsec.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_ipsec.c 2022-12-13 10:50:22.000000000 +0000 @@ -310,8 +310,10 @@ } } - if (ts_params->valid_dev_found == 0) - return TEST_FAILED; + if (ts_params->valid_dev_found == 0) { + RTE_LOG(WARNING, USER1, "No compatible crypto device found.\n"); + return TEST_SKIPPED; + } ts_params->mbuf_pool = rte_pktmbuf_pool_create( "CRYPTO_MBUFPOOL", @@ -617,7 +619,8 @@ rte_memcpy(dst, string, len); dst += len; /* copy pad bytes */ - rte_memcpy(dst, esp_pad_bytes, padlen); + rte_memcpy(dst, esp_pad_bytes, RTE_MIN(padlen, + sizeof(esp_pad_bytes))); dst += padlen; /* copy ESP tail header */ rte_memcpy(dst, &espt, sizeof(espt)); diff -Nru dpdk-20.11.6/app/test/test_member.c dpdk-20.11.7/app/test/test_member.c --- dpdk-20.11.6/app/test/test_member.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_member.c 2022-12-13 10:50:22.000000000 +0000 @@ -545,7 +545,6 @@ qsort(generated_keys, MAX_ENTRIES, KEY_SIZE, key_compare); /* Sift through the list of keys and look for duplicates */ - int num_duplicates = 0; for (i = 0; i < MAX_ENTRIES - 1; i++) { if (memcmp(generated_keys[i], generated_keys[i + 1], KEY_SIZE) == 0) { diff -Nru dpdk-20.11.6/app/test/test_member_perf.c dpdk-20.11.7/app/test/test_member_perf.c --- dpdk-20.11.6/app/test/test_member_perf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_member_perf.c 2022-12-13 10:50:22.000000000 +0000 @@ -150,7 +150,6 @@ qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare); /* Sift through the list of keys and look for duplicates */ - int num_duplicates = 0; for (i = 0; i < KEYS_TO_ADD - 1; i++) { if (memcmp(keys[i], keys[i + 1], params->key_size) == 0) { diff -Nru dpdk-20.11.6/app/test/test_service_cores.c dpdk-20.11.7/app/test/test_service_cores.c --- dpdk-20.11.6/app/test/test_service_cores.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_service_cores.c 2022-12-13 10:50:22.000000000 +0000 @@ -22,6 +22,7 @@ static uint32_t service_remote_launch_flag; #define SERVICE_DELAY 1 +#define TIMEOUT_MS 1000 #define DUMMY_SERVICE_NAME "dummy_service" #define MT_SAFE_SERVICE_NAME "mt_safe_service" @@ -119,15 +120,15 @@ return TEST_SUCCESS; } -/* Wait until service lcore not active, or for 100x SERVICE_DELAY */ +/* Wait until service lcore not active, or for TIMEOUT_MS */ static void wait_slcore_inactive(uint32_t slcore_id) { int i; for (i = 0; rte_service_lcore_may_be_active(slcore_id) == 1 && - i < 
100; i++) - rte_delay_ms(SERVICE_DELAY); + i < TIMEOUT_MS; i++) + rte_delay_ms(1); } /* register a single dummy service */ @@ -903,12 +904,25 @@ return unregister_all(); } +static int +service_ensure_stopped_with_timeout(uint32_t sid) +{ + /* give the service time to stop running */ + int i; + for (i = 0; i < TIMEOUT_MS; i++) { + if (!rte_service_may_be_active(sid)) + break; + rte_delay_ms(1); + } + + return rte_service_may_be_active(sid); +} + /* stop a service and wait for it to become inactive */ static int service_may_be_active(void) { const uint32_t sid = 0; - int i; /* expected failure cases */ TEST_ASSERT_EQUAL(-EINVAL, rte_service_may_be_active(10000), @@ -928,19 +942,11 @@ TEST_ASSERT_EQUAL(1, service_lcore_running_check(), "Service core expected to poll service but it didn't"); - /* stop the service */ + /* stop the service, and wait for not-active with timeout */ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0), "Error: Service stop returned non-zero"); - - /* give the service 100ms to stop running */ - for (i = 0; i < 100; i++) { - if (!rte_service_may_be_active(sid)) - break; - rte_delay_ms(SERVICE_DELAY); - } - - TEST_ASSERT_EQUAL(0, rte_service_may_be_active(sid), - "Error: Service not stopped after 100ms"); + TEST_ASSERT_EQUAL(0, service_ensure_stopped_with_timeout(sid), + "Error: Service not stopped after timeout period."); return unregister_all(); } @@ -954,7 +960,6 @@ return TEST_SKIPPED; const uint32_t sid = 0; - int i; uint32_t lcore = rte_get_next_lcore(/* start core */ -1, /* skip main */ 1, @@ -984,16 +989,8 @@ /* stop the service */ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0), "Error: Service stop returned non-zero"); - - /* give the service 100ms to stop running */ - for (i = 0; i < 100; i++) { - if (!rte_service_may_be_active(sid)) - break; - rte_delay_ms(SERVICE_DELAY); - } - - TEST_ASSERT_EQUAL(0, rte_service_may_be_active(sid), - "Error: Service not stopped after 100ms"); + TEST_ASSERT_EQUAL(0, service_ensure_stopped_with_timeout(sid), + "Error: Service not stopped after timeout period."); return unregister_all(); } diff -Nru dpdk-20.11.6/app/test/test_trace.c dpdk-20.11.7/app/test/test_trace.c --- dpdk-20.11.6/app/test/test_trace.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_trace.c 2022-12-13 10:50:22.000000000 +0000 @@ -9,6 +9,8 @@ #include "test.h" #include "test_trace.h" +int app_dpdk_test_tp_count; + static int32_t test_trace_point_globbing(void) { @@ -70,8 +72,15 @@ static int32_t test_trace_point_disable_enable(void) { + int expected; int rc; + /* At tp registration, the associated counter increases once. 
*/ + expected = 1; + TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected, + "Expecting %d, but got %d for app_dpdk_test_tp_count", + expected, app_dpdk_test_tp_count); + rc = rte_trace_point_disable(&__app_dpdk_test_tp); if (rc < 0) goto failed; @@ -79,6 +88,12 @@ if (rte_trace_point_is_enabled(&__app_dpdk_test_tp)) goto failed; + /* No emission expected */ + app_dpdk_test_tp("app.dpdk.test.tp"); + TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected, + "Expecting %d, but got %d for app_dpdk_test_tp_count", + expected, app_dpdk_test_tp_count); + rc = rte_trace_point_enable(&__app_dpdk_test_tp); if (rc < 0) goto failed; @@ -88,6 +103,11 @@ /* Emit the trace */ app_dpdk_test_tp("app.dpdk.test.tp"); + expected++; + TEST_ASSERT_EQUAL(app_dpdk_test_tp_count, expected, + "Expecting %d, but got %d for app_dpdk_test_tp_count", + expected, app_dpdk_test_tp_count); + return TEST_SUCCESS; failed: @@ -101,9 +121,6 @@ current = rte_trace_mode_get(); - if (!rte_trace_is_enabled()) - return TEST_SKIPPED; - rte_trace_mode_set(RTE_TRACE_MODE_DISCARD); if (rte_trace_mode_get() != RTE_TRACE_MODE_DISCARD) goto failed; @@ -172,6 +189,23 @@ return TEST_SUCCESS; } +static int +test_trace_dump(void) +{ + rte_trace_dump(stdout); + return 0; +} + +REGISTER_TEST_COMMAND(trace_dump, test_trace_dump); + +static int +test_trace_metadata_dump(void) +{ + return rte_trace_metadata_dump(stdout); +} + +REGISTER_TEST_COMMAND(trace_metadata_dump, test_trace_metadata_dump); + static struct unit_test_suite trace_tests = { .suite_name = "trace autotest", .setup = NULL, @@ -184,6 +218,8 @@ TEST_CASE(test_trace_point_globbing), TEST_CASE(test_trace_point_regex), TEST_CASE(test_trace_points_lookup), + TEST_CASE(test_trace_dump), + TEST_CASE(test_trace_metadata_dump), TEST_CASES_END() } }; @@ -195,20 +231,3 @@ } REGISTER_TEST_COMMAND(trace_autotest, test_trace); - -static int -test_trace_dump(void) -{ - rte_trace_dump(stdout); - return 0; -} - -REGISTER_TEST_COMMAND(trace_dump, test_trace_dump); - -static int -test_trace_metadata_dump(void) -{ - return rte_trace_metadata_dump(stdout); -} - -REGISTER_TEST_COMMAND(trace_metadata_dump, test_trace_metadata_dump); diff -Nru dpdk-20.11.6/app/test/test_trace.h dpdk-20.11.7/app/test/test_trace.h --- dpdk-20.11.6/app/test/test_trace.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test/test_trace.h 2022-12-13 10:50:22.000000000 +0000 @@ -3,10 +3,12 @@ */ #include +extern int app_dpdk_test_tp_count; RTE_TRACE_POINT( app_dpdk_test_tp, RTE_TRACE_POINT_ARGS(const char *str), rte_trace_point_emit_string(str); + app_dpdk_test_tp_count++; ) RTE_TRACE_POINT_FP( diff -Nru dpdk-20.11.6/app/test-pmd/cmdline.c dpdk-20.11.7/app/test-pmd/cmdline.c --- dpdk-20.11.6/app/test-pmd/cmdline.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test-pmd/cmdline.c 2022-12-13 10:50:22.000000000 +0000 @@ -4778,6 +4778,55 @@ }, }; +struct cmd_csum_mac_swap_result { + cmdline_fixed_string_t csum; + cmdline_fixed_string_t parse; + cmdline_fixed_string_t onoff; + portid_t port_id; +}; + +static void +cmd_csum_mac_swap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_csum_mac_swap_result *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + if (strcmp(res->onoff, "on") == 0) + ports[res->port_id].fwd_mac_swap = 1; + else + ports[res->port_id].fwd_mac_swap = 0; +} + +static cmdline_parse_token_string_t cmd_csum_mac_swap_csum = + TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result, + csum, "csum"); +static 
cmdline_parse_token_string_t cmd_csum_mac_swap_parse = + TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result, + parse, "mac-swap"); +static cmdline_parse_token_string_t cmd_csum_mac_swap_onoff = + TOKEN_STRING_INITIALIZER(struct cmd_csum_mac_swap_result, + onoff, "on#off"); +static cmdline_parse_token_num_t cmd_csum_mac_swap_portid = + TOKEN_NUM_INITIALIZER(struct cmd_csum_mac_swap_result, + port_id, RTE_UINT16); + +static cmdline_parse_inst_t cmd_csum_mac_swap = { + .f = cmd_csum_mac_swap_parsed, + .data = NULL, + .help_str = "csum mac-swap on|off : " + "Enable/Disable forward mac address swap", + .tokens = { + (void *)&cmd_csum_mac_swap_csum, + (void *)&cmd_csum_mac_swap_parse, + (void *)&cmd_csum_mac_swap_onoff, + (void *)&cmd_csum_mac_swap_portid, + NULL, + }, +}; + /* *** ENABLE HARDWARE SEGMENTATION IN TX NON-TUNNELED PACKETS *** */ struct cmd_tso_set_result { cmdline_fixed_string_t tso; @@ -16913,6 +16962,7 @@ (cmdline_parse_inst_t *)&cmd_csum_set, (cmdline_parse_inst_t *)&cmd_csum_show, (cmdline_parse_inst_t *)&cmd_csum_tunnel, + (cmdline_parse_inst_t *)&cmd_csum_mac_swap, (cmdline_parse_inst_t *)&cmd_tso_set, (cmdline_parse_inst_t *)&cmd_tso_show, (cmdline_parse_inst_t *)&cmd_tunnel_tso_set, diff -Nru dpdk-20.11.6/app/test-pmd/config.c dpdk-20.11.7/app/test-pmd/config.c --- dpdk-20.11.6/app/test-pmd/config.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test-pmd/config.c 2022-12-13 10:50:22.000000000 +0000 @@ -1842,7 +1842,6 @@ { struct rte_port *port; struct port_shared_action **tmp; - uint32_t c = 0; int ret = 0; if (port_id_is_invalid(port_id, ENABLED_WARN) || @@ -1877,7 +1876,6 @@ } if (i == n) tmp = &(*tmp)->next; - ++c; } return ret; } @@ -2186,7 +2184,6 @@ { struct rte_port *port; struct port_flow **tmp; - uint32_t c = 0; int ret = 0; if (port_id_is_invalid(port_id, ENABLED_WARN) || @@ -2219,7 +2216,6 @@ } if (i == n) tmp = &(*tmp)->next; - ++c; } return ret; } diff -Nru dpdk-20.11.6/app/test-pmd/csumonly.c dpdk-20.11.7/app/test-pmd/csumonly.c --- dpdk-20.11.6/app/test-pmd/csumonly.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test-pmd/csumonly.c 2022-12-13 10:50:22.000000000 +0000 @@ -888,6 +888,12 @@ * and inner headers */ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + if (ports[fs->tx_port].fwd_mac_swap) { + rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], + &eth_hdr->d_addr); + rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, + &eth_hdr->s_addr); + } parse_ethernet(eth_hdr, &info); l3_hdr = (char *)eth_hdr + info.l2_len; diff -Nru dpdk-20.11.6/app/test-pmd/meson.build dpdk-20.11.7/app/test-pmd/meson.build --- dpdk-20.11.6/app/test-pmd/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test-pmd/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -51,6 +51,7 @@ deps += 'net_i40e' endif if dpdk_conf.has('RTE_NET_IXGBE') + cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS'] deps += 'net_ixgbe' endif if dpdk_conf.has('RTE_NET_DPAA') diff -Nru dpdk-20.11.6/app/test-pmd/noisy_vnf.c dpdk-20.11.7/app/test-pmd/noisy_vnf.c --- dpdk-20.11.6/app/test-pmd/noisy_vnf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test-pmd/noisy_vnf.c 2022-12-13 10:50:22.000000000 +0000 @@ -56,8 +56,8 @@ static inline void do_read(char *vnf_mem) { + uint64_t r __rte_unused; uint64_t i = rte_rand(); - uint64_t r; r = vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) / RTE_CACHE_LINE_SIZE)]; diff -Nru dpdk-20.11.6/app/test-pmd/testpmd.c dpdk-20.11.7/app/test-pmd/testpmd.c --- dpdk-20.11.6/app/test-pmd/testpmd.c 2022-08-29 12:12:02.000000000 +0000 +++ 
dpdk-20.11.7/app/test-pmd/testpmd.c 2022-12-13 10:50:22.000000000 +0000 @@ -216,7 +216,7 @@ * In container, it cannot terminate the process which running with 'stats-period' * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. */ -uint8_t f_quit; +static volatile uint8_t f_quit; uint8_t cl_quit; /* Quit testpmd from cmdline. */ /* @@ -3813,8 +3813,10 @@ "rte_zmalloc(%d struct rte_port) failed\n", RTE_MAX_ETHPORTS); } - for (i = 0; i < RTE_MAX_ETHPORTS; i++) + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + ports[i].fwd_mac_swap = 1; LIST_INIT(&ports[i].flow_tunnel_list); + } /* Initialize ports NUMA structures */ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); diff -Nru dpdk-20.11.6/app/test-pmd/testpmd.h dpdk-20.11.7/app/test-pmd/testpmd.h --- dpdk-20.11.6/app/test-pmd/testpmd.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/app/test-pmd/testpmd.h 2022-12-13 10:50:22.000000000 +0000 @@ -237,7 +237,8 @@ struct rte_ether_addr *mc_addr_pool; /**< pool of multicast addrs */ uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */ uint8_t slave_flag : 1, /**< bonding slave port */ - bond_flag : 1; /**< port is bond device */ + bond_flag : 1, /**< port is bond device */ + fwd_mac_swap : 1; /**< swap packet MAC before forward */ struct port_flow *flow_list; /**< Associated flows. */ struct port_shared_action *actions_list; /**< Associated shared actions. */ diff -Nru dpdk-20.11.6/config/arm/meson.build dpdk-20.11.7/config/arm/meson.build --- dpdk-20.11.6/config/arm/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/config/arm/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -158,7 +158,7 @@ # 'Primary Part number', 'Revision'] detect_vendor = find_program(join_paths( meson.current_source_dir(), 'armv8_machine.py')) - cmd = run_command(detect_vendor.path(), check: false) + cmd = run_command(detect_vendor.path()) if cmd.returncode() == 0 cmd_output = cmd.stdout().to_lower().strip().split(' ') endif diff -Nru dpdk-20.11.6/config/meson.build dpdk-20.11.7/config/meson.build --- dpdk-20.11.6/config/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/config/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -23,7 +23,7 @@ pver = meson.project_version().split('.') major_version = '@0@.@1@'.format(pver.get(0), pver.get(1)) abi_version = run_command(find_program('cat', 'more'), - abi_version_file, check: true).stdout().strip() + abi_version_file).stdout().strip() # Libraries have the abi_version as the filename extension # and have the soname be all but the final part of the abi_version. 
diff -Nru dpdk-20.11.6/config/x86/meson.build dpdk-20.11.7/config/x86/meson.build --- dpdk-20.11.6/config/x86/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/config/x86/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -4,7 +4,7 @@ # get binutils version for the workaround of Bug 97 binutils_ok = true if not is_windows and (is_linux or cc.get_id() == 'gcc') - binutils_ok = run_command(binutils_avx512_check, check: false).returncode() == 0 + binutils_ok = run_command(binutils_avx512_check).returncode() == 0 if not binutils_ok and cc.has_argument('-mno-avx512f') machine_args += '-mno-avx512f' warning('Binutils error with AVX512 assembly, disabling AVX512 support') diff -Nru dpdk-20.11.6/debian/changelog dpdk-20.11.7/debian/changelog --- dpdk-20.11.6/debian/changelog 2022-08-29 16:19:25.000000000 +0000 +++ dpdk-20.11.7/debian/changelog 2022-12-14 00:32:18.000000000 +0000 @@ -1,3 +1,11 @@ +dpdk (20.11.7-1~deb11u1) bullseye; urgency=medium + + * New upstream release 20.11.7; for a full list of changes see: + http://doc.dpdk.org/guides-20.11/rel_notes/release_20_11.html + * Add new experimental symbol to librte-vhost + + -- Luca Boccassi Wed, 14 Dec 2022 00:32:18 +0000 + dpdk (20.11.6-1~deb11u1) bullseye-security; urgency=high [ Henning Schild ] diff -Nru dpdk-20.11.6/debian/librte-vhost21.symbols dpdk-20.11.7/debian/librte-vhost21.symbols --- dpdk-20.11.6/debian/librte-vhost21.symbols 2022-08-29 16:19:07.000000000 +0000 +++ dpdk-20.11.7/debian/librte-vhost21.symbols 2022-12-14 00:32:18.000000000 +0000 @@ -67,3 +67,4 @@ rte_vhost_slave_config_change@EXPERIMENTAL 20.08 rte_vhost_submit_enqueue_burst@EXPERIMENTAL 20.08 rte_vhost_vring_call@DPDK_21 20.11 + rte_vhost_vring_call_nonblock@EXPERIMENTAL 20.11.7 diff -Nru dpdk-20.11.6/devtools/checkpatches.sh dpdk-20.11.7/devtools/checkpatches.sh --- dpdk-20.11.6/devtools/checkpatches.sh 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/devtools/checkpatches.sh 2022-12-13 10:50:22.000000000 +0000 @@ -229,12 +229,12 @@ total=0 status=0 -check () { # +check () { # <patch-file> <commit> local ret=0 + local subject='' headline_printed=false total=$(($total + 1)) - ! $verbose || print_headline "$3" if [ -n "$1" ] ; then tmpinput=$1 else @@ -249,10 +249,14 @@ fi fi + # Subject can be on 2 lines + subject=$(sed '/^Subject: */!d;s///;N;s,\n[[:space:]]\+, ,;s,\n.*,,;q' "$tmpinput") + ! $verbose || print_headline "$subject" + ! $verbose || printf 'Running checkpatch.pl:\n' report=$($DPDK_CHECKPATCH_PATH $options "$tmpinput" 2>/dev/null) if [ $? -ne 0 ] ; then - $headline_printed || print_headline "$3" + $headline_printed || print_headline "$subject" printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p' ret=1 fi @@ -260,7 +264,7 @@ ! $verbose || printf '\nChecking API additions/removals:\n' report=$($VALIDATE_NEW_API "$tmpinput") if [ $? -ne 0 ] ; then - $headline_printed || print_headline "$3" + $headline_printed || print_headline "$subject" printf '%s\n' "$report" ret=1 fi @@ -268,7 +272,7 @@ ! $verbose || printf '\nChecking forbidden tokens additions:\n' report=$(check_forbidden_additions "$tmpinput") if [ $? -ne 0 ] ; then - $headline_printed || print_headline "$3" + $headline_printed || print_headline "$subject" printf '%s\n' "$report" ret=1 fi @@ -276,7 +280,7 @@ ! $verbose || printf '\nChecking __rte_experimental tags:\n' report=$(check_experimental_tags "$tmpinput") if [ $? 
-ne 0 ] ; then - $headline_printed || print_headline "$3" + $headline_printed || print_headline "$subject" printf '%s\n' "$report" ret=1 fi @@ -284,7 +288,7 @@ ! $verbose || printf '\nChecking __rte_internal tags:\n' report=$(check_internal_tags "$tmpinput") if [ $? -ne 0 ] ; then - $headline_printed || print_headline "$3" + $headline_printed || print_headline "$subject" printf '%s\n' "$report" ret=1 fi @@ -300,20 +304,10 @@ if [ -n "$1" ] ; then for patch in "$@" ; do - # Subject can be on 2 lines - subject=$(sed '/^Subject: */!d;s///;N;s,\n[[:space:]]\+, ,;s,\n.*,,;q' "$patch") - check "$patch" '' "$subject" + check "$patch" '' done elif [ ! -t 0 ] ; then # stdin - subject=$(while read header value ; do - if [ "$header" = 'Subject:' ] ; then - IFS= read next - continuation=$(echo "$next" | sed -n 's,^[[:space:]]\+, ,p') - echo $value$continuation - break - fi - done) - check '' '' "$subject" + check '' '' else if [ $number -eq 0 ] ; then commits=$(git rev-list --reverse $range) @@ -321,8 +315,7 @@ commits=$(git rev-list --reverse --max-count=$number HEAD) fi for commit in $commits ; do - subject=$(git log --format='%s' -1 $commit) - check '' $commit "$subject" + check '' $commit done fi pass=$(($total - $status)) diff -Nru dpdk-20.11.6/doc/guides/contributing/abi_policy.rst dpdk-20.11.7/doc/guides/contributing/abi_policy.rst --- dpdk-20.11.6/doc/guides/contributing/abi_policy.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/contributing/abi_policy.rst 2022-12-13 10:50:22.000000000 +0000 @@ -165,7 +165,7 @@ API becomes non-experimental, then the old one is marked with ``__rte_deprecated``. - - The depreciated API should follow the notification process to be removed, + - The deprecated API should follow the notification process to be removed, see :ref:`deprecation_notices`. - At the declaration of the next major ABI version, those ABI changes then diff -Nru dpdk-20.11.6/doc/guides/contributing/abi_versioning.rst dpdk-20.11.7/doc/guides/contributing/abi_versioning.rst --- dpdk-20.11.6/doc/guides/contributing/abi_versioning.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/contributing/abi_versioning.rst 2022-12-13 10:50:22.000000000 +0000 @@ -94,7 +94,7 @@ ... However when a new ABI version is declared, for example DPDK ``22``, old -depreciated functions may be safely removed at this point and the entire old +deprecated functions may be safely removed at this point and the entire old major ABI version removed, see the section :ref:`deprecating_entire_abi` on how this may be done. diff -Nru dpdk-20.11.6/doc/guides/cryptodevs/armv8.rst dpdk-20.11.7/doc/guides/cryptodevs/armv8.rst --- dpdk-20.11.6/doc/guides/cryptodevs/armv8.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/cryptodevs/armv8.rst 2022-12-13 10:50:22.000000000 +0000 @@ -47,7 +47,7 @@ .. code-block:: console - meson build + meson setup build ninja -C build The corresponding device can be created only if the following features diff -Nru dpdk-20.11.6/doc/guides/cryptodevs/bcmfs.rst dpdk-20.11.7/doc/guides/cryptodevs/bcmfs.rst --- dpdk-20.11.6/doc/guides/cryptodevs/bcmfs.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/cryptodevs/bcmfs.rst 2022-12-13 10:50:22.000000000 +0000 @@ -70,7 +70,7 @@ .. 
code-block:: console cd <DPDK-source-directory> - meson <dest-dir> --cross-file config/arm/arm64_stingray_linux_gcc + meson setup <dest-dir> --cross-file config/arm/arm64_stingray_linux_gcc cd <dest-dir> ninja diff -Nru dpdk-20.11.6/doc/guides/freebsd_gsg/build_dpdk.rst dpdk-20.11.7/doc/guides/freebsd_gsg/build_dpdk.rst --- dpdk-20.11.6/doc/guides/freebsd_gsg/build_dpdk.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/freebsd_gsg/build_dpdk.rst 2022-12-13 10:50:22.000000000 +0000 @@ -41,7 +41,7 @@ The following commands can be used to build and install DPDK on a system. The final, install, step generally needs to be run as root:: - meson build + meson setup build cd build ninja ninja install diff -Nru dpdk-20.11.6/doc/guides/howto/openwrt.rst dpdk-20.11.7/doc/guides/howto/openwrt.rst --- dpdk-20.11.6/doc/guides/howto/openwrt.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/howto/openwrt.rst 2022-12-13 10:50:22.000000000 +0000 @@ -100,7 +100,7 @@ ar = 'x86_64-openwrt-linux-ar' strip = 'x86_64-openwrt-linux-strip' - meson builddir --cross-file openwrt-cross + meson setup builddir --cross-file openwrt-cross ninja -C builddir Running DPDK application on OpenWrt diff -Nru dpdk-20.11.6/doc/guides/linux_gsg/build_dpdk.rst dpdk-20.11.7/doc/guides/linux_gsg/build_dpdk.rst --- dpdk-20.11.6/doc/guides/linux_gsg/build_dpdk.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/linux_gsg/build_dpdk.rst 2022-12-13 10:50:22.000000000 +0000 @@ -42,7 +42,7 @@ .. code-block:: console - meson <options> build + meson setup <options> build where "build" is the desired output build directory, and "<options>" can be empty or one of a number of meson or DPDK-specific build options, described @@ -100,7 +100,7 @@ To do so, pass a comma-separated list of the examples to build to the `-Dexamples` meson option as below:: - meson -Dexamples=l2fwd,l3fwd build + meson setup -Dexamples=l2fwd,l3fwd build As with other meson options, this can also be set post-initial-config using `meson configure` in the build directory. There is also a special value "all" to request that all example applications whose @@ -126,12 +126,12 @@ assuming the relevant 32-bit development packages, such as a 32-bit libc, are installed:: PKG_CONFIG_LIBDIR=/usr/lib/pkgconfig \ - meson -Dc_args='-m32' -Dc_link_args='-m32' build + meson setup -Dc_args='-m32' -Dc_link_args='-m32' build For Debian/Ubuntu systems, the equivalent command is:: PKG_CONFIG_LIBDIR=/usr/lib/i386-linux-gnu/pkgconfig \ - meson -Dc_args='-m32' -Dc_link_args='-m32' build + meson setup -Dc_args='-m32' -Dc_link_args='-m32' build Once the build directory has been configured, DPDK can be compiled using ``ninja`` as described above. 
diff -Nru dpdk-20.11.6/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst dpdk-20.11.7/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst --- dpdk-20.11.6/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst 2022-12-13 10:50:22.000000000 +0000 @@ -91,11 +91,11 @@ To cross-compile DPDK on a desired target machine we can use the following command:: - meson cross-build --cross-file <target_machine_configuration> + meson setup cross-build --cross-file <target_machine_configuration> ninja -C cross-build For example if the target machine is arm64 we can use the following command:: - meson arm64-build --cross-file config/arm/arm64_armv8_linux_gcc + meson setup arm64-build --cross-file config/arm/arm64_armv8_linux_gcc ninja -C arm64-build diff -Nru dpdk-20.11.6/doc/guides/nics/ark.rst dpdk-20.11.7/doc/guides/nics/ark.rst --- dpdk-20.11.6/doc/guides/nics/ark.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/nics/ark.rst 2022-12-13 10:50:22.000000000 +0000 @@ -145,7 +145,7 @@ CFLAGS environment prior to the meson build step. I.e., export CFLAGS="-DRTE_LIBRTE_ARK_MIN_TX_PKTLEN=60" - meson build + meson setup build Supported ARK RTL PCIe Instances diff -Nru dpdk-20.11.6/doc/guides/nics/index.rst dpdk-20.11.7/doc/guides/nics/index.rst --- dpdk-20.11.6/doc/guides/nics/index.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/nics/index.rst 2022-12-13 10:50:22.000000000 +0000 @@ -26,6 +26,7 @@ ena enetc enic + fail_safe fm10k hinic hns3 @@ -33,10 +34,10 @@ ice igb igc + intel_vf ionic ipn3ke ixgbe - intel_vf kni liquidio memif @@ -50,6 +51,7 @@ null octeontx octeontx2 + pcap_ring pfe qede sfc_efx @@ -59,8 +61,6 @@ thunderx txgbe vdev_netvsc - virtio vhost + virtio vmxnet3 - pcap_ring - fail_safe diff -Nru dpdk-20.11.6/doc/guides/nics/mlx5.rst dpdk-20.11.7/doc/guides/nics/mlx5.rst --- dpdk-20.11.6/doc/guides/nics/mlx5.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/nics/mlx5.rst 2022-12-13 10:50:22.000000000 +0000 @@ -306,6 +306,8 @@ TCP header (122B). - Rx queue with LRO offload enabled, receiving a non-LRO packet, can forward it with size limited to max LRO size, not to max RX packet length. + - The driver rounds down the port configuration value ``max_lro_pkt_size`` + (from ``rte_eth_rxmode``) to a multiple of 256 due to hardware limitation. - LRO can be used with outer header of TCP packets of the standard format: eth (with or without vlan) / ipv4 or ipv6 / tcp / payload diff -Nru dpdk-20.11.6/doc/guides/nics/mvneta.rst dpdk-20.11.7/doc/guides/nics/mvneta.rst --- dpdk-20.11.6/doc/guides/nics/mvneta.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/nics/mvneta.rst 2022-12-13 10:50:22.000000000 +0000 @@ -114,7 +114,8 @@ .. code-block:: console - meson -Dlib_musdk_dir=/path/to/musdk build ninja -C build + meson setup build --cross-file config/arm/arm64_armada_linux_gcc + ninja -C build Usage Example diff -Nru dpdk-20.11.6/doc/guides/nics/mvpp2.rst dpdk-20.11.7/doc/guides/nics/mvpp2.rst --- dpdk-20.11.6/doc/guides/nics/mvpp2.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/nics/mvpp2.rst 2022-12-13 10:50:22.000000000 +0000 @@ -130,7 +130,8 @@ .. 
code-block:: console - meson -Dlib_musdk_dir=/path/to/musdk build ninja -C build + meson setup build --cross-file config/arm/arm64_armada_linux_gcc + ninja -C build Usage Example diff -Nru dpdk-20.11.6/doc/guides/nics/virtio.rst dpdk-20.11.7/doc/guides/nics/virtio.rst --- dpdk-20.11.6/doc/guides/nics/virtio.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/nics/virtio.rst 2022-12-13 10:50:22.000000000 +0000 @@ -43,7 +43,7 @@ In this release, the virtio PMD provides the basic functionality of packet reception and transmission. * It supports merge-able buffers per packet when receiving packets and scattered buffer per packet - when transmitting packets. The packet size supported is from 64 to 1518. + when transmitting packets. The packet size supported is from 64 to 9728. * It supports multicast packets and promiscuous mode. diff -Nru dpdk-20.11.6/doc/guides/platform/bluefield.rst dpdk-20.11.7/doc/guides/platform/bluefield.rst --- dpdk-20.11.6/doc/guides/platform/bluefield.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/platform/bluefield.rst 2022-12-13 10:50:22.000000000 +0000 @@ -62,7 +62,7 @@ .. code-block:: console - meson build + meson setup build ninja -C build Cross Compilation @@ -117,5 +117,5 @@ .. code-block:: console - meson build --cross-file config/arm/arm64_bluefield_linux_gcc + meson setup build --cross-file config/arm/arm64_bluefield_linux_gcc ninja -C build diff -Nru dpdk-20.11.6/doc/guides/platform/octeontx.rst dpdk-20.11.7/doc/guides/platform/octeontx.rst --- dpdk-20.11.6/doc/guides/platform/octeontx.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/platform/octeontx.rst 2022-12-13 10:50:22.000000000 +0000 @@ -94,14 +94,14 @@ .. code-block:: console - meson build -Dexamples=<application> + meson setup build -Dexamples=<application> ninja -C build The example applications can be compiled using the following: .. code-block:: console - meson build -Dexamples=<application> + meson setup build -Dexamples=<application> ninja -C build Cross Compilation @@ -127,7 +127,7 @@ .. code-block:: console cd <dpdk directory> - meson build --cross-file config/arm/arm64_thunderx_linux_gcc + meson setup build --cross-file config/arm/arm64_thunderx_linux_gcc ninja -C build The example applications can be compiled using the following: @@ -135,7 +135,7 @@ .. code-block:: console cd <dpdk directory> - meson build --cross-file config/arm/arm64_thunderx_linux_gcc -Dexamples=<application> + meson setup build --cross-file config/arm/arm64_thunderx_linux_gcc -Dexamples=<application> ninja -C build .. note:: diff -Nru dpdk-20.11.6/doc/guides/platform/octeontx2.rst dpdk-20.11.7/doc/guides/platform/octeontx2.rst --- dpdk-20.11.6/doc/guides/platform/octeontx2.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/platform/octeontx2.rst 2022-12-13 10:50:22.000000000 +0000 @@ -505,7 +505,7 @@ .. code-block:: console - meson build + meson setup build ninja -C build Cross Compilation @@ -515,7 +515,7 @@ .. code-block:: console - meson build --cross-file config/arm/arm64_octeontx2_linux_gcc + meson setup build --cross-file config/arm/arm64_octeontx2_linux_gcc ninja -C build .. 
note:: diff -Nru dpdk-20.11.6/doc/guides/prog_guide/build-sdk-meson.rst dpdk-20.11.7/doc/guides/prog_guide/build-sdk-meson.rst --- dpdk-20.11.6/doc/guides/prog_guide/build-sdk-meson.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/build-sdk-meson.rst 2022-12-13 10:50:22.000000000 +0000 @@ -9,7 +9,7 @@ For many platforms, compiling and installing DPDK should work using the following set of commands:: - meson build + meson setup build cd build ninja ninja install @@ -51,12 +51,12 @@ ---------------------- To configure a build, run the meson tool, passing the path to the directory -to be used for the build e.g. ``meson build``, as shown above. If calling +to be used for the build e.g. ``meson setup build``, as shown above. If calling meson from somewhere other than the root directory of the DPDK project the path to the root directory should be passed as the first parameter, and the build path as the second. For example, to build DPDK in /tmp/dpdk-build:: - user@host:/tmp$ meson ~user/dpdk dpdk-build + user@host:/tmp$ meson setup ~user/dpdk dpdk-build Meson will then configure the build based on settings in the project's meson.build files, and by checking the build environment for e.g. compiler @@ -74,24 +74,29 @@ Examples of adjusting the defaults when doing initial meson configuration. Project-specific options are passed used -Doption=value:: - meson --werror werrorbuild # build with warnings as errors + # build with warnings as errors + meson setup --werror werrorbuild - meson --buildtype=debug debugbuild # build for debugging + # build for debugging + meson setup --buildtype=debug debugbuild - meson -Dexamples=l3fwd,l2fwd fwdbuild # build some examples as - # part of the normal DPDK build + # build some examples as part of the normal DPDK build + meson setup -Dexamples=l3fwd,l2fwd fwdbuild - meson -Dmax_lcores=8 smallbuild # scale build for smaller systems + # scale build for smaller systems + meson setup -Dmax_lcores=8 smallbuild - meson -Denable_docs=true fullbuild # build and install docs + # build and install docs + meson setup -Denable_docs=true fullbuild - meson -Dmachine=default # use builder-independent baseline -march + # use builder-independent baseline -march + meson setup -Dcpu_instruction_set=generic - meson -Ddisable_drivers=event/*,net/tap # disable tap driver and all - # eventdev PMDs for a smaller build + # disable tap driver and all eventdev PMDs for a smaller build + meson setup -Ddisable_drivers=event/*,net/tap - meson -Denable_trace_fp=true tracebuild # build with fast path traces - # enabled + # build with fast path traces enabled + meson setup -Denable_trace_fp=true tracebuild Examples of setting some of the same options using meson configure:: @@ -121,7 +126,7 @@ such as the compiler to use can be passed via environment variables. For example:: - CC=clang meson clang-build + CC=clang meson setup clang-build .. 
note:: @@ -174,12 +179,12 @@ To cross-compile DPDK on a desired target machine we can use the following command:: - meson cross-build --cross-file <target_machine_configuration> + meson setup cross-build --cross-file <target_machine_configuration> For example if the target machine is arm64 we can use the following command:: - meson arm-build --cross-file config/arm/arm64_armv8_linux_gcc + meson setup arm-build --cross-file config/arm/arm64_armv8_linux_gcc where config/arm/arm64_armv8_linux_gcc contains settings for the compilers and other build tools to be used, as well as characteristics of the target diff -Nru dpdk-20.11.6/doc/guides/prog_guide/event_timer_adapter.rst dpdk-20.11.7/doc/guides/prog_guide/event_timer_adapter.rst --- dpdk-20.11.6/doc/guides/prog_guide/event_timer_adapter.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/event_timer_adapter.rst 2022-12-13 10:50:22.000000000 +0000 @@ -107,18 +107,19 @@ .. code-block:: c - #define NSECPERSEC 1E9 // No of ns in 1 sec + #define NSECPERSEC 1E9 const struct rte_event_timer_adapter_conf adapter_config = { .event_dev_id = event_dev_id, .timer_adapter_id = 0, + .socket_id = rte_socket_id(), .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, - .timer_tick_ns = NSECPERSEC / 10, // 100 milliseconds - .max_tmo_nsec = 180 * NSECPERSEC // 2 minutes + .timer_tick_ns = NSECPERSEC / 10, + .max_tmo_ns = 180 * NSECPERSEC, .nb_timers = 40000, - .timer_adapter_flags = 0, + .flags = 0, }; - struct rte_event_timer_adapter *adapter = NULL; + struct rte_event_timer_adapter *adapter; adapter = rte_event_timer_adapter_create(&adapter_config); if (adapter == NULL) { ... }; diff -Nru dpdk-20.11.6/doc/guides/prog_guide/lto.rst dpdk-20.11.7/doc/guides/prog_guide/lto.rst --- dpdk-20.11.6/doc/guides/prog_guide/lto.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/lto.rst 2022-12-13 10:50:22.000000000 +0000 @@ -30,4 +30,4 @@ .. code-block:: console - meson build -Db_lto=true + meson setup build -Db_lto=true diff -Nru dpdk-20.11.6/doc/guides/prog_guide/profile_app.rst dpdk-20.11.7/doc/guides/prog_guide/profile_app.rst --- dpdk-20.11.6/doc/guides/prog_guide/profile_app.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/profile_app.rst 2022-12-13 10:50:22.000000000 +0000 @@ -42,7 +42,7 @@ .. code-block:: console - meson build + meson setup build meson configure build -Dc_args=-DRTE_ETHDEV_PROFILE_WITH_VTUNE ninja -C build diff -Nru dpdk-20.11.6/doc/guides/prog_guide/ring_lib.rst dpdk-20.11.7/doc/guides/prog_guide/ring_lib.rst --- dpdk-20.11.6/doc/guides/prog_guide/ring_lib.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/ring_lib.rst 2022-12-13 10:50:22.000000000 +0000 @@ -172,7 +172,7 @@ .. figure:: img/ring-dequeue1.* - Dequeue last step + Dequeue first step Dequeue Second Step diff -Nru dpdk-20.11.6/doc/guides/prog_guide/trace_lib.rst dpdk-20.11.7/doc/guides/prog_guide/trace_lib.rst --- dpdk-20.11.6/doc/guides/prog_guide/trace_lib.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/trace_lib.rst 2022-12-13 10:50:22.000000000 +0000 @@ -271,10 +271,16 @@ The trace memory will be allocated through an internal function ``__rte_trace_mem_per_thread_alloc()``. The trace memory will be allocated per thread to enable lock less trace-emit function. -The memory for the trace memory for DPDK lcores will be allocated on -``rte_eal_init()`` if the trace is enabled through a EAL option. 
-For non DPDK threads, on the first trace emission, the memory will be -allocated. + +For non lcore threads, the trace memory is allocated on the first trace +emission. + +For lcore threads, if trace points are enabled through a EAL option, the trace +memory is allocated when the threads are known of DPDK +(``rte_eal_init`` for EAL lcores, ``rte_thread_register`` for non-EAL lcores). +Otherwise, when trace points are enabled later in the life of the application, +the behavior is the same as non lcore threads and the trace memory is allocated +on the first trace emission. Trace memory layout ~~~~~~~~~~~~~~~~~~~ diff -Nru dpdk-20.11.6/doc/guides/prog_guide/vhost_lib.rst dpdk-20.11.7/doc/guides/prog_guide/vhost_lib.rst --- dpdk-20.11.6/doc/guides/prog_guide/vhost_lib.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/prog_guide/vhost_lib.rst 2022-12-13 10:50:22.000000000 +0000 @@ -271,6 +271,12 @@ Poll enqueue completion status from async data path. Completed packets are returned to applications through ``pkts``. +* ``rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx)`` + + Notify the guest that used descriptors have been added to the vring. This function + will return -EAGAIN when vq's access lock is held by other thread, user should try + again later. + Vhost-user Implementations -------------------------- diff -Nru dpdk-20.11.6/doc/guides/rel_notes/release_20_11.rst dpdk-20.11.7/doc/guides/rel_notes/release_20_11.rst --- dpdk-20.11.6/doc/guides/rel_notes/release_20_11.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/rel_notes/release_20_11.rst 2022-12-13 10:50:22.000000000 +0000 @@ -3550,3 +3550,339 @@ ~~~~~~~~~~~~~~~~~~~~ + +20.11.7 Release Notes +--------------------- + + +20.11.7 Fixes +~~~~~~~~~~~~~ + +* app/testpmd: fix build with clang 15 +* app/testpmd: fix build with clang 15 in flow code +* app/testpmd: fix MAC header in checksum forward engine +* app/testpmd: make quit flag volatile +* app/testpmd: restore ixgbe bypass commands +* baseband/acc100: add LDPC encoder padding function +* baseband/acc100: add null checks +* baseband/acc100: check turbo dec/enc input +* baseband/acc100: fix clearing PF IR outside handler +* baseband/acc100: fix close cleanup +* baseband/acc100: fix device minimum alignment +* baseband/acc100: fix input error related to padding +* baseband/acc100: fix input length for CRC24B +* baseband/acc100: fix memory leak +* baseband/acc100: fix ring/queue allocation +* bus/dpaa: fix build with clang 15 +* ci: bump versions of actions in GHA +* ci: update to new API for step outputs in GHA +* common/iavf: avoid copy in async mode +* common/sfc_efx/base: fix maximum Tx data count +* cryptodev: fix unduly newlines in logs +* crypto/ipsec_mb: fix build with GCC 12 +* crypto/qat: fix build with GCC 12 +* crypto/qat: fix null hash algorithm digest size +* devtools: fix checkpatch header retrieval from stdin +* doc: add LRO size limitation in mlx5 guide +* doc: avoid meson deprecation in setup +* doc: fix application name in procinfo guide +* doc: fix colons in testpmd aged flow rules +* doc: fix event timer adapter guide +* doc: fix maximum packet size of virtio driver +* doc: fix net drivers ordering +* doc: fix typo depreciated instead of deprecated +* doc: fix underlines in testpmd guide +* drivers: fix typos found by Lintian +* eal: fix data race in multi-process support +* eal: fix doxygen comments for UUID +* eal: fix side effect in some pointer arithmetic macros +* eal/x86: add 256 bytes copy for AVX2 +* 
eventdev/crypto: fix multi-process +* eventdev/eth_tx: add spinlock for adapter start/stop +* eventdev/eth_tx: fix adapter stop +* eventdev/eth_tx: fix queue delete +* eventdev: fix name of Rx conf type in documentation +* event/dlb2: handle enqueuing more than maximum depth +* event/dsw: fix flow migration +* event/sw: fix device name in dump +* event/sw: fix flow ID init in self test +* event/sw: fix log in self test +* examples/fips_validation: fix typo in error log +* examples/ipsec-secgw: fix Tx checksum offload flag +* examples/ipsec-secgw: fix Tx checksum offload flag +* examples/ipsec-secgw: use Tx checksum offload conditionally +* examples/l2fwd-crypto: fix typo in error message +* examples/performance-thread: fix build with GCC 12 +* examples/qos_sched: fix number of subport profiles +* examples/vhost: fix use after free +* examples/vm_power_manager: use safe list iterator +* graph: fix node objects allocation +* gro: check payload length after trim +* gro: fix chain index for more than 2 packets +* gro: trim tail padding bytes +* hash: fix RCU configuration memory leak +* ipsec: fix build with GCC 12 +* license: fix paths +* malloc: fix storage size for some allocations +* mem: fix API doc about allocation on secondary processes +* mempool: fix get objects from mempool with cache +* net: accept unaligned data in checksum routines +* net/atlantic: fix build with clang 15 +* net/axgbe: clear buffer on scattered Rx chaining failure +* net/axgbe: fix checksum and RSS in scattered Rx +* net/axgbe: fix length of each segment in scattered Rx +* net/axgbe: fix mbuf lengths in scattered Rx +* net/axgbe: fix scattered Rx +* net/axgbe: optimise scattered Rx +* net/axgbe: remove freeing buffer in scattered Rx +* net/axgbe: reset end of packet in scattered Rx +* net/axgbe: save segment data in scattered Rx +* net/bnxt: fix build with GCC 13 +* net/bnxt: fix null pointer dereference in LED config +* net/bnxt: fix representor info freeing +* net/bnxt: remove unnecessary check +* net/bonding: fix array overflow in Rx burst +* net/bonding: fix descriptor limit reporting +* net/bonding: fix double slave link status query +* net/bonding: fix dropping valid MAC packets +* net/bonding: fix flow flush order on close +* net/bonding: fix mbuf fast free handling +* net/bonding: fix mode 4 with dedicated queues +* net/bonding: fix slave device Rx/Tx offload configuration +* net/bonding: fix Tx hash for TCP +* net/bonding: set initial value of descriptor count alignment +* net/dpaa: fix buffer freeing in slow path +* net/dpaa: fix jumbo packet Rx in case of VSP +* net/ena: fix build with GCC 12 +* net/ena: remove useless address check +* net/hns3: add L3 and L4 RSS types +* net/hns3: delete unused markup +* net/hns3: extract functions to create RSS and FDIR flow rule +* net/hns3: fix build with gcov +* net/hns3: fix clearing hardware MAC statistics +* net/hns3: fix code check warnings +* net/hns3: fix crash when secondary process access FW +* net/hns3: fix header files includes +* net/hns3: fix IPv4 and IPv6 RSS +* net/hns3: fix IPv4 RSS +* net/hns3: fix lock protection of RSS flow rule +* net/hns3: fix minimum Tx frame length +* net/hns3: fix next-to-use overflow in simple Tx +* net/hns3: fix next-to-use overflow in SVE Tx +* net/hns3: fix packet type for GENEVE +* net/hns3: fix restore filter function input +* net/hns3: fix RSS filter restore +* net/hns3: fix RSS flow rule restore +* net/hns3: fix RSS rule restore +* net/hns3: fix typos in IPv6 SCTP fields +* net/hns3: fix VF mailbox message handling +* 
net/hns3: move flow direction rule recovery +* net/hns3: optimize SVE Tx performance +* net/hns3: revert fix mailbox communication with HW +* net/i40e: fix jumbo frame Rx with X722 +* net/i40e: fix VF representor release +* net/iavf: add thread for event callbacks +* net/iavf: check illegal packet sizes +* net/iavf: fix outer checksum flags +* net/iavf: fix pattern check for flow director parser +* net/iavf: fix queue stop for large VF +* net/iavf: fix tainted scalar +* net/iavf: fix Tx done descriptors cleanup +* net/ice/base: fix add MAC rule +* net/ice/base: fix array overflow in add switch recipe +* net/ice/base: fix duplicate flow rules +* net/ice/base: fix media type of PHY 10G SFI C2C +* net/ice/base: ignore promiscuous already exist +* net/ice: check illegal packet sizes +* net/ice: fix null function pointer call +* net/ice: fix RSS hash update +* net/ice: fix scalar Rx path segment +* net/ice: fix scalar Tx path segment +* net/igc: remove unnecessary PHY ID checks +* net/ionic: fix adapter name for logging +* net/ionic: fix endianness for RSS +* net/ionic: fix endianness for Rx and Tx +* net/ionic: fix reported error stats +* net/ixgbe: fix broadcast Rx on VF after promisc removal +* net/ixgbe: fix unexpected VLAN Rx in promisc mode on VF +* net/ixgbevf: fix promiscuous and allmulti +* net/memif: fix crash with different number of Rx/Tx queues +* net/mlx4: fix Verbs FD leak in secondary process +* net/mlx5: fix check for orphan wait descriptor +* net/mlx5: fix drop action validation +* net/mlx5: fix first segment inline length +* net/mlx5: fix hairpin split with set VLAN VID action +* net/mlx5: fix inline length exceeding descriptor limit +* net/mlx5: fix maximum LRO message size +* net/mlx5: fix meter profile delete after disable +* net/mlx5: fix modify action with tunnel decapsulation +* net/mlx5: fix port closing +* net/mlx5: fix port event cleaning order +* net/mlx5: fix port initialization with small LRO +* net/mlx5: fix race condition in counter pool resizing +* net/mlx5: fix RSS expansion buffer size +* net/mlx5: fix single not inline packet storing +* net/mlx5: fix thread workspace memory leak +* net/mlx5: fix tunnel header with IPIP offload +* net/mlx5: fix Tx check for hardware descriptor length +* net/mlx5: fix Verbs FD leak in secondary process +* net/mvneta: fix build with GCC 12 +* net/nfp: fix memory leak in Rx +* net/nfp: fix Rx descriptor DMA address +* net/nfp: improve HW info header log readability +* net/qede/base: fix 32-bit build with GCC 12 +* net/qede: fix minsize build +* net/tap: fix overflow of network interface index +* net/txgbe: remove semaphore between SW/FW +* net/txgbe: rename some extended statistics +* net/virtio: fix crash when configured twice +* net/virtio: remove declaration of undefined function +* node: check Rx element allocation +* pdump: do not allow enable/disable in primary process +* power: fix some doxygen comments +* Revert "build: fix warnings when running external commands" +* Revert "mempool: fix get objects from mempool with cache" +* ring: fix description +* ring: remove leftover comment about watermark +* sched: fix subport profile configuration +* service: fix build with clang 15 +* service: fix early move to inactive status +* service: fix stats race condition for MT safe service +* telemetry: fix escaping of invalid json characters +* test/crypto: fix bitwise operator in a SNOW3G case +* test/crypto: fix debug messages +* test/crypto: fix PDCP vectors +* test/crypto: fix wireless auth digest segment +* test/efd: fix build 
with clang 15 +* test/event: fix build with clang 15 +* test/hash: fix bulk lookup check +* test/hash: remove dead code in extendable bucket test +* test/ipsec: fix build with GCC 12 +* test/ipsec: skip if no compatible device +* test/member: fix build with clang 15 +* test/service: fix spurious failures by extending timeout +* timer: fix stopping all timers +* trace: fix dynamically enabling trace points +* trace: fix leak with regexp +* trace: fix metadata dump +* trace: fix mode change +* trace: fix mode for new trace point +* trace: fix race in debug dump +* vdpa/ifc: handle data path update failure +* version: 20.11.7-rc1 +* vhost: add non-blocking API for posting interrupt +* vhost: fix virtqueue use after free on NUMA reallocation + +20.11.7 Validation +~~~~~~~~~~~~~~~~~~ + +* Intel(R) Testing + + * Basic Intel(R) NIC(ixgbe, i40e and ice) testing + * PF (i40e/ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. + * VF (i40e/ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. + * PF/VF (ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc. + * Compile Testing + * Intel NIC single core/NIC performance + * Power and IPsec + + * Basic cryptodev and virtio testing + + * vhost/virtio basic loopback, PVP and performance test + * cryptodev Function/Performance: Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc. + + +* Red Hat(R) Testing + + * Platform + + * RHEL 8 + * Kernel 4.18 + * Qemu 6.2 + * X540-AT2 NIC(ixgbe, 10G) + + * Functionality + + * Guest with device assignment(PF) throughput testing(1G hugepage size) + * Guest with device assignment(PF) throughput testing(2M hugepage size) + * Guest with device assignment(VF) throughput testing + * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing + * PVP vhost-user 2Q throughput testing + * PVP vhost-user 1Q cross numa node throughput testing + * Guest with vhost-user 2 queues throughput testing + * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect + * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect + * PVP 1Q live migration testing + * PVP 1Q cross numa node live migration testing + * Guest with ovs+dpdk+vhost-user 1Q live migration testing + * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) + * Guest with ovs+dpdk+vhost-user 2Q live migration testing + * Guest with ovs+dpdk+vhost-user 4Q live migration testing + * Host PF + DPDK testing + * Host VF + DPDK testing + + +* Nvidia(R) Testing + + * Basic functionality via testpmd/example applications + + * Tx/Rx + * xstats + * Timestamps + * Link status + * RTE flow and flow_director + * RSS + * VLAN filtering, stripping and insertion + * Checksum/TSO + * ptype + * link_status_interrupt example application + * l3fwd-power example application + * Multi-process example applications + * Hardware LRO tests + + * Build tests + + * Ubuntu 20.04.1 with MLNX_OFED_LINUX-5.8-1.1.2.1. + * Ubuntu 20.04.5 with MLNX_OFED_LINUX-5.8-1.1.2.1. + * Ubuntu 20.04.5 with rdma-core master (76cfaa1). + * Ubuntu 20.04.5 with rdma-core v28.0. + * Ubuntu 18.04.6 with rdma-core v17.1. + * Ubuntu 18.04.6 with rdma-core master (76cfaa1) (i386). + * Ubuntu 16.04.7 with rdma-core v22.7. + * Fedora 37 with rdma-core v41.0. + * Fedora 38 (Rawhide) with rdma-core v41.0. + * CentOS 7 7.9.2009 with rdma-core master (76cfaa1). + * CentOS 7 7.9.2009 with MLNX_OFED_LINUX-5.8-1.0.1.1. 
+ * CentOS 8 8.4.2105 with rdma-core master (76cfaa1). + * OpenSUSE Leap 15.4 with rdma-core v38.1. + * Windows Server 2019 with Clang 11.0.0. + + * BlueField-2 + + * DOCA 1.5.0 + * fw 24.35.1012 + + * ConnectX-6 Dx + + * Ubuntu 20.04 + * Driver MLNX_OFED_LINUX-5.8-1.0.1.1 + * fw 22.35.1012 + + * ConnectX-5 + + * Ubuntu 20.04 + * Driver MLNX_OFED_LINUX-5.8-1.0.1.1 + * fw 16.35.1012 + + * ConnectX-4 Lx + + * Ubuntu 20.04 + * Driver MLNX_OFED_LINUX-5.8-1.0.1.1 + * fw 14.32.1010 + +20.11.7 Known Issues +~~~~~~~~~~~~~~~~~~~~ + +* net/iavf + + * vf_interrupt_pmd/nic_interrupt_VF_vfio_pci: l3fwd-power Wake up failed on X722 37d0 diff -Nru dpdk-20.11.6/doc/guides/sample_app_ug/vm_power_management.rst dpdk-20.11.7/doc/guides/sample_app_ug/vm_power_management.rst --- dpdk-20.11.6/doc/guides/sample_app_ug/vm_power_management.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/sample_app_ug/vm_power_management.rst 2022-12-13 10:50:22.000000000 +0000 @@ -255,7 +255,7 @@ .. code-block:: console cd dpdk - meson build + meson setup build cd build ninja meson configure -Dexamples=vm_power_manager @@ -494,7 +494,7 @@ .. code-block:: console cd dpdk - meson build + meson setup build cd build ninja meson configure -Dexamples=vm_power_manager/guest_cli diff -Nru dpdk-20.11.6/doc/guides/testpmd_app_ug/testpmd_funcs.rst dpdk-20.11.7/doc/guides/testpmd_app_ug/testpmd_funcs.rst --- dpdk-20.11.6/doc/guides/testpmd_app_ug/testpmd_funcs.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/testpmd_app_ug/testpmd_funcs.rst 2022-12-13 10:50:22.000000000 +0000 @@ -4241,7 +4241,7 @@ testpmd> Dumping HW internal information -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``flow dump`` dumps the hardware's internal representation information of all flows. It is bound to ``rte_flow_dev_dump()``:: @@ -4257,10 +4257,10 @@ Caught error type [...] ([...]): [...] Listing and destroying aged flow rules -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``flow aged`` simply lists aged flow rules be get from api ``rte_flow_get_aged_flows``, -and ``destroy`` parameter can be used to destroy those flow rules in PMD. +and ``destroy`` parameter can be used to destroy those flow rules in PMD:: flow aged {port_id} [destroy] @@ -4295,7 +4295,7 @@ 1 0 0 i-- 0 0 0 i-- -If attach ``destroy`` parameter, the command will destroy all the list aged flow rules. +If attach ``destroy`` parameter, the command will destroy all the list aged flow rules:: testpmd> flow aged 0 destroy Port 0 total aged flows: 4 diff -Nru dpdk-20.11.6/doc/guides/tools/proc_info.rst dpdk-20.11.7/doc/guides/tools/proc_info.rst --- dpdk-20.11.6/doc/guides/tools/proc_info.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/tools/proc_info.rst 2022-12-13 10:50:22.000000000 +0000 @@ -1,10 +1,10 @@ .. SPDX-License-Identifier: BSD-3-Clause Copyright(c) 2015 Intel Corporation. -dpdk-procinfo Application -========================= +dpdk-proc-info Application +========================== -The dpdk-procinfo application is a Data Plane Development Kit (DPDK) application +The dpdk-proc-info application is a Data Plane Development Kit (DPDK) application that runs as a DPDK secondary process and is capable of retrieving port statistics, resetting port statistics, printing DPDK memory information and displaying debug information for port. @@ -17,7 +17,7 @@ .. 
code-block:: console - ./<build_dir>/app/dpdk-procinfo -- -m | [-p PORTMASK] [--stats | --xstats | + ./<build_dir>/app/dpdk-proc-info -- -m | [-p PORTMASK] [--stats | --xstats | --stats-reset | --xstats-reset] [ --show-port | --show-tm | --show-crypto | --show-ring[=name] | --show-mempool[=name] | --iter-mempool=name ] @@ -72,14 +72,14 @@ Limitations ----------- -* dpdk-procinfo should run alongside primary process with same DPDK version. +* dpdk-proc-info should run alongside primary process with same DPDK version. -* When running ``dpdk-procinfo`` with shared library mode, it is required to +* When running ``dpdk-proc-info`` with shared library mode, it is required to pass the same NIC PMD libraries as used for the primary application. Any mismatch in PMD library arguments can lead to undefined behavior and results affecting primary application too. -* Stats retrieval using ``dpdk-procinfo`` is not supported for virtual devices like PCAP and TAP. +* Stats retrieval using ``dpdk-proc-info`` is not supported for virtual devices like PCAP and TAP. -* Since default DPDK EAL arguments for ``dpdk-procinfo`` are ``-c1, -n4 & --proc-type=secondary``, +* Since default DPDK EAL arguments for ``dpdk-proc-info`` are ``-c1, -n4 & --proc-type=secondary``, It is not expected that the user passes any EAL arguments. diff -Nru dpdk-20.11.6/doc/guides/windows_gsg/build_dpdk.rst dpdk-20.11.7/doc/guides/windows_gsg/build_dpdk.rst --- dpdk-20.11.6/doc/guides/windows_gsg/build_dpdk.rst 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/doc/guides/windows_gsg/build_dpdk.rst 2022-12-13 10:50:22.000000000 +0000 @@ -100,7 +100,7 @@ .. code-block:: console cd C:\Users\me\dpdk - meson -Dexamples=helloworld build + meson setup -Dexamples=helloworld build ninja -C build Option 2. Cross-Compile with MinGW-w64 @@ -111,5 +111,5 @@ .. 
code-block:: console - meson --cross-file config/x86/cross-mingw -Dexamples=helloworld build + meson setup --cross-file config/x86/cross-mingw -Dexamples=helloworld build ninja -C build diff -Nru dpdk-20.11.6/drivers/baseband/acc100/rte_acc100_pmd.c dpdk-20.11.7/drivers/baseband/acc100/rte_acc100_pmd.c --- dpdk-20.11.6/drivers/baseband/acc100/rte_acc100_pmd.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/baseband/acc100/rte_acc100_pmd.c 2022-12-13 10:50:22.000000000 +0000 @@ -423,11 +423,12 @@ while (ring_data->valid) { if ((ring_data->int_nb < ACC100_PF_INT_DMA_DL_DESC_IRQ) || ( ring_data->int_nb > - ACC100_PF_INT_DMA_DL5G_DESC_IRQ)) + ACC100_PF_INT_DMA_DL5G_DESC_IRQ)) { rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x", ring_data->int_nb, ring_data->detailed_info); - /* Initialize Info Ring entry and move forward */ - ring_data->val = 0; + /* Initialize Info Ring entry and move forward */ + ring_data->val = 0; + } info_ring_head++; ring_data = acc100_dev->info_ring + (info_ring_head & ACC100_INFO_RING_MASK); @@ -569,9 +570,9 @@ reg_addr = &vf_reg_addr; /* Allocate InfoRing */ d->info_ring = rte_zmalloc_socket("Info Ring", - ACC100_INFO_RING_NUM_ENTRIES * - sizeof(*d->info_ring), RTE_CACHE_LINE_SIZE, - dev->data->socket_id); + ACC100_INFO_RING_NUM_ENTRIES * + sizeof(*d->info_ring), RTE_CACHE_LINE_SIZE, + dev->data->socket_id); if (d->info_ring == NULL) { rte_bbdev_log(ERR, "Failed to allocate Info Ring for %s:%u", @@ -660,7 +661,8 @@ acc100_reg_write(d, reg_addr->ring_size, value); /* Configure tail pointer for use when SDONE enabled */ - d->tail_ptrs = rte_zmalloc_socket( + if (d->tail_ptrs == NULL) + d->tail_ptrs = rte_zmalloc_socket( dev->device->driver->name, ACC100_NUM_QGRPS * ACC100_NUM_AQS * sizeof(uint32_t), RTE_CACHE_LINE_SIZE, socket_id); @@ -668,8 +670,8 @@ rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u", dev->device->driver->name, dev->data->dev_id); - rte_free(d->sw_rings); - return -ENOMEM; + ret = -ENOMEM; + goto free_sw_rings; } d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs); @@ -692,15 +694,16 @@ /* Continue */ } - d->harq_layout = rte_zmalloc_socket("HARQ Layout", + if (d->harq_layout == NULL) + d->harq_layout = rte_zmalloc_socket("HARQ Layout", ACC100_HARQ_LAYOUT * sizeof(*d->harq_layout), RTE_CACHE_LINE_SIZE, dev->data->socket_id); if (d->harq_layout == NULL) { rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u", dev->device->driver->name, dev->data->dev_id); - rte_free(d->sw_rings); - return -ENOMEM; + ret = -ENOMEM; + goto free_tail_ptrs; } /* Mark as configured properly */ @@ -709,8 +712,16 @@ rte_bbdev_log_debug( "ACC100 (%s) configured sw_rings = %p, sw_rings_iova = %#" PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova); - return 0; + +free_tail_ptrs: + rte_free(d->tail_ptrs); + d->tail_ptrs = NULL; +free_sw_rings: + rte_free(d->sw_rings_base); + d->sw_rings = NULL; + + return ret; } static int @@ -768,6 +779,9 @@ rte_free(d->info_ring); rte_free(d->sw_rings_base); d->sw_rings_base = NULL; + d->tail_ptrs = NULL; + d->info_ring = NULL; + d->harq_layout = NULL; } /* Ensure all in flight HW transactions are completed */ usleep(ACC100_LONG_WAIT); @@ -824,6 +838,10 @@ struct acc100_queue *q; int16_t q_idx; + if (d == NULL) { + rte_bbdev_log(ERR, "Undefined device"); + return -ENODEV; + } /* Allocate the queue data structure. 
*/ q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q), RTE_CACHE_LINE_SIZE, conf->socket); @@ -831,10 +849,6 @@ rte_bbdev_log(ERR, "Failed to allocate queue memory"); return -ENOMEM; } - if (d == NULL) { - rte_bbdev_log(ERR, "Undefined device"); - return -ENODEV; - } q->d = d; q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id)); @@ -1081,7 +1095,7 @@ d->acc100_conf.q_ul_4g.num_qgroups - 1; dev_info->default_queue_conf = default_queue_conf; dev_info->cpu_flag_reqs = NULL; - dev_info->min_alignment = 64; + dev_info->min_alignment = 1; dev_info->capabilities = bbdev_capabilities; #ifdef ACC100_EXT_MEM dev_info->harq_buffer_size = d->ddr_size; @@ -1618,12 +1632,25 @@ return 0; } +/* May need to pad LDPC Encoder input to avoid small beat for ACC100. */ +static inline uint16_t +pad_le_in(uint16_t blen, struct acc100_queue *q __rte_unused) +{ + uint16_t last_beat; + + last_beat = blen % 64; + if ((last_beat > 0) && (last_beat <= 8)) + blen += 8; + + return blen; +} + static inline int acc100_dma_desc_le_fill(struct rte_bbdev_enc_op *op, struct acc100_dma_req_desc *desc, struct rte_mbuf **input, struct rte_mbuf *output, uint32_t *in_offset, uint32_t *out_offset, uint32_t *out_length, - uint32_t *mbuf_total_left, uint32_t *seg_total_left) + uint32_t *mbuf_total_left, uint32_t *seg_total_left, struct acc100_queue *q) { int next_triplet = 1; /* FCW already done */ uint16_t K, in_length_in_bits, in_length_in_bytes; @@ -1633,8 +1660,7 @@ K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c; in_length_in_bits = K - enc->n_filler; - if ((enc->op_flags & RTE_BBDEV_LDPC_CRC_24A_ATTACH) || - (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)) + if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH) in_length_in_bits -= 24; in_length_in_bytes = in_length_in_bits >> 3; @@ -1647,8 +1673,7 @@ } next_triplet = acc100_dma_fill_blk_type_in(desc, input, in_offset, - in_length_in_bytes, - seg_total_left, next_triplet); + pad_le_in(in_length_in_bytes, q), seg_total_left, next_triplet); if (unlikely(next_triplet < 0)) { rte_bbdev_log(ERR, "Mismatch between data to process and mbuf data length in bbdev_op: %p", @@ -2080,6 +2105,11 @@ return -1; } + if (unlikely(turbo_enc->input.length == 0)) { + rte_bbdev_log(ERR, "input length null"); + return -1; + } + if (turbo_enc->code_block_mode == 0) { tb = &turbo_enc->tb_params; if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE @@ -2099,11 +2129,12 @@ RTE_BBDEV_TURBO_MAX_CB_SIZE); return -1; } - if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1)) + if (unlikely(tb->c_neg > 0)) { rte_bbdev_log(ERR, - "c_neg (%u) is out of range 0 <= value <= %u", - tb->c_neg, - RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1); + "c_neg (%u) expected to be null", + tb->c_neg); + return -1; + } if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) { rte_bbdev_log(ERR, "c (%u) is out of range 1 <= value <= %u", @@ -2363,7 +2394,7 @@ acc100_header_init(&desc->req); desc->req.numCBs = num; - in_length_in_bytes = ops[0]->ldpc_enc.input.data->data_len; + in_length_in_bytes = pad_le_in(ops[0]->ldpc_enc.input.data->data_len, q); out_length = (enc->cb_params.e + 7) >> 3; desc->req.m2dlen = 1 + num; desc->req.d2mlen = num; @@ -2432,7 +2463,7 @@ ret = acc100_dma_desc_le_fill(op, &desc->req, &input, output, &in_offset, &out_offset, &out_length, &mbuf_total_left, - &seg_total_left); + &seg_total_left, q); if (unlikely(ret < 0)) return ret; @@ -2490,6 +2521,10 @@ r = op->turbo_enc.tb_params.r; while (mbuf_total_left > 0 && r < c) { + if (unlikely(input == NULL)) { + rte_bbdev_log(ERR, "Not enough input 
segment"); + return -EINVAL; + } seg_total_left = rte_pktmbuf_data_len(input) - in_offset; /* Set up DMA descriptor */ desc = q->ring_addr + ((q->sw_ring_head + total_enqueued_cbs) @@ -2595,6 +2630,11 @@ return -1; } + if (unlikely(turbo_dec->input.length == 0)) { + rte_bbdev_log(ERR, "input length null"); + return -1; + } + if (turbo_dec->code_block_mode == 0) { tb = &turbo_dec->tb_params; if ((tb->k_neg < RTE_BBDEV_TURBO_MIN_CB_SIZE @@ -2615,11 +2655,13 @@ RTE_BBDEV_TURBO_MAX_CB_SIZE); return -1; } - if (tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1)) + if (unlikely(tb->c_neg > (RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1))) { rte_bbdev_log(ERR, "c_neg (%u) is out of range 0 <= value <= %u", tb->c_neg, RTE_BBDEV_TURBO_MAX_CODE_BLOCKS - 1); + return -1; + } if (tb->c < 1 || tb->c > RTE_BBDEV_TURBO_MAX_CODE_BLOCKS) { rte_bbdev_log(ERR, "c (%u) is out of range 1 <= value <= %u", @@ -3514,6 +3556,8 @@ break; enqueued_cbs += ret; } + if (unlikely(enqueued_cbs == 0)) + return 0; /* Nothing to enqueue */ acc100_dma_enqueue(q, enqueued_cbs, &q_data->queue_stats); @@ -3666,8 +3710,6 @@ /* Clearing status, it will be set based on response */ op->status = 0; - op->status |= ((rsp.input_err) - ? (1 << RTE_BBDEV_DATA_ERROR) : 0); op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); @@ -3738,8 +3780,6 @@ rte_bbdev_log_debug("Resp. desc %p: %x", desc, rsp.val); - op->status |= ((rsp.input_err) - ? (1 << RTE_BBDEV_DATA_ERROR) : 0); op->status |= ((rsp.dma_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); op->status |= ((rsp.fcw_err) ? (1 << RTE_BBDEV_DRV_ERROR) : 0); @@ -4056,6 +4096,8 @@ for (i = 0; i < dequeue_num; ++i) { op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) & q->sw_ring_wrap_mask))->req.op_addr; + if (unlikely(op == NULL)) + break; if (op->turbo_dec.code_block_mode == 0) ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs, &aq_dequeued); @@ -4101,6 +4143,8 @@ for (i = 0; i < dequeue_num; ++i) { op = (q->ring_addr + ((q->sw_ring_tail + dequeued_cbs) & q->sw_ring_wrap_mask))->req.op_addr; + if (unlikely(op == NULL)) + break; if (op->ldpc_dec.code_block_mode == 0) ret = dequeue_dec_one_op_tb(q, &ops[i], dequeued_cbs, &aq_dequeued); diff -Nru dpdk-20.11.6/drivers/bus/dpaa/base/qbman/bman.h dpdk-20.11.7/drivers/bus/dpaa/base/qbman/bman.h --- dpdk-20.11.6/drivers/bus/dpaa/base/qbman/bman.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/bus/dpaa/base/qbman/bman.h 2022-12-13 10:50:22.000000000 +0000 @@ -519,7 +519,6 @@ struct bm_mc_command *bm_cmd; struct bm_mc_result *bm_res; - int aq_count = 0; bool stop = false; while (!stop) { @@ -532,8 +531,7 @@ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) { /* Pool is empty */ stop = true; - } else - ++aq_count; + } }; return 0; } diff -Nru dpdk-20.11.6/drivers/common/iavf/iavf_adminq.c dpdk-20.11.7/drivers/common/iavf/iavf_adminq.c --- dpdk-20.11.6/drivers/common/iavf/iavf_adminq.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/common/iavf/iavf_adminq.c 2022-12-13 10:50:22.000000000 +0000 @@ -788,7 +788,8 @@ } /* if ready, copy the desc back to temp */ - if (iavf_asq_done(hw)) { + if (iavf_asq_done(hw) && + !details->async && !details->postpone) { iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA); if (buff != NULL) diff -Nru dpdk-20.11.6/drivers/common/mlx5/linux/meson.build dpdk-20.11.7/drivers/common/mlx5/linux/meson.build --- dpdk-20.11.6/drivers/common/mlx5/linux/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ 
dpdk-20.11.7/drivers/common/mlx5/linux/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -37,7 +37,7 @@ endforeach if static_ibverbs or dlopen_ibverbs # Build without adding shared libs to Requires.private - ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs', check: true).stdout() + ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout() ext_deps += declare_dependency(compile_args: ibv_cflags.split()) endif if static_ibverbs diff -Nru dpdk-20.11.6/drivers/common/sfc_efx/base/ef10_nic.c dpdk-20.11.7/drivers/common/sfc_efx/base/ef10_nic.c --- dpdk-20.11.6/drivers/common/sfc_efx/base/ef10_nic.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/common/sfc_efx/base/ef10_nic.c 2022-12-13 10:50:22.000000000 +0000 @@ -2117,7 +2117,7 @@ /* Alignment for WPTR updates */ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; - encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); + encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_TX_KER_BYTE_CNT); /* No boundary crossing limits */ encp->enc_tx_dma_desc_boundary = 0; diff -Nru dpdk-20.11.6/drivers/crypto/kasumi/rte_kasumi_pmd.c dpdk-20.11.7/drivers/crypto/kasumi/rte_kasumi_pmd.c --- dpdk-20.11.6/drivers/crypto/kasumi/rte_kasumi_pmd.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/crypto/kasumi/rte_kasumi_pmd.c 2022-12-13 10:50:22.000000000 +0000 @@ -385,12 +385,13 @@ op->sym->session = NULL; } - enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op, - processed_op, NULL); + if (unlikely(processed_op != 1)) + return 0; + enqueued_op = rte_ring_enqueue(qp->processed_ops, op); qp->qp_stats.enqueued_count += enqueued_op; *accumulated_enqueued_ops += enqueued_op; - return enqueued_op; + return 1; } static uint16_t diff -Nru dpdk-20.11.6/drivers/crypto/qat/qat_sym_session.c dpdk-20.11.7/drivers/crypto/qat/qat_sym_session.c --- dpdk-20.11.6/drivers/crypto/qat/qat_sym_session.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/crypto/qat/qat_sym_session.c 2022-12-13 10:50:22.000000000 +0000 @@ -1409,6 +1409,10 @@ QAT_LOG(ERR, "invalid keylen %u", auth_keylen); return -EFAULT; } + + RTE_VERIFY(auth_keylen <= sizeof(ipad)); + RTE_VERIFY(auth_keylen <= sizeof(opad)); + rte_memcpy(ipad, auth_key, auth_keylen); rte_memcpy(opad, auth_key, auth_keylen); @@ -1736,7 +1740,12 @@ hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr; hash->auth_config.reserved = 0; - hash->auth_config.config = + if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) + hash->auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode, + cdesc->qat_hash_alg, 4); + else + hash->auth_config.config = ICP_QAT_HW_AUTH_CONFIG_BUILD(cdesc->auth_mode, cdesc->qat_hash_alg, digestsize); @@ -2000,10 +2009,16 @@ /* Auth CD config setup */ hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3; hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; - hash_cd_ctrl->inner_res_sz = digestsize; - hash_cd_ctrl->final_sz = digestsize; hash_cd_ctrl->inner_state1_sz = state1_size; - auth_param->auth_res_sz = digestsize; + if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) { + hash_cd_ctrl->inner_res_sz = 4; + hash_cd_ctrl->final_sz = 4; + auth_param->auth_res_sz = 4; + } else { + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + auth_param->auth_res_sz = digestsize; + } hash_cd_ctrl->inner_state2_sz = state2_size; hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + diff -Nru 
dpdk-20.11.6/drivers/crypto/snow3g/rte_snow3g_pmd.c dpdk-20.11.7/drivers/crypto/snow3g/rte_snow3g_pmd.c --- dpdk-20.11.6/drivers/crypto/snow3g/rte_snow3g_pmd.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/crypto/snow3g/rte_snow3g_pmd.c 2022-12-13 10:50:22.000000000 +0000 @@ -410,12 +410,13 @@ op->sym->session = NULL; } - enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, - (void **)&op, processed_op, NULL); + if (unlikely(processed_op != 1)) + return 0; + enqueued_op = rte_ring_enqueue(qp->processed_ops, op); qp->qp_stats.enqueued_count += enqueued_op; *accumulated_enqueued_ops += enqueued_op; - return enqueued_op; + return 1; } static uint16_t diff -Nru dpdk-20.11.6/drivers/event/dlb2/dlb2.c dpdk-20.11.7/drivers/event/dlb2/dlb2.c --- dpdk-20.11.6/drivers/event/dlb2/dlb2.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/event/dlb2/dlb2.c 2022-12-13 10:50:22.000000000 +0000 @@ -460,7 +460,7 @@ cfg->num_ldb_queues; cfg->num_hist_list_entries = resources_asked->num_ldb_ports * - DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; + evdev_dlb2_default_info.max_event_port_dequeue_depth; DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n", cfg->num_ldb_queues, @@ -1137,7 +1137,7 @@ cfg.cq_depth = rte_align32pow2(dequeue_depth); cfg.cq_depth_threshold = 1; - cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT; + cfg.cq_history_list_size = cfg.cq_depth; if (handle->cos_id == DLB2_COS_DEFAULT) cfg.cos_id = 0; @@ -2616,6 +2616,7 @@ struct dlb2_eventdev_port *ev_port = event_port; struct dlb2_port *qm_port = &ev_port->qm_port; struct process_local_port_data *port_data; + int num_tx; int i; RTE_ASSERT(ev_port->enq_configured); @@ -2624,8 +2625,8 @@ i = 0; port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)]; - - while (i < num) { + num_tx = RTE_MIN(num, ev_port->conf.enqueue_depth); + while (i < num_tx) { uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE]; uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE]; int pop_offs = 0; diff -Nru dpdk-20.11.6/drivers/event/dsw/dsw_evdev.h dpdk-20.11.7/drivers/event/dsw/dsw_evdev.h --- dpdk-20.11.6/drivers/event/dsw/dsw_evdev.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/event/dsw/dsw_evdev.h 2022-12-13 10:50:22.000000000 +0000 @@ -126,7 +126,6 @@ enum dsw_migration_state { DSW_MIGRATION_STATE_IDLE, DSW_MIGRATION_STATE_PAUSING, - DSW_MIGRATION_STATE_FORWARDING, DSW_MIGRATION_STATE_UNPAUSING }; @@ -190,6 +189,13 @@ uint16_t paused_events_len; struct rte_event paused_events[DSW_MAX_EVENTS]; + uint16_t emigrating_events_len; + /* Buffer for not-yet-processed events pertaining to a flow + * emigrating from this port. These events will be forwarded + * to the target port. 
+ */ + struct rte_event emigrating_events[DSW_MAX_EVENTS]; + uint16_t seen_events_len; uint16_t seen_events_idx; struct dsw_queue_flow seen_events[DSW_MAX_EVENTS_RECORDED]; diff -Nru dpdk-20.11.6/drivers/event/dsw/dsw_event.c dpdk-20.11.7/drivers/event/dsw/dsw_event.c --- dpdk-20.11.6/drivers/event/dsw/dsw_event.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/event/dsw/dsw_event.c 2022-12-13 10:50:22.000000000 +0000 @@ -234,6 +234,15 @@ queue_id, flow_hash); } +static __rte_always_inline bool +dsw_port_is_flow_migrating(struct dsw_port *port, uint8_t queue_id, + uint16_t flow_hash) +{ + return dsw_is_queue_flow_in_ary(port->emigration_target_qfs, + port->emigration_targets_len, + queue_id, flow_hash); +} + static void dsw_port_add_paused_flows(struct dsw_port *port, struct dsw_queue_flow *qfs, uint8_t qfs_len) @@ -268,9 +277,19 @@ port->paused_flows[i] = port->paused_flows[last_idx]; port->paused_flows_len--; - break; + + DSW_LOG_DP_PORT(DEBUG, port->id, + "Unpausing queue_id %d flow_hash %d.\n", + target_qf->queue_id, + target_qf->flow_hash); + + return; } } + + DSW_LOG_DP_PORT(ERR, port->id, + "Failed to unpause queue_id %d flow_hash %d.\n", + target_qf->queue_id, target_qf->flow_hash); } static void @@ -281,7 +300,6 @@ for (i = 0; i < qfs_len; i++) dsw_port_remove_paused_flow(port, &qfs[i]); - } static void @@ -434,14 +452,15 @@ static bool dsw_select_emigration_target(struct dsw_evdev *dsw, - struct dsw_queue_flow_burst *bursts, - uint16_t num_bursts, uint8_t source_port_id, - int16_t *port_loads, uint16_t num_ports, - uint8_t *target_port_ids, - struct dsw_queue_flow *target_qfs, - uint8_t *targets_len) + struct dsw_port *source_port, + struct dsw_queue_flow_burst *bursts, + uint16_t num_bursts, + int16_t *port_loads, uint16_t num_ports, + uint8_t *target_port_ids, + struct dsw_queue_flow *target_qfs, + uint8_t *targets_len) { - int16_t source_port_load = port_loads[source_port_id]; + int16_t source_port_load = port_loads[source_port->id]; struct dsw_queue_flow *candidate_qf = NULL; uint8_t candidate_port_id = 0; int16_t candidate_weight = -1; @@ -466,7 +485,7 @@ for (port_id = 0; port_id < num_ports; port_id++) { int16_t weight; - if (port_id == source_port_id) + if (port_id == source_port->id) continue; if (!dsw_is_serving_port(dsw, port_id, qf->queue_id)) @@ -488,7 +507,7 @@ if (candidate_weight < 0) return false; - DSW_LOG_DP_PORT(DEBUG, source_port_id, "Selected queue_id %d " + DSW_LOG_DP_PORT(DEBUG, source_port->id, "Selected queue_id %d " "flow_hash %d (with flow load %d) for migration " "to port %d.\n", candidate_qf->queue_id, candidate_qf->flow_hash, @@ -496,7 +515,7 @@ candidate_port_id); port_loads[candidate_port_id] += candidate_flow_load; - port_loads[source_port_id] -= candidate_flow_load; + port_loads[source_port->id] -= candidate_flow_load; target_port_ids[*targets_len] = candidate_port_id; target_qfs[*targets_len] = *candidate_qf; @@ -522,8 +541,8 @@ for (i = 0; i < DSW_MAX_FLOWS_PER_MIGRATION; i++) { bool found; - found = dsw_select_emigration_target(dsw, bursts, num_bursts, - source_port->id, + found = dsw_select_emigration_target(dsw, source_port, + bursts, num_bursts, port_loads, dsw->num_ports, target_port_ids, target_qfs, @@ -603,6 +622,7 @@ port->paused_events_len++; } + static void dsw_port_buffer_non_paused(struct dsw_evdev *dsw, struct dsw_port *source_port, uint8_t dest_port_id, const struct rte_event *event) @@ -674,40 +694,39 @@ } static void -dsw_port_flush_paused_events(struct dsw_evdev *dsw, - struct dsw_port *source_port, - const struct 
dsw_queue_flow *qf) +dsw_port_flush_no_longer_paused_events(struct dsw_evdev *dsw, + struct dsw_port *source_port) { uint16_t paused_events_len = source_port->paused_events_len; struct rte_event paused_events[paused_events_len]; - uint8_t dest_port_id; uint16_t i; if (paused_events_len == 0) return; - if (dsw_port_is_flow_paused(source_port, qf->queue_id, qf->flow_hash)) - return; - rte_memcpy(paused_events, source_port->paused_events, paused_events_len * sizeof(struct rte_event)); source_port->paused_events_len = 0; - dest_port_id = dsw_schedule(dsw, qf->queue_id, qf->flow_hash); - for (i = 0; i < paused_events_len; i++) { struct rte_event *event = &paused_events[i]; uint16_t flow_hash; flow_hash = dsw_flow_id_hash(event->flow_id); - if (event->queue_id == qf->queue_id && - flow_hash == qf->flow_hash) + if (dsw_port_is_flow_paused(source_port, event->queue_id, + flow_hash)) + dsw_port_buffer_paused(source_port, event); + else { + uint8_t dest_port_id; + + dest_port_id = dsw_schedule(dsw, event->queue_id, + flow_hash); + dsw_port_buffer_non_paused(dsw, source_port, dest_port_id, event); - else - dsw_port_buffer_paused(source_port, event); + } } } @@ -750,11 +769,6 @@ DSW_LOG_DP_PORT(DEBUG, port->id, "Migration completed for " "queue_id %d flow_hash %d.\n", queue_id, flow_hash); - - if (queue_schedule_type == RTE_SCHED_TYPE_ATOMIC) { - dsw_port_remove_paused_flow(port, qf); - dsw_port_flush_paused_events(dsw, port, qf); - } } finished = port->emigration_targets_len - left_qfs_len; @@ -821,10 +835,31 @@ if (dsw->num_ports == 1) return; - if (seen_events_len < DSW_MAX_EVENTS_RECORDED) + DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n"); + + if (seen_events_len < DSW_MAX_EVENTS_RECORDED) { + DSW_LOG_DP_PORT(DEBUG, source_port->id, "Not enough events " + "are recorded to allow for a migration.\n"); return; + } - DSW_LOG_DP_PORT(DEBUG, source_port->id, "Considering emigration.\n"); + /* A flow migration cannot be initiated if there are paused + * events, since some/all of those events may be have been + * produced as a result of processing the flow(s) selected for + * migration. Moving such a flow would potentially introduced + * reordering, since processing the migrated flow on the + * receiving flow may commence before the to-be-enqueued-to + + * flows are unpaused, leading to paused events on the second + * port as well, destined for the same paused flow(s). When + * those flows are unpaused, the resulting events are + * delivered the owning port in an undefined order. 
+ */ + if (source_port->paused_events_len > 0) { + DSW_LOG_DP_PORT(DEBUG, source_port->id, "There are " + "events in the paus buffer.\n"); + return; + } /* Randomize interval to avoid having all threads considering * emigration at the same in point in time, which might lead @@ -921,9 +956,8 @@ } static void -dsw_port_flush_paused_events(struct dsw_evdev *dsw, - struct dsw_port *source_port, - const struct dsw_queue_flow *qf); +dsw_port_flush_no_longer_paused_events(struct dsw_evdev *dsw, + struct dsw_port *source_port); static void dsw_port_handle_unpause_flows(struct dsw_evdev *dsw, struct dsw_port *port, @@ -948,60 +982,121 @@ if (dsw_schedule(dsw, qf->queue_id, qf->flow_hash) == port->id) port->immigrations++; + } + + dsw_port_flush_no_longer_paused_events(dsw, port); +} + +static void +dsw_port_buffer_in_buffer(struct dsw_port *port, + const struct rte_event *event) + +{ + RTE_ASSERT(port->in_buffer_start == 0); + + port->in_buffer[port->in_buffer_len] = *event; + port->in_buffer_len++; +} + +static void +dsw_port_forward_emigrated_event(struct dsw_evdev *dsw, + struct dsw_port *source_port, + struct rte_event *event) +{ + uint16_t i; + + for (i = 0; i < source_port->emigration_targets_len; i++) { + struct dsw_queue_flow *qf = + &source_port->emigration_target_qfs[i]; + uint8_t dest_port_id = + source_port->emigration_target_port_ids[i]; + struct dsw_port *dest_port = &dsw->ports[dest_port_id]; - dsw_port_flush_paused_events(dsw, port, qf); + if (event->queue_id == qf->queue_id && + dsw_flow_id_hash(event->flow_id) == qf->flow_hash) { + /* No need to care about bursting forwarded + * events (to the destination port's in_ring), + * since migration doesn't happen very often, + * and also the majority of the dequeued + * events will likely *not* be forwarded. + */ + while (rte_event_ring_enqueue_burst(dest_port->in_ring, + event, 1, + NULL) != 1) + rte_pause(); + return; + } } + + /* Event did not belong to the emigrated flows */ + dsw_port_buffer_in_buffer(source_port, event); +} + +static void +dsw_port_stash_migrating_event(struct dsw_port *port, + const struct rte_event *event) +{ + port->emigrating_events[port->emigrating_events_len] = *event; + port->emigrating_events_len++; } -#define FORWARD_BURST_SIZE (32) +#define DRAIN_DEQUEUE_BURST_SIZE (32) static void -dsw_port_forward_emigrated_flow(struct dsw_port *source_port, - struct rte_event_ring *dest_ring, - uint8_t queue_id, - uint16_t flow_hash) +dsw_port_drain_in_ring(struct dsw_port *source_port) { - uint16_t events_left; + uint16_t num_events; + uint16_t dequeued; /* Control ring message should been seen before the ring count * is read on the port's in_ring. */ rte_smp_rmb(); - events_left = rte_event_ring_count(source_port->in_ring); + num_events = rte_event_ring_count(source_port->in_ring); - while (events_left > 0) { - uint16_t in_burst_size = - RTE_MIN(FORWARD_BURST_SIZE, events_left); - struct rte_event in_burst[in_burst_size]; - uint16_t in_len; + for (dequeued = 0; dequeued < num_events; ) { + uint16_t burst_size = RTE_MIN(DRAIN_DEQUEUE_BURST_SIZE, + num_events - dequeued); + struct rte_event events[burst_size]; + uint16_t len; uint16_t i; - in_len = rte_event_ring_dequeue_burst(source_port->in_ring, - in_burst, - in_burst_size, NULL); - /* No need to care about bursting forwarded events (to - * the destination port's in_ring), since migration - * doesn't happen very often, and also the majority of - * the dequeued events will likely *not* be forwarded. 
- */ - for (i = 0; i < in_len; i++) { - struct rte_event *e = &in_burst[i]; - if (e->queue_id == queue_id && - dsw_flow_id_hash(e->flow_id) == flow_hash) { - while (rte_event_ring_enqueue_burst(dest_ring, - e, 1, - NULL) != 1) - rte_pause(); - } else { - uint16_t last_idx = source_port->in_buffer_len; - source_port->in_buffer[last_idx] = *e; - source_port->in_buffer_len++; - } + len = rte_event_ring_dequeue_burst(source_port->in_ring, + events, burst_size, + NULL); + + for (i = 0; i < len; i++) { + struct rte_event *event = &events[i]; + uint16_t flow_hash; + + flow_hash = dsw_flow_id_hash(event->flow_id); + + if (unlikely(dsw_port_is_flow_migrating(source_port, + event->queue_id, + flow_hash))) + dsw_port_stash_migrating_event(source_port, + event); + else + dsw_port_buffer_in_buffer(source_port, event); } - events_left -= in_len; + dequeued += len; + } +} + +static void +dsw_port_forward_emigrated_flows(struct dsw_evdev *dsw, + struct dsw_port *source_port) +{ + uint16_t i; + + for (i = 0; i < source_port->emigrating_events_len; i++) { + struct rte_event *event = &source_port->emigrating_events[i]; + + dsw_port_forward_emigrated_event(dsw, source_port, event); } + source_port->emigrating_events_len = 0; } static void @@ -1012,22 +1107,27 @@ dsw_port_flush_out_buffers(dsw, source_port); - rte_smp_wmb(); - for (i = 0; i < source_port->emigration_targets_len; i++) { struct dsw_queue_flow *qf = &source_port->emigration_target_qfs[i]; uint8_t dest_port_id = source_port->emigration_target_port_ids[i]; - struct dsw_port *dest_port = &dsw->ports[dest_port_id]; dsw->queues[qf->queue_id].flow_to_port_map[qf->flow_hash] = - dest_port_id; - - dsw_port_forward_emigrated_flow(source_port, dest_port->in_ring, - qf->queue_id, qf->flow_hash); + dest_port_id; } + rte_smp_wmb(); + + dsw_port_drain_in_ring(source_port); + dsw_port_forward_emigrated_flows(dsw, source_port); + + dsw_port_remove_paused_flows(source_port, + source_port->emigration_target_qfs, + source_port->emigration_targets_len); + + dsw_port_flush_no_longer_paused_events(dsw, source_port); + /* Flow table update and migration destination port's enqueues * must be seen before the control message. */ @@ -1048,9 +1148,7 @@ if (port->cfm_cnt == (dsw->num_ports-1)) { switch (port->migration_state) { case DSW_MIGRATION_STATE_PAUSING: - DSW_LOG_DP_PORT(DEBUG, port->id, "Going into forwarding " - "migration state.\n"); - port->migration_state = DSW_MIGRATION_STATE_FORWARDING; + dsw_port_move_emigrating_flows(dsw, port); break; case DSW_MIGRATION_STATE_UNPAUSING: dsw_port_end_emigration(dsw, port, @@ -1090,18 +1188,18 @@ static void dsw_port_note_op(struct dsw_port *port, uint16_t num_events) { - /* To pull the control ring reasonably often on busy ports, - * each dequeued/enqueued event is considered an 'op' too. - */ port->ops_since_bg_task += (num_events+1); } static void dsw_port_bg_process(struct dsw_evdev *dsw, struct dsw_port *port) { - if (unlikely(port->migration_state == DSW_MIGRATION_STATE_FORWARDING && - port->pending_releases == 0)) - dsw_port_move_emigrating_flows(dsw, port); + /* For simplicity (in the migration logic), avoid all + * background processing in case event processing is in + * progress. 
+ */ + if (port->pending_releases > 0) + return; /* Polling the control ring is relatively inexpensive, and * polling it often helps bringing down migration latency, so @@ -1161,7 +1259,7 @@ uint16_t i; DSW_LOG_DP_PORT(DEBUG, source_port->id, "Attempting to enqueue %d " - "events to port %d.\n", events_len, source_port->id); + "events.\n", events_len); dsw_port_bg_process(dsw, source_port); @@ -1344,6 +1442,38 @@ return rte_event_ring_dequeue_burst(port->in_ring, events, num, NULL); } +static void +dsw_port_stash_migrating_events(struct dsw_port *port, + struct rte_event *events, uint16_t *num) +{ + uint16_t i; + + /* The assumption here - performance-wise - is that events + * belonging to migrating flows are relatively rare. + */ + for (i = 0; i < (*num); ) { + struct rte_event *event = &events[i]; + uint16_t flow_hash; + + flow_hash = dsw_flow_id_hash(event->flow_id); + + if (unlikely(dsw_port_is_flow_migrating(port, event->queue_id, + flow_hash))) { + uint16_t left; + + dsw_port_stash_migrating_event(port, event); + + (*num)--; + left = *num - i; + + if (left > 0) + memmove(event, event + 1, + left * sizeof(struct rte_event)); + } else + i++; + } +} + uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events, uint16_t num, uint64_t wait __rte_unused) @@ -1361,6 +1491,11 @@ dequeued = dsw_port_dequeue_burst(source_port, events, num); + if (unlikely(source_port->migration_state == + DSW_MIGRATION_STATE_PAUSING)) + dsw_port_stash_migrating_events(source_port, events, + &dequeued); + source_port->pending_releases = dequeued; dsw_port_load_record(source_port, dequeued); diff -Nru dpdk-20.11.6/drivers/event/sw/sw_evdev.c dpdk-20.11.7/drivers/event/sw/sw_evdev.c --- dpdk-20.11.6/drivers/event/sw/sw_evdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/event/sw/sw_evdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -625,8 +625,8 @@ "Ordered", "Atomic", "Parallel", "Directed" }; uint32_t i; - fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name", - sw->port_count, sw->qid_count); + fprintf(f, "EventDev %s: ports %d, qids %d\n", + dev->data->name, sw->port_count, sw->qid_count); fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n", sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts); diff -Nru dpdk-20.11.6/drivers/event/sw/sw_evdev_selftest.c dpdk-20.11.7/drivers/event/sw/sw_evdev_selftest.c --- dpdk-20.11.6/drivers/event/sw/sw_evdev_selftest.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/event/sw/sw_evdev_selftest.c 2022-12-13 10:50:22.000000000 +0000 @@ -1488,6 +1488,7 @@ goto fail; } ev.queue_id = t->qid[i]; + ev.flow_id = 0; ev.op = RTE_EVENT_OP_NEW; ev.mbuf = arp; *rte_event_pmd_selftest_seqn(arp) = i; @@ -1640,8 +1641,8 @@ } if (val != port_expected[i]) { printf("%d: %s value incorrect, expected %"PRIu64 - " got %d\n", __LINE__, port_names[i], - port_expected[i], id); + " got %" PRIu64 "\n", __LINE__, port_names[i], + port_expected[i], val); failed = 1; } /* reset to zero */ diff -Nru dpdk-20.11.6/drivers/net/atlantic/atl_rxtx.c dpdk-20.11.7/drivers/net/atlantic/atl_rxtx.c --- dpdk-20.11.6/drivers/net/atlantic/atl_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/atlantic/atl_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -1132,10 +1132,9 @@ if (txq != NULL) { sw_ring = txq->sw_ring; int head = txq->tx_head; - int cnt; - int i; + int cnt = head; - for (i = 0, cnt = head; ; i++) { + while (true) { txd = &txq->hw_ring[cnt]; if (txd->dd) diff -Nru dpdk-20.11.6/drivers/net/axgbe/axgbe_rxtx.c 
dpdk-20.11.7/drivers/net/axgbe/axgbe_rxtx.c --- dpdk-20.11.6/drivers/net/axgbe/axgbe_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/axgbe/axgbe_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -321,19 +321,18 @@ struct axgbe_rx_queue *rxq = rx_queue; volatile union axgbe_rx_desc *desc; - uint64_t old_dirty = rxq->dirty; struct rte_mbuf *first_seg = NULL; struct rte_mbuf *mbuf, *tmbuf; - unsigned int err; - uint32_t error_status; + unsigned int err = 0; + uint32_t error_status = 0; uint16_t idx, pidx, data_len = 0, pkt_len = 0; + bool eop = 0; idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); + while (nb_rx < nb_pkts) { - bool eop = 0; next_desc: - if (unlikely(idx == rxq->nb_desc)) - idx = 0; + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur); desc = &rxq->desc[idx]; @@ -361,19 +360,6 @@ } mbuf = rxq->sw_ring[idx]; - /* Check for any errors and free mbuf*/ - err = AXGMAC_GET_BITS_LE(desc->write.desc3, - RX_NORMAL_DESC3, ES); - error_status = 0; - if (unlikely(err)) { - error_status = desc->write.desc3 & AXGBE_ERR_STATUS; - if ((error_status != AXGBE_L3_CSUM_ERR) - && (error_status != AXGBE_L4_CSUM_ERR)) { - rxq->errors++; - rte_pktmbuf_free(mbuf); - goto err_set; - } - } rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *)); if (!AXGMAC_GET_BITS_LE(desc->write.desc3, @@ -384,58 +370,90 @@ } else { eop = 1; pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, - RX_NORMAL_DESC3, PL); - data_len = pkt_len - rxq->crc_len; + RX_NORMAL_DESC3, PL) - rxq->crc_len; + data_len = pkt_len % rxq->buf_size; + /* Check for any errors and free mbuf*/ + err = AXGMAC_GET_BITS_LE(desc->write.desc3, + RX_NORMAL_DESC3, ES); + error_status = 0; + if (unlikely(err)) { + error_status = desc->write.desc3 & + AXGBE_ERR_STATUS; + if (error_status != AXGBE_L3_CSUM_ERR && + error_status != AXGBE_L4_CSUM_ERR) { + rxq->errors++; + rte_pktmbuf_free(mbuf); + rte_pktmbuf_free(first_seg); + first_seg = NULL; + eop = 0; + goto err_set; + } + } + + } + /* Mbuf populate */ + mbuf->data_off = RTE_PKTMBUF_HEADROOM; + mbuf->data_len = data_len; + mbuf->pkt_len = data_len; + + if (rxq->saved_mbuf) { + first_seg = rxq->saved_mbuf; + rxq->saved_mbuf = NULL; } if (first_seg != NULL) { - if (rte_pktmbuf_chain(first_seg, mbuf) != 0) - rte_mempool_put(rxq->mb_pool, - first_seg); + if (rte_pktmbuf_chain(first_seg, mbuf) != 0) { + rte_pktmbuf_free(first_seg); + first_seg = NULL; + rte_pktmbuf_free(mbuf); + rxq->saved_mbuf = NULL; + rxq->errors++; + eop = 0; + break; + } } else { first_seg = mbuf; } /* Get the RSS hash */ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV)) - mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1); - - /* Mbuf populate */ - mbuf->data_off = RTE_PKTMBUF_HEADROOM; - mbuf->data_len = data_len; + first_seg->hash.rss = + rte_le_to_cpu_32(desc->write.desc1); err_set: rxq->cur++; - rxq->sw_ring[idx++] = tmbuf; + rxq->sw_ring[idx] = tmbuf; desc->read.baddr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf)); memset((void *)(&desc->read.desc2), 0, 8); AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1); - rxq->dirty++; - if (!eop) { - rte_pktmbuf_free(mbuf); + if (!eop) goto next_desc; - } + eop = 0; - first_seg->pkt_len = pkt_len; rxq->bytes += pkt_len; - mbuf->next = NULL; first_seg->port = rxq->port_id; if (rxq->pdata->rx_csum_enable) { - mbuf->ol_flags = 0; - mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; - mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + first_seg->ol_flags = 0; + first_seg->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + first_seg->ol_flags |= PKT_RX_L4_CKSUM_GOOD; if (unlikely(error_status == 
AXGBE_L3_CSUM_ERR)) { - mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD; - mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; - mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; - mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; + first_seg->ol_flags &= + ~PKT_RX_IP_CKSUM_GOOD; + first_seg->ol_flags |= + PKT_RX_IP_CKSUM_BAD; + first_seg->ol_flags &= + ~PKT_RX_L4_CKSUM_GOOD; + first_seg->ol_flags |= + PKT_RX_L4_CKSUM_UNKNOWN; } else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) { - mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD; - mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + first_seg->ol_flags &= + ~PKT_RX_L4_CKSUM_GOOD; + first_seg->ol_flags |= + PKT_RX_L4_CKSUM_BAD; } } @@ -445,15 +463,20 @@ first_seg = NULL; } + /* Check if we need to save state before leaving */ + if (first_seg != NULL && eop == 0) + rxq->saved_mbuf = first_seg; + /* Save receive context.*/ rxq->pkts += nb_rx; - if (rxq->dirty != old_dirty) { + if (rxq->dirty != rxq->cur) { rte_wmb(); - idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1); + idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur - 1); AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, low32_value(rxq->ring_phys_addr + (idx * sizeof(union axgbe_rx_desc)))); + rxq->dirty = rxq->cur; } return nb_rx; } diff -Nru dpdk-20.11.6/drivers/net/axgbe/axgbe_rxtx.h dpdk-20.11.7/drivers/net/axgbe/axgbe_rxtx.h --- dpdk-20.11.6/drivers/net/axgbe/axgbe_rxtx.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/axgbe/axgbe_rxtx.h 2022-12-13 10:50:22.000000000 +0000 @@ -65,6 +65,12 @@ uint16_t crc_len; /* address of s/w rx buffers */ struct rte_mbuf **sw_ring; + + /* For segemented packets - save the current state + * of packet, if next descriptor is not ready yet + */ + struct rte_mbuf *saved_mbuf; + /* Port private data */ struct axgbe_port *pdata; /* Number of Rx descriptors in queue */ diff -Nru dpdk-20.11.6/drivers/net/bnxt/bnxt_ethdev.c dpdk-20.11.7/drivers/net/bnxt/bnxt_ethdev.c --- dpdk-20.11.6/drivers/net/bnxt/bnxt_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/bnxt/bnxt_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -1611,6 +1611,7 @@ bnxt_free_link_info(bp); bnxt_free_parent_info(bp); bnxt_uninit_locks(bp); + bnxt_free_rep_info(bp); rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); bp->tx_mem_zone = NULL; @@ -5902,9 +5903,7 @@ bnxt_uninit_ctx_mem(bp); bnxt_free_flow_stats_info(bp); - if (bp->rep_info != NULL) - bnxt_free_switch_domain(bp); - bnxt_free_rep_info(bp); + bnxt_free_switch_domain(bp); rte_free(bp->ptp_cfg); bp->ptp_cfg = NULL; return rc; diff -Nru dpdk-20.11.6/drivers/net/bnxt/bnxt_hwrm.c dpdk-20.11.7/drivers/net/bnxt/bnxt_hwrm.c --- dpdk-20.11.6/drivers/net/bnxt/bnxt_hwrm.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/bnxt/bnxt_hwrm.c 2022-12-13 10:50:22.000000000 +0000 @@ -4409,7 +4409,7 @@ uint16_t duration = 0; int rc, i; - if (!bp->leds->num_leds || BNXT_VF(bp)) + if (BNXT_VF(bp) || !bp->leds || !bp->leds->num_leds) return -EOPNOTSUPP; HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB); diff -Nru dpdk-20.11.6/drivers/net/bnxt/tf_ulp/ulp_flow_db.h dpdk-20.11.7/drivers/net/bnxt/tf_ulp/ulp_flow_db.h --- dpdk-20.11.6/drivers/net/bnxt/tf_ulp/ulp_flow_db.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/bnxt/tf_ulp/ulp_flow_db.h 2022-12-13 10:50:22.000000000 +0000 @@ -199,13 +199,13 @@ * Flush all flows in the flow database. 
* * ulp_ctxt [in] Ptr to ulp context - * tbl_idx [in] The index to table + * flow_type [in] - specify default or regular * * returns 0 on success or negative number on failure */ int32_t ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, - uint32_t idx); + enum bnxt_ulp_fdb_type flow_type); /* * Flush all flows in the flow database that belong to a device function. diff -Nru dpdk-20.11.6/drivers/net/bonding/rte_eth_bond_api.c dpdk-20.11.7/drivers/net/bonding/rte_eth_bond_api.c --- dpdk-20.11.6/drivers/net/bonding/rte_eth_bond_api.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/bonding/rte_eth_bond_api.c 2022-12-13 10:50:22.000000000 +0000 @@ -541,6 +541,11 @@ return ret; } + /* Bond mode Broadcast & 8023AD don't support MBUF_FAST_FREE offload. */ + if (internals->mode == BONDING_MODE_8023AD || + internals->mode == BONDING_MODE_BROADCAST) + internals->tx_offload_capa &= ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; + bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= internals->flow_type_rss_offloads; diff -Nru dpdk-20.11.6/drivers/net/bonding/rte_eth_bond_pmd.c dpdk-20.11.7/drivers/net/bonding/rte_eth_bond_pmd.c --- dpdk-20.11.6/drivers/net/bonding/rte_eth_bond_pmd.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/bonding/rte_eth_bond_pmd.c 2022-12-13 10:50:22.000000000 +0000 @@ -82,7 +82,7 @@ bufs + num_rx_total, nb_pkts); num_rx_total += num_rx_slave; nb_pkts -= num_rx_slave; - if (++active_slave == slave_count) + if (++active_slave >= slave_count) active_slave = 0; } @@ -198,7 +198,7 @@ if (slave_info.max_rx_queues < bond_dev->data->nb_rx_queues || slave_info.max_tx_queues < bond_dev->data->nb_tx_queues) { RTE_BOND_LOG(ERR, - "%s: Slave %d capabilities doesn't allow to allocate additional queues", + "%s: Slave %d capabilities doesn't allow allocating additional queues", __func__, slave_port); return -1; } @@ -271,6 +271,24 @@ return 0; } +static bool +is_bond_mac_addr(const struct rte_ether_addr *ea, + const struct rte_ether_addr *mac_addrs, uint32_t max_mac_addrs) +{ + uint32_t i; + + for (i = 0; i < max_mac_addrs; i++) { + /* skip zero address */ + if (rte_is_zero_ether_addr(&mac_addrs[i])) + continue; + + if (rte_is_same_ether_addr(ea, &mac_addrs[i])) + return true; + } + + return false; +} + static inline uint16_t rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts, bool dedicated_rxq) @@ -331,8 +349,9 @@ /* Remove packet from array if: * - it is slow packet but no dedicated rxq is present, * - slave is not in collecting state, - * - bonding interface is not in promiscuous mode: - * - packet is unicast and address does not match, + * - bonding interface is not in promiscuous mode and + * packet address isn't in mac_addrs array: + * - packet is unicast, * - packet is multicast and bonding interface * is not in allmulti, */ @@ -342,12 +361,10 @@ bufs[j])) || !collecting || (!promisc && - ((rte_is_unicast_ether_addr(&hdr->d_addr) && - !rte_is_same_ether_addr(bond_mac, - &hdr->d_addr)) || - (!allmulti && - rte_is_multicast_ether_addr(&hdr->d_addr)))))) { - + !is_bond_mac_addr(&hdr->d_addr, bond_mac, + BOND_MAX_MAC_ADDRS) && + (rte_is_unicast_ether_addr(&hdr->d_addr) || + !allmulti)))) { if (hdr->ether_type == ether_type_slow_be) { bond_mode_8023ad_handle_slow_pkt( internals, slaves[idx], bufs[j]); @@ -772,7 +789,7 @@ ((char *)ipv4_hdr + ip_hdr_offset); if ((size_t)tcp_hdr + sizeof(*tcp_hdr) - < pkt_end) + <= pkt_end) l4hash = HASH_L4_PORTS(tcp_hdr); } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) { @@ -1723,20 +1740,11 @@ 
bonded_eth_dev->data->dev_conf.rxmode.mq_mode; } - slave_eth_dev->data->dev_conf.txmode.offloads |= - bonded_eth_dev->data->dev_conf.txmode.offloads; - - slave_eth_dev->data->dev_conf.txmode.offloads &= - (bonded_eth_dev->data->dev_conf.txmode.offloads | - ~internals->tx_offload_capa); - - slave_eth_dev->data->dev_conf.rxmode.offloads |= - bonded_eth_dev->data->dev_conf.rxmode.offloads; - - slave_eth_dev->data->dev_conf.rxmode.offloads &= - (bonded_eth_dev->data->dev_conf.rxmode.offloads | - ~internals->rx_offload_capa); + slave_eth_dev->data->dev_conf.txmode.offloads = + bonded_eth_dev->data->dev_conf.txmode.offloads; + slave_eth_dev->data->dev_conf.rxmode.offloads = + bonded_eth_dev->data->dev_conf.rxmode.offloads; nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; @@ -1817,7 +1825,18 @@ rte_flow_destroy(slave_eth_dev->data->port_id, internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id], &flow_error); + } + /* Start device */ + errval = rte_eth_dev_start(slave_eth_dev->data->port_id); + if (errval != 0) { + RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)", + slave_eth_dev->data->port_id, errval); + return -1; + } + + if (internals->mode == BONDING_MODE_8023AD && + internals->mode4.dedicated_queues.enabled == 1) { errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev, slave_eth_dev->data->port_id); if (errval != 0) { @@ -1828,14 +1847,6 @@ } } - /* Start device */ - errval = rte_eth_dev_start(slave_eth_dev->data->port_id); - if (errval != 0) { - RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)", - slave_eth_dev->data->port_id, errval); - return -1; - } - /* If RSS is enabled for bonding, synchronize RETA */ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { int i; @@ -2137,6 +2148,10 @@ return 0; RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name); + + /* Flush flows in all back-end devices before removing them */ + bond_flow_ops.flush(dev, &ferror); + while (internals->slave_count != skipped) { uint16_t port_id = internals->slaves[skipped].port_id; @@ -2154,7 +2169,6 @@ skipped++; } } - bond_flow_ops.flush(dev, &ferror); bond_ethdev_free_queues(dev); rte_bitmap_reset(internals->vlan_filter_bmp); rte_bitmap_free(internals->vlan_filter_bmp); @@ -2183,8 +2197,6 @@ uint16_t max_nb_rx_queues = UINT16_MAX; uint16_t max_nb_tx_queues = UINT16_MAX; - uint16_t max_rx_desc_lim = UINT16_MAX; - uint16_t max_tx_desc_lim = UINT16_MAX; dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS; @@ -2218,12 +2230,6 @@ if (slave_info.max_tx_queues < max_nb_tx_queues) max_nb_tx_queues = slave_info.max_tx_queues; - - if (slave_info.rx_desc_lim.nb_max < max_rx_desc_lim) - max_rx_desc_lim = slave_info.rx_desc_lim.nb_max; - - if (slave_info.tx_desc_lim.nb_max < max_tx_desc_lim) - max_tx_desc_lim = slave_info.tx_desc_lim.nb_max; } } @@ -2235,8 +2241,10 @@ memcpy(&dev_info->default_txconf, &internals->default_txconf, sizeof(dev_info->default_txconf)); - dev_info->rx_desc_lim.nb_max = max_rx_desc_lim; - dev_info->tx_desc_lim.nb_max = max_tx_desc_lim; + memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim, + sizeof(dev_info->rx_desc_lim)); + memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim, + sizeof(dev_info->tx_desc_lim)); /** * If dedicated hw queues enabled for link bonding device in LACP mode @@ -2397,9 +2405,6 @@ * event callback */ if (slave_ethdev->data->dev_link.link_status != internals->slaves[i].last_link_status) { - internals->slaves[i].last_link_status = - slave_ethdev->data->dev_link.link_status; - 
bond_ethdev_lsc_event_callback(internals->slaves[i].port_id, RTE_ETH_EVENT_INTR_LSC, &bonded_ethdev->data->port_id, @@ -2898,7 +2903,7 @@ uint8_t lsc_flag = 0; int valid_slave = 0; - uint16_t active_pos; + uint16_t active_pos, slave_idx; uint16_t i; if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL) @@ -2919,6 +2924,7 @@ for (i = 0; i < internals->slave_count; i++) { if (internals->slaves[i].port_id == port_id) { valid_slave = 1; + slave_idx = i; break; } } @@ -3007,6 +3013,7 @@ * slaves */ bond_ethdev_link_update(bonded_eth_dev, 0); + internals->slaves[slave_idx].last_link_status = link.link_status; if (lsc_flag) { /* Cancel any possible outstanding interrupts if delays are enabled */ @@ -3370,6 +3377,15 @@ memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim)); memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim)); + /* + * Do not restrict descriptor counts until + * the first back-end device gets attached. + */ + internals->rx_desc_lim.nb_max = UINT16_MAX; + internals->tx_desc_lim.nb_max = UINT16_MAX; + internals->rx_desc_lim.nb_align = 1; + internals->tx_desc_lim.nb_align = 1; + memset(internals->active_slaves, 0, sizeof(internals->active_slaves)); memset(internals->slaves, 0, sizeof(internals->slaves)); @@ -3569,7 +3585,6 @@ const char *name = dev->device->name; struct bond_dev_private *internals = dev->data->dev_private; struct rte_kvargs *kvlist = internals->kvlist; - uint64_t offloads; int arg_count; uint16_t port_id = dev - rte_eth_devices; uint8_t agg_mode; @@ -3630,16 +3645,6 @@ } } - offloads = dev->data->dev_conf.txmode.offloads; - if ((offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) && - (internals->mode == BONDING_MODE_8023AD || - internals->mode == BONDING_MODE_BROADCAST)) { - RTE_BOND_LOG(WARNING, - "bond mode broadcast & 8023AD don't support MBUF_FAST_FREE offload, force disable it."); - offloads &= ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; - dev->data->dev_conf.txmode.offloads = offloads; - } - /* set the max_rx_pktlen */ internals->max_rx_pktlen = internals->candidate_max_rx_pktlen; diff -Nru dpdk-20.11.6/drivers/net/dpaa/dpaa_ethdev.c dpdk-20.11.7/drivers/net/dpaa/dpaa_ethdev.c --- dpdk-20.11.6/drivers/net/dpaa/dpaa_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/dpaa/dpaa_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -995,8 +995,7 @@ DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" " larger than a single mbuf (%u) and scattered" " mode has not been requested", - dev->data->dev_conf.rxmode.max_rx_pkt_len, - buffsz - RTE_PKTMBUF_HEADROOM); + dev->data->dev_conf.rxmode.max_rx_pkt_len, buffsz); } dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); @@ -1011,7 +1010,7 @@ if (vsp_id >= 0) { ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id, DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid, - fif); + fif, buffsz + RTE_PKTMBUF_HEADROOM); if (ret) { DPAA_PMD_ERR("dpaa_port_vsp_update failed"); return ret; diff -Nru dpdk-20.11.6/drivers/net/dpaa/dpaa_flow.c dpdk-20.11.7/drivers/net/dpaa/dpaa_flow.c --- dpdk-20.11.6/drivers/net/dpaa/dpaa_flow.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/dpaa/dpaa_flow.c 2022-12-13 10:50:22.000000000 +0000 @@ -939,7 +939,7 @@ static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf, uint8_t vsp_id, t_handle fman_handle, - struct fman_if *fif) + struct fman_if *fif, u32 mbuf_data_room_size) { t_fm_vsp_params vsp_params; t_fm_buffer_prefix_content buf_prefix_cont; @@ -976,10 +976,8 @@ return -1; } vsp_params.ext_buf_pools.num_of_pools_used = 1; - 
vsp_params.ext_buf_pools.ext_buf_pool[0].id = - dpaa_intf->vsp_bpid[vsp_id]; - vsp_params.ext_buf_pools.ext_buf_pool[0].size = - RTE_MBUF_DEFAULT_BUF_SIZE; + vsp_params.ext_buf_pools.ext_buf_pool[0].id = dpaa_intf->vsp_bpid[vsp_id]; + vsp_params.ext_buf_pools.ext_buf_pool[0].size = mbuf_data_room_size; dpaa_intf->vsp_handle[vsp_id] = fm_vsp_config(&vsp_params); if (!dpaa_intf->vsp_handle[vsp_id]) { @@ -1023,7 +1021,7 @@ int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf, bool fmc_mode, uint8_t vsp_id, uint32_t bpid, - struct fman_if *fif) + struct fman_if *fif, u32 mbuf_data_room_size) { int ret = 0; t_handle fman_handle; @@ -1054,7 +1052,8 @@ dpaa_intf->vsp_bpid[vsp_id] = bpid; - return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif); + return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif, + mbuf_data_room_size); } int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif) diff -Nru dpdk-20.11.6/drivers/net/dpaa/dpaa_flow.h dpdk-20.11.7/drivers/net/dpaa/dpaa_flow.h --- dpdk-20.11.6/drivers/net/dpaa/dpaa_flow.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/dpaa/dpaa_flow.h 2022-12-13 10:50:22.000000000 +0000 @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2017,2019 NXP + * Copyright 2017,2019,2022 NXP */ #ifndef __DPAA_FLOW_H__ @@ -11,7 +11,8 @@ int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf, struct fman_if *fif); void dpaa_write_fm_config_to_file(void); int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf, - bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif); + bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif, + u32 mbuf_data_room_size); int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif); int dpaa_port_fmc_init(struct fman_if *fif, uint32_t *fqids, int8_t *vspids, int max_nb_rxq); diff -Nru dpdk-20.11.6/drivers/net/dpaa/dpaa_rxtx.c dpdk-20.11.7/drivers/net/dpaa/dpaa_rxtx.c --- dpdk-20.11.6/drivers/net/dpaa/dpaa_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/dpaa/dpaa_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -445,7 +445,7 @@ bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid); format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT; if (unlikely(format == qm_fd_sg)) { - struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp; + struct rte_mbuf *first_seg, *cur_seg; struct qm_sg_entry *sgt, *sg_temp; void *vaddr, *sg_vaddr; int i = 0; @@ -459,32 +459,25 @@ sgt = vaddr + fd_offset; sg_temp = &sgt[i++]; hw_sg_to_cpu(sg_temp); - temp = (struct rte_mbuf *) - ((char *)vaddr - bp_info->meta_data_size); sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); - first_seg = (struct rte_mbuf *)((char *)sg_vaddr - bp_info->meta_data_size); first_seg->nb_segs = 1; - prev_seg = first_seg; while (i < DPAA_SGT_MAX_ENTRIES) { sg_temp = &sgt[i++]; hw_sg_to_cpu(sg_temp); - sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, + if (sg_temp->bpid != 0xFF) { + bp_info = DPAA_BPID_TO_POOL_INFO(sg_temp->bpid); + sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp)); - cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - + cur_seg = (struct rte_mbuf *)((char *)sg_vaddr - bp_info->meta_data_size); - first_seg->nb_segs += 1; - prev_seg->next = cur_seg; - if (sg_temp->final) { - cur_seg->next = NULL; - break; + rte_pktmbuf_free_seg(cur_seg); } - prev_seg = cur_seg; + if (sg_temp->final) + break; } - - rte_pktmbuf_free_seg(temp); rte_pktmbuf_free_seg(first_seg); return 0; } diff -Nru dpdk-20.11.6/drivers/net/ena/ena_ethdev.c 
dpdk-20.11.7/drivers/net/ena/ena_ethdev.c --- dpdk-20.11.6/drivers/net/ena/ena_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ena/ena_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -292,15 +292,14 @@ static uint8_t default_key[ENA_HASH_KEY_SIZE]; size_t i; - RTE_ASSERT(size <= ENA_HASH_KEY_SIZE); - if (!key_generated) { - for (i = 0; i < ENA_HASH_KEY_SIZE; ++i) + for (i = 0; i < RTE_DIM(default_key); ++i) default_key[i] = rte_rand() & 0xff; key_generated = true; } - rte_memcpy(key, default_key, size); + RTE_ASSERT(size <= sizeof(default_key)); + rte_memcpy(key, default_key, RTE_MIN(size, sizeof(default_key))); } static inline void ena_trigger_reset(struct ena_adapter *adapter, @@ -643,8 +642,7 @@ int reta_conf_idx; int reta_idx; - if (reta_size == 0 || reta_conf == NULL || - (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL))) + if (reta_size == 0 || reta_conf == NULL) return -EINVAL; rte_spinlock_lock(&adapter->admin_lock); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_cmd.h dpdk-20.11.7/drivers/net/hns3/hns3_cmd.h --- dpdk-20.11.6/drivers/net/hns3/hns3_cmd.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_cmd.h 2022-12-13 10:50:22.000000000 +0000 @@ -7,6 +7,9 @@ #include <stdint.h> +#include <rte_byteorder.h> +#include <rte_spinlock.h> + #define HNS3_CMDQ_TX_TIMEOUT 30000 #define HNS3_CMDQ_CLEAR_WAIT_TIME 200 #define HNS3_CMDQ_RX_INVLD_B 0 @@ -872,6 +875,12 @@ uint32_t max_tm_rate; }; +struct hns3_dev_specs_1_cmd { + uint8_t rsv0[12]; + uint8_t min_tx_pkt_len; + uint8_t rsv1[11]; +}; + #define HNS3_MAX_TQP_NUM_HIP08_PF 64 #define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ #define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_dcb.h dpdk-20.11.7/drivers/net/hns3/hns3_dcb.h --- dpdk-20.11.6/drivers/net/hns3/hns3_dcb.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_dcb.h 2022-12-13 10:50:22.000000000 +0000 @@ -7,7 +7,10 @@ #include <stdint.h> +#include <rte_ethdev.h> + #include "hns3_cmd.h" +#include "hns3_ethdev.h" #define HNS3_ETHER_MAX_RATE 100000 diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_ethdev.c dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.c --- dpdk-20.11.6/drivers/net/hns3/hns3_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -8,13 +8,13 @@ #include <rte_io.h> #include <rte_pci.h> -#include "hns3_ethdev.h" #include "hns3_logs.h" #include "hns3_rxtx.h" #include "hns3_intr.h" #include "hns3_regs.h" #include "hns3_dcb.h" #include "hns3_mp.h" +#include "hns3_ethdev.h" #define HNS3_DEFAULT_PORT_CONF_BURST_SIZE 32 #define HNS3_DEFAULT_PORT_CONF_QUEUES_NUM 1 @@ -1711,7 +1711,7 @@ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); hns3_warn(hw, - "Failed to roll back to del setted mac addr(%s): %d", + "Failed to roll back to del set mac addr(%s): %d", mac_str, ret_val); } @@ -3022,14 +3022,17 @@ hns3_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) { struct hns3_dev_specs_0_cmd *req0; + struct hns3_dev_specs_1_cmd *req1; req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data; hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); hw->max_tm_rate = rte_le_to_cpu_32(req0->max_tm_rate); hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); + hw->min_tx_pkt_len = 
req1->min_tx_pkt_len; } static int @@ -3129,7 +3132,6 @@ hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; hw->vlan_mode = HNS3_HW_SHIFT_AND_DISCARD_MODE; - hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; pf->tqp_config_mode = HNS3_FLEX_MAX_TQP_NUM_MODE; hw->rss_info.ipv6_sctp_offload_supported = true; hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE; @@ -3173,6 +3175,7 @@ struct hns3_cfg cfg; int ret; + memset(&cfg, 0, sizeof(cfg)); ret = hns3_get_board_cfg(hw, &cfg); if (ret) { PMD_INIT_LOG(ERR, "get board config failed %d", ret); @@ -4807,7 +4810,7 @@ goto err_get_config; } - ret = hns3_tqp_stats_init(hw); + ret = hns3_stats_init(hw); if (ret) goto err_get_config; @@ -4841,7 +4844,7 @@ (void)hns3_firmware_compat_config(hw, false); hns3_uninit_umv_space(hw); err_init_hw: - hns3_tqp_stats_uninit(hw); + hns3_stats_uninit(hw); err_get_config: hns3_pf_disable_irq0(hw); rte_intr_disable(&pci_dev->intr_handle); @@ -4875,7 +4878,7 @@ hns3_fdir_filter_uninit(hns); (void)hns3_firmware_compat_config(hw, false); hns3_uninit_umv_space(hw); - hns3_tqp_stats_uninit(hw); + hns3_stats_uninit(hw); hns3_pf_disable_irq0(hw); rte_intr_disable(&pci_dev->intr_handle); hns3_intr_unregister(&pci_dev->intr_handle, hns3_interrupt_handler, @@ -4910,7 +4913,7 @@ PMD_INIT_LOG(ERR, "failed to enable MAC, ret = %d", ret); goto err_config_mac_mode; } - return 0; + return hns3_restore_filter(hns); err_config_mac_mode: hns3_dev_release_mbufs(hns); @@ -5018,12 +5021,6 @@ return 0; } -static void -hns3_restore_filter(struct rte_eth_dev *dev) -{ - hns3_restore_rss_filter(dev); -} - static int hns3_dev_start(struct rte_eth_dev *dev) { @@ -5075,8 +5072,6 @@ hns3_mp_req_start_rxtx(dev); rte_eal_alarm_set(HNS3_SERVICE_INTERVAL, hns3_service_handler, dev); - hns3_restore_filter(dev); - /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); @@ -5458,7 +5453,15 @@ struct hns3_hw *hw = &hns->hw; enum hns3_reset_level reset; - hns3_check_event_cause(hns, NULL); + /* + * Check the registers to confirm whether there is reset pending. + * Note: This check may lead to schedule reset task, but only primary + * process can process the reset event. Therefore, limit the + * checking under only primary process. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + hns3_check_event_cause(hns, NULL); + reset = hns3_get_reset_level(hns, &hw->reset.pending); if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { hns3_warn(hw, "High level reset %d is pending", reset); @@ -5752,10 +5755,6 @@ if (ret) goto err_promisc; - ret = hns3_restore_all_fdir_filter(hns); - if (ret) - goto err_promisc; - ret = hns3_restore_rx_interrupt(hw); if (ret) goto err_promisc; diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_ethdev.h dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.h --- dpdk-20.11.6/drivers/net/hns3/hns3_ethdev.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_ethdev.h 2022-12-13 10:50:22.000000000 +0000 @@ -69,7 +69,6 @@ #define HNS3_DEFAULT_MTU 1500UL #define HNS3_DEFAULT_FRAME_LEN (HNS3_DEFAULT_MTU + HNS3_ETH_OVERHEAD) #define HNS3_HIP08_MIN_TX_PKT_LEN 33 -#define HNS3_HIP09_MIN_TX_PKT_LEN 9 #define HNS3_BITS_PER_BYTE 8 @@ -474,7 +473,7 @@ * The minimum length of the packet supported by hardware in the Tx * direction. 
*/ - uint32_t min_tx_pkt_len; + uint8_t min_tx_pkt_len; struct hns3_queue_intr intr; /* @@ -810,9 +809,9 @@ hns3_get_bit((hw)->capability, HNS3_DEV_SUPPORT_STASH_B) #define HNS3_DEV_PRIVATE_TO_HW(adapter) \ - (&((struct hns3_adapter *)adapter)->hw) + (&((struct hns3_adapter *)(adapter))->hw) #define HNS3_DEV_PRIVATE_TO_PF(adapter) \ - (&((struct hns3_adapter *)adapter)->pf) + (&((struct hns3_adapter *)(adapter))->pf) #define HNS3_DEV_HW_TO_ADAPTER(hw) \ container_of(hw, struct hns3_adapter, hw) @@ -899,10 +898,10 @@ #define NEXT_ITEM_OF_ACTION(act, actions, index) \ do { \ - act = (actions) + (index); \ - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + (act) = (actions) + (index); \ + while ((act)->type == RTE_FLOW_ACTION_TYPE_VOID) { \ (index)++; \ - act = actions + index; \ + (act) = (actions) + (index); \ } \ } while (0) @@ -934,7 +933,7 @@ __atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED); } -static inline int64_t +static inline uint64_t hns3_test_and_clear_bit(unsigned int nr, volatile uint64_t *addr) { uint64_t mask = (1UL << nr); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_ethdev_vf.c dpdk-20.11.7/drivers/net/hns3/hns3_ethdev_vf.c --- dpdk-20.11.6/drivers/net/hns3/hns3_ethdev_vf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_ethdev_vf.c 2022-12-13 10:50:22.000000000 +0000 @@ -1146,13 +1146,16 @@ hns3vf_parse_dev_specifications(struct hns3_hw *hw, struct hns3_cmd_desc *desc) { struct hns3_dev_specs_0_cmd *req0; + struct hns3_dev_specs_1_cmd *req1; req0 = (struct hns3_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hns3_dev_specs_1_cmd *)desc[1].data; hw->max_non_tso_bd_num = req0->max_non_tso_bd_num; hw->rss_ind_tbl_size = rte_le_to_cpu_16(req0->rss_ind_tbl_size); hw->rss_key_size = rte_le_to_cpu_16(req0->rss_key_size); hw->intr.int_ql_max = rte_le_to_cpu_16(req0->intr_ql_max); + hw->min_tx_pkt_len = req1->min_tx_pkt_len; } static int @@ -1235,7 +1238,6 @@ hw->intr.mapping_mode = HNS3_INTR_MAPPING_VEC_ALL; hw->intr.gl_unit = HNS3_INTR_COALESCE_GL_UINT_1US; hw->tso_mode = HNS3_TSO_HW_CAL_PSEUDO_H_CSUM; - hw->min_tx_pkt_len = HNS3_HIP09_MIN_TX_PKT_LEN; hw->rss_info.ipv6_sctp_offload_supported = true; hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE; @@ -1813,7 +1815,7 @@ goto err_get_config; } - ret = hns3_tqp_stats_init(hw); + ret = hns3_stats_init(hw); if (ret) goto err_get_config; @@ -1844,7 +1846,7 @@ return 0; err_set_tc_queue: - hns3_tqp_stats_uninit(hw); + hns3_stats_uninit(hw); err_get_config: hns3vf_disable_irq0(hw); @@ -1875,7 +1877,7 @@ (void)hns3vf_set_alive(hw, false); (void)hns3vf_set_promisc_mode(hw, false, false, false); hns3_flow_uninit(eth_dev); - hns3_tqp_stats_uninit(hw); + hns3_stats_uninit(hw); hns3vf_disable_irq0(hw); rte_intr_disable(&pci_dev->intr_handle); hns3_intr_unregister(&pci_dev->intr_handle, hns3vf_interrupt_handler, @@ -2097,7 +2099,7 @@ if (ret) hns3_err(hw, "failed to init queues, ret = %d.", ret); - return ret; + return hns3_restore_filter(hns); } static int @@ -2194,12 +2196,6 @@ return 0; } -static void -hns3vf_restore_filter(struct rte_eth_dev *dev) -{ - hns3_restore_rss_filter(dev); -} - static int hns3vf_dev_start(struct rte_eth_dev *dev) { @@ -2250,8 +2246,6 @@ hns3_mp_req_start_rxtx(dev); hns3vf_service_handler(dev); - hns3vf_restore_filter(dev); - /* Enable interrupt of all rx queues before enabling queues */ hns3_dev_all_rx_queue_intr_enable(hw, true); @@ -2316,8 +2310,15 @@ if (hw->reset.level == HNS3_VF_FULL_RESET) return false; - /* Check the registers to confirm whether there is reset pending */ - 
hns3vf_check_event_cause(hns, NULL); + /* + * Check the registers to confirm whether there is reset pending. + * Note: This check may lead to schedule reset task, but only primary + * process can process the reset event. Therefore, limit the + * checking under only primary process. + */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + hns3vf_check_event_cause(hns, NULL); + reset = hns3vf_get_reset_level(hw, &hw->reset.pending); if (hw->reset.level != HNS3_NONE_RESET && hw->reset.level < reset) { hns3_warn(hw, "High level reset %d is pending", reset); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_fdir.c dpdk-20.11.7/drivers/net/hns3/hns3_fdir.c --- dpdk-20.11.6/drivers/net/hns3/hns3_fdir.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_fdir.c 2022-12-13 10:50:22.000000000 +0000 @@ -1056,6 +1056,9 @@ bool err = false; int ret; + if (hns->is_vf) + return 0; + /* * This API is called in the reset recovery process, the parent function * must hold hw->lock. diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_fdir.h dpdk-20.11.7/drivers/net/hns3/hns3_fdir.h --- dpdk-20.11.6/drivers/net/hns3/hns3_fdir.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_fdir.h 2022-12-13 10:50:22.000000000 +0000 @@ -5,6 +5,8 @@ #ifndef _HNS3_FDIR_H_ #define _HNS3_FDIR_H_ +#include <stdint.h> + #include <rte_flow.h> struct hns3_fd_key_cfg { @@ -205,6 +207,7 @@ uint32_t counter_id; }; struct hns3_adapter; +struct hns3_hw; int hns3_init_fd_config(struct hns3_adapter *hns); int hns3_fdir_filter_init(struct hns3_adapter *hns); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_flow.c dpdk-20.11.7/drivers/net/hns3/hns3_flow.c --- dpdk-20.11.6/drivers/net/hns3/hns3_flow.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_flow.c 2022-12-13 10:50:22.000000000 +0000 @@ -65,7 +65,7 @@ struct items_step_mngr { enum rte_flow_item_type *items; - int count; + size_t count; }; static inline void @@ -1120,7 +1120,7 @@ struct items_step_mngr step_mngr, struct rte_flow_error *error) { - int i; + uint32_t i; if (item->last) return rte_flow_error_set(error, ENOTSUP, @@ -1476,11 +1476,9 @@ } static int -hns3_update_indir_table(struct rte_eth_dev *dev, +hns3_update_indir_table(struct hns3_hw *hw, const struct rte_flow_action_rss *conf, uint16_t num) { - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_hw *hw = &hns->hw; uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX]; uint16_t j; uint32_t i; @@ -1503,12 +1501,9 @@ } static int -hns3_config_rss_filter(struct rte_eth_dev *dev, +hns3_config_rss_filter(struct hns3_hw *hw, const struct hns3_rss_conf *conf, bool add) { - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_rss_conf_ele *rss_filter_ptr; - struct hns3_hw *hw = &hns->hw; struct hns3_rss_conf *rss_info; uint64_t flow_types; uint16_t num; @@ -1556,45 +1551,29 @@ rss_info->conf.queue_num = 0; } - /* set RSS func invalid after flushed */ - rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX; return 0; } /* Set rx queues to use */ - num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num); + num = RTE_MIN(hw->data->nb_rx_queues, rss_flow_conf.queue_num); if (rss_flow_conf.queue_num > num) hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated", rss_flow_conf.queue_num); hns3_info(hw, "Max of contiguous %u PF queues are configured", num); - - rte_spinlock_lock(&hw->lock); if (num) { - ret = hns3_update_indir_table(dev, &rss_flow_conf, num); + ret = hns3_update_indir_table(hw, &rss_flow_conf, num); if (ret) - goto 
rss_config_err; + return ret; } /* Set hash algorithm and flow types by the user's config */ ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf); if (ret) - goto rss_config_err; + return ret; ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf); - if (ret) { + if (ret) hns3_err(hw, "RSS config init fail(%d)", ret); - goto rss_config_err; - } - - /* - * When create a new RSS rule, the old rule will be overlaid and set - * invalid. - */ - TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries) - rss_filter_ptr->filter_info.valid = false; - -rss_config_err: - rte_spinlock_unlock(&hw->lock); return ret; } @@ -1612,7 +1591,7 @@ rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list); while (rss_filter_ptr) { TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries); - ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info, + ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info, false); if (ret) rss_rule_fail_cnt++; @@ -1632,17 +1611,41 @@ return ret; } +static int +hns3_restore_rss_filter(struct hns3_hw *hw) +{ + struct hns3_rss_conf_ele *filter; + int ret = 0; + + pthread_mutex_lock(&hw->flows_lock); + TAILQ_FOREACH(filter, &hw->flow_rss_list, entries) { + if (!filter->filter_info.valid) + continue; + + ret = hns3_config_rss_filter(hw, &filter->filter_info, true); + if (ret != 0) { + hns3_err(hw, "restore RSS filter failed, ret=%d", ret); + goto out; + } + } + +out: + pthread_mutex_unlock(&hw->flows_lock); + + return ret; +} + int -hns3_restore_rss_filter(struct rte_eth_dev *dev) +hns3_restore_filter(struct hns3_adapter *hns) { - struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; + int ret; - /* When user flush all rules, it doesn't need to restore RSS rule */ - if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX) - return 0; + ret = hns3_restore_all_fdir_filter(hns); + if (ret != 0) + return ret; - return hns3_config_rss_filter(dev, &hw->rss_info, true); + return hns3_restore_rss_filter(hw); } static int @@ -1659,7 +1662,7 @@ return -EINVAL; } - return hns3_config_rss_filter(dev, conf, add); + return hns3_config_rss_filter(hw, conf, add); } static int @@ -1711,6 +1714,114 @@ return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); } +static int +hns3_flow_create_rss_rule(struct rte_eth_dev *dev, + const struct rte_flow_action *act, + struct rte_flow *flow) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_rss_conf_ele *rss_filter_ptr; + struct hns3_rss_conf_ele *filter_ptr; + const struct hns3_rss_conf *rss_conf; + int ret; + + rss_filter_ptr = rte_zmalloc("hns3 rss filter", + sizeof(struct hns3_rss_conf_ele), 0); + if (rss_filter_ptr == NULL) { + hns3_err(hw, "failed to allocate hns3_rss_filter memory"); + return -ENOMEM; + } + + /* + * After all the preceding tasks are successfully configured, configure + * rules to the hardware to simplify the rollback of rules in the + * hardware. + */ + rss_conf = (const struct hns3_rss_conf *)act->conf; + ret = hns3_flow_parse_rss(dev, rss_conf, true); + if (ret != 0) { + rte_free(rss_filter_ptr); + return ret; + } + + hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf); + rss_filter_ptr->filter_info.valid = true; + + /* + * When create a new RSS rule, the old rule will be overlaid and set + * invalid. 
+ */ + TAILQ_FOREACH(filter_ptr, &hw->flow_rss_list, entries) + filter_ptr->filter_info.valid = false; + + TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); + flow->rule = rss_filter_ptr; + flow->filter_type = RTE_ETH_FILTER_HASH; + + return 0; +} + +static int +hns3_flow_create_fdir_rule(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + struct hns3_fdir_rule_ele *fdir_rule_ptr; + struct hns3_fdir_rule fdir_rule; + int ret; + + memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); + ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); + if (ret != 0) + return ret; + + if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) { + ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared, + fdir_rule.act_cnt.id, error); + if (ret != 0) + return ret; + + flow->counter_id = fdir_rule.act_cnt.id; + } + + fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", + sizeof(struct hns3_fdir_rule_ele), 0); + if (fdir_rule_ptr == NULL) { + hns3_err(hw, "failed to allocate fdir_rule memory."); + ret = -ENOMEM; + goto err_malloc; + } + + /* + * After all the preceding tasks are successfully configured, configure + * rules to the hardware to simplify the rollback of rules in the + * hardware. + */ + ret = hns3_fdir_filter_program(hns, &fdir_rule, false); + if (ret != 0) + goto err_fdir_filter; + + memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, + sizeof(struct hns3_fdir_rule)); + TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); + flow->rule = fdir_rule_ptr; + flow->filter_type = RTE_ETH_FILTER_FDIR; + + return 0; + +err_fdir_filter: + rte_free(fdir_rule_ptr); +err_malloc: + if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) + hns3_counter_release(dev, fdir_rule.act_cnt.id); + + return ret; +} + /* * Create or destroy a flow rule. * Theorically one rule can match more than one filters. 
@@ -1725,13 +1836,9 @@ { struct hns3_adapter *hns = dev->data->dev_private; struct hns3_hw *hw = &hns->hw; - const struct hns3_rss_conf *rss_conf; - struct hns3_fdir_rule_ele *fdir_rule_ptr; - struct hns3_rss_conf_ele *rss_filter_ptr; struct hns3_flow_mem *flow_node; const struct rte_flow_action *act; struct rte_flow *flow; - struct hns3_fdir_rule fdir_rule; int ret; ret = hns3_flow_validate(dev, attr, pattern, actions, error); @@ -1757,77 +1864,20 @@ TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries); act = hns3_find_rss_general_action(pattern, actions); - if (act) { - rss_conf = act->conf; - - ret = hns3_flow_parse_rss(dev, rss_conf, true); - if (ret) - goto err; - - rss_filter_ptr = rte_zmalloc("hns3 rss filter", - sizeof(struct hns3_rss_conf_ele), - 0); - if (rss_filter_ptr == NULL) { - hns3_err(hw, - "Failed to allocate hns3_rss_filter memory"); - ret = -ENOMEM; - goto err; - } - hns3_rss_conf_copy(&rss_filter_ptr->filter_info, - &rss_conf->conf); - rss_filter_ptr->filter_info.valid = true; - TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries); - - flow->rule = rss_filter_ptr; - flow->filter_type = RTE_ETH_FILTER_HASH; - return flow; - } - - memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule)); - ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error); - if (ret) - goto out; - - if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) { - ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared, - fdir_rule.act_cnt.id, error); - if (ret) - goto out; - - flow->counter_id = fdir_rule.act_cnt.id; - } - - fdir_rule_ptr = rte_zmalloc("hns3 fdir rule", - sizeof(struct hns3_fdir_rule_ele), - 0); - if (fdir_rule_ptr == NULL) { - hns3_err(hw, "failed to allocate fdir_rule memory."); - ret = -ENOMEM; - goto err_fdir; - } - - ret = hns3_fdir_filter_program(hns, &fdir_rule, false); - if (!ret) { - memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, - sizeof(struct hns3_fdir_rule)); - TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries); - flow->rule = fdir_rule_ptr; - flow->filter_type = RTE_ETH_FILTER_FDIR; - + if (act) + ret = hns3_flow_create_rss_rule(dev, act, flow); + else + ret = hns3_flow_create_fdir_rule(dev, pattern, actions, + error, flow); + if (ret == 0) return flow; - } - rte_free(fdir_rule_ptr); -err_fdir: - if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) - hns3_counter_release(dev, fdir_rule.act_cnt.id); -err: rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Failed to create flow"); -out: TAILQ_REMOVE(&hw->flow_list, flow_node, entries); rte_free(flow_node); rte_free(flow); + return NULL; } @@ -1870,7 +1920,7 @@ break; case RTE_ETH_FILTER_HASH: rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; - ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info, + ret = hns3_config_rss_filter(hw, &rss_filter_ptr->filter_info, false); if (ret) return rte_flow_error_set(error, EIO, diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_intr.c dpdk-20.11.7/drivers/net/hns3/hns3_intr.c --- dpdk-20.11.6/drivers/net/hns3/hns3_intr.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_intr.c 2022-12-13 10:50:22.000000000 +0000 @@ -10,18 +10,12 @@ #include "hns3_ethdev.h" #include "hns3_logs.h" -#include "hns3_intr.h" #include "hns3_regs.h" #include "hns3_rxtx.h" +#include "hns3_intr.h" #define SWITCH_CONTEXT_US 10 -#define HNS3_CHECK_MERGE_CNT(val) \ - do { \ - if (val) \ - hw->reset.stats.merge_cnt++; \ - } while (0) - static const char *reset_string[HNS3_MAX_RESET] = { "none", "vf_func", "vf_pf_func", "vf_full", "flr", "vf_global", 
"pf_func", "global", "IMP", @@ -1890,20 +1884,20 @@ hns3_clear_reset_level(struct hns3_hw *hw, uint64_t *levels) { uint64_t merge_cnt = hw->reset.stats.merge_cnt; - int64_t tmp; + uint64_t tmp; switch (hw->reset.level) { case HNS3_IMP_RESET: hns3_atomic_clear_bit(HNS3_IMP_RESET, levels); tmp = hns3_test_and_clear_bit(HNS3_GLOBAL_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; break; case HNS3_GLOBAL_RESET: hns3_atomic_clear_bit(HNS3_GLOBAL_RESET, levels); tmp = hns3_test_and_clear_bit(HNS3_FUNC_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; break; case HNS3_FUNC_RESET: hns3_atomic_clear_bit(HNS3_FUNC_RESET, levels); @@ -1911,19 +1905,19 @@ case HNS3_VF_RESET: hns3_atomic_clear_bit(HNS3_VF_RESET, levels); tmp = hns3_test_and_clear_bit(HNS3_VF_PF_FUNC_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; break; case HNS3_VF_FULL_RESET: hns3_atomic_clear_bit(HNS3_VF_FULL_RESET, levels); tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; break; case HNS3_VF_PF_FUNC_RESET: hns3_atomic_clear_bit(HNS3_VF_PF_FUNC_RESET, levels); tmp = hns3_test_and_clear_bit(HNS3_VF_FUNC_RESET, levels); - HNS3_CHECK_MERGE_CNT(tmp); + merge_cnt = tmp > 0 ? merge_cnt + 1 : merge_cnt; break; case HNS3_VF_FUNC_RESET: hns3_atomic_clear_bit(HNS3_VF_FUNC_RESET, levels); @@ -1935,13 +1929,16 @@ default: return; }; - if (merge_cnt != hw->reset.stats.merge_cnt) + + if (merge_cnt != hw->reset.stats.merge_cnt) { hns3_warn(hw, "No need to do low-level reset after %s reset. " "merge cnt: %" PRIu64 " total merge cnt: %" PRIu64, reset_string[hw->reset.level], hw->reset.stats.merge_cnt - merge_cnt, hw->reset.stats.merge_cnt); + hw->reset.stats.merge_cnt = merge_cnt; + } } static bool diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_intr.h dpdk-20.11.7/drivers/net/hns3/hns3_intr.h --- dpdk-20.11.6/drivers/net/hns3/hns3_intr.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_intr.h 2022-12-13 10:50:22.000000000 +0000 @@ -98,7 +98,7 @@ const struct hns3_hw_error *hw_err; }; -int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool state); +int hns3_enable_hw_error_intr(struct hns3_adapter *hns, bool en); void hns3_handle_msix_error(struct hns3_adapter *hns, uint64_t *levels); void hns3_handle_ras_error(struct hns3_adapter *hns, uint64_t *levels); @@ -111,7 +111,7 @@ void hns3_schedule_delayed_reset(struct hns3_adapter *hns); int hns3_reset_req_hw_reset(struct hns3_adapter *hns); int hns3_reset_process(struct hns3_adapter *hns, - enum hns3_reset_level reset_level); + enum hns3_reset_level new_level); void hns3_reset_abort(struct hns3_adapter *hns); #endif /* _HNS3_INTR_H_ */ diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_mbx.c dpdk-20.11.7/drivers/net/hns3/hns3_mbx.c --- dpdk-20.11.6/drivers/net/hns3/hns3_mbx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_mbx.c 2022-12-13 10:50:22.000000000 +0000 @@ -435,15 +435,17 @@ * Clear opcode to inform intr thread don't process * again. 
*/ - crq->desc[crq->next_to_use].opcode = 0; + crq->desc[next_to_use].opcode = 0; } scan_next: next_to_use = (next_to_use + 1) % hw->cmq.crq.desc_num; } - crq->next_to_use = next_to_use; - hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); + /* + * Note: the crq->next_to_use field should not updated, otherwise, + * mailbox messages may be discarded. + */ } void diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_mbx.h dpdk-20.11.7/drivers/net/hns3/hns3_mbx.h --- dpdk-20.11.6/drivers/net/hns3/hns3_mbx.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_mbx.h 2022-12-13 10:50:22.000000000 +0000 @@ -5,6 +5,10 @@ #ifndef _HNS3_MBX_H_ #define _HNS3_MBX_H_ +#include <stdint.h> + +#include <rte_spinlock.h> + enum HNS3_MBX_OPCODE { HNS3_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ HNS3_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */ diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_regs.h dpdk-20.11.7/drivers/net/hns3/hns3_regs.h --- dpdk-20.11.6/drivers/net/hns3/hns3_regs.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_regs.h 2022-12-13 10:50:22.000000000 +0000 @@ -5,6 +5,8 @@ #ifndef _HNS3_REGS_H_ #define _HNS3_REGS_H_ +#include <rte_dev_info.h> + /* bar registers for cmdq */ #define HNS3_CMDQ_TX_ADDR_L_REG 0x27000 #define HNS3_CMDQ_TX_ADDR_H_REG 0x27004 diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_rss.c dpdk-20.11.7/drivers/net/hns3/hns3_rss.c --- dpdk-20.11.6/drivers/net/hns3/hns3_rss.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_rss.c 2022-12-13 10:50:22.000000000 +0000 @@ -10,7 +10,7 @@ #include "hns3_logs.h" /* Default hash keys */ -const uint8_t hns3_hash_key[] = { +const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE] = { 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, @@ -57,8 +57,8 @@ HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S, /* IPV6_SCTP ENABLE FIELD */ - HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D = 48, - HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S, + HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D = 48, + HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S, HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D, HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S, HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER, @@ -70,116 +70,209 @@ HNS3_RSS_FIELD_IPV6_FRAG_IP_S }; +enum hns3_rss_tuple_type { + HNS3_RSS_IP_TUPLE, + HNS3_RSS_IP_L4_TUPLE, +}; + static const struct { uint64_t rss_types; + uint16_t tuple_type; uint64_t rss_field; } hns3_set_tuple_table[] = { + /* IPV4-FRAG */ { ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) }, { ETH_RSS_FRAG_IPV4 | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) }, + { ETH_RSS_FRAG_IPV4, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) }, + + /* IPV4 */ + { ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) }, + { ETH_RSS_IPV4 | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, + { ETH_RSS_IPV4, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, + + /* IPV4-OTHER */ + { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) }, + { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, + { ETH_RSS_NONFRAG_IPV4_OTHER, + HNS3_RSS_IP_TUPLE, 
+ BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, + + /* IPV4-TCP */ { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) }, { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) }, { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) }, { ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, + { ETH_RSS_NONFRAG_IPV4_TCP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, + + /* IPV4-UDP */ { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) }, { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) }, { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) }, { ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, + { ETH_RSS_NONFRAG_IPV4_UDP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, + + /* IPV4-SCTP */ { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) }, { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) }, { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) }, { ETH_RSS_NONFRAG_IPV4_SCTP | ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) }, - { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_SRC_ONLY, - BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) }, - { ETH_RSS_NONFRAG_IPV4_OTHER | ETH_RSS_L3_DST_ONLY, - BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, + { ETH_RSS_NONFRAG_IPV4_SCTP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) }, + + /* IPV6-FRAG */ { ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) }, { ETH_RSS_FRAG_IPV6 | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, + { ETH_RSS_FRAG_IPV6, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, + + /* IPV6 */ + { ETH_RSS_IPV6 | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) }, + { ETH_RSS_IPV6 | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, + { ETH_RSS_IPV6, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, + + /* IPV6-OTHER */ + { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) }, + { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) 
}, + { ETH_RSS_NONFRAG_IPV6_OTHER, + HNS3_RSS_IP_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, + + /* IPV6-TCP */ { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) }, { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) }, { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) }, { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, + { ETH_RSS_NONFRAG_IPV6_TCP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, + + /* IPV6-UDP */ { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) }, { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) }, { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) }, { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_L4_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, + { ETH_RSS_NONFRAG_IPV6_UDP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, + + /* IPV6-SCTP */ { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_SRC_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) }, { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L3_DST_ONLY, + HNS3_RSS_IP_L4_TUPLE, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) }, { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_SRC_ONLY, - BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) }, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) }, { ETH_RSS_NONFRAG_IPV6_SCTP | ETH_RSS_L4_DST_ONLY, - BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) }, - { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_SRC_ONLY, - BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) }, - { ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_L3_DST_ONLY, - BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) }, -}; - -static const struct { - uint64_t rss_types; - uint64_t rss_field; -} hns3_set_rss_types[] = { - { ETH_RSS_FRAG_IPV4, BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_FRAG_IP_S) }, - { ETH_RSS_NONFRAG_IPV4_TCP, BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_TCP_EN_TCP_D) }, - { ETH_RSS_NONFRAG_IPV4_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_UDP_EN_UDP_D) }, - { ETH_RSS_NONFRAG_IPV4_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_SCTP_EN_SCTP_VER) }, - { ETH_RSS_NONFRAG_IPV4_OTHER, - BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV4_EN_NONFRAG_IP_D) }, - { ETH_RSS_FRAG_IPV6, BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_FRAG_IP_D) }, - { ETH_RSS_NONFRAG_IPV6_TCP, 
BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_TCP_EN_TCP_D) }, - { ETH_RSS_NONFRAG_IPV6_UDP, BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_UDP_EN_UDP_D) }, - { ETH_RSS_NONFRAG_IPV6_SCTP, BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) }, + { ETH_RSS_NONFRAG_IPV6_SCTP, + HNS3_RSS_IP_L4_TUPLE, + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_IP_D) | - BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_D) | - BIT_ULL(HNS3_RSS_FILED_IPV6_SCTP_EN_SCTP_S) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_D) | + BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_S) | BIT_ULL(HNS3_RSS_FIELD_IPV6_SCTP_EN_SCTP_VER) }, - { ETH_RSS_NONFRAG_IPV6_OTHER, - BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_S) | - BIT_ULL(HNS3_RSS_FIELD_IPV6_NONFRAG_IP_D) } }; /* @@ -307,46 +400,80 @@ return ret; } +static void +hns3_rss_check_l3l4_types(struct hns3_hw *hw, uint64_t rss_hf) +{ + uint64_t ip_mask = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | + ETH_RSS_NONFRAG_IPV4_OTHER | + ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | + ETH_RSS_NONFRAG_IPV6_OTHER; + uint64_t l4_mask = ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_NONFRAG_IPV4_SCTP | + ETH_RSS_NONFRAG_IPV6_TCP | + ETH_RSS_NONFRAG_IPV6_UDP | + ETH_RSS_NONFRAG_IPV6_SCTP; + uint64_t l3_src_dst_mask = ETH_RSS_L3_SRC_ONLY | + ETH_RSS_L3_DST_ONLY; + uint64_t l4_src_dst_mask = ETH_RSS_L4_SRC_ONLY | + ETH_RSS_L4_DST_ONLY; + + if (rss_hf & l3_src_dst_mask && + !(rss_hf & ip_mask || rss_hf & l4_mask)) + hns3_warn(hw, "packet type isn't specified, L3_SRC/DST_ONLY is ignored."); + + if (rss_hf & l4_src_dst_mask && !(rss_hf & l4_mask)) + hns3_warn(hw, "packet type isn't specified, L4_SRC/DST_ONLY is ignored."); +} + +static uint64_t +hns3_rss_calc_tuple_filed(struct hns3_hw *hw, uint64_t rss_hf) +{ + uint64_t l3_only_mask = ETH_RSS_L3_SRC_ONLY | + ETH_RSS_L3_DST_ONLY; + uint64_t l4_only_mask = ETH_RSS_L4_SRC_ONLY | + ETH_RSS_L4_DST_ONLY; + uint64_t l3_l4_only_mask = l3_only_mask | l4_only_mask; + bool has_l3_l4_only = !!(rss_hf & l3_l4_only_mask); + bool has_l3_only = !!(rss_hf & l3_only_mask); + uint64_t tuple = 0; + uint32_t i; + + for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) { + if ((rss_hf & hns3_set_tuple_table[i].rss_types) != + hns3_set_tuple_table[i].rss_types) + continue; + + if (hns3_set_tuple_table[i].tuple_type == HNS3_RSS_IP_TUPLE) { + if (hns3_set_tuple_table[i].rss_types & l3_only_mask || + !has_l3_only) + tuple |= hns3_set_tuple_table[i].rss_field; + continue; + } + + /* For IP types with L4, we need check both L3 and L4 */ + if (hns3_set_tuple_table[i].rss_types & l3_l4_only_mask || + !has_l3_l4_only) + tuple |= hns3_set_tuple_table[i].rss_field; + } + hns3_rss_check_l3l4_types(hw, rss_hf); + + return tuple; +} + int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf) { struct hns3_rss_input_tuple_cmd *req; struct hns3_cmd_desc desc; - uint32_t fields_count = 0; /* count times for setting tuple fields */ - uint32_t i; + uint64_t tuple_field; int ret; hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_RSS_INPUT_TUPLE, false); - req = (struct hns3_rss_input_tuple_cmd *)desc.data; - for (i = 0; i < RTE_DIM(hns3_set_tuple_table); i++) { - if ((rss_hf & hns3_set_tuple_table[i].rss_types) == - hns3_set_tuple_table[i].rss_types) { - req->tuple_field |= - 
rte_cpu_to_le_64(hns3_set_tuple_table[i].rss_field); - fields_count++; - } - } - - /* - * When user does not specify the following types or a combination of - * the following types, it enables all fields for the supported RSS - * types. the following types as: - * - ETH_RSS_L3_SRC_ONLY - * - ETH_RSS_L3_DST_ONLY - * - ETH_RSS_L4_SRC_ONLY - * - ETH_RSS_L4_DST_ONLY - */ - if (fields_count == 0) { - for (i = 0; i < RTE_DIM(hns3_set_rss_types); i++) { - if ((rss_hf & hns3_set_rss_types[i].rss_types) == - hns3_set_rss_types[i].rss_types) - req->tuple_field |= rte_cpu_to_le_64( - hns3_set_rss_types[i].rss_field); - } - } - + tuple_field = hns3_rss_calc_tuple_filed(hw, rss_hf); + req->tuple_field = rte_cpu_to_le_64(tuple_field); ret = hns3_cmd_send(hw, &desc, 1); if (ret) { hns3_err(hw, "Update RSS flow types tuples failed %d", ret); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_rss.h dpdk-20.11.7/drivers/net/hns3/hns3_rss.h --- dpdk-20.11.6/drivers/net/hns3/hns3_rss.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_rss.h 2022-12-13 10:50:22.000000000 +0000 @@ -4,15 +4,18 @@ #ifndef _HNS3_RSS_H_ #define _HNS3_RSS_H_ + #include <rte_ethdev.h> #include <rte_flow.h> #define HNS3_ETH_RSS_SUPPORT ( \ + ETH_RSS_IPV4 | \ ETH_RSS_FRAG_IPV4 | \ ETH_RSS_NONFRAG_IPV4_TCP | \ ETH_RSS_NONFRAG_IPV4_UDP | \ ETH_RSS_NONFRAG_IPV4_SCTP | \ ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_IPV6 | \ ETH_RSS_FRAG_IPV6 | \ ETH_RSS_NONFRAG_IPV6_TCP | \ ETH_RSS_NONFRAG_IPV6_UDP | \ @@ -88,9 +91,10 @@ return 1UL << fls(x - 1); } -extern const uint8_t hns3_hash_key[]; +extern const uint8_t hns3_hash_key[HNS3_RSS_KEY_SIZE]; struct hns3_adapter; +struct hns3_hw; int hns3_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); @@ -110,6 +114,6 @@ void hns3_rss_uninit(struct hns3_adapter *hns); int hns3_set_rss_tuple_by_rss_hf(struct hns3_hw *hw, uint64_t rss_hf); int hns3_set_rss_algo_key(struct hns3_hw *hw, const uint8_t *key); -int hns3_restore_rss_filter(struct rte_eth_dev *dev); +int hns3_restore_filter(struct hns3_adapter *hns); #endif /* _HNS3_RSS_H_ */ diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_rxtx.c dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.c --- dpdk-20.11.6/drivers/net/hns3/hns3_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -16,9 +16,9 @@ #endif #include "hns3_ethdev.h" -#include "hns3_rxtx.h" #include "hns3_regs.h" #include "hns3_logs.h" +#include "hns3_rxtx.h" #define HNS3_CFG_DESC_NUM(num) ((num) / 8 - 1) #define HNS3_RX_RING_PREFETCTH_MASK 3 @@ -1989,7 +1989,7 @@ RTE_PTYPE_INNER_L4_TCP, RTE_PTYPE_INNER_L4_SCTP, RTE_PTYPE_INNER_L4_ICMP, - RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_GRENAT, RTE_PTYPE_TUNNEL_NVGRE, RTE_PTYPE_UNKNOWN }; @@ -2053,7 +2053,7 @@ tbl->ol3table[5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT; tbl->ol4table[0] = RTE_PTYPE_UNKNOWN; - tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN; + tbl->ol4table[1] = RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_GRENAT; tbl->ol4table[2] = RTE_PTYPE_TUNNEL_NVGRE; } @@ -2504,7 +2504,7 @@ } uint16_t __rte_weak -hns3_recv_pkts_vec(__rte_unused void *tx_queue, +hns3_recv_pkts_vec(__rte_unused void *rx_queue, __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) { @@ -2512,7 +2512,7 @@ } uint16_t __rte_weak -hns3_recv_pkts_vec_sve(__rte_unused void *tx_queue, +hns3_recv_pkts_vec_sve(__rte_unused void *rx_queue, __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts) { @@ -3710,14 +3710,16 @@ } 
txq->tx_bd_ready -= nb_pkts; - if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) { + if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) { nb_tx = txq->nb_tx_desc - txq->next_to_use; hns3_tx_fill_hw_ring(txq, tx_pkts, nb_tx); txq->next_to_use = 0; } - hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); - txq->next_to_use += nb_pkts - nb_tx; + if (nb_pkts > nb_tx) { + hns3_tx_fill_hw_ring(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); + txq->next_to_use += nb_pkts - nb_tx; + } hns3_write_reg_opt(txq->io_tail_reg, nb_pkts); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_rxtx.h dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.h --- dpdk-20.11.6/drivers/net/hns3/hns3_rxtx.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_rxtx.h 2022-12-13 10:50:22.000000000 +0000 @@ -6,7 +6,15 @@ #define _HNS3_RXTX_H_ #include <stdint.h> + #include <rte_mbuf_core.h> +#include <rte_ethdev.h> +#include <rte_ethdev_core.h> +#include <rte_io.h> +#include <rte_mempool.h> +#include <rte_memzone.h> + +#include "hns3_ethdev.h" #define HNS3_MIN_RING_DESC 64 #define HNS3_MAX_RING_DESC 32768 @@ -627,10 +635,12 @@ int (*callback)(struct hns3_rx_queue *, void *), void *arg); void hns3_dev_release_mbufs(struct hns3_adapter *hns); int hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, - unsigned int socket, const struct rte_eth_rxconf *conf, + unsigned int socket_id, + const struct rte_eth_rxconf *conf, struct rte_mempool *mp); int hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc, - unsigned int socket, const struct rte_eth_txconf *conf); + unsigned int socket_id, + const struct rte_eth_txconf *conf); uint32_t hns3_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id); int hns3_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); int hns3_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); @@ -640,9 +650,11 @@ uint16_t nb_pkts); uint16_t hns3_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); -uint16_t hns3_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, +uint16_t hns3_recv_pkts_vec(void *__restrict rx_queue, + struct rte_mbuf **__restrict rx_pkts, uint16_t nb_pkts); -uint16_t hns3_recv_pkts_vec_sve(void *rx_queue, struct rte_mbuf **rx_pkts, +uint16_t hns3_recv_pkts_vec_sve(void *__restrict rx_queue, + struct rte_mbuf **__restrict rx_pkts, uint16_t nb_pkts); int hns3_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id, @@ -685,7 +697,7 @@ struct rte_eth_rxq_info *qinfo); void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo); -uint32_t hns3_get_tqp_reg_offset(uint16_t idx); +uint32_t hns3_get_tqp_reg_offset(uint16_t queue_id); int hns3_start_all_txqs(struct rte_eth_dev *dev); int hns3_start_all_rxqs(struct rte_eth_dev *dev); void hns3_stop_all_txqs(struct rte_eth_dev *dev); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_rxtx_vec_sve.c dpdk-20.11.7/drivers/net/hns3/hns3_rxtx_vec_sve.c --- dpdk-20.11.6/drivers/net/hns3/hns3_rxtx_vec_sve.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_rxtx_vec_sve.c 2022-12-13 10:50:22.000000000 +0000 @@ -384,10 +384,12 @@ HNS3_UINT32_BIT; svuint64_t base_addr, buf_iova, data_off, data_len, addr; svuint64_t offsets = svindex_u64(0, BD_SIZE); - uint32_t i = 0; - svbool_t pg = svwhilelt_b64_u32(i, nb_pkts); + uint32_t cnt = svcntd(); + svbool_t pg; + uint32_t i; - do { + for (i = 0; i < nb_pkts; /* i is updated in the inner loop */) { + pg = svwhilelt_b64_u32(i, 
nb_pkts); base_addr = svld1_u64(pg, (uint64_t *)pkts); /* calc mbuf's field buf_iova address */ buf_iova = svadd_n_u64_z(pg, base_addr, @@ -429,12 +431,11 @@ offsets, svdup_n_u64(valid_bit)); /* update index for next loop */ - i += svcntd(); - pkts += svcntd(); - txdp += svcntd(); - tx_entry += svcntd(); - pg = svwhilelt_b64_u32(i, nb_pkts); - } while (svptest_any(svptrue_b64(), pg)); + i += cnt; + pkts += cnt; + txdp += cnt; + tx_entry += cnt; + } } static uint16_t @@ -454,14 +455,16 @@ return 0; } - if (txq->next_to_use + nb_pkts > txq->nb_tx_desc) { + if (txq->next_to_use + nb_pkts >= txq->nb_tx_desc) { nb_tx = txq->nb_tx_desc - txq->next_to_use; hns3_tx_fill_hw_ring_sve(txq, tx_pkts, nb_tx); txq->next_to_use = 0; } - hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); - txq->next_to_use += nb_pkts - nb_tx; + if (nb_pkts > nb_tx) { + hns3_tx_fill_hw_ring_sve(txq, tx_pkts + nb_tx, nb_pkts - nb_tx); + txq->next_to_use += nb_pkts - nb_tx; + } txq->tx_bd_ready -= nb_pkts; hns3_write_reg_opt(txq->io_tail_reg, nb_pkts); diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_stats.c dpdk-20.11.7/drivers/net/hns3/hns3_stats.c --- dpdk-20.11.6/drivers/net/hns3/hns3_stats.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_stats.c 2022-12-13 10:50:22.000000000 +0000 @@ -427,15 +427,6 @@ return 0; } -static int -hns3_query_update_mac_stats(struct rte_eth_dev *dev) -{ - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_hw *hw = &hns->hw; - - return hns3_update_mac_stats(hw); -} - /* Get tqp stats from register */ static int hns3_update_tqp_stats(struct hns3_hw *hw) @@ -602,14 +593,13 @@ } static int -hns3_mac_stats_reset(__rte_unused struct rte_eth_dev *dev) +hns3_mac_stats_reset(struct hns3_hw *hw) { - struct hns3_adapter *hns = dev->data->dev_private; - struct hns3_hw *hw = &hns->hw; struct hns3_mac_stats *mac_stats = &hw->mac_stats; int ret; - ret = hns3_query_update_mac_stats(dev); + /* Clear hardware MAC statistics by reading it. 
*/ + ret = hns3_update_mac_stats(hw); if (ret) { hns3_err(hw, "Clear Mac stats fail : %d", ret); return ret; @@ -727,8 +717,7 @@ count = 0; if (!hns->is_vf) { - /* Update Mac stats */ - ret = hns3_query_update_mac_stats(dev); + ret = hns3_update_mac_stats(hw); if (ret < 0) { hns3_err(hw, "Update Mac stats fail : %d", ret); return ret; @@ -1095,8 +1084,7 @@ if (hns->is_vf) return 0; - /* HW registers are cleared on read */ - ret = hns3_mac_stats_reset(dev); + ret = hns3_mac_stats_reset(&hns->hw); if (ret) return ret; @@ -1106,7 +1094,7 @@ return 0; } -int +static int hns3_tqp_stats_init(struct hns3_hw *hw) { struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats; @@ -1130,7 +1118,7 @@ return 0; } -void +static void hns3_tqp_stats_uninit(struct hns3_hw *hw) { struct hns3_tqp_stats *tqp_stats = &hw->tqp_stats; @@ -1151,3 +1139,20 @@ memset(stats->rcb_rx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num); memset(stats->rcb_tx_ring_pktnum, 0, sizeof(uint64_t) * hw->tqps_num); } + +int +hns3_stats_init(struct hns3_hw *hw) +{ + struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); + + if (!hns->is_vf) + hns3_mac_stats_reset(hw); + + return hns3_tqp_stats_init(hw); +} + +void +hns3_stats_uninit(struct hns3_hw *hw) +{ + hns3_tqp_stats_uninit(hw); +} \ No newline at end of file diff -Nru dpdk-20.11.6/drivers/net/hns3/hns3_stats.h dpdk-20.11.7/drivers/net/hns3/hns3_stats.h --- dpdk-20.11.6/drivers/net/hns3/hns3_stats.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/hns3/hns3_stats.h 2022-12-13 10:50:22.000000000 +0000 @@ -5,6 +5,8 @@ #ifndef _HNS3_STATS_H_ #define _HNS3_STATS_H_ +#include <rte_ethdev.h> + /* TQP stats */ struct hns3_tqp_stats { uint64_t rcb_tx_ring_pktnum_rcd; /* Total num of transmitted packets */ @@ -128,7 +130,10 @@ #define HNS3_TX_ERROR_STATS_FIELD_OFFSET(f) \ (offsetof(struct hns3_tx_queue, f)) -int hns3_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); +struct hns3_hw; + +int hns3_stats_get(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *rte_stats); int hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n); int hns3_dev_xstats_reset(struct rte_eth_dev *dev); @@ -143,10 +148,10 @@ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, uint32_t size); -int hns3_stats_reset(struct rte_eth_dev *dev); +int hns3_stats_reset(struct rte_eth_dev *eth_dev); void hns3_error_int_stats_add(struct hns3_adapter *hns, const char *err); -int hns3_tqp_stats_init(struct hns3_hw *hw); -void hns3_tqp_stats_uninit(struct hns3_hw *hw); +int hns3_stats_init(struct hns3_hw *hw); +void hns3_stats_uninit(struct hns3_hw *hw); int hns3_query_mac_stats_reg_num(struct hns3_hw *hw); #endif /* _HNS3_STATS_H_ */ diff -Nru dpdk-20.11.6/drivers/net/i40e/i40e_ethdev.c dpdk-20.11.7/drivers/net/i40e/i40e_ethdev.c --- dpdk-20.11.6/drivers/net/i40e/i40e_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/i40e/i40e_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -13141,8 +13141,13 @@ enum i40e_status_code status; bool can_be_set = true; - /* I40E_MEDIA_TYPE_BASET link up can be ignored */ - if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET) { + /* + * I40E_MEDIA_TYPE_BASET link up can be ignored + * I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type + * is I40E_MEDIA_TYPE_UNKNOWN + */ + if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && + hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) { do { update_link_reg(hw, &link); if (link.link_status) diff -Nru dpdk-20.11.6/drivers/net/i40e/i40e_vf_representor.c 
dpdk-20.11.7/drivers/net/i40e/i40e_vf_representor.c --- dpdk-20.11.6/drivers/net/i40e/i40e_vf_representor.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/i40e/i40e_vf_representor.c 2022-12-13 10:50:22.000000000 +0000 @@ -29,8 +29,6 @@ struct rte_eth_dev_info *dev_info) { struct i40e_vf_representor *representor = ethdev->data->dev_private; - struct rte_eth_dev_data *pf_dev_data = - representor->adapter->pf.dev_data; /* get dev info for the vdev */ dev_info->device = ethdev->device; @@ -102,7 +100,7 @@ }; dev_info->switch_info.name = - rte_eth_devices[pf_dev_data->port_id].device->name; + rte_eth_devices[ethdev->data->port_id].device->name; dev_info->switch_info.domain_id = representor->switch_domain_id; dev_info->switch_info.port_id = representor->vf_id; diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf.h dpdk-20.11.7/drivers/net/iavf/iavf.h --- dpdk-20.11.6/drivers/net/iavf/iavf.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf.h 2022-12-13 10:50:22.000000000 +0000 @@ -287,6 +287,8 @@ int iavf_check_api_version(struct iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); +void iavf_dev_event_handler_fini(void); +int iavf_dev_event_handler_init(void); void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int iavf_enable_vlan_strip(struct iavf_adapter *adapter); int iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_ethdev.c dpdk-20.11.7/drivers/net/iavf/iavf_ethdev.c --- dpdk-20.11.6/drivers/net/iavf/iavf_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -2034,6 +2034,9 @@ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); + if (iavf_dev_event_handler_init()) + return 0; + /* register callback func to eal lib */ rte_intr_callback_register(&pci_dev->intr_handle, iavf_dev_interrupt_handler, @@ -2121,6 +2124,8 @@ iavf_dev_close(dev); + iavf_dev_event_handler_fini(); + return 0; } diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_fdir.c dpdk-20.11.7/drivers/net/iavf/iavf_fdir.c --- dpdk-20.11.6/drivers/net/iavf/iavf_fdir.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_fdir.c 2022-12-13 10:50:22.000000000 +0000 @@ -571,6 +571,14 @@ return -rte_errno; } + /* Mask for IPv4 src/dst addrs not supported */ + if (ipv4_mask->hdr.src_addr && + ipv4_mask->hdr.src_addr != UINT32_MAX) + return -rte_errno; + if (ipv4_mask->hdr.dst_addr && + ipv4_mask->hdr.dst_addr != UINT32_MAX) + return -rte_errno; + if (ipv4_mask->hdr.type_of_service == UINT8_MAX) { input_set |= IAVF_INSET_IPV4_TOS; @@ -670,6 +678,14 @@ return -rte_errno; } + /* Mask for UDP src/dst ports not supported */ + if (udp_mask->hdr.src_port && + udp_mask->hdr.src_port != UINT16_MAX) + return -rte_errno; + if (udp_mask->hdr.dst_port && + udp_mask->hdr.dst_port != UINT16_MAX) + return -rte_errno; + if (udp_mask->hdr.src_port == UINT16_MAX) { input_set |= IAVF_INSET_UDP_SRC_PORT; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT); @@ -714,6 +730,14 @@ return -rte_errno; } + /* Mask for TCP src/dst ports not supported */ + if (tcp_mask->hdr.src_port && + tcp_mask->hdr.src_port != UINT16_MAX) + return -rte_errno; + if (tcp_mask->hdr.dst_port && + tcp_mask->hdr.dst_port != UINT16_MAX) + return -rte_errno; + if (tcp_mask->hdr.src_port == UINT16_MAX) { input_set |= IAVF_INSET_TCP_SRC_PORT; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT); @@ -752,6 +776,14 @@ return -rte_errno; } + /* Mask 
for SCTP src/dst ports not supported */ + if (sctp_mask->hdr.src_port && + sctp_mask->hdr.src_port != UINT16_MAX) + return -rte_errno; + if (sctp_mask->hdr.dst_port && + sctp_mask->hdr.dst_port != UINT16_MAX) + return -rte_errno; + if (sctp_mask->hdr.src_port == UINT16_MAX) { input_set |= IAVF_INSET_SCTP_SRC_PORT; VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT); diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_hash.c dpdk-20.11.7/drivers/net/iavf/iavf_hash.c --- dpdk-20.11.6/drivers/net/iavf/iavf_hash.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_hash.c 2022-12-13 10:50:22.000000000 +0000 @@ -904,7 +904,6 @@ uint64_t pattern_hint, struct iavf_rss_meta *rss_meta, struct rte_flow_error *error) { - struct virtchnl_proto_hdrs *proto_hdrs; enum rte_flow_action_type action_type; const struct rte_flow_action_rss *rss; const struct rte_flow_action *action; @@ -961,8 +960,10 @@ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, action, "RSS type not supported"); - proto_hdrs = match_item->meta; - rss_meta->proto_hdrs = *proto_hdrs; + + memcpy(&rss_meta->proto_hdrs, match_item->meta, + sizeof(struct virtchnl_proto_hdrs)); + iavf_refine_proto_hdrs(&rss_meta->proto_hdrs, rss_type, pattern_hint); break; diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_rxtx.c dpdk-20.11.7/drivers/net/iavf/iavf_rxtx.c --- dpdk-20.11.6/drivers/net/iavf/iavf_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -833,6 +833,7 @@ { struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_rx_queue *rxq; int err; @@ -841,7 +842,11 @@ if (rx_queue_id >= dev->data->nb_rx_queues) return -EINVAL; - err = iavf_switch_queue(adapter, rx_queue_id, true, false); + if (!vf->lv_enabled) + err = iavf_switch_queue(adapter, rx_queue_id, true, false); + else + err = iavf_switch_queue_lv(adapter, rx_queue_id, true, false); + if (err) { PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", rx_queue_id); @@ -861,6 +866,7 @@ { struct iavf_adapter *adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct iavf_tx_queue *txq; int err; @@ -869,7 +875,11 @@ if (tx_queue_id >= dev->data->nb_tx_queues) return -EINVAL; - err = iavf_switch_queue(adapter, tx_queue_id, false, false); + if (!vf->lv_enabled) + err = iavf_switch_queue(adapter, tx_queue_id, false, false); + else + err = iavf_switch_queue_lv(adapter, tx_queue_id, false, false); + if (err) { PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", tx_queue_id); @@ -1085,7 +1095,9 @@ return 0; if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) { - flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD); + flags |= (PKT_RX_IP_CKSUM_GOOD | + PKT_RX_L4_CKSUM_GOOD | + PKT_RX_OUTER_L4_CKSUM_GOOD); return flags; } @@ -1102,6 +1114,11 @@ if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))) flags |= PKT_RX_EIP_CKSUM_BAD; + if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) + flags |= PKT_RX_OUTER_L4_CKSUM_BAD; + else + flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; + return flags; } @@ -2329,12 +2346,15 @@ /* TX prep functions */ uint16_t -iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, +iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { int i, ret; uint64_t ol_flags; struct rte_mbuf *m; + struct iavf_tx_queue *txq = 
tx_queue; + struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; + uint16_t max_frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD; for (i = 0; i < nb_pkts; i++) { m = tx_pkts[i]; @@ -2358,6 +2378,14 @@ return i; } + /* check the data_len in mbuf */ + if (m->data_len < IAVF_TX_MIN_PKT_LEN || + m->data_len > max_frame_size) { + rte_errno = EINVAL; + PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); + return i; + } + #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { @@ -2565,14 +2593,14 @@ uint32_t free_cnt) { struct iavf_tx_entry *swr_ring = txq->sw_ring; - uint16_t i, tx_last, tx_id; + uint16_t tx_last, tx_id; uint16_t nb_tx_free_last; uint16_t nb_tx_to_clean; - uint32_t pkt_cnt; + uint32_t pkt_cnt = 0; - /* Start free mbuf from the next of tx_tail */ - tx_last = txq->tx_tail; - tx_id = swr_ring[tx_last].next_id; + /* Start free mbuf from tx_tail */ + tx_id = txq->tx_tail; + tx_last = tx_id; if (txq->nb_free == 0 && iavf_xmit_cleanup(txq)) return 0; @@ -2585,10 +2613,8 @@ /* Loop through swr_ring to count the amount of * freeable mubfs and packets. */ - for (pkt_cnt = 0; pkt_cnt < free_cnt; ) { - for (i = 0; i < nb_tx_to_clean && - pkt_cnt < free_cnt && - tx_id != tx_last; i++) { + while (pkt_cnt < free_cnt) { + do { if (swr_ring[tx_id].mbuf != NULL) { rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf); swr_ring[tx_id].mbuf = NULL; @@ -2601,7 +2627,7 @@ } tx_id = swr_ring[tx_id].next_id; - } + } while (--nb_tx_to_clean && pkt_cnt < free_cnt && tx_id != tx_last); if (txq->rs_thresh > txq->nb_tx_desc - txq->nb_free || tx_id == tx_last) diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_rxtx.h dpdk-20.11.7/drivers/net/iavf/iavf_rxtx.h --- dpdk-20.11.6/drivers/net/iavf/iavf_rxtx.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_rxtx.h 2022-12-13 10:50:22.000000000 +0000 @@ -39,6 +39,8 @@ #define IAVF_TSO_MAX_SEG UINT8_MAX #define IAVF_TX_MAX_MTU_SEG 8 +#define IAVF_TX_MIN_PKT_LEN 17 + #define IAVF_TX_CKSUM_OFFLOAD_MASK ( \ PKT_TX_IP_CKSUM | \ PKT_TX_L4_MASK | \ diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_rxtx_vec_avx2.c dpdk-20.11.7/drivers/net/iavf/iavf_rxtx_vec_avx2.c --- dpdk-20.11.6/drivers/net/iavf/iavf_rxtx_vec_avx2.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_rxtx_vec_avx2.c 2022-12-13 10:50:22.000000000 +0000 @@ -622,43 +622,88 @@ * bit13 is for VLAN indication. */ const __m256i flags_mask = - _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); /** * data to be shuffled by the result of the flags mask shifted by 4 * bits. This gives use the l3_l4 flags. 
*/ - const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - /* shift right 1 bit to make sure it not exceed 255 */ - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, - /* second 128-bits */ - 0, 0, 0, 0, 0, 0, 0, 0, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i l3_l4_flags_shuf = + _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + /** + * second 128-bits + * shift right 20 bits to use the low two bits to indicate + * outer checksum status + * shift right 1 bit to make sure it not exceed 255 + */ + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 
20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1); const __m256i cksum_mask = - _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | - PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_EIP_CKSUM_BAD); + _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD | + PKT_RX_OUTER_L4_CKSUM_MASK); /** * data to be shuffled by result of flag mask, shifted down 12. * If RSS(bit12)/VLAN(bit13) are set, @@ -824,6 +869,15 @@ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, _mm256_srli_epi32(flag_bits, 4)); l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + __m256i l4_outer_mask = _mm256_set1_epi32(0x6); + __m256i l4_outer_flags = + _mm256_and_si256(l3_l4_flags, l4_outer_mask); + l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); + + __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); + + l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); + l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); /* set rss and vlan flags */ const __m256i rss_vlan_flag_bits = diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_rxtx_vec_avx512.c dpdk-20.11.7/drivers/net/iavf/iavf_rxtx_vec_avx512.c --- dpdk-20.11.6/drivers/net/iavf/iavf_rxtx_vec_avx512.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_rxtx_vec_avx512.c 2022-12-13 10:50:22.000000000 +0000 @@ -757,43 +757,103 @@ * bit13 is for VLAN indication. */ const __m256i flags_mask = - _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); /** * data to be shuffled by the result of the flags mask shifted by 4 * bits. This gives use the l3_l4 flags. 
*/ - const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - /* shift right 1 bit to make sure it not exceed 255 */ - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, - /* second 128-bits */ - 0, 0, 0, 0, 0, 0, 0, 0, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i l3_l4_flags_shuf = + _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + /** + * second 128-bits + * shift right 20 bits to use the low two bits + * to indicate outer checksum status + * shift right 1 bit to make sure it not exceed 255 + */ + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + 
(PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); const __m256i cksum_mask = - _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | - PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_EIP_CKSUM_BAD); + _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_EIP_CKSUM_BAD | + PKT_RX_OUTER_L4_CKSUM_MASK); /** * data to be shuffled by result of flag mask, shifted down 12. * If RSS(bit12)/VLAN(bit13) are set, @@ -953,6 +1013,15 @@ __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, _mm256_srli_epi32(flag_bits, 4)); l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + __m256i l4_outer_mask = _mm256_set1_epi32(0x6); + __m256i l4_outer_flags = + _mm256_and_si256(l3_l4_flags, l4_outer_mask); + l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); + + __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); + + l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); + l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); /* set rss and vlan flags */ const __m256i rss_vlan_flag_bits = diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_rxtx_vec_sse.c dpdk-20.11.7/drivers/net/iavf/iavf_rxtx_vec_sse.c --- dpdk-20.11.6/drivers/net/iavf/iavf_rxtx_vec_sse.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_rxtx_vec_sse.c 2022-12-13 10:50:22.000000000 +0000 @@ -222,39 +222,68 @@ * bit12 for RSS indication. * bit13 for VLAN indication. 
*/ - const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070, - 0x3070, 0x3070); + const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0, + 0x30f0, 0x30f0); const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK | PKT_RX_L4_CKSUM_MASK | + PKT_RX_OUTER_L4_CKSUM_MASK | PKT_RX_EIP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK | PKT_RX_L4_CKSUM_MASK | + PKT_RX_OUTER_L4_CKSUM_MASK | PKT_RX_EIP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK | PKT_RX_L4_CKSUM_MASK | + PKT_RX_OUTER_L4_CKSUM_MASK | PKT_RX_EIP_CKSUM_BAD, PKT_RX_IP_CKSUM_MASK | PKT_RX_L4_CKSUM_MASK | + PKT_RX_OUTER_L4_CKSUM_MASK | PKT_RX_EIP_CKSUM_BAD); /* map the checksum, rss and vlan fields to the checksum, rss * and vlan flag */ - const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - /* shift right 1 bit to make sure it not exceed 255 */ - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m128i cksum_flags = + _mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + /** + * shift right 20 bits to use the low two bits to indicate + * outer checksum status + * shift right 1 bit to make sure it not exceed 255 + */ + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_EIP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1); const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, @@ -274,6 +303,13 @@ flags = _mm_shuffle_epi8(cksum_flags, tmp_desc); /* then we shift left 1 bit */ flags = _mm_slli_epi32(flags, 1); + __m128i l4_outer_mask = _mm_set_epi32(0x6, 0x6, 0x6, 0x6); + __m128i l4_outer_flags = _mm_and_si128(flags, l4_outer_mask); + l4_outer_flags = _mm_slli_epi32(l4_outer_flags, 20); + + __m128i l3_l4_mask = 
_mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6); + __m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask); + flags = _mm_or_si128(l3_l4_flags, l4_outer_flags); /* we need to mask out the redundant bits introduced by RSS or * VLAN fields. */ @@ -325,10 +361,10 @@ * appropriate flags means that we have to do a shift and blend for * each mbuf before we do the write. */ - rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10); - rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10); - rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10); - rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10); + rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x30); + rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x30); + rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x30); + rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x30); /* write the rearm data and the olflags in one write */ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != diff -Nru dpdk-20.11.6/drivers/net/iavf/iavf_vchnl.c dpdk-20.11.7/drivers/net/iavf/iavf_vchnl.c --- dpdk-20.11.6/drivers/net/iavf/iavf_vchnl.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/iavf/iavf_vchnl.c 2022-12-13 10:50:22.000000000 +0000 @@ -2,6 +2,7 @@ * Copyright(c) 2017 Intel Corporation */ +#include <fcntl.h> #include <stdio.h> #include <errno.h> #include <stdint.h> @@ -26,6 +27,146 @@ #define MAX_TRY_TIMES 200 #define ASQ_DELAY_MS 10 +#define MAX_EVENT_PENDING 16 + +struct iavf_event_element { + TAILQ_ENTRY(iavf_event_element) next; + struct rte_eth_dev *dev; + enum rte_eth_event_type event; + void *param; + size_t param_alloc_size; + uint8_t param_alloc_data[0]; +}; + +struct iavf_event_handler { + uint32_t ndev; + pthread_t tid; + int fd[2]; + pthread_mutex_t lock; + TAILQ_HEAD(event_list, iavf_event_element) pending; +}; + +static struct iavf_event_handler event_handler = { + .fd = {-1, -1}, +}; + +#ifndef TAILQ_FOREACH_SAFE +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = TAILQ_FIRST((head)); \ + (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ + (var) = (tvar)) +#endif + +static void * +iavf_dev_event_handle(void *param __rte_unused) +{ + struct iavf_event_handler *handler = &event_handler; + TAILQ_HEAD(event_list, iavf_event_element) pending; + + while (true) { + char unused[MAX_EVENT_PENDING]; + ssize_t nr = read(handler->fd[0], &unused, sizeof(unused)); + if (nr <= 0) + break; + + TAILQ_INIT(&pending); + pthread_mutex_lock(&handler->lock); + TAILQ_CONCAT(&pending, &handler->pending, next); + pthread_mutex_unlock(&handler->lock); + + struct iavf_event_element *pos, *save_next; + TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) { + TAILQ_REMOVE(&pending, pos, next); + rte_eth_dev_callback_process(pos->dev, pos->event, pos->param); + rte_free(pos); + } + } + + return NULL; +} + +static void +iavf_dev_event_post(struct rte_eth_dev *dev, + enum rte_eth_event_type event, + void *param, size_t param_alloc_size) +{ + struct iavf_event_handler *handler = &event_handler; + char notify_byte; + struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0); + if (!elem) + return; + + elem->dev = dev; + elem->event = event; + elem->param = param; + elem->param_alloc_size = param_alloc_size; + if (param && param_alloc_size) { + rte_memcpy(elem->param_alloc_data, param, param_alloc_size); + elem->param = elem->param_alloc_data; + } + + pthread_mutex_lock(&handler->lock); + TAILQ_INSERT_TAIL(&handler->pending, elem, next); + 
pthread_mutex_unlock(&handler->lock); + + ssize_t nw = write(handler->fd[1], ¬ify_byte, 1); + RTE_SET_USED(nw); +} + +int +iavf_dev_event_handler_init(void) +{ + struct iavf_event_handler *handler = &event_handler; + + if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1) + return 0; +#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0 + int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY); +#else + int err = pipe(handler->fd); +#endif + if (err != 0) { + __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED); + return -1; + } + + TAILQ_INIT(&handler->pending); + pthread_mutex_init(&handler->lock, NULL); + + if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread", + NULL, iavf_dev_event_handle, NULL)) { + __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED); + return -1; + } + + return 0; +} + +void +iavf_dev_event_handler_fini(void) +{ + struct iavf_event_handler *handler = &event_handler; + + if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0) + return; + + int unused = pthread_cancel(handler->tid); + RTE_SET_USED(unused); + close(handler->fd[0]); + close(handler->fd[1]); + handler->fd[0] = -1; + handler->fd[1] = -1; + + pthread_join(handler->tid, NULL); + pthread_mutex_destroy(&handler->lock); + + struct iavf_event_element *pos, *save_next; + TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) { + TAILQ_REMOVE(&handler->pending, pos, next); + rte_free(pos); + } +} + static uint32_t iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed) { @@ -262,8 +403,8 @@ case VIRTCHNL_EVENT_RESET_IMPENDING: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); vf->vf_reset = true; - rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, - NULL); + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET, + NULL, 0); break; case VIRTCHNL_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event"); @@ -277,7 +418,7 @@ vf->link_speed = iavf_convert_link_speed(speed); } iavf_dev_link_update(dev, 0); - rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); break; case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event"); diff -Nru dpdk-20.11.6/drivers/net/ice/base/ice_common.c dpdk-20.11.7/drivers/net/ice/base/ice_common.c --- dpdk-20.11.6/drivers/net/ice/base/ice_common.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ice/base/ice_common.c 2022-12-13 10:50:22.000000000 +0000 @@ -289,7 +289,6 @@ case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: - case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_40GBASE_SR4: @@ -346,6 +345,7 @@ case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_5GBASE_KR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: diff -Nru dpdk-20.11.6/drivers/net/ice/base/ice_switch.c dpdk-20.11.7/drivers/net/ice/base/ice_switch.c --- dpdk-20.11.6/drivers/net/ice/base/ice_switch.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ice/base/ice_switch.c 2022-12-13 10:50:22.000000000 +0000 @@ -4180,7 +4180,8 @@ if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + if (m_list_itr->fltr_info.fltr_act 
== ICE_FWD_TO_VSI) + m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; @@ -5548,7 +5549,7 @@ status = _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vlan_id, lport, sw); - if (status) + if (status && status != ICE_ERR_ALREADY_EXISTS) break; } @@ -6625,7 +6626,6 @@ last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) { - last_chain_entry->fv_idx[i] = entry->chain_idx; buf[recps].content.lkup_indx[i] = entry->chain_idx; buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF); ice_set_bit(entry->rid, rm->r_bitmap); @@ -7901,7 +7901,7 @@ /* A rule already exists with the new VSI being added */ if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) - return ICE_SUCCESS; + return ICE_ERR_ALREADY_EXISTS; /* Update the previously created VSI list set with * the new VSI ID passed in diff -Nru dpdk-20.11.6/drivers/net/ice/ice_ethdev.c dpdk-20.11.7/drivers/net/ice/ice_ethdev.c --- dpdk-20.11.6/drivers/net/ice/ice_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ice/ice_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -4427,10 +4427,8 @@ if (status) return status; - if (rss_conf->rss_hf == 0) { + if (rss_conf->rss_hf == 0) pf->rss_hf = 0; - return 0; - } /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); diff -Nru dpdk-20.11.6/drivers/net/ice/ice_rxtx.c dpdk-20.11.7/drivers/net/ice/ice_rxtx.c --- dpdk-20.11.6/drivers/net/ice/ice_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ice/ice_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -1158,7 +1158,8 @@ return; } - q->rx_rel_mbufs(q); + if (q->rx_rel_mbufs != NULL) + q->rx_rel_mbufs(q); rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); @@ -1356,7 +1357,8 @@ return; } - q->tx_rel_mbufs(q); + if (q->tx_rel_mbufs != NULL) + q->tx_rel_mbufs(q); rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); @@ -1848,6 +1850,10 @@ } else rxm->data_len = (uint16_t)(rx_packet_len - RTE_ETHER_CRC_LEN); + } else if (rx_packet_len == 0) { + rte_pktmbuf_free_seg(rxm); + first_seg->nb_segs--; + last_seg->next = NULL; } first_seg->port = rxq->port_id; @@ -3202,6 +3208,22 @@ #define ICE_MIN_TSO_MSS 64 #define ICE_MAX_TSO_MSS 9728 #define ICE_MAX_TSO_FRAME_SIZE 262144 + +/*Check for empty mbuf*/ +static inline uint16_t +ice_check_empty_mbuf(struct rte_mbuf *tx_pkt) +{ + struct rte_mbuf *txd = tx_pkt; + + while (txd != NULL) { + if (txd->data_len == 0) + return -1; + txd = txd->next; + } + + return 0; +} + uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -3209,6 +3231,9 @@ int i, ret; uint64_t ol_flags; struct rte_mbuf *m; + struct ice_tx_queue *txq = tx_queue; + struct rte_eth_dev *dev = &rte_eth_devices[txq->port_id]; + uint16_t max_frame_size = dev->data->mtu + ICE_ETH_OVERHEAD; for (i = 0; i < nb_pkts; i++) { m = tx_pkts[i]; @@ -3225,6 +3250,14 @@ return i; } + /* check the data_len in mbuf */ + if (m->data_len < ICE_TX_MIN_PKT_LEN || + m->data_len > max_frame_size) { + rte_errno = EINVAL; + PMD_DRV_LOG(ERR, "INVALID mbuf: bad data_len=[%hu]", m->data_len); + return i; + } + #ifdef RTE_LIBRTE_ETHDEV_DEBUG ret = rte_validate_tx_offload(m); if (ret != 0) { @@ -3237,6 +3270,12 @@ rte_errno = -ret; return i; } + + if (ice_check_empty_mbuf(m) != 0) { + rte_errno = EINVAL; + PMD_DRV_LOG(ERR, "INVALID mbuf: last mbuf data_len=[0]"); + return i; + } } return i; } 
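Note on the ice_prep_pkts() hunk just above: it adds a data_len bounds check (at least ICE_TX_MIN_PKT_LEN and no larger than the MTU-derived frame size) plus an empty-segment check, both reported through rte_errno as EINVAL. A minimal caller-side sketch of how an application could surface those errors via rte_eth_tx_prepare() before transmitting; the helper name and the port/queue parameters are illustrative only and not part of this patch:

#include <stdio.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Run the PMD's tx-prepare callback (ice_prep_pkts() on the ice PMD) so that
 * invalid mbufs are rejected with rte_errno == EINVAL before they reach
 * rte_eth_tx_burst().
 */
static uint16_t
send_burst_checked(uint16_t port_id, uint16_t queue_id,
		   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Returns the number of leading packets that passed the checks;
	 * rte_errno describes why the first rejected packet failed.
	 */
	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	if (nb_ok != nb_pkts)
		printf("tx_prepare stopped at packet %u: %s\n",
		       nb_ok, rte_strerror(rte_errno));

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
}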
diff -Nru dpdk-20.11.6/drivers/net/ice/ice_rxtx.h dpdk-20.11.7/drivers/net/ice/ice_rxtx.h --- dpdk-20.11.6/drivers/net/ice/ice_rxtx.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ice/ice_rxtx.h 2022-12-13 10:50:22.000000000 +0000 @@ -40,6 +40,8 @@ #define ICE_RXDID_COMMS_OVS 22 +#define ICE_TX_MIN_PKT_LEN 17 + typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq); typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq); typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq, diff -Nru dpdk-20.11.6/drivers/net/igc/base/igc_i225.c dpdk-20.11.7/drivers/net/igc/base/igc_i225.c --- dpdk-20.11.6/drivers/net/igc/base/igc_i225.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/igc/base/igc_i225.c 2022-12-13 10:50:22.000000000 +0000 @@ -173,18 +173,8 @@ phy->ops.write_reg = igc_write_phy_reg_gpy; ret_val = igc_get_phy_id(hw); - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I225_I_PHY_ID: - phy->type = igc_phy_i225; - phy->ops.set_d0_lplu_state = igc_set_d0_lplu_state_i225; - phy->ops.set_d3_lplu_state = igc_set_d3_lplu_state_i225; - /* TODO - complete with GPY PHY information */ - break; - default: - ret_val = -IGC_ERR_PHY; - goto out; - } + phy->type = igc_phy_i225; + out: return ret_val; diff -Nru dpdk-20.11.6/drivers/net/igc/base/igc_phy.c dpdk-20.11.7/drivers/net/igc/base/igc_phy.c --- dpdk-20.11.6/drivers/net/igc/base/igc_phy.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/igc/base/igc_phy.c 2022-12-13 10:50:22.000000000 +0000 @@ -1474,8 +1474,7 @@ return ret_val; } - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) { + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { /* Read the MULTI GBT AN Control Register - reg 7.32 */ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | @@ -1615,8 +1614,7 @@ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) + if (phy->autoneg_mask & ADVERTISE_2500_FULL) ret_val = phy->ops.write_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | diff -Nru dpdk-20.11.6/drivers/net/ionic/ionic_dev.c dpdk-20.11.7/drivers/net/ionic/ionic_dev.c --- dpdk-20.11.6/drivers/net/ionic/ionic_dev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ionic/ionic_dev.c 2022-12-13 10:50:22.000000000 +0000 @@ -55,7 +55,10 @@ ioread8(&idev->dev_info->fw_version[i]); adapter->fw_version[IONIC_DEVINFO_FWVERS_BUFLEN - 1] = '\0'; - IONIC_PRINT(DEBUG, "Firmware version: %s", adapter->fw_version); + adapter->name = adapter->pci_dev->device.name; + + IONIC_PRINT(DEBUG, "%s firmware version: %s", + adapter->name, adapter->fw_version); /* BAR1: doorbells */ bar++; diff -Nru dpdk-20.11.6/drivers/net/ionic/ionic_lif.c dpdk-20.11.7/drivers/net/ionic/ionic_lif.c --- dpdk-20.11.6/drivers/net/ionic/ionic_lif.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ionic/ionic_lif.c 2022-12-13 10:50:22.000000000 +0000 @@ -114,7 +114,7 @@ for (i = 0; i < lif->nrxqcqs; i++) { struct ionic_rx_stats *rx_stats = &lif->rxqcqs[i]->stats.rx; - stats->imissed += + stats->ierrors += rx_stats->no_cb_arg + rx_stats->bad_cq_status + rx_stats->no_room + @@ -126,10 +126,8 @@ ls->rx_mcast_drop_packets + ls->rx_bcast_drop_packets; - stats->imissed += - ls->rx_queue_empty + + stats->ierrors += ls->rx_dma_error + - ls->rx_queue_disabled + ls->rx_desc_fetch_error + ls->rx_desc_data_error; diff -Nru 
dpdk-20.11.6/drivers/net/ionic/ionic_rxtx.c dpdk-20.11.7/drivers/net/ionic/ionic_rxtx.c --- dpdk-20.11.6/drivers/net/ionic/ionic_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ionic/ionic_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -279,18 +279,20 @@ uint16_t vlan_tci, bool has_vlan, bool start, bool done) { + uint64_t cmd; uint8_t flags = 0; flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; - desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr); - desc->len = len; - desc->vlan_tci = vlan_tci; - desc->hdr_len = hdrlen; - desc->mss = mss; + desc->cmd = rte_cpu_to_le_64(cmd); + desc->len = rte_cpu_to_le_16(len); + desc->vlan_tci = rte_cpu_to_le_16(vlan_tci); + desc->hdr_len = rte_cpu_to_le_16(hdrlen); + desc->mss = rte_cpu_to_le_16(mss); ionic_q_post(q, done, NULL, done ? txm : NULL); } @@ -397,7 +399,7 @@ len = RTE_MIN(frag_left, left); frag_left -= len; elem->addr = next_addr; - elem->len = len; + elem->len = rte_cpu_to_le_16(len); elem++; desc_nsge++; } else { @@ -445,7 +447,7 @@ bool encap; bool has_vlan; uint64_t ol_flags = txm->ol_flags; - uint64_t addr; + uint64_t addr, cmd; uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; uint8_t flags = 0; @@ -477,13 +479,14 @@ addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm)); - desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); - desc->len = txm->data_len; - desc->vlan_tci = txm->vlan_tci; + cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); + desc->cmd = rte_cpu_to_le_64(cmd); + desc->len = rte_cpu_to_le_16(txm->data_len); + desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci); txm_seg = txm->next; while (txm_seg != NULL) { - elem->len = txm_seg->data_len; + elem->len = rte_cpu_to_le_16(txm_seg->data_len); elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg)); stats->frags++; elem++; @@ -791,12 +794,12 @@ /* RSS */ pkt_flags |= PKT_RX_RSS_HASH; - rxm->hash.rss = cq_desc->rss_hash; + rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash); /* Vlan Strip */ if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; - rxm->vlan_tci = cq_desc->vlan_tci; + rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci); } /* Checksum */ diff -Nru dpdk-20.11.6/drivers/net/ixgbe/ixgbe_ethdev.c dpdk-20.11.7/drivers/net/ixgbe/ixgbe_ethdev.c --- dpdk-20.11.6/drivers/net/ixgbe/ixgbe_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ixgbe/ixgbe_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -8033,9 +8033,13 @@ ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int mode = IXGBEVF_XCAST_MODE_NONE; int ret; - switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { + if (dev->data->all_multicast) + mode = IXGBEVF_XCAST_MODE_ALLMULTI; + + switch (hw->mac.ops.update_xcast_mode(hw, mode)) { case IXGBE_SUCCESS: ret = 0; break; @@ -8057,6 +8061,9 @@ int ret; int mode = IXGBEVF_XCAST_MODE_ALLMULTI; + if (dev->data->promiscuous) + return 0; + switch (hw->mac.ops.update_xcast_mode(hw, mode)) { case IXGBE_SUCCESS: ret = 0; @@ -8078,6 +8085,9 @@ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret; + if (dev->data->promiscuous) + return 0; + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { 
case IXGBE_SUCCESS: ret = 0; diff -Nru dpdk-20.11.6/drivers/net/ixgbe/ixgbe_pf.c dpdk-20.11.7/drivers/net/ixgbe/ixgbe_pf.c --- dpdk-20.11.6/drivers/net/ixgbe/ixgbe_pf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/ixgbe/ixgbe_pf.c 2022-12-13 10:50:22.000000000 +0000 @@ -757,9 +757,9 @@ switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; + enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; @@ -781,9 +781,9 @@ return -1; } - disable = 0; + disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -1; diff -Nru dpdk-20.11.6/drivers/net/memif/rte_eth_memif.c dpdk-20.11.7/drivers/net/memif/rte_eth_memif.c --- dpdk-20.11.6/drivers/net/memif/rte_eth_memif.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/memif/rte_eth_memif.c 2022-12-13 10:50:22.000000000 +0000 @@ -1396,8 +1396,8 @@ stats->opackets = 0; stats->obytes = 0; - tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings : - pmd->run.num_s2c_rings; + tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings : + pmd->run.num_c2s_rings; nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : RTE_ETHDEV_QUEUE_STAT_CNTRS; @@ -1410,8 +1410,8 @@ stats->ibytes += mq->n_bytes; } - tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_s2c_rings : - pmd->run.num_c2s_rings; + tmp = (pmd->role == MEMIF_ROLE_CLIENT) ? pmd->run.num_c2s_rings : + pmd->run.num_s2c_rings; nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp : RTE_ETHDEV_QUEUE_STAT_CNTRS; diff -Nru dpdk-20.11.6/drivers/net/mlx4/meson.build dpdk-20.11.7/drivers/net/mlx4/meson.build --- dpdk-20.11.6/drivers/net/mlx4/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx4/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -42,7 +42,7 @@ endforeach if static_ibverbs or dlopen_ibverbs # Build without adding shared libs to Requires.private - ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs', check:true).stdout() + ibv_cflags = run_command(pkgconf, '--cflags', 'libibverbs').stdout() ext_deps += declare_dependency(compile_args: ibv_cflags.split()) endif if static_ibverbs diff -Nru dpdk-20.11.6/drivers/net/mlx4/mlx4.c dpdk-20.11.7/drivers/net/mlx4/mlx4.c --- dpdk-20.11.6/drivers/net/mlx4/mlx4.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx4/mlx4.c 2022-12-13 10:50:22.000000000 +0000 @@ -877,6 +877,8 @@ snprintf(name, sizeof(name), "%s port %u", mlx4_glue->get_device_name(ibv_dev), port); if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + int fd; + eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { ERROR("can not attach rte ethdev"); @@ -899,13 +901,14 @@ if (err) goto err_secondary; /* Receive command fd from primary process. */ - err = mlx4_mp_req_verbs_cmd_fd(eth_dev); - if (err < 0) { + fd = mlx4_mp_req_verbs_cmd_fd(eth_dev); + if (fd < 0) { err = rte_errno; goto err_secondary; } /* Remap UAR for Tx queues. 
*/ - err = mlx4_tx_uar_init_secondary(eth_dev, err); + err = mlx4_tx_uar_init_secondary(eth_dev, fd); + close(fd); if (err) { err = rte_errno; goto err_secondary; diff -Nru dpdk-20.11.6/drivers/net/mlx4/mlx4_mp.c dpdk-20.11.7/drivers/net/mlx4/mlx4_mp.c --- dpdk-20.11.6/drivers/net/mlx4/mlx4_mp.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx4/mlx4_mp.c 2022-12-13 10:50:22.000000000 +0000 @@ -5,6 +5,7 @@ #include <stdio.h> #include <time.h> +#include <unistd.h> #include <rte_eal.h> #include <rte_ethdev_driver.h> @@ -134,15 +135,19 @@ mlx4_tx_uar_uninit_secondary(dev); mlx4_proc_priv_uninit(dev); ret = mlx4_proc_priv_init(dev); - if (ret) + if (ret) { + close(mp_msg->fds[0]); return -rte_errno; + } ret = mlx4_tx_uar_init_secondary(dev, mp_msg->fds[0]); if (ret) { + close(mp_msg->fds[0]); mlx4_proc_priv_uninit(dev); return -rte_errno; } } #endif + close(mp_msg->fds[0]); rte_mb(); mp_init_msg(dev, &mp_res, param->type); res->result = 0; diff -Nru dpdk-20.11.6/drivers/net/mlx5/linux/mlx5_mp_os.c dpdk-20.11.7/drivers/net/mlx5/linux/mlx5_mp_os.c --- dpdk-20.11.6/drivers/net/mlx5/linux/mlx5_mp_os.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/linux/mlx5_mp_os.c 2022-12-13 10:50:22.000000000 +0000 @@ -138,14 +138,18 @@ mlx5_tx_uar_uninit_secondary(dev); mlx5_proc_priv_uninit(dev); ret = mlx5_proc_priv_init(dev); - if (ret) + if (ret) { + close(mp_msg->fds[0]); return -rte_errno; + } ret = mlx5_tx_uar_init_secondary(dev, mp_msg->fds[0]); if (ret) { + close(mp_msg->fds[0]); mlx5_proc_priv_uninit(dev); return -rte_errno; } } + close(mp_msg->fds[0]); rte_mb(); mp_init_msg(&priv->mp_id, &mp_res, param->type); res->result = 0; diff -Nru dpdk-20.11.6/drivers/net/mlx5/linux/mlx5_os.c dpdk-20.11.7/drivers/net/mlx5/linux/mlx5_os.c --- dpdk-20.11.6/drivers/net/mlx5/linux/mlx5_os.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/linux/mlx5_os.c 2022-12-13 10:50:22.000000000 +0000 @@ -813,6 +813,7 @@ DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); if (rte_eal_process_type() == RTE_PROC_SECONDARY) { struct mlx5_mp_id mp_id; + int fd; eth_dev = rte_eth_dev_attach_secondary(name); if (eth_dev == NULL) { @@ -836,11 +837,12 @@ mp_id.port_id = eth_dev->data->port_id; strlcpy(mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); /* Receive command fd from primary process */ - err = mlx5_mp_req_verbs_cmd_fd(&mp_id); - if (err < 0) + fd = mlx5_mp_req_verbs_cmd_fd(&mp_id); + if (fd < 0) goto err_secondary; /* Remap UAR for Tx queues. */ - err = mlx5_tx_uar_init_secondary(eth_dev, err); + err = mlx5_tx_uar_init_secondary(eth_dev, fd); + close(fd); if (err) goto err_secondary; /* @@ -1638,6 +1640,9 @@ return eth_dev; error: if (priv) { + priv->sh->port[priv->dev_port - 1].nl_ih_port_id = + RTE_MAX_ETHPORTS; + rte_io_wmb(); if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl); if (priv->sh) diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5.c dpdk-20.11.7/drivers/net/mlx5/mlx5.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5.c 2022-12-13 10:50:22.000000000 +0000 @@ -447,22 +447,38 @@ * * @param[in] sh * Pointer to mlx5_dev_ctx_shared object to free + * + * @return + * 0 on success, otherwise negative errno value and rte_errno is set. 
*/ -static void +static int mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh) { int i; + void *pools; + pools = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(struct mlx5_flow_counter_pool *) * + MLX5_COUNTER_POOLS_MAX_NUM, + 0, SOCKET_ID_ANY); + if (!pools) { + DRV_LOG(ERR, + "Counter management allocation was failed."); + rte_errno = ENOMEM; + return -rte_errno; + } memset(&sh->cmng, 0, sizeof(sh->cmng)); TAILQ_INIT(&sh->cmng.flow_counters); sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET; sh->cmng.max_id = -1; sh->cmng.last_pool_idx = POOL_IDX_INVALID; + sh->cmng.pools = pools; rte_spinlock_init(&sh->cmng.pool_update_sl); for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) { TAILQ_INIT(&sh->cmng.counters[i]); rte_spinlock_init(&sh->cmng.csl[i]); } + return 0; } /** @@ -520,8 +536,7 @@ claim_zero (mlx5_flow_os_destroy_flow_action (cnt->action)); - if (fallback && MLX5_POOL_GET_CNT - (pool, j)->dcs_when_free) + if (fallback && cnt->dcs_when_free) claim_zero(mlx5_devx_cmd_destroy (cnt->dcs_when_free)); } @@ -1002,8 +1017,12 @@ err = rte_errno; goto error; } + err = mlx5_flow_counters_mng_init(sh); + if (err) { + DRV_LOG(ERR, "Fail to initialize counters manage."); + goto error; + } mlx5_flow_aging_init(sh); - mlx5_flow_counters_mng_init(sh); mlx5_flow_ipool_create(sh, config); /* Add device to memory callback list. */ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); @@ -1409,6 +1428,12 @@ DRV_LOG(WARNING, "port %u some flows still remain", dev->data->port_id); mlx5_cache_list_destroy(&priv->hrxqs); + priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS; + /* + * The interrupt handler port id must be reset before priv is reset + * since 'mlx5_dev_interrupt_nl_cb' uses priv. + */ + rte_io_wmb(); /* * Free the shared context in last turn, because the cleanup * routines above may use some shared fields, like diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5.h dpdk-20.11.7/drivers/net/mlx5/mlx5.h --- dpdk-20.11.6/drivers/net/mlx5/mlx5.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5.h 2022-12-13 10:50:22.000000000 +0000 @@ -280,9 +280,10 @@ uint16_t refcnt; /* Reference count for representors. */ }; +#define MLX5_COUNTER_POOLS_MAX_NUM (1 << 15) #define MLX5_COUNTERS_PER_POOL 512 #define MLX5_MAX_PENDING_QUERIES 4 -#define MLX5_CNT_CONTAINER_RESIZE 64 +#define MLX5_CNT_MR_ALLOC_BULK 64 #define MLX5_CNT_SHARED_OFFSET 0x80000000 #define IS_SHARED_CNT(cnt) (!!((cnt) & MLX5_CNT_SHARED_OFFSET)) #define IS_BATCH_CNT(cnt) (((cnt) & (MLX5_CNT_SHARED_OFFSET - 1)) >= \ @@ -442,7 +443,6 @@ /* Counter global management structure. */ struct mlx5_flow_counter_mng { volatile uint16_t n_valid; /* Number of valid pools. */ - uint16_t n; /* Number of pools. */ uint16_t last_pool_idx; /* Last used pool index */ int min_id; /* The minimum counter ID in the pools. */ int max_id; /* The maximum counter ID in the pools. */ @@ -514,6 +514,7 @@ }; #define MLX5_ASO_AGE_ACTIONS_PER_POOL 512 +#define MLX5_ASO_AGE_CONTAINER_RESIZE 64 struct mlx5_aso_age_pool { struct mlx5_devx_obj *flow_hit_aso_obj; @@ -980,7 +981,7 @@ uint32_t refcnt; /**< Reference counter. */ /**< Verbs modify header action object. */ uint8_t ft_type; /**< Flow table type, Rx or Tx. */ - uint8_t max_lro_msg_size; + uint32_t max_lro_msg_size; /* Tags resources cache. */ uint32_t link_speed_capa; /* Link speed capabilities. */ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. 
*/ diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_devx.c dpdk-20.11.7/drivers/net/mlx5/mlx5_devx.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_devx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_devx.c 2022-12-13 10:50:22.000000000 +0000 @@ -905,7 +905,8 @@ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; if (lro) { tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout; - tir_attr->lro_max_msg_sz = priv->max_lro_msg_size; + tir_attr->lro_max_msg_sz = + priv->max_lro_msg_size / MLX5_LRO_SEG_CHUNK_SIZE; tir_attr->lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_flow.c dpdk-20.11.7/drivers/net/mlx5/mlx5_flow.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_flow.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_flow.c 2022-12-13 10:50:22.000000000 +0000 @@ -2217,7 +2217,7 @@ RTE_FLOW_ERROR_TYPE_ITEM, item, "IPv4 cannot follow L2/VLAN layer " "which ether type is not IPv4"); - if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { + if (item_flags & MLX5_FLOW_LAYER_IPIP) { if (mask && spec) next_proto = mask->hdr.next_proto_id & spec->hdr.next_proto_id; @@ -2325,7 +2325,7 @@ "which ether type is not IPv6"); if (mask && mask->hdr.proto == UINT8_MAX && spec) next_proto = spec->hdr.proto; - if (item_flags & MLX5_FLOW_LAYER_TUNNEL) { + if (item_flags & MLX5_FLOW_LAYER_IPIP) { if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -3729,6 +3729,7 @@ int queue_action = 0; int action_n = 0; int split = 0; + int push_vlan = 0; const struct rte_flow_action_queue *queue; const struct rte_flow_action_rss *rss; const struct rte_flow_action_raw_encap *raw_encap; @@ -3737,6 +3738,8 @@ if (!attr->ingress) return 0; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) + push_vlan = 1; switch (actions->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: queue = actions->conf; @@ -3761,11 +3764,15 @@ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: - case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: split++; action_n++; break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + if (push_vlan) + split++; + action_n++; + break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap = actions->conf; if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) @@ -4177,19 +4184,32 @@ struct mlx5_rte_flow_item_tag *tag_item; struct rte_flow_item *item; char *addr; + int push_vlan = 0; int encap = 0; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + if (actions->type == RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) + push_vlan = 1; switch (actions->type) { case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: - case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP: rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); actions_tx++; break; + case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: + if (push_vlan) { + rte_memcpy(actions_tx, actions, + sizeof(struct rte_flow_action)); + actions_tx++; + } else { + rte_memcpy(actions_rx, actions, + sizeof(struct rte_flow_action)); + actions_rx++; + } + break; case RTE_FLOW_ACTION_TYPE_COUNT: if (encap) { rte_memcpy(actions_tx, actions, @@ -5478,7 +5498,7 @@ int shared_actions_n = MLX5_MAX_SHARED_ACTIONS; union { struct mlx5_flow_expand_rss buf; - 
uint8_t buffer[4096]; + uint8_t buffer[8192]; } expand_buffer; union { struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS]; @@ -5691,8 +5711,8 @@ rte_errno = ret; /* Restore rte_errno. */ ret = rte_errno; rte_errno = ret; - mlx5_flow_pop_thread_workspace(); error_before_hairpin_split: + mlx5_flow_pop_thread_workspace(); rte_free(translated_actions); return 0; } @@ -6647,7 +6667,7 @@ { struct mlx5_counter_stats_mem_mng *mem_mng; volatile struct flow_counter_stats *raw_data; - int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES; + int raws_n = MLX5_CNT_MR_ALLOC_BULK + MLX5_MAX_PENDING_QUERIES; int size = (sizeof(struct flow_counter_stats) * MLX5_COUNTERS_PER_POOL + sizeof(struct mlx5_counter_stats_raw)) * raws_n + @@ -6685,7 +6705,7 @@ } for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i) LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, - mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i, + mem_mng->raws + MLX5_CNT_MR_ALLOC_BULK + i, next); LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next); sh->cmng.mem_mng = mem_mng; @@ -6709,14 +6729,13 @@ { struct mlx5_flow_counter_mng *cmng = &sh->cmng; /* Resize statistic memory once used out. */ - if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) && + if (!(pool->index % MLX5_CNT_MR_ALLOC_BULK) && mlx5_flow_create_counter_stat_mem_mng(sh)) { DRV_LOG(ERR, "Cannot resize counter stat mem."); return -1; } rte_spinlock_lock(&pool->sl); - pool->raw = cmng->mem_mng->raws + pool->index % - MLX5_CNT_CONTAINER_RESIZE; + pool->raw = cmng->mem_mng->raws + pool->index % MLX5_CNT_MR_ALLOC_BULK; rte_spinlock_unlock(&pool->sl); pool->raw_hw = NULL; return 0; @@ -6758,13 +6777,13 @@ mlx5_flow_query_alarm(void *arg) { struct mlx5_dev_ctx_shared *sh = arg; - int ret; - uint16_t pool_index = sh->cmng.pool_index; struct mlx5_flow_counter_mng *cmng = &sh->cmng; + uint16_t pool_index = cmng->pool_index; struct mlx5_flow_counter_pool *pool; uint16_t n_valid; + int ret; - if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES) + if (cmng->pending_queries >= MLX5_MAX_PENDING_QUERIES) goto set_alarm; rte_spinlock_lock(&cmng->pool_update_sl); pool = cmng->pools[pool_index]; @@ -6776,8 +6795,7 @@ if (pool->raw_hw) /* There is a pool query in progress. */ goto set_alarm; - pool->raw_hw = - LIST_FIRST(&sh->cmng.free_stat_raws); + pool->raw_hw = LIST_FIRST(&cmng->free_stat_raws); if (!pool->raw_hw) /* No free counter statistics raw memory. 
*/ goto set_alarm; @@ -6803,12 +6821,12 @@ goto set_alarm; } LIST_REMOVE(pool->raw_hw, next); - sh->cmng.pending_queries++; + cmng->pending_queries++; pool_index++; if (pool_index >= n_valid) pool_index = 0; set_alarm: - sh->cmng.pool_index = pool_index; + cmng->pool_index = pool_index; mlx5_set_query_alarm(sh); } diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_flow.h dpdk-20.11.7/drivers/net/mlx5/mlx5_flow.h --- dpdk-20.11.6/drivers/net/mlx5/mlx5_flow.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_flow.h 2022-12-13 10:50:22.000000000 +0000 @@ -217,6 +217,9 @@ #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37) #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38) +#define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \ + (MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE) + #define MLX5_FLOW_FATE_ACTIONS \ (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \ MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \ diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_flow_dv.c dpdk-20.11.7/drivers/net/mlx5/mlx5_flow_dv.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_flow_dv.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_flow_dv.c 2022-12-13 10:50:22.000000000 +0000 @@ -104,6 +104,7 @@ struct mlx5_flow *dev_flow, bool tunnel_decap) { uint64_t layers = dev_flow->handle->layers; + bool tunnel_match = false; /* * If layers is already initialized, it means this dev_flow is the @@ -112,6 +113,13 @@ * have the user defined items as the flow is split. */ if (layers) { + if (tunnel_decap) { + /* + * If decap action before modify, it means the driver + * should take the inner as outer for the modify actions. + */ + layers = ((layers >> 6) & MLX5_FLOW_LAYER_OUTER); + } if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4) attr->ipv4 = 1; else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6) @@ -133,8 +141,10 @@ case RTE_FLOW_ITEM_TYPE_GENEVE: case RTE_FLOW_ITEM_TYPE_MPLS: case RTE_FLOW_ITEM_TYPE_GTP: - if (tunnel_decap) + if (tunnel_decap) { attr->attr = 0; + tunnel_match = true; + } break; case RTE_FLOW_ITEM_TYPE_IPV4: if (!attr->ipv6) @@ -148,7 +158,8 @@ ((const struct rte_flow_item_ipv4 *) (item->mask))->hdr.next_proto_id; if ((next_protocol == IPPROTO_IPIP || - next_protocol == IPPROTO_IPV6) && tunnel_decap) + next_protocol == IPPROTO_IPV6) && tunnel_decap && + !tunnel_match) attr->attr = 0; break; case RTE_FLOW_ITEM_TYPE_IPV6: @@ -163,7 +174,8 @@ ((const struct rte_flow_item_ipv6 *) (item->mask))->hdr.proto; if ((next_protocol == IPPROTO_IPIP || - next_protocol == IPPROTO_IPV6) && tunnel_decap) + next_protocol == IPPROTO_IPV6) && tunnel_decap && + !tunnel_match) attr->attr = 0; break; case RTE_FLOW_ITEM_TYPE_UDP: @@ -4638,7 +4650,7 @@ /* Decrease to original index and clear shared bit. */ idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); - MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n); + MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < MLX5_COUNTER_POOLS_MAX_NUM); pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; MLX5_ASSERT(pool); if (ppool) @@ -4715,39 +4727,6 @@ } /** - * Resize a counter container. - * - * @param[in] dev - * Pointer to the Ethernet device structure. - * - * @return - * 0 on success, otherwise negative errno value and rte_errno is set. 
- */ -static int -flow_dv_container_resize(struct rte_eth_dev *dev) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; - void *old_pools = cmng->pools; - uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE; - uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize; - void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); - - if (!pools) { - rte_errno = ENOMEM; - return -ENOMEM; - } - if (old_pools) - memcpy(pools, old_pools, cmng->n * - sizeof(struct mlx5_flow_counter_pool *)); - cmng->n = resize; - cmng->pools = pools; - if (old_pools) - mlx5_free(old_pools); - return 0; -} - -/** * Query a devx flow counter. * * @param[in] dev @@ -4798,8 +4777,6 @@ * The devX counter handle. * @param[in] age * Whether the pool is for counter that was allocated for aging. - * @param[in/out] cont_cur - * Pointer to the container pointer, it will be update in pool resize. * * @return * The pool container pointer on success, NULL otherwise and rte_errno is set. @@ -4811,9 +4788,14 @@ struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter_pool *pool; struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; - bool fallback = priv->sh->cmng.counter_fallback; + bool fallback = cmng->counter_fallback; uint32_t size = sizeof(*pool); + if (cmng->n_valid == MLX5_COUNTER_POOLS_MAX_NUM) { + DRV_LOG(ERR, "All counter is in used, try again later."); + rte_errno = EAGAIN; + return NULL; + } size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE; size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE); pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); @@ -4832,11 +4814,6 @@ pool->time_of_last_age_check = MLX5_CURR_TIME_SEC; rte_spinlock_lock(&cmng->pool_update_sl); pool->index = cmng->n_valid; - if (pool->index == cmng->n && flow_dv_container_resize(dev)) { - mlx5_free(pool); - rte_spinlock_unlock(&cmng->pool_update_sl); - return NULL; - } cmng->pools[pool->index] = pool; cmng->n_valid++; if (unlikely(fallback)) { @@ -6178,18 +6155,18 @@ /* * Validate the drop action mutual exclusion with other actions. * Drop action is mutually-exclusive with any other action, except for - * Count action. + * Count/Sample/Age actions. * Drop action compatibility with tunnel offload was already validated. */ if (action_flags & (MLX5_FLOW_ACTION_TUNNEL_MATCH | MLX5_FLOW_ACTION_TUNNEL_MATCH)); else if ((action_flags & MLX5_FLOW_ACTION_DROP) && - (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) + (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_DROP_INCLUSIVE_ACTIONS))) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Drop action is mutually-exclusive " "with any other action, except for " - "Count action"); + "Count/Sample/Age action"); /* Eswitch has few restrictions on using items and actions */ if (attr->transfer) { if (!mlx5_flow_ext_mreg_supported(dev) && @@ -9521,7 +9498,7 @@ } /** - * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools. + * Resize the ASO age pools array by MLX5_ASO_AGE_CONTAINER_RESIZE pools. * * @param[in] dev * Pointer to the Ethernet device structure. 
@@ -9535,7 +9512,7 @@ struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng; void *old_pools = mng->pools; - uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE; + uint32_t resize = mng->n + MLX5_ASO_AGE_CONTAINER_RESIZE; uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize; void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY); diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_flow_meter.c dpdk-20.11.7/drivers/net/mlx5/mlx5_flow_meter.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_flow_meter.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_flow_meter.c 2022-12-13 10:50:22.000000000 +0000 @@ -942,7 +942,7 @@ fm->profile = fmp; /* Update meter params in HW (if not disabled). */ if (fm->active_state == MLX5_FLOW_METER_DISABLE) - return 0; + goto dec_ref_cnt; ret = mlx5_flow_meter_action_modify(priv, fm, &fm->profile->srtcm_prm, modify_bits, fm->active_state); if (ret) { @@ -952,6 +952,7 @@ NULL, "Failed to update meter" " parameters in hardware."); } +dec_ref_cnt: old_fmp->ref_cnt--; fmp->ref_cnt++; return 0; diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_flow_verbs.c dpdk-20.11.7/drivers/net/mlx5/mlx5_flow_verbs.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_flow_verbs.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_flow_verbs.c 2022-12-13 10:50:22.000000000 +0000 @@ -274,27 +274,14 @@ break; } if (!cnt) { - struct mlx5_flow_counter_pool **pools; uint32_t size; - if (n_valid == cmng->n) { - /* Resize the container pool array. */ - size = sizeof(struct mlx5_flow_counter_pool *) * - (n_valid + MLX5_CNT_CONTAINER_RESIZE); - pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0, - SOCKET_ID_ANY); - if (!pools) - return 0; - if (n_valid) { - memcpy(pools, cmng->pools, - sizeof(struct mlx5_flow_counter_pool *) * - n_valid); - mlx5_free(cmng->pools); - } - cmng->pools = pools; - cmng->n += MLX5_CNT_CONTAINER_RESIZE; + if (n_valid == MLX5_COUNTER_POOLS_MAX_NUM) { + DRV_LOG(ERR, "All counter is in used, try again later."); + rte_errno = EAGAIN; + return 0; } - /* Allocate memory for new pool*/ + /* Allocate memory for new pool */ size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL; pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); if (!pool) diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_rxq.c dpdk-20.11.7/drivers/net/mlx5/mlx5_rxq.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_rxq.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_rxq.c 2022-12-13 10:50:22.000000000 +0000 @@ -1378,8 +1378,6 @@ MLX5_MAX_TCP_HDR_OFFSET) max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET; max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE); - MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE); - max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE; if (priv->max_lro_msg_size) priv->max_lro_msg_size = RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size); @@ -1387,8 +1385,7 @@ priv->max_lro_msg_size = max_lro_size; DRV_LOG(DEBUG, "port %u Rx Queue %u max LRO message size adjusted to %u bytes", - dev->data->port_id, idx, - priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE); + dev->data->port_id, idx, priv->max_lro_msg_size); } /** diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_rxtx.c dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -3152,6 +3152,9 @@ * Pointer to TX queue structure. * @param loc * Pointer to burst routine local context. 
+ * @param elts + * Number of free elements in elts buffer to be checked, for zero + * value the check is optimized out by compiler. * @param olx * Configured Tx offloads mask. It is fully defined at * compile time and may be used for optimization. @@ -3165,6 +3168,7 @@ static __rte_always_inline enum mlx5_txcmp_code mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq, struct mlx5_txq_local *restrict loc, + uint16_t elts, unsigned int olx) { if (MLX5_TXOFF_CONFIG(TXPP) && @@ -3179,7 +3183,7 @@ * to the queue and we won't get the orphan WAIT WQE. */ if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE || - loc->elts_free < NB_SEGS(loc->mbuf)) + loc->elts_free < elts) return MLX5_TXCMP_CODE_EXIT; /* Convert the timestamp into completion to wait. */ ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *); @@ -3226,11 +3230,12 @@ struct mlx5_wqe *__rte_restrict wqe; unsigned int ds, dlen, inlen, ntcp, vlan = 0; + MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf)); if (MLX5_TXOFF_CONFIG(TXPP)) { enum mlx5_txcmp_code wret; /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); + wret = mlx5_tx_schedule_send(txq, loc, 0, olx); if (wret == MLX5_TXCMP_CODE_EXIT) return MLX5_TXCMP_CODE_EXIT; if (wret == MLX5_TXCMP_CODE_ERROR) @@ -3269,7 +3274,7 @@ if (unlikely(loc->wqe_free < ((ds + 3) / 4))) return MLX5_TXCMP_CODE_EXIT; /* Check for maximal WQE size. */ - if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) return MLX5_TXCMP_CODE_ERROR; #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes/packets counters. */ @@ -3326,11 +3331,12 @@ unsigned int ds, nseg; MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); + MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf)); if (MLX5_TXOFF_CONFIG(TXPP)) { enum mlx5_txcmp_code wret; /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); + wret = mlx5_tx_schedule_send(txq, loc, 0, olx); if (wret == MLX5_TXCMP_CODE_EXIT) return MLX5_TXCMP_CODE_EXIT; if (wret == MLX5_TXCMP_CODE_ERROR) @@ -3346,7 +3352,7 @@ if (unlikely(loc->wqe_free < ((ds + 3) / 4))) return MLX5_TXCMP_CODE_EXIT; /* Check for maximal WQE size. */ - if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4))) + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) return MLX5_TXCMP_CODE_ERROR; /* * Some Tx offloads may cause an error if @@ -3444,16 +3450,7 @@ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE)); MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1); - if (MLX5_TXOFF_CONFIG(TXPP)) { - enum mlx5_txcmp_code wret; - - /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); - if (wret == MLX5_TXCMP_CODE_EXIT) - return MLX5_TXCMP_CODE_EXIT; - if (wret == MLX5_TXCMP_CODE_ERROR) - return MLX5_TXCMP_CODE_ERROR; - } + MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf)); /* * First calculate data length to be inlined * to estimate the required space for WQE. @@ -3493,6 +3490,8 @@ } else if (mbuf->ol_flags & PKT_TX_DYNF_NOINLINE || nxlen > txq->inlen_send) { return mlx5_tx_packet_multi_send(txq, loc, olx); + } else if (nxlen <= MLX5_ESEG_MIN_INLINE_SIZE) { + inlen = MLX5_ESEG_MIN_INLINE_SIZE; } else { goto do_first; } @@ -3560,6 +3559,16 @@ * supposing no any mbufs is being freed during inlining. */ do_build: + if (MLX5_TXOFF_CONFIG(TXPP)) { + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. 
*/ + wret = mlx5_tx_schedule_send(txq, loc, 0, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) + return MLX5_TXCMP_CODE_ERROR; + } MLX5_ASSERT(inlen <= txq->inlen_send); ds = NB_SEGS(loc->mbuf) + 2 + (inlen - MLX5_ESEG_MIN_INLINE_SIZE + @@ -3568,8 +3577,24 @@ if (unlikely(loc->wqe_free < ((ds + 3) / 4))) return MLX5_TXCMP_CODE_EXIT; /* Check for maximal WQE size. */ - if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) - return MLX5_TXCMP_CODE_ERROR; + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) { + /* Check if we can adjust the inline length. */ + if (unlikely(txq->inlen_mode)) { + ds = NB_SEGS(loc->mbuf) + 2 + + (txq->inlen_mode - + MLX5_ESEG_MIN_INLINE_SIZE + + MLX5_WSEG_SIZE + + MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE; + if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ds)) + return MLX5_TXCMP_CODE_ERROR; + } + /* We have lucky opportunity to adjust. */ + inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX - + MLX5_WSEG_SIZE * 2 - + MLX5_WSEG_SIZE * NB_SEGS(loc->mbuf) - + MLX5_WSEG_SIZE + + MLX5_ESEG_MIN_INLINE_SIZE); + } #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes/packets counters. */ txq->stats.obytes += dlen + vlan; @@ -3723,7 +3748,7 @@ enum mlx5_txcmp_code wret; /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); + wret = mlx5_tx_schedule_send(txq, loc, 1, olx); if (wret == MLX5_TXCMP_CODE_EXIT) return MLX5_TXCMP_CODE_EXIT; if (wret == MLX5_TXCMP_CODE_ERROR) @@ -4109,16 +4134,6 @@ next_empw: MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); - if (MLX5_TXOFF_CONFIG(TXPP)) { - enum mlx5_txcmp_code wret; - - /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); - if (wret == MLX5_TXCMP_CODE_EXIT) - return MLX5_TXCMP_CODE_EXIT; - if (wret == MLX5_TXCMP_CODE_ERROR) - return MLX5_TXCMP_CODE_ERROR; - } part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? MLX5_MPW_MAX_PACKETS : MLX5_EMPW_MAX_PACKETS); @@ -4129,6 +4144,16 @@ /* But we still able to send at least minimal eMPW. */ part = loc->elts_free; } + if (MLX5_TXOFF_CONFIG(TXPP)) { + enum mlx5_txcmp_code wret; + + /* Generate WAIT for scheduling if requested. */ + wret = mlx5_tx_schedule_send(txq, loc, 0, olx); + if (wret == MLX5_TXCMP_CODE_EXIT) + return MLX5_TXCMP_CODE_EXIT; + if (wret == MLX5_TXCMP_CODE_ERROR) + return MLX5_TXCMP_CODE_ERROR; + } /* Check whether we have enough WQEs */ if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) { if (unlikely(loc->wqe_free < @@ -4285,23 +4310,23 @@ unsigned int slen = 0; MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + /* + * Limits the amount of packets in one WQE + * to improve CQE latency generation. + */ + nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? + MLX5_MPW_INLINE_MAX_PACKETS : + MLX5_EMPW_MAX_PACKETS); if (MLX5_TXOFF_CONFIG(TXPP)) { enum mlx5_txcmp_code wret; /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); + wret = mlx5_tx_schedule_send(txq, loc, nlim, olx); if (wret == MLX5_TXCMP_CODE_EXIT) return MLX5_TXCMP_CODE_EXIT; if (wret == MLX5_TXCMP_CODE_ERROR) return MLX5_TXCMP_CODE_ERROR; } - /* - * Limits the amount of packets in one WQE - * to improve CQE latency generation. - */ - nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ? 
- MLX5_MPW_INLINE_MAX_PACKETS : - MLX5_EMPW_MAX_PACKETS); /* Check whether we have minimal amount WQEs */ if (unlikely(loc->wqe_free < ((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4))) @@ -4570,11 +4595,12 @@ enum mlx5_txcmp_code ret; MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1); + MLX5_ASSERT(loc->elts_free); if (MLX5_TXOFF_CONFIG(TXPP)) { enum mlx5_txcmp_code wret; /* Generate WAIT for scheduling if requested. */ - wret = mlx5_tx_schedule_send(txq, loc, olx); + wret = mlx5_tx_schedule_send(txq, loc, 0, olx); if (wret == MLX5_TXCMP_CODE_EXIT) return MLX5_TXCMP_CODE_EXIT; if (wret == MLX5_TXCMP_CODE_ERROR) @@ -4814,7 +4840,9 @@ * if no inlining is configured, this is done * by calling routine in a batch copy. */ - MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE)); + if (MLX5_TXOFF_CONFIG(INLINE)) + txq->elts[txq->elts_head++ & txq->elts_m] = + loc->mbuf; --loc->elts_free; #ifdef MLX5_PMD_SOFT_COUNTERS /* Update sent data bytes counter. */ diff -Nru dpdk-20.11.6/drivers/net/mlx5/mlx5_trigger.c dpdk-20.11.7/drivers/net/mlx5/mlx5_trigger.c --- dpdk-20.11.6/drivers/net/mlx5/mlx5_trigger.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mlx5/mlx5_trigger.c 2022-12-13 10:50:22.000000000 +0000 @@ -1070,6 +1070,22 @@ else rte_net_mlx5_dynf_inline_mask = 0; if (dev->data->nb_rx_queues > 0) { + uint32_t max_lro_msg_size = priv->max_lro_msg_size; + + if (max_lro_msg_size < MLX5_LRO_SEG_CHUNK_SIZE) { + uint32_t i; + struct mlx5_rxq_ctrl *rxq_ctrl; + + for (i = 0; i != priv->rxqs_n; ++i) { + rxq_ctrl = mlx5_rxq_get(dev, i); + if (rxq_ctrl && rxq_ctrl->rxq.lro) { + DRV_LOG(ERR, "port %u invalid max LRO size", + dev->data->port_id); + rte_errno = EINVAL; + return -rte_errno; + } + } + } ret = mlx5_dev_configure_rss_reta(dev); if (ret) { DRV_LOG(ERR, "port %u reta config failed: %s", diff -Nru dpdk-20.11.6/drivers/net/mvneta/mvneta_rxtx.c dpdk-20.11.7/drivers/net/mvneta/mvneta_rxtx.c --- dpdk-20.11.6/drivers/net/mvneta/mvneta_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/mvneta/mvneta_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -79,6 +79,10 @@ int i, ret; uint16_t nb_desc = *num; + /* To prevent GCC-12 warning. */ + if (unlikely(nb_desc == 0)) + return -1; + ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, nb_desc); if (ret) { MVNETA_LOG(ERR, "Failed to allocate %u mbufs.", nb_desc); diff -Nru dpdk-20.11.6/drivers/net/nfp/nfp_net.c dpdk-20.11.7/drivers/net/nfp/nfp_net.c --- dpdk-20.11.6/drivers/net/nfp/nfp_net.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/nfp/nfp_net.c 2022-12-13 10:50:22.000000000 +0000 @@ -2041,8 +2041,9 @@ struct rte_mbuf *new_mb; uint16_t nb_hold; uint64_t dma_addr; - int avail; + uint16_t avail; + avail = 0; rxq = rx_queue; if (unlikely(rxq == NULL)) { /* @@ -2050,11 +2051,10 @@ * enabled. 
But the queue needs to be configured */ RTE_LOG_DP(ERR, PMD, "RX Bad queue\n"); - return -EINVAL; + return avail; } hw = rxq->hw; - avail = 0; nb_hold = 0; while (avail < nb_pkts) { @@ -2087,8 +2087,6 @@ break; } - nb_hold++; - /* * Grab the mbuf and refill the descriptor with the * previously allocated mbuf @@ -2120,7 +2118,8 @@ hw->rx_offset, rxq->mbuf_size - hw->rx_offset, mb->data_len); - return -EINVAL; + rte_pktmbuf_free(mb); + break; } /* Filling the received mbuf with packet info */ @@ -2158,6 +2157,7 @@ rxds->fld.dd = 0; rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff; rxds->fld.dma_addr_lo = dma_addr & 0xffffffff; + nb_hold++; rxq->rd_p++; if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/ diff -Nru dpdk-20.11.6/drivers/net/nfp/nfpcore/nfp_hwinfo.c dpdk-20.11.7/drivers/net/nfp/nfpcore/nfp_hwinfo.c --- dpdk-20.11.6/drivers/net/nfp/nfpcore/nfp_hwinfo.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/nfp/nfpcore/nfp_hwinfo.c 2022-12-13 10:50:22.000000000 +0000 @@ -108,7 +108,7 @@ goto exit_free; header = (void *)db; - printf("NFP HWINFO header: %08x\n", *(uint32_t *)header); + printf("NFP HWINFO header: %#08x\n", *(uint32_t *)header); if (nfp_hwinfo_is_updating(header)) goto exit_free; diff -Nru dpdk-20.11.6/drivers/net/qede/base/ecore_init_fw_funcs.c dpdk-20.11.7/drivers/net/qede/base/ecore_init_fw_funcs.c --- dpdk-20.11.6/drivers/net/qede/base/ecore_init_fw_funcs.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/qede/base/ecore_init_fw_funcs.c 2022-12-13 10:50:22.000000000 +0000 @@ -1416,7 +1416,7 @@ u32 i; \ for (i = 0; i < (arr_size); i++) \ ecore_wr(dev, ptt, ((addr) + (4 * i)), \ - ((u32 *)&(arr))[i]); \ + ((u32 *)(arr))[i]); \ } while (0) #ifndef DWORDS_TO_BYTES diff -Nru dpdk-20.11.6/drivers/net/qede/base/ecore_int.c dpdk-20.11.7/drivers/net/qede/base/ecore_int.c --- dpdk-20.11.6/drivers/net/qede/base/ecore_int.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/qede/base/ecore_int.c 2022-12-13 10:50:22.000000000 +0000 @@ -366,7 +366,7 @@ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) - DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp); + DP_NOTICE(p_hwfn, false, "ICPL error - %08x\n", tmp); tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { @@ -378,7 +378,7 @@ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); DP_NOTICE(p_hwfn, false, - "ICPL erorr - %08x [Address %08x:%08x]\n", + "ICPL error - %08x [Address %08x:%08x]\n", tmp, addr_hi, addr_lo); } diff -Nru dpdk-20.11.6/drivers/net/qede/qede_rxtx.c dpdk-20.11.7/drivers/net/qede/qede_rxtx.c --- dpdk-20.11.6/drivers/net/qede/qede_rxtx.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/qede/qede_rxtx.c 2022-12-13 10:50:22.000000000 +0000 @@ -714,9 +714,10 @@ { uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); - struct eth_rx_prod_data rx_prods = { 0 }; + struct eth_rx_prod_data rx_prods; /* Update producers */ + memset(&rx_prods, 0, sizeof(rx_prods)); rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod); rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod); diff -Nru dpdk-20.11.6/drivers/net/tap/tap_flow.c dpdk-20.11.7/drivers/net/tap/tap_flow.c --- dpdk-20.11.6/drivers/net/tap/tap_flow.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/tap/tap_flow.c 2022-12-13 10:50:22.000000000 +0000 @@ -1684,7 +1684,7 @@ struct rte_flow_item *items = 
implicit_rte_flows[idx].items; struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr; struct rte_flow_item_eth eth_local = { .type = 0 }; - uint16_t if_index = pmd->remote_if_index; + unsigned int if_index = pmd->remote_if_index; struct rte_flow *remote_flow = NULL; struct nlmsg *msg = NULL; int err = 0; diff -Nru dpdk-20.11.6/drivers/net/tap/tap_tcmsgs.c dpdk-20.11.7/drivers/net/tap/tap_tcmsgs.c --- dpdk-20.11.6/drivers/net/tap/tap_tcmsgs.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/tap/tap_tcmsgs.c 2022-12-13 10:50:22.000000000 +0000 @@ -19,7 +19,7 @@ struct list_args { int nlsk_fd; - uint16_t ifindex; + unsigned int ifindex; void *custom_arg; }; @@ -42,7 +42,7 @@ * Overrides the default netlink flags for this msg with those specified. */ void -tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, uint16_t flags) +tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t flags) { struct nlmsghdr *n = &msg->nh; @@ -70,7 +70,7 @@ * 0 on success, -1 otherwise with errno set. */ static int -qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo) +qdisc_del(int nlsk_fd, unsigned int ifindex, struct qdisc *qinfo) { struct nlmsg msg; int fd = 0; @@ -114,7 +114,7 @@ * 0 on success, -1 otherwise with errno set. */ int -qdisc_add_multiq(int nlsk_fd, uint16_t ifindex) +qdisc_add_multiq(int nlsk_fd, unsigned int ifindex) { struct tc_multiq_qopt opt = {0}; struct nlmsg msg; @@ -144,7 +144,7 @@ * 0 on success, -1 otherwise with errno set. */ int -qdisc_add_ingress(int nlsk_fd, uint16_t ifindex) +qdisc_add_ingress(int nlsk_fd, unsigned int ifindex) { struct nlmsg msg; @@ -208,7 +208,7 @@ * 0 on success, -1 otherwise with errno set. */ static int -qdisc_iterate(int nlsk_fd, uint16_t ifindex, +qdisc_iterate(int nlsk_fd, unsigned int ifindex, int (*callback)(struct nlmsghdr *, void *), void *arg) { struct nlmsg msg; @@ -238,7 +238,7 @@ * 0 on success, -1 otherwise with errno set. */ int -qdisc_flush(int nlsk_fd, uint16_t ifindex) +qdisc_flush(int nlsk_fd, unsigned int ifindex) { return qdisc_iterate(nlsk_fd, ifindex, qdisc_del_cb, NULL); } @@ -256,7 +256,7 @@ * Return -1 otherwise. */ int -qdisc_create_multiq(int nlsk_fd, uint16_t ifindex) +qdisc_create_multiq(int nlsk_fd, unsigned int ifindex) { int err = 0; @@ -282,7 +282,7 @@ * Return -1 otherwise. 
*/ int -qdisc_create_ingress(int nlsk_fd, uint16_t ifindex) +qdisc_create_ingress(int nlsk_fd, unsigned int ifindex) { int err = 0; diff -Nru dpdk-20.11.6/drivers/net/tap/tap_tcmsgs.h dpdk-20.11.7/drivers/net/tap/tap_tcmsgs.h --- dpdk-20.11.6/drivers/net/tap/tap_tcmsgs.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/tap/tap_tcmsgs.h 2022-12-13 10:50:22.000000000 +0000 @@ -24,14 +24,14 @@ #define MULTIQ_MAJOR_HANDLE (1 << 16) -void tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, +void tc_init_msg(struct nlmsg *msg, unsigned int ifindex, uint16_t type, uint16_t flags); -int qdisc_list(int nlsk_fd, uint16_t ifindex); -int qdisc_flush(int nlsk_fd, uint16_t ifindex); -int qdisc_create_ingress(int nlsk_fd, uint16_t ifindex); -int qdisc_create_multiq(int nlsk_fd, uint16_t ifindex); -int qdisc_add_ingress(int nlsk_fd, uint16_t ifindex); -int qdisc_add_multiq(int nlsk_fd, uint16_t ifindex); -int filter_list_ingress(int nlsk_fd, uint16_t ifindex); +int qdisc_list(int nlsk_fd, unsigned int ifindex); +int qdisc_flush(int nlsk_fd, unsigned int ifindex); +int qdisc_create_ingress(int nlsk_fd, unsigned int ifindex); +int qdisc_create_multiq(int nlsk_fd, unsigned int ifindex); +int qdisc_add_ingress(int nlsk_fd, unsigned int ifindex); +int qdisc_add_multiq(int nlsk_fd, unsigned int ifindex); +int filter_list_ingress(int nlsk_fd, unsigned int ifindex); #endif /* _TAP_TCMSGS_H_ */ diff -Nru dpdk-20.11.6/drivers/net/txgbe/base/txgbe_eeprom.c dpdk-20.11.7/drivers/net/txgbe/base/txgbe_eeprom.c --- dpdk-20.11.6/drivers/net/txgbe/base/txgbe_eeprom.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/txgbe/base/txgbe_eeprom.c 2022-12-13 10:50:22.000000000 +0000 @@ -110,37 +110,6 @@ status = 0; } - /* Now get the semaphore between SW/FW through the SWESMBI bit */ - if (status == 0) { - for (i = 0; i < timeout; i++) { - /* Set the SW EEPROM semaphore bit to request access */ - wr32m(hw, TXGBE_MNGSWSYNC, - TXGBE_MNGSWSYNC_REQ, TXGBE_MNGSWSYNC_REQ); - - /* - * If we set the bit successfully then we got the - * semaphore. 
- */ - swsm = rd32(hw, TXGBE_MNGSWSYNC); - if (swsm & TXGBE_MNGSWSYNC_REQ) - break; - - usec_delay(50); - } - - /* - * Release semaphores and return error if SW EEPROM semaphore - * was not granted because we don't have access to the EEPROM - */ - if (i >= timeout) { - DEBUGOUT("SWESMBI Software EEPROM semaphore not granted."); - txgbe_release_eeprom_semaphore(hw); - status = TXGBE_ERR_EEPROM; - } - } else { - DEBUGOUT("Software semaphore SMBI between device drivers not granted."); - } - return status; } @@ -152,7 +121,6 @@ **/ void txgbe_release_eeprom_semaphore(struct txgbe_hw *hw) { - wr32m(hw, TXGBE_MNGSWSYNC, TXGBE_MNGSWSYNC_REQ, 0); wr32m(hw, TXGBE_SWSEM, TXGBE_SWSEM_PF, 0); txgbe_flush(hw); } diff -Nru dpdk-20.11.6/drivers/net/txgbe/base/txgbe_type.h dpdk-20.11.7/drivers/net/txgbe/base/txgbe_type.h --- dpdk-20.11.6/drivers/net/txgbe/base/txgbe_type.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/txgbe/base/txgbe_type.h 2022-12-13 10:50:22.000000000 +0000 @@ -285,9 +285,9 @@ u64 tx_management_packets; u64 rx_management_dropped; u64 rx_dma_drop; - u64 rx_drop_packets; /* Basic Error */ + u64 rx_rdb_drop; u64 rx_crc_errors; u64 rx_illegal_byte_errors; u64 rx_error_bytes; @@ -295,7 +295,7 @@ u64 rx_length_errors; u64 rx_undersize_errors; u64 rx_fragment_errors; - u64 rx_oversize_errors; + u64 rx_oversize_cnt; u64 rx_jabber_errors; u64 rx_l3_l4_xsum_error; u64 mac_local_errors; diff -Nru dpdk-20.11.6/drivers/net/txgbe/txgbe_ethdev.c dpdk-20.11.7/drivers/net/txgbe/txgbe_ethdev.c --- dpdk-20.11.6/drivers/net/txgbe/txgbe_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/txgbe/txgbe_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -173,8 +173,10 @@ HW_XSTAT(rx_management_packets), HW_XSTAT(tx_management_packets), HW_XSTAT(rx_management_dropped), + HW_XSTAT(rx_dma_drop), /* Basic Error */ + HW_XSTAT(rx_rdb_drop), HW_XSTAT(rx_crc_errors), HW_XSTAT(rx_illegal_byte_errors), HW_XSTAT(rx_error_bytes), @@ -182,7 +184,7 @@ HW_XSTAT(rx_length_errors), HW_XSTAT(rx_undersize_errors), HW_XSTAT(rx_fragment_errors), - HW_XSTAT(rx_oversize_errors), + HW_XSTAT(rx_oversize_cnt), HW_XSTAT(rx_jabber_errors), HW_XSTAT(rx_l3_l4_xsum_error), HW_XSTAT(mac_local_errors), @@ -1910,7 +1912,7 @@ hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL); hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL); hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP); - hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP); + hw_stats->rx_rdb_drop += rd32(hw, TXGBE_PBRXDROP); /* MAC Stats */ hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL); @@ -1942,7 +1944,7 @@ rd64(hw, TXGBE_MACTX1024TOMAXL); hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL); - hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE); + hw_stats->rx_oversize_cnt += rd32(hw, TXGBE_MACRXOVERSIZE); hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER); /* MNG Stats */ @@ -2064,8 +2066,7 @@ hw_stats->rx_mac_short_packet_dropped + hw_stats->rx_length_errors + hw_stats->rx_undersize_errors + - hw_stats->rx_oversize_errors + - hw_stats->rx_drop_packets + + hw_stats->rx_rdb_drop + hw_stats->rx_illegal_byte_errors + hw_stats->rx_error_bytes + hw_stats->rx_fragment_errors + diff -Nru dpdk-20.11.6/drivers/net/virtio/virtio_ethdev.c dpdk-20.11.7/drivers/net/virtio/virtio_ethdev.c --- dpdk-20.11.6/drivers/net/virtio/virtio_ethdev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/virtio/virtio_ethdev.c 2022-12-13 10:50:22.000000000 +0000 @@ -2356,6 +2356,13 @@ return ret; } + /* if queues are not 
allocated, reinit the device */ + if (hw->vqs == NULL) { + ret = virtio_init_device(dev, hw->req_guest_features); + if (ret < 0) + return ret; + } + if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM)) && !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) { diff -Nru dpdk-20.11.6/drivers/net/virtio/virtqueue.h dpdk-20.11.7/drivers/net/virtio/virtqueue.h --- dpdk-20.11.6/drivers/net/virtio/virtqueue.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/net/virtio/virtqueue.h 2022-12-13 10:50:22.000000000 +0000 @@ -450,10 +450,6 @@ } /** - * Dump virtqueue internal structures, for debug purpose only. - */ -void virtqueue_dump(struct virtqueue *vq); -/** * Get all mbufs to be freed. */ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq); diff -Nru dpdk-20.11.6/drivers/vdpa/ifc/ifcvf_vdpa.c dpdk-20.11.7/drivers/vdpa/ifc/ifcvf_vdpa.c --- dpdk-20.11.6/drivers/vdpa/ifc/ifcvf_vdpa.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/drivers/vdpa/ifc/ifcvf_vdpa.c 2022-12-13 10:50:22.000000000 +0000 @@ -894,7 +894,12 @@ internal = list->internal; internal->vid = vid; rte_atomic32_set(&internal->dev_attached, 1); - update_datapath(internal); + if (update_datapath(internal) < 0) { + DRV_LOG(ERR, "failed to update datapath for vDPA device %s", + vdev->device->name); + rte_atomic32_set(&internal->dev_attached, 0); + return -1; + } if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0) DRV_LOG(NOTICE, "vDPA (%s): software relay is used.", @@ -936,7 +941,12 @@ internal->sw_fallback_running = false; } else { rte_atomic32_set(&internal->dev_attached, 0); - update_datapath(internal); + if (update_datapath(internal) < 0) { + DRV_LOG(ERR, "failed to update datapath for vDPA device %s", + vdev->device->name); + internal->configured = 0; + return -1; + } } internal->configured = 0; @@ -1255,7 +1265,15 @@ pthread_mutex_unlock(&internal_list_lock); rte_atomic32_set(&internal->started, 1); - update_datapath(internal); + if (update_datapath(internal) < 0) { + DRV_LOG(ERR, "failed to update datapath %s", pci_dev->name); + rte_atomic32_set(&internal->started, 0); + rte_vdpa_unregister_device(internal->vdev); + pthread_mutex_lock(&internal_list_lock); + TAILQ_REMOVE(&internal_list, list, next); + pthread_mutex_unlock(&internal_list_lock); + goto error; + } rte_kvargs_free(kvlist); return 0; @@ -1284,7 +1302,8 @@ internal = list->internal; rte_atomic32_set(&internal->started, 0); - update_datapath(internal); + if (update_datapath(internal) < 0) + DRV_LOG(ERR, "failed to update datapath %s", pci_dev->name); rte_pci_unmap_device(internal->pdev); rte_vfio_container_destroy(internal->vfio_container_fd); diff -Nru dpdk-20.11.6/examples/fips_validation/main.c dpdk-20.11.7/examples/fips_validation/main.c --- dpdk-20.11.6/examples/fips_validation/main.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/fips_validation/main.c 2022-12-13 10:50:22.000000000 +0000 @@ -848,7 +848,7 @@ if (rte_cryptodev_sym_capability_check_auth(cap, auth_xform->key.length, auth_xform->digest_length, 0) != 0) { - RTE_LOG(ERR, USER1, "PMD %s key length %u IV length %u\n", + RTE_LOG(ERR, USER1, "PMD %s key length %u Digest length %u\n", info.device_name, auth_xform->key.length, auth_xform->digest_length); return -EPERM; @@ -977,7 +977,7 @@ if (rte_cryptodev_sym_capability_check_auth(cap, auth_xform->key.length, auth_xform->digest_length, 0) != 0) { - RTE_LOG(ERR, USER1, "PMD %s key length %u IV length %u\n", + RTE_LOG(ERR, USER1, "PMD %s key length %u Digest length %u\n", 
info.device_name, auth_xform->key.length, auth_xform->digest_length); return -EPERM; diff -Nru dpdk-20.11.6/examples/ipsec-secgw/ipsec-secgw.c dpdk-20.11.7/examples/ipsec-secgw/ipsec-secgw.c --- dpdk-20.11.6/examples/ipsec-secgw/ipsec-secgw.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/ipsec-secgw/ipsec-secgw.c 2022-12-13 10:50:22.000000000 +0000 @@ -2262,12 +2262,6 @@ qconf = &lcore_conf[lcore_id]; qconf->tx_queue_id[portid] = tx_queueid; - /* Pre-populate pkt offloads based on capabilities */ - qconf->outbound.ipv4_offloads = PKT_TX_IPV4; - qconf->outbound.ipv6_offloads = PKT_TX_IPV6; - if (local_port_conf.txmode.offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) - qconf->outbound.ipv4_offloads |= PKT_TX_IP_CKSUM; - tx_queueid++; /* init RX queues */ @@ -2807,6 +2801,7 @@ uint64_t req_rx_offloads[RTE_MAX_ETHPORTS]; uint64_t req_tx_offloads[RTE_MAX_ETHPORTS]; struct eh_conf *eh_conf = NULL; + uint32_t ipv4_cksum_port_mask = 0; size_t sess_sz; nb_bufs_in_pool = 0; @@ -2912,6 +2907,20 @@ &req_tx_offloads[portid]); port_init(portid, req_rx_offloads[portid], req_tx_offloads[portid]); + if ((req_tx_offloads[portid] & DEV_TX_OFFLOAD_IPV4_CKSUM)) + ipv4_cksum_port_mask |= 1U << portid; + } + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + /* Pre-populate pkt offloads based on capabilities */ + lcore_conf[lcore_id].outbound.ipv4_offloads = PKT_TX_IPV4; + lcore_conf[lcore_id].outbound.ipv6_offloads = PKT_TX_IPV6; + /* Update per lcore checksum offload support only if all ports support it */ + if (ipv4_cksum_port_mask == enabled_port_mask) + lcore_conf[lcore_id].outbound.ipv4_offloads |= PKT_TX_IP_CKSUM; } /* diff -Nru dpdk-20.11.6/examples/ipsec-secgw/sa.c dpdk-20.11.7/examples/ipsec-secgw/sa.c --- dpdk-20.11.6/examples/ipsec-secgw/sa.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/ipsec-secgw/sa.c 2022-12-13 10:50:22.000000000 +0000 @@ -1593,10 +1593,18 @@ struct ipsec_sa *rule; uint32_t idx_sa; enum rte_security_session_action_type rule_type; + struct rte_eth_dev_info dev_info; + int ret; *rx_offloads = 0; *tx_offloads = 0; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "Error during getting device (port %u) info: %s\n", + port_id, strerror(-ret)); + /* Check for inbound rules that use offloads and use this port */ for (idx_sa = 0; idx_sa < nb_sa_in; idx_sa++) { rule = &sa_in[idx_sa]; @@ -1612,11 +1620,38 @@ for (idx_sa = 0; idx_sa < nb_sa_out; idx_sa++) { rule = &sa_out[idx_sa]; rule_type = ipsec_get_action_type(rule); - if ((rule_type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO || - rule_type == - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) - && rule->portid == port_id) - *tx_offloads |= DEV_TX_OFFLOAD_SECURITY; + if (rule->portid == port_id) { + switch (rule_type) { + case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL: + /* Checksum offload is not needed for inline + * protocol as all processing for Outbound IPSec + * packets will be implicitly taken care and for + * non-IPSec packets, there is no need of + * IPv4 Checksum offload. + */ + *tx_offloads |= DEV_TX_OFFLOAD_SECURITY; + break; + case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO: + *tx_offloads |= DEV_TX_OFFLOAD_SECURITY; + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_IPV4_CKSUM) + *tx_offloads |= + DEV_TX_OFFLOAD_IPV4_CKSUM; + break; + default: + /* Enable IPv4 checksum offload even if + * one of lookaside SA's are present. 
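In the ipsec-secgw change above, the per-queue pre-population of Tx offload flags is replaced by a two-step scheme: first collect a bitmask of the ports whose requested Tx offloads include IPv4 checksum, then enable PKT_TX_IP_CKSUM on each lcore only when that mask covers every enabled port. The standalone sketch below reproduces just that mask comparison; the port_has_ipv4_cksum table and the constants are hypothetical inputs standing in for the real capability query.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PORTS 8

    /* Hypothetical capability table: nonzero if the port can offload
     * IPv4 checksum on Tx. */
    static const int port_has_ipv4_cksum[MAX_PORTS] = { 1, 1, 0, 1 };

    int main(void)
    {
        uint32_t enabled_port_mask = 0x7;     /* ports 0-2 in use */
        uint32_t ipv4_cksum_port_mask = 0;
        uint32_t portid;

        for (portid = 0; portid < MAX_PORTS; portid++) {
            if (!(enabled_port_mask & (1U << portid)))
                continue;
            if (port_has_ipv4_cksum[portid])
                ipv4_cksum_port_mask |= 1U << portid;
        }

        /* Checksums stay in software unless every enabled port can
         * offload them. */
        if (ipv4_cksum_port_mask == enabled_port_mask)
            printf("enable PKT_TX_IP_CKSUM on all lcores\n");
        else
            printf("leave IPv4 checksum to software\n");
        return 0;
    }

With port 2 lacking the capability, the mask mismatch keeps the software checksum path, which is exactly the conservative behaviour the per-lcore loop above implements.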
+ */ + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_IPV4_CKSUM) + *tx_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + break; + } + } else { + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_IPV4_CKSUM) + *tx_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + } } return 0; } diff -Nru dpdk-20.11.6/examples/l2fwd-crypto/main.c dpdk-20.11.7/examples/l2fwd-crypto/main.c --- dpdk-20.11.6/examples/l2fwd-crypto/main.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/l2fwd-crypto/main.c 2022-12-13 10:50:22.000000000 +0000 @@ -2724,7 +2724,7 @@ /* Enable Ethernet ports */ enabled_portcount = initialize_ports(&options); if (enabled_portcount < 1) - rte_exit(EXIT_FAILURE, "Failed to initial Ethernet ports\n"); + rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n"); /* Initialize the port/queue configuration of each logical core */ RTE_ETH_FOREACH_DEV(portid) { diff -Nru dpdk-20.11.6/examples/performance-thread/pthread_shim/pthread_shim.c dpdk-20.11.7/examples/performance-thread/pthread_shim/pthread_shim.c --- dpdk-20.11.6/examples/performance-thread/pthread_shim/pthread_shim.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/performance-thread/pthread_shim/pthread_shim.c 2022-12-13 10:50:22.000000000 +0000 @@ -586,6 +586,11 @@ return _sys_pthread_funcs.f_pthread_self(); } +#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + int pthread_setspecific(pthread_key_t key, const void *data) { if (override) { @@ -595,6 +600,10 @@ return _sys_pthread_funcs.f_pthread_setspecific(key, data); } +#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION >= 120000) +#pragma GCC diagnostic pop +#endif + int pthread_spin_init(pthread_spinlock_t *a, int b) { NOT_IMPLEMENTED; diff -Nru dpdk-20.11.6/examples/qos_sched/cfg_file.c dpdk-20.11.7/examples/qos_sched/cfg_file.c --- dpdk-20.11.6/examples/qos_sched/cfg_file.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/qos_sched/cfg_file.c 2022-12-13 10:50:22.000000000 +0000 @@ -155,7 +155,7 @@ profiles = rte_cfgfile_num_sections(cfg, "subport profile", sizeof("subport profile") - 1); - subport_params[0].n_pipe_profiles = profiles; + port_params.n_subport_profiles = profiles; for (i = 0; i < profiles; i++) { char sec_name[32]; diff -Nru dpdk-20.11.6/examples/qos_sched/profile.cfg dpdk-20.11.7/examples/qos_sched/profile.cfg --- dpdk-20.11.6/examples/qos_sched/profile.cfg 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/qos_sched/profile.cfg 2022-12-13 10:50:22.000000000 +0000 @@ -26,8 +26,6 @@ number of pipes per subport = 4096 queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64 -subport 0-8 = 0 ; These subports are configured with subport profile 0 - [subport profile 0] tb rate = 1250000000 ; Bytes per second tb size = 1000000 ; Bytes diff -Nru dpdk-20.11.6/examples/vhost/main.c dpdk-20.11.7/examples/vhost/main.c --- dpdk-20.11.6/examples/vhost/main.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/vhost/main.c 2022-12-13 10:50:22.000000000 +0000 @@ -1163,8 +1163,13 @@ rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count); } - if (!async_vhost_driver) + if (!async_vhost_driver) { free_pkts(pkts, rx_count); + } else { + uint16_t enqueue_fail = rx_count - enqueue_count; + if (enqueue_fail > 0) + free_pkts(&pkts[enqueue_count], enqueue_fail); + } } static __rte_always_inline void diff -Nru dpdk-20.11.6/examples/vm_power_manager/channel_manager.c dpdk-20.11.7/examples/vm_power_manager/channel_manager.c --- 
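The vhost example change above stops leaking mbufs on partial asynchronous enqueues: only the tail of the burst that the backend did not accept is freed, since the accepted packets are now owned by the backend. A small sketch of that ownership split follows, with a toy pkt type and a free_pkts() helper mirroring the example's own; it assumes nothing about the real mbuf API.

    #include <stdint.h>
    #include <stdlib.h>

    struct pkt { void *data; };

    /* Hypothetical helper mirroring the example's free_pkts(). */
    static void free_pkts(struct pkt **pkts, uint16_t n)
    {
        uint16_t i;

        for (i = 0; i < n; i++)
            free(pkts[i]);
    }

    /* After a burst enqueue that may accept only part of the array,
     * release the tail that was not taken; the accepted packets now
     * belong to the backend and must not be freed here. */
    static void release_unsent(struct pkt **pkts, uint16_t rx_count,
                               uint16_t enqueue_count)
    {
        uint16_t enqueue_fail = rx_count - enqueue_count;

        if (enqueue_fail > 0)
            free_pkts(&pkts[enqueue_count], enqueue_fail);
    }

    int main(void)
    {
        struct pkt *burst[4];
        uint16_t i;

        for (i = 0; i < 4; i++)
            burst[i] = malloc(sizeof(**burst));

        /* Pretend the backend accepted only the first two packets. */
        release_unsent(burst, 4, 2);
        /* In this toy program the accepted packets are freed too, to
         * avoid a leak; in the real application they belong to vhost. */
        free_pkts(burst, 2);
        return 0;
    }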
dpdk-20.11.6/examples/vm_power_manager/channel_manager.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/examples/vm_power_manager/channel_manager.c 2022-12-13 10:50:22.000000000 +0000 @@ -23,6 +23,7 @@ #include <rte_log.h> #include <rte_atomic.h> #include <rte_spinlock.h> +#include <rte_tailq.h> #include <libvirt/libvirt.h> @@ -59,16 +60,16 @@ virDomainInfo info; rte_spinlock_t config_spinlock; int allow_query; - LIST_ENTRY(virtual_machine_info) vms_info; + RTE_TAILQ_ENTRY(virtual_machine_info) vms_info; }; -LIST_HEAD(, virtual_machine_info) vm_list_head; +RTE_TAILQ_HEAD(, virtual_machine_info) vm_list_head; static struct virtual_machine_info * find_domain_by_name(const char *name) { struct virtual_machine_info *info; - LIST_FOREACH(info, &vm_list_head, vms_info) { + RTE_TAILQ_FOREACH(info, &vm_list_head, vms_info) { if (!strncmp(info->name, name, CHANNEL_MGR_MAX_NAME_LEN-1)) return info; } @@ -877,7 +878,7 @@ new_domain->allow_query = 0; rte_spinlock_init(&(new_domain->config_spinlock)); - LIST_INSERT_HEAD(&vm_list_head, new_domain, vms_info); + TAILQ_INSERT_HEAD(&vm_list_head, new_domain, vms_info); return 0; } @@ -899,7 +900,7 @@ rte_spinlock_unlock(&vm_info->config_spinlock); return -1; } - LIST_REMOVE(vm_info, vms_info); + TAILQ_REMOVE(&vm_list_head, vm_info, vms_info); rte_spinlock_unlock(&vm_info->config_spinlock); rte_free(vm_info); return 0; @@ -952,7 +953,7 @@ { virNodeInfo info; - LIST_INIT(&vm_list_head); + TAILQ_INIT(&vm_list_head); if (connect_hypervisor(path) < 0) { global_n_host_cpus = 64; global_hypervisor_available = 0; @@ -1004,9 +1005,9 @@ { unsigned i; char mask[RTE_MAX_LCORE]; - struct virtual_machine_info *vm_info; + struct virtual_machine_info *vm_info, *tmp; - LIST_FOREACH(vm_info, &vm_list_head, vms_info) { + RTE_TAILQ_FOREACH_SAFE(vm_info, &vm_list_head, vms_info, tmp) { rte_spinlock_lock(&(vm_info->config_spinlock)); @@ -1021,7 +1022,7 @@ } rte_spinlock_unlock(&(vm_info->config_spinlock)); - LIST_REMOVE(vm_info, vms_info); + TAILQ_REMOVE(&vm_list_head, vm_info, vms_info); rte_free(vm_info); } diff -Nru dpdk-20.11.6/kernel/linux/kni/meson.build dpdk-20.11.7/kernel/linux/kni/meson.build --- dpdk-20.11.6/kernel/linux/kni/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/kernel/linux/kni/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -5,7 +5,7 @@ # Ref: https://jira.devtools.intel.com/browse/DPDK-29263 kmod_cflags = '' file_path = kernel_source_dir + '/include/linux/netdevice.h' -run_cmd = run_command('grep', 'ndo_tx_timeout', file_path, check: false) +run_cmd = run_command('grep', 'ndo_tx_timeout', file_path) if run_cmd.stdout().contains('txqueue') == true kmod_cflags = '-DHAVE_ARG_TX_QUEUE' diff -Nru dpdk-20.11.6/kernel/linux/meson.build dpdk-20.11.7/kernel/linux/meson.build --- dpdk-20.11.6/kernel/linux/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/kernel/linux/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -11,7 +11,7 @@ kernel_dir = get_option('kernel_dir') if kernel_dir == '' # use default path for native builds - kernel_version = run_command('uname', '-r', check: true).stdout().strip() + kernel_version = run_command('uname', '-r').stdout().strip() kernel_dir = '/lib/modules/' + kernel_version endif @@ -23,7 +23,7 @@ # test running make in kernel directory, using "make kernelversion" make_returncode = run_command('make', '-sC', kernel_dir + '/build', - 'kernelversion', check: true).returncode() + 'kernelversion').returncode() if make_returncode != 0 error('Cannot compile kernel modules as requested - are kernel 
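The channel manager conversion above moves from the LIST_* macros to the tail-queue macros and, importantly, to a removal-safe iteration (RTE_TAILQ_FOREACH_SAFE) when tearing the list down, because removing and freeing the current element invalidates the cursor. The sketch below shows the same idea with the plain <sys/queue.h> macros, saving the next pointer before each removal; the node type is invented for the example.

    #include <stdlib.h>
    #include <sys/queue.h>

    struct node {
        TAILQ_ENTRY(node) entries;
        int value;
    };

    TAILQ_HEAD(node_list, node);

    /* Tear down the whole list: always take the next element before
     * removing and freeing the current one, which is what the
     * *_FOREACH_SAFE style macros do under the hood. */
    static void list_free_all(struct node_list *head)
    {
        struct node *cur = TAILQ_FIRST(head);

        while (cur != NULL) {
            struct node *next = TAILQ_NEXT(cur, entries);

            TAILQ_REMOVE(head, cur, entries);
            free(cur);
            cur = next;
        }
    }

    int main(void)
    {
        struct node_list head;
        struct node *n = calloc(1, sizeof(*n));

        TAILQ_INIT(&head);
        if (n != NULL)
            TAILQ_INSERT_HEAD(&head, n, entries);
        list_free_all(&head);
        return 0;
    }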
headers installed?') endif diff -Nru dpdk-20.11.6/lib/librte_cryptodev/rte_cryptodev.c dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev.c --- dpdk-20.11.6/lib/librte_cryptodev/rte_cryptodev.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev.c 2022-12-13 10:50:22.000000000 +0000 @@ -1088,13 +1088,13 @@ } if (!qp_conf) { - CDEV_LOG_ERR("qp_conf cannot be NULL\n"); + CDEV_LOG_ERR("qp_conf cannot be NULL"); return -EINVAL; } if ((qp_conf->mp_session && !qp_conf->mp_session_private) || (!qp_conf->mp_session && qp_conf->mp_session_private)) { - CDEV_LOG_ERR("Invalid mempools\n"); + CDEV_LOG_ERR("Invalid mempools"); return -EINVAL; } @@ -1107,7 +1107,7 @@ pool_priv = rte_mempool_get_priv(qp_conf->mp_session); if (!pool_priv || qp_conf->mp_session->private_data_size < sizeof(*pool_priv)) { - CDEV_LOG_ERR("Invalid mempool\n"); + CDEV_LOG_ERR("Invalid mempool"); return -EINVAL; } @@ -1118,7 +1118,7 @@ obj_size) || (s.nb_drivers <= dev->driver_id) || rte_cryptodev_sym_get_private_session_size(dev_id) > obj_priv_size) { - CDEV_LOG_ERR("Invalid mempool\n"); + CDEV_LOG_ERR("Invalid mempool"); return -EINVAL; } } @@ -1407,7 +1407,7 @@ obj_sz = rte_cryptodev_sym_get_header_session_size() + user_data_size; if (obj_sz > elt_size) - CDEV_LOG_INFO("elt_size %u is expanded to %u\n", elt_size, + CDEV_LOG_INFO("elt_size %u is expanded to %u", elt_size, obj_sz); else obj_sz = elt_size; @@ -1417,14 +1417,14 @@ NULL, NULL, NULL, NULL, socket_id, 0); if (mp == NULL) { - CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d\n", + CDEV_LOG_ERR("%s(name=%s) failed, rte_errno=%d", __func__, name, rte_errno); return NULL; } pool_priv = rte_mempool_get_priv(mp); if (!pool_priv) { - CDEV_LOG_ERR("%s(name=%s) failed to get private data\n", + CDEV_LOG_ERR("%s(name=%s) failed to get private data", __func__, name); rte_mempool_free(mp); return NULL; @@ -1472,7 +1472,7 @@ struct rte_cryptodev_sym_session_pool_private_data *pool_priv; if (!rte_cryptodev_sym_is_valid_session_pool(mp)) { - CDEV_LOG_ERR("Invalid mempool\n"); + CDEV_LOG_ERR("Invalid mempool"); return NULL; } @@ -1506,7 +1506,7 @@ rte_cryptodev_asym_get_header_session_size(); if (!mp) { - CDEV_LOG_ERR("invalid mempool\n"); + CDEV_LOG_ERR("invalid mempool"); return NULL; } @@ -1889,7 +1889,7 @@ elt_size += RTE_MAX(sizeof(struct rte_crypto_sym_op), sizeof(struct rte_crypto_asym_op)); } else { - CDEV_LOG_ERR("Invalid op_type\n"); + CDEV_LOG_ERR("Invalid op_type"); return NULL; } diff -Nru dpdk-20.11.6/lib/librte_cryptodev/rte_cryptodev_pmd.c dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.c --- dpdk-20.11.6/lib/librte_cryptodev/rte_cryptodev_pmd.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.c 2022-12-13 10:50:22.000000000 +0000 @@ -94,11 +94,11 @@ struct rte_cryptodev *cryptodev; if (params->name[0] != '\0') { - CDEV_LOG_INFO("User specified device name = %s\n", params->name); + CDEV_LOG_INFO("User specified device name = %s", params->name); name = params->name; } - CDEV_LOG_INFO("Creating cryptodev %s\n", name); + CDEV_LOG_INFO("Creating cryptodev %s", name); CDEV_LOG_INFO("Initialisation parameters - name: %s," "socket id: %d, max queue pairs: %u", diff -Nru dpdk-20.11.6/lib/librte_cryptodev/rte_cryptodev_pmd.h dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.h --- dpdk-20.11.6/lib/librte_cryptodev/rte_cryptodev_pmd.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_cryptodev/rte_cryptodev_pmd.h 2022-12-13 10:50:22.000000000 +0000 @@ -541,7 +541,7 @@ uint8_t driver_id, 
void *private_data) { if (unlikely(sess->nb_drivers <= driver_id)) { - CDEV_LOG_ERR("Set private data for driver %u not allowed\n", + CDEV_LOG_ERR("Set private data for driver %u not allowed", driver_id); return; } diff -Nru dpdk-20.11.6/lib/librte_eal/common/eal_common_proc.c dpdk-20.11.7/lib/librte_eal/common/eal_common_proc.c --- dpdk-20.11.6/lib/librte_eal/common/eal_common_proc.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/eal_common_proc.c 2022-12-13 10:50:22.000000000 +0000 @@ -262,7 +262,7 @@ } static int -read_msg(struct mp_msg_internal *m, struct sockaddr_un *s) +read_msg(int fd, struct mp_msg_internal *m, struct sockaddr_un *s) { int msglen; struct iovec iov; @@ -283,7 +283,7 @@ msgh.msg_controllen = sizeof(control); retry: - msglen = recvmsg(mp_fd, &msgh, 0); + msglen = recvmsg(fd, &msgh, 0); /* zero length message means socket was closed */ if (msglen == 0) @@ -392,11 +392,12 @@ { struct mp_msg_internal msg; struct sockaddr_un sa; + int fd; - while (mp_fd >= 0) { + while ((fd = __atomic_load_n(&mp_fd, __ATOMIC_RELAXED)) >= 0) { int ret; - ret = read_msg(&msg, &sa); + ret = read_msg(fd, &msg, &sa); if (ret <= 0) break; @@ -640,9 +641,8 @@ NULL, mp_handle, NULL) < 0) { RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n", strerror(errno)); - close(mp_fd); close(dir_fd); - mp_fd = -1; + close(__atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED)); return -1; } @@ -658,11 +658,10 @@ { int fd; - if (mp_fd < 0) + fd = __atomic_exchange_n(&mp_fd, -1, __ATOMIC_RELAXED); + if (fd < 0) return; - fd = mp_fd; - mp_fd = -1; pthread_cancel(mp_handle_tid); pthread_join(mp_handle_tid, NULL); close_socket_fd(fd); diff -Nru dpdk-20.11.6/lib/librte_eal/common/eal_common_trace.c dpdk-20.11.7/lib/librte_eal/common/eal_common_trace.c --- dpdk-20.11.6/lib/librte_eal/common/eal_common_trace.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/eal_common_trace.c 2022-12-13 10:50:22.000000000 +0000 @@ -48,12 +48,6 @@ goto fail; } - if (!STAILQ_EMPTY(&trace.args)) - trace.status = true; - - if (!rte_trace_is_enabled()) - return 0; - rte_spinlock_init(&trace.lock); /* Is duplicate trace name registered */ @@ -72,13 +66,9 @@ if (trace_metadata_create() < 0) goto fail; - /* Create trace directory */ - if (trace_mkdir()) - goto free_meta; - /* Save current epoch timestamp for future use */ if (trace_epoch_time_save() < 0) - goto fail; + goto free_meta; /* Apply global configurations */ STAILQ_FOREACH(arg, &trace.args, next) @@ -98,8 +88,6 @@ void eal_trace_fini(void) { - if (!rte_trace_is_enabled()) - return; trace_mem_free(); trace_metadata_destroy(); eal_trace_args_free(); @@ -108,17 +96,17 @@ bool rte_trace_is_enabled(void) { - return trace.status; + return __atomic_load_n(&trace.status, __ATOMIC_ACQUIRE) != 0; } static void -trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode) +trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode) { if (mode == RTE_TRACE_MODE_OVERWRITE) - __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD, + __atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD, __ATOMIC_RELEASE); else - __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD, + __atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_DISCARD, __ATOMIC_RELEASE); } @@ -127,9 +115,6 @@ { struct trace_point *tp; - if (!rte_trace_is_enabled()) - return; - STAILQ_FOREACH(tp, &tp_list, next) trace_mode_set(tp->handle, mode); @@ -149,36 +134,42 @@ } bool -rte_trace_point_is_enabled(rte_trace_point_t *trace) +rte_trace_point_is_enabled(rte_trace_point_t 
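The eal_common_proc.c hunks above make the multi-process socket descriptor's lifetime race-free: readers snapshot mp_fd with an atomic load, and the teardown paths retire it with an atomic exchange, so close() can only ever run once even if several paths race. A compact C11 sketch of that idiom follows; mp_channel_close() and mp_channel_poll_once() are invented names.

    #include <stdatomic.h>
    #include <unistd.h>

    static atomic_int mp_fd = -1;

    /* Retire the descriptor exactly once: whoever swaps in -1 first is
     * responsible for closing it; any later caller sees -1 and returns. */
    static void mp_channel_close(void)
    {
        int fd = atomic_exchange(&mp_fd, -1);

        if (fd >= 0)
            close(fd);
    }

    /* Readers take a snapshot instead of re-reading the global, so the
     * value cannot change between the check and the use. */
    static int mp_channel_poll_once(void)
    {
        int fd = atomic_load(&mp_fd);

        if (fd < 0)
            return -1;
        /* ... recvmsg(fd, ...) would go here ... */
        return 0;
    }

    int main(void)
    {
        (void)mp_channel_poll_once();
        mp_channel_close();
        return 0;
    }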
*t) { uint64_t val; - if (trace_point_is_invalid(trace)) + if (trace_point_is_invalid(t)) return false; - val = __atomic_load_n(trace, __ATOMIC_ACQUIRE); + val = __atomic_load_n(t, __ATOMIC_ACQUIRE); return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0; } int -rte_trace_point_enable(rte_trace_point_t *trace) +rte_trace_point_enable(rte_trace_point_t *t) { - if (trace_point_is_invalid(trace)) + uint64_t prev; + + if (trace_point_is_invalid(t)) return -ERANGE; - __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK, - __ATOMIC_RELEASE); + prev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE); + if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0) + __atomic_add_fetch(&trace.status, 1, __ATOMIC_RELEASE); return 0; } int -rte_trace_point_disable(rte_trace_point_t *trace) +rte_trace_point_disable(rte_trace_point_t *t) { - if (trace_point_is_invalid(trace)) + uint64_t prev; + + if (trace_point_is_invalid(t)) return -ERANGE; - __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK, - __ATOMIC_RELEASE); + prev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE); + if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0) + __atomic_sub_fetch(&trace.status, 1, __ATOMIC_RELEASE); return 0; } @@ -221,8 +212,10 @@ rc = rte_trace_point_disable(tp->handle); found = 1; } - if (rc < 0) - return rc; + if (rc < 0) { + found = 0; + break; + } } regfree(&r); @@ -262,10 +255,9 @@ struct __rte_trace_header *header; uint32_t count; - if (trace->nb_trace_mem_list == 0) - return; - rte_spinlock_lock(&trace->lock); + if (trace->nb_trace_mem_list == 0) + goto out; fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list); fprintf(f, "\nTrace mem info\n--------------\n"); for (count = 0; count < trace->nb_trace_mem_list; count++) { @@ -276,6 +268,7 @@ header->stream_header.lcore_id, header->stream_header.thread_name); } +out: rte_spinlock_unlock(&trace->lock); } @@ -414,9 +407,6 @@ struct trace *trace = trace_obj_get(); uint32_t count; - if (!rte_trace_is_enabled()) - return; - rte_spinlock_lock(&trace->lock); for (count = 0; count < trace->nb_trace_mem_list; count++) { trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]); @@ -513,6 +503,7 @@ /* Form the trace handle */ *handle = sz; *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT; + trace_mode_set(handle, trace.mode); trace.nb_trace_points++; tp->handle = handle; diff -Nru dpdk-20.11.6/lib/librte_eal/common/eal_common_trace_ctf.c dpdk-20.11.7/lib/librte_eal/common/eal_common_trace_ctf.c --- dpdk-20.11.6/lib/librte_eal/common/eal_common_trace_ctf.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/eal_common_trace_ctf.c 2022-12-13 10:50:22.000000000 +0000 @@ -359,9 +359,6 @@ char *ctf_meta = trace->ctf_meta; int rc; - if (!rte_trace_is_enabled()) - return 0; - if (ctf_meta == NULL) return -EINVAL; diff -Nru dpdk-20.11.6/lib/librte_eal/common/eal_common_trace_utils.c dpdk-20.11.7/lib/librte_eal/common/eal_common_trace_utils.c --- dpdk-20.11.6/lib/librte_eal/common/eal_common_trace_utils.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/eal_common_trace_utils.c 2022-12-13 10:50:22.000000000 +0000 @@ -314,14 +314,18 @@ return 0; } -int +static int trace_mkdir(void) { struct trace *trace = trace_obj_get(); char session[TRACE_DIR_STR_LEN]; + static bool already_done; char *dir_path; int rc; + if (already_done) + return 0; + if (!trace->dir_offset) { dir_path = calloc(1, sizeof(trace->dir)); if (dir_path == NULL) { @@ -365,6 +369,7 @@ } RTE_LOG(INFO, EAL, 
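With the trace changes above, the global "is tracing enabled" state becomes a counter: enabling a tracepoint increments it only on a real 0-to-1 transition of the per-point enable bit, disabling decrements it symmetrically, and the fast-path check reduces to "counter != 0". The following sketch expresses the same bookkeeping with C11 atomics; ENABLE_MASK, point_enable() and the other names are illustrative, not the library's symbols.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ENABLE_MASK (UINT64_C(1) << 63)

    static atomic_uint_fast32_t n_enabled;   /* how many points are on */

    static bool tracing_enabled(void)
    {
        return atomic_load(&n_enabled) != 0;
    }

    /* Only a 0 -> 1 transition of the per-point enable bit may bump the
     * global counter, so enabling the same point twice counts once. */
    static void point_enable(_Atomic uint64_t *point)
    {
        uint64_t prev = atomic_fetch_or(point, ENABLE_MASK);

        if ((prev & ENABLE_MASK) == 0)
            atomic_fetch_add(&n_enabled, 1);
    }

    static void point_disable(_Atomic uint64_t *point)
    {
        uint64_t prev = atomic_fetch_and(point, ~ENABLE_MASK);

        if ((prev & ENABLE_MASK) != 0)
            atomic_fetch_sub(&n_enabled, 1);
    }

    int main(void)
    {
        _Atomic uint64_t tp = 0;

        point_enable(&tp);
        point_enable(&tp);                       /* no double count */
        printf("enabled: %d\n", tracing_enabled());
        point_disable(&tp);
        printf("enabled: %d\n", tracing_enabled());
        return 0;
    }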
"Trace dir: %s\n", trace->dir); + already_done = true; return 0; } @@ -434,6 +439,10 @@ if (trace->nb_trace_mem_list == 0) return rc; + rc = trace_mkdir(); + if (rc < 0) + return rc; + rc = trace_meta_save(trace); if (rc) return rc; diff -Nru dpdk-20.11.6/lib/librte_eal/common/eal_trace.h dpdk-20.11.7/lib/librte_eal/common/eal_trace.h --- dpdk-20.11.6/lib/librte_eal/common/eal_trace.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/eal_trace.h 2022-12-13 10:50:22.000000000 +0000 @@ -54,7 +54,7 @@ char dir[PATH_MAX]; int dir_offset; int register_errno; - bool status; + uint32_t status; enum rte_trace_mode mode; rte_uuid_t uuid; uint32_t buff_len; @@ -104,7 +104,6 @@ int trace_metadata_create(void); void trace_metadata_destroy(void); char *trace_metadata_fixup_field(const char *field); -int trace_mkdir(void); int trace_epoch_time_save(void); void trace_mem_free(void); void trace_mem_per_thread_free(void); diff -Nru dpdk-20.11.6/lib/librte_eal/common/malloc_heap.c dpdk-20.11.7/lib/librte_eal/common/malloc_heap.c --- dpdk-20.11.6/lib/librte_eal/common/malloc_heap.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/malloc_heap.c 2022-12-13 10:50:22.000000000 +0000 @@ -396,7 +396,7 @@ int n_segs; bool callback_triggered = false; - alloc_sz = RTE_ALIGN_CEIL(align + elt_size + + alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(elt_size, align) + MALLOC_ELEM_OVERHEAD, pg_sz); n_segs = alloc_sz / pg_sz; diff -Nru dpdk-20.11.6/lib/librte_eal/common/malloc_mp.c dpdk-20.11.7/lib/librte_eal/common/malloc_mp.c --- dpdk-20.11.6/lib/librte_eal/common/malloc_mp.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/malloc_mp.c 2022-12-13 10:50:22.000000000 +0000 @@ -185,7 +185,7 @@ int n_segs; void *map_addr; - alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size + + alloc_sz = RTE_ALIGN_CEIL(RTE_ALIGN_CEIL(ar->elt_size, ar->align) + MALLOC_ELEM_OVERHEAD, ar->page_sz); n_segs = alloc_sz / ar->page_sz; diff -Nru dpdk-20.11.6/lib/librte_eal/common/rte_service.c dpdk-20.11.7/lib/librte_eal/common/rte_service.c --- dpdk-20.11.6/lib/librte_eal/common/rte_service.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/common/rte_service.c 2022-12-13 10:50:22.000000000 +0000 @@ -56,10 +56,17 @@ * on currently. */ uint32_t num_mapped_cores; - uint64_t calls; - uint64_t cycles_spent; + + /* 32-bit builds won't naturally align a uint64_t, so force alignment, + * allowing regular reads to be atomic. + */ + uint64_t calls __rte_aligned(8); + uint64_t cycles_spent __rte_aligned(8); } __rte_cache_aligned; +/* Mask used to ensure uint64_t 8 byte vars are naturally aligned. */ +#define RTE_SERVICE_STAT_ALIGN_MASK (8 - 1) + /* the internal values of a service core */ struct core_state { /* map of services IDs are run on this core */ @@ -103,14 +110,12 @@ } int i; - int count = 0; struct rte_config *cfg = rte_eal_get_configuration(); for (i = 0; i < RTE_MAX_LCORE; i++) { if (lcore_config[i].core_role == ROLE_SERVICE) { if ((unsigned int)i == cfg->main_lcore) continue; rte_service_lcore_add(i); - count++; } } @@ -365,13 +370,29 @@ { void *userdata = s->spec.callback_userdata; + /* Ensure the atomically stored variables are naturally aligned, + * as required for regular loads to be atomic. 
+ */ + RTE_BUILD_BUG_ON((offsetof(struct rte_service_spec_impl, calls) + & RTE_SERVICE_STAT_ALIGN_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_service_spec_impl, cycles_spent) + & RTE_SERVICE_STAT_ALIGN_MASK) != 0); + if (service_stats_enabled(s)) { uint64_t start = rte_rdtsc(); s->spec.callback(userdata); uint64_t end = rte_rdtsc(); - s->cycles_spent += end - start; + uint64_t cycles = end - start; cs->calls_per_service[service_idx]++; - s->calls++; + if (service_mt_safe(s)) { + __atomic_fetch_add(&s->cycles_spent, cycles, __ATOMIC_RELAXED); + __atomic_fetch_add(&s->calls, 1, __ATOMIC_RELAXED); + } else { + uint64_t cycles_new = s->cycles_spent + cycles; + uint64_t calls_new = s->calls++; + __atomic_store_n(&s->cycles_spent, cycles_new, __ATOMIC_RELAXED); + __atomic_store_n(&s->calls, calls_new, __ATOMIC_RELAXED); + } } else s->spec.callback(userdata); } @@ -478,6 +499,12 @@ cs->loops++; } + /* Switch off this core for all services, to ensure that future + * calls to may_be_active() know this core is switched off. + */ + for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) + cs->service_active_on_lcore[i] = 0; + /* Use SEQ CST memory ordering to avoid any re-ordering around * this store, ensuring that once this store is visible, the service * lcore thread really is done in service cores code. @@ -774,11 +801,6 @@ __atomic_load_n(&rte_services[i].num_mapped_cores, __ATOMIC_RELAXED)); - /* Switch off this core for all services, to ensure that future - * calls to may_be_active() know this core is switched off. - */ - cs->service_active_on_lcore[i] = 0; - /* if the core is mapped, and the service is running, and this * is the only core that is mapped, the service would cease to * run if this core stopped, so fail instead. diff -Nru dpdk-20.11.6/lib/librte_eal/include/rte_common.h dpdk-20.11.7/lib/librte_eal/include/rte_common.h --- dpdk-20.11.6/lib/librte_eal/include/rte_common.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/include/rte_common.h 2022-12-13 10:50:22.000000000 +0000 @@ -255,7 +255,7 @@ /** * subtract a byte-value offset from a pointer */ -#define RTE_PTR_SUB(ptr, x) ((void*)((uintptr_t)ptr - (x))) +#define RTE_PTR_SUB(ptr, x) ((void *)((uintptr_t)(ptr) - (x))) /** * get the difference between two pointer values, i.e. how far apart @@ -280,7 +280,7 @@ * must be a power-of-two value. */ #define RTE_PTR_ALIGN_FLOOR(ptr, align) \ - ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)ptr, align)) + ((typeof(ptr))RTE_ALIGN_FLOOR((uintptr_t)(ptr), align)) /** * Macro to align a value to a given power-of-two. The resultant value diff -Nru dpdk-20.11.6/lib/librte_eal/include/rte_memzone.h dpdk-20.11.7/lib/librte_eal/include/rte_memzone.h --- dpdk-20.11.6/lib/librte_eal/include/rte_memzone.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/include/rte_memzone.h 2022-12-13 10:50:22.000000000 +0000 @@ -118,7 +118,6 @@ * on error. * On error case, rte_errno will be set appropriately: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists * - ENOMEM - no appropriate memory area found in which to create memzone @@ -184,7 +183,6 @@ * on error. 
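The rte_common.h fix above is classic macro hygiene: the pointer argument is parenthesized before the cast so that an expression such as p + 1 is converted as a whole, rather than having the cast bind to p alone and silently turn pointer arithmetic into byte arithmetic. A minimal reproduction with hypothetical PTR_SUB_BAD/PTR_SUB_GOOD macros:

    #include <stdint.h>
    #include <stdio.h>

    /* Unparenthesized argument: the cast binds to `arr` only, so the
     * `+ 1` becomes plain integer addition instead of int-pointer
     * arithmetic. */
    #define PTR_SUB_BAD(ptr, x)  ((void *)((uintptr_t)ptr - (x)))
    /* Fixed form: the whole expression is converted first. */
    #define PTR_SUB_GOOD(ptr, x) ((void *)((uintptr_t)(ptr) - (x)))

    int main(void)
    {
        int arr[4] = { 0 };

        void *bad  = PTR_SUB_BAD(arr + 1, 0);   /* (uintptr_t)arr + 1 - 0 */
        void *good = PTR_SUB_GOOD(arr + 1, 0);  /* address of arr[1]      */

        printf("bad=%p good=%p expected=%p\n", bad, good, (void *)&arr[1]);
        return 0;
    }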
* On error case, rte_errno will be set appropriately: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists * - ENOMEM - no appropriate memory area found in which to create memzone @@ -256,7 +254,6 @@ * on error. * On error case, rte_errno will be set appropriately: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists * - ENOMEM - no appropriate memory area found in which to create memzone diff -Nru dpdk-20.11.6/lib/librte_eal/include/rte_uuid.h dpdk-20.11.7/lib/librte_eal/include/rte_uuid.h --- dpdk-20.11.6/lib/librte_eal/include/rte_uuid.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/include/rte_uuid.h 2022-12-13 10:50:22.000000000 +0000 @@ -37,6 +37,9 @@ ((e) >> 8) & 0xff, (e) & 0xff \ } +/** UUID string length */ +#define RTE_UUID_STRLEN (36 + 1) + /** * Test if UUID is all zeros. * @@ -95,7 +98,6 @@ * @param len * Sizeof the available string buffer */ -#define RTE_UUID_STRLEN (36 + 1) void rte_uuid_unparse(const rte_uuid_t uu, char *out, size_t len); #ifdef __cplusplus diff -Nru dpdk-20.11.6/lib/librte_eal/x86/include/rte_memcpy.h dpdk-20.11.7/lib/librte_eal/x86/include/rte_memcpy.h --- dpdk-20.11.6/lib/librte_eal/x86/include/rte_memcpy.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eal/x86/include/rte_memcpy.h 2022-12-13 10:50:22.000000000 +0000 @@ -372,6 +372,23 @@ } /** + * Copy 256 bytes from one location to another, + * locations should not overlap. + */ +static __rte_always_inline void +rte_mov256(uint8_t *dst, const uint8_t *src) +{ + rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32); + rte_mov32((uint8_t *)dst + 1 * 32, (const uint8_t *)src + 1 * 32); + rte_mov32((uint8_t *)dst + 2 * 32, (const uint8_t *)src + 2 * 32); + rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32); + rte_mov32((uint8_t *)dst + 4 * 32, (const uint8_t *)src + 4 * 32); + rte_mov32((uint8_t *)dst + 5 * 32, (const uint8_t *)src + 5 * 32); + rte_mov32((uint8_t *)dst + 6 * 32, (const uint8_t *)src + 6 * 32); + rte_mov32((uint8_t *)dst + 7 * 32, (const uint8_t *)src + 7 * 32); +} + +/** * Copy 128-byte blocks from one location to another, * locations should not overlap. 
*/ diff -Nru dpdk-20.11.6/lib/librte_eventdev/rte_event_crypto_adapter.c dpdk-20.11.7/lib/librte_eventdev/rte_event_crypto_adapter.c --- dpdk-20.11.6/lib/librte_eventdev/rte_event_crypto_adapter.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eventdev/rte_event_crypto_adapter.c 2022-12-13 10:50:22.000000000 +0000 @@ -30,6 +30,8 @@ */ #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024 +#define ECA_ADAPTER_ARRAY "crypto_adapter_array" + struct rte_event_crypto_adapter { /* Event device identifier */ uint8_t eventdev_id; @@ -118,7 +120,6 @@ static int eca_init(void) { - const char *name = "crypto_adapter_array"; const struct rte_memzone *mz; unsigned int sz; @@ -126,9 +127,10 @@ RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE; sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE); - mz = rte_memzone_lookup(name); + mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY); if (mz == NULL) { - mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0, + mz = rte_memzone_reserve_aligned(ECA_ADAPTER_ARRAY, sz, + rte_socket_id(), 0, RTE_CACHE_LINE_SIZE); if (mz == NULL) { RTE_EDEV_LOG_ERR("failed to reserve memzone err = %" @@ -141,6 +143,22 @@ return 0; } +static int +eca_memzone_lookup(void) +{ + const struct rte_memzone *mz; + + if (event_crypto_adapter == NULL) { + mz = rte_memzone_lookup(ECA_ADAPTER_ARRAY); + if (mz == NULL) + return -ENOMEM; + + event_crypto_adapter = mz->addr; + } + + return 0; +} + static inline struct rte_event_crypto_adapter * eca_id_to_adapter(uint8_t id) { @@ -1047,6 +1065,9 @@ uint32_t i; int ret; + if (eca_memzone_lookup()) + return -ENOMEM; + EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); adapter = eca_id_to_adapter(id); @@ -1088,6 +1109,9 @@ struct rte_eventdev *dev; uint32_t i; + if (eca_memzone_lookup()) + return -ENOMEM; + EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); adapter = eca_id_to_adapter(id); diff -Nru dpdk-20.11.6/lib/librte_eventdev/rte_event_eth_rx_adapter.h dpdk-20.11.7/lib/librte_eventdev/rte_event_eth_rx_adapter.h --- dpdk-20.11.6/lib/librte_eventdev/rte_event_eth_rx_adapter.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eventdev/rte_event_eth_rx_adapter.h 2022-12-13 10:50:22.000000000 +0000 @@ -332,7 +332,7 @@ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ * * @param conf - * Additional configuration structure of type *rte_event_eth_rx_adapter_conf* + * Additional configuration structure of type *rte_event_eth_rx_adapter_queue_conf* * * @return * - 0: Success, Receive queue added correctly. 
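The crypto adapter change above adds a lazy lookup of the shared adapter array by its memzone name, so a process that never executed the init path (typically a secondary process) can still reach the state created elsewhere before using the stats and service-ID entry points. The sketch below captures only that lookup-on-demand shape; shared_region_lookup() is a stub standing in for the real memzone lookup.

    #include <stddef.h>
    #include <stdio.h>

    struct adapter;                     /* opaque shared state               */
    static struct adapter *adapters;    /* this process's view, may be NULL  */

    /* Stand-in for a named shared-memory lookup (a memzone lookup in the
     * real adapter); the stub reports the region as missing. */
    static struct adapter *shared_region_lookup(const char *name)
    {
        (void)name;
        return NULL;
    }

    /* Called at the top of the control-path functions: a process that
     * never ran the init path attaches to the shared array on demand. */
    static int adapters_attach(void)
    {
        if (adapters != NULL)
            return 0;

        adapters = shared_region_lookup("crypto_adapter_array");
        return adapters != NULL ? 0 : -1;   /* -ENOMEM in the real code */
    }

    int main(void)
    {
        if (adapters_attach() < 0)
            fprintf(stderr, "shared adapter state not available\n");
        return 0;
    }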
diff -Nru dpdk-20.11.6/lib/librte_eventdev/rte_event_eth_tx_adapter.c dpdk-20.11.7/lib/librte_eventdev/rte_event_eth_tx_adapter.c --- dpdk-20.11.6/lib/librte_eventdev/rte_event_eth_tx_adapter.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_eventdev/rte_event_eth_tx_adapter.c 2022-12-13 10:50:22.000000000 +0000 @@ -44,7 +44,7 @@ #define RTE_EVENT_ETH_TX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) \ do { \ if (!txa_valid_id(id)) { \ - RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \ + RTE_EDEV_LOG_ERR("Invalid eth Tx adapter id = %d", id); \ return retval; \ } \ } while (0) @@ -468,14 +468,13 @@ struct txa_service_data *txa; txa = txa_service_id_to_data(id); - if (txa->service_id == TXA_INVALID_SERVICE_ID) + if (txa == NULL || txa->service_id == TXA_INVALID_SERVICE_ID) return 0; + rte_spinlock_lock(&txa->tx_lock); ret = rte_service_runstate_set(txa->service_id, start); - if (ret == 0 && !start) { - while (rte_service_may_be_active(txa->service_id)) - rte_pause(); - } + rte_spinlock_unlock(&txa->tx_lock); + return ret; } @@ -822,6 +821,8 @@ uint16_t i, q, nb_queues; int ret = 0; + if (txa->txa_ethdev == NULL) + return 0; nb_queues = txa->txa_ethdev[port_id].nb_queues; if (nb_queues == 0) return 0; @@ -834,10 +835,10 @@ if (tqi[q].added) { ret = txa_service_queue_del(id, dev, q); + i++; if (ret != 0) break; } - i++; q++; } return ret; diff -Nru dpdk-20.11.6/lib/librte_graph/rte_graph_worker.h dpdk-20.11.7/lib/librte_graph/rte_graph_worker.h --- dpdk-20.11.6/lib/librte_graph/rte_graph_worker.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_graph/rte_graph_worker.h 2022-12-13 10:50:22.000000000 +0000 @@ -224,7 +224,7 @@ __rte_node_enqueue_tail_update(graph, node); if (unlikely(node->size < (idx + space))) - __rte_node_stream_alloc(graph, node); + __rte_node_stream_alloc_size(graph, node, node->size + space); } /** @@ -432,7 +432,7 @@ uint16_t free_space = node->size - idx; if (unlikely(free_space < nb_objs)) - __rte_node_stream_alloc_size(graph, node, nb_objs); + __rte_node_stream_alloc_size(graph, node, node->size + nb_objs); return &node->objs[idx]; } diff -Nru dpdk-20.11.6/lib/librte_gro/gro_tcp4.c dpdk-20.11.7/lib/librte_gro/gro_tcp4.c --- dpdk-20.11.6/lib/librte_gro/gro_tcp4.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_gro/gro_tcp4.c 2022-12-13 10:50:22.000000000 +0000 @@ -199,7 +199,7 @@ struct rte_tcp_hdr *tcp_hdr; uint32_t sent_seq; int32_t tcp_dl; - uint16_t ip_id, hdr_len, frag_off; + uint16_t ip_id, hdr_len, frag_off, ip_tlen; uint8_t is_atomic; struct tcp4_flow_key key; @@ -226,6 +226,12 @@ */ if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG) return -1; + + /* trim the tail padding bytes */ + ip_tlen = rte_be_to_cpu_16(ipv4_hdr->total_length); + if (pkt->pkt_len > (uint32_t)(ip_tlen + pkt->l2_len)) + rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_tlen - pkt->l2_len); + /* * Don't process the packet whose payload length is less than or * equal to 0. @@ -306,7 +312,7 @@ * length is greater than the max value. Store * the packet into the flow. 
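The GRO fix above trims Ethernet tail padding before coalescing: whenever the mbuf is longer than the L2 header plus the IPv4 total length, the excess bytes are cut off so padding can never be merged into the TCP payload. A self-contained sketch of that length arithmetic, using a toy packet structure instead of an mbuf:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for the fields the trim decision needs. */
    struct toy_pkt {
        uint32_t pkt_len;    /* bytes in the mbuf chain            */
        uint16_t l2_len;     /* Ethernet header length             */
        uint16_t ip_total;   /* IPv4 total_length, host byte order */
    };

    /* Drop trailing bytes (e.g. Ethernet minimum-frame padding) that are
     * not covered by the IP header's total length, so they cannot be
     * merged into the coalesced payload. Returns the number trimmed. */
    static uint32_t trim_tail_padding(struct toy_pkt *p)
    {
        uint32_t covered = (uint32_t)p->ip_total + p->l2_len;

        if (p->pkt_len <= covered)
            return 0;

        uint32_t excess = p->pkt_len - covered;
        p->pkt_len -= excess;          /* rte_pktmbuf_trim() in the real code */
        return excess;
    }

    int main(void)
    {
        /* A 60-byte frame carrying a 40-byte IP packet behind a 14-byte
         * Ethernet header: 6 bytes of padding should go away. */
        struct toy_pkt p = { .pkt_len = 60, .l2_len = 14, .ip_total = 40 };

        printf("trimmed %u bytes, new length %u\n",
               trim_tail_padding(&p), p.pkt_len);
        return 0;
    }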
*/ - if (insert_new_item(tbl, pkt, start_time, prev_idx, + if (insert_new_item(tbl, pkt, start_time, cur_idx, sent_seq, ip_id, is_atomic) == INVALID_ARRAY_INDEX) return -1; diff -Nru dpdk-20.11.6/lib/librte_gro/gro_udp4.c dpdk-20.11.7/lib/librte_gro/gro_udp4.c --- dpdk-20.11.6/lib/librte_gro/gro_udp4.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_gro/gro_udp4.c 2022-12-13 10:50:22.000000000 +0000 @@ -221,6 +221,11 @@ if (!is_ipv4_fragment(ipv4_hdr)) return -1; + ip_dl = rte_be_to_cpu_16(ipv4_hdr->total_length); + /* trim the tail padding bytes */ + if (pkt->pkt_len > (uint32_t)(ip_dl + pkt->l2_len)) + rte_pktmbuf_trim(pkt, pkt->pkt_len - ip_dl - pkt->l2_len); + /* * Don't process the packet whose payload length is less than or * equal to 0. @@ -228,7 +233,6 @@ if (pkt->pkt_len <= hdr_len) return -1; - ip_dl = rte_be_to_cpu_16(ipv4_hdr->total_length); if (ip_dl <= pkt->l3_len) return -1; diff -Nru dpdk-20.11.6/lib/librte_hash/rte_cuckoo_hash.c dpdk-20.11.7/lib/librte_hash/rte_cuckoo_hash.c --- dpdk-20.11.6/lib/librte_hash/rte_cuckoo_hash.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_hash/rte_cuckoo_hash.c 2022-12-13 10:50:22.000000000 +0000 @@ -527,6 +527,7 @@ rte_free(h->buckets_ext); rte_free(h->tbl_chng_cnt); rte_free(h->ext_bkt_to_free); + rte_free(h->hash_rcu_cfg); rte_free(h); rte_free(te); } diff -Nru dpdk-20.11.6/lib/librte_ipsec/esp_outb.c dpdk-20.11.7/lib/librte_ipsec/esp_outb.c --- dpdk-20.11.6/lib/librte_ipsec/esp_outb.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_ipsec/esp_outb.c 2022-12-13 10:50:22.000000000 +0000 @@ -172,8 +172,10 @@ /* pad length */ pdlen -= sizeof(*espt); + RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes)); + /* copy padding data */ - rte_memcpy(pt, esp_pad_bytes, pdlen); + rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes))); /* update esp trailer */ espt = (struct rte_esp_tail *)(pt + pdlen); @@ -339,8 +341,10 @@ /* pad length */ pdlen -= sizeof(*espt); + RTE_ASSERT(pdlen <= sizeof(esp_pad_bytes)); + /* copy padding data */ - rte_memcpy(pt, esp_pad_bytes, pdlen); + rte_memcpy(pt, esp_pad_bytes, RTE_MIN(pdlen, sizeof(esp_pad_bytes))); /* update esp trailer */ espt = (struct rte_esp_tail *)(pt + pdlen); diff -Nru dpdk-20.11.6/lib/librte_kni/rte_kni.h dpdk-20.11.7/lib/librte_kni/rte_kni.h --- dpdk-20.11.6/lib/librte_kni/rte_kni.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_kni/rte_kni.h 2022-12-13 10:50:22.000000000 +0000 @@ -66,8 +66,8 @@ uint32_t core_id; /* Core ID to bind kernel thread on */ uint16_t group_id; /* Group ID */ unsigned mbuf_size; /* mbuf size */ - struct rte_pci_addr addr; /* depreciated */ - struct rte_pci_id id; /* depreciated */ + struct rte_pci_addr addr; /* deprecated */ + struct rte_pci_id id; /* deprecated */ __extension__ uint8_t force_bind : 1; /* Flag to bind kernel thread */ diff -Nru dpdk-20.11.6/lib/librte_mbuf/rte_mbuf.h dpdk-20.11.7/lib/librte_mbuf/rte_mbuf.h --- dpdk-20.11.6/lib/librte_mbuf/rte_mbuf.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_mbuf/rte_mbuf.h 2022-12-13 10:50:22.000000000 +0000 @@ -685,7 +685,6 @@ * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. Possible rte_errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - EINVAL - cache size provided is too large, or priv_size is not aligned. 
* - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists @@ -727,7 +726,6 @@ * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. Possible rte_errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - EINVAL - cache size provided is too large, or priv_size is not aligned. * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists @@ -781,7 +779,6 @@ * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. Possible rte_errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - EINVAL - cache size provided is too large, or priv_size is not aligned. * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists diff -Nru dpdk-20.11.6/lib/librte_mempool/rte_mempool.h dpdk-20.11.7/lib/librte_mempool/rte_mempool.h --- dpdk-20.11.6/lib/librte_mempool/rte_mempool.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_mempool/rte_mempool.h 2022-12-13 10:50:22.000000000 +0000 @@ -993,7 +993,6 @@ * The pointer to the new allocated mempool, on success. NULL on error * with rte_errno set appropriately. Possible rte_errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - EINVAL - cache size provided is too large * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists diff -Nru dpdk-20.11.6/lib/librte_net/rte_ip.h dpdk-20.11.7/lib/librte_net/rte_ip.h --- dpdk-20.11.6/lib/librte_net/rte_ip.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_net/rte_ip.h 2022-12-13 10:50:22.000000000 +0000 @@ -134,18 +134,21 @@ static inline uint32_t __rte_raw_cksum(const void *buf, size_t len, uint32_t sum) { - /* extend strict-aliasing rules */ - typedef uint16_t __attribute__((__may_alias__)) u16_p; - const u16_p *u16_buf = (const u16_p *)buf; - const u16_p *end = u16_buf + len / sizeof(*u16_buf); + const void *end; - for (; u16_buf != end; ++u16_buf) - sum += *u16_buf; + for (end = RTE_PTR_ADD(buf, RTE_ALIGN_FLOOR(len, sizeof(uint16_t))); + buf != end; buf = RTE_PTR_ADD(buf, sizeof(uint16_t))) { + uint16_t v; + + memcpy(&v, buf, sizeof(uint16_t)); + sum += v; + } /* if length is odd, keeping it byte order independent */ if (unlikely(len % 2)) { uint16_t left = 0; - *(unsigned char *)&left = *(const unsigned char *)end; + + memcpy(&left, end, 1); sum += left; } diff -Nru dpdk-20.11.6/lib/librte_node/ethdev_ctrl.c dpdk-20.11.7/lib/librte_node/ethdev_ctrl.c --- dpdk-20.11.6/lib/librte_node/ethdev_ctrl.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_node/ethdev_ctrl.c 2022-12-13 10:50:22.000000000 +0000 @@ -77,6 +77,8 @@ /* Add it to list of ethdev rx nodes for lookup */ elem = malloc(sizeof(ethdev_rx_node_elem_t)); + if (elem == NULL) + return -ENOMEM; memset(elem, 0, sizeof(ethdev_rx_node_elem_t)); elem->ctx.port_id = port_id; elem->ctx.queue_id = j; diff -Nru dpdk-20.11.6/lib/librte_pdump/rte_pdump.c dpdk-20.11.7/lib/librte_pdump/rte_pdump.c 
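The rte_ip.h rewrite above fetches each 16-bit word of the checksum input through memcpy instead of casting the buffer, which keeps the routine correct for arbitrarily aligned input while preserving the byte-order-independent handling of an odd trailing byte. A standalone version of the same loop follows; raw_sum16() is an invented name and computes only the partial sum, without the final fold.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Sum a buffer as 16-bit words the way the ones'-complement checksum
     * helpers do, but fetch each word with memcpy so the code stays valid
     * for unaligned buffers and under strict aliasing. */
    static uint32_t raw_sum16(const void *buf, size_t len, uint32_t sum)
    {
        const uint8_t *p = buf;
        size_t i;

        for (i = 0; i + sizeof(uint16_t) <= len; i += sizeof(uint16_t)) {
            uint16_t v;

            memcpy(&v, p + i, sizeof(v));
            sum += v;
        }

        /* Odd trailing byte: widen it in a byte-order independent way. */
        if (len & 1) {
            uint16_t left = 0;

            memcpy(&left, p + len - 1, 1);
            sum += left;
        }
        return sum;
    }

    int main(void)
    {
        const uint8_t data[5] = { 0x45, 0x00, 0x00, 0x3c, 0x1c };

        printf("partial sum: 0x%x\n", raw_sum16(data, sizeof(data), 0));
        return 0;
    }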
--- dpdk-20.11.6/lib/librte_pdump/rte_pdump.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_pdump/rte_pdump.c 2022-12-13 10:50:22.000000000 +0000 @@ -442,6 +442,12 @@ struct pdump_request *req = (struct pdump_request *)mp_req.param; struct pdump_response *resp; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + PDUMP_LOG(ERR, + "pdump enable/disable not allowed in primary process\n"); + return -EINVAL; + } + req->ver = 1; req->flags = flags; req->op = operation; diff -Nru dpdk-20.11.6/lib/librte_power/rte_power.h dpdk-20.11.7/lib/librte_power/rte_power.h --- dpdk-20.11.6/lib/librte_power/rte_power.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_power/rte_power.h 2022-12-13 10:50:22.000000000 +0000 @@ -171,14 +171,6 @@ * Scale up the frequency of a specific lcore according to the available * frequencies. * Review each environments specific documentation for usage. - * - * @param lcore_id - * lcore id. - * - * @return - * - 1 on success with frequency changed. - * - 0 on success without frequency changed. - * - Negative on error. */ extern rte_power_freq_change_t rte_power_freq_up; @@ -186,30 +178,13 @@ * Scale down the frequency of a specific lcore according to the available * frequencies. * Review each environments specific documentation for usage. - * - * @param lcore_id - * lcore id. - * - * @return - * - 1 on success with frequency changed. - * - 0 on success without frequency changed. - * - Negative on error. */ - extern rte_power_freq_change_t rte_power_freq_down; /** * Scale up the frequency of a specific lcore to the highest according to the * available frequencies. * Review each environments specific documentation for usage. - * - * @param lcore_id - * lcore id. - * - * @return - * - 1 on success with frequency changed. - * - 0 on success without frequency changed. - * - Negative on error. */ extern rte_power_freq_change_t rte_power_freq_max; @@ -217,54 +192,24 @@ * Scale down the frequency of a specific lcore to the lowest according to the * available frequencies. * Review each environments specific documentation for usage.. - * - * @param lcore_id - * lcore id. - * - * @return - * - 1 on success with frequency changed. - * - 0 on success without frequency changed. - * - Negative on error. */ extern rte_power_freq_change_t rte_power_freq_min; /** * Query the Turbo Boost status of a specific lcore. * Review each environments specific documentation for usage.. - * - * @param lcore_id - * lcore id. - * - * @return - * - 1 Turbo Boost is enabled for this lcore. - * - 0 Turbo Boost is disabled for this lcore. - * - Negative on error. */ extern rte_power_freq_change_t rte_power_turbo_status; /** * Enable Turbo Boost for this lcore. * Review each environments specific documentation for usage.. - * - * @param lcore_id - * lcore id. - * - * @return - * - 0 on success. - * - Negative on error. */ extern rte_power_freq_change_t rte_power_freq_enable_turbo; /** * Disable Turbo Boost for this lcore. * Review each environments specific documentation for usage.. - * - * @param lcore_id - * lcore id. - * - * @return - * - 0 on success. - * - Negative on error. */ extern rte_power_freq_change_t rte_power_freq_disable_turbo; diff -Nru dpdk-20.11.6/lib/librte_ring/rte_ring.h dpdk-20.11.7/lib/librte_ring/rte_ring.h --- dpdk-20.11.6/lib/librte_ring/rte_ring.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_ring/rte_ring.h 2022-12-13 10:50:22.000000000 +0000 @@ -66,10 +66,9 @@ * object table. 
It is advised to use rte_ring_get_memsize() to get the * appropriate size. * - * The ring size is set to *count*, which must be a power of two. Water - * marking is disabled by default. The real usable ring size is - * *count-1* instead of *count* to differentiate a free ring from an - * empty ring. + * The ring size is set to *count*, which must be a power of two. + * The real usable ring size is *count-1* instead of *count* to + * differentiate a full ring from an empty ring. * * The ring is not added in RTE_TAILQ_RING global list. Indeed, the * memory given by the caller may not be shareable among dpdk @@ -119,10 +118,9 @@ * This function uses ``memzone_reserve()`` to allocate memory. Then it * calls rte_ring_init() to initialize an empty ring. * - * The new ring size is set to *count*, which must be a power of - * two. Water marking is disabled by default. The real usable ring size - * is *count-1* instead of *count* to differentiate a free ring from an - * empty ring. + * The new ring size is set to *count*, which must be a power of two. + * The real usable ring size is *count-1* instead of *count* to + * differentiate a full ring from an empty ring. * * The ring is added in RTE_TAILQ_RING list. * @@ -164,7 +162,6 @@ * On success, the pointer to the new allocated ring. NULL on error with * rte_errno set appropriately. Possible errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - EINVAL - count provided is not a power of 2 * - ENOSPC - the maximum number of memzones has already been allocated * - EEXIST - a memzone with the same name already exists diff -Nru dpdk-20.11.6/lib/librte_ring/rte_ring_core.h dpdk-20.11.7/lib/librte_ring/rte_ring_core.h --- dpdk-20.11.6/lib/librte_ring/rte_ring_core.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_ring/rte_ring_core.h 2022-12-13 10:50:22.000000000 +0000 @@ -114,8 +114,8 @@ * An RTE ring structure. * * The producer and the consumer have a head and a tail index. The particularity - * of these index is that they are not between 0 and size(ring). These indexes - * are between 0 and 2^32, and we mask their value when we access the ring[] + * of these index is that they are not between 0 and size(ring)-1. These indexes + * are between 0 and 2^32 -1, and we mask their value when we access the ring[] * field. Thanks to this assumption, we can do subtractions between 2 index * values in a modulo-32bit base: that's why the overflow of the indexes is not * a problem. diff -Nru dpdk-20.11.6/lib/librte_ring/rte_ring_elem.h dpdk-20.11.7/lib/librte_ring/rte_ring_elem.h --- dpdk-20.11.6/lib/librte_ring/rte_ring_elem.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_ring/rte_ring_elem.h 2022-12-13 10:50:22.000000000 +0000 @@ -95,7 +95,6 @@ * On success, the pointer to the new allocated ring. NULL on error with * rte_errno set appropriately. Possible errno values include: * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure - * - E_RTE_SECONDARY - function was called from a secondary process instance * - EINVAL - esize is not a multiple of 4 or count provided is not a * power of 2. 
* - ENOSPC - the maximum number of memzones has already been allocated diff -Nru dpdk-20.11.6/lib/librte_sched/rte_sched.c dpdk-20.11.7/lib/librte_sched/rte_sched.c --- dpdk-20.11.6/lib/librte_sched/rte_sched.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_sched/rte_sched.c 2022-12-13 10:50:22.000000000 +0000 @@ -1148,8 +1148,6 @@ n_subports++; - subport_profile_id = 0; - /* Port */ port->subports[subport_id] = s; diff -Nru dpdk-20.11.6/lib/librte_telemetry/telemetry.c dpdk-20.11.7/lib/librte_telemetry/telemetry.c --- dpdk-20.11.6/lib/librte_telemetry/telemetry.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_telemetry/telemetry.c 2022-12-13 10:50:22.000000000 +0000 @@ -181,9 +181,14 @@ MAX_CMD_LEN, cmd ? cmd : "none"); break; case RTE_TEL_STRING: - used = snprintf(out_buf, sizeof(out_buf), "{\"%.*s\":\"%.*s\"}", - MAX_CMD_LEN, cmd, - RTE_TEL_MAX_SINGLE_STRING_LEN, d->data.str); + prefix_used = snprintf(out_buf, sizeof(out_buf), "{\"%.*s\":", + MAX_CMD_LEN, cmd); + cb_data_buf = &out_buf[prefix_used]; + buf_len = sizeof(out_buf) - prefix_used - 1; /* space for '}' */ + + used = rte_tel_json_str(cb_data_buf, buf_len, 0, d->data.str); + used += prefix_used; + used += strlcat(out_buf + used, "}", sizeof(out_buf) - used); break; case RTE_TEL_DICT: prefix_used = snprintf(out_buf, sizeof(out_buf), "{\"%.*s\":", diff -Nru dpdk-20.11.6/lib/librte_telemetry/telemetry_json.h dpdk-20.11.7/lib/librte_telemetry/telemetry_json.h --- dpdk-20.11.6/lib/librte_telemetry/telemetry_json.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_telemetry/telemetry_json.h 2022-12-13 10:50:22.000000000 +0000 @@ -44,6 +44,52 @@ return 0; /* nothing written or modified */ } +static const char control_chars[0x20] = { + ['\n'] = 'n', + ['\r'] = 'r', + ['\t'] = 't', +}; + +/** + * @internal + * Does the same as __json_snprintf(buf, len, "\"%s\"", str) + * except that it does proper escaping as necessary. + * Drops any invalid characters we don't support + */ +static inline int +__json_format_str(char *buf, const int len, const char *str) +{ + char tmp[len]; + int tmpidx = 0; + + tmp[tmpidx++] = '"'; + while (*str != '\0') { + if (*str < (int)RTE_DIM(control_chars)) { + int idx = *str; /* compilers don't like char type as index */ + if (control_chars[idx] != 0) { + tmp[tmpidx++] = '\\'; + tmp[tmpidx++] = control_chars[idx]; + } + } else if (*str == '"' || *str == '\\') { + tmp[tmpidx++] = '\\'; + tmp[tmpidx++] = *str; + } else + tmp[tmpidx++] = *str; + /* we always need space for closing quote and null character. + * Ensuring at least two free characters also means we can always take an + * escaped character like "\n" without overflowing + */ + if (tmpidx > len - 2) + return 0; + str++; + } + tmp[tmpidx++] = '"'; + tmp[tmpidx] = '\0'; + + strcpy(buf, tmp); + return tmpidx; +} + /* Copies an empty array into the provided buffer. */ static inline int rte_tel_json_empty_array(char *buf, const int len, const int used) @@ -62,7 +108,7 @@ static inline int rte_tel_json_str(char *buf, const int len, const int used, const char *str) { - return used + __json_snprintf(buf + used, len - used, "\"%s\"", str); + return used + __json_format_str(buf + used, len - used, str); } /* Appends a string into the JSON array in the provided buffer. 
*/ diff -Nru dpdk-20.11.6/lib/librte_timer/rte_timer.c dpdk-20.11.7/lib/librte_timer/rte_timer.c --- dpdk-20.11.6/lib/librte_timer/rte_timer.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_timer/rte_timer.c 2022-12-13 10:50:22.000000000 +0000 @@ -587,7 +587,7 @@ } static int -__rte_timer_stop(struct rte_timer *tim, int local_is_locked, +__rte_timer_stop(struct rte_timer *tim, struct rte_timer_data *timer_data) { union rte_timer_status prev_status, status; @@ -609,7 +609,7 @@ /* remove it from list */ if (prev_status.state == RTE_TIMER_PENDING) { - timer_del(tim, prev_status, local_is_locked, priv_timer); + timer_del(tim, prev_status, 0, priv_timer); __TIMER_STAT_ADD(priv_timer, pending, -1); } @@ -638,7 +638,7 @@ TIMER_DATA_VALID_GET_OR_ERR_RET(timer_data_id, timer_data, -EINVAL); - return __rte_timer_stop(tim, 0, timer_data); + return __rte_timer_stop(tim, timer_data); } /* loop until rte_timer_stop() succeed */ @@ -994,21 +994,16 @@ walk_lcore = walk_lcores[i]; priv_timer = &timer_data->priv_timer[walk_lcore]; - rte_spinlock_lock(&priv_timer->list_lock); - for (tim = priv_timer->pending_head.sl_next[0]; tim != NULL; tim = next_tim) { next_tim = tim->sl_next[0]; - /* Call timer_stop with lock held */ - __rte_timer_stop(tim, 1, timer_data); + __rte_timer_stop(tim, timer_data); if (f) f(tim, f_arg); } - - rte_spinlock_unlock(&priv_timer->list_lock); } return 0; diff -Nru dpdk-20.11.6/lib/librte_vhost/rte_vhost.h dpdk-20.11.7/lib/librte_vhost/rte_vhost.h --- dpdk-20.11.6/lib/librte_vhost/rte_vhost.h 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_vhost/rte_vhost.h 2022-12-13 10:50:22.000000000 +0000 @@ -890,6 +890,21 @@ int rte_vhost_vring_call(int vid, uint16_t vring_idx); /** + * Notify the guest that used descriptors have been added to the vring. This + * function acts as a memory barrier. This function will return -EAGAIN when + * vq's access lock is held by other thread, user should try again later. + * + * @param vid + * vhost device ID + * @param vring_idx + * vring index + * @return + * 0 on success, -1 on failure, -EAGAIN for another retry + */ +__rte_experimental +int rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx); + +/** * Get vhost RX queue avail count. 
* * @param vid diff -Nru dpdk-20.11.6/lib/librte_vhost/version.map dpdk-20.11.7/lib/librte_vhost/version.map --- dpdk-20.11.6/lib/librte_vhost/version.map 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_vhost/version.map 2022-12-13 10:50:22.000000000 +0000 @@ -76,4 +76,7 @@ rte_vhost_async_channel_unregister; rte_vhost_submit_enqueue_burst; rte_vhost_poll_enqueue_completed; + + # added in 22.11 + rte_vhost_vring_call_nonblock; }; diff -Nru dpdk-20.11.6/lib/librte_vhost/vhost.c dpdk-20.11.7/lib/librte_vhost/vhost.c --- dpdk-20.11.6/lib/librte_vhost/vhost.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_vhost/vhost.c 2022-12-13 10:50:22.000000000 +0000 @@ -1277,6 +1277,36 @@ return 0; } +int +rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx) +{ + struct virtio_net *dev; + struct vhost_virtqueue *vq; + + dev = get_device(vid); + if (!dev) + return -1; + + if (vring_idx >= VHOST_MAX_VRING) + return -1; + + vq = dev->virtqueue[vring_idx]; + if (!vq) + return -1; + + if (!rte_spinlock_trylock(&vq->access_lock)) + return -EAGAIN; + + if (vq_is_packed(dev)) + vhost_vring_call_packed(dev, vq); + else + vhost_vring_call_split(dev, vq); + + rte_spinlock_unlock(&vq->access_lock); + + return 0; +} + uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id) { diff -Nru dpdk-20.11.6/lib/librte_vhost/vhost_user.c dpdk-20.11.7/lib/librte_vhost/vhost_user.c --- dpdk-20.11.6/lib/librte_vhost/vhost_user.c 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/lib/librte_vhost/vhost_user.c 2022-12-13 10:50:22.000000000 +0000 @@ -2388,6 +2388,7 @@ if (is_vring_iotlb(dev, vq, imsg)) { rte_spinlock_lock(&vq->access_lock); *pdev = dev = translate_ring_addresses(dev, i); + vq = dev->virtqueue[i]; rte_spinlock_unlock(&vq->access_lock); } } diff -Nru dpdk-20.11.6/license/README dpdk-20.11.7/license/README --- dpdk-20.11.6/license/README 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/license/README 2022-12-13 10:50:22.000000000 +0000 @@ -58,20 +58,19 @@ 3. Technical Board then approach Governing Board for such limited approval for the given contribution only. -Any approvals shall be documented in "Licenses/exceptions.txt" with record -dates. +Any approvals shall be documented in "license/exceptions.txt" with record dates. DPDK project supported licenses are: 1. BSD 3-clause "New" or "Revised" License SPDX-License-Identifier: BSD-3-Clause URL: http://spdx.org/licenses/BSD-3-Clause#licenseText - DPDK License text: licenses/bsd-3-clause.txt + DPDK License text: license/bsd-3-clause.txt 2. GNU General Public License v2.0 only SPDX-License-Identifier: GPL-2.0 URL: http://spdx.org/licenses/GPL-2.0.html#licenseText - DPDK License text: licenses/gpl-2.0.txt + DPDK License text: license/gpl-2.0.txt 3. GNU Lesser General Public License v2.1 SPDX-License-Identifier: LGPL-2.1 URL: http://spdx.org/licenses/LGPL-2.1.html#licenseText - DPDK License text: licenses/lgpl-2.1.txt + DPDK License text: license/lgpl-2.1.txt diff -Nru dpdk-20.11.6/meson.build dpdk-20.11.7/meson.build --- dpdk-20.11.6/meson.build 2022-08-29 12:12:02.000000000 +0000 +++ dpdk-20.11.7/meson.build 2022-12-13 10:50:22.000000000 +0000 @@ -5,7 +5,7 @@ # Get version number from file. # Fallback to "more" for Windows compatibility. version: run_command(find_program('cat', 'more'), - files('VERSION'), check: true).stdout().strip(), + files('VERSION')).stdout().strip(), license: 'BSD', default_options: [ 'buildtype=release',
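The new rte_vhost_vring_call_nonblock() above takes the virtqueue access lock with a trylock and returns -EAGAIN when another thread holds it, leaving the retry policy to the caller. The sketch below shows the caller-side contract with a plain pthread mutex standing in for the vhost spinlock; ring_notify_nonblock() and the yield-and-retry loop are illustrative only.

    #include <errno.h>
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Non-blocking notify: if another thread holds the ring lock, report
     * -EAGAIN instead of sleeping, mirroring the new vhost call's contract. */
    static int ring_notify_nonblock(void)
    {
        if (pthread_mutex_trylock(&ring_lock) != 0)
            return -EAGAIN;
        /* ... kick the guest here ... */
        pthread_mutex_unlock(&ring_lock);
        return 0;
    }

    int main(void)
    {
        int ret;

        /* Caller side: retry on contention, yielding between attempts. */
        do {
            ret = ring_notify_nonblock();
            if (ret == -EAGAIN)
                sched_yield();
        } while (ret == -EAGAIN);

        printf("notify returned %d\n", ret);
        return 0;
    }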