Version in base suite: 22.11.4-1~deb12u1
Base version: dpdk_22.11.4-1~deb12u1
Target version: dpdk_22.11.5-1~deb12u1
Base file: /srv/ftp-master.debian.org/ftp/pool/main/d/dpdk/dpdk_22.11.4-1~deb12u1.dsc
Target file: /srv/ftp-master.debian.org/policy/pool/main/d/dpdk/dpdk_22.11.5-1~deb12u1.dsc

 .github/workflows/build.yml | 20
 .mailmap | 16
 VERSION | 2
 app/dumpcap/main.c | 5
 app/meson.build | 2
 app/pdump/main.c | 3
 app/test-crypto-perf/cperf_options_parsing.c | 4
 app/test-crypto-perf/cperf_test_common.c | 20
 app/test-crypto-perf/cperf_test_verify.c | 71 +-
 app/test-pmd/cmdline.c | 2
 app/test-pmd/config.c | 9
 app/test-pmd/csumonly.c | 25 -
 app/test-pmd/parameters.c | 18
 app/test/process.h | 51 +-
 app/test/test.c | 8
 app/test/test_bpf.c | 1
 app/test/test_cfgfile.c | 8
 app/test/test_event_eth_tx_adapter.c | 4
 app/test/test_eventdev.c | 10
 app/test/test_mbuf.c | 5
 app/test/test_power.c | 2
 buildtools/subproject/meson.build | 9
 config/meson.build | 19
 debian/changelog | 8
 debian/librte-common-cnxk23.symbols | 8
 doc/guides/bbdevs/fpga_5gnr_fec.rst | 7
 doc/guides/cryptodevs/overview.rst | 6
 doc/guides/nics/features.rst | 24
 doc/guides/nics/features/atlantic.ini | 1
 doc/guides/nics/features/bnxt.ini | 1
 doc/guides/nics/features/cnxk.ini | 1
 doc/guides/nics/features/default.ini | 2
 doc/guides/nics/features/dpaa.ini | 1
 doc/guides/nics/features/dpaa2.ini | 1
 doc/guides/nics/features/hns3.ini | 2
 doc/guides/nics/features/i40e.ini | 2
 doc/guides/nics/features/iavf.ini | 3
 doc/guides/nics/features/ice.ini | 2
 doc/guides/nics/features/ice_dcf.ini | 1
 doc/guides/nics/features/igb.ini | 1
 doc/guides/nics/features/igc.ini | 1
 doc/guides/nics/features/ionic.ini | 1
 doc/guides/nics/features/ipn3ke.ini | 1
 doc/guides/nics/features/ixgbe.ini | 2
 doc/guides/nics/features/mvpp2.ini | 3
 doc/guides/nics/features/ngbe.ini | 1
 doc/guides/nics/features/octeontx.ini | 1
 doc/guides/nics/features/sfc.ini | 1
 doc/guides/nics/features/thunderx.ini | 1
 doc/guides/nics/features/txgbe.ini | 2
 doc/guides/nics/hns3.rst | 2
 doc/guides/nics/mlx5.rst | 4
 doc/guides/platform/mlx5.rst | 2
 doc/guides/prog_guide/ip_fragment_reassembly_lib.rst | 2
 doc/guides/prog_guide/packet_framework.rst | 2
 doc/guides/prog_guide/profile_app.rst | 2
 doc/guides/rel_notes/release_22_11.rst | 322 +++++++++++++
 doc/guides/testpmd_app_ug/run_app.rst | 4
 doc/guides/tools/testeventdev.rst | 24
 drivers/baseband/acc/acc_common.c | 7
 drivers/baseband/acc/acc_common.h | 4
 drivers/baseband/acc/meson.build | 2
 drivers/baseband/acc/rte_acc100_pmd.c | 4
 drivers/baseband/acc/rte_acc200_pmd.c | 4
 drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 3
 drivers/bus/dpaa/dpaa_bus.c | 4
 drivers/bus/fslmc/fslmc_bus.c | 4
 drivers/bus/ifpga/ifpga_logs.h | 4
 drivers/bus/vdev/vdev.c | 26 +
 drivers/common/cnxk/cnxk_security.c | 229 ---------
 drivers/common/cnxk/cnxk_security.h | 12
 drivers/common/cnxk/roc_cpt.c | 3
 drivers/common/cnxk/roc_dev.c | 7
 drivers/common/cnxk/roc_ie_on.h | 60 --
 drivers/common/cnxk/roc_mbox.h | 16
 drivers/common/cnxk/roc_nix.c | 2
 drivers/common/cnxk/roc_nix.h | 2
 drivers/common/cnxk/roc_nix_inl.c | 3
 drivers/common/cnxk/roc_nix_inl.h | 50 --
 drivers/common/cnxk/roc_nix_rss.c | 4
 drivers/common/cnxk/roc_nix_tm.c | 3
 drivers/common/cnxk/roc_npc_parse.c | 10
 drivers/common/cnxk/version.map | 4
 drivers/common/mlx5/mlx5_common_mr.c | 2
 drivers/common/mlx5/mlx5_devx_cmds.c | 18
 drivers/common/qat/meson.build | 6
 drivers/common/sfc_efx/base/efx.h | 14
 drivers/crypto/ipsec_mb/ipsec_mb_ops.c | 2
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c | 14
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 10
 drivers/dma/dpaa2/dpaa2_qdma.c | 5
 drivers/dma/idxd/idxd_bus.c | 10
 drivers/event/cnxk/cnxk_eventdev.c | 16
 drivers/event/dlb2/dlb2.c | 3
 drivers/event/opdl/opdl_ring.c | 2
 drivers/meson.build | 2
 drivers/net/af_xdp/rte_eth_af_xdp.c | 10
 drivers/net/bnx2x/bnx2x.c | 2
 drivers/net/bnx2x/bnx2x_stats.c | 14
 drivers/net/bnx2x/bnx2x_vfpf.c | 14
 drivers/net/bnxt/bnxt.h | 6
 drivers/net/bnxt/bnxt_ethdev.c | 53 +-
 drivers/net/bnxt/bnxt_hwrm.c | 26 -
 drivers/net/bnxt/bnxt_reps.c | 29 -
 drivers/net/bnxt/bnxt_txq.c | 6
 drivers/net/bnxt/bnxt_txq.h | 1
 drivers/net/bnxt/bnxt_txr.c | 13
 drivers/net/bnxt/bnxt_txr.h | 4
 drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c | 1
 drivers/net/bonding/rte_eth_bond_flow.c | 6
 drivers/net/cnxk/cn10k_tx.h | 20
 drivers/net/cnxk/cn9k_tx.h | 20
 drivers/net/cnxk/cnxk_ethdev_devargs.c | 2
 drivers/net/cnxk/cnxk_ethdev_ops.c | 40 -
 drivers/net/cnxk/cnxk_flow.c | 15
 drivers/net/dpaa/dpaa_ethdev.c | 3
 drivers/net/ena/base/ena_com.c | 9
 drivers/net/ena/ena_ethdev.c | 41 -
 drivers/net/failsafe/failsafe_args.c | 2
 drivers/net/hns3/hns3_cmd.c | 4
 drivers/net/hns3/hns3_common.c | 2
 drivers/net/hns3/hns3_dcb.c | 9
 drivers/net/hns3/hns3_ethdev.c | 32 -
 drivers/net/hns3/hns3_ethdev.h | 2
 drivers/net/hns3/hns3_ethdev_vf.c | 229 +++++----
 drivers/net/hns3/hns3_mbx.c | 165 +++---
 drivers/net/hns3/hns3_mbx.h | 92 ++-
 drivers/net/hns3/hns3_rxtx.c | 18
 drivers/net/i40e/i40e_flow.c | 3
 drivers/net/i40e/i40e_rxtx_vec_avx2.c | 62 --
 drivers/net/iavf/iavf_ethdev.c | 7
 drivers/net/iavf/iavf_ipsec_crypto.c | 5
 drivers/net/iavf/iavf_rxtx_vec_avx2.c | 78 ---
 drivers/net/ice/ice_ethdev.c | 32 +
 drivers/net/ice/ice_ethdev.h | 4
 drivers/net/ice/ice_rxtx_vec_avx2.c | 78 ---
 drivers/net/ice/ice_rxtx_vec_common.h | 4
 drivers/net/ice/ice_tm.c | 12
 drivers/net/ice/version.map | 2
 drivers/net/ionic/ionic_ethdev.c | 39 -
 drivers/net/ionic/ionic_rxtx.c | 4
 drivers/net/ionic/ionic_rxtx_sg.c | 8
 drivers/net/ionic/ionic_rxtx_simple.c | 8
 drivers/net/ixgbe/base/ixgbe_type.h | 2
 drivers/net/ixgbe/ixgbe_ethdev.c | 17
 drivers/net/ixgbe/ixgbe_rxtx.c | 19
 drivers/net/mana/mana.c | 24
 drivers/net/mana/mana.h | 6
 drivers/net/mana/mr.c | 60 +-
 drivers/net/memif/rte_eth_memif.c | 6
 drivers/net/mlx5/hws/mlx5dr_definer.c | 63 +-
 drivers/net/mlx5/hws/mlx5dr_matcher.c | 17
 drivers/net/mlx5/hws/mlx5dr_rule.c | 18
 drivers/net/mlx5/linux/mlx5_ethdev_os.c | 251 +++++---
 drivers/net/mlx5/linux/mlx5_os.c | 8
 drivers/net/mlx5/mlx5.c | 13
 drivers/net/mlx5/mlx5.h | 21
 drivers/net/mlx5/mlx5_flow.c | 61 +-
 drivers/net/mlx5/mlx5_flow.h | 43 +
 drivers/net/mlx5/mlx5_flow_dv.c | 355 ++++++--------
 drivers/net/mlx5/mlx5_flow_hw.c | 465 +++++++++++--------
 drivers/net/mlx5/mlx5_flow_meter.c | 28 -
 drivers/net/mlx5/mlx5_hws_cnt.c | 110 +++-
 drivers/net/mlx5/mlx5_hws_cnt.h | 26 +
 drivers/net/mlx5/mlx5_rx.c | 19
 drivers/net/mlx5/mlx5_stats.c | 58 +-
 drivers/net/mlx5/mlx5_trigger.c | 8
 drivers/net/mlx5/windows/mlx5_ethdev_os.c | 22
 drivers/net/mvneta/mvneta_ethdev.c | 3
 drivers/net/mvpp2/mrvl_ethdev.c | 3
 drivers/net/netvsc/hn_rxtx.c | 8
 drivers/net/nfp/flower/nfp_flower.c | 72 --
 drivers/net/nfp/flower/nfp_flower.h | 1
 drivers/net/nfp/flower/nfp_flower_representor.c | 166 ++++++
 drivers/net/nfp/flower/nfp_flower_representor.h | 1
 drivers/net/nfp/nfp_common.h | 1
 drivers/net/nfp/nfp_ethdev.c | 82 ++-
 drivers/net/nfp/nfpcore/nfp_mutex.c | 2
 drivers/net/pfe/pfe_ethdev.c | 3
 drivers/net/tap/rte_eth_tap.c | 1
 drivers/net/tap/tap_flow.c | 48 +
 drivers/net/tap/tap_netlink.c | 3
 drivers/net/thunderx/base/nicvf_mbox.c | 12
 drivers/net/thunderx/base/nicvf_mbox.h | 10
 drivers/net/thunderx/nicvf_ethdev.c | 28 +
 drivers/net/virtio/virtio_ethdev.c | 2
 drivers/net/vmxnet3/vmxnet3_ethdev.c | 6
 drivers/vdpa/mlx5/mlx5_vdpa_event.c | 29 -
 examples/ipsec-secgw/ipsec-secgw.c | 9
 examples/ipsec-secgw/ipsec.c | 20
 examples/ipsec-secgw/ipsec_worker.h | 4
 examples/ipsec-secgw/parser.c | 2
 examples/l3fwd/main.c | 6
 examples/packet_ordering/main.c | 32 +
 examples/qos_sched/args.c | 4
 examples/vhost/main.c | 3
 kernel/freebsd/nic_uio/nic_uio.c | 8
 lib/bbdev/rte_bbdev.c | 6
 lib/cfgfile/rte_cfgfile.c | 14
 lib/compressdev/rte_compressdev_pmd.c | 4
 lib/cryptodev/rte_cryptodev.c | 4
 lib/cryptodev/rte_cryptodev.h | 2
 lib/dmadev/rte_dmadev.c | 4
 lib/eal/common/eal_common_options.c | 26 -
 lib/eal/linux/eal_dev.c | 2
 lib/eal/linux/eal_hugepage_info.c | 2
 lib/eal/linux/eal_interrupts.c | 2
 lib/eal/meson.build | 3
 lib/eal/windows/eal_memory.c | 2
 lib/eal/x86/rte_cycles.c | 16
 lib/ethdev/ethdev_driver.c | 4
 lib/ethdev/ethdev_pci.h | 2
 lib/ethdev/ethdev_private.c | 2
 lib/ethdev/rte_class_eth.c | 2
 lib/ethdev/rte_ethdev.c | 40 -
 lib/ethdev/rte_flow.c | 2
 lib/ethdev/rte_flow.h | 2
 lib/eventdev/eventdev_pmd.h | 6
 lib/eventdev/rte_event_crypto_adapter.c | 24
 lib/eventdev/rte_event_eth_rx_adapter.c | 36 -
 lib/eventdev/rte_event_eth_tx_adapter.c | 2
 lib/eventdev/rte_event_timer_adapter.c | 4
 lib/eventdev/rte_eventdev.c | 10
 lib/eventdev/rte_eventdev.h | 71 +-
 lib/hash/rte_cuckoo_hash.h | 11
 lib/lpm/rte_lpm6.c | 6
 lib/mempool/rte_mempool_ops.c | 2
 lib/meson.build | 2
 lib/metrics/rte_metrics_telemetry.c | 2
 lib/net/rte_ether.h | 14
 lib/net/rte_ip.h | 15
 lib/net/rte_net_crc.c | 6
 lib/node/ethdev_rx.c | 4
 lib/node/ip4_lookup.c | 2
 lib/pipeline/rte_swx_pipeline_spec.c | 4
 lib/power/guest_channel.c | 2
 lib/power/rte_power_pmd_mgmt.c | 6
 lib/rawdev/rte_rawdev.c | 2
 lib/rcu/rte_rcu_qsbr.c | 4
 lib/rcu/rte_rcu_qsbr.h | 8
 lib/regexdev/rte_regexdev.c | 2
 lib/stack/rte_stack.c | 8
 lib/telemetry/telemetry.c | 11
 lib/vhost/vdpa.c | 10
 lib/vhost/vhost_crypto.c | 6
 lib/vhost/vhost_user.c | 2
 lib/vhost/virtio_net.c | 24
 meson.build | 6
 248 files changed, 3251 insertions(+), 2061 deletions(-)

diff -Nru dpdk-22.11.4/.github/workflows/build.yml dpdk-22.11.5/.github/workflows/build.yml
--- dpdk-22.11.4/.github/workflows/build.yml	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/.github/workflows/build.yml	2024-04-22 11:25:10.000000000 +0000
@@ -64,7 +64,7 @@
     steps:
     - name: Checkout sources
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
     - name: Generate cache keys
       id: get_ref_keys
       run: |
@@ -72,7 +72,7 @@
         echo 'libabigail=libabigail-${{ env.LIBABIGAIL_VERSION }}-${{ matrix.config.os }}' >> $GITHUB_OUTPUT
         echo 'abi=abi-${{ matrix.config.os }}-${{ matrix.config.compiler }}-${{ matrix.config.cross }}-${{ env.LIBABIGAIL_VERSION }}-${{ env.REF_GIT_TAG }}' >> $GITHUB_OUTPUT
     - name: Retrieve ccache cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
        path: ~/.ccache
        key: ${{ steps.get_ref_keys.outputs.ccache }}-${{ github.ref }}
@@ -80,13 +80,13 @@
          ${{ steps.get_ref_keys.outputs.ccache }}-refs/heads/main
     - name: Retrieve libabigail cache
      id: libabigail-cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
      if: env.ABI_CHECKS == 'true'
      with:
        path: libabigail
        key: ${{ steps.get_ref_keys.outputs.libabigail }}
     - name: Retrieve ABI reference cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
      if: env.ABI_CHECKS == 'true'
      with:
        path: reference
@@ -133,7 +133,7 @@
      run: .ci/linux-build.sh
     - name: Upload logs on failure
      if: failure()
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
      with:
        name: meson-logs-${{ join(matrix.config.*, '-') }}
        path: |
@@ -161,7 +161,7 @@
        echo 'image=image-${{ matrix.config.image }}-'$(date -u +%Y-%m-%d) >> $GITHUB_OUTPUT
     - name: Retrieve image cache
      id: image_cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
      with:
        path: ~/.image
        key: ${{ steps.get_keys.outputs.image }}
@@ -207,7 +207,7 @@
     steps:
     - name: Checkout sources
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
     - name: Generate various keys
      id: get_keys
      run: |
@@ -215,7 +215,7 @@
        echo 'logs=meson-logs-${{ join(matrix.config.*, '-') }}' | tr -d ':' >> $GITHUB_OUTPUT
     - name: Retrieve image cache
      id: image_cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
      with:
        path: ~/.image
        key: ${{ needs.prepare-container-images.outputs.image }}
@@ -225,7 +225,7 @@
          echo 'Image ${{ matrix.config.image }} is not cached.'
          false
     - name: Retrieve ccache cache
-      uses: actions/cache@v3
+      uses: actions/cache@v4
      with:
        path: ~/.ccache
        key: ${{ steps.get_keys.outputs.ccache }}-${{ github.ref }}
@@ -262,7 +262,7 @@
      run: docker kill dpdk
     - name: Upload logs on failure
      if: failure()
-      uses: actions/upload-artifact@v3
+      uses: actions/upload-artifact@v4
      with:
        name: ${{ steps.get_keys.outputs.logs }}
        path: |
diff -Nru dpdk-22.11.4/.mailmap dpdk-22.11.5/.mailmap
--- dpdk-22.11.4/.mailmap	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/.mailmap	2024-04-22 11:25:10.000000000 +0000
@@ -27,9 +27,11 @@
 Akash Saxena
 Akeem G Abodunrin
 Akhil Goyal
+Akshay Dorwat
 Alain Leon
 Alan Carew
 Alan Dewar
+Alan Elder
 Alan Liu
 Alan Winkowski
 Alejandro Lucero
@@ -354,6 +356,7 @@
 Elena Agostini
 Eli Britstein
 Elza Mathew
+Emi Aoki
 Emma Finn
 Emma Kenny
 Emmanuel Roullit
@@ -387,9 +390,11 @@
 Ferdinand Thiessen
 Ferruh Yigit
 Fidaullah Noonari
+Fidel Castro
 Fiona Trahe
 Flavia Musatescu
 Flavio Leitner
+Flore Norceide
 Forrest Shi
 Francesco Santoro
 Francis Kelly
@@ -501,6 +506,7 @@
 Hiroki Shirokura
 Hiroshi Shimamoto
 Hiroyuki Mikita
+Holly Nichols
 Hongbo Zheng
 Hongjun Ni
 Hongzhi Guo
@@ -652,7 +658,7 @@
 John Romein
 John W. Linville
 Jonas Pfefferle
-Jonathan Erb
+Jonathan Erb
 Jonathan Tsai
 Jon DeVree
 Jon Loeliger
@@ -754,6 +760,7 @@
 Levend Sayar
 Lev Faerman
 Lewei Yang
+Lewis Donzis
 Leyi Rong
 Liang Ma
 Liang-Min Larry Wang
@@ -858,6 +865,7 @@
 Martyna Szapar
 Maryam Tahhan
 Masoud Hasanifard
+Masoumeh Farhadi Nia
 Matan Azrad
 Matej Vido
 Mateusz Kowalski
@@ -1085,6 +1093,7 @@
 Przemyslaw Patynowski
 Przemyslaw Zegan
 Pu Xu <583493798@qq.com>
+Qian Hao
 Qian Xu
 Qiao Liu
 Qi Fu
@@ -1228,6 +1237,7 @@
 Shannon Nelson
 Shannon Zhao
 Shaopeng He
+Shaowei Sun <1819846787@qq.com>
 Sharmila Podury
 Sharon Haroni
 Shay Agroskin
@@ -1369,6 +1379,7 @@
 Timothy Miskell
 Timothy Redaelli
 Tim Shearer
+Ting-Kai Ku
 Ting Xu
 Tiwei Bie
 Todd Fujinaka
@@ -1382,6 +1393,7 @@
 Tomasz Zawadzki
 Tom Barbette
 Tom Crugnale
+Tom Jones
 Tom Millington
 Tom Rix
 Tone Zhang
@@ -1428,6 +1440,7 @@
 Vincent Jardin
 Vincent Li
 Vincent S. Cojot
+Vinh Tran
 Vipin Varghese
 Vipul Ashri
 Visa Hankala
@@ -1448,6 +1461,7 @@
 Wang Sheng-Hui
 Wangyu (Eric)
 Waterman Cao
+Wathsala Vithanage
 Weichun Chen
 Wei Dai
 Weifeng Li
diff -Nru dpdk-22.11.4/VERSION dpdk-22.11.5/VERSION
--- dpdk-22.11.4/VERSION	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/VERSION	2024-04-22 11:25:10.000000000 +0000
@@ -1 +1 @@
-22.11.4
+22.11.5
diff -Nru dpdk-22.11.4/app/dumpcap/main.c dpdk-22.11.5/app/dumpcap/main.c
--- dpdk-22.11.4/app/dumpcap/main.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/dumpcap/main.c	2024-04-22 11:25:10.000000000 +0000
@@ -546,6 +546,11 @@
         eal_argv[i++] = strdup(file_prefix);
     }
 
+    for (i = 0; i < (unsigned int)eal_argc; i++) {
+        if (eal_argv[i] == NULL)
+            rte_panic("No memory\n");
+    }
+
     if (rte_eal_init(eal_argc, eal_argv) < 0)
         rte_exit(EXIT_FAILURE,
              "EAL init failed: is primary process running?\n");
 }
diff -Nru dpdk-22.11.4/app/meson.build dpdk-22.11.5/app/meson.build
--- dpdk-22.11.4/app/meson.build	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/meson.build	2024-04-22 11:25:10.000000000 +0000
@@ -83,7 +83,7 @@
     if not build
         if reason != ''
             dpdk_apps_disabled += app
-            set_variable(app.underscorify() + '_disable_reason', reason)
+            set_variable('app_' + app.underscorify() + '_disable_reason', reason)
         endif
         continue
     endif
diff -Nru dpdk-22.11.4/app/pdump/main.c dpdk-22.11.5/app/pdump/main.c
--- dpdk-22.11.4/app/pdump/main.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/pdump/main.c	2024-04-22 11:25:10.000000000 +0000
@@ -171,6 +171,9 @@
     struct pdump_tuples *pt = extra_args;
 
     pt->device_id = strdup(value);
+    if (pt->device_id == NULL)
+        return -1;
+
     pt->dump_by_type = DEVICE_ID;
 
     return 0;
diff -Nru dpdk-22.11.4/app/test/process.h dpdk-22.11.5/app/test/process.h
--- dpdk-22.11.4/app/test/process.h	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/process.h	2024-04-22 11:25:10.000000000 +0000
@@ -17,6 +17,7 @@
 #include 
 #include  /* strlcpy */
+#include 
 
 #ifdef RTE_EXEC_ENV_FREEBSD
 #define self "curproc"
@@ -34,6 +35,34 @@
 #endif
 #endif
 
+#define PREFIX_ALLOW "--allow="
+
+static int
+add_parameter_allow(char **argv, int max_capacity)
+{
+    struct rte_devargs *devargs;
+    int count = 0;
+
+    RTE_EAL_DEVARGS_FOREACH(NULL, devargs) {
+        if (strlen(devargs->name) == 0)
+            continue;
+
+        if (devargs->data == NULL || strlen(devargs->data) == 0) {
+            if (asprintf(&argv[count], PREFIX_ALLOW"%s", devargs->name) < 0)
+                break;
+        } else {
+            if (asprintf(&argv[count], PREFIX_ALLOW"%s,%s",
+                    devargs->name, devargs->data) < 0)
+                break;
+        }
+
+        if (++count == max_capacity)
+            break;
+    }
+
+    return count;
+}
+
 /*
  * launches a second copy of the test process using the given argv parameters,
  * which should include argv[0] as the process name. To identify in the
@@ -43,8 +72,10 @@
 static inline int
 process_dup(const char *const argv[], int numargs, const char *env_value)
 {
-    int num;
-    char *argv_cpy[numargs + 1];
+    int num = 0;
+    char **argv_cpy;
+    int allow_num;
+    int argv_num;
     int i, status;
     char path[32];
 #ifdef RTE_LIB_PDUMP
@@ -58,11 +89,21 @@
     if (pid < 0)
         return -1;
     else if (pid == 0) {
+        allow_num = rte_devargs_type_count(RTE_DEVTYPE_ALLOWED);
+        argv_num = numargs + allow_num + 1;
+        argv_cpy = calloc(argv_num, sizeof(char *));
+        if (!argv_cpy)
+            rte_panic("Memory allocation failed\n");
+
         /* make a copy of the arguments to be passed to exec */
-        for (i = 0; i < numargs; i++)
+        for (i = 0; i < numargs; i++) {
             argv_cpy[i] = strdup(argv[i]);
-        argv_cpy[i] = NULL;
-        num = numargs;
+            if (argv_cpy[i] == NULL)
+                rte_panic("Error dup args\n");
+        }
+        if (allow_num > 0)
+            num = add_parameter_allow(&argv_cpy[i], allow_num);
+        num += numargs;
 
 #ifdef RTE_EXEC_ENV_LINUX
     {
diff -Nru dpdk-22.11.4/app/test/test.c dpdk-22.11.5/app/test/test.c
--- dpdk-22.11.4/app/test/test.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test.c	2024-04-22 11:25:10.000000000 +0000
@@ -343,11 +343,13 @@
 
         if (test_success == TEST_SUCCESS)
             suite->succeeded++;
-        else if (test_success == TEST_SKIPPED)
+        else if (test_success == TEST_SKIPPED) {
             suite->skipped++;
-        else if (test_success == -ENOTSUP)
+            suite->executed--;
+        } else if (test_success == -ENOTSUP) {
             suite->unsupported++;
-        else
+            suite->executed--;
+        } else
             suite->failed++;
     } else if (test_success == -ENOTSUP) {
         suite->unsupported++;
diff -Nru dpdk-22.11.4/app/test/test_bpf.c dpdk-22.11.5/app/test/test_bpf.c
--- dpdk-22.11.4/app/test/test_bpf.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test_bpf.c	2024-04-22 11:25:10.000000000 +0000
@@ -3341,6 +3341,7 @@
         struct rte_ipv4_hdr ip_hdr;
     } *hdr;
 
+    memset(&mb, 0, sizeof(mb));
     dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen);
     m = &mb;
diff -Nru dpdk-22.11.4/app/test/test_cfgfile.c dpdk-22.11.5/app/test/test_cfgfile.c
--- dpdk-22.11.4/app/test/test_cfgfile.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test_cfgfile.c	2024-04-22 11:25:10.000000000 +0000
@@ -168,7 +168,7 @@
     struct rte_cfgfile *cfgfile;
 
     cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/invalid_section.ini", 0);
-    TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+    TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
 
     return 0;
 }
@@ -185,7 +185,7 @@
 
     cfgfile = rte_cfgfile_load_with_params(CFG_FILES_ETC "/sample2.ini", 0,
                         &params);
-    TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+    TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
 
     return 0;
 }
@@ -196,7 +196,7 @@
     struct rte_cfgfile *cfgfile;
 
     cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty_key_value.ini", 0);
-    TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+    TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
 
     return 0;
 }
@@ -236,7 +236,7 @@
     struct rte_cfgfile *cfgfile;
 
     cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/missing_section.ini", 0);
-    TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+    TEST_ASSERT_NULL(cfgfile, "Expected failure did not occur");
 
     return 0;
 }
diff -Nru dpdk-22.11.4/app/test/test_event_eth_tx_adapter.c dpdk-22.11.5/app/test/test_event_eth_tx_adapter.c
--- dpdk-22.11.4/app/test/test_event_eth_tx_adapter.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test_event_eth_tx_adapter.c	2024-04-22 11:25:10.000000000 +0000
@@ -482,6 +482,10 @@
     int internal_port;
     uint32_t cap;
 
+    /* Initialize mbufs */
+    for (i = 0; i < RING_SIZE; i++)
+        rte_pktmbuf_reset(&bufs[i]);
+
     memset(&dev_conf, 0, sizeof(dev_conf));
     err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
                     &cap);
diff -Nru dpdk-22.11.4/app/test/test_eventdev.c dpdk-22.11.5/app/test/test_eventdev.c
--- dpdk-22.11.4/app/test/test_eventdev.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test_eventdev.c	2024-04-22 11:25:10.000000000 +0000
@@ -33,9 +33,15 @@
     uint8_t count;
     count = rte_event_dev_count();
     if (!count) {
+        int ret;
+
         printf("Failed to find a valid event device,"
-            " testing with event_skeleton device\n");
-        return rte_vdev_init("event_skeleton", NULL);
+            " trying with event_skeleton device\n");
+        ret = rte_vdev_init("event_skeleton", NULL);
+        if (ret != 0) {
+            printf("No event device, skipping\n");
+            return TEST_SKIPPED;
+        }
     }
     return TEST_SUCCESS;
 }
diff -Nru dpdk-22.11.4/app/test/test_mbuf.c dpdk-22.11.5/app/test/test_mbuf.c
--- dpdk-22.11.4/app/test/test_mbuf.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test_mbuf.c	2024-04-22 11:25:10.000000000 +0000
@@ -2345,16 +2345,13 @@
         GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
             __func__);
 
-    /* allocate one more mbuf */
+    /* allocate one more mbuf, it is attached to the same external buffer */
     clone = rte_pktmbuf_clone(m, pktmbuf_pool);
     if (clone == NULL)
         GOTO_FAIL("%s: mbuf clone allocation failed!\n", __func__);
     if (rte_pktmbuf_pkt_len(clone) != 0)
         GOTO_FAIL("%s: Bad packet length\n", __func__);
 
-    /* attach the same external buffer to the cloned mbuf */
-    rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
-            ret_shinfo);
     if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
         GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
             __func__);
diff -Nru dpdk-22.11.4/app/test/test_power.c dpdk-22.11.5/app/test/test_power.c
--- dpdk-22.11.4/app/test/test_power.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test/test_power.c	2024-04-22 11:25:10.000000000 +0000
@@ -142,7 +142,7 @@
     /* Test setting a valid environment */
     ret = rte_power_set_env(envs[i]);
     if (ret != 0) {
-        printf("Unexpectedly unsucceeded on setting a valid environment\n");
+        printf("Unexpectedly unsuccessful on setting a valid environment\n");
         return -1;
     }
diff -Nru dpdk-22.11.4/app/test-crypto-perf/cperf_options_parsing.c dpdk-22.11.5/app/test-crypto-perf/cperf_options_parsing.c
--- dpdk-22.11.4/app/test-crypto-perf/cperf_options_parsing.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-crypto-perf/cperf_options_parsing.c	2024-04-22 11:25:10.000000000 +0000
@@ -516,6 +516,10 @@
     const char *arg)
 {
     opts->test_file = strdup(arg);
+    if (opts->test_file == NULL) {
+        RTE_LOG(ERR, USER1, "Dup vector file failed!\n");
+        return -1;
+    }
     if (access(opts->test_file, F_OK) != -1)
         return 0;
     RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n");
diff -Nru dpdk-22.11.4/app/test-crypto-perf/cperf_test_common.c dpdk-22.11.5/app/test-crypto-perf/cperf_test_common.c
--- dpdk-22.11.4/app/test-crypto-perf/cperf_test_common.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-crypto-perf/cperf_test_common.c	2024-04-22 11:25:10.000000000 +0000
@@ -49,7 +49,6 @@
 {
     uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
     uint16_t remaining_segments = segments_nb;
-    struct rte_mbuf *next_mbuf;
     rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
              mbuf_offset + mbuf_hdr_size;
@@ -70,15 +69,15 @@
         m->nb_segs = segments_nb;
         m->port = 0xff;
         rte_mbuf_refcnt_set(m, 1);
-        next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
-                    mbuf_hdr_size + segment_sz);
-        m->next = next_mbuf;
-        m = next_mbuf;
-        remaining_segments--;
 
+        remaining_segments--;
+        if (remaining_segments > 0) {
+            m->next = (struct rte_mbuf *)((uint8_t *) m + mbuf_hdr_size + segment_sz);
+            m = m->next;
+        } else {
+            m->next = NULL;
+        }
     } while (remaining_segments > 0);
-
-    m->next = NULL;
 }
 
 static void
@@ -227,7 +226,8 @@
             (mbuf_size * segments_nb);
         params.dst_buf_offset = *dst_buf_offset;
         /* Destination buffer will be one segment only */
-        obj_size += max_size + sizeof(struct rte_mbuf);
+        obj_size += max_size + sizeof(struct rte_mbuf) +
+            options->headroom_sz + options->tailroom_sz;
     }
 
     *pool = rte_mempool_create_empty(pool_name,
@@ -269,7 +269,7 @@
         const struct cperf_options *options,
         const struct cperf_test_vector *test_vector)
 {
-    uint32_t segment_sz = options->segment_sz;
+    uint32_t segment_sz = options->segment_sz - options->headroom_sz - options->tailroom_sz;
     uint8_t *mbuf_data;
     uint8_t *test_data;
     uint32_t remaining_bytes = options->max_buffer_size;
diff -Nru dpdk-22.11.4/app/test-crypto-perf/cperf_test_verify.c dpdk-22.11.5/app/test-crypto-perf/cperf_test_verify.c
--- dpdk-22.11.4/app/test-crypto-perf/cperf_test_verify.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-crypto-perf/cperf_test_verify.c	2024-04-22 11:25:10.000000000 +0000
@@ -111,8 +111,10 @@
     uint32_t len;
     uint16_t nb_segs;
     uint8_t *data;
-    uint32_t cipher_offset, auth_offset;
-    uint8_t cipher, auth;
+    uint32_t cipher_offset, auth_offset = 0;
+    bool cipher = false;
+    bool digest_verify = false;
+    bool is_encrypt = false;
     int res = 0;
 
     if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
@@ -150,57 +152,54 @@
 
     switch (options->op_type) {
     case CPERF_CIPHER_ONLY:
-        cipher = 1;
+        cipher = true;
         cipher_offset = 0;
-        auth = 0;
-        auth_offset = 0;
-        break;
-    case CPERF_CIPHER_THEN_AUTH:
-        cipher = 1;
-        cipher_offset = 0;
-        auth = 1;
-        auth_offset = options->test_buffer_size;
+        is_encrypt = options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT;
         break;
     case CPERF_AUTH_ONLY:
-        cipher = 0;
         cipher_offset = 0;
-        auth = 1;
-        auth_offset = options->test_buffer_size;
+        if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+            auth_offset = options->test_buffer_size;
+            digest_verify = true;
+        }
         break;
+    case CPERF_CIPHER_THEN_AUTH:
     case CPERF_AUTH_THEN_CIPHER:
-        cipher = 1;
+        cipher = true;
         cipher_offset = 0;
-        auth = 1;
-        auth_offset = options->test_buffer_size;
+        if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+            auth_offset = options->test_buffer_size;
+            digest_verify = true;
+            is_encrypt = true;
+        }
         break;
     case CPERF_AEAD:
-        cipher = 1;
+        cipher = true;
         cipher_offset = 0;
-        auth = 1;
-        auth_offset = options->test_buffer_size;
+        if (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+            auth_offset = options->test_buffer_size;
+            digest_verify = true;
+            is_encrypt = true;
+        }
         break;
     default:
         res = 1;
         goto out;
     }
 
-    if (cipher == 1) {
-        if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
-            res += memcmp(data + cipher_offset,
+    if (cipher) {
+        if (is_encrypt)
+            res += !!memcmp(data + cipher_offset,
                     vector->ciphertext.data,
                     options->test_buffer_size);
         else
-            res += memcmp(data + cipher_offset,
+            res += !!memcmp(data + cipher_offset,
                     vector->plaintext.data,
                     options->test_buffer_size);
     }
 
-    if (auth == 1) {
-        if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
-            res += memcmp(data + auth_offset,
-                    vector->digest.data,
-                    options->digest_sz);
-    }
+    if (digest_verify)
+        res += !!memcmp(data + auth_offset, vector->digest.data, options->digest_sz);
 
 out:
     rte_free(data);
@@ -276,7 +275,6 @@
                 ops_needed, ctx->sess, ctx->options,
                 ctx->test_vector, iv_offset, &imix_idx, NULL);
 
-        /* Populate the mbuf with the test vector, for verification */
         for (i = 0; i < ops_needed; i++)
             cperf_mbuf_set(ops[i]->sym->m_src,
@@ -294,6 +292,17 @@
         }
 #endif /* CPERF_LINEARIZATION_ENABLE */
 
+        /**
+         * When ops_needed is smaller than ops_enqd, the
+         * unused ops need to be moved to the front for
+         * next round use.
+         */
+        if (unlikely(ops_enqd > ops_needed)) {
+            size_t nb_b_to_mov = ops_unused * sizeof(struct rte_crypto_op *);
+
+            memmove(&ops[ops_needed], &ops[ops_enqd], nb_b_to_mov);
+        }
+
         /* Enqueue burst of ops on crypto device */
         ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
                 ops, burst_size);
diff -Nru dpdk-22.11.4/app/test-pmd/cmdline.c dpdk-22.11.5/app/test-pmd/cmdline.c
--- dpdk-22.11.4/app/test-pmd/cmdline.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-pmd/cmdline.c	2024-04-22 11:25:10.000000000 +0000
@@ -3454,6 +3454,8 @@
     nb_item = 0;
     char *str2 = strdup(str);
+    if (str2 == NULL)
+        return nb_item;
     cur = strtok_r(str2, ",", &tmp);
     while (cur != NULL) {
         parsed_items[nb_item] = get_ptype(cur);
diff -Nru dpdk-22.11.4/app/test-pmd/config.c dpdk-22.11.5/app/test-pmd/config.c
--- dpdk-22.11.4/app/test-pmd/config.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-pmd/config.c	2024-04-22 11:25:10.000000000 +0000
@@ -2700,8 +2700,7 @@
     flow = rte_flow_async_create(port_id, queue_id, &op_attr, pt->table,
         pattern, pattern_idx, actions, actions_idx, job, &error);
     if (!flow) {
-        uint32_t flow_id = pf->id;
-        port_queue_flow_destroy(port_id, queue_id, true, 1, &flow_id);
+        free(pf);
         free(job);
         return port_flow_complain(&error);
     }
@@ -4428,7 +4427,6 @@
     queueid_t nb_q;
     streamid_t sm_id;
     int start;
-    int end;
 
     nb_q = nb_rxq;
     if (nb_q > nb_txq)
@@ -4436,7 +4434,7 @@
     cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
     cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
     cur_fwd_config.nb_fwd_streams =
-        (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
+        (streamid_t) (nb_q / num_procs * cur_fwd_config.nb_fwd_ports);
 
     if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
         cur_fwd_config.nb_fwd_lcores =
@@ -4458,7 +4456,6 @@
      * the 2~3 queue for secondary process.
      */
     start = proc_id * nb_q / num_procs;
-    end = start + nb_q / num_procs;
     rxp = 0;
     rxq = start;
     for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
@@ -4477,8 +4474,6 @@
             continue;
         rxp = 0;
         rxq++;
-        if (rxq >= end)
-            rxq = start;
     }
 }
diff -Nru dpdk-22.11.4/app/test-pmd/csumonly.c dpdk-22.11.5/app/test-pmd/csumonly.c
--- dpdk-22.11.4/app/test-pmd/csumonly.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-pmd/csumonly.c	2024-04-22 11:25:10.000000000 +0000
@@ -867,17 +867,29 @@
     nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
             nb_pkt_per_burst);
     inc_rx_burst_stats(fs, nb_rx);
-    if (unlikely(nb_rx == 0))
-        return;
+    if (unlikely(nb_rx == 0)) {
+#ifndef RTE_LIB_GRO
+        return ;
+#else
+        gro_enable = gro_ports[fs->rx_port].enable;
+        /*
+         * Check if packets need to be flushed in the GRO context
+         * due to a timeout.
+         *
+         * Continue only in GRO heavyweight mode and if there are
+         * packets in the GRO context.
+         */
+        if (!gro_enable || (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) ||
+            (rte_gro_get_pkt_count(current_fwd_lcore()->gro_ctx) == 0))
+            return ;
+#endif
+    }
     fs->rx_packets += nb_rx;
     rx_bad_ip_csum = 0;
     rx_bad_l4_csum = 0;
     rx_bad_outer_l4_csum = 0;
     rx_bad_outer_ip_csum = 0;
-#ifdef RTE_LIB_GRO
-    gro_enable = gro_ports[fs->rx_port].enable;
-#endif
 
     txp = &ports[fs->tx_port];
     tx_offloads = txp->dev_conf.txmode.offloads;
@@ -1105,6 +1117,7 @@
     }
 
 #ifdef RTE_LIB_GRO
+    gro_enable = gro_ports[fs->rx_port].enable;
     if (unlikely(gro_enable)) {
         if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
             nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
@@ -1124,6 +1137,8 @@
                         gro_pkts_num);
             fs->gro_times = 0;
         }
+        if (nb_rx == 0)
+            return;
     }
 
     pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads);
diff -Nru dpdk-22.11.4/app/test-pmd/parameters.c dpdk-22.11.5/app/test-pmd/parameters.c
--- dpdk-22.11.4/app/test-pmd/parameters.c	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/app/test-pmd/parameters.c	2024-04-22 11:25:10.000000000 +0000
@@ -101,10 +101,6 @@
     printf(" --eth-peer=X,M:M:M:M:M:M: set the MAC address of the X peer "
            "port (0 <= X < %d).\n", RTE_MAX_ETHPORTS);
 #endif
-#ifdef RTE_LIB_LATENCYSTATS
-    printf(" --latencystats=N: enable latency and jitter statistics "
-           "monitoring on forwarding lcore id N.\n");
-#endif
     printf(" --disable-crc-strip: disable CRC stripping by hardware.\n");
     printf(" --enable-scatter: enable scattered Rx.\n");
     printf(" --enable-lro: enable large receive offload.\n");
@@ -167,8 +163,14 @@
     printf(" --disable-device-start: do not automatically start port\n");
     printf(" --no-lsc-interrupt: disable link status change interrupt.\n");
     printf(" --no-rmv-interrupt: disable device removal interrupt.\n");
+#ifdef RTE_LIB_BITRATESTATS
     printf(" --bitrate-stats=N: set the logical core N to perform "
            "bit-rate calculation.\n");
+#endif
+#ifdef RTE_LIB_LATENCYSTATS
+    printf(" --latencystats=N: enable latency and jitter statistics "
+           "monitoring on forwarding lcore id N.\n");
+#endif
     printf(" --print-event : "
            "enable print of designated event or all of them.\n");
     printf(" --mask-event : "
@@ -761,7 +763,7 @@
                 n = strtoul(optarg, &end, 10);
                 if ((optarg[0] == '\0') || (end == NULL) ||
                     (*end != '\0'))
-                    break;
+                    rte_exit(EXIT_FAILURE, "Invalid stats-period value\n");
 
                 stats_period = n;
                 break;
@@ -1113,7 +1115,9 @@
                             0,
                             &dev_info);
                 if (ret != 0)
-                    return;
+                    rte_exit(EXIT_FAILURE, "Failed to get driver "
+                        "recommended burst size, please provide a "
+                        "value between 1 and %d\n", MAX_PKT_BURST);
 
                 rec_nb_pkts = dev_info
                     .default_rxportconf.burst_size;
@@ -1465,7 +1469,7 @@
             break;
         default:
             usage(argv[0]);
-            fprintf(stderr, "Invalid option: %s\n", argv[optind]);
+            fprintf(stderr, "Invalid option: %s\n", argv[optind - 1]);
             rte_exit(EXIT_FAILURE,
                  "Command line is incomplete or incorrect\n");
             break;
diff -Nru dpdk-22.11.4/buildtools/subproject/meson.build dpdk-22.11.5/buildtools/subproject/meson.build
--- dpdk-22.11.4/buildtools/subproject/meson.build	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/buildtools/subproject/meson.build	2024-04-22 11:25:10.000000000 +0000
@@ -2,18 +2,23 @@
 # Copyright(c) 2022 Intel Corporation
 
 message('DPDK subproject linking: ' + get_option('default_library'))
+subproject_cflags = ['-include', 'rte_config.h'] + machine_args
+if is_freebsd
+    subproject_cflags += ['-D__BSD_VISIBLE']
+endif
 if get_option('default_library') == 'static'
     dpdk_dep = declare_dependency(
             version: meson.project_version(),
             dependencies: dpdk_static_lib_deps,
+            compile_args: subproject_cflags,
             # static library deps in DPDK build don't include "link_with" parameters,
             # so explicitly link-in both libs and drivers
-            link_with: dpdk_static_libraries,
-            link_whole: dpdk_drivers,
+            link_whole: dpdk_static_libraries + dpdk_drivers,
             link_args: dpdk_extra_ldflags)
 else
     dpdk_dep = declare_dependency(
             version: meson.project_version(),
+            compile_args: subproject_cflags,
            # shared library deps include all necessary linking parameters
            dependencies: dpdk_shared_lib_deps)
 endif
diff -Nru dpdk-22.11.4/config/meson.build dpdk-22.11.5/config/meson.build
--- dpdk-22.11.4/config/meson.build	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/config/meson.build	2024-04-22 11:25:10.000000000 +0000
@@ -91,13 +91,14 @@
             cpu_instruction_set = 'generic'
         endif
     endif
+    if platform == 'native'
+        if cpu_instruction_set == 'auto'
+            cpu_instruction_set = 'native'
+        endif
+    endif
 endif
 
-if platform == 'native'
-    if cpu_instruction_set == 'auto'
-        cpu_instruction_set = 'native'
-    endif
-elif platform == 'generic'
+if platform == 'generic'
     if cpu_instruction_set == 'auto'
         cpu_instruction_set = 'generic'
     endif
@@ -146,6 +147,9 @@
 
 if not is_windows
     add_project_link_arguments('-Wl,--no-as-needed', language: 'c')
+    if cc.has_link_argument('-Wl,--undefined-version')
+        add_project_link_arguments('-Wl,--undefined-version', language: 'c')
+    endif
 endif
 
 # use pthreads if available for the platform
@@ -209,6 +213,11 @@
 libarchive = dependency('libarchive', required: false, method: 'pkg-config')
 if libarchive.found()
     dpdk_conf.set('RTE_HAS_LIBARCHIVE', 1)
+    # Push libarchive link dependency at the project level to support
+    # statically linking dpdk apps. Details at:
+    # https://inbox.dpdk.org/dev/20210605004024.660267a1@sovereign/
+    add_project_link_arguments('-larchive', language: 'c')
+    dpdk_extra_ldflags += '-larchive'
 endif
 
 # check for libbsd
diff -Nru dpdk-22.11.4/debian/changelog dpdk-22.11.5/debian/changelog
--- dpdk-22.11.4/debian/changelog	2024-01-24 13:53:28.000000000 +0000
+++ dpdk-22.11.5/debian/changelog	2024-04-23 08:37:59.000000000 +0000
@@ -1,3 +1,11 @@
+dpdk (22.11.5-1~deb12u1) bookworm; urgency=medium
+
+  * New upstream release 22.11.5; for a full list of changes see:
+    http://doc.dpdk.org/guides-22.11/rel_notes/release_22_11.html
+  * Update symbols file for private symbols change
+
+ -- Luca Boccassi  Tue, 23 Apr 2024 09:37:59 +0100
+
 dpdk (22.11.4-1~deb12u1) bookworm; urgency=medium
 
   * New upstream release 22.11.4; for a full list of changes see:
diff -Nru dpdk-22.11.4/debian/librte-common-cnxk23.symbols dpdk-22.11.5/debian/librte-common-cnxk23.symbols
--- dpdk-22.11.4/debian/librte-common-cnxk23.symbols	2024-01-24 13:53:18.000000000 +0000
+++ dpdk-22.11.5/debian/librte-common-cnxk23.symbols	2024-04-23 08:37:56.000000000 +0000
@@ -17,10 +17,10 @@
  cnxk_logtype_tm@INTERNAL 21.08
  cnxk_on_ipsec_inb_sa_create@INTERNAL 22.11
  cnxk_on_ipsec_outb_sa_create@INTERNAL 22.11
- cnxk_onf_ipsec_inb_sa_fill@INTERNAL 21.11
- cnxk_onf_ipsec_inb_sa_valid@INTERNAL 21.11
- cnxk_onf_ipsec_outb_sa_fill@INTERNAL 21.11
- cnxk_onf_ipsec_outb_sa_valid@INTERNAL 21.11
+#MISSING: 22.11.5# cnxk_onf_ipsec_inb_sa_fill@INTERNAL 21.11
+#MISSING: 22.11.5# cnxk_onf_ipsec_inb_sa_valid@INTERNAL 21.11
+#MISSING: 22.11.5# cnxk_onf_ipsec_outb_sa_fill@INTERNAL 21.11
+#MISSING: 22.11.5# cnxk_onf_ipsec_outb_sa_valid@INTERNAL 21.11
  cnxk_ot_ipsec_inb_sa_fill@INTERNAL 21.08
  cnxk_ot_ipsec_inb_sa_valid@INTERNAL 21.08
  cnxk_ot_ipsec_outb_sa_fill@INTERNAL 21.08
diff -Nru dpdk-22.11.4/doc/guides/bbdevs/fpga_5gnr_fec.rst dpdk-22.11.5/doc/guides/bbdevs/fpga_5gnr_fec.rst
--- dpdk-22.11.4/doc/guides/bbdevs/fpga_5gnr_fec.rst	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/doc/guides/bbdevs/fpga_5gnr_fec.rst	2024-04-22 11:25:10.000000000 +0000
@@ -165,7 +165,6 @@
       uint8_t dl_bandwidth;
       uint8_t ul_load_balance;
       uint8_t dl_load_balance;
-      uint16_t flr_time_out;
   };
 
 - ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and
@@ -191,10 +190,6 @@
   If all hardware queues exceeds the watermark, no code blocks will be
   streamed in from UL/DL code block FIFO.
 
-- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The
-  time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for
-  the FLR time out then set this setting to 0x262=610.
-
 An example configuration code calling the function ``rte_fpga_5gnr_fec_configure()`` is shown
 below:
 
@@ -219,7 +214,7 @@
     /* setup FPGA PF */
     ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf);
     TEST_ASSERT_SUCCESS(ret,
-        "Failed to configure 4G FPGA PF for bbdev %s",
+        "Failed to configure 5GNR FPGA PF for bbdev %s",
         info->dev_name);
 
diff -Nru dpdk-22.11.4/doc/guides/cryptodevs/overview.rst dpdk-22.11.5/doc/guides/cryptodevs/overview.rst
--- dpdk-22.11.4/doc/guides/cryptodevs/overview.rst	2024-01-23 15:08:10.000000000 +0000
+++ dpdk-22.11.5/doc/guides/cryptodevs/overview.rst	2024-04-22 11:25:10.000000000 +0000
@@ -20,17 +20,17 @@
 - "OOP SGL In SGL Out" feature flag stands for
   "Out-of-place Scatter-gather list Input, Scatter-gather list Output",
   which means PMD supports different scatter-gather styled input and output buffers
-  (i.e. both can consists of multiple segments).
+  (i.e. both can consist of multiple segments).
 
 - "OOP SGL In LB Out" feature flag stands for
   "Out-of-place Scatter-gather list Input, Linear Buffers Output",
-  which means PMD supports input from scatter-gathered styled buffers,
+  which means PMD supports input from scatter-gather styled buffers,
   outputting linear buffers (i.e. single segment).
 
 - "OOP LB In SGL Out" feature flag stands for
   "Out-of-place Linear Buffers Input, Scatter-gather list Output",
   which means PMD supports input from linear buffer, outputting
-  scatter-gathered styled buffers.
+  scatter-gather styled buffers.
- "OOP LB In LB Out" feature flag stands for "Out-of-place Linear Buffers Input, Linear Buffers Output", diff -Nru dpdk-22.11.4/doc/guides/nics/features/atlantic.ini dpdk-22.11.5/doc/guides/nics/features/atlantic.ini --- dpdk-22.11.4/doc/guides/nics/features/atlantic.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/atlantic.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Queue start/stop = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/bnxt.ini dpdk-22.11.5/doc/guides/nics/features/bnxt.ini --- dpdk-22.11.4/doc/guides/nics/features/bnxt.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/bnxt.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/cnxk.ini dpdk-22.11.5/doc/guides/nics/features/cnxk.ini --- dpdk-22.11.4/doc/guides/nics/features/cnxk.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/cnxk.ini 2024-04-22 11:25:10.000000000 +0000 @@ -28,6 +28,7 @@ RSS reta update = Y Inner RSS = Y Congestion management = Y +Traffic manager = Y Inline protocol = Y Flow control = Y Scattered Rx = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/default.ini dpdk-22.11.5/doc/guides/nics/features/default.ini --- dpdk-22.11.4/doc/guides/nics/features/default.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/default.ini 2024-04-22 11:25:10.000000000 +0000 @@ -8,6 +8,7 @@ ; [Features] Speed capabilities = +Link speed configuration = Link status = Link status event = Removal event = @@ -42,6 +43,7 @@ Flow control = Rate limitation = Congestion management = +Traffic manager = Inline crypto = Inline protocol = CRC offload = diff -Nru dpdk-22.11.4/doc/guides/nics/features/dpaa.ini dpdk-22.11.5/doc/guides/nics/features/dpaa.ini --- dpdk-22.11.4/doc/guides/nics/features/dpaa.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/dpaa.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Burst mode info = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/dpaa2.ini dpdk-22.11.5/doc/guides/nics/features/dpaa2.ini --- dpdk-22.11.4/doc/guides/nics/features/dpaa2.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/dpaa2.ini 2024-04-22 11:25:10.000000000 +0000 @@ -17,6 +17,7 @@ RSS hash = Y VLAN filter = Y Flow control = Y +Traffic manager = Y VLAN offload = Y L3 checksum offload = Y L4 checksum offload = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/hns3.ini dpdk-22.11.5/doc/guides/nics/features/hns3.ini --- dpdk-22.11.4/doc/guides/nics/features/hns3.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/hns3.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y @@ -28,6 +29,7 @@ DCB = Y VLAN filter = Y Flow control = Y +Traffic manager = Y CRC offload = Y VLAN offload = Y FEC = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/i40e.ini dpdk-22.11.5/doc/guides/nics/features/i40e.ini --- dpdk-22.11.4/doc/guides/nics/features/i40e.ini 2024-01-23 15:08:10.000000000 +0000 +++ 
dpdk-22.11.5/doc/guides/nics/features/i40e.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y @@ -27,6 +28,7 @@ DCB = Y VLAN filter = Y Flow control = Y +Traffic manager = Y CRC offload = Y VLAN offload = Y QinQ offload = P diff -Nru dpdk-22.11.4/doc/guides/nics/features/iavf.ini dpdk-22.11.5/doc/guides/nics/features/iavf.ini --- dpdk-22.11.4/doc/guides/nics/features/iavf.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/iavf.ini 2024-04-22 11:25:10.000000000 +0000 @@ -20,6 +20,8 @@ RSS key update = Y RSS reta update = Y VLAN filter = Y +Traffic manager = Y +Inline crypto = Y CRC offload = Y VLAN offload = P L3 checksum offload = P @@ -30,7 +32,6 @@ Packet type parsing = Y Rx descriptor status = Y Tx descriptor status = Y -Inline crypto = Y Basic stats = Y Multiprocess aware = Y FreeBSD = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/ice.ini dpdk-22.11.5/doc/guides/nics/features/ice.ini --- dpdk-22.11.4/doc/guides/nics/features/ice.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/ice.ini 2024-04-22 11:25:10.000000000 +0000 @@ -8,6 +8,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y @@ -26,6 +27,7 @@ RSS key update = Y RSS reta update = Y VLAN filter = Y +Traffic manager = Y CRC offload = Y VLAN offload = Y QinQ offload = P diff -Nru dpdk-22.11.4/doc/guides/nics/features/ice_dcf.ini dpdk-22.11.5/doc/guides/nics/features/ice_dcf.ini --- dpdk-22.11.4/doc/guides/nics/features/ice_dcf.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/ice_dcf.ini 2024-04-22 11:25:10.000000000 +0000 @@ -22,6 +22,7 @@ Allmulticast mode = Y Unicast MAC filter = Y VLAN filter = Y +Traffic manager = Y VLAN offload = Y Extended stats = Y Basic stats = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/igb.ini dpdk-22.11.5/doc/guides/nics/features/igb.ini --- dpdk-22.11.4/doc/guides/nics/features/igb.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/igb.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = P +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/igc.ini dpdk-22.11.5/doc/guides/nics/features/igc.ini --- dpdk-22.11.4/doc/guides/nics/features/igc.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/igc.ini 2024-04-22 11:25:10.000000000 +0000 @@ -4,6 +4,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y FW version = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/ionic.ini dpdk-22.11.5/doc/guides/nics/features/ionic.ini --- dpdk-22.11.4/doc/guides/nics/features/ionic.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/ionic.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Fast mbuf free = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/ipn3ke.ini dpdk-22.11.5/doc/guides/nics/features/ipn3ke.ini --- dpdk-22.11.4/doc/guides/nics/features/ipn3ke.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/ipn3ke.ini 2024-04-22 11:25:10.000000000 +0000 @@ -25,6 +25,7 @@ DCB = Y VLAN filter = Y Flow control = Y +Traffic 
manager = Y CRC offload = Y VLAN offload = Y QinQ offload = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/ixgbe.ini dpdk-22.11.5/doc/guides/nics/features/ixgbe.ini --- dpdk-22.11.4/doc/guides/nics/features/ixgbe.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/ixgbe.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y @@ -27,6 +28,7 @@ VLAN filter = Y Flow control = Y Rate limitation = Y +Traffic manager = Y Inline crypto = Y CRC offload = P VLAN offload = P diff -Nru dpdk-22.11.4/doc/guides/nics/features/mvpp2.ini dpdk-22.11.5/doc/guides/nics/features/mvpp2.ini --- dpdk-22.11.4/doc/guides/nics/features/mvpp2.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/mvpp2.ini 2024-04-22 11:25:10.000000000 +0000 @@ -12,8 +12,9 @@ Unicast MAC filter = Y Multicast MAC filter = Y RSS hash = Y -Flow control = Y VLAN filter = Y +Flow control = Y +Traffic manager = Y CRC offload = Y L3 checksum offload = Y L4 checksum offload = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/ngbe.ini dpdk-22.11.5/doc/guides/nics/features/ngbe.ini --- dpdk-22.11.4/doc/guides/nics/features/ngbe.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/ngbe.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Free Tx mbuf on demand = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/octeontx.ini dpdk-22.11.5/doc/guides/nics/features/octeontx.ini --- dpdk-22.11.4/doc/guides/nics/features/octeontx.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/octeontx.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Lock-free Tx queue = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/sfc.ini dpdk-22.11.5/doc/guides/nics/features/sfc.ini --- dpdk-22.11.4/doc/guides/nics/features/sfc.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/sfc.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/thunderx.ini dpdk-22.11.5/doc/guides/nics/features/thunderx.ini --- dpdk-22.11.4/doc/guides/nics/features/thunderx.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/thunderx.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Queue start/stop = Y diff -Nru dpdk-22.11.4/doc/guides/nics/features/txgbe.ini dpdk-22.11.5/doc/guides/nics/features/txgbe.ini --- dpdk-22.11.4/doc/guides/nics/features/txgbe.ini 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features/txgbe.ini 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ ; [Features] Speed capabilities = Y +Link speed configuration = Y Link status = Y Link status event = Y Rx interrupt = Y @@ -26,6 +27,7 @@ VLAN filter = Y Flow control = Y Rate limitation = Y +Traffic manager = Y Inline crypto = Y CRC offload = P VLAN offload = P diff -Nru dpdk-22.11.4/doc/guides/nics/features.rst dpdk-22.11.5/doc/guides/nics/features.rst --- dpdk-22.11.4/doc/guides/nics/features.rst 2024-01-23 
15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/features.rst 2024-04-22 11:25:10.000000000 +0000 @@ -34,6 +34,17 @@ * **[related] API**: ``rte_eth_dev_info_get()``. +.. _nic_features_link_speeds_config: + +Link speed configuration +------------------------ + +Supports configurating fixed speed and link autonegotiation. + +* **[uses] user config**: ``dev_conf.link_speeds:RTE_ETH_LINK_SPEED_*``. +* **[related] API**: ``rte_eth_dev_configure()``. + + .. _nic_features_link_status: Link status @@ -740,6 +751,19 @@ ``rte_eth_cman_config_set()``, ``rte_eth_cman_config_get()``. +.. _nic_features_traffic_manager: + +Traffic manager +--------------- + +Supports Traffic manager. + +* **[implements] rte_tm_ops**: ``capabilities_get``, ``shaper_profile_add``, + ``hierarchy_commit`` and so on. +* **[related] API**: ``rte_tm_capabilities_get()``, ``rte_tm_shaper_profile_add()``, + ``rte_tm_hierarchy_commit()`` and so on. + + .. _nic_features_fw_version: FW version diff -Nru dpdk-22.11.4/doc/guides/nics/hns3.rst dpdk-22.11.5/doc/guides/nics/hns3.rst --- dpdk-22.11.4/doc/guides/nics/hns3.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/hns3.rst 2024-04-22 11:25:10.000000000 +0000 @@ -6,7 +6,7 @@ The hns3 PMD (**librte_net_hns3**) provides poll mode driver support for the inbuilt HiSilicon Network Subsystem(HNS) network engine -found in the HiSilicon Kunpeng 920 SoC and Kunpeng 930 SoC . +found in the HiSilicon Kunpeng 920 SoC (HIP08) and Kunpeng 930 SoC (HIP09/HIP10). Features -------- diff -Nru dpdk-22.11.4/doc/guides/nics/mlx5.rst dpdk-22.11.5/doc/guides/nics/mlx5.rst --- dpdk-22.11.4/doc/guides/nics/mlx5.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/nics/mlx5.rst 2024-04-22 11:25:10.000000000 +0000 @@ -561,8 +561,8 @@ - Cannot co-exist with ASO meter, ASO age action in a single flow rule. - Flow rules insertion rate and memory consumption need more optimization. - - 256 ports maximum. - - 4M connections maximum with ``dv_flow_en`` 1 mode. 16M with ``dv_flow_en`` 2. + - 16 ports maximum. + - 32M connections maximum. - Multi-thread flow insertion: diff -Nru dpdk-22.11.4/doc/guides/platform/mlx5.rst dpdk-22.11.5/doc/guides/platform/mlx5.rst --- dpdk-22.11.4/doc/guides/platform/mlx5.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/platform/mlx5.rst 2024-04-22 11:25:10.000000000 +0000 @@ -228,7 +228,7 @@ The DevX SDK must be installed on the machine building the Windows PMD. Additional information can be found at `How to Integrate Windows DevX in Your Development Environment -`_. +`_. The minimal supported WinOF2 version is 2.60. diff -Nru dpdk-22.11.4/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst dpdk-22.11.5/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst --- dpdk-22.11.4/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/prog_guide/ip_fragment_reassembly_lib.rst 2024-04-22 11:25:10.000000000 +0000 @@ -43,7 +43,7 @@ So if different execution contexts (threads/processes) will access the same table simultaneously, then some external syncing mechanism have to be provided. -Each table entry can hold information about packets consisting of up to RTE_LIBRTE_IP_FRAG_MAX (by default: 4) fragments. +Each table entry can hold information about packets consisting of up to RTE_LIBRTE_IP_FRAG_MAX (by default: 8) fragments. 
Code example, that demonstrates creation of a new Fragment table: diff -Nru dpdk-22.11.4/doc/guides/prog_guide/packet_framework.rst dpdk-22.11.5/doc/guides/prog_guide/packet_framework.rst --- dpdk-22.11.4/doc/guides/prog_guide/packet_framework.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/prog_guide/packet_framework.rst 2024-04-22 11:25:10.000000000 +0000 @@ -512,7 +512,7 @@ This is because the cost of L2/L3 cache memory miss on memory read accesses is high, as usually due to data dependency between instructions, the CPU execution units have to stall until the read operation is completed from L3 cache memory or external DRAM memory. By using prefetch instructions, the latency of memory read accesses is hidden, -provided that it is preformed early enough before the respective data structure is actually used. +provided that it is performed early enough before the respective data structure is actually used. By splitting the processing into several stages that are executed on different packets (the packets from the input burst are interlaced), enough work is created to allow the prefetch instructions to complete successfully (before the prefetched data structures are actually accessed) and diff -Nru dpdk-22.11.4/doc/guides/prog_guide/profile_app.rst dpdk-22.11.5/doc/guides/prog_guide/profile_app.rst --- dpdk-22.11.4/doc/guides/prog_guide/profile_app.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/prog_guide/profile_app.rst 2024-04-22 11:25:10.000000000 +0000 @@ -59,7 +59,7 @@ specific PMU (Performance Monitor Unit) events through raw events (``-e`` ``-rXX``). -For more derails refer to the +For more details refer to the `ARM64 specific PMU events enumeration `_. diff -Nru dpdk-22.11.4/doc/guides/rel_notes/release_22_11.rst dpdk-22.11.5/doc/guides/rel_notes/release_22_11.rst --- dpdk-22.11.4/doc/guides/rel_notes/release_22_11.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/rel_notes/release_22_11.rst 2024-04-22 11:25:10.000000000 +0000 @@ -1896,3 +1896,325 @@ ~~~~~~~~~~~~~~~~~~~~ + +22.11.5 Release Notes +--------------------- + + +22.11.5 Fixes +~~~~~~~~~~~~~ + +* app/crypto-perf: add missing op resubmission +* app/crypto-perf: fix copy segment size +* app/crypto-perf: fix data comparison +* app/crypto-perf: fix encrypt operation verification +* app/crypto-perf: fix next segment mbuf +* app/crypto-perf: fix out-of-place mbuf size +* app/crypto-perf: verify strdup return +* app/dumpcap: verify strdup return +* app/pdump: verify strdup return +* app/testpmd: fix async flow create failure handling +* app/testpmd: fix burst option parsing +* app/testpmd: fix crash in multi-process forwarding +* app/testpmd: fix error message for invalid option +* app/testpmd: fix GRO packets flush on timeout +* app/testpmd: fix --stats-period option check +* app/testpmd: hide --bitrate-stats in help if disabled +* app/testpmd: return if no packets in GRO heavy weight mode +* app/testpmd: verify strdup return +* baseband/acc: fix common logs +* baseband/acc: fix logtypes register +* baseband/fpga_5gnr_fec: use a better random generator +* build: fix linker warnings about undefined symbols +* build: fix reasons conflict +* build: link static libs with whole-archive in subproject +* build: pass cflags in subproject +* bus/dpaa: verify strdup return +* bus/fslmc: verify strdup return +* bus/ifpga: remove dead code +* bus/vdev: fix devargs in secondary process +* bus/vdev: verify strdup return +* ci: update versions of actions in GHA +* common/cnxk: fix link 
config for SDP +* common/cnxk: fix mbox region copy +* common/cnxk: fix mbox struct attributes +* common/cnxk: fix memory leak in CPT init +* common/cnxk: fix possible out-of-bounds access +* common/cnxk: fix RSS RETA configuration +* common/cnxk: fix Tx MTU configuration +* common/cnxk: fix VLAN check for inner header +* common/cnxk: remove CN9K inline IPsec FP opcodes +* common/cnxk: remove dead code +* common/mlx5: fix calloc parameters +* common/mlx5: fix duplicate read of general capabilities +* common/sfc_efx/base: use C11 static assert +* config: fix CPU instruction set for cross-build +* cryptodev: remove unused extern variable +* crypto/ipsec_mb: fix incorrectly setting cipher keys +* crypto/qat: fix crash with CCM null AAD pointer +* dmadev: fix calloc parameters +* dma/dpaa2: fix logtype register +* dma/idxd: verify strdup return +* doc: add --latencystats option in testpmd guide +* doc: add link speeds configuration in features table +* doc: add traffic manager in features table +* doc: fix commands in eventdev test tool guide +* doc: fix configuration in baseband 5GNR driver guide +* doc: fix default IP fragments maximum in programmer guide +* doc: fix typo in packet framework guide +* doc: fix typo in profiling guide +* doc: fix typos in cryptodev overview +* doc: update link to Windows DevX in mlx5 guide +* drivers/net: fix buffer overflow for packet types list +* eal: verify strdup return +* eal/x86: add AMD vendor check for TSC calibration +* ethdev: fix NVGRE encap flow action description +* event/cnxk: fix dequeue timeout configuration +* event/cnxk: verify strdup return +* eventdev/crypto: fix enqueueing +* eventdev: fix calloc parameters +* eventdev: fix Doxygen processing of vector struct +* eventdev: improve Doxygen comments on configure struct +* event/dlb2: remove superfluous memcpy +* event/opdl: fix compile-time check +* examples/ipsec-secgw: fix cryptodev to SA mapping +* examples/ipsec-secgw: fix Rx queue ID in Rx callback +* examples/ipsec-secgw: fix typo in error message +* examples/ipsec-secgw: fix width of variables +* examples/l3fwd: fix Rx over not ready port +* examples/packet_ordering: fix Rx with reorder mode disabled +* examples/qos_sched: fix memory leak in args parsing +* examples/vhost: verify strdup return +* hash: remove some dead code +* kernel/freebsd: fix module build on FreeBSD 14 +* lib: add newline in logs +* lib: remove redundant newline from logs +* lib: use dedicated logtypes and macros +* net: add macros for VLAN metadata parsing +* net/af_xdp: fix leak on XSK configuration failure +* net/af_xdp: fix memzone leak on config failure +* net/bnx2x: fix calloc parameters +* net/bnx2x: fix warnings about memcpy lengths +* net/bnxt: fix 50G and 100G forced speed +* net/bnxt: fix array overflow +* net/bnxt: fix backward firmware compatibility +* net/bnxt: fix deadlock in ULP timer callback +* net/bnxt: fix null pointer dereference +* net/bnxt: fix number of Tx queues being created +* net/bnxt: fix speed change from 200G to 25G on Thor +* net/bnxt: modify locking for representor Tx +* net/bonding: fix flow count query +* net/cnxk: add cookies check for multi-segment offload +* net/cnxk: fix buffer size configuration +* net/cnxk: fix flow RSS configuration +* net/cnxk: fix mbuf fields in multi-segment Tx +* net/cnxk: fix MTU limit +* net/ena/base: limit exponential backoff +* net/ena/base: restructure interrupt handling +* net/ena: fix fast mbuf free +* net/ena: fix mbuf double free in fast free mode +* net/failsafe: fix memory leak in args 
parsing +* net: fix TCP/UDP checksum with padding data +* net/hns3: enable PFC for all user priorities +* net/hns3: fix disable command with firmware +* net/hns3: fix reset level comparison +* net/hns3: fix VF multiple count on one reset +* net/hns3: refactor handle mailbox function +* net/hns3: refactor PF mailbox message struct +* net/hns3: refactor send mailbox function +* net/hns3: refactor VF mailbox message struct +* net/hns3: remove QinQ insert support for VF +* net/hns3: support new device +* net/i40e: remove incorrect 16B descriptor read block +* net/i40e: remove redundant judgment in flow parsing +* net/iavf: fix memory leak on security context error +* net/iavf: remove error logs for VLAN offloading +* net/iavf: remove incorrect 16B descriptor read block +* net/ice: fix link update +* net/ice: fix memory leaks +* net/ice: fix tunnel TSO capabilities +* net/ice: fix version for experimental symbols +* net/ice: remove incorrect 16B descriptor read block +* net/ionic: fix device close +* net/ionic: fix missing volatile type for cqe pointers +* net/ionic: fix RSS query +* net/ixgbe: fix memory leak after device init failure +* net/ixgbe: increase VF reset timeout +* net/ixgbevf: fix RSS init for x550 NICs +* net/mana: fix memory leak on MR allocation +* net/mana: handle MR cache expansion failure +* net/mana: prevent values overflow returned from RDMA layer +* net/memif: fix extra mbuf refcnt update in zero copy Tx +* net/mlx5: fix age position in hairpin split +* net/mlx5: fix async flow create error handling +* net/mlx5: fix condition of LACP miss flow +* net/mlx5: fix connection tracking action validation +* net/mlx5: fix conntrack action handle representation +* net/mlx5: fix counters map in bonding mode +* net/mlx5: fix DR context release ordering +* net/mlx5: fix drop action release timing +* net/mlx5: fix error packets drop in regular Rx +* net/mlx5: fix flow configure validation +* net/mlx5: fix flow counter cache starvation +* net/mlx5: fix GENEVE option item translation +* net/mlx5: fix GENEVE TLV option management +* net/mlx5: fix HWS meter actions availability +* net/mlx5: fix incorrect counter cache dereference +* net/mlx5: fix IP-in-IP tunnels recognition +* net/mlx5: fix jump action validation +* net/mlx5: fix meter policy priority +* net/mlx5: fix rollback on failed flow configure +* net/mlx5: fix stats query crash in secondary process +* net/mlx5: fix template clean up of FDB control flow rule +* net/mlx5: fix use after free when releasing Tx queues +* net/mlx5: fix VLAN handling in meter split +* net/mlx5: fix VLAN ID in flow modify +* net/mlx5: fix warning about copy length +* net/mlx5/hws: check not supported fields in VXLAN +* net/mlx5/hws: enable multiple integrity items +* net/mlx5/hws: fix port ID for root table +* net/mlx5/hws: fix tunnel protocol checks +* net/mlx5/hws: fix VLAN inner type +* net/mlx5/hws: fix VLAN item in non-relaxed mode +* net/mlx5: prevent ioctl failure log flooding +* net/mlx5: prevent querying aged flows on uninit port +* net/mlx5: remove device status check in flow creation +* net/mlx5: remove duplication of L3 flow item validation +* net/mlx5: remove GENEVE options length limitation +* net/netvsc: fix VLAN metadata parsing +* net/nfp: fix calloc parameters +* net/nfp: fix device close +* net/nfp: fix device resource freeing +* net/nfp: fix resource leak for CoreNIC firmware +* net/nfp: fix resource leak for exit of CoreNIC firmware +* net/nfp: fix resource leak for exit of flower firmware +* net/nfp: fix resource leak for flower 
firmware +* net/nfp: fix resource leak for PF initialization +* net/nfp: fix switch domain free check +* net/nfp: free switch domain ID on close +* net/tap: do not overwrite flow API errors +* net/tap: fix traffic control handle calculation +* net/tap: log Netlink extended ack unavailability +* net/thunderx: fix DMAC control register update +* net/virtio: remove duplicate queue xstats +* net/vmxnet3: fix initialization on FreeBSD +* net/vmxnet3: ignore Rx queue interrupt setup on FreeBSD +* pipeline: fix calloc parameters +* rawdev: fix calloc parameters +* regexdev: fix logtype register +* Revert "build: add libarchive to optional external dependencies" +* telemetry: fix connected clients count +* telemetry: fix empty JSON dictionaries +* test/bpf: fix mbuf init in some filter test +* test/cfgfile: fix typo in error messages +* test: do not count skipped tests as executed +* test/event: fix crash in Tx adapter freeing +* test/event: skip test if no driver is present +* test: fix probing in secondary process +* test/mbuf: fix external mbuf case with assert enabled +* test/power: fix typo in error message +* test: verify strdup return +* vdpa/mlx5: fix queue enable drain CQ +* version: 22.11.5-rc1 +* vhost: fix deadlock during vDPA SW live migration +* vhost: fix memory leak in Virtio Tx split path +* vhost: fix virtqueue access check in vhost-user setup + +22.11.5 Validation +~~~~~~~~~~~~~~~~~~ + +* Red Hat(R) Testing + + * Platform + + * RHEL 9 + * Kernel 5.14 + * Qemu 8.2.0 + * libvirt 10.0.0 + * X540-AT2 NIC(ixgbe, 10G) + + * Functionality + + * Guest with device assignment(PF) throughput testing(1G hugepage size) + * Guest with device assignment(PF) throughput testing(2M hugepage size) + * Guest with device assignment(VF) throughput testing + * PVP (host dpdk testpmd as vswitch) 1Q: throughput testing + * PVP vhost-user 2Q throughput testing + * PVP vhost-user 1Q cross numa node throughput testing + * Guest with vhost-user 2 queues throughput testing + * vhost-user reconnect with dpdk-client, qemu-server: qemu reconnect + * vhost-user reconnect with dpdk-client, qemu-server: ovs reconnect + * PVP reconnect with dpdk-client, qemu-server: PASS + * PVP 1Q live migration testing + * PVP 1Q cross numa node live migration testing + * Guest with ovs+dpdk+vhost-user 1Q live migration testing + * Guest with ovs+dpdk+vhost-user 1Q live migration testing (2M) + * Guest with ovs+dpdk+vhost-user 2Q live migration testing + * Guest with ovs+dpdk+vhost-user 4Q live migration testing + * Host PF + DPDK testing + * Host VF + DPDK testing + + +* Intel(R) Testing + + * Basic Intel(R) NIC testing + * Build & CFLAG compile: cover the build test combination with latest GCC/Clang version and the popular OS revision such as Ubuntu20.04, Ubuntu22.04, Fedora38, RHEL8.7, RHEL9.2, FreeBSD13.1, SUSE15, Centos7.9, openEuler22.03-SP1, OpenAnolis8.8 etc. + * PF(i40e, ixgbe): test scenarios including RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. + * VF(i40e, ixgbe): test scenarios including VF-RTE_FLOW/TSO/Jumboframe/checksum offload/VLAN/VXLAN, etc. + * PF/VF(ice): test scenarios including Switch features/Package Management/Flow Director/Advanced Tx/Advanced RSS/ACL/DCF/Flexible Descriptor, etc. + * Intel NIC single core/NIC performance: test scenarios including PF/VF single core performance test, etc. + * IPsec: test scenarios including ipsec/ipsec-gw/ipsec library basic test - QAT&SW/FIB library, etc. + + * Basic cryptodev and virtio testing + * Virtio: both function and performance test are covered. 
Such as PVP/Virtio_loopback/virtio-user loopback/virtio-net VM2VM perf testing/VMware ESXi 8.0, etc. + * Cryptodev: + * Function test: test scenarios including Cryptodev API testing/CompressDev ISA-L/QAT/ZLIB PMD Testing/FIPS, etc. + * Performance test: test scenarios including Throughput Performance/Cryptodev Latency, etc. + + +* Nvidia(R) Testing + + * Basic functionality via testpmd/example applications + + * Tx/Rx + * xstats + * Timestamps + * Link status + * RTE flow and flow_director + * RSS + * VLAN filtering, stripping and insertion + * Checksum/TSO + * ptype + * link_status_interrupt example application + * l3fwd-power example application + * Multi-process example applications + * Hardware LRO tests + * Buffer Split tests + * Tx scheduling tests + + * Build tests + + * Ubuntu 20.04.6 with MLNX_OFED_LINUX-24.01-0.3.3.1. + * Ubuntu 20.04.6 with rdma-core master (4b08a22). + * Ubuntu 20.04.6 with rdma-core v28.0. + * Fedora 38 with rdma-core v44.0. + * Fedora 40 (Rawhide) with rdma-core v48.0. + * OpenSUSE Leap 15.5 with rdma-core v42.0. + * Windows Server 2019 with Clang 16.0.6. + + * BlueField-2 + + * DOCA 2.6.0 + * fw 24.40.1000 + + * ConnectX-7 + + * Ubuntu 20.04 + * Driver MLNX_OFED_LINUX-24.01-0.3.3.1 + * fw 28.40.1000 + + * ConnectX-6 Dx + + * Ubuntu 20.04 + * Driver MLNX_OFED_LINUX-24.01-0.3.3.1 + * fw 22.40.1000 diff -Nru dpdk-22.11.4/doc/guides/testpmd_app_ug/run_app.rst dpdk-22.11.5/doc/guides/testpmd_app_ug/run_app.rst --- dpdk-22.11.4/doc/guides/testpmd_app_ug/run_app.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/testpmd_app_ug/run_app.rst 2024-04-22 11:25:10.000000000 +0000 @@ -418,6 +418,10 @@ Set the logical core N to perform bitrate calculation. +* ``--latencystats=N`` + + Set the logical core N to perform latency and jitter calculations. + * ``--print-event `` Enable printing the occurrence of the designated event. Using all will diff -Nru dpdk-22.11.4/doc/guides/tools/testeventdev.rst dpdk-22.11.5/doc/guides/tools/testeventdev.rst --- dpdk-22.11.4/doc/guides/tools/testeventdev.rst 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/doc/guides/tools/testeventdev.rst 2024-04-22 11:25:10.000000000 +0000 @@ -295,7 +295,7 @@ .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_sw0 -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0x1f -s 0x10 --vdev=event_sw0 -- \ --test=order_queue --plcores 1 --wlcores 2,3 @@ -358,7 +358,7 @@ .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_octeontx -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0x1f -- \ --test=order_atq --plcores 1 --wlcores 2,3 @@ -462,14 +462,14 @@ .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 Example command to run perf queue test with producer enqueuing a burst of events: .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x1 --vdev=event_sw0 -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 \ --prod_enq_burst_sz=32 @@ -477,15 +477,15 @@ .. code-block:: console - sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \ + sudo build/app/dpdk-test-eventdev -c 0xf -s 0x2 --vdev=event_sw0 -- \ --test=perf_queue --plcores=2 --wlcore=3 --stlist=p --prod_type_ethdev Example command to run perf queue test with event timer adapter: .. 
code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev --vdev="event_octeontx" -- \ - --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xfff1 \ + -- --wlcores 4 --plcores 12 --test perf_queue --stlist=a \ --prod_type_timerdev --fwd_latency PERF_ATQ Test @@ -572,15 +572,15 @@ .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev --vdev=event_octeontx -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -- \ --test=perf_atq --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0 Example command to run perf ``all types queue`` test with event timer adapter: .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev --vdev="event_octeontx" -- \ - --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xfff1 \ + -- --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \ --stlist=a --prod_type_timerdev --fwd_latency @@ -804,13 +804,13 @@ .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -- \ --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a Example command to run pipeline atq test with vector events: .. code-block:: console - sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -s 0x8 --vdev=event_sw0 -- \ + sudo <build_dir>/app/dpdk-test-eventdev -c 0xf -- \ --test=pipeline_atq --wlcore=1 --prod_type_ethdev --stlist=a \ --enable_vector --vector_size 512 diff -Nru dpdk-22.11.4/drivers/baseband/acc/acc_common.c dpdk-22.11.5/drivers/baseband/acc/acc_common.c --- dpdk-22.11.4/drivers/baseband/acc/acc_common.c 1970-01-01 00:00:00.000000000 +0000 +++ dpdk-22.11.5/drivers/baseband/acc/acc_common.c 2024-04-22 11:25:10.000000000 +0000 @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2023 Red Hat, Inc. + */ + +#include <rte_log.h> + +RTE_LOG_REGISTER_SUFFIX(acc_common_logtype, common, INFO); diff -Nru dpdk-22.11.4/drivers/baseband/acc/acc_common.h dpdk-22.11.5/drivers/baseband/acc/acc_common.h --- dpdk-22.11.4/drivers/baseband/acc/acc_common.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/baseband/acc/acc_common.h 2024-04-22 11:25:10.000000000 +0000 @@ -131,9 +131,11 @@ #define ACC_LIM_31 20 /* 0.31 */ #define ACC_MAX_E (128 * 1024 - 2) +extern int acc_common_logtype; + /* Helper macro for logging */ #define rte_acc_log(level, fmt, ...) 
\ - rte_log(RTE_LOG_ ## level, RTE_LOG_NOTICE, fmt "\n", \ + rte_log(RTE_LOG_ ## level, acc_common_logtype, fmt "\n", \ ##__VA_ARGS__) /* ACC100 DMA Descriptor triplet */ diff -Nru dpdk-22.11.4/drivers/baseband/acc/meson.build dpdk-22.11.5/drivers/baseband/acc/meson.build --- dpdk-22.11.4/drivers/baseband/acc/meson.build 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/baseband/acc/meson.build 2024-04-22 11:25:10.000000000 +0000 @@ -3,6 +3,6 @@ deps += ['bbdev', 'bus_pci'] -sources = files('rte_acc100_pmd.c', 'rte_acc200_pmd.c') +sources = files('acc_common.c', 'rte_acc100_pmd.c', 'rte_acc200_pmd.c') headers = files('rte_acc_cfg.h') diff -Nru dpdk-22.11.4/drivers/baseband/acc/rte_acc100_pmd.c dpdk-22.11.5/drivers/baseband/acc/rte_acc100_pmd.c --- dpdk-22.11.4/drivers/baseband/acc/rte_acc100_pmd.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/baseband/acc/rte_acc100_pmd.c 2024-04-22 11:25:10.000000000 +0000 @@ -26,9 +26,9 @@ #include "acc200_cfg.h" #ifdef RTE_LIBRTE_BBDEV_DEBUG -RTE_LOG_REGISTER_DEFAULT(acc100_logtype, DEBUG); +RTE_LOG_REGISTER_SUFFIX(acc100_logtype, acc100, DEBUG); #else -RTE_LOG_REGISTER_DEFAULT(acc100_logtype, NOTICE); +RTE_LOG_REGISTER_SUFFIX(acc100_logtype, acc100, NOTICE); #endif /* Calculate the offset of the enqueue register */ diff -Nru dpdk-22.11.4/drivers/baseband/acc/rte_acc200_pmd.c dpdk-22.11.5/drivers/baseband/acc/rte_acc200_pmd.c --- dpdk-22.11.4/drivers/baseband/acc/rte_acc200_pmd.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/baseband/acc/rte_acc200_pmd.c 2024-04-22 11:25:10.000000000 +0000 @@ -24,9 +24,9 @@ #include "acc200_pmd.h" #ifdef RTE_LIBRTE_BBDEV_DEBUG -RTE_LOG_REGISTER_DEFAULT(acc200_logtype, DEBUG); +RTE_LOG_REGISTER_SUFFIX(acc200_logtype, acc200, DEBUG); #else -RTE_LOG_REGISTER_DEFAULT(acc200_logtype, NOTICE); +RTE_LOG_REGISTER_SUFFIX(acc200_logtype, acc200, NOTICE); #endif /* Calculate the offset of the enqueue register. 
*/ diff -Nru dpdk-22.11.4/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c dpdk-22.11.5/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c --- dpdk-22.11.4/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c 2024-04-22 11:25:10.000000000 +0000 @@ -16,6 +16,7 @@ #ifdef RTE_BBDEV_OFFLOAD_COST #include <rte_cycles.h> #endif +#include <rte_random.h> #include <rte_bbdev.h> #include <rte_bbdev_pmd.h> @@ -1502,7 +1503,7 @@ { uint32_t mutex_ctrl, mutex_read, cnt = 0; /* Assign a unique id for the duration of the DDR access */ - q->ddr_mutex_uuid = rand(); + q->ddr_mutex_uuid = rte_rand(); /* Request and wait for acquisition of the mutex */ mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1; do { diff -Nru dpdk-22.11.4/drivers/bus/dpaa/dpaa_bus.c dpdk-22.11.5/drivers/bus/dpaa/dpaa_bus.c --- dpdk-22.11.4/drivers/bus/dpaa/dpaa_bus.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/bus/dpaa/dpaa_bus.c 2024-04-22 11:25:10.000000000 +0000 @@ -791,6 +791,10 @@ /* Now that name=device_name format is available, split */ dup = strdup(str); + if (dup == NULL) { + DPAA_BUS_DEBUG("Dup string (%s) failed!\n", str); + return NULL; + } dev_name = dup + strlen("name="); if (start != NULL) { diff -Nru dpdk-22.11.4/drivers/bus/fslmc/fslmc_bus.c dpdk-22.11.5/drivers/bus/fslmc/fslmc_bus.c --- dpdk-22.11.4/drivers/bus/fslmc/fslmc_bus.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/bus/fslmc/fslmc_bus.c 2024-04-22 11:25:10.000000000 +0000 @@ -634,6 +634,10 @@ /* Now that name=device_name format is available, split */ dup = strdup(str); + if (dup == NULL) { + DPAA2_BUS_DEBUG("Dup string (%s) failed!\n", str); + return NULL; + } dev_name = dup + strlen("name="); if (start != NULL) { diff -Nru dpdk-22.11.4/drivers/bus/ifpga/ifpga_logs.h dpdk-22.11.5/drivers/bus/ifpga/ifpga_logs.h --- dpdk-22.11.4/drivers/bus/ifpga/ifpga_logs.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/bus/ifpga/ifpga_logs.h 2024-04-22 11:25:10.000000000 +0000 @@ -9,10 +9,6 @@ extern int ifpga_bus_logtype; -#define IFPGA_LOG(level, fmt, args...) \ - rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \ - __func__, ##args) - #define IFPGA_BUS_LOG(level, fmt, args...) 
\ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \ __func__, ##args) diff -Nru dpdk-22.11.4/drivers/bus/vdev/vdev.c dpdk-22.11.5/drivers/bus/vdev/vdev.c --- dpdk-22.11.4/drivers/bus/vdev/vdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/bus/vdev/vdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -247,6 +247,10 @@ devargs->data = strdup(args); else devargs->data = strdup(""); + if (devargs->data == NULL) { + free(devargs); + return NULL; + } devargs->args = devargs->data; ret = strlcpy(devargs->name, name, sizeof(devargs->name)); @@ -259,6 +263,22 @@ return devargs; } +static struct rte_devargs * +vdev_devargs_lookup(const char *name) +{ + struct rte_devargs *devargs; + char dev_name[32]; + + RTE_EAL_DEVARGS_FOREACH("vdev", devargs) { + devargs->bus->parse(devargs->name, &dev_name); + if (strcmp(dev_name, name) == 0) { + VDEV_LOG(INFO, "devargs matched %s", dev_name); + return devargs; + } + } + return NULL; +} + static int insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev, @@ -271,7 +291,11 @@ if (name == NULL) return -EINVAL; - devargs = alloc_devargs(name, args); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + devargs = alloc_devargs(name, args); + else + devargs = vdev_devargs_lookup(name); + if (!devargs) return -ENOMEM; diff -Nru dpdk-22.11.4/drivers/common/cnxk/cnxk_security.c dpdk-22.11.5/drivers/common/cnxk/cnxk_security.c --- dpdk-22.11.4/drivers/common/cnxk/cnxk_security.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/cnxk_security.c 2024-04-22 11:25:10.000000000 +0000 @@ -614,235 +614,6 @@ return !!sa->w2.s.valid; } -static inline int -ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm, - struct rte_crypto_sym_xform *crypto_xfrm) -{ - if (crypto_xfrm->next == NULL) - return -EINVAL; - - if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) { - if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH || - crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER) - return -EINVAL; - } else { - if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER || - crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH) - return -EINVAL; - } - - return 0; -} - -static int -onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt, - uint8_t *cipher_key, uint8_t *hmac_opad_ipad, - struct rte_security_ipsec_xform *ipsec_xfrm, - struct rte_crypto_sym_xform *crypto_xfrm) -{ - struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm; - int rc, length, auth_key_len; - const uint8_t *key = NULL; - uint8_t ccm_flag = 0; - - /* Set direction */ - switch (ipsec_xfrm->direction) { - case RTE_SECURITY_IPSEC_SA_DIR_INGRESS: - ctl->direction = ROC_IE_SA_DIR_INBOUND; - auth_xfrm = crypto_xfrm; - cipher_xfrm = crypto_xfrm->next; - break; - case RTE_SECURITY_IPSEC_SA_DIR_EGRESS: - ctl->direction = ROC_IE_SA_DIR_OUTBOUND; - cipher_xfrm = crypto_xfrm; - auth_xfrm = crypto_xfrm->next; - break; - default: - return -EINVAL; - } - - /* Set protocol - ESP vs AH */ - switch (ipsec_xfrm->proto) { - case RTE_SECURITY_IPSEC_SA_PROTO_ESP: - ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP; - break; - case RTE_SECURITY_IPSEC_SA_PROTO_AH: - return -ENOTSUP; - default: - return -EINVAL; - } - - /* Set mode - transport vs tunnel */ - switch (ipsec_xfrm->mode) { - case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT: - ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT; - break; - case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL: - ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL; - break; - default: - return -EINVAL; - } - - /* Set encryption algorithm */ - 
if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - length = crypto_xfrm->aead.key.length; - - switch (crypto_xfrm->aead.algo) { - case RTE_CRYPTO_AEAD_AES_GCM: - ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM; - ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL; - memcpy(salt, &ipsec_xfrm->salt, 4); - key = crypto_xfrm->aead.key.data; - break; - case RTE_CRYPTO_AEAD_AES_CCM: - ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CCM; - ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL; - ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN; - *salt = ccm_flag; - memcpy(PLT_PTR_ADD(salt, 1), &ipsec_xfrm->salt, 3); - key = crypto_xfrm->aead.key.data; - break; - default: - return -ENOTSUP; - } - - } else { - rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm); - if (rc) - return rc; - - switch (cipher_xfrm->cipher.algo) { - case RTE_CRYPTO_CIPHER_AES_CBC: - ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC; - break; - case RTE_CRYPTO_CIPHER_AES_CTR: - ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR; - break; - default: - return -ENOTSUP; - } - - switch (auth_xfrm->auth.algo) { - case RTE_CRYPTO_AUTH_SHA1_HMAC: - ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1; - break; - default: - return -ENOTSUP; - } - auth_key_len = auth_xfrm->auth.key.length; - if (auth_key_len < 20 || auth_key_len > 64) - return -ENOTSUP; - - key = cipher_xfrm->cipher.key.data; - length = cipher_xfrm->cipher.key.length; - - ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad); - } - - switch (length) { - case ROC_CPT_AES128_KEY_LEN: - ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128; - break; - case ROC_CPT_AES192_KEY_LEN: - ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192; - break; - case ROC_CPT_AES256_KEY_LEN: - ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256; - break; - default: - return -EINVAL; - } - - memcpy(cipher_key, key, length); - - if (ipsec_xfrm->options.esn) - ctl->esn_en = 1; - - ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi); - return 0; -} - -int -cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa, - struct rte_security_ipsec_xform *ipsec_xfrm, - struct rte_crypto_sym_xform *crypto_xfrm) -{ - struct roc_ie_onf_sa_ctl *ctl = &sa->ctl; - int rc; - - rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key, - sa->hmac_key, ipsec_xfrm, - crypto_xfrm); - if (rc) - return rc; - - rte_wmb(); - - /* Enable SA */ - ctl->valid = 1; - return 0; -} - -int -cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa, - struct rte_security_ipsec_xform *ipsec_xfrm, - struct rte_crypto_sym_xform *crypto_xfrm) -{ - struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel; - struct roc_ie_onf_sa_ctl *ctl = &sa->ctl; - int rc; - - /* Fill common params */ - rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key, - sa->hmac_key, ipsec_xfrm, - crypto_xfrm); - if (rc) - return rc; - - if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) - goto skip_tunnel_info; - - /* Tunnel header info */ - switch (tunnel->type) { - case RTE_SECURITY_IPSEC_TUNNEL_IPV4: - memcpy(&sa->ip_src, &tunnel->ipv4.src_ip, - sizeof(struct in_addr)); - memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip, - sizeof(struct in_addr)); - break; - case RTE_SECURITY_IPSEC_TUNNEL_IPV6: - return -ENOTSUP; - default: - return -EINVAL; - } - - /* Update udp encap ports */ - if (ipsec_xfrm->options.udp_encap == 1) { - sa->udp_src = 4500; - sa->udp_dst = 4500; - } - -skip_tunnel_info: - rte_wmb(); - - /* Enable SA */ - ctl->valid = 1; - return 0; -} - -bool -cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa) -{ - return !!sa->ctl.valid; -} - -bool -cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa 
*sa) -{ - return !!sa->ctl.valid; -} - uint8_t cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo, enum rte_crypto_auth_algorithm a_algo, diff -Nru dpdk-22.11.4/drivers/common/cnxk/cnxk_security.h dpdk-22.11.5/drivers/common/cnxk/cnxk_security.h --- dpdk-22.11.4/drivers/common/cnxk/cnxk_security.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/cnxk_security.h 2024-04-22 11:25:10.000000000 +0000 @@ -47,18 +47,6 @@ bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa); bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa); -/* [CN9K, CN10K) */ -int __roc_api -cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa, - struct rte_security_ipsec_xform *ipsec_xfrm, - struct rte_crypto_sym_xform *crypto_xfrm); -int __roc_api -cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa, - struct rte_security_ipsec_xform *ipsec_xfrm, - struct rte_crypto_sym_xform *crypto_xfrm); -bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa); -bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa); - /* [CN9K] */ int __roc_api cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec, diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_cpt.c dpdk-22.11.5/drivers/common/cnxk/roc_cpt.c --- dpdk-22.11.4/drivers/common/cnxk/roc_cpt.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_cpt.c 2024-04-22 11:25:10.000000000 +0000 @@ -656,7 +656,7 @@ rc = dev_init(dev, pci_dev); if (rc) { plt_err("Failed to init roc device"); - goto fail; + return rc; } cpt->pci_dev = pci_dev; @@ -688,6 +688,7 @@ return 0; fail: + dev_fini(dev, pci_dev); return rc; } diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_dev.c dpdk-22.11.5/drivers/common/cnxk/roc_dev.c --- dpdk-22.11.4/drivers/common/cnxk/roc_dev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_dev.c 2024-04-22 11:25:10.000000000 +0000 @@ -190,9 +190,8 @@ vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz); if (vf_msg) { mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg); - memcpy((uint8_t *)vf_msg + - sizeof(struct mbox_msghdr), &linfo, - sizeof(struct cgx_link_user_info)); + mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr), &linfo, + sizeof(struct cgx_link_user_info)); vf_msg->rc = msg->rc; vf_msg->pcifunc = msg->pcifunc; @@ -467,6 +466,8 @@ size_t size; size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN); + if (size < sizeof(struct mbox_msghdr)) + return; /* Send UP message to all VF's */ for (vf = 0; vf < vf_mbox->ndevs; vf++) { /* VF active */ diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_ie_on.h dpdk-22.11.5/drivers/common/cnxk/roc_ie_on.h --- dpdk-22.11.4/drivers/common/cnxk/roc_ie_on.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_ie_on.h 2024-04-22 11:25:10.000000000 +0000 @@ -268,66 +268,6 @@ #define ROC_IE_ON_UCC_L2_HDR_INFO_ERR 0xCF #define ROC_IE_ON_UCC_L2_HDR_LEN_ERR 0xE0 -struct roc_ie_onf_sa_ctl { - uint32_t spi; - uint64_t exp_proto_inter_frag : 8; - uint64_t rsvd_41_40 : 2; - /* Disable SPI, SEQ data in RPTR for Inbound inline */ - uint64_t spi_seq_dis : 1; - uint64_t esn_en : 1; - uint64_t rsvd_44_45 : 2; - uint64_t encap_type : 2; - uint64_t enc_type : 3; - uint64_t rsvd_48 : 1; - uint64_t auth_type : 4; - uint64_t valid : 1; - uint64_t direction : 1; - uint64_t outer_ip_ver : 1; - uint64_t inner_ip_ver : 1; - uint64_t ipsec_mode : 1; - uint64_t ipsec_proto : 1; - uint64_t aes_key_len : 2; -}; - -struct roc_onf_ipsec_outb_sa { 
- /* w0 */ - struct roc_ie_onf_sa_ctl ctl; - - /* w1 */ - uint8_t nonce[4]; - uint16_t udp_src; - uint16_t udp_dst; - - /* w2 */ - uint32_t ip_src; - uint32_t ip_dst; - - /* w3-w6 */ - uint8_t cipher_key[32]; - - /* w7-w12 */ - uint8_t hmac_key[48]; -}; - -struct roc_onf_ipsec_inb_sa { - /* w0 */ - struct roc_ie_onf_sa_ctl ctl; - - /* w1 */ - uint8_t nonce[4]; /* Only for AES-GCM */ - uint32_t unused; - - /* w2 */ - uint32_t esn_hi; - uint32_t esn_low; - - /* w3-w6 */ - uint8_t cipher_key[32]; - - /* w7-w12 */ - uint8_t hmac_key[48]; -}; - #define ROC_ONF_IPSEC_INB_MAX_L2_SZ 32UL #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ 30UL #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2) diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_mbox.h dpdk-22.11.5/drivers/common/cnxk/roc_mbox.h --- dpdk-22.11.4/drivers/common/cnxk/roc_mbox.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_mbox.h 2024-04-22 11:25:10.000000000 +0000 @@ -855,12 +855,12 @@ struct nix_cn10k_aq_enq_rsp { struct mbox_msghdr hdr; union { - struct nix_cn10k_rq_ctx_s rq; - struct nix_cn10k_sq_ctx_s sq; - struct nix_cq_ctx_s cq; - struct nix_rsse_s rss; - struct nix_rx_mce_s mce; - struct nix_band_prof_s prof; + __io struct nix_cn10k_rq_ctx_s rq; + __io struct nix_cn10k_sq_ctx_s sq; + __io struct nix_cq_ctx_s cq; + __io struct nix_rsse_s rss; + __io struct nix_rx_mce_s mce; + __io struct nix_band_prof_s prof; }; }; @@ -1096,11 +1096,11 @@ #define RQ_CTX_MASK_MAX 6 union { uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX]; - struct nix_cn10k_rq_ctx_s rq_set; + __io struct nix_cn10k_rq_ctx_s rq_set; }; union { uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX]; - struct nix_cn10k_rq_ctx_s rq_mask; + __io struct nix_cn10k_rq_ctx_s rq_mask; }; struct nix_lf_rx_ipec_cfg1_req { uint32_t __io spb_cpt_aura; diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_nix.c dpdk-22.11.5/drivers/common/cnxk/roc_nix.c --- dpdk-22.11.4/drivers/common/cnxk/roc_nix.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_nix.c 2024-04-22 11:25:10.000000000 +0000 @@ -426,7 +426,7 @@ sdp_lbk_id_update(pci_dev, nix); nix->pci_dev = pci_dev; nix->reta_sz = reta_sz; - nix->mtu = ROC_NIX_DEFAULT_HW_FRS; + nix->mtu = roc_nix_max_pkt_len(roc_nix); /* Always start with full FC for LBK */ if (nix->lbk_link) { diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_nix.h dpdk-22.11.5/drivers/common/cnxk/roc_nix.h --- dpdk-22.11.4/drivers/common/cnxk/roc_nix.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_nix.h 2024-04-22 11:25:10.000000000 +0000 @@ -236,8 +236,6 @@ #define ROC_NIX_RSS_KEY_LEN 48 /* 352 Bits */ #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1) -#define ROC_NIX_DEFAULT_HW_FRS 1514 - #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11 #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2 diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_nix_inl.c dpdk-22.11.5/drivers/common/cnxk/roc_nix_inl.c --- dpdk-22.11.4/drivers/common/cnxk/roc_nix_inl.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_nix_inl.c 2024-04-22 11:25:10.000000000 +0000 @@ -399,8 +399,7 @@ return -EFAULT; PLT_SET_USED(max_frags); - if (idev == NULL) - return -ENOTSUP; + roc_cpt = idev->cpt; if (!roc_cpt) { plt_err("Cannot support inline inbound, cryptodev not probed"); diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_nix_inl.h dpdk-22.11.5/drivers/common/cnxk/roc_nix_inl.h --- dpdk-22.11.4/drivers/common/cnxk/roc_nix_inl.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_nix_inl.h 2024-04-22 
11:25:10.000000000 +0000 @@ -4,24 +4,6 @@ #ifndef _ROC_NIX_INL_H_ #define _ROC_NIX_INL_H_ -/* ONF INB HW area */ -#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ \ - PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN) -/* ONF INB SW reserved area */ -#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384 -#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ \ - (ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD) -#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9 - -/* ONF OUTB HW area */ -#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ \ - PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN) -/* ONF OUTB SW reserved area */ -#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128 -#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ \ - (ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD) -#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8 - /* ON INB HW area */ #define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ \ PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN) @@ -31,10 +13,10 @@ (ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD) #define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10 -/* ONF OUTB HW area */ +/* ON OUTB HW area */ #define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ \ PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN) -/* ONF OUTB SW reserved area */ +/* ON OUTB SW reserved area */ #define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256 #define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ \ (ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD) @@ -107,34 +89,6 @@ return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ); } -static inline struct roc_onf_ipsec_inb_sa * -roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx) -{ - uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2; - - return PLT_PTR_ADD(base, off); -} - -static inline struct roc_onf_ipsec_outb_sa * -roc_nix_inl_onf_ipsec_outb_sa(uintptr_t base, uint64_t idx) -{ - uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2; - - return PLT_PTR_ADD(base, off); -} - -static inline void * -roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(void *sa) -{ - return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ); -} - -static inline void * -roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa) -{ - return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ); -} - static inline struct roc_ot_ipsec_inb_sa * roc_nix_inl_ot_ipsec_inb_sa(uintptr_t base, uint64_t idx) { diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_nix_rss.c dpdk-22.11.5/drivers/common/cnxk/roc_nix_rss.c --- dpdk-22.11.4/drivers/common/cnxk/roc_nix_rss.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_nix_rss.c 2024-04-22 11:25:10.000000000 +0000 @@ -182,7 +182,7 @@ if (rc) return rc; - memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX); + memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX); return 0; } @@ -195,7 +195,7 @@ if (group >= ROC_NIX_RSS_GRPS) return NIX_ERR_PARAM; - memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX); + memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX); return 0; } diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_nix_tm.c dpdk-22.11.5/drivers/common/cnxk/roc_nix_tm.c --- dpdk-22.11.4/drivers/common/cnxk/roc_nix_tm.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_nix_tm.c 2024-04-22 11:25:10.000000000 +0000 @@ -326,6 +326,9 @@ uint8_t k = 0; int rc = 0; + if (roc_nix_is_sdp(roc_nix)) + return 0; + sq_s = nix->sqs[sq]; if (!sq_s) return -ENOENT; diff -Nru dpdk-22.11.4/drivers/common/cnxk/roc_npc_parse.c dpdk-22.11.5/drivers/common/cnxk/roc_npc_parse.c --- 
dpdk-22.11.4/drivers/common/cnxk/roc_npc_parse.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/roc_npc_parse.c 2024-04-22 11:25:10.000000000 +0000 @@ -1019,6 +1019,7 @@ { const struct roc_npc_item_info *pattern, *last_pattern; char hw_mask[NPC_MAX_EXTRACT_HW_LEN]; + const struct roc_npc_flow_item_eth *eth_item; struct npc_parse_item_info info; int lid, lt, lflags; int nr_vlans = 0; @@ -1035,10 +1036,12 @@ lt = NPC_LT_LF_TU_ETHER; lflags = 0; + eth_item = pst->pattern->spec; + /* No match support for vlan tags */ info.def_mask = NULL; info.hw_mask = NULL; - info.len = pst->pattern->size; + info.len = sizeof(eth_item->hdr); info.spec = NULL; info.mask = NULL; info.hw_hdr_len = 0; @@ -1069,12 +1072,15 @@ } info.hw_mask = &hw_mask; - info.len = pst->pattern->size; + info.len = sizeof(eth_item->hdr); info.hw_hdr_len = 0; npc_get_hw_supp_mask(pst, &info, lid, lt); info.spec = NULL; info.mask = NULL; + if (eth_item && eth_item->has_vlan) + pst->set_vlan_ltype_mask = true; + rc = npc_parse_item_basic(pst->pattern, &info); if (rc != 0) return rc; diff -Nru dpdk-22.11.4/drivers/common/cnxk/version.map dpdk-22.11.5/drivers/common/cnxk/version.map --- dpdk-22.11.4/drivers/common/cnxk/version.map 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/cnxk/version.map 2024-04-22 11:25:10.000000000 +0000 @@ -15,10 +15,6 @@ cnxk_logtype_sso; cnxk_logtype_tim; cnxk_logtype_tm; - cnxk_onf_ipsec_inb_sa_fill; - cnxk_onf_ipsec_outb_sa_fill; - cnxk_onf_ipsec_inb_sa_valid; - cnxk_onf_ipsec_outb_sa_valid; cnxk_ot_ipsec_inb_sa_fill; cnxk_ot_ipsec_outb_sa_fill; cnxk_ot_ipsec_inb_sa_valid; diff -Nru dpdk-22.11.4/drivers/common/mlx5/mlx5_common_mr.c dpdk-22.11.5/drivers/common/mlx5/mlx5_common_mr.c --- dpdk-22.11.4/drivers/common/mlx5/mlx5_common_mr.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/mlx5/mlx5_common_mr.c 2024-04-22 11:25:10.000000000 +0000 @@ -1381,7 +1381,7 @@ DRV_LOG(DEBUG, "Collecting chunks of regular mempool %s", mp->name); n = mp->nb_mem_chunks; - *out = calloc(sizeof(**out), n); + *out = calloc(n, sizeof(**out)); if (*out == NULL) return -1; rte_mempool_mem_iter(mp, mlx5_range_from_mempool_chunk, *out); diff -Nru dpdk-22.11.4/drivers/common/mlx5/mlx5_devx_cmds.c dpdk-22.11.5/drivers/common/mlx5/mlx5_devx_cmds.c --- dpdk-22.11.4/drivers/common/mlx5/mlx5_devx_cmds.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/mlx5/mlx5_devx_cmds.c 2024-04-22 11:25:10.000000000 +0000 @@ -902,18 +902,6 @@ attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr, max_geneve_tlv_option_data_len); attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos); - attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr, - general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO); - attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, - general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); - attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr, - general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS); - attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr, - general_obj_types) & - MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE); attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr, wqe_index_ignore_cap); attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd); @@ -937,6 +925,9 @@ /* Read the general_obj_types bitmap and extract the relevant bits. 
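* Caching the 64-bit bitmap once in general_obj_types_supported and testing
* capability bits against that local copy, as the added lines below do, is
* what lets the duplicated MLX5_GET64(cmd_hca_cap, hcattr, general_obj_types)
* reads removed above go away without changing behaviour.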
*/ general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr, general_obj_types); + attr->qos.flow_meter_aso_sup = + !!(general_obj_types_supported & + MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO); attr->vdpa.valid = !!(general_obj_types_supported & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q); attr->vdpa.queue_counters_valid = @@ -998,8 +989,7 @@ MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled); attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time); attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto); - attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr, - general_obj_types) & + attr->ct_offload = !!(general_obj_types_supported & MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD); attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop); attr->nic_flow_table = MLX5_GET(cmd_hca_cap, hcattr, nic_flow_table); diff -Nru dpdk-22.11.4/drivers/common/qat/meson.build dpdk-22.11.5/drivers/common/qat/meson.build --- dpdk-22.11.4/drivers/common/qat/meson.build 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/qat/meson.build 2024-04-22 11:25:10.000000000 +0000 @@ -17,13 +17,13 @@ if disable_drivers.contains(qat_crypto_path) qat_crypto = false dpdk_drvs_disabled += qat_crypto_path - set_variable(qat_crypto_path.underscorify() + '_disable_reason', + set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason', 'Explicitly disabled via build config') endif if disable_drivers.contains(qat_compress_path) qat_compress = false dpdk_drvs_disabled += qat_compress_path - set_variable(qat_compress_path.underscorify() + '_disable_reason', + set_variable('drv_' + qat_compress_path.underscorify() + '_disable_reason', 'Explicitly disabled via build config') endif @@ -31,7 +31,7 @@ if qat_crypto and not libcrypto.found() qat_crypto = false dpdk_drvs_disabled += qat_crypto_path - set_variable(qat_crypto_path.underscorify() + '_disable_reason', + set_variable('drv_' + qat_crypto_path.underscorify() + '_disable_reason', 'missing dependency, libcrypto') endif diff -Nru dpdk-22.11.4/drivers/common/sfc_efx/base/efx.h dpdk-22.11.5/drivers/common/sfc_efx/base/efx.h --- dpdk-22.11.4/drivers/common/sfc_efx/base/efx.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/common/sfc_efx/base/efx.h 2024-04-22 11:25:10.000000000 +0000 @@ -7,6 +7,8 @@ #ifndef _SYS_EFX_H #define _SYS_EFX_H +#include <assert.h> + #include "efx_annote.h" #include "efsys.h" #include "efx_types.h" @@ -17,14 +19,20 @@ extern "C" { #endif -#define EFX_STATIC_ASSERT(_cond) \ - ((void)sizeof (char[(_cond) ? 1 : -1])) +/* + * Triggers an error at compilation time if the condition is false. + * + * The { } exists to workaround a bug in clang (#55821) + * where it would not handle _Static_assert in a switch case. 
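+ * For example, EFX_STATIC_ASSERT(sizeof (uint32_t) == 4) expands to
+ * { static_assert((sizeof (uint32_t) == 4), "sizeof (uint32_t) == 4"); },
+ * so a false condition fails the build with the stringified condition as
+ * the diagnostic, instead of the old negative-size-array trick.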
+ */ +#define EFX_STATIC_ASSERT(_cond) \ + { static_assert((_cond), #_cond); } #define EFX_ARRAY_SIZE(_array) \ (sizeof (_array) / sizeof ((_array)[0])) #define EFX_FIELD_OFFSET(_type, _field) \ - ((size_t)&(((_type *)0)->_field)) + offsetof(_type, _field) /* The macro expands divider twice */ #define EFX_DIV_ROUND_UP(_n, _d) (((_n) + (_d) - 1) / (_d)) diff -Nru dpdk-22.11.4/drivers/crypto/ipsec_mb/ipsec_mb_ops.c dpdk-22.11.5/drivers/crypto/ipsec_mb/ipsec_mb_ops.c --- dpdk-22.11.4/drivers/crypto/ipsec_mb/ipsec_mb_ops.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/crypto/ipsec_mb/ipsec_mb_ops.c 2024-04-22 11:25:10.000000000 +0000 @@ -406,7 +406,7 @@ resp_param->result = ipsec_mb_qp_release(dev, qp_id); break; default: - CDEV_LOG_ERR("invalid mp request type\n"); + CDEV_LOG_ERR("invalid mp request type"); } out: diff -Nru dpdk-22.11.4/drivers/crypto/ipsec_mb/pmd_aesni_mb.c dpdk-22.11.5/drivers/crypto/ipsec_mb/pmd_aesni_mb.c --- dpdk-22.11.4/drivers/crypto/ipsec_mb/pmd_aesni_mb.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/crypto/ipsec_mb/pmd_aesni_mb.c 2024-04-22 11:25:10.000000000 +0000 @@ -1017,9 +1017,6 @@ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; job->u.XCBC._k2 = session->auth.xcbc.k2; job->u.XCBC._k3 = session->auth.xcbc.k3; - - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_CCM: @@ -1034,8 +1031,6 @@ job->u.CMAC._key_expanded = session->auth.cmac.expkey; job->u.CMAC._skey1 = session->auth.cmac.skey1; job->u.CMAC._skey2 = session->auth.cmac.skey2; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_GMAC: @@ -1331,24 +1326,17 @@ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; job->u.XCBC._k2 = session->auth.xcbc.k2; job->u.XCBC._k3 = session->auth.xcbc.k3; - - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_CCM: job->u.CCM.aad = op->sym->aead.aad.data + 18; job->u.CCM.aad_len_in_bytes = session->aead.aad_len; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_CMAC: job->u.CMAC._key_expanded = session->auth.cmac.expkey; job->u.CMAC._skey1 = session->auth.cmac.skey1; job->u.CMAC._skey2 = session->auth.cmac.skey2; - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.decode; break; case IMB_AUTH_AES_GMAC: @@ -1396,8 +1384,6 @@ job->cipher_mode = IMB_CIPHER_CHACHA20_POLY1305_SGL; job->hash_alg = IMB_AUTH_CHACHA20_POLY1305_SGL; } - job->enc_keys = session->cipher.expanded_aes_keys.encode; - job->dec_keys = session->cipher.expanded_aes_keys.encode; break; default: job->u.HMAC._hashed_auth_key_xor_ipad = diff -Nru dpdk-22.11.4/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h dpdk-22.11.5/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h --- dpdk-22.11.4/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h 2024-04-22 11:25:10.000000000 +0000 @@ -812,10 +812,12 @@ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = q - ICP_QAT_HW_CCM_NONCE_OFFSET; - rte_memcpy((uint8_t *)aad->va + - ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, - ctx->cipher_iv.length); + if (ctx->aad_len > 0) { + 
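+ /*
+ * aad->va may be NULL when the operation carries no AAD
+ * (aad_len == 0); staging the CCM nonce into the AAD buffer
+ * only when one exists avoids the NULL dereference this
+ * hunk fixes.
+ */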
rte_memcpy((uint8_t *)aad->va + + ICP_QAT_HW_CCM_NONCE_OFFSET, + (uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET, + ctx->cipher_iv.length); + } break; default: break; diff -Nru dpdk-22.11.4/drivers/dma/dpaa2/dpaa2_qdma.c dpdk-22.11.5/drivers/dma/dpaa2/dpaa2_qdma.c --- dpdk-22.11.4/drivers/dma/dpaa2/dpaa2_qdma.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/dma/dpaa2/dpaa2_qdma.c 2024-04-22 11:25:10.000000000 +0000 @@ -16,9 +16,6 @@ #define DPAA2_QDMA_PREFETCH "prefetch" -/* Dynamic log type identifier */ -int dpaa2_qdma_logtype; - uint32_t dpaa2_coherent_no_alloc_cache; uint32_t dpaa2_coherent_alloc_cache; @@ -1699,4 +1696,4 @@ RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd); RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma, "no_prefetch=<int> "); -RTE_LOG_REGISTER_DEFAULT(dpaa_qdma2_logtype, INFO); +RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO); diff -Nru dpdk-22.11.4/drivers/dma/idxd/idxd_bus.c dpdk-22.11.5/drivers/dma/idxd/idxd_bus.c --- dpdk-22.11.4/drivers/dma/idxd/idxd_bus.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/dma/idxd/idxd_bus.c 2024-04-22 11:25:10.000000000 +0000 @@ -261,9 +261,15 @@ is_for_this_process_use(struct rte_dsa_device *dev, const char *name) { char *runtime_dir = strdup(rte_eal_get_runtime_dir()); - char *prefix = basename(runtime_dir); - int prefixlen = strlen(prefix); int retval = 0; + int prefixlen; + char *prefix; + + if (runtime_dir == NULL) + return retval; + + prefix = basename(runtime_dir); + prefixlen = strlen(prefix); if (strncmp(name, "dpdk_", 5) == 0) retval = 1; diff -Nru dpdk-22.11.4/drivers/event/cnxk/cnxk_eventdev.c dpdk-22.11.5/drivers/event/cnxk/cnxk_eventdev.c --- dpdk-22.11.4/drivers/event/cnxk/cnxk_eventdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/event/cnxk/cnxk_eventdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -150,16 +150,17 @@ deq_tmo_ns = conf->dequeue_timeout_ns; - if (deq_tmo_ns == 0) - deq_tmo_ns = dev->min_dequeue_timeout_ns; - if (deq_tmo_ns < dev->min_dequeue_timeout_ns || - deq_tmo_ns > dev->max_dequeue_timeout_ns) { + if (deq_tmo_ns && (deq_tmo_ns < dev->min_dequeue_timeout_ns || + deq_tmo_ns > dev->max_dequeue_timeout_ns)) { plt_err("Unsupported dequeue timeout requested"); return -EINVAL; } - if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) + if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) { + if (deq_tmo_ns == 0) + deq_tmo_ns = dev->min_dequeue_timeout_ns; dev->is_timeout_deq = 1; + } dev->deq_tmo_ns = deq_tmo_ns; @@ -500,6 +501,9 @@ char *end = NULL; char *f = s; + if (s == NULL) + return; + while (*s) { if (*s == '[') start = s; @@ -587,7 +591,7 @@ } dev->is_timeout_deq = 0; - dev->min_dequeue_timeout_ns = 0; + dev->min_dequeue_timeout_ns = USEC2NSEC(1); dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF); dev->max_num_events = -1; dev->nb_event_queues = 0; diff -Nru dpdk-22.11.4/drivers/event/dlb2/dlb2.c dpdk-22.11.5/drivers/event/dlb2/dlb2.c --- dpdk-22.11.4/drivers/event/dlb2/dlb2.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/event/dlb2/dlb2.c 2024-04-22 11:25:10.000000000 +0000 @@ -216,7 +216,6 @@ dlb2_hw_query_resources(struct dlb2_eventdev *dlb2) { struct dlb2_hw_dev *handle = &dlb2->qm_instance; - struct dlb2_hw_resource_info *dlb2_info = &handle->info; int num_ldb_ports; int ret; @@ -278,8 +277,6 @@ handle->info.hw_rsrc_max.reorder_window_size = dlb2->hw_rsrc_query_results.num_hist_list_entries; - rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info)); - return 0; } diff -Nru 
dpdk-22.11.4/drivers/event/opdl/opdl_ring.c dpdk-22.11.5/drivers/event/opdl/opdl_ring.c --- dpdk-22.11.4/drivers/event/opdl/opdl_ring.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/event/opdl/opdl_ring.c 2024-04-22 11:25:10.000000000 +0000 @@ -910,7 +910,7 @@ RTE_CACHE_LINE_MASK) != 0); RTE_BUILD_BUG_ON((offsetof(struct opdl_ring, slots) & RTE_CACHE_LINE_MASK) != 0); - RTE_BUILD_BUG_ON(!rte_is_power_of_2(OPDL_DISCLAIMS_PER_LCORE)); + RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(OPDL_DISCLAIMS_PER_LCORE)); /* Parameter checking */ if (name == NULL) { diff -Nru dpdk-22.11.4/drivers/meson.build dpdk-22.11.5/drivers/meson.build --- dpdk-22.11.4/drivers/meson.build 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/meson.build 2024-04-22 11:25:10.000000000 +0000 @@ -159,7 +159,7 @@ # component disable printout in those cases if reason != '' dpdk_drvs_disabled += drv_path - set_variable(drv_path.underscorify() + '_disable_reason', reason) + set_variable('drv_' + drv_path.underscorify() + '_disable_reason', reason) endif continue endif diff -Nru dpdk-22.11.4/drivers/net/af_xdp/rte_eth_af_xdp.c dpdk-22.11.5/drivers/net/af_xdp/rte_eth_af_xdp.c --- dpdk-22.11.4/drivers/net/af_xdp/rte_eth_af_xdp.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/af_xdp/rte_eth_af_xdp.c 2024-04-22 11:25:10.000000000 +0000 @@ -938,6 +938,9 @@ static void xdp_umem_destroy(struct xsk_umem_info *umem) { + (void)xsk_umem__delete(umem->umem); + umem->umem = NULL; + #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG) umem->mb_pool = NULL; #else @@ -970,11 +973,8 @@ break; xsk_socket__delete(rxq->xsk); - if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - == 0) { - (void)xsk_umem__delete(rxq->umem->umem); + if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) - 1 == 0) xdp_umem_destroy(rxq->umem); - } /* free pkt_tx_queue */ rte_free(rxq->pair); @@ -1211,6 +1211,7 @@ AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n"); goto err; } + umem->mz = mz; ret = xsk_umem__create(&umem->umem, mz->addr, ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE, @@ -1221,7 +1222,6 @@ AF_XDP_LOG(ERR, "Failed to create umem\n"); goto err; } - umem->mz = mz; return umem; diff -Nru dpdk-22.11.4/drivers/net/bnx2x/bnx2x.c dpdk-22.11.5/drivers/net/bnx2x/bnx2x.c --- dpdk-22.11.4/drivers/net/bnx2x/bnx2x.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnx2x/bnx2x.c 2024-04-22 11:25:10.000000000 +0000 @@ -2389,7 +2389,7 @@ static int bnx2x_alloc_ilt_lines_mem(struct bnx2x_softc *sc) { sc->ilt->lines = rte_calloc("", - sizeof(struct ilt_line), ILT_MAX_LINES, + ILT_MAX_LINES, sizeof(struct ilt_line), RTE_CACHE_LINE_SIZE); return sc->ilt->lines == NULL; } diff -Nru dpdk-22.11.4/drivers/net/bnx2x/bnx2x_stats.c dpdk-22.11.5/drivers/net/bnx2x/bnx2x_stats.c --- dpdk-22.11.4/drivers/net/bnx2x/bnx2x_stats.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnx2x/bnx2x_stats.c 2024-04-22 11:25:10.000000000 +0000 @@ -114,7 +114,7 @@ /* Update MCP's statistics if possible */ if (sc->func_stx) { - rte_memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, + memcpy(BNX2X_SP(sc, func_stats), &sc->func_stats, sizeof(sc->func_stats)); } @@ -817,10 +817,10 @@ etherstatspktsover1522octets); } - rte_memcpy(old, new, sizeof(struct nig_stats)); + memcpy(old, new, sizeof(struct nig_stats)); - rte_memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), - sizeof(struct mac_stx)); + memcpy(RTE_PTR_ADD(estats, offsetof(struct bnx2x_eth_stats, rx_stat_ifhcinbadoctets_hi)), + 
&pstats->mac_stx[1], sizeof(struct mac_stx)); estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; @@ -1492,9 +1492,11 @@ REG_RD(sc, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); if (!CHIP_IS_E3(sc)) { REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, - &(sc->port.old_nig_stats.egress_mac_pkt0_lo), 2); + RTE_PTR_ADD(&sc->port.old_nig_stats, + offsetof(struct nig_stats, egress_mac_pkt0_lo)), 2); REG_RD_DMAE(sc, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, - &(sc->port.old_nig_stats.egress_mac_pkt1_lo), 2); + RTE_PTR_ADD(&sc->port.old_nig_stats, + offsetof(struct nig_stats, egress_mac_pkt1_lo)), 2); } /* function stats */ diff -Nru dpdk-22.11.4/drivers/net/bnx2x/bnx2x_vfpf.c dpdk-22.11.5/drivers/net/bnx2x/bnx2x_vfpf.c --- dpdk-22.11.4/drivers/net/bnx2x/bnx2x_vfpf.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnx2x/bnx2x_vfpf.c 2024-04-22 11:25:10.000000000 +0000 @@ -52,9 +52,9 @@ /* check the mac address and VLAN and allocate memory if valid */ if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN)) - rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); + memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN); if (valid_bitmap & (1 << VLAN_VALID)) - rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, RTE_VLAN_HLEN); + memcpy(&bull->vlan, &sc->old_bulletin.vlan, sizeof(bull->vlan)); sc->old_bulletin = *bull; @@ -569,7 +569,7 @@ bnx2x_check_bull(sc); - rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); + memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN); bnx2x_add_tlv(sc, query, query->first_tlv.tl.length, BNX2X_VF_TLV_LIST_END, @@ -583,9 +583,9 @@ while (BNX2X_VF_STATUS_FAILURE == reply->status && bnx2x_check_bull(sc)) { /* A new mac was configured by PF for us */ - rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, + memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac, ETH_ALEN); - rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, + memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac, ETH_ALEN); rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr); @@ -622,10 +622,10 @@ BNX2X_VF_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); + memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key)); query->rss_key_size = T_ETH_RSS_KEY; - rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; query->rss_result_mask = params->rss_result_mask; diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt.h dpdk-22.11.5/drivers/net/bnxt/bnxt.h --- dpdk-22.11.4/drivers/net/bnxt/bnxt.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt.h 2024-04-22 11:25:10.000000000 +0000 @@ -441,8 +441,8 @@ struct bnxt_ctx_pg_info { uint32_t entries; - void *ctx_pg_arr[MAX_CTX_PAGES]; - rte_iova_t ctx_dma_arr[MAX_CTX_PAGES]; + void **ctx_pg_arr; + rte_iova_t *ctx_dma_arr; struct bnxt_ring_mem_info ring_mem; }; @@ -542,7 +542,6 @@ struct bnxt_rep_info { struct rte_eth_dev *vfr_eth_dev; - pthread_mutex_t vfr_lock; pthread_mutex_t vfr_start_lock; bool conduit_valid; }; @@ -867,6 +866,7 @@ struct rte_ether_addr *mcast_addr_list; rte_iova_t mc_list_dma_addr; uint32_t nb_mc_addr; +#define BNXT_DFLT_MAX_MC_ADDR 16 /* for compatibility with older firmware */ uint32_t max_mcast_addr; /* maximum number of mcast filters supported */ 
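/* Zero from firmware means no limit was reported; the HWRM capability
 * query in bnxt_hwrm.c below falls back to BNXT_DFLT_MAX_MC_ADDR (16)
 * in that case, per the define above. */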
struct rte_eth_rss_conf rss_conf; /* RSS configuration. */ diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_ethdev.c dpdk-22.11.5/drivers/net/bnxt/bnxt_ethdev.c --- dpdk-22.11.4/drivers/net/bnxt/bnxt_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -1648,10 +1648,8 @@ pthread_mutex_destroy(&bp->def_cp_lock); pthread_mutex_destroy(&bp->health_check_lock); pthread_mutex_destroy(&bp->err_recovery_lock); - if (bp->rep_info) { - pthread_mutex_destroy(&bp->rep_info->vfr_lock); + if (bp->rep_info) pthread_mutex_destroy(&bp->rep_info->vfr_start_lock); - } } static void bnxt_drv_uninit(struct bnxt *bp) @@ -4702,7 +4700,7 @@ { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; const struct rte_memzone *mz = NULL; - char mz_name[RTE_MEMZONE_NAMESIZE]; + char name[RTE_MEMZONE_NAMESIZE]; rte_iova_t mz_phys_addr; uint64_t valid_bits = 0; uint32_t sz; @@ -4714,6 +4712,19 @@ rmem->nr_pages = RTE_ALIGN_MUL_CEIL(mem_size, BNXT_PAGE_SIZE) / BNXT_PAGE_SIZE; rmem->page_size = BNXT_PAGE_SIZE; + + snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_arr%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); + ctx_pg->ctx_pg_arr = rte_zmalloc(name, sizeof(void *) * rmem->nr_pages, 0); + if (ctx_pg->ctx_pg_arr == NULL) + return -ENOMEM; + + snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_dma_arr%s_%x_%d", + suffix, idx, bp->eth_dev->data->port_id); + ctx_pg->ctx_dma_arr = rte_zmalloc(name, sizeof(rte_iova_t *) * rmem->nr_pages, 0); + if (ctx_pg->ctx_dma_arr == NULL) + return -ENOMEM; + rmem->pg_arr = ctx_pg->ctx_pg_arr; rmem->dma_arr = ctx_pg->ctx_dma_arr; rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; @@ -4721,13 +4732,13 @@ valid_bits = PTU_PTE_VALID; if (rmem->nr_pages > 1) { - snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_pg_tbl%s_%x_%d", suffix, idx, bp->eth_dev->data->port_id); - mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; - mz = rte_memzone_lookup(mz_name); + name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(name); if (!mz) { - mz = rte_memzone_reserve_aligned(mz_name, + mz = rte_memzone_reserve_aligned(name, rmem->nr_pages * 8, bp->eth_dev->device->numa_node, RTE_MEMZONE_2MB | @@ -4746,11 +4757,11 @@ rmem->pg_tbl_mz = mz; } - snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", + snprintf(name, RTE_MEMZONE_NAMESIZE, "bnxt_ctx_%s_%x_%d", suffix, idx, bp->eth_dev->data->port_id); - mz = rte_memzone_lookup(mz_name); + mz = rte_memzone_lookup(name); if (!mz) { - mz = rte_memzone_reserve_aligned(mz_name, + mz = rte_memzone_reserve_aligned(name, mem_size, bp->eth_dev->device->numa_node, RTE_MEMZONE_1GB | @@ -4796,6 +4807,17 @@ return; bp->ctx->flags &= ~BNXT_CTX_FLAG_INITED; + rte_free(bp->ctx->qp_mem.ctx_pg_arr); + rte_free(bp->ctx->srq_mem.ctx_pg_arr); + rte_free(bp->ctx->cq_mem.ctx_pg_arr); + rte_free(bp->ctx->vnic_mem.ctx_pg_arr); + rte_free(bp->ctx->stat_mem.ctx_pg_arr); + rte_free(bp->ctx->qp_mem.ctx_dma_arr); + rte_free(bp->ctx->srq_mem.ctx_dma_arr); + rte_free(bp->ctx->cq_mem.ctx_dma_arr); + rte_free(bp->ctx->vnic_mem.ctx_dma_arr); + rte_free(bp->ctx->stat_mem.ctx_dma_arr); + rte_memzone_free(bp->ctx->qp_mem.ring_mem.mz); rte_memzone_free(bp->ctx->srq_mem.ring_mem.mz); rte_memzone_free(bp->ctx->cq_mem.ring_mem.mz); @@ -4808,6 +4830,8 @@ rte_memzone_free(bp->ctx->stat_mem.ring_mem.pg_tbl_mz); for (i = 0; i < bp->ctx->tqm_fp_rings_count + 1; i++) { + rte_free(bp->ctx->tqm_mem[i]->ctx_pg_arr); + rte_free(bp->ctx->tqm_mem[i]->ctx_dma_arr); if (bp->ctx->tqm_mem[i]) 
rte_memzone_free(bp->ctx->tqm_mem[i]->ring_mem.mz); } @@ -6073,13 +6097,6 @@ for (i = 0; i < BNXT_MAX_CFA_CODE; i++) bp->cfa_code_map[i] = BNXT_VF_IDX_INVALID; - rc = pthread_mutex_init(&bp->rep_info->vfr_lock, NULL); - if (rc) { - PMD_DRV_LOG(ERR, "Unable to initialize vfr_lock\n"); - bnxt_free_rep_info(bp); - return rc; - } - rc = pthread_mutex_init(&bp->rep_info->vfr_start_lock, NULL); if (rc) { PMD_DRV_LOG(ERR, "Unable to initialize vfr_start_lock\n"); diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_hwrm.c dpdk-22.11.5/drivers/net/bnxt/bnxt_hwrm.c --- dpdk-22.11.4/drivers/net/bnxt/bnxt_hwrm.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_hwrm.c 2024-04-22 11:25:10.000000000 +0000 @@ -907,7 +907,8 @@ bp->max_l2_ctx, bp->max_vnics); bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); bp->max_mcast_addr = rte_le_to_cpu_32(resp->max_mcast_filters); - + if (!bp->max_mcast_addr) + bp->max_mcast_addr = BNXT_DFLT_MAX_MC_ADDR; if (BNXT_PF(bp)) { bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics); if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) { @@ -2972,6 +2973,8 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed, struct bnxt_link_info *link_info) { + uint16_t support_pam4_speeds = link_info->support_pam4_speeds; + uint16_t support_speeds = link_info->support_speeds; uint16_t eth_link_speed = 0; if (conf_link_speed == RTE_ETH_LINK_SPEED_AUTONEG) @@ -3003,29 +3006,30 @@ case RTE_ETH_LINK_SPEED_25G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB; + link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; break; case RTE_ETH_LINK_SPEED_40G: eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; break; case RTE_ETH_LINK_SPEED_50G: - if (link_info->support_pam4_speeds & - HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { - eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; - link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; - } else { + if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) { eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; + } else if (support_pam4_speeds & + HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB; + link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; } break; case RTE_ETH_LINK_SPEED_100G: - if (link_info->support_pam4_speeds & - HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { - eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; - link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; - } else { + if (support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) { eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; link_info->link_signal_mode = BNXT_SIG_MODE_NRZ; + } else if (support_pam4_speeds & + HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) { + eth_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB; + link_info->link_signal_mode = BNXT_SIG_MODE_PAM4; } break; case RTE_ETH_LINK_SPEED_200G: diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_reps.c dpdk-22.11.5/drivers/net/bnxt/bnxt_reps.c --- dpdk-22.11.4/drivers/net/bnxt/bnxt_reps.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_reps.c 2024-04-22 11:25:10.000000000 +0000 @@ -32,6 +32,14 @@ .flow_ops_get = bnxt_flow_ops_get_op }; +static bool bnxt_rep_check_parent(struct bnxt_representor *rep) +{ + if (!rep->parent_dev->data->dev_private) + return false; + + return true; +} + uint16_t 
bnxt_vfr_recv(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *mbuf) { @@ -124,8 +132,8 @@ qid = vfr_txq->txq->queue_id; vf_rep_bp = vfr_txq->bp; parent = vf_rep_bp->parent_dev->data->dev_private; - pthread_mutex_lock(&parent->rep_info->vfr_lock); ptxq = parent->tx_queues[qid]; + pthread_mutex_lock(&ptxq->txq_lock); ptxq->vfr_tx_cfa_action = vf_rep_bp->vfr_tx_cfa_action; @@ -134,9 +142,9 @@ vf_rep_bp->tx_pkts[qid]++; } - rc = bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); + rc = _bnxt_xmit_pkts(ptxq, tx_pkts, nb_pkts); ptxq->vfr_tx_cfa_action = 0; - pthread_mutex_unlock(&parent->rep_info->vfr_lock); + pthread_mutex_unlock(&ptxq->txq_lock); return rc; } @@ -266,12 +274,12 @@ PMD_DRV_LOG(DEBUG, "BNXT Port:%d VFR uninit\n", eth_dev->data->port_id); eth_dev->data->mac_addrs = NULL; - parent_bp = rep->parent_dev->data->dev_private; - if (!parent_bp) { + if (!bnxt_rep_check_parent(rep)) { PMD_DRV_LOG(DEBUG, "BNXT Port:%d already freed\n", eth_dev->data->port_id); return 0; } + parent_bp = rep->parent_dev->data->dev_private; parent_bp->num_reps--; vf_id = rep->vf_id; @@ -539,11 +547,12 @@ int rc = 0; /* MAC Specifics */ - parent_bp = rep_bp->parent_dev->data->dev_private; - if (!parent_bp) { - PMD_DRV_LOG(ERR, "Rep parent NULL!\n"); + if (!bnxt_rep_check_parent(rep_bp)) { + /* Need not be an error scenario, if parent is closed first */ + PMD_DRV_LOG(INFO, "Rep parent port does not exist.\n"); return rc; } + parent_bp = rep_bp->parent_dev->data->dev_private; PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n"); dev_info->max_mac_addrs = parent_bp->max_l2_ctx; dev_info->max_hash_mac_addrs = 0; @@ -730,10 +739,10 @@ struct bnxt_tx_queue *parent_txq, *txq; struct bnxt_vf_rep_tx_queue *vfr_txq; - if (queue_idx >= rep_bp->rx_nr_rings) { + if (queue_idx >= rep_bp->tx_nr_rings) { PMD_DRV_LOG(ERR, "Cannot create Tx rings %d. 
%d rings available\n", - queue_idx, rep_bp->rx_nr_rings); + queue_idx, rep_bp->tx_nr_rings); return -EINVAL; } diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_txq.c dpdk-22.11.5/drivers/net/bnxt/bnxt_txq.c --- dpdk-22.11.4/drivers/net/bnxt/bnxt_txq.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_txq.c 2024-04-22 11:25:10.000000000 +0000 @@ -111,6 +111,7 @@ txq->mz = NULL; rte_free(txq->free); + pthread_mutex_destroy(&txq->txq_lock); rte_free(txq); dev->data->tx_queues[queue_idx] = NULL; } @@ -194,6 +195,11 @@ goto err; } + rc = pthread_mutex_init(&txq->txq_lock, NULL); + if (rc != 0) { + PMD_DRV_LOG(ERR, "TxQ mutex init failed!"); + goto err; + } return 0; err: bnxt_tx_queue_release_op(eth_dev, queue_idx); diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_txq.h dpdk-22.11.5/drivers/net/bnxt/bnxt_txq.h --- dpdk-22.11.4/drivers/net/bnxt/bnxt_txq.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_txq.h 2024-04-22 11:25:10.000000000 +0000 @@ -26,6 +26,7 @@ int index; int tx_wake_thresh; uint32_t vfr_tx_cfa_action; + pthread_mutex_t txq_lock; struct bnxt_tx_ring_info *tx_ring; unsigned int cp_nr_rings; diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_txr.c dpdk-22.11.5/drivers/net/bnxt/bnxt_txr.c --- dpdk-22.11.4/drivers/net/bnxt/bnxt_txr.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_txr.c 2024-04-22 11:25:10.000000000 +0000 @@ -516,6 +516,19 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { + struct bnxt_tx_queue *txq = tx_queue; + uint16_t rc; + + pthread_mutex_lock(&txq->txq_lock); + rc = _bnxt_xmit_pkts(tx_queue, tx_pkts, nb_pkts); + pthread_mutex_unlock(&txq->txq_lock); + + return rc; +} + +uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ int rc; uint16_t nb_tx_pkts = 0; uint16_t coal_pkts = 0; diff -Nru dpdk-22.11.4/drivers/net/bnxt/bnxt_txr.h dpdk-22.11.5/drivers/net/bnxt/bnxt_txr.h --- dpdk-22.11.4/drivers/net/bnxt/bnxt_txr.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/bnxt_txr.h 2024-04-22 11:25:10.000000000 +0000 @@ -46,7 +46,9 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); + uint16_t nb_pkts); +uint16_t _bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64) uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); diff -Nru dpdk-22.11.4/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c dpdk-22.11.5/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c --- dpdk-22.11.4/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c 2024-04-22 11:25:10.000000000 +0000 @@ -171,6 +171,7 @@ myclient_cnt = bnxt_ulp_cntxt_num_shared_clients_get(ulp_ctx); if (myclient_cnt == 0) { + bnxt_ulp_cntxt_entry_release(); BNXT_TF_DBG(ERR, "PANIC Client Count is zero kill timer\n."); return; diff -Nru dpdk-22.11.4/drivers/net/bonding/rte_eth_bond_flow.c dpdk-22.11.5/drivers/net/bonding/rte_eth_bond_flow.c --- dpdk-22.11.4/drivers/net/bonding/rte_eth_bond_flow.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/bonding/rte_eth_bond_flow.c 2024-04-22 11:25:10.000000000 +0000 @@ -180,6 +180,8 @@ count->bytes = 0; count->hits = 0; + count->bytes_set = 0; + count->hits_set = 0; 
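/*
 * Reviewer note, not part of the patch: struct rte_flow_query_count pairs
 * each counter with a validity flag (hits_set/bytes_set). The lines above
 * clear those flags in the aggregate before polling the member ports, and
 * the loop below ORs each member's flags back in, so the bond only reports
 * a counter as valid when at least one member actually set it.
 */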
rte_memcpy(&slave_count, count, sizeof(slave_count)); for (i = 0; i < internals->slave_count; i++) { ret = rte_flow_query(internals->slaves[i].port_id, @@ -192,8 +194,12 @@ } count->bytes += slave_count.bytes; count->hits += slave_count.hits; + count->bytes_set |= slave_count.bytes_set; + count->hits_set |= slave_count.hits_set; slave_count.bytes = 0; slave_count.hits = 0; + slave_count.bytes_set = 0; + slave_count.hits_set = 0; } return 0; } diff -Nru dpdk-22.11.4/drivers/net/cnxk/cn10k_tx.h dpdk-22.11.5/drivers/net/cnxk/cn10k_tx.h --- dpdk-22.11.4/drivers/net/cnxk/cn10k_tx.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/cnxk/cn10k_tx.h 2024-04-22 11:25:10.000000000 +0000 @@ -943,6 +943,7 @@ len -= sg_u & 0xFFFF; nb_segs = m->nb_segs - 1; m_next = m->next; + m->nb_segs = 1; slist = &cmd[3 + off + 1]; /* Set invert df if buffer is not to be freed by H/W */ @@ -1387,6 +1388,9 @@ len -= dlen; sg_u = sg_u | ((uint64_t)dlen); + /* Mark mempool object as "put" since it is freed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); + nb_segs = m->nb_segs - 1; m_next = m->next; @@ -1401,6 +1405,7 @@ #endif m->next = NULL; + m->nb_segs = 1; m = m_next; /* Fill mbuf segments */ do { @@ -1433,6 +1438,9 @@ slist++; } m->next = NULL; + /* Mark mempool object as "put" since it is freed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); + m = m_next; } while (nb_segs); @@ -1469,6 +1477,8 @@ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); rte_io_wmb(); #endif + /* Mark mempool object as "put" since it is freed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); return; } @@ -1513,6 +1523,11 @@ *data128 |= ((__uint128_t)7) << *shift; *shift += 3; + /* Mark mempool object as "put" since it is freed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0); + RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0); + RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0); + RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0); return 1; } } @@ -1539,6 +1554,11 @@ vst1q_u64(lmt_addr + 10, cmd2[j + 1]); vst1q_u64(lmt_addr + 12, cmd1[j + 1]); vst1q_u64(lmt_addr + 14, cmd3[j + 1]); + + /* Mark mempool object as "put" since it is freed by NIX */ + RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0); + RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool, + (void **)&mbufs[j + 1], 1, 0); } else if (flags & NIX_TX_NEED_EXT_HDR) { /* EXT header take 3 each, space for 2 segs.*/ cn10k_nix_prepare_mseg_vec(mbufs[j], diff -Nru dpdk-22.11.4/drivers/net/cnxk/cn9k_tx.h dpdk-22.11.5/drivers/net/cnxk/cn9k_tx.h --- dpdk-22.11.4/drivers/net/cnxk/cn9k_tx.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/cnxk/cn9k_tx.h 2024-04-22 11:25:10.000000000 +0000 @@ -449,6 +449,10 @@ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0); rte_io_wmb(); #endif +#ifdef RTE_ENABLE_ASSERT + m->next = NULL; + m->nb_segs = 1; +#endif m = m_next; if (!m) goto done; @@ -483,6 +487,9 @@ sg_u = sg->u; slist++; } +#ifdef RTE_ENABLE_ASSERT + m->next = NULL; +#endif m = m_next; } while (nb_segs); @@ -496,6 +503,9 @@ segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); send_hdr->w0.sizem1 = segdw - 1; +#ifdef RTE_ENABLE_ASSERT + rte_io_wmb(); +#endif return segdw; } @@ -699,6 +709,10 @@ rte_io_wmb(); #endif +#ifdef RTE_ENABLE_ASSERT + m->next = NULL; + m->nb_segs = 1; +#endif m = m_next; /* Fill mbuf segments */ do { @@ -728,6 +742,9 @@ sg_u = sg->u; slist++; } +#ifdef RTE_ENABLE_ASSERT 
+ m->next = NULL; +#endif m = m_next; } while (nb_segs); @@ -743,6 +760,9 @@ !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); sh->sizem1 = segdw - 1; +#ifdef RTE_ENABLE_ASSERT + rte_io_wmb(); +#endif return segdw; } diff -Nru dpdk-22.11.4/drivers/net/cnxk/cnxk_ethdev_devargs.c dpdk-22.11.5/drivers/net/cnxk/cnxk_ethdev_devargs.c --- dpdk-22.11.4/drivers/net/cnxk/cnxk_ethdev_devargs.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/cnxk/cnxk_ethdev_devargs.c 2024-04-22 11:25:10.000000000 +0000 @@ -75,7 +75,7 @@ if (errno) val = 0; - *(uint16_t *)extra_args = val; + *(uint32_t *)extra_args = val; return 0; } diff -Nru dpdk-22.11.4/drivers/net/cnxk/cnxk_ethdev_ops.c dpdk-22.11.5/drivers/net/cnxk/cnxk_ethdev_ops.c --- dpdk-22.11.4/drivers/net/cnxk/cnxk_ethdev_ops.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/cnxk/cnxk_ethdev_ops.c 2024-04-22 11:25:10.000000000 +0000 @@ -20,8 +20,7 @@ devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT; devinfo->max_mac_addrs = dev->max_mac_entries; devinfo->max_vfs = pci_dev->max_vfs; - devinfo->max_mtu = devinfo->max_rx_pktlen - - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN); + devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD; devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD; devinfo->rx_offload_capa = dev->rx_offload_capa; @@ -508,8 +507,9 @@ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev); struct rte_eth_dev_data *data = eth_dev->data; struct roc_nix *nix = &dev->nix; + struct cnxk_eth_rxq_sp *rxq_sp; + uint32_t buffsz = 0; int rc = -EINVAL; - uint32_t buffsz; frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en; @@ -525,8 +525,24 @@ goto exit; } - buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; - old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; + if (!eth_dev->data->nb_rx_queues) + goto skip_buffsz_check; + + /* Perform buff size check */ + if (data->min_rx_buf_size) { + buffsz = data->min_rx_buf_size; + } else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) { + rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]); + + if (rxq_sp->qconf.mp) + buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp); + } + + /* Skip validation if RQ's are not yet setup */ + if (!buffsz) + goto skip_buffsz_check; + + buffsz -= RTE_PKTMBUF_HEADROOM; /* Refuse MTU that requires the support of scattered packets * when this feature has not been enabled before. 
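/*
 * Reviewer sketch for the cnxk MTU hunks above and below, not part of the
 * patch: the new code derives the Rx buffer size from the data room of the
 * first queue's mempool when data->min_rx_buf_size is not yet populated,
 * and skips the scatter validation entirely until at least one Rx queue is
 * set up. Flattened here into a standalone helper (function and parameter
 * names are ours; cnxk reaches the mempool through its driver-private
 * cnxk_eth_rxq_sp structure):
 */
#include <stdint.h>
#include <rte_mbuf.h>

static int
rx_buffer_fits_frame(uint16_t nb_rx_queues, uint32_t min_rx_buf_size,
		struct rte_mempool *rxq0_mp, uint32_t frame_size)
{
	uint32_t buffsz = 0;

	/* No Rx queue configured yet: nothing to validate against. */
	if (nb_rx_queues == 0)
		return 1;

	if (min_rx_buf_size != 0)
		buffsz = min_rx_buf_size;
	else if (rxq0_mp != NULL)
		buffsz = rte_pktmbuf_data_room_size(rxq0_mp);

	/* Queue setup still incomplete: skip the check, as the patch does. */
	if (buffsz == 0)
		return 1;

	buffsz -= RTE_PKTMBUF_HEADROOM;
	return frame_size <= buffsz;
}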
@@ -544,6 +560,8 @@ goto exit; } +skip_buffsz_check: + old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD; /* if new MTU was smaller than old one, then flush all SQs before MTU change */ if (old_frame_size > frame_size) { if (data->dev_started) { @@ -555,19 +573,9 @@ frame_size -= RTE_ETHER_CRC_LEN; - /* Update mtu on Tx */ - rc = roc_nix_mac_mtu_set(nix, frame_size); - if (rc) { - plt_err("Failed to set MTU, rc=%d", rc); - goto exit; - } - - /* Sync same frame size on Rx */ + /* Set frame size on Rx */ rc = roc_nix_mac_max_rx_len_set(nix, frame_size); if (rc) { - /* Rollback to older mtu */ - roc_nix_mac_mtu_set(nix, - old_frame_size - RTE_ETHER_CRC_LEN); plt_err("Failed to max Rx frame length, rc=%d", rc); goto exit; } diff -Nru dpdk-22.11.4/drivers/net/cnxk/cnxk_flow.c dpdk-22.11.5/drivers/net/cnxk/cnxk_flow.c --- dpdk-22.11.4/drivers/net/cnxk/cnxk_flow.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/cnxk/cnxk_flow.c 2024-04-22 11:25:10.000000000 +0000 @@ -98,15 +98,19 @@ } static void -npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, - const struct roc_npc_action *rss_action, - uint32_t *flowkey_cfg) +npc_rss_flowkey_get(struct cnxk_eth_dev *eth_dev, const struct roc_npc_action *rss_action, + uint32_t *flowkey_cfg, uint64_t default_rss_types) { const struct roc_npc_action_rss *rss; + uint64_t rss_types; rss = (const struct roc_npc_action_rss *)rss_action->conf; + rss_types = rss->types; + /* If no RSS types are specified, use default one */ + if (rss_types == 0) + rss_types = default_rss_types; - *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss->types, rss->level); + *flowkey_cfg = cnxk_rss_ethdev_to_nix(eth_dev, rss_types, rss->level); } static int @@ -206,7 +210,8 @@ goto err_exit; in_actions[i].type = ROC_NPC_ACTION_TYPE_RSS; in_actions[i].conf = actions->conf; - npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg); + npc_rss_flowkey_get(dev, &in_actions[i], flowkey_cfg, + eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); break; case RTE_FLOW_ACTION_TYPE_SECURITY: diff -Nru dpdk-22.11.4/drivers/net/dpaa/dpaa_ethdev.c dpdk-22.11.5/drivers/net/dpaa/dpaa_ethdev.c --- dpdk-22.11.4/drivers/net/dpaa/dpaa_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/dpaa/dpaa_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -363,7 +363,8 @@ RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_SCTP, - RTE_PTYPE_TUNNEL_ESP + RTE_PTYPE_TUNNEL_ESP, + RTE_PTYPE_UNKNOWN }; PMD_INIT_FUNC_TRACE(); diff -Nru dpdk-22.11.4/drivers/net/ena/base/ena_com.c dpdk-22.11.5/drivers/net/ena/base/ena_com.c --- dpdk-22.11.4/drivers/net/ena/base/ena_com.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ena/base/ena_com.c 2024-04-22 11:25:10.000000000 +0000 @@ -34,6 +34,8 @@ #define ENA_REGS_ADMIN_INTR_MASK 1 +#define ENA_MAX_BACKOFF_DELAY_EXP 16U + #define ENA_MIN_ADMIN_POLL_US 100 #define ENA_MAX_ADMIN_POLL_US 5000 @@ -171,6 +173,7 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue, struct ena_comp_ctx *comp_ctx) { + comp_ctx->user_cqe = NULL; comp_ctx->occupied = false; ATOMIC32_DEC(&queue->outstanding_cmds); } @@ -464,6 +467,9 @@ return; } + if (!comp_ctx->occupied) + return; + comp_ctx->status = ENA_CMD_COMPLETED; comp_ctx->comp_status = cqe->acq_common_descriptor.status; @@ -539,8 +545,9 @@ static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) { + exp = ENA_MIN32(ENA_MAX_BACKOFF_DELAY_EXP, exp); delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us); - delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); + 
delay_us = ENA_MIN32(ENA_MAX_ADMIN_POLL_US, delay_us * (1U << exp)); ENA_USLEEP(delay_us); } diff -Nru dpdk-22.11.4/drivers/net/ena/ena_ethdev.c dpdk-22.11.5/drivers/net/ena/ena_ethdev.c --- dpdk-22.11.4/drivers/net/ena/ena_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ena/ena_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -37,10 +37,10 @@ #define ENA_MIN_RING_DESC 128 /* - * We should try to keep ENA_CLEANUP_BUF_SIZE lower than + * We should try to keep ENA_CLEANUP_BUF_THRESH lower than * RTE_MEMPOOL_CACHE_MAX_SIZE, so we can fit this in mempool local cache. */ -#define ENA_CLEANUP_BUF_SIZE 256 +#define ENA_CLEANUP_BUF_THRESH 256 #define ENA_PTYPE_HAS_HASH (RTE_PTYPE_L4_TCP | RTE_PTYPE_L4_UDP) @@ -3018,33 +3018,12 @@ return 0; } -static __rte_always_inline size_t -ena_tx_cleanup_mbuf_fast(struct rte_mbuf **mbufs_to_clean, - struct rte_mbuf *mbuf, - size_t mbuf_cnt, - size_t buf_size) -{ - struct rte_mbuf *m_next; - - while (mbuf != NULL) { - m_next = mbuf->next; - mbufs_to_clean[mbuf_cnt++] = mbuf; - if (mbuf_cnt == buf_size) { - rte_mempool_put_bulk(mbufs_to_clean[0]->pool, (void **)mbufs_to_clean, - (unsigned int)mbuf_cnt); - mbuf_cnt = 0; - } - mbuf = m_next; - } - - return mbuf_cnt; -} - static int ena_tx_cleanup(void *txp, uint32_t free_pkt_cnt) { - struct rte_mbuf *mbufs_to_clean[ENA_CLEANUP_BUF_SIZE]; + struct rte_mbuf *pkts_to_clean[ENA_CLEANUP_BUF_THRESH]; struct ena_ring *tx_ring = (struct ena_ring *)txp; size_t mbuf_cnt = 0; + size_t pkt_cnt = 0; unsigned int total_tx_descs = 0; unsigned int total_tx_pkts = 0; uint16_t cleanup_budget; @@ -3075,8 +3054,13 @@ mbuf = tx_info->mbuf; if (fast_free) { - mbuf_cnt = ena_tx_cleanup_mbuf_fast(mbufs_to_clean, mbuf, mbuf_cnt, - ENA_CLEANUP_BUF_SIZE); + pkts_to_clean[pkt_cnt++] = mbuf; + mbuf_cnt += mbuf->nb_segs; + if (mbuf_cnt >= ENA_CLEANUP_BUF_THRESH) { + rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); + mbuf_cnt = 0; + pkt_cnt = 0; + } } else { rte_pktmbuf_free(mbuf); } @@ -3100,8 +3084,7 @@ } if (mbuf_cnt != 0) - rte_mempool_put_bulk(mbufs_to_clean[0]->pool, - (void **)mbufs_to_clean, mbuf_cnt); + rte_pktmbuf_free_bulk(pkts_to_clean, pkt_cnt); /* Notify completion handler that full cleanup was performed */ if (free_pkt_cnt == 0 || total_tx_pkts < cleanup_budget) diff -Nru dpdk-22.11.4/drivers/net/failsafe/failsafe_args.c dpdk-22.11.5/drivers/net/failsafe/failsafe_args.c --- dpdk-22.11.4/drivers/net/failsafe/failsafe_args.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/failsafe/failsafe_args.c 2024-04-22 11:25:10.000000000 +0000 @@ -248,7 +248,7 @@ goto free_args; } else { ERROR("Unrecognized device type: %.*s", (int)b, param); - return -EINVAL; + ret = -EINVAL; } free_args: free(args); diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_cmd.c dpdk-22.11.5/drivers/net/hns3/hns3_cmd.c --- dpdk-22.11.4/drivers/net/hns3/hns3_cmd.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_cmd.c 2024-04-22 11:25:10.000000000 +0000 @@ -539,7 +539,9 @@ if (device_id == HNS3_DEV_ID_25GE_RDMA || device_id == HNS3_DEV_ID_50GE_RDMA || device_id == HNS3_DEV_ID_100G_RDMA_MACSEC || - device_id == HNS3_DEV_ID_200G_RDMA) + device_id == HNS3_DEV_ID_200G_RDMA || + device_id == HNS3_DEV_ID_100G_ROH || + device_id == HNS3_DEV_ID_200G_ROH) hns3_set_bit(hw->capability, HNS3_DEV_SUPPORT_DCB_B, 1); } diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_common.c dpdk-22.11.5/drivers/net/hns3/hns3_common.c --- dpdk-22.11.4/drivers/net/hns3/hns3_common.c 2024-01-23 15:08:10.000000000 +0000 +++ 
dpdk-22.11.5/drivers/net/hns3/hns3_common.c 2024-04-22 11:25:10.000000000 +0000 @@ -84,7 +84,7 @@ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE | RTE_ETH_TX_OFFLOAD_VLAN_INSERT); - if (!hw->port_base_vlan_cfg.state) + if (!hns->is_vf && !hw->port_base_vlan_cfg.state) info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT; if (hns3_dev_get_support(hw, OUTER_UDP_CKSUM)) diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_dcb.c dpdk-22.11.5/drivers/net/hns3/hns3_dcb.c --- dpdk-22.11.4/drivers/net/hns3/hns3_dcb.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_dcb.c 2024-04-22 11:25:10.000000000 +0000 @@ -1499,7 +1499,6 @@ static int hns3_dcb_hw_configure(struct hns3_adapter *hns) { - struct rte_eth_dcb_rx_conf *dcb_rx_conf; struct hns3_pf *pf = &hns->pf; struct hns3_hw *hw = &hns->hw; enum hns3_fc_status fc_status = hw->current_fc_status; @@ -1519,12 +1518,8 @@ } if (hw->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) { - dcb_rx_conf = &hw->data->dev_conf.rx_adv_conf.dcb_rx_conf; - if (dcb_rx_conf->nb_tcs == 0) - hw->dcb_info.pfc_en = 1; /* tc0 only */ - else - hw->dcb_info.pfc_en = - RTE_LEN2MASK((uint8_t)dcb_rx_conf->nb_tcs, uint8_t); + hw->dcb_info.pfc_en = + RTE_LEN2MASK((uint8_t)HNS3_MAX_USER_PRIO, uint8_t); hw->dcb_info.hw_pfc_map = hns3_dcb_undrop_tc_map(hw, hw->dcb_info.pfc_en); diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_ethdev.c dpdk-22.11.5/drivers/net/hns3/hns3_ethdev.c --- dpdk-22.11.4/drivers/net/hns3/hns3_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -384,7 +384,7 @@ hns3_warn(hw, "received reset interrupt"); hns3_schedule_reset(hns); } else if (event_cause == HNS3_VECTOR0_EVENT_MBX) { - hns3_dev_handle_mbx_msg(hw); + hns3pf_handle_mbx_msg(hw); } else if (event_cause != HNS3_VECTOR0_EVENT_PTP) { hns3_warn(hw, "received unknown event: vector0_int_stat:0x%x " "ras_int_stat:0x%x cmdq_int_stat:0x%x", @@ -5579,28 +5579,14 @@ static enum hns3_reset_level hns3_detect_reset_event(struct hns3_hw *hw) { - struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); enum hns3_reset_level new_req = HNS3_NONE_RESET; - enum hns3_reset_level last_req; uint32_t vector0_intr_state; - last_req = hns3_get_reset_level(hns, &hw->reset.pending); vector0_intr_state = hns3_read_dev(hw, HNS3_VECTOR0_OTHER_INT_STS_REG); - if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) { - __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + if (BIT(HNS3_VECTOR0_IMPRESET_INT_B) & vector0_intr_state) new_req = HNS3_IMP_RESET; - } else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) { - __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + else if (BIT(HNS3_VECTOR0_GLOBALRESET_INT_B) & vector0_intr_state) new_req = HNS3_GLOBAL_RESET; - } - - if (new_req == HNS3_NONE_RESET) - return HNS3_NONE_RESET; - - if (last_req == HNS3_NONE_RESET || last_req < new_req) { - hns3_schedule_delayed_reset(hns); - hns3_warn(hw, "High level reset detected, delay do reset"); - } return new_req; } @@ -5620,10 +5606,14 @@ return false; new_req = hns3_detect_reset_event(hw); + if (new_req == HNS3_NONE_RESET) + return false; + last_req = hns3_get_reset_level(hns, &hw->reset.pending); - if (last_req != HNS3_NONE_RESET && new_req != HNS3_NONE_RESET && - new_req < last_req) { - hns3_warn(hw, "High level reset %d is pending", last_req); + if (last_req == HNS3_NONE_RESET || last_req < new_req) { + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + hns3_schedule_delayed_reset(hns); + 
hns3_warn(hw, "High level reset detected, delay do reset"); return true; } last_req = hns3_get_reset_level(hns, &hw->reset.request); @@ -6658,6 +6648,8 @@ { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_50GE_RDMA) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_RDMA_MACSEC) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_RDMA) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_100G_ROH) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HNS3_DEV_ID_200G_ROH) }, { .vendor_id = 0, }, /* sentinel */ }; diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_ethdev.h dpdk-22.11.5/drivers/net/hns3/hns3_ethdev.h --- dpdk-22.11.4/drivers/net/hns3/hns3_ethdev.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_ethdev.h 2024-04-22 11:25:10.000000000 +0000 @@ -28,7 +28,9 @@ #define HNS3_DEV_ID_25GE_RDMA 0xA222 #define HNS3_DEV_ID_50GE_RDMA 0xA224 #define HNS3_DEV_ID_100G_RDMA_MACSEC 0xA226 +#define HNS3_DEV_ID_100G_ROH 0xA227 #define HNS3_DEV_ID_200G_RDMA 0xA228 +#define HNS3_DEV_ID_200G_ROH 0xA22C #define HNS3_DEV_ID_100G_VF 0xA22E #define HNS3_DEV_ID_100G_RDMA_PFC_VF 0xA22F diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_ethdev_vf.c dpdk-22.11.5/drivers/net/hns3/hns3_ethdev_vf.c --- dpdk-22.11.4/drivers/net/hns3/hns3_ethdev_vf.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_ethdev_vf.c 2024-04-22 11:25:10.000000000 +0000 @@ -172,11 +172,13 @@ { /* mac address was checked by upper level interface */ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, - HNS3_MBX_MAC_VLAN_UC_ADD, mac_addr->addr_bytes, - RTE_ETHER_ADDR_LEN, false, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, + HNS3_MBX_MAC_VLAN_UC_ADD); + memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); @@ -191,12 +193,13 @@ { /* mac address was checked by upper level interface */ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, - HNS3_MBX_MAC_VLAN_UC_REMOVE, - mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, - false, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, + HNS3_MBX_MAC_VLAN_UC_REMOVE); + memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); @@ -215,6 +218,7 @@ struct rte_ether_addr *old_addr; uint8_t addr_bytes[HNS3_TWO_ETHER_ADDR_LEN]; /* for 2 MAC addresses */ char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_vf_to_pf_msg req; int ret; /* @@ -227,9 +231,10 @@ memcpy(&addr_bytes[RTE_ETHER_ADDR_LEN], old_addr->addr_bytes, RTE_ETHER_ADDR_LEN); - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_UNICAST, - HNS3_MBX_MAC_VLAN_UC_MODIFY, addr_bytes, - HNS3_TWO_ETHER_ADDR_LEN, true, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_UNICAST, + HNS3_MBX_MAC_VLAN_UC_MODIFY); + memcpy(req.data, addr_bytes, HNS3_TWO_ETHER_ADDR_LEN); + ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); if (ret) { /* * The hns3 VF PMD depends on the hns3 PF kernel ethdev @@ -266,12 +271,13 @@ struct rte_ether_addr *mac_addr) { char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, - HNS3_MBX_MAC_VLAN_MC_ADD, - mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, - NULL, 0); + hns3vf_mbx_setup(&req, 
HNS3_MBX_SET_MULTICAST, + HNS3_MBX_MAC_VLAN_MC_ADD); + memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); @@ -287,12 +293,13 @@ struct rte_ether_addr *mac_addr) { char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MULTICAST, - HNS3_MBX_MAC_VLAN_MC_REMOVE, - mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN, false, - NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_MULTICAST, + HNS3_MBX_MAC_VLAN_MC_REMOVE); + memcpy(req.data, mac_addr->addr_bytes, RTE_ETHER_ADDR_LEN); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) { hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, mac_addr); @@ -335,11 +342,12 @@ * the packets with vlan tag in promiscuous mode. */ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); - req->msg[0] = HNS3_MBX_SET_PROMISC_MODE; - req->msg[1] = en_bc_pmc ? 1 : 0; - req->msg[2] = en_uc_pmc ? 1 : 0; - req->msg[3] = en_mc_pmc ? 1 : 0; - req->msg[4] = hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; + req->msg.code = HNS3_MBX_SET_PROMISC_MODE; + req->msg.en_bc = en_bc_pmc ? 1 : 0; + req->msg.en_uc = en_uc_pmc ? 1 : 0; + req->msg.en_mc = en_mc_pmc ? 1 : 0; + req->msg.en_limit_promisc = + hw->promisc_mode == HNS3_LIMIT_PROMISC_MODE ? 1 : 0; ret = hns3_cmd_send(hw, &desc, 1); if (ret) @@ -428,30 +436,26 @@ bool mmap, enum hns3_ring_type queue_type, uint16_t queue_id) { - struct hns3_vf_bind_vector_msg bind_msg; + struct hns3_vf_to_pf_msg req = {0}; const char *op_str; - uint16_t code; int ret; - memset(&bind_msg, 0, sizeof(bind_msg)); - code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : + req.code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR : HNS3_MBX_UNMAP_RING_TO_VECTOR; - bind_msg.vector_id = (uint8_t)vector_id; + req.vector_id = (uint8_t)vector_id; + req.ring_num = 1; if (queue_type == HNS3_RING_TYPE_RX) - bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX; + req.ring_param[0].int_gl_index = HNS3_RING_GL_RX; else - bind_msg.param[0].int_gl_index = HNS3_RING_GL_TX; - - bind_msg.param[0].ring_type = queue_type; - bind_msg.ring_num = 1; - bind_msg.param[0].tqp_index = queue_id; + req.ring_param[0].int_gl_index = HNS3_RING_GL_TX; + req.ring_param[0].ring_type = queue_type; + req.ring_param[0].tqp_index = queue_id; op_str = mmap ? 
"Map" : "Unmap"; - ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg, - sizeof(bind_msg), false, NULL, 0); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) - hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.", - op_str, queue_id, bind_msg.vector_id, ret); + hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret = %d.", + op_str, queue_id, req.vector_id, ret); return ret; } @@ -534,10 +538,12 @@ static int hns3vf_config_mtu(struct hns3_hw *hw, uint16_t mtu) { + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_MTU, 0, (const uint8_t *)&mtu, - sizeof(mtu), true, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_MTU, 0); + memcpy(req.data, &mtu, sizeof(mtu)); + ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); if (ret) hns3_err(hw, "Failed to set mtu (%u) for vf: %d", mtu, ret); @@ -644,13 +650,8 @@ val = hns3_read_dev(hw, HNS3_VF_RST_ING); hns3_write_dev(hw, HNS3_VF_RST_ING, val | HNS3_VF_RST_ING_BIT); val = cmdq_stat_reg & ~BIT(HNS3_VECTOR0_RST_INT_B); - if (clearval) { - hw->reset.stats.global_cnt++; - hns3_warn(hw, "Global reset detected, clear reset status"); - } else { - hns3_schedule_delayed_reset(hns); - hns3_warn(hw, "Global reset detected, don't clear reset status"); - } + hw->reset.stats.global_cnt++; + hns3_warn(hw, "Global reset detected, clear reset status"); ret = HNS3VF_VECTOR0_EVENT_RST; goto out; @@ -665,9 +666,9 @@ val = 0; ret = HNS3VF_VECTOR0_EVENT_OTHER; + out: - if (clearval) - *clearval = val; + *clearval = val; return ret; } @@ -693,7 +694,7 @@ hns3_schedule_reset(hns); break; case HNS3VF_VECTOR0_EVENT_MBX: - hns3_dev_handle_mbx_msg(hw); + hns3vf_handle_mbx_msg(hw); break; default: break; @@ -728,12 +729,13 @@ uint16_t val = HNS3_PF_PUSH_LSC_CAP_NOT_SUPPORTED; uint16_t exp = HNS3_PF_PUSH_LSC_CAP_UNKNOWN; struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); + struct hns3_vf_to_pf_msg req; __atomic_store_n(&vf->pf_push_lsc_cap, HNS3_PF_PUSH_LSC_CAP_UNKNOWN, __ATOMIC_RELEASE); - (void)hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, - NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); + (void)hns3vf_mbx_send(hw, &req, false, NULL, 0); while (remain_ms > 0) { rte_delay_ms(HNS3_POLL_RESPONE_MS); @@ -744,7 +746,7 @@ * driver has to actively handle the HNS3_MBX_LINK_STAT_CHANGE * mailbox from PF driver to get this capability. 
*/ - hns3_dev_handle_mbx_msg(hw); + hns3vf_handle_mbx_msg(hw); if (__atomic_load_n(&vf->pf_push_lsc_cap, __ATOMIC_ACQUIRE) != HNS3_PF_PUSH_LSC_CAP_UNKNOWN) break; @@ -828,12 +830,13 @@ static int hns3vf_get_port_base_vlan_filter_state(struct hns3_hw *hw) { + struct hns3_vf_to_pf_msg req; uint8_t resp_msg; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, - HNS3_MBX_GET_PORT_BASE_VLAN_STATE, NULL, 0, - true, &resp_msg, sizeof(resp_msg)); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, + HNS3_MBX_GET_PORT_BASE_VLAN_STATE); + ret = hns3vf_mbx_send(hw, &req, true, &resp_msg, sizeof(resp_msg)); if (ret) { if (ret == -ETIME) { /* @@ -874,10 +877,12 @@ { #define HNS3VF_TQPS_RSS_INFO_LEN 6 uint8_t resp_msg[HNS3VF_TQPS_RSS_INFO_LEN]; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_QINFO, 0, NULL, 0, true, - resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); + hns3vf_mbx_setup(&req, HNS3_MBX_GET_QINFO, 0); + ret = hns3vf_mbx_send(hw, &req, true, + resp_msg, HNS3VF_TQPS_RSS_INFO_LEN); if (ret) { PMD_INIT_LOG(ERR, "Failed to get tqp info from PF: %d", ret); return ret; @@ -915,10 +920,11 @@ { uint8_t resp_msg[HNS3_MBX_MAX_RESP_DATA_SIZE]; struct hns3_basic_info *basic_info; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_BASIC_INFO, 0, NULL, 0, - true, resp_msg, sizeof(resp_msg)); + hns3vf_mbx_setup(&req, HNS3_MBX_GET_BASIC_INFO, 0); + ret = hns3vf_mbx_send(hw, &req, true, resp_msg, sizeof(resp_msg)); if (ret) { hns3_err(hw, "failed to get basic info from PF, ret = %d.", ret); @@ -938,10 +944,11 @@ hns3vf_get_host_mac_addr(struct hns3_hw *hw) { uint8_t host_mac[RTE_ETHER_ADDR_LEN]; + struct hns3_vf_to_pf_msg req; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_MAC_ADDR, 0, NULL, 0, - true, host_mac, RTE_ETHER_ADDR_LEN); + hns3vf_mbx_setup(&req, HNS3_MBX_GET_MAC_ADDR, 0); + ret = hns3vf_mbx_send(hw, &req, true, host_mac, RTE_ETHER_ADDR_LEN); if (ret) { hns3_err(hw, "Failed to get mac addr from PF: %d", ret); return ret; @@ -990,6 +997,7 @@ hns3vf_request_link_info(struct hns3_hw *hw) { struct hns3_vf *vf = HNS3_DEV_HW_TO_VF(hw); + struct hns3_vf_to_pf_msg req; bool send_req; int ret; @@ -1001,8 +1009,8 @@ if (!send_req) return; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_GET_LINK_STATUS, 0, NULL, 0, false, - NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_GET_LINK_STATUS, 0); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) { hns3_err(hw, "failed to fetch link status, ret = %d", ret); return; @@ -1046,19 +1054,18 @@ static int hns3vf_vlan_filter_configure(struct hns3_adapter *hns, uint16_t vlan_id, int on) { -#define HNS3VF_VLAN_MBX_MSG_LEN 5 + struct hns3_mbx_vlan_filter *vlan_filter; + struct hns3_vf_to_pf_msg req = {0}; struct hns3_hw *hw = &hns->hw; - uint8_t msg_data[HNS3VF_VLAN_MBX_MSG_LEN]; - uint16_t proto = htons(RTE_ETHER_TYPE_VLAN); - uint8_t is_kill = on ? 0 : 1; - - msg_data[0] = is_kill; - memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id)); - memcpy(&msg_data[3], &proto, sizeof(proto)); - - return hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_FILTER, - msg_data, HNS3VF_VLAN_MBX_MSG_LEN, true, NULL, - 0); + + req.code = HNS3_MBX_SET_VLAN; + req.subcode = HNS3_MBX_VLAN_FILTER; + vlan_filter = (struct hns3_mbx_vlan_filter *)req.data; + vlan_filter->is_kill = on ? 
0 : 1; + vlan_filter->proto = rte_cpu_to_le_16(RTE_ETHER_TYPE_VLAN); + vlan_filter->vlan_id = rte_cpu_to_le_16(vlan_id); + + return hns3vf_mbx_send(hw, &req, true, NULL, 0); } static int @@ -1087,6 +1094,7 @@ static int hns3vf_en_vlan_filter(struct hns3_hw *hw, bool enable) { + struct hns3_vf_to_pf_msg req; uint8_t msg_data; int ret; @@ -1094,9 +1102,10 @@ return 0; msg_data = enable ? 1 : 0; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, - HNS3_MBX_ENABLE_VLAN_FILTER, &msg_data, - sizeof(msg_data), true, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, + HNS3_MBX_ENABLE_VLAN_FILTER); + memcpy(req.data, &msg_data, sizeof(msg_data)); + ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); if (ret) hns3_err(hw, "%s vlan filter failed, ret = %d.", enable ? "enable" : "disable", ret); @@ -1107,12 +1116,15 @@ static int hns3vf_en_hw_strip_rxvtag(struct hns3_hw *hw, bool enable) { + struct hns3_vf_to_pf_msg req; uint8_t msg_data; int ret; msg_data = enable ? 1 : 0; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_SET_VLAN, HNS3_MBX_VLAN_RX_OFF_CFG, - &msg_data, sizeof(msg_data), false, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_VLAN, + HNS3_MBX_VLAN_RX_OFF_CFG); + memcpy(req.data, &msg_data, sizeof(msg_data)); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) hns3_err(hw, "vf %s strip failed, ret = %d.", enable ? "enable" : "disable", ret); @@ -1256,11 +1268,13 @@ static int hns3vf_set_alive(struct hns3_hw *hw, bool alive) { + struct hns3_vf_to_pf_msg req; uint8_t msg_data; msg_data = alive ? 1 : 0; - return hns3_send_mbx_msg(hw, HNS3_MBX_SET_ALIVE, 0, &msg_data, - sizeof(msg_data), false, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_SET_ALIVE, 0); + memcpy(req.data, &msg_data, sizeof(msg_data)); + return hns3vf_mbx_send(hw, &req, false, NULL, 0); } static void @@ -1268,11 +1282,12 @@ { struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; struct hns3_adapter *hns = eth_dev->data->dev_private; + struct hns3_vf_to_pf_msg req; struct hns3_hw *hw = &hns->hw; int ret; - ret = hns3_send_mbx_msg(hw, HNS3_MBX_KEEP_ALIVE, 0, NULL, 0, - false, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_KEEP_ALIVE, 0); + ret = hns3vf_mbx_send(hw, &req, false, NULL, 0); if (ret) hns3_err(hw, "VF sends keeping alive cmd failed(=%d)", ret); @@ -1411,9 +1426,11 @@ static int hns3vf_clear_vport_list(struct hns3_hw *hw) { - return hns3_send_mbx_msg(hw, HNS3_MBX_HANDLE_VF_TBL, - HNS3_MBX_VPORT_LIST_CLEAR, NULL, 0, false, - NULL, 0); + struct hns3_vf_to_pf_msg req; + + hns3vf_mbx_setup(&req, HNS3_MBX_HANDLE_VF_TBL, + HNS3_MBX_VPORT_LIST_CLEAR); + return hns3vf_mbx_send(hw, &req, false, NULL, 0); } static int @@ -1790,11 +1807,25 @@ return true; } +static enum hns3_reset_level +hns3vf_detect_reset_event(struct hns3_hw *hw) +{ + enum hns3_reset_level reset = HNS3_NONE_RESET; + uint32_t cmdq_stat_reg; + + cmdq_stat_reg = hns3_read_dev(hw, HNS3_VECTOR0_CMDQ_STAT_REG); + if (BIT(HNS3_VECTOR0_RST_INT_B) & cmdq_stat_reg) + reset = HNS3_VF_RESET; + + return reset; +} + bool hns3vf_is_reset_pending(struct hns3_adapter *hns) { + enum hns3_reset_level last_req; struct hns3_hw *hw = &hns->hw; - enum hns3_reset_level reset; + enum hns3_reset_level new_req; /* * According to the protocol of PCIe, FLR to a PF device resets the PF @@ -1817,13 +1848,18 @@ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return false; - hns3vf_check_event_cause(hns, NULL); - reset = hns3vf_get_reset_level(hw, &hw->reset.pending); - if (hw->reset.level != HNS3_NONE_RESET && reset != HNS3_NONE_RESET && - hw->reset.level < reset) { - hns3_warn(hw, "High 
level reset %d is pending", reset); + new_req = hns3vf_detect_reset_event(hw); + if (new_req == HNS3_NONE_RESET) + return false; + + last_req = hns3vf_get_reset_level(hw, &hw->reset.pending); + if (last_req == HNS3_NONE_RESET || last_req < new_req) { + __atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED); + hns3_schedule_delayed_reset(hns); + hns3_warn(hw, "High level reset detected, delay do reset"); return true; } + return false; } @@ -1882,12 +1918,13 @@ static int hns3vf_prepare_reset(struct hns3_adapter *hns) { + struct hns3_vf_to_pf_msg req; struct hns3_hw *hw = &hns->hw; int ret; if (hw->reset.level == HNS3_VF_FUNC_RESET) { - ret = hns3_send_mbx_msg(hw, HNS3_MBX_RESET, 0, NULL, - 0, true, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_RESET, 0); + ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); if (ret) return ret; } diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_mbx.c dpdk-22.11.5/drivers/net/hns3/hns3_mbx.c --- dpdk-22.11.4/drivers/net/hns3/hns3_mbx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_mbx.c 2024-04-22 11:25:10.000000000 +0000 @@ -11,8 +11,6 @@ #include "hns3_intr.h" #include "hns3_rxtx.h" -#define HNS3_CMD_CODE_OFFSET 2 - static const struct errno_respcode_map err_code_map[] = { {0, 0}, {1, -EPERM}, @@ -26,6 +24,14 @@ {95, -EOPNOTSUPP}, }; +void +hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, uint8_t code, uint8_t subcode) +{ + memset(req, 0, sizeof(struct hns3_vf_to_pf_msg)); + req->code = code; + req->subcode = subcode; +} + static int hns3_resp_to_errno(uint16_t resp_code) { @@ -72,7 +78,7 @@ return -EIO; } - hns3_dev_handle_mbx_msg(hw); + hns3vf_handle_mbx_msg(hw); rte_delay_us(HNS3_WAIT_RESP_US); if (hw->mbx_resp.received_match_resp) @@ -120,44 +126,24 @@ } int -hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - const uint8_t *msg_data, uint8_t msg_len, bool need_resp, - uint8_t *resp_data, uint16_t resp_len) +hns3vf_mbx_send(struct hns3_hw *hw, + struct hns3_vf_to_pf_msg *req, bool need_resp, + uint8_t *resp_data, uint16_t resp_len) { - struct hns3_mbx_vf_to_pf_cmd *req; + struct hns3_mbx_vf_to_pf_cmd *cmd; struct hns3_cmd_desc desc; - bool is_ring_vector_msg; - int offset; int ret; - req = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; - - /* first two bytes are reserved for code & subcode */ - if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) { - hns3_err(hw, - "VF send mbx msg fail, msg len %u exceeds max payload len %d", - msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET); - return -EINVAL; - } - hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false); - req->msg[0] = code; - is_ring_vector_msg = (code == HNS3_MBX_MAP_RING_TO_VECTOR) || - (code == HNS3_MBX_UNMAP_RING_TO_VECTOR) || - (code == HNS3_MBX_GET_RING_VECTOR_MAP); - if (!is_ring_vector_msg) - req->msg[1] = subcode; - if (msg_data) { - offset = is_ring_vector_msg ? 
1 : HNS3_CMD_CODE_OFFSET; - memcpy(&req->msg[offset], msg_data, msg_len); - } + cmd = (struct hns3_mbx_vf_to_pf_cmd *)desc.data; + cmd->msg = *req; /* synchronous send */ if (need_resp) { - req->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; + cmd->mbx_need_resp |= HNS3_MBX_NEED_RESP_BIT; rte_spinlock_lock(&hw->mbx_resp.lock); - hns3_mbx_prepare_resp(hw, code, subcode); - req->match_id = hw->mbx_resp.match_id; + hns3_mbx_prepare_resp(hw, req->code, req->subcode); + cmd->match_id = hw->mbx_resp.match_id; ret = hns3_cmd_send(hw, &desc, 1); if (ret) { rte_spinlock_unlock(&hw->mbx_resp.lock); @@ -166,7 +152,8 @@ return ret; } - ret = hns3_get_mbx_resp(hw, code, subcode, resp_data, resp_len); + ret = hns3_get_mbx_resp(hw, req->code, req->subcode, + resp_data, resp_len); rte_spinlock_unlock(&hw->mbx_resp.lock); } else { /* asynchronous send */ @@ -193,17 +180,17 @@ hns3vf_handle_link_change_event(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) { + struct hns3_mbx_link_status *link_info = + (struct hns3_mbx_link_status *)req->msg.msg_data; uint8_t link_status, link_duplex; - uint16_t *msg_q = req->msg; uint8_t support_push_lsc; uint32_t link_speed; - memcpy(&link_speed, &msg_q[2], sizeof(link_speed)); - link_status = rte_le_to_cpu_16(msg_q[1]); - link_duplex = (uint8_t)rte_le_to_cpu_16(msg_q[4]); - hns3vf_update_link_status(hw, link_status, link_speed, - link_duplex); - support_push_lsc = (*(uint8_t *)&msg_q[5]) & 1u; + link_status = (uint8_t)rte_le_to_cpu_16(link_info->link_status); + link_speed = rte_le_to_cpu_32(link_info->speed); + link_duplex = (uint8_t)rte_le_to_cpu_16(link_info->duplex); + hns3vf_update_link_status(hw, link_status, link_speed, link_duplex); + support_push_lsc = (link_info->flag) & 1u; hns3vf_update_push_lsc_cap(hw, support_push_lsc); } @@ -212,7 +199,6 @@ struct hns3_mbx_pf_to_vf_cmd *req) { enum hns3_reset_level reset_level; - uint16_t *msg_q = req->msg; /* * PF has asserted reset hence VF should go in pending @@ -220,7 +206,7 @@ * has been completely reset. After this stack should * eventually be re-initialized. */ - reset_level = rte_le_to_cpu_16(msg_q[1]); + reset_level = rte_le_to_cpu_16(req->msg.reset_level); hns3_atomic_set_bit(reset_level, &hw->reset.pending); hns3_warn(hw, "PF inform reset level %d", reset_level); @@ -242,8 +228,9 @@ * to match the request. */ if (req->match_id == resp->match_id) { - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], + resp->resp_status = + hns3_resp_to_errno(req->msg.resp_status); + memcpy(resp->additional_info, &req->msg.resp_data, HNS3_MBX_MAX_RESP_DATA_SIZE); rte_io_wmb(); resp->received_match_resp = true; @@ -256,7 +243,8 @@ * support copy request's match_id to its response. So VF follows the * original scheme to process. 
*/ - msg_data = (uint32_t)req->msg[1] << HNS3_MBX_RESP_CODE_OFFSET | req->msg[2]; + msg_data = (uint32_t)req->msg.vf_mbx_msg_code << + HNS3_MBX_RESP_CODE_OFFSET | req->msg.vf_mbx_msg_subcode; if (resp->req_msg_data != msg_data) { hns3_warn(hw, "received response tag (%u) is mismatched with requested tag (%u)", @@ -264,8 +252,8 @@ return; } - resp->resp_status = hns3_resp_to_errno(req->msg[3]); - memcpy(resp->additional_info, &req->msg[4], + resp->resp_status = hns3_resp_to_errno(req->msg.resp_status); + memcpy(resp->additional_info, &req->msg.resp_data, HNS3_MBX_MAX_RESP_DATA_SIZE); rte_io_wmb(); resp->received_match_resp = true; @@ -296,11 +284,8 @@ hns3pf_handle_link_change_event(struct hns3_hw *hw, struct hns3_mbx_vf_to_pf_cmd *req) { -#define LINK_STATUS_OFFSET 1 -#define LINK_FAIL_CODE_OFFSET 2 - - if (!req->msg[LINK_STATUS_OFFSET]) - hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]); + if (!req->msg.link_status) + hns3_link_fail_parse(hw, req->msg.link_fail_code); hns3_update_linkstatus_and_event(hw, true); } @@ -309,8 +294,7 @@ hns3_update_port_base_vlan_info(struct hns3_hw *hw, struct hns3_mbx_pf_to_vf_cmd *req) { -#define PVID_STATE_OFFSET 1 - uint16_t new_pvid_state = req->msg[PVID_STATE_OFFSET] ? + uint16_t new_pvid_state = req->msg.pvid_state ? HNS3_PORT_BASE_VLAN_ENABLE : HNS3_PORT_BASE_VLAN_DISABLE; /* * Currently, hardware doesn't support more than two layers VLAN offload @@ -359,7 +343,7 @@ while (next_to_use != tail) { desc = &crq->desc[next_to_use]; req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; - opcode = req->msg[0] & 0xff; + opcode = req->msg.code & 0xff; flag = rte_le_to_cpu_16(crq->desc[next_to_use].flag); if (!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B)) @@ -388,9 +372,57 @@ } void -hns3_dev_handle_mbx_msg(struct hns3_hw *hw) +hns3pf_handle_mbx_msg(struct hns3_hw *hw) +{ + struct hns3_cmq_ring *crq = &hw->cmq.crq; + struct hns3_mbx_vf_to_pf_cmd *req; + struct hns3_cmd_desc *desc; + uint16_t flag; + + rte_spinlock_lock(&hw->cmq.crq.lock); + + while (!hns3_cmd_crq_empty(hw)) { + if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) { + rte_spinlock_unlock(&hw->cmq.crq.lock); + return; + } + desc = &crq->desc[crq->next_to_use]; + req = (struct hns3_mbx_vf_to_pf_cmd *)desc->data; + + flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); + if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { + hns3_warn(hw, + "dropped invalid mailbox message, code = %u", + req->msg.code); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); + continue; + } + + switch (req->msg.code) { + case HNS3_MBX_PUSH_LINK_STATUS: + hns3pf_handle_link_change_event(hw, req); + break; + default: + hns3_err(hw, "received unsupported(%u) mbx msg", + req->msg.code); + break; + } + crq->desc[crq->next_to_use].flag = 0; + hns3_mbx_ring_ptr_move_crq(crq); + } + + /* Write back CMDQ_RQ header pointer, IMP need this pointer */ + hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, crq->next_to_use); + + rte_spinlock_unlock(&hw->cmq.crq.lock); +} + +void +hns3vf_handle_mbx_msg(struct hns3_hw *hw) { - struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw); struct hns3_cmq_ring *crq = &hw->cmq.crq; struct hns3_mbx_pf_to_vf_cmd *req; struct hns3_cmd_desc *desc; @@ -401,7 +433,7 @@ rte_spinlock_lock(&hw->cmq.crq.lock); handle_out = (rte_eal_process_type() != RTE_PROC_PRIMARY || - !rte_thread_is_intr()) && hns->is_vf; + !rte_thread_is_intr()); if (handle_out) { /* * Currently, any threads in the primary and secondary 
processes @@ -432,7 +464,7 @@ desc = &crq->desc[crq->next_to_use]; req = (struct hns3_mbx_pf_to_vf_cmd *)desc->data; - opcode = req->msg[0] & 0xff; + opcode = req->msg.code & 0xff; flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag); if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) { @@ -446,8 +478,7 @@ continue; } - handle_out = hns->is_vf && desc->opcode == 0; - if (handle_out) { + if (desc->opcode == 0) { /* Message already processed by other thread */ crq->desc[crq->next_to_use].flag = 0; hns3_mbx_ring_ptr_move_crq(crq); @@ -464,16 +495,6 @@ case HNS3_MBX_ASSERTING_RESET: hns3_handle_asserting_reset(hw, req); break; - case HNS3_MBX_PUSH_LINK_STATUS: - /* - * This message is reported by the firmware and is - * reported in 'struct hns3_mbx_vf_to_pf_cmd' format. - * Therefore, we should cast the req variable to - * 'struct hns3_mbx_vf_to_pf_cmd' and then process it. - */ - hns3pf_handle_link_change_event(hw, - (struct hns3_mbx_vf_to_pf_cmd *)req); - break; case HNS3_MBX_PUSH_VLAN_INFO: /* * When the PVID configuration status of VF device is @@ -488,7 +509,7 @@ * hns3 PF kernel driver, VF driver will receive this * mailbox message from PF driver. */ - hns3_handle_promisc_info(hw, req->msg[1]); + hns3_handle_promisc_info(hw, req->msg.promisc_en); break; default: hns3_err(hw, "received unsupported(%u) mbx msg", diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_mbx.h dpdk-22.11.5/drivers/net/hns3/hns3_mbx.h --- dpdk-22.11.4/drivers/net/hns3/hns3_mbx.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_mbx.h 2024-04-22 11:25:10.000000000 +0000 @@ -89,7 +89,6 @@ HNS3_MBX_LF_XSFP_ABSENT, }; -#define HNS3_MBX_MAX_MSG_SIZE 16 #define HNS3_MBX_MAX_RESP_DATA_SIZE 8 #define HNS3_MBX_DEF_TIME_LIMIT_MS 500 @@ -107,6 +106,69 @@ uint8_t additional_info[HNS3_MBX_MAX_RESP_DATA_SIZE]; }; +struct hns3_ring_chain_param { + uint8_t ring_type; + uint8_t tqp_index; + uint8_t int_gl_index; +}; + +struct hns3_mbx_vlan_filter { + uint8_t is_kill; + uint16_t vlan_id; + uint16_t proto; +} __rte_packed; + +struct hns3_mbx_link_status { + uint16_t link_status; + uint32_t speed; + uint16_t duplex; + uint8_t flag; +} __rte_packed; + +#define HNS3_MBX_MSG_MAX_DATA_SIZE 14 +#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +struct hns3_vf_to_pf_msg { + uint8_t code; + union { + struct { + uint8_t subcode; + uint8_t data[HNS3_MBX_MSG_MAX_DATA_SIZE]; + }; + struct { + uint8_t en_bc; + uint8_t en_uc; + uint8_t en_mc; + uint8_t en_limit_promisc; + }; + struct { + uint8_t vector_id; + uint8_t ring_num; + struct hns3_ring_chain_param + ring_param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; + }; + struct { + uint8_t link_status; + uint8_t link_fail_code; + }; + }; +}; + +struct hns3_pf_to_vf_msg { + uint16_t code; + union { + struct { + uint16_t vf_mbx_msg_code; + uint16_t vf_mbx_msg_subcode; + uint16_t resp_status; + uint8_t resp_data[HNS3_MBX_MAX_RESP_DATA_SIZE]; + }; + uint16_t promisc_en; + uint16_t reset_level; + uint16_t pvid_state; + uint8_t msg_data[HNS3_MBX_MSG_MAX_DATA_SIZE]; + }; +}; + struct errno_respcode_map { uint16_t resp_code; int err_no; @@ -122,7 +184,7 @@ uint8_t msg_len; uint8_t rsv2; uint16_t match_id; - uint8_t msg[HNS3_MBX_MAX_MSG_SIZE]; + struct hns3_vf_to_pf_msg msg; }; struct hns3_mbx_pf_to_vf_cmd { @@ -131,20 +193,7 @@ uint8_t msg_len; uint8_t rsv1; uint16_t match_id; - uint16_t msg[8]; -}; - -struct hns3_ring_chain_param { - uint8_t ring_type; - uint8_t tqp_index; - uint8_t int_gl_index; -}; - -#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4 -struct hns3_vf_bind_vector_msg { - uint8_t 
vector_id; - uint8_t ring_num; - struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM]; + struct hns3_pf_to_vf_msg msg; }; struct hns3_pf_rst_done_cmd { @@ -158,8 +207,11 @@ ((crq)->next_to_use = ((crq)->next_to_use + 1) % (crq)->desc_num) struct hns3_hw; -void hns3_dev_handle_mbx_msg(struct hns3_hw *hw); -int hns3_send_mbx_msg(struct hns3_hw *hw, uint16_t code, uint16_t subcode, - const uint8_t *msg_data, uint8_t msg_len, bool need_resp, - uint8_t *resp_data, uint16_t resp_len); +void hns3pf_handle_mbx_msg(struct hns3_hw *hw); +void hns3vf_handle_mbx_msg(struct hns3_hw *hw); +void hns3vf_mbx_setup(struct hns3_vf_to_pf_msg *req, + uint8_t code, uint8_t subcode); +int hns3vf_mbx_send(struct hns3_hw *hw, + struct hns3_vf_to_pf_msg *req_msg, bool need_resp, + uint8_t *resp_data, uint16_t resp_len); #endif /* HNS3_MBX_H */ diff -Nru dpdk-22.11.4/drivers/net/hns3/hns3_rxtx.c dpdk-22.11.5/drivers/net/hns3/hns3_rxtx.c --- dpdk-22.11.4/drivers/net/hns3/hns3_rxtx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/hns3/hns3_rxtx.c 2024-04-22 11:25:10.000000000 +0000 @@ -686,13 +686,12 @@ static int hns3vf_reset_tqp(struct hns3_hw *hw, uint16_t queue_id) { - uint8_t msg_data[2]; + struct hns3_vf_to_pf_msg req; int ret; - memcpy(msg_data, &queue_id, sizeof(uint16_t)); - - ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, - sizeof(msg_data), true, NULL, 0); + hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); + memcpy(req.data, &queue_id, sizeof(uint16_t)); + ret = hns3vf_mbx_send(hw, &req, true, NULL, 0); if (ret) hns3_err(hw, "fail to reset tqp, queue_id = %u, ret = %d.", queue_id, ret); @@ -769,15 +768,14 @@ hns3vf_reset_all_tqps(struct hns3_hw *hw) { #define HNS3VF_RESET_ALL_TQP_DONE 1U + struct hns3_vf_to_pf_msg req; uint8_t reset_status; - uint8_t msg_data[2]; int ret; uint16_t i; - memset(msg_data, 0, sizeof(msg_data)); - ret = hns3_send_mbx_msg(hw, HNS3_MBX_QUEUE_RESET, 0, msg_data, - sizeof(msg_data), true, &reset_status, - sizeof(reset_status)); + hns3vf_mbx_setup(&req, HNS3_MBX_QUEUE_RESET, 0); + ret = hns3vf_mbx_send(hw, &req, true, + &reset_status, sizeof(reset_status)); if (ret) { hns3_err(hw, "fail to send rcb reset mbx, ret = %d.", ret); return ret; diff -Nru dpdk-22.11.4/drivers/net/i40e/i40e_flow.c dpdk-22.11.5/drivers/net/i40e/i40e_flow.c --- dpdk-22.11.4/drivers/net/i40e/i40e_flow.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/i40e/i40e_flow.c 2024-04-22 11:25:10.000000000 +0000 @@ -1708,8 +1708,7 @@ ether_type = rte_be_to_cpu_16(eth_spec->type); - if (next_type == RTE_FLOW_ITEM_TYPE_VLAN || - ether_type == RTE_ETHER_TYPE_IPV4 || + if (ether_type == RTE_ETHER_TYPE_IPV4 || ether_type == RTE_ETHER_TYPE_IPV6 || ether_type == i40e_get_outer_vlan(dev)) { rte_flow_error_set(error, EINVAL, diff -Nru dpdk-22.11.4/drivers/net/i40e/i40e_rxtx_vec_avx2.c dpdk-22.11.5/drivers/net/i40e/i40e_rxtx_vec_avx2.c --- dpdk-22.11.4/drivers/net/i40e/i40e_rxtx_vec_avx2.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/i40e/i40e_rxtx_vec_avx2.c 2024-04-22 11:25:10.000000000 +0000 @@ -276,46 +276,30 @@ _mm256_loadu_si256((void *)&sw_ring[i + 4])); #endif - __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; -#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC - /* for AVX we need alignment otherwise loads are not atomic */ - if (avx_aligned) { - /* load in descriptors, 2 at a time, in reverse order */ - raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); - rte_compiler_barrier(); - raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); - 
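Note on the i40e_rxtx_vec_avx2.c hunk here (the iavf and ice copies below get the identical treatment): the conditional "avx_aligned" path, which used 32-byte loads when the 16-byte descriptor layout was enabled, is removed, and the Rx burst now always assembles each 256-bit descriptor pair from two 128-bit loads. The 128-bit loads are individually atomic, and the compiler barriers force the descriptors to be read from highest index to lowest, so a set DD bit implies every lower-index descriptor read afterwards is valid. A self-contained sketch of the pattern, with an inline-asm stand-in for rte_compiler_barrier():

    #include <immintrin.h>

    #define compiler_barrier() __asm__ __volatile__ ("" : : : "memory")

    /* Combine two adjacent 16-byte descriptors into one __m256i, reading
     * the higher-index descriptor first, as the rewritten hunks do. */
    static inline __m256i
    load_desc_pair(const __m128i *hi_desc, const __m128i *lo_desc)
    {
            const __m128i hi = _mm_load_si128(hi_desc);
            compiler_barrier();
            const __m128i lo = _mm_load_si128(lo_desc);

            return _mm256_inserti128_si256(_mm256_castsi128_si256(lo), hi, 1);
    }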
rte_compiler_barrier(); - raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); - rte_compiler_barrier(); - raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); - } else -#endif - do { - const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); - rte_compiler_barrier(); - const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); - rte_compiler_barrier(); - const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); - rte_compiler_barrier(); - const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); - rte_compiler_barrier(); - const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); - rte_compiler_barrier(); - const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); - rte_compiler_barrier(); - const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); - rte_compiler_barrier(); - const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); - raw_desc6_7 = _mm256_inserti128_si256( - _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); - raw_desc4_5 = _mm256_inserti128_si256( - _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); - raw_desc2_3 = _mm256_inserti128_si256( - _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); - raw_desc0_1 = _mm256_inserti128_si256( - _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); - } while (0); + const __m256i raw_desc6_7 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); + const __m256i raw_desc4_5 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); + const __m256i raw_desc2_3 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); + const __m256i raw_desc0_1 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); if (split_packet) { int j; diff -Nru dpdk-22.11.4/drivers/net/iavf/iavf_ethdev.c dpdk-22.11.5/drivers/net/iavf/iavf_ethdev.c --- dpdk-22.11.4/drivers/net/iavf/iavf_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/iavf/iavf_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -609,7 +609,8 @@ RTE_ETH_VLAN_FILTER_MASK | RTE_ETH_VLAN_EXTEND_MASK); if (err) { - PMD_DRV_LOG(ERR, "Failed to update vlan offload"); + PMD_DRV_LOG(INFO, + "VLAN offloading is not supported, or offloading was refused by the PF"); return err; } @@ -685,9 +686,7 @@ vf->max_rss_qregion = IAVF_MAX_NUM_QUEUES_DFLT; } - ret = iavf_dev_init_vlan(dev); - if (ret) - PMD_DRV_LOG(ERR, "configure VLAN failed: %d", ret); + iavf_dev_init_vlan(dev); if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { if (iavf_init_rss(ad) != 0) { diff -Nru dpdk-22.11.4/drivers/net/iavf/iavf_ipsec_crypto.c dpdk-22.11.5/drivers/net/iavf/iavf_ipsec_crypto.c --- dpdk-22.11.4/drivers/net/iavf/iavf_ipsec_crypto.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/iavf/iavf_ipsec_crypto.c 2024-04-22 11:25:10.000000000 +0000 @@ -1485,8 +1485,11 @@ if (adapter->security_ctx 
== NULL) { adapter->security_ctx = rte_malloc("iavf_security_ctx", sizeof(struct iavf_security_ctx), 0); - if (adapter->security_ctx == NULL) + if (adapter->security_ctx == NULL) { + rte_free(adapter->vf.eth_dev->security_ctx); + adapter->vf.eth_dev->security_ctx = NULL; return -ENOMEM; + } } return 0; diff -Nru dpdk-22.11.4/drivers/net/iavf/iavf_rxtx_vec_avx2.c dpdk-22.11.5/drivers/net/iavf/iavf_rxtx_vec_avx2.c --- dpdk-22.11.4/drivers/net/iavf/iavf_rxtx_vec_avx2.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/iavf/iavf_rxtx_vec_avx2.c 2024-04-22 11:25:10.000000000 +0000 @@ -192,62 +192,30 @@ _mm256_loadu_si256((void *)&sw_ring[i + 4])); #endif - __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; -#ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC - /* for AVX we need alignment otherwise loads are not atomic */ - if (avx_aligned) { - /* load in descriptors, 2 at a time, in reverse order */ - raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); - rte_compiler_barrier(); - raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); - rte_compiler_barrier(); - raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); - rte_compiler_barrier(); - raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); - } else -#endif - { - const __m128i raw_desc7 = - _mm_load_si128((void *)(rxdp + 7)); - rte_compiler_barrier(); - const __m128i raw_desc6 = - _mm_load_si128((void *)(rxdp + 6)); - rte_compiler_barrier(); - const __m128i raw_desc5 = - _mm_load_si128((void *)(rxdp + 5)); - rte_compiler_barrier(); - const __m128i raw_desc4 = - _mm_load_si128((void *)(rxdp + 4)); - rte_compiler_barrier(); - const __m128i raw_desc3 = - _mm_load_si128((void *)(rxdp + 3)); - rte_compiler_barrier(); - const __m128i raw_desc2 = - _mm_load_si128((void *)(rxdp + 2)); - rte_compiler_barrier(); - const __m128i raw_desc1 = - _mm_load_si128((void *)(rxdp + 1)); - rte_compiler_barrier(); - const __m128i raw_desc0 = - _mm_load_si128((void *)(rxdp + 0)); + const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); - raw_desc6_7 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc6), - raw_desc7, 1); - raw_desc4_5 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc4), - raw_desc5, 1); - raw_desc2_3 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc2), - raw_desc3, 1); - raw_desc0_1 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc0), - raw_desc1, 1); - } + const __m256i raw_desc6_7 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); + const __m256i raw_desc4_5 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); + const __m256i raw_desc2_3 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); + const __m256i raw_desc0_1 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); if (split_packet) { int j; diff -Nru dpdk-22.11.4/drivers/net/ice/ice_ethdev.c 
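Note on the iavf_ipsec_crypto.c hunk above: when the inner rte_malloc() fails, the function now also frees the eth_dev-level security context allocated immediately before it and NULLs the pointer, instead of returning -ENOMEM with the first allocation leaked and dangling. The general shape of the fix as a minimal standalone sketch (names illustrative, not the driver's):

    #include <stdlib.h>

    struct ctx_pair {
            void *outer;    /* stands in for eth_dev->security_ctx */
            void *inner;    /* stands in for adapter->security_ctx */
    };

    static int
    ctx_pair_init(struct ctx_pair *p, size_t outer_sz, size_t inner_sz)
    {
            p->outer = calloc(1, outer_sz);
            if (p->outer == NULL)
                    return -1;
            p->inner = calloc(1, inner_sz);
            if (p->inner == NULL) {
                    /* undo the first allocation - the step the old code missed */
                    free(p->outer);
                    p->outer = NULL;
                    return -1;
            }
            return 0;
    }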
dpdk-22.11.5/drivers/net/ice/ice_ethdev.c --- dpdk-22.11.4/drivers/net/ice/ice_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ice/ice_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -1773,6 +1773,7 @@ } pf->main_vsi = vsi; + rte_spinlock_init(&pf->link_lock); return 0; } @@ -3589,17 +3590,31 @@ return 0; } +static enum ice_status +ice_get_link_info_safe(struct ice_pf *pf, bool ena_lse, + struct ice_link_status *link) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + rte_spinlock_lock(&pf->link_lock); + + ret = ice_aq_get_link_info(hw->port_info, ena_lse, link, NULL); + + rte_spinlock_unlock(&pf->link_lock); + + return ret; +} + static void ice_get_init_link_status(struct rte_eth_dev *dev) { - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false; struct ice_link_status link_status; int ret; - ret = ice_aq_get_link_info(hw->port_info, enable_lse, - &link_status, NULL); + ret = ice_get_link_info_safe(pf, enable_lse, &link_status); if (ret != ICE_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to get link info"); pf->init_link_up = false; @@ -3844,7 +3859,11 @@ RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | - RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM; + RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | + RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO; dev_info->flow_type_rss_offloads |= ICE_RSS_OFFLOAD_ALL; } @@ -3960,7 +3979,7 @@ { #define CHECK_INTERVAL 50 /* 50ms */ #define MAX_REPEAT_TIME 40 /* 2s (40 * 50ms) in total */ - struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct ice_link_status link_status; struct rte_eth_link link, old; int status; @@ -3974,8 +3993,7 @@ do { /* Get link status information from hardware */ - status = ice_aq_get_link_info(hw->port_info, enable_lse, - &link_status, NULL); + status = ice_get_link_info_safe(pf, enable_lse, &link_status); if (status != ICE_SUCCESS) { link.link_speed = RTE_ETH_SPEED_NUM_100M; link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; diff -Nru dpdk-22.11.4/drivers/net/ice/ice_ethdev.h dpdk-22.11.5/drivers/net/ice/ice_ethdev.h --- dpdk-22.11.4/drivers/net/ice/ice_ethdev.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ice/ice_ethdev.h 2024-04-22 11:25:10.000000000 +0000 @@ -550,6 +550,10 @@ uint64_t supported_rxdid; /* bitmap for supported RXDID */ uint64_t rss_hf; struct ice_tm_conf tm_conf; + /* lock prevent race condition between lsc interrupt handler + * and link status update during dev_start. 
+ */ + rte_spinlock_t link_lock; }; #define ICE_MAX_QUEUE_NUM 2048 diff -Nru dpdk-22.11.4/drivers/net/ice/ice_rxtx_vec_avx2.c dpdk-22.11.5/drivers/net/ice/ice_rxtx_vec_avx2.c --- dpdk-22.11.4/drivers/net/ice/ice_rxtx_vec_avx2.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ice/ice_rxtx_vec_avx2.c 2024-04-22 11:25:10.000000000 +0000 @@ -254,62 +254,30 @@ _mm256_loadu_si256((void *)&sw_ring[i + 4])); #endif - __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; -#ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC - /* for AVX we need alignment otherwise loads are not atomic */ - if (avx_aligned) { - /* load in descriptors, 2 at a time, in reverse order */ - raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); - rte_compiler_barrier(); - raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); - rte_compiler_barrier(); - raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); - rte_compiler_barrier(); - raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); - } else -#endif - { - const __m128i raw_desc7 = - _mm_load_si128((void *)(rxdp + 7)); - rte_compiler_barrier(); - const __m128i raw_desc6 = - _mm_load_si128((void *)(rxdp + 6)); - rte_compiler_barrier(); - const __m128i raw_desc5 = - _mm_load_si128((void *)(rxdp + 5)); - rte_compiler_barrier(); - const __m128i raw_desc4 = - _mm_load_si128((void *)(rxdp + 4)); - rte_compiler_barrier(); - const __m128i raw_desc3 = - _mm_load_si128((void *)(rxdp + 3)); - rte_compiler_barrier(); - const __m128i raw_desc2 = - _mm_load_si128((void *)(rxdp + 2)); - rte_compiler_barrier(); - const __m128i raw_desc1 = - _mm_load_si128((void *)(rxdp + 1)); - rte_compiler_barrier(); - const __m128i raw_desc0 = - _mm_load_si128((void *)(rxdp + 0)); + const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); - raw_desc6_7 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc6), - raw_desc7, 1); - raw_desc4_5 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc4), - raw_desc5, 1); - raw_desc2_3 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc2), - raw_desc3, 1); - raw_desc0_1 = - _mm256_inserti128_si256 - (_mm256_castsi128_si256(raw_desc0), - raw_desc1, 1); - } + const __m256i raw_desc6_7 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc6), raw_desc7, 1); + const __m256i raw_desc4_5 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc4), raw_desc5, 1); + const __m256i raw_desc2_3 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc2), raw_desc3, 1); + const __m256i raw_desc0_1 = + _mm256_inserti128_si256(_mm256_castsi128_si256(raw_desc0), raw_desc1, 1); if (split_packet) { int j; diff -Nru dpdk-22.11.4/drivers/net/ice/ice_rxtx_vec_common.h dpdk-22.11.5/drivers/net/ice/ice_rxtx_vec_common.h --- dpdk-22.11.4/drivers/net/ice/ice_rxtx_vec_common.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ice/ice_rxtx_vec_common.h 2024-04-22 11:25:10.000000000 +0000 @@ -251,6 +251,10 @@ 
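Note on the ice_ethdev.c / ice_ethdev.h hunks above: every ice_aq_get_link_info() call now goes through ice_get_link_info_safe(), serialized behind the new pf->link_lock spinlock initialized where the main VSI is set up; this closes the race between the LSC interrupt handler and the link-status poll during dev_start. A minimal sketch of the wrapper idea using DPDK's spinlock API (the callback indirection is illustrative, not the driver's actual signature):

    #include <rte_spinlock.h>

    static rte_spinlock_t link_lock = RTE_SPINLOCK_INITIALIZER;

    /* Every context that queries link state funnels through one helper,
     * so concurrent admin-queue link queries cannot interleave. */
    static int
    get_link_info_locked(int (*query)(void *ctx), void *ctx)
    {
            int ret;

            rte_spinlock_lock(&link_lock);
            ret = query(ctx);       /* stands in for ice_aq_get_link_info() */
            rte_spinlock_unlock(&link_lock);
            return ret;
    }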
RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \ RTE_ETH_TX_OFFLOAD_TCP_TSO | \ + RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \ + RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) #define ICE_TX_VECTOR_OFFLOAD ( \ diff -Nru dpdk-22.11.4/drivers/net/ice/ice_tm.c dpdk-22.11.5/drivers/net/ice/ice_tm.c --- dpdk-22.11.4/drivers/net/ice/ice_tm.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ice/ice_tm.c 2024-04-22 11:25:10.000000000 +0000 @@ -58,8 +58,15 @@ ice_tm_conf_uninit(struct rte_eth_dev *dev) { struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct ice_tm_shaper_profile *shaper_profile; struct ice_tm_node *tm_node; + /* clear profile */ + while ((shaper_profile = TAILQ_FIRST(&pf->tm_conf.shaper_profile_list))) { + TAILQ_REMOVE(&pf->tm_conf.shaper_profile_list, shaper_profile, node); + rte_free(shaper_profile); + } + /* clear node configuration */ while ((tm_node = TAILQ_FIRST(&pf->tm_conf.queue_list))) { TAILQ_REMOVE(&pf->tm_conf.queue_list, tm_node, node); @@ -648,6 +655,8 @@ uint16_t buf_size = ice_struct_size(buf, txqs, 1); buf = (struct ice_aqc_move_txqs_data *)ice_malloc(hw, sizeof(*buf)); + if (buf == NULL) + return -ENOMEM; queue_parent_node = queue_sched_node->parent; buf->src_teid = queue_parent_node->info.node_teid; @@ -659,6 +668,7 @@ NULL, buf, buf_size, &txqs_moved, NULL); if (ret || txqs_moved == 0) { PMD_DRV_LOG(ERR, "move lan queue %u failed", queue_id); + rte_free(buf); return ICE_ERR_PARAM; } @@ -668,12 +678,14 @@ } else { PMD_DRV_LOG(ERR, "invalid children number %d for queue %u", queue_parent_node->num_children, queue_id); + rte_free(buf); return ICE_ERR_PARAM; } dst_node->children[dst_node->num_children++] = queue_sched_node; queue_sched_node->parent = dst_node; ice_sched_query_elem(hw, queue_sched_node->info.node_teid, &queue_sched_node->info); + rte_free(buf); return ret; } diff -Nru dpdk-22.11.4/drivers/net/ice/version.map dpdk-22.11.5/drivers/net/ice/version.map --- dpdk-22.11.4/drivers/net/ice/version.map 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ice/version.map 2024-04-22 11:25:10.000000000 +0000 @@ -7,5 +7,7 @@ # added in 19.11 rte_pmd_ice_dump_package; + + # added in 22.11 rte_pmd_ice_dump_switch; }; diff -Nru dpdk-22.11.4/drivers/net/ionic/ionic_ethdev.c dpdk-22.11.5/drivers/net/ionic/ionic_ethdev.c --- dpdk-22.11.4/drivers/net/ionic/ionic_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ionic/ionic_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -561,7 +561,7 @@ struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); struct ionic_adapter *adapter = lif->adapter; struct ionic_identity *ident = &adapter->ident; - int i, num; + int i, j, num; uint16_t tbl_sz = rte_le_to_cpu_16(ident->lif.eth.rss_ind_tbl_sz); IONIC_PRINT_CALL(); @@ -582,9 +582,10 @@ num = reta_size / RTE_ETH_RETA_GROUP_SIZE; for (i = 0; i < num; i++) { - memcpy(reta_conf->reta, - &lif->rss_ind_tbl[i * RTE_ETH_RETA_GROUP_SIZE], - RTE_ETH_RETA_GROUP_SIZE); + for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) { + reta_conf->reta[j] = + lif->rss_ind_tbl[(i * RTE_ETH_RETA_GROUP_SIZE) + j]; + } reta_conf++; } @@ -969,19 +970,21 @@ ionic_lif_stop(lif); - ionic_lif_free_queues(lif); - IONIC_PRINT(NOTICE, "Removing device %s", eth_dev->device->name); if (adapter->intf->unconfigure_intr) (*adapter->intf->unconfigure_intr)(adapter); - rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); - 
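Note on the ice_tm.c hunks above: ice_tm_conf_uninit() previously freed only the queue nodes and leaked every shaper profile, and the queue-move helper leaked its ice_aqc_move_txqs_data buffer on all exit paths (and dereferenced it without checking the allocation). The new profile cleanup is the standard TAILQ drain idiom:

    #include <sys/queue.h>
    #include <stdlib.h>

    struct profile {
            TAILQ_ENTRY(profile) node;
            /* ... payload ... */
    };
    TAILQ_HEAD(profile_list, profile);

    /* Pop-and-free until the list is empty, as the new uninit loop does. */
    static void
    profile_list_drain(struct profile_list *list)
    {
            struct profile *p;

            while ((p = TAILQ_FIRST(list)) != NULL) {
                    TAILQ_REMOVE(list, p, node);
                    free(p);
            }
    }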
ionic_port_reset(adapter); ionic_reset(adapter); + + ionic_lif_free_queues(lif); + ionic_lif_deinit(lif); + ionic_lif_free(lif); /* Does not free LIF object */ + if (adapter->intf->unmap_bars) (*adapter->intf->unmap_bars)(adapter); + lif->adapter = NULL; rte_free(adapter); return 0; @@ -1058,21 +1061,18 @@ static int eth_ionic_dev_uninit(struct rte_eth_dev *eth_dev) { - struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); - struct ionic_adapter *adapter = lif->adapter; - IONIC_PRINT_CALL(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; - adapter->lif = NULL; - - ionic_lif_deinit(lif); - ionic_lif_free(lif); + if (eth_dev->state != RTE_ETH_DEV_UNUSED) + ionic_dev_close(eth_dev); - if (!(lif->state & IONIC_LIF_F_FW_RESET)) - ionic_lif_reset(lif); + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + eth_dev->tx_pkt_prepare = NULL; return 0; } @@ -1227,17 +1227,18 @@ { char name[RTE_ETH_NAME_MAX_LEN]; struct rte_eth_dev *eth_dev; + int ret = 0; /* Adapter lookup is using the eth_dev name */ snprintf(name, sizeof(name), "%s_lif", rte_dev->name); eth_dev = rte_eth_dev_allocated(name); if (eth_dev) - ionic_dev_close(eth_dev); + ret = rte_eth_dev_destroy(eth_dev, eth_ionic_dev_uninit); else IONIC_PRINT(DEBUG, "Cannot find device %s", rte_dev->name); - return 0; + return ret; } RTE_LOG_REGISTER_DEFAULT(ionic_logtype, NOTICE); diff -Nru dpdk-22.11.4/drivers/net/ionic/ionic_rxtx.c dpdk-22.11.5/drivers/net/ionic/ionic_rxtx.c --- dpdk-22.11.4/drivers/net/ionic/ionic_rxtx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ionic/ionic_rxtx.c 2024-04-22 11:25:10.000000000 +0000 @@ -752,7 +752,7 @@ { struct ionic_rx_qcq *rxq = rx_queue; struct ionic_qcq *qcq = &rxq->qcq; - struct ionic_rxq_comp *cq_desc; + volatile struct ionic_rxq_comp *cq_desc; uint16_t mask, head, tail, pos; bool done_color; @@ -791,7 +791,7 @@ { struct ionic_tx_qcq *txq = tx_queue; struct ionic_qcq *qcq = &txq->qcq; - struct ionic_txq_comp *cq_desc; + volatile struct ionic_txq_comp *cq_desc; uint16_t mask, head, tail, pos, cq_pos; bool done_color; diff -Nru dpdk-22.11.4/drivers/net/ionic/ionic_rxtx_sg.c dpdk-22.11.5/drivers/net/ionic/ionic_rxtx_sg.c --- dpdk-22.11.4/drivers/net/ionic/ionic_rxtx_sg.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ionic/ionic_rxtx_sg.c 2024-04-22 11:25:10.000000000 +0000 @@ -27,7 +27,8 @@ struct ionic_cq *cq = &txq->qcq.cq; struct ionic_queue *q = &txq->qcq.q; struct rte_mbuf *txm; - struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; + struct ionic_txq_comp *cq_desc_base = cq->base; + volatile struct ionic_txq_comp *cq_desc; void **info; uint32_t i; @@ -252,7 +253,7 @@ */ static __rte_always_inline void ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq, - struct ionic_rxq_comp *cq_desc, + volatile struct ionic_rxq_comp *cq_desc, struct ionic_rx_service *rx_svc) { struct ionic_queue *q = &rxq->qcq.q; @@ -438,7 +439,8 @@ struct ionic_cq *cq = &rxq->qcq.cq; struct ionic_queue *q = &rxq->qcq.q; struct ionic_rxq_desc *q_desc_base = q->base; - struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; + struct ionic_rxq_comp *cq_desc_base = cq->base; + volatile struct ionic_rxq_comp *cq_desc; uint32_t work_done = 0; uint64_t then, now, hz, delta; diff -Nru dpdk-22.11.4/drivers/net/ionic/ionic_rxtx_simple.c dpdk-22.11.5/drivers/net/ionic/ionic_rxtx_simple.c --- dpdk-22.11.4/drivers/net/ionic/ionic_rxtx_simple.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ionic/ionic_rxtx_simple.c 2024-04-22 
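Note on the ionic_rxtx*.c hunks in this area: the completion-queue descriptor pointers become volatile because the entries are written by the NIC via DMA while the CPU polls them; without the qualifier the compiler may legally hoist the load of the color/status byte out of the poll loop and spin on a stale value. Minimal shape of the check, with a hypothetical one-byte layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct cq_entry {
            uint8_t color_status;   /* illustrative: color bit in bit 7 */
    };

    /* 'volatile' forces a fresh load on every call, so progress made by
     * the device between polls is observed. */
    static bool
    cq_entry_done(volatile const struct cq_entry *e, bool done_color)
    {
            return ((e->color_status & 0x80) != 0) == done_color;
    }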
11:25:10.000000000 +0000 @@ -27,7 +27,8 @@ struct ionic_cq *cq = &txq->qcq.cq; struct ionic_queue *q = &txq->qcq.q; struct rte_mbuf *txm; - struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; + struct ionic_txq_comp *cq_desc_base = cq->base; + volatile struct ionic_txq_comp *cq_desc; void **info; cq_desc = &cq_desc_base[cq->tail_idx]; @@ -225,7 +226,7 @@ */ static __rte_always_inline void ionic_rx_clean_one(struct ionic_rx_qcq *rxq, - struct ionic_rxq_comp *cq_desc, + volatile struct ionic_rxq_comp *cq_desc, struct ionic_rx_service *rx_svc) { struct ionic_queue *q = &rxq->qcq.q; @@ -359,7 +360,8 @@ struct ionic_cq *cq = &rxq->qcq.cq; struct ionic_queue *q = &rxq->qcq.q; struct ionic_rxq_desc *q_desc_base = q->base; - struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; + struct ionic_rxq_comp *cq_desc_base = cq->base; + volatile struct ionic_rxq_comp *cq_desc; uint32_t work_done = 0; uint64_t then, now, hz, delta; diff -Nru dpdk-22.11.4/drivers/net/ixgbe/base/ixgbe_type.h dpdk-22.11.5/drivers/net/ixgbe/base/ixgbe_type.h --- dpdk-22.11.4/drivers/net/ixgbe/base/ixgbe_type.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ixgbe/base/ixgbe_type.h 2024-04-22 11:25:10.000000000 +0000 @@ -1799,7 +1799,7 @@ /* VFRE bitmask */ #define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF -#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ +#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */ /* RDHMPN and TDHMPN bitmasks */ #define IXGBE_RDHMPN_RDICADDR 0x007FF800 diff -Nru dpdk-22.11.4/drivers/net/ixgbe/ixgbe_ethdev.c dpdk-22.11.5/drivers/net/ixgbe/ixgbe_ethdev.c --- dpdk-22.11.4/drivers/net/ixgbe/ixgbe_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ixgbe/ixgbe_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -1187,7 +1187,8 @@ diag = ixgbe_validate_eeprom_checksum(hw, &csum); if (diag != IXGBE_SUCCESS) { PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); - return -EIO; + ret = -EIO; + goto err_exit; } #ifdef RTE_LIBRTE_IXGBE_BYPASS @@ -1225,7 +1226,8 @@ PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); if (diag) { PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); - return -EIO; + ret = -EIO; + goto err_exit; } /* Reset the hw statistics */ @@ -1245,7 +1247,8 @@ "Failed to allocate %u bytes needed to store " "MAC addresses", RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); - return -ENOMEM; + ret = -ENOMEM; + goto err_exit; } /* Copy the permanent MAC address */ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, @@ -1260,7 +1263,8 @@ RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; - return -ENOMEM; + ret = -ENOMEM; + goto err_exit; } /* initialize the vfta */ @@ -1344,6 +1348,11 @@ eth_dev->data->mac_addrs = NULL; rte_free(eth_dev->data->hash_mac_addrs); eth_dev->data->hash_mac_addrs = NULL; +err_exit: +#ifdef RTE_LIB_SECURITY + rte_free(eth_dev->security_ctx); + eth_dev->security_ctx = NULL; +#endif return ret; } diff -Nru dpdk-22.11.4/drivers/net/ixgbe/ixgbe_rxtx.c dpdk-22.11.5/drivers/net/ixgbe/ixgbe_rxtx.c --- dpdk-22.11.4/drivers/net/ixgbe/ixgbe_rxtx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/ixgbe/ixgbe_rxtx.c 2024-04-22 11:25:10.000000000 +0000 @@ -5740,6 +5740,25 @@ IXGBE_PSRTYPE_RQPL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); + /* Initialize the rss for x550_vf cards if enabled */ + switch (hw->mac.type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case 
ixgbe_mac_X550EM_a_vf: + switch (dev->data->dev_conf.rxmode.mq_mode) { + case RTE_ETH_MQ_RX_RSS: + case RTE_ETH_MQ_RX_DCB_RSS: + case RTE_ETH_MQ_RX_VMDQ_RSS: + ixgbe_rss_configure(dev); + break; + default: + break; + } + break; + default: + break; + } + ixgbe_set_rx_function(dev); return 0; diff -Nru dpdk-22.11.4/drivers/net/mana/mana.c dpdk-22.11.5/drivers/net/mana/mana.c --- dpdk-22.11.4/drivers/net/mana/mana.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mana/mana.c 2024-04-22 11:25:10.000000000 +0000 @@ -292,8 +292,8 @@ dev_info->min_rx_bufsize = MIN_RX_BUF_SIZE; dev_info->max_rx_pktlen = MAX_FRAME_SIZE; - dev_info->max_rx_queues = priv->max_rx_queues; - dev_info->max_tx_queues = priv->max_tx_queues; + dev_info->max_rx_queues = RTE_MIN(priv->max_rx_queues, UINT16_MAX); + dev_info->max_tx_queues = RTE_MIN(priv->max_tx_queues, UINT16_MAX); dev_info->max_mac_addrs = MANA_MAX_MAC_ADDR; dev_info->max_hash_mac_addrs = 0; @@ -334,16 +334,20 @@ /* Buffer limits */ dev_info->rx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; - dev_info->rx_desc_lim.nb_max = priv->max_rx_desc; + dev_info->rx_desc_lim.nb_max = RTE_MIN(priv->max_rx_desc, UINT16_MAX); dev_info->rx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; - dev_info->rx_desc_lim.nb_seg_max = priv->max_recv_sge; - dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; + dev_info->rx_desc_lim.nb_seg_max = + RTE_MIN(priv->max_recv_sge, UINT16_MAX); + dev_info->rx_desc_lim.nb_mtu_seg_max = + RTE_MIN(priv->max_recv_sge, UINT16_MAX); dev_info->tx_desc_lim.nb_min = MIN_BUFFERS_PER_QUEUE; - dev_info->tx_desc_lim.nb_max = priv->max_tx_desc; + dev_info->tx_desc_lim.nb_max = RTE_MIN(priv->max_tx_desc, UINT16_MAX); dev_info->tx_desc_lim.nb_align = MIN_BUFFERS_PER_QUEUE; - dev_info->tx_desc_lim.nb_seg_max = priv->max_send_sge; - dev_info->rx_desc_lim.nb_mtu_seg_max = priv->max_recv_sge; + dev_info->tx_desc_lim.nb_seg_max = + RTE_MIN(priv->max_send_sge, UINT16_MAX); + dev_info->tx_desc_lim.nb_mtu_seg_max = + RTE_MIN(priv->max_send_sge, UINT16_MAX); /* Speed */ dev_info->speed_capa = RTE_ETH_LINK_SPEED_100G; @@ -1290,9 +1294,9 @@ priv->max_mr = dev_attr->orig_attr.max_mr; priv->max_mr_size = dev_attr->orig_attr.max_mr_size; - DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d", + DRV_LOG(INFO, "dev %s max queues %d desc %d sge %d mr %" PRIu64, name, priv->max_rx_queues, priv->max_rx_desc, - priv->max_send_sge); + priv->max_send_sge, priv->max_mr_size); rte_eth_copy_pci_info(eth_dev, pci_dev); diff -Nru dpdk-22.11.4/drivers/net/mana/mana.h dpdk-22.11.5/drivers/net/mana/mana.h --- dpdk-22.11.4/drivers/net/mana/mana.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mana/mana.h 2024-04-22 11:25:10.000000000 +0000 @@ -517,9 +517,9 @@ void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque, struct rte_mempool_memhdr *memhdr, unsigned int idx); -struct mana_mr_cache *mana_mr_btree_lookup(struct mana_mr_btree *bt, - uint16_t *idx, - uintptr_t addr, size_t len); +int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, + uintptr_t addr, size_t len, + struct mana_mr_cache **cache); int mana_mr_btree_insert(struct mana_mr_btree *bt, struct mana_mr_cache *entry); int mana_mr_btree_init(struct mana_mr_btree *bt, int n, int socket); void mana_mr_btree_free(struct mana_mr_btree *bt); diff -Nru dpdk-22.11.4/drivers/net/mana/mr.c dpdk-22.11.5/drivers/net/mana/mr.c --- dpdk-22.11.4/drivers/net/mana/mr.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mana/mr.c 2024-04-22 11:25:10.000000000 +0000 @@ -40,7 +40,7 @@ 
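Note on the mana.h change above and the mr.c hunks that follow: mana_mr_btree_lookup() used to return a cache pointer, making "not cached" indistinguishable from "lookup failed while expanding the btree"; it now returns an error code and hands the entry back through an output parameter, and the registration path fills a stack-local entry instead of rte_calloc()'ing one per MR that was never freed. Caller pattern after the change, sketched with abbreviated types and a stub lookup so it compiles standalone:

    #include <stddef.h>
    #include <stdint.h>

    struct mr_entry;        /* opaque stand-in for struct mana_mr_cache */

    /* Stub with the new signature; the real lookup lives in mr.c. */
    static int
    mr_btree_lookup(void *bt, uint16_t *idx, uintptr_t addr, size_t len,
                    struct mr_entry **cache)
    {
            (void)bt; (void)idx; (void)addr; (void)len;
            *cache = NULL;  /* stub: always a clean miss */
            return 0;
    }

    static struct mr_entry *
    find_mr(void *bt, uintptr_t addr, size_t len)
    {
            struct mr_entry *mr = NULL;
            uint16_t idx;

            if (mr_btree_lookup(bt, &idx, addr, len, &mr) != 0)
                    return NULL;    /* internal failure (e.g. expand ENOMEM) */
            if (mr != NULL)
                    return mr;      /* cache hit */
            /* clean miss: the driver registers the region, then inserts it */
            return NULL;
    }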
struct ibv_mr *ibv_mr; struct mana_range ranges[pool->nb_mem_chunks]; uint32_t i; - struct mana_mr_cache *mr; + struct mana_mr_cache mr; int ret; rte_mempool_mem_iter(pool, mana_mempool_chunk_cb, ranges); @@ -75,14 +75,13 @@ DP_LOG(DEBUG, "MR lkey %u addr %p len %zu", ibv_mr->lkey, ibv_mr->addr, ibv_mr->length); - mr = rte_calloc("MANA MR", 1, sizeof(*mr), 0); - mr->lkey = ibv_mr->lkey; - mr->addr = (uintptr_t)ibv_mr->addr; - mr->len = ibv_mr->length; - mr->verb_obj = ibv_mr; + mr.lkey = ibv_mr->lkey; + mr.addr = (uintptr_t)ibv_mr->addr; + mr.len = ibv_mr->length; + mr.verb_obj = ibv_mr; rte_spinlock_lock(&priv->mr_btree_lock); - ret = mana_mr_btree_insert(&priv->mr_btree, mr); + ret = mana_mr_btree_insert(&priv->mr_btree, &mr); rte_spinlock_unlock(&priv->mr_btree_lock); if (ret) { ibv_dereg_mr(ibv_mr); @@ -90,7 +89,7 @@ return ret; } - ret = mana_mr_btree_insert(local_tree, mr); + ret = mana_mr_btree_insert(local_tree, &mr); if (ret) { /* Don't need to clean up MR as it's already * in the global tree @@ -138,8 +137,12 @@ try_again: /* First try to find the MR in local queue tree */ - mr = mana_mr_btree_lookup(local_mr_btree, &idx, - (uintptr_t)mbuf->buf_addr, mbuf->buf_len); + ret = mana_mr_btree_lookup(local_mr_btree, &idx, + (uintptr_t)mbuf->buf_addr, mbuf->buf_len, + &mr); + if (ret) + return NULL; + if (mr) { DP_LOG(DEBUG, "Local mr lkey %u addr 0x%" PRIxPTR " len %zu", mr->lkey, mr->addr, mr->len); @@ -148,11 +151,14 @@ /* If not found, try to find the MR in global tree */ rte_spinlock_lock(&priv->mr_btree_lock); - mr = mana_mr_btree_lookup(&priv->mr_btree, &idx, - (uintptr_t)mbuf->buf_addr, - mbuf->buf_len); + ret = mana_mr_btree_lookup(&priv->mr_btree, &idx, + (uintptr_t)mbuf->buf_addr, + mbuf->buf_len, &mr); rte_spinlock_unlock(&priv->mr_btree_lock); + if (ret) + return NULL; + /* If found in the global tree, add it to the local tree */ if (mr) { ret = mana_mr_btree_insert(local_mr_btree, mr); @@ -228,22 +234,23 @@ /* * Look for a region of memory in MR cache. 
*/ -struct mana_mr_cache * -mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, - uintptr_t addr, size_t len) +int mana_mr_btree_lookup(struct mana_mr_btree *bt, uint16_t *idx, + uintptr_t addr, size_t len, + struct mana_mr_cache **cache) { struct mana_mr_cache *table; uint16_t n; uint16_t base = 0; int ret; - n = bt->len; + *cache = NULL; + n = bt->len; /* Try to double the cache if it's full */ if (n == bt->size) { ret = mana_mr_btree_expand(bt, bt->size << 1); if (ret) - return NULL; + return ret; } table = bt->table; @@ -262,14 +269,16 @@ *idx = base; - if (addr + len <= table[base].addr + table[base].len) - return &table[base]; + if (addr + len <= table[base].addr + table[base].len) { + *cache = &table[base]; + return 0; + } DP_LOG(DEBUG, "addr 0x%" PRIxPTR " len %zu idx %u sum 0x%" PRIxPTR " not found", addr, len, *idx, addr + len); - return NULL; + return 0; } int @@ -314,14 +323,21 @@ struct mana_mr_cache *table; uint16_t idx = 0; uint16_t shift; + int ret; + + ret = mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len, &table); + if (ret) + return ret; - if (mana_mr_btree_lookup(bt, &idx, entry->addr, entry->len)) { + if (table) { DP_LOG(DEBUG, "Addr 0x%" PRIxPTR " len %zu exists in btree", entry->addr, entry->len); return 0; } if (bt->len >= bt->size) { + DP_LOG(ERR, "Btree overflow detected len %u size %u", + bt->len, bt->size); bt->overflow = 1; return -1; } diff -Nru dpdk-22.11.4/drivers/net/memif/rte_eth_memif.c dpdk-22.11.5/drivers/net/memif/rte_eth_memif.c --- dpdk-22.11.4/drivers/net/memif/rte_eth_memif.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/memif/rte_eth_memif.c 2024-04-22 11:25:10.000000000 +0000 @@ -261,8 +261,6 @@ cur_tail = __atomic_load_n(&ring->tail, __ATOMIC_ACQUIRE); while (mq->last_tail != cur_tail) { RTE_MBUF_PREFETCH_TO_FREE(mq->buffers[(mq->last_tail + 1) & mask]); - /* Decrement refcnt and free mbuf. (current segment) */ - rte_mbuf_refcnt_update(mq->buffers[mq->last_tail & mask], -1); rte_pktmbuf_free_seg(mq->buffers[mq->last_tail & mask]); mq->last_tail++; } @@ -707,10 +705,6 @@ next_in_chain: /* store pointer to mbuf to free it later */ mq->buffers[slot & mask] = mbuf; - /* Increment refcnt to make sure the buffer is not freed before server - * receives it. 
(current segment) - */ - rte_mbuf_refcnt_update(mbuf, 1); /* populate descriptor */ d0 = &ring->desc[slot & mask]; d0->length = rte_pktmbuf_data_len(mbuf); diff -Nru dpdk-22.11.4/drivers/net/mlx5/hws/mlx5dr_definer.c dpdk-22.11.5/drivers/net/mlx5/hws/mlx5dr_definer.c --- dpdk-22.11.4/drivers/net/mlx5/hws/mlx5dr_definer.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/hws/mlx5dr_definer.c 2024-04-22 11:25:10.000000000 +0000 @@ -8,7 +8,7 @@ #define BAD_PORT 0xBAD #define ETH_TYPE_IPV4_VXLAN 0x0800 #define ETH_TYPE_IPV6_VXLAN 0x86DD -#define ETH_VXLAN_DEFAULT_PORT 4789 +#define UDP_VXLAN_PORT 4789 #define STE_NO_VLAN 0x0 #define STE_SVLAN 0x1 @@ -31,6 +31,10 @@ (bit_off))); \ } while (0) +/* Getter function based on bit offset and mask, for 32bit DW*/ +#define DR_GET_32(p, byte_off, bit_off, mask) \ + ((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask)) + /* Setter function based on bit offset and mask */ #define DR_SET(p, v, byte_off, bit_off, mask) \ do { \ @@ -153,7 +157,7 @@ X(SET, gtp_ext_hdr_pdu, v->hdr.type, rte_flow_item_gtp_psc) \ X(SET, gtp_ext_hdr_qfi, v->hdr.qfi, rte_flow_item_gtp_psc) \ X(SET, vxlan_flags, v->flags, rte_flow_item_vxlan) \ - X(SET, vxlan_udp_port, ETH_VXLAN_DEFAULT_PORT, rte_flow_item_vxlan) \ + X(SET, vxlan_udp_port, UDP_VXLAN_PORT, rte_flow_item_vxlan) \ X(SET, source_qp, v->queue, mlx5_rte_flow_item_sq) \ X(SET, tag, v->data, rte_flow_item_tag) \ X(SET, metadata, v->data, rte_flow_item_meta) \ @@ -163,7 +167,9 @@ X(SET_BE32, gre_opt_key, v->key.key, rte_flow_item_gre_opt) \ X(SET_BE32, gre_opt_seq, v->sequence.sequence, rte_flow_item_gre_opt) \ X(SET_BE16, gre_opt_checksum, v->checksum_rsvd.checksum, rte_flow_item_gre_opt) \ - X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) + X(SET, meter_color, rte_col_2_mlx5_col(v->color), rte_flow_item_meter_color) \ + X(SET, cvlan, STE_CVLAN, rte_flow_item_vlan) \ + X(SET_BE16, inner_type, v->inner_type, rte_flow_item_vlan) /* Item set function format */ #define X(set_type, func_name, value, item_type) \ @@ -269,7 +275,7 @@ { bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I); const struct rte_flow_item_integrity *v = item_spec; - uint32_t ok1_bits = 0; + uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask); if (v->l3_ok) ok1_bits |= inner ? 
BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) | @@ -477,6 +483,15 @@ struct mlx5dr_definer_fc *fc; bool inner = cd->tunnel; + if (!cd->relaxed) { + /* Mark packet as tagged (CVLAN) */ + fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; + fc->item_idx = item_idx; + fc->tag_mask_set = &mlx5dr_definer_ones_set; + fc->tag_set = &mlx5dr_definer_cvlan_set; + DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner); + } + if (!m) return 0; @@ -485,8 +500,7 @@ return rte_errno; } - if (!cd->relaxed || m->has_more_vlan) { - /* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified.*/ + if (m->has_more_vlan) { fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)]; fc->item_idx = item_idx; fc->tag_mask_set = &mlx5dr_definer_ones_set; @@ -504,7 +518,7 @@ if (m->inner_type) { fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)]; fc->item_idx = item_idx; - fc->tag_set = &mlx5dr_definer_eth_type_set; + fc->tag_set = &mlx5dr_definer_inner_type_set; DR_CALC_SET(fc, eth_l2, l3_ethertype, inner); } @@ -824,6 +838,12 @@ const struct rte_flow_item_gtp *m = item->mask; struct mlx5dr_definer_fc *fc; + if (cd->tunnel) { + DR_LOG(ERR, "Inner GTPU item not supported"); + rte_errno = ENOTSUP; + return rte_errno; + } + /* Overwrite GTPU dest port if not present */ fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)]; if (!fc->tag_set && !cd->relaxed) { @@ -996,9 +1016,20 @@ struct mlx5dr_definer_fc *fc; bool inner = cd->tunnel; - /* In order to match on VXLAN we must match on ether_type, ip_protocol - * and l4_dport. - */ + if (m && (m->rsvd0[0] != 0 || m->rsvd0[1] != 0 || m->rsvd0[2] != 0 || + m->rsvd1 != 0)) { + DR_LOG(ERR, "reserved fields are not supported"); + rte_errno = ENOTSUP; + return rte_errno; + } + + if (inner) { + DR_LOG(ERR, "Inner VXLAN item not supported"); + rte_errno = ENOTSUP; + return rte_errno; + } + + /* In order to match on VXLAN we must match on ip_protocol and l4_dport */ if (!cd->relaxed) { fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)]; if (!fc->tag_set) { @@ -1021,12 +1052,6 @@ return 0; if (m->flags) { - if (inner) { - DR_LOG(ERR, "Inner VXLAN flags item not supported"); - rte_errno = ENOTSUP; - return rte_errno; - } - fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS]; fc->item_idx = item_idx; fc->tag_set = &mlx5dr_definer_vxlan_flags_set; @@ -1036,12 +1061,6 @@ } if (!is_mem_zero(m->vni, 3)) { - if (inner) { - DR_LOG(ERR, "Inner VXLAN vni item not supported"); - rte_errno = ENOTSUP; - return rte_errno; - } - fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI]; fc->item_idx = item_idx; fc->tag_set = &mlx5dr_definer_vxlan_vni_set; diff -Nru dpdk-22.11.4/drivers/net/mlx5/hws/mlx5dr_matcher.c dpdk-22.11.5/drivers/net/mlx5/hws/mlx5dr_matcher.c --- dpdk-22.11.4/drivers/net/mlx5/hws/mlx5dr_matcher.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/hws/mlx5dr_matcher.c 2024-04-22 11:25:10.000000000 +0000 @@ -711,6 +711,7 @@ struct mlx5dv_flow_match_parameters *mask; struct mlx5_flow_attr flow_attr = {0}; struct rte_flow_error rte_error; + struct rte_flow_item *item; uint8_t match_criteria; int ret; @@ -739,6 +740,22 @@ return rte_errno; } + /* We need the port id in case of matching representor */ + item = matcher->mt[0]->items; + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR || + item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) { + ret = flow_hw_get_port_id_from_ctx(ctx, &flow_attr.port_id); + if (ret) { + DR_LOG(ERR, "Failed to get port id for dev %s", + ctx->ibv_ctx->device->name); + rte_errno = EINVAL; + return rte_errno; + } + } + ++item; + } + 
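Note on the mlx5dr_definer.c integrity hunk above: the setter used to start from ok1_bits = 0, so whichever of the inner/outer integrity items was translated second wiped the bits the first had already written into the shared OKS1 DWORD; it now seeds ok1_bits from the tag via the new DR_GET_32 getter, turning the update into a read-modify-write. A functional equivalent of the macro's addressing (DPDK's rte_be_to_cpu_32() swapped for ntohl() to keep the sketch standalone):

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Read 'mask' bits located at (byte_off, bit_off) from an array of
     * big-endian 32-bit words - the mirror image of the existing DR_SET. */
    static uint32_t
    dr_get_32(const void *p, uint32_t byte_off, uint32_t bit_off,
              uint32_t mask)
    {
            const uint32_t *dw = (const uint32_t *)p + byte_off / 4;

            return (ntohl(*dw) >> bit_off) & mask;
    }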
mask = simple_calloc(1, MLX5_ST_SZ_BYTES(fte_match_param) + offsetof(struct mlx5dv_flow_match_parameters, match_buf)); if (!mask) { diff -Nru dpdk-22.11.4/drivers/net/mlx5/hws/mlx5dr_rule.c dpdk-22.11.5/drivers/net/mlx5/hws/mlx5dr_rule.c --- dpdk-22.11.4/drivers/net/mlx5/hws/mlx5dr_rule.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/hws/mlx5dr_rule.c 2024-04-22 11:25:10.000000000 +0000 @@ -382,10 +382,28 @@ struct mlx5dv_flow_match_parameters *value; struct mlx5_flow_attr flow_attr = {0}; struct mlx5dv_flow_action_attr *attr; + const struct rte_flow_item *cur_item; struct rte_flow_error error; uint8_t match_criteria; int ret; + /* We need the port id in case of matching representor */ + cur_item = items; + while (cur_item->type != RTE_FLOW_ITEM_TYPE_END) { + if (cur_item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR || + cur_item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT) { + ret = flow_hw_get_port_id_from_ctx(rule->matcher->tbl->ctx, + &flow_attr.port_id); + if (ret) { + DR_LOG(ERR, "Failed to get port id for dev %s", + rule->matcher->tbl->ctx->ibv_ctx->device->name); + rte_errno = EINVAL; + return rte_errno; + } + } + ++cur_item; + } + attr = simple_calloc(num_actions, sizeof(*attr)); if (!attr) { rte_errno = ENOMEM; diff -Nru dpdk-22.11.4/drivers/net/mlx5/linux/mlx5_ethdev_os.c dpdk-22.11.5/drivers/net/mlx5/linux/mlx5_ethdev_os.c --- dpdk-22.11.4/drivers/net/mlx5/linux/mlx5_ethdev_os.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/linux/mlx5_ethdev_os.c 2024-04-22 11:25:10.000000000 +0000 @@ -671,7 +671,7 @@ ifr.ifr_data = (void *)ðpause; ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); if (ret) { - DRV_LOG(WARNING, + DRV_LOG(DEBUG, "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:" " %s", dev->data->port_id, strerror(rte_errno)); @@ -1286,13 +1286,16 @@ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; unsigned int i; struct ifreq ifr; - unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t); + unsigned int max_stats_n = RTE_MAX(xstats_ctrl->stats_n, xstats_ctrl->stats_n_2nd); + unsigned int stats_sz = max_stats_n * sizeof(uint64_t); unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz]; struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf; int ret; + uint16_t i_idx, o_idx; et_stats->cmd = ETHTOOL_GSTATS; - et_stats->n_stats = xstats_ctrl->stats_n; + /* Pass the maximum value, the driver may ignore this. */ + et_stats->n_stats = max_stats_n; ifr.ifr_data = (caddr_t)et_stats; if (pf >= 0) ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[pf].ifname, @@ -1305,21 +1308,34 @@ dev->data->port_id); return ret; } - for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { - if (xstats_ctrl->info[i].dev) - continue; - stats[i] += (uint64_t) - et_stats->data[xstats_ctrl->dev_table_idx[i]]; + if (pf <= 0) { + for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) { + i_idx = xstats_ctrl->dev_table_idx[i]; + if (i_idx == UINT16_MAX || xstats_ctrl->info[i].dev) + continue; + o_idx = xstats_ctrl->xstats_o_idx[i]; + stats[o_idx] += (uint64_t)et_stats->data[i_idx]; + } + } else { + for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) { + i_idx = xstats_ctrl->dev_table_idx_2nd[i]; + if (i_idx == UINT16_MAX) + continue; + o_idx = xstats_ctrl->xstats_o_idx_2nd[i]; + stats[o_idx] += (uint64_t)et_stats->data[i_idx]; + } } return 0; } -/** +/* * Read device counters. * * @param dev * Pointer to Ethernet device. - * @param[out] stats + * @param bond_master + * Indicate if the device is a bond master. 
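Note on the mlx5dr_matcher.c and mlx5dr_rule.c hunks above: both the matcher build and the root-table rule insertion now pre-scan the pattern for PORT_REPRESENTOR / REPRESENTED_PORT items and resolve the owning port id via flow_hw_get_port_id_from_ctx(), since translating a representor match requires knowing which port the flow belongs to. The scan is a plain walk of the END-terminated item array:

    #include <stdbool.h>
    #include <rte_flow.h>

    /* Detect representor-type items, as the new pre-scan loops do. */
    static bool
    pattern_has_port_item(const struct rte_flow_item *item)
    {
            for (; item->type != RTE_FLOW_ITEM_TYPE_END; ++item) {
                    if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR ||
                        item->type == RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT)
                            return true;
            }
            return false;
    }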
+ * @param stats * Counters table output buffer. * * @return @@ -1327,7 +1343,7 @@ * rte_errno is set. */ int -mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) +mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; @@ -1335,7 +1351,7 @@ memset(stats, 0, sizeof(*stats) * xstats_ctrl->mlx5_stats_n); /* Read ifreq counters. */ - if (priv->master && priv->pf_bond >= 0) { + if (bond_master) { /* Sum xstats from bonding device member ports. */ for (i = 0; i < priv->sh->bond.n_port; i++) { ret = _mlx5_os_read_dev_counters(dev, i, stats); @@ -1347,13 +1363,17 @@ if (ret) return ret; } - /* Read IB counters. */ - for (i = 0; i != xstats_ctrl->mlx5_stats_n; ++i) { + /* + * Read IB counters. + * The counters are unique per IB device but not per net IF. + * In bonding mode, getting the stats name only from 1 port is enough. + */ + for (i = 0; i != xstats_ctrl->mlx5_stats_n; i++) { if (!xstats_ctrl->info[i].dev) continue; /* return last xstats counter if fail to read. */ if (mlx5_os_read_dev_stat(priv, xstats_ctrl->info[i].ctr_name, - &stats[i]) == 0) + &stats[i]) == 0) xstats_ctrl->xstats[i] = stats[i]; else stats[i] = xstats_ctrl->xstats[i]; @@ -1361,18 +1381,24 @@ return ret; } -/** +/* * Query the number of statistics provided by ETHTOOL. * * @param dev * Pointer to Ethernet device. + * @param bond_master + * Indicate if the device is a bond master. + * @param n_stats + * Pointer to number of stats to store. + * @param n_stats_sec + * Pointer to number of stats to store for the 2nd port of the bond. * * @return - * Number of statistics on success, negative errno value otherwise and - * rte_errno is set. + * 0 on success, negative errno value otherwise and rte_errno is set. */ int -mlx5_os_get_stats_n(struct rte_eth_dev *dev) +mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, + uint16_t *n_stats, uint16_t *n_stats_sec) { struct mlx5_priv *priv = dev->data->dev_private; struct ethtool_drvinfo drvinfo; @@ -1381,18 +1407,34 @@ drvinfo.cmd = ETHTOOL_GDRVINFO; ifr.ifr_data = (caddr_t)&drvinfo; - if (priv->master && priv->pf_bond >= 0) - /* Bonding PF. */ + /* Bonding PFs. 
*/ + if (bond_master) { ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, SIOCETHTOOL, &ifr); - else + if (ret) { + DRV_LOG(WARNING, "bonding port %u unable to query number of" + " statistics for the 1st slave, %d", PORT_ID(priv), ret); + return ret; + } + *n_stats = drvinfo.n_stats; + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, + SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "bonding port %u unable to query number of" + " statistics for the 2nd slave, %d", PORT_ID(priv), ret); + return ret; + } + *n_stats_sec = drvinfo.n_stats; + } else { ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); - if (ret) { - DRV_LOG(WARNING, "port %u unable to query number of statistics", - dev->data->port_id); - return ret; + if (ret) { + DRV_LOG(WARNING, "port %u unable to query number of statistics", + PORT_ID(priv)); + return ret; + } + *n_stats = drvinfo.n_stats; } - return drvinfo.n_stats; + return 0; } static const struct mlx5_counter_ctrl mlx5_counters_init[] = { @@ -1578,6 +1620,101 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init); +static int +mlx5_os_get_stats_strings(struct rte_eth_dev *dev, bool bond_master, + struct ethtool_gstrings *strings, + uint32_t stats_n, uint32_t stats_n_2nd) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + struct ifreq ifr; + int ret; + uint32_t i, j, idx; + + /* Ensure no out of bounds access before. */ + MLX5_ASSERT(xstats_n <= MLX5_MAX_XSTATS); + strings->cmd = ETHTOOL_GSTRINGS; + strings->string_set = ETH_SS_STATS; + strings->len = stats_n; + ifr.ifr_data = (caddr_t)strings; + if (bond_master) + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, + SIOCETHTOOL, &ifr); + else + ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u unable to get statistic names with %d", + PORT_ID(priv), ret); + return ret; + } + /* Reorganize the orders to reduce the iterations. */ + for (j = 0; j < xstats_n; j++) { + xstats_ctrl->dev_table_idx[j] = UINT16_MAX; + for (i = 0; i < stats_n; i++) { + const char *curr_string = + (const char *)&strings->data[i * ETH_GSTRING_LEN]; + + if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { + idx = xstats_ctrl->mlx5_stats_n++; + xstats_ctrl->dev_table_idx[j] = i; + xstats_ctrl->xstats_o_idx[j] = idx; + xstats_ctrl->info[idx] = mlx5_counters_init[j]; + } + } + } + if (!bond_master) { + /* Add dev counters, unique per IB device. */ + for (j = 0; j != xstats_n; j++) { + if (mlx5_counters_init[j].dev) { + idx = xstats_ctrl->mlx5_stats_n++; + xstats_ctrl->info[idx] = mlx5_counters_init[j]; + xstats_ctrl->hw_stats[idx] = 0; + } + } + return 0; + } + + strings->len = stats_n_2nd; + ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[1].ifname, + SIOCETHTOOL, &ifr); + if (ret) { + DRV_LOG(WARNING, "port %u unable to get statistic names for 2nd slave with %d", + PORT_ID(priv), ret); + return ret; + } + /* The 2nd slave port may have a different strings set, based on the configuration. */ + for (j = 0; j != xstats_n; j++) { + xstats_ctrl->dev_table_idx_2nd[j] = UINT16_MAX; + for (i = 0; i != stats_n_2nd; i++) { + const char *curr_string = + (const char *)&strings->data[i * ETH_GSTRING_LEN]; + + if (!strcmp(mlx5_counters_init[j].ctr_name, curr_string)) { + xstats_ctrl->dev_table_idx_2nd[j] = i; + if (xstats_ctrl->dev_table_idx[j] != UINT16_MAX) { + /* Already mapped in the 1st slave port. 
*/ + idx = xstats_ctrl->xstats_o_idx[j]; + xstats_ctrl->xstats_o_idx_2nd[j] = idx; + } else { + /* Append the new items to the end of the map. */ + idx = xstats_ctrl->mlx5_stats_n++; + xstats_ctrl->xstats_o_idx_2nd[j] = idx; + xstats_ctrl->info[idx] = mlx5_counters_init[j]; + } + } + } + } + /* Dev counters are always at the last now. */ + for (j = 0; j != xstats_n; j++) { + if (mlx5_counters_init[j].dev) { + idx = xstats_ctrl->mlx5_stats_n++; + xstats_ctrl->info[idx] = mlx5_counters_init[j]; + xstats_ctrl->hw_stats[idx] = 0; + } + } + return 0; +} + /** * Init the structures to read device counters. * @@ -1590,76 +1727,44 @@ struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; struct mlx5_stats_ctrl *stats_ctrl = &priv->stats_ctrl; - unsigned int i; - unsigned int j; - struct ifreq ifr; struct ethtool_gstrings *strings = NULL; - unsigned int dev_stats_n; + uint16_t dev_stats_n = 0; + uint16_t dev_stats_n_2nd = 0; + unsigned int max_stats_n; unsigned int str_sz; int ret; + bool bond_master = (priv->master && priv->pf_bond >= 0); /* So that it won't aggregate for each init. */ xstats_ctrl->mlx5_stats_n = 0; - ret = mlx5_os_get_stats_n(dev); + ret = mlx5_os_get_stats_n(dev, bond_master, &dev_stats_n, &dev_stats_n_2nd); if (ret < 0) { DRV_LOG(WARNING, "port %u no extended statistics available", dev->data->port_id); return; } - dev_stats_n = ret; + max_stats_n = RTE_MAX(dev_stats_n, dev_stats_n_2nd); /* Allocate memory to grab stat names and values. */ - str_sz = dev_stats_n * ETH_GSTRING_LEN; + str_sz = max_stats_n * ETH_GSTRING_LEN; strings = (struct ethtool_gstrings *) mlx5_malloc(0, str_sz + sizeof(struct ethtool_gstrings), 0, SOCKET_ID_ANY); if (!strings) { DRV_LOG(WARNING, "port %u unable to allocate memory for xstats", - dev->data->port_id); + dev->data->port_id); return; } - strings->cmd = ETHTOOL_GSTRINGS; - strings->string_set = ETH_SS_STATS; - strings->len = dev_stats_n; - ifr.ifr_data = (caddr_t)strings; - if (priv->master && priv->pf_bond >= 0) - /* Bonding master. */ - ret = mlx5_ifreq_by_ifname(priv->sh->bond.ports[0].ifname, - SIOCETHTOOL, &ifr); - else - ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr); - if (ret) { - DRV_LOG(WARNING, "port %u unable to get statistic names", + ret = mlx5_os_get_stats_strings(dev, bond_master, strings, + dev_stats_n, dev_stats_n_2nd); + if (ret < 0) { + DRV_LOG(WARNING, "port %u failed to get the stats strings", dev->data->port_id); goto free; } - for (i = 0; i != dev_stats_n; ++i) { - const char *curr_string = (const char *) - &strings->data[i * ETH_GSTRING_LEN]; - - for (j = 0; j != xstats_n; ++j) { - if (!strcmp(mlx5_counters_init[j].ctr_name, - curr_string)) { - unsigned int idx = xstats_ctrl->mlx5_stats_n++; - - xstats_ctrl->dev_table_idx[idx] = i; - xstats_ctrl->info[idx] = mlx5_counters_init[j]; - break; - } - } - } - /* Add dev counters. */ - MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS); - for (i = 0; i != xstats_n; ++i) { - if (mlx5_counters_init[i].dev) { - unsigned int idx = xstats_ctrl->mlx5_stats_n++; - - xstats_ctrl->info[idx] = mlx5_counters_init[i]; - xstats_ctrl->hw_stats[idx] = 0; - } - } xstats_ctrl->stats_n = dev_stats_n; + xstats_ctrl->stats_n_2nd = dev_stats_n_2nd; /* Copy to base at first time. 
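Note on the mlx5_ethdev_os.c statistics rework in the hunks above: with a bonding master, the two member ports may expose different ethtool string sets, so each counter gets a per-port input index (dev_table_idx / dev_table_idx_2nd, with UINT16_MAX meaning "not exposed on this port") plus a shared output slot (xstats_o_idx / xstats_o_idx_2nd); reading then sums both members' values into the same xstat row. The accumulation step reduces to:

    #include <stdint.h>

    #define IDX_ABSENT UINT16_MAX   /* counter missing on this member port */

    /* Fold one member port's ETHTOOL_GSTATS data[] into the shared output
     * table, mirroring the per-port loops in _mlx5_os_read_dev_counters(). */
    static void
    fold_port_stats(uint64_t *out, const uint64_t *data,
                    const uint16_t *in_idx, const uint16_t *out_idx,
                    uint16_t n_counters)
    {
            for (uint16_t i = 0; i < n_counters; i++) {
                    if (in_idx[i] == IDX_ABSENT)
                            continue;
                    out[out_idx[i]] += data[in_idx[i]];
            }
    }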
*/ - ret = mlx5_os_read_dev_counters(dev, xstats_ctrl->base); + ret = mlx5_os_read_dev_counters(dev, bond_master, xstats_ctrl->base); if (ret) DRV_LOG(ERR, "port %u cannot read device counters: %s", dev->data->port_id, strerror(rte_errno)); diff -Nru dpdk-22.11.4/drivers/net/mlx5/linux/mlx5_os.c dpdk-22.11.5/drivers/net/mlx5/linux/mlx5_os.c --- dpdk-22.11.4/drivers/net/mlx5/linux/mlx5_os.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/linux/mlx5_os.c 2024-04-22 11:25:10.000000000 +0000 @@ -2682,9 +2682,15 @@ if (priv->sh) { if (priv->q_counters != NULL && - strcmp(ctr_name, "out_of_buffer") == 0) + strcmp(ctr_name, "out_of_buffer") == 0) { + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + DRV_LOG(WARNING, "Devx out_of_buffer counter is not supported in the secondary process"); + rte_errno = ENOTSUP; + return 1; + } return mlx5_devx_cmd_queue_counter_query (priv->q_counters, 0, (uint32_t *)stat); + } MKSTR(path, "%s/ports/%d/hw_counters/%s", priv->sh->ibdev_path, priv->dev_port, diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5.c dpdk-22.11.5/drivers/net/mlx5/mlx5.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5.c 2024-04-22 11:25:10.000000000 +0000 @@ -2058,6 +2058,7 @@ mlx5_flex_item_port_cleanup(dev); #ifdef HAVE_MLX5_HWS_SUPPORT flow_hw_destroy_vport_action(dev); + /* dr context will be closed after mlx5_os_free_shared_dr. */ flow_hw_resource_release(dev); flow_hw_clear_port_info(dev); if (priv->sh->config.dv_flow_en == 2) { @@ -2074,7 +2075,7 @@ mlx5_free(priv->rxq_privs); priv->rxq_privs = NULL; } - if (priv->txqs != NULL) { + if (priv->txqs != NULL && dev->data->tx_queues != NULL) { /* XXX race condition if mlx5_tx_burst() is still running. */ rte_delay_us_sleep(1000); for (i = 0; (i != priv->txqs_n); ++i) @@ -2083,16 +2084,22 @@ priv->txqs = NULL; } mlx5_proc_priv_uninit(dev); + if (priv->drop_queue.hrxq) + mlx5_drop_action_destroy(dev); if (priv->q_counters) { mlx5_devx_cmd_destroy(priv->q_counters); priv->q_counters = NULL; } - if (priv->drop_queue.hrxq) - mlx5_drop_action_destroy(dev); if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl); mlx5_mprq_free_mp(dev); mlx5_os_free_shared_dr(priv); +#ifdef HAVE_MLX5_HWS_SUPPORT + if (priv->dr_ctx) { + claim_zero(mlx5dr_context_close(priv->dr_ctx)); + priv->dr_ctx = NULL; + } +#endif if (priv->rss_conf.rss_key != NULL) mlx5_free(priv->rss_conf.rss_key); if (priv->reta_idx != NULL) diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5.h dpdk-22.11.5/drivers/net/mlx5/mlx5.h --- dpdk-22.11.4/drivers/net/mlx5/mlx5.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5.h 2024-04-22 11:25:10.000000000 +0000 @@ -234,14 +234,22 @@ struct mlx5_xstats_ctrl { /* Number of device stats. */ uint16_t stats_n; + /* Number of device stats, for the 2nd port in bond. */ + uint16_t stats_n_2nd; /* Number of device stats identified by PMD. */ - uint16_t mlx5_stats_n; + uint16_t mlx5_stats_n; /* Index in the device counters table. */ uint16_t dev_table_idx[MLX5_MAX_XSTATS]; + /* Index in the output table. */ + uint16_t xstats_o_idx[MLX5_MAX_XSTATS]; uint64_t base[MLX5_MAX_XSTATS]; uint64_t xstats[MLX5_MAX_XSTATS]; uint64_t hw_stats[MLX5_MAX_XSTATS]; struct mlx5_counter_ctrl info[MLX5_MAX_XSTATS]; + /* Index in the device counters table, for the 2nd port in bond. */ + uint16_t dev_table_idx_2nd[MLX5_MAX_XSTATS]; + /* Index in the output table, for the 2nd port in bond. 
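Note on the mlx5_os.c and mlx5.c hunks above: the devx "out_of_buffer" queue-counter query is now refused in secondary processes (the devx object belongs to the primary), and the close path is reordered - drop action destroyed before the queue counters, dr_ctx closed only after mlx5_os_free_shared_dr(), and priv->txqs walked only while dev->data->tx_queues still exists. The process-type guard is the stock EAL pattern:

    #include <errno.h>
    #include <stdint.h>
    #include <rte_eal.h>

    /* Refuse primary-only work when running as a secondary process;
     * the devx query call itself is elided from this sketch. */
    static int
    read_devx_counter(uint32_t *value)
    {
            if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                    return -ENOTSUP;
            *value = 0;     /* placeholder for the real devx query */
            return 0;
    }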
*/ + uint16_t xstats_o_idx_2nd[MLX5_MAX_XSTATS]; }; struct mlx5_stats_ctrl { @@ -1731,11 +1739,7 @@ rte_spinlock_t hw_ctrl_lock; LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows; LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows; - struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; - struct rte_flow_template_table *hw_esw_sq_miss_tbl; - struct rte_flow_template_table *hw_esw_zero_tbl; - struct rte_flow_template_table *hw_tx_meta_cpy_tbl; - struct rte_flow_template_table *hw_lacp_rx_tbl; + struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; struct rte_flow_pattern_template *hw_tx_repr_tagging_pt; struct rte_flow_actions_template *hw_tx_repr_tagging_at; struct rte_flow_template_table *hw_tx_repr_tagging_tbl; @@ -1974,8 +1978,9 @@ struct rte_dev_eeprom_info *info); int mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, uint64_t *stat); -int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats); -int mlx5_os_get_stats_n(struct rte_eth_dev *dev); +int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats); +int mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, + uint16_t *n_stats, uint16_t *n_stats_sec); void mlx5_os_stats_init(struct rte_eth_dev *dev); int mlx5_get_flag_dropless_rq(struct rte_eth_dev *dev); diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_flow.c dpdk-22.11.5/drivers/net/mlx5/mlx5_flow.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_flow.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_flow.c 2024-04-22 11:25:10.000000000 +0000 @@ -5135,6 +5135,7 @@ } break; case RTE_FLOW_ACTION_TYPE_COUNT: + case RTE_FLOW_ACTION_TYPE_AGE: if (encap) { rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action)); @@ -5458,8 +5459,8 @@ struct mlx5_rte_flow_item_tag *tag_item_spec; struct mlx5_rte_flow_item_tag *tag_item_mask; uint32_t tag_id = 0; - struct rte_flow_item *vlan_item_dst = NULL; - const struct rte_flow_item *vlan_item_src = NULL; + bool vlan_actions; + struct rte_flow_item *orig_sfx_items = sfx_items; const struct rte_flow_item *orig_items = items; struct rte_flow_action *hw_mtr_action; struct rte_flow_action *action_pre_head = NULL; @@ -5476,6 +5477,7 @@ /* Prepare the suffix subflow items. */ tag_item = sfx_items++; + tag_item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int item_type = items->type; @@ -5498,10 +5500,13 @@ sfx_items++; break; case RTE_FLOW_ITEM_TYPE_VLAN: - /* Determine if copy vlan item below. */ - vlan_item_src = items; - vlan_item_dst = sfx_items++; - vlan_item_dst->type = RTE_FLOW_ITEM_TYPE_VOID; + /* + * Copy VLAN items in case VLAN actions are performed. + * If there are no VLAN actions, these items will be VOID. + */ + memcpy(sfx_items, items, sizeof(*sfx_items)); + sfx_items->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN; + sfx_items++; break; default: break; @@ -5518,6 +5523,7 @@ tag_action = actions_pre++; } /* Prepare the actions for prefix and suffix flow. */ + vlan_actions = false; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { struct rte_flow_action *action_cur = NULL; @@ -5548,16 +5554,7 @@ break; case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN: case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID: - if (vlan_item_dst && vlan_item_src) { - memcpy(vlan_item_dst, vlan_item_src, - sizeof(*vlan_item_dst)); - /* - * Convert to internal match item, it is used - * for vlan push and set vid. 
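In the meter split above, VLAN items are now always copied into the suffix pattern as the driver's private VLAN item type, and only demoted to VOID afterwards if no VLAN actions were found. A small sketch of that demotion pass; INTERNAL_VLAN is an illustrative stand-in for MLX5_RTE_FLOW_ITEM_TYPE_VLAN:

#include <stdbool.h>
#include <rte_flow.h>

#define INTERNAL_VLAN ((enum rte_flow_item_type)-1) /* illustrative */

static void
void_unused_vlan_items(struct rte_flow_item *sfx_items, bool vlan_actions)
{
    if (vlan_actions)
        return;
    for (; sfx_items->type != RTE_FLOW_ITEM_TYPE_END; sfx_items++)
        if (sfx_items->type == INTERNAL_VLAN)
            sfx_items->type = RTE_FLOW_ITEM_TYPE_VOID;
}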
- */ - vlan_item_dst->type = (enum rte_flow_item_type) - MLX5_RTE_FLOW_ITEM_TYPE_VLAN; - } + vlan_actions = true; break; case RTE_FLOW_ACTION_TYPE_COUNT: if (fm->def_policy) @@ -5572,6 +5569,14 @@ actions_sfx++ : actions_pre++; memcpy(action_cur, actions, sizeof(struct rte_flow_action)); } + /* If there are no VLAN actions, convert VLAN items to VOID in suffix flow items. */ + if (!vlan_actions) { + struct rte_flow_item *it = orig_sfx_items; + + for (; it->type != RTE_FLOW_ITEM_TYPE_END; it++) + if (it->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) + it->type = RTE_FLOW_ITEM_TYPE_VOID; + } /* Add end action to the actions. */ actions_sfx->type = RTE_FLOW_ACTION_TYPE_END; if (priv->sh->meter_aso_en) { @@ -5661,8 +5666,6 @@ tag_action->type = (enum rte_flow_action_type) MLX5_RTE_FLOW_ACTION_TYPE_TAG; tag_action->conf = set_tag; - tag_item->type = (enum rte_flow_item_type) - MLX5_RTE_FLOW_ITEM_TYPE_TAG; tag_item->spec = tag_item_spec; tag_item->last = NULL; tag_item->mask = tag_item_mask; @@ -6490,6 +6493,19 @@ &drop_split_info, error); } +static int +flow_count_vlan_items(const struct rte_flow_item items[]) +{ + int items_n = 0; + + for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + if (items->type == RTE_FLOW_ITEM_TYPE_VLAN || + items->type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_VLAN) + items_n++; + } + return items_n; +} + /** * The splitting for meter feature. * @@ -6545,6 +6561,7 @@ size_t act_size; size_t item_size; int actions_n = 0; + int vlan_items_n = 0; int ret = 0; if (priv->mtr_en) @@ -6604,9 +6621,11 @@ act_size = (sizeof(struct rte_flow_action) * (actions_n + METER_PREFIX_ACTION)) + sizeof(struct mlx5_rte_flow_action_set_tag); - /* Suffix items: tag, vlan, port id, end. */ -#define METER_SUFFIX_ITEM 4 - item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM + + /* Flow can have multiple VLAN items. Account for them in suffix items. */ + vlan_items_n = flow_count_vlan_items(items); + /* Suffix items: tag, [vlans], port id, end. */ +#define METER_SUFFIX_ITEM 3 + item_size = sizeof(struct rte_flow_item) * (METER_SUFFIX_ITEM + vlan_items_n) + sizeof(struct mlx5_rte_flow_item_tag) * 2; sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size), 0, SOCKET_ID_ANY); diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_flow.h dpdk-22.11.5/drivers/net/mlx5/mlx5_flow.h --- dpdk-22.11.4/drivers/net/mlx5/mlx5_flow.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_flow.h 2024-04-22 11:25:10.000000000 +0000 @@ -75,7 +75,7 @@ /* Now, the maximal ports will be supported is 16, action number is 32M. 
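flow_count_vlan_items() lets the allocation account for one suffix slot per VLAN item instead of assuming a single one. A sketch of the sizing arithmetic, with SUFFIX_FIXED_ITEMS standing in for the tag, port id and end slots (METER_SUFFIX_ITEM in the hunk above):

#include <stddef.h>
#include <rte_flow.h>

#define SUFFIX_FIXED_ITEMS 3 /* tag, port id, end */

static size_t
suffix_items_size(const struct rte_flow_item items[])
{
    int vlans = 0;

    for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
        if (items->type == RTE_FLOW_ITEM_TYPE_VLAN)
            vlans++;
    return sizeof(struct rte_flow_item) * (SUFFIX_FIXED_ITEMS + vlans);
}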
*/ #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10 -#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22 +#define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25 #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) /* 29-31: type, 25-28: owner port, 0-24: index */ @@ -1594,6 +1594,28 @@ } } +static __rte_always_inline int +flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val) +{ +#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) + uint32_t port; + + MLX5_ETH_FOREACH_DEV(port, NULL) { + struct mlx5_priv *priv; + priv = rte_eth_devices[port].data->dev_private; + + if (priv->dr_ctx == dr_ctx) { + *port_val = port; + return 0; + } + } +#else + RTE_SET_USED(dr_ctx); + RTE_SET_USED(port_val); +#endif + return -EINVAL; +} + void flow_hw_set_port_info(struct rte_eth_dev *dev); void flow_hw_clear_port_info(struct rte_eth_dev *dev); @@ -2186,6 +2208,25 @@ [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; }; +/* Contains all templates required for control flow rules in FDB with HWS. */ +struct mlx5_flow_hw_ctrl_fdb { + struct rte_flow_pattern_template *esw_mgr_items_tmpl; + struct rte_flow_actions_template *regc_jump_actions_tmpl; + struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; + struct rte_flow_pattern_template *regc_sq_items_tmpl; + struct rte_flow_actions_template *port_actions_tmpl; + struct rte_flow_template_table *hw_esw_sq_miss_tbl; + struct rte_flow_pattern_template *port_items_tmpl; + struct rte_flow_actions_template *jump_one_actions_tmpl; + struct rte_flow_template_table *hw_esw_zero_tbl; + struct rte_flow_pattern_template *tx_meta_items_tmpl; + struct rte_flow_actions_template *tx_meta_actions_tmpl; + struct rte_flow_template_table *hw_tx_meta_cpy_tbl; + struct rte_flow_pattern_template *lacp_rx_items_tmpl; + struct rte_flow_actions_template *lacp_rx_actions_tmpl; + struct rte_flow_template_table *hw_lacp_rx_tbl; +}; + #define MLX5_CTRL_PROMISCUOUS (RTE_BIT32(0)) #define MLX5_CTRL_ALL_MULTICAST (RTE_BIT32(1)) #define MLX5_CTRL_BROADCAST (RTE_BIT32(2)) diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_flow_dv.c dpdk-22.11.5/drivers/net/mlx5/mlx5_flow_dv.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_flow_dv.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_flow_dv.c 2024-04-22 11:25:10.000000000 +0000 @@ -267,21 +267,41 @@ {0, 0, 0}, }; -static void +enum mlx5_l3_tunnel_detection { + l3_tunnel_none, + l3_tunnel_outer, + l3_tunnel_inner +}; + +static enum mlx5_l3_tunnel_detection mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused, - uint8_t next_protocol, uint64_t *item_flags, - int *tunnel) + uint8_t next_protocol, uint64_t item_flags, + uint64_t *l3_tunnel_flag) { + enum mlx5_l3_tunnel_detection td = l3_tunnel_none; + MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 || item->type == RTE_FLOW_ITEM_TYPE_IPV6); - if (next_protocol == IPPROTO_IPIP) { - *item_flags |= MLX5_FLOW_LAYER_IPIP; - *tunnel = 1; - } - if (next_protocol == IPPROTO_IPV6) { - *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP; - *tunnel = 1; + if ((item_flags & MLX5_FLOW_LAYER_OUTER_L3) == 0) { + switch (next_protocol) { + case IPPROTO_IPIP: + td = l3_tunnel_outer; + *l3_tunnel_flag = MLX5_FLOW_LAYER_IPIP; + break; + case IPPROTO_IPV6: + td = l3_tunnel_outer; + *l3_tunnel_flag = MLX5_FLOW_LAYER_IPV6_ENCAP; + break; + default: + break; + } + } else { + td = l3_tunnel_inner; + *l3_tunnel_flag = item->type == RTE_FLOW_ITEM_TYPE_IPV4 ? 
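The new mlx5_l3_tunnel_detection enum separates an outer IP-in-IP header (tunnel protocol announced before any outer L3 match) from an inner one (a second L3 header). A condensed, illustrative reading of that decision:

#include <stdint.h>
#include <netinet/in.h>

enum l3_tunnel { TUN_NONE, TUN_OUTER, TUN_INNER };

static enum l3_tunnel
detect_l3_tunnel(int have_outer_l3, uint8_t next_proto)
{
    if (have_outer_l3)
        return TUN_INNER;  /* a second L3 header is the inner one */
    if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
        return TUN_OUTER;  /* IP-in-IP announced by next protocol */
    return TUN_NONE;
}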
+ MLX5_FLOW_LAYER_IPIP : + MLX5_FLOW_LAYER_IPV6_ENCAP; } + return td; } static inline struct mlx5_hlist * @@ -5207,13 +5227,6 @@ &grp_info, error); if (ret) return ret; - if (attributes->group == target_group && - !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET | - MLX5_FLOW_ACTION_TUNNEL_MATCH))) - return rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, NULL, - "target group must be other than" - " the current flow group"); if (table == 0) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF, @@ -5675,7 +5688,7 @@ "cannot allocate resource memory"); return NULL; } - rte_memcpy(&entry->ft_type, + rte_memcpy(RTE_PTR_ADD(entry, offsetof(typeof(*entry), ft_type)), RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)), key_len + data_len); if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) @@ -6978,6 +6991,40 @@ return 0; } +static __rte_always_inline uint8_t +mlx5_flow_l3_next_protocol(const struct rte_flow_item *l3_item, + enum MLX5_SET_MATCHER key_type) +{ +#define MLX5_L3_NEXT_PROTOCOL(i, ms) \ + ((i)->type == RTE_FLOW_ITEM_TYPE_IPV4 ? \ + ((const struct rte_flow_item_ipv4 *)(i)->ms)->hdr.next_proto_id : \ + (i)->type == RTE_FLOW_ITEM_TYPE_IPV6 ? \ + ((const struct rte_flow_item_ipv6 *)(i)->ms)->hdr.proto : \ + (i)->type == RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT ? \ + ((const struct rte_flow_item_ipv6_frag_ext *)(i)->ms)->hdr.next_header :\ + 0xff) + + uint8_t next_protocol; + + if (l3_item->mask != NULL && l3_item->spec != NULL) { + next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); + if (next_protocol) + next_protocol &= MLX5_L3_NEXT_PROTOCOL(l3_item, spec); + else + next_protocol = 0xff; + } else if (key_type == MLX5_SET_MATCHER_HS_M && l3_item->mask != NULL) { + next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, mask); + } else if (key_type == MLX5_SET_MATCHER_HS_V && l3_item->spec != NULL) { + next_protocol = MLX5_L3_NEXT_PROTOCOL(l3_item, spec); + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } + return next_protocol; + +#undef MLX5_L3_NEXT_PROTOCOL +} + /** * Internal validation function. For validating both actions and items. * @@ -7115,6 +7162,8 @@ return ret; is_root = (uint64_t)ret; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { + enum mlx5_l3_tunnel_detection l3_tunnel_detection; + uint64_t l3_tunnel_flag; int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); int type = items->type; @@ -7192,8 +7241,16 @@ vlan_m = items->mask; break; case RTE_FLOW_ITEM_TYPE_IPV4: - mlx5_flow_tunnel_ip_check(items, next_protocol, - &item_flags, &tunnel); + next_protocol = mlx5_flow_l3_next_protocol + (items, (enum MLX5_SET_MATCHER)-1); + l3_tunnel_detection = + mlx5_flow_tunnel_ip_check(items, next_protocol, + item_flags, + &l3_tunnel_flag); + if (l3_tunnel_detection == l3_tunnel_inner) { + item_flags |= l3_tunnel_flag; + tunnel = 1; + } ret = flow_dv_validate_item_ipv4(dev, items, item_flags, last_item, ether_type, error); @@ -7201,23 +7258,20 @@ return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : MLX5_FLOW_LAYER_OUTER_L3_IPV4; - if (items->mask != NULL && - ((const struct rte_flow_item_ipv4 *) - items->mask)->hdr.next_proto_id) { - next_protocol = - ((const struct rte_flow_item_ipv4 *) - (items->spec))->hdr.next_proto_id; - next_protocol &= - ((const struct rte_flow_item_ipv4 *) - (items->mask))->hdr.next_proto_id; - } else { - /* Reset for inner layer. 
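mlx5_flow_l3_next_protocol() centralizes the spec/mask resolution that was previously duplicated per item type. The core rule, sketched with plain byte pointers instead of the flow item accessors:

#include <stdint.h>
#include <stddef.h>

static uint8_t
l3_next_protocol(const uint8_t *spec, const uint8_t *mask)
{
    if (spec != NULL && mask != NULL)
        return *mask ? (uint8_t)(*spec & *mask) : 0xff;
    return 0xff; /* unknown: reset for the inner layer */
}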
*/ - next_protocol = 0xff; - } + if (l3_tunnel_detection == l3_tunnel_outer) + item_flags |= l3_tunnel_flag; break; case RTE_FLOW_ITEM_TYPE_IPV6: - mlx5_flow_tunnel_ip_check(items, next_protocol, - &item_flags, &tunnel); + next_protocol = mlx5_flow_l3_next_protocol + (items, (enum MLX5_SET_MATCHER)-1); + l3_tunnel_detection = + mlx5_flow_tunnel_ip_check(items, next_protocol, + item_flags, + &l3_tunnel_flag); + if (l3_tunnel_detection == l3_tunnel_inner) { + item_flags |= l3_tunnel_flag; + tunnel = 1; + } ret = mlx5_flow_validate_item_ipv6(items, item_flags, last_item, ether_type, @@ -7227,22 +7281,8 @@ return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : MLX5_FLOW_LAYER_OUTER_L3_IPV6; - if (items->mask != NULL && - ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto) { - item_ipv6_proto = - ((const struct rte_flow_item_ipv6 *) - items->spec)->hdr.proto; - next_protocol = - ((const struct rte_flow_item_ipv6 *) - items->spec)->hdr.proto; - next_protocol &= - ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto; - } else { - /* Reset for inner layer. */ - next_protocol = 0xff; - } + if (l3_tunnel_detection == l3_tunnel_outer) + item_flags |= l3_tunnel_flag; break; case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: ret = flow_dv_validate_item_ipv6_frag_ext(items, @@ -7253,19 +7293,8 @@ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; - if (items->mask != NULL && - ((const struct rte_flow_item_ipv6_frag_ext *) - items->mask)->hdr.next_header) { - next_protocol = - ((const struct rte_flow_item_ipv6_frag_ext *) - items->spec)->hdr.next_header; - next_protocol &= - ((const struct rte_flow_item_ipv6_frag_ext *) - items->mask)->hdr.next_header; - } else { - /* Reset for inner layer. */ - next_protocol = 0xff; - } + next_protocol = mlx5_flow_l3_next_protocol + (items, (enum MLX5_SET_MATCHER)-1); break; case RTE_FLOW_ITEM_TYPE_TCP: ret = mlx5_flow_validate_item_tcp @@ -9614,14 +9643,13 @@ { const struct rte_flow_item_geneve_opt *geneve_opt_m; const struct rte_flow_item_geneve_opt *geneve_opt_v; - const struct rte_flow_item_geneve_opt *geneve_opt_vv = item->spec; - void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters); + const struct rte_flow_item_geneve_opt *orig_spec = item->spec; void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3); rte_be32_t opt_data_key = 0, opt_data_mask = 0; - uint32_t *data; + size_t option_byte_len; int ret = 0; - if (MLX5_ITEM_VALID(item, key_type)) + if (MLX5_ITEM_VALID(item, key_type) || !orig_spec) return -1; MLX5_ITEM_UPDATE(item, key_type, geneve_opt_v, geneve_opt_m, &rte_flow_item_geneve_opt_mask); @@ -9634,36 +9662,15 @@ return ret; } } - /* - * Set the option length in GENEVE header if not requested. - * The GENEVE TLV option length is expressed by the option length field - * in the GENEVE header. - * If the option length was not requested but the GENEVE TLV option item - * is present we set the option length field implicitly. - */ - if (!MLX5_GET16(fte_match_set_misc, misc_v, geneve_opt_len)) { - if (key_type & MLX5_SET_MATCHER_M) - MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, - MLX5_GENEVE_OPTLEN_MASK); - else - MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len, - geneve_opt_v->option_len + 1); - } - /* Set the data. 
*/ - if (key_type == MLX5_SET_MATCHER_SW_V) - data = geneve_opt_vv->data; - else - data = geneve_opt_v->data; - if (data) { - memcpy(&opt_data_key, data, - RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), - sizeof(opt_data_key))); - memcpy(&opt_data_mask, geneve_opt_m->data, - RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4), - sizeof(opt_data_mask))); + /* Convert the option length from DW to bytes for using memcpy. */ + option_byte_len = RTE_MIN((size_t)(orig_spec->option_len * 4), + sizeof(rte_be32_t)); + if (geneve_opt_v->data) { + memcpy(&opt_data_key, geneve_opt_v->data, option_byte_len); + memcpy(&opt_data_mask, geneve_opt_m->data, option_byte_len); MLX5_SET(fte_match_set_misc3, misc3_v, - geneve_tlv_option_0_data, - rte_be_to_cpu_32(opt_data_key & opt_data_mask)); + geneve_tlv_option_0_data, + rte_be_to_cpu_32(opt_data_key & opt_data_mask)); } return ret; } @@ -13172,6 +13179,13 @@ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Connection is not supported"); + if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "CT supports port indexes up to " + RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); + return 0; + } idx = flow_dv_aso_ct_alloc(dev, error); if (!idx) return rte_flow_error_set(error, rte_errno, @@ -13221,6 +13235,8 @@ int tunnel = !!(wks->item_flags & MLX5_FLOW_LAYER_TUNNEL); int item_type = items->type; uint64_t last_item = wks->last_item; + enum mlx5_l3_tunnel_detection l3_tunnel_detection; + uint64_t l3_tunnel_flag; int ret; switch (item_type) { @@ -13264,94 +13280,47 @@ MLX5_FLOW_LAYER_OUTER_VLAN); break; case RTE_FLOW_ITEM_TYPE_IPV4: - mlx5_flow_tunnel_ip_check(items, next_protocol, - &wks->item_flags, &tunnel); + next_protocol = mlx5_flow_l3_next_protocol(items, key_type); + l3_tunnel_detection = + mlx5_flow_tunnel_ip_check(items, next_protocol, + wks->item_flags, + &l3_tunnel_flag); + if (l3_tunnel_detection == l3_tunnel_inner) { + wks->item_flags |= l3_tunnel_flag; + tunnel = 1; + } flow_dv_translate_item_ipv4(key, items, tunnel, wks->group, key_type); wks->priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : MLX5_FLOW_LAYER_OUTER_L3_IPV4; - if (items->mask != NULL && - items->spec != NULL && - ((const struct rte_flow_item_ipv4 *) - items->mask)->hdr.next_proto_id) { - next_protocol = - ((const struct rte_flow_item_ipv4 *) - (items->spec))->hdr.next_proto_id; - next_protocol &= - ((const struct rte_flow_item_ipv4 *) - (items->mask))->hdr.next_proto_id; - } else if (key_type == MLX5_SET_MATCHER_HS_M && - items->mask != NULL) { - next_protocol = ((const struct rte_flow_item_ipv4 *) - (items->mask))->hdr.next_proto_id; - } else if (key_type == MLX5_SET_MATCHER_HS_V && - items->spec != NULL) { - next_protocol = ((const struct rte_flow_item_ipv4 *) - (items->spec))->hdr.next_proto_id; - } else { - /* Reset for inner layer. 
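The GENEVE fix converts option_len, expressed in 4-byte words, into bytes before the memcpy, clamped to the width of the single matcher data field. A self-contained sketch:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

static rte_be32_t
first_option_dw(const uint32_t *opt_data, uint8_t option_len_dw)
{
    rte_be32_t dw = 0;
    size_t len = (size_t)option_len_dw * 4;

    if (len > sizeof(dw))
        len = sizeof(dw); /* only one DW of match data is used */
    if (opt_data != NULL)
        memcpy(&dw, opt_data, len);
    return dw;
}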
*/ - next_protocol = 0xff; - } + if (l3_tunnel_detection == l3_tunnel_outer) + wks->item_flags |= l3_tunnel_flag; break; case RTE_FLOW_ITEM_TYPE_IPV6: - mlx5_flow_tunnel_ip_check(items, next_protocol, - &wks->item_flags, &tunnel); + next_protocol = mlx5_flow_l3_next_protocol(items, key_type); + l3_tunnel_detection = + mlx5_flow_tunnel_ip_check(items, next_protocol, + wks->item_flags, + &l3_tunnel_flag); + if (l3_tunnel_detection == l3_tunnel_inner) { + wks->item_flags |= l3_tunnel_flag; + tunnel = 1; + } flow_dv_translate_item_ipv6(key, items, tunnel, wks->group, key_type); wks->priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : MLX5_FLOW_LAYER_OUTER_L3_IPV6; - if (items->mask != NULL && - items->spec != NULL && - ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto) { - next_protocol = - ((const struct rte_flow_item_ipv6 *) - items->spec)->hdr.proto; - next_protocol &= - ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto; - } else if (key_type == MLX5_SET_MATCHER_HS_M && - items->mask != NULL) { - next_protocol = ((const struct rte_flow_item_ipv6 *) - (items->mask))->hdr.proto; - } else if (key_type == MLX5_SET_MATCHER_HS_V && - items->spec != NULL) { - next_protocol = ((const struct rte_flow_item_ipv6 *) - (items->spec))->hdr.proto; - } else { - /* Reset for inner layer. */ - next_protocol = 0xff; - } + if (l3_tunnel_detection == l3_tunnel_outer) + wks->item_flags |= l3_tunnel_flag; break; case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT: flow_dv_translate_item_ipv6_frag_ext (key, items, tunnel, key_type); last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT : MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT; - if (items->mask != NULL && - items->spec != NULL && - ((const struct rte_flow_item_ipv6_frag_ext *) - items->mask)->hdr.next_header) { - next_protocol = - ((const struct rte_flow_item_ipv6_frag_ext *) - items->spec)->hdr.next_header; - next_protocol &= - ((const struct rte_flow_item_ipv6_frag_ext *) - items->mask)->hdr.next_header; - } else if (key_type == MLX5_SET_MATCHER_HS_M && - items->mask != NULL) { - next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) - (items->mask))->hdr.next_header; - } else if (key_type == MLX5_SET_MATCHER_HS_V && - items->spec != NULL) { - next_protocol = ((const struct rte_flow_item_ipv6_frag_ext *) - (items->spec))->hdr.next_header; - } else { - /* Reset for inner layer. */ - next_protocol = 0xff; - } + next_protocol = mlx5_flow_l3_next_protocol(items, key_type); break; case RTE_FLOW_ITEM_TYPE_TCP: flow_dv_translate_item_tcp(key, items, tunnel, key_type); @@ -13777,7 +13746,7 @@ * Avoid be overwritten by other sub mlx5_flows. 
*/ if (wks.geneve_tlv_option) - dev_flow->flow->geneve_tlv_option = wks.geneve_tlv_option; + dev_flow->flow->geneve_tlv_option += wks.geneve_tlv_option; return 0; } @@ -15377,9 +15346,9 @@ flow_dv_aso_ct_release(dev, flow->ct, NULL); else if (flow->age) flow_dv_aso_age_release(dev, flow->age); - if (flow->geneve_tlv_option) { + while (flow->geneve_tlv_option) { flow_dev_geneve_tlv_option_resource_release(priv->sh); - flow->geneve_tlv_option = 0; + flow->geneve_tlv_option--; } while (flow->dev_handles) { uint32_t tmp_idx = flow->dev_handles; @@ -15841,6 +15810,8 @@ case RTE_FLOW_ACTION_TYPE_CONNTRACK: ret = flow_dv_translate_create_conntrack(dev, action->conf, err); + if (!ret) + break; idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret); break; default: @@ -17166,9 +17137,8 @@ } } tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl); - if (priority < RTE_COLOR_RED) - flow_dv_match_meta_reg(matcher.mask.buf, - (enum modify_reg)color_reg_c_idx, color_mask, color_mask); + flow_dv_match_meta_reg(matcher.mask.buf, + (enum modify_reg)color_reg_c_idx, color_mask, color_mask); matcher.priority = priority; matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf, matcher.mask.size); @@ -17219,7 +17189,6 @@ int i; int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err); struct mlx5_sub_policy_color_rule *color_rule; - bool svport_match; struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL}; if (ret < 0) @@ -17255,10 +17224,9 @@ /* No use. */ attr.priority = i; /* Create matchers for colors. */ - svport_match = (i != RTE_COLOR_RED) ? match_src_port : false; if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx, MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy, - &attr, svport_match, NULL, + &attr, match_src_port, NULL, &color_rule->matcher, &flow_err)) { DRV_LOG(ERR, "Failed to create color%u matcher.", i); goto err_exit; @@ -17268,7 +17236,7 @@ color_reg_c_idx, (enum rte_color)i, color_rule->matcher, acts[i].actions_n, acts[i].dv_actions, - svport_match, NULL, &color_rule->rule, + match_src_port, NULL, &color_rule->rule, &attr)) { DRV_LOG(ERR, "Failed to create color%u rule.", i); goto err_exit; @@ -18151,7 +18119,7 @@ struct { struct mlx5_flow_meter_policy *fm_policy; struct mlx5_flow_meter_info *next_fm; - struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS]; + struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS]; } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} }; uint32_t fm_cnt = 0; uint32_t i, j; @@ -18185,14 +18153,22 @@ mtr_policy = fm_info[i].fm_policy; rte_spinlock_lock(&mtr_policy->sl); sub_policy = mtr_policy->sub_policys[domain][0]; - for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { + for (j = 0; j < RTE_COLORS; j++) { uint8_t act_n = 0; - struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; + struct mlx5_flow_dv_modify_hdr_resource *modify_hdr = NULL; struct mlx5_flow_dv_port_id_action_resource *port_action; + uint8_t fate_action; - if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR && - mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_PORT_ID) - continue; + if (j == RTE_COLOR_RED) { + fate_action = MLX5_FLOW_FATE_DROP; + } else { + fate_action = mtr_policy->act_cnt[j].fate_action; + modify_hdr = mtr_policy->act_cnt[j].modify_hdr; + if (fate_action != MLX5_FLOW_FATE_MTR && + fate_action != MLX5_FLOW_FATE_PORT_ID && + fate_action != MLX5_FLOW_FATE_DROP) + continue; + } color_rule = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_sub_policy_color_rule), 0, SOCKET_ID_ANY); @@ -18204,9 +18180,8 @@ goto err_exit; } color_rule->src_port 
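Since a flow may now reference several GENEVE TLV option resources, the destroy path above releases them in a loop, decrementing the per-flow count, instead of clearing a boolean. The pattern in isolation, with illustrative parameters:

#include <stdint.h>

static void
release_counted_resource(uint32_t *refcnt, void (*release_one)(void *),
                         void *ctx)
{
    while (*refcnt) {
        release_one(ctx);
        (*refcnt)--;
    }
}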
= src_port; - modify_hdr = mtr_policy->act_cnt[j].modify_hdr; /* Prepare to create color rule. */ - if (mtr_policy->act_cnt[j].fate_action == MLX5_FLOW_FATE_MTR) { + if (fate_action == MLX5_FLOW_FATE_MTR) { next_fm = fm_info[i].next_fm; if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) { mlx5_free(color_rule); @@ -18233,7 +18208,7 @@ } acts.dv_actions[act_n++] = tbl_data->jump.action; acts.actions_n = act_n; - } else { + } else if (fate_action == MLX5_FLOW_FATE_PORT_ID) { port_action = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], mtr_policy->act_cnt[j].rix_port_id_action); @@ -18246,6 +18221,9 @@ acts.dv_actions[act_n++] = modify_hdr->action; acts.dv_actions[act_n++] = port_action->action; acts.actions_n = act_n; + } else { + acts.dv_actions[act_n++] = mtr_policy->dr_drop_action[domain]; + acts.actions_n = act_n; } fm_info[i].tag_rule[j] = color_rule; TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port); @@ -18277,7 +18255,7 @@ mtr_policy = fm_info[i].fm_policy; rte_spinlock_lock(&mtr_policy->sl); sub_policy = mtr_policy->sub_policys[domain][0]; - for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) { + for (j = 0; j < RTE_COLORS; j++) { color_rule = fm_info[i].tag_rule[j]; if (!color_rule) continue; @@ -18607,8 +18585,7 @@ LIST_FOREACH(act, &age_info->aged_aso, next) { nb_flows++; if (nb_contexts) { - context[nb_flows - 1] = - act->age_params.context; + context[nb_flows - 1] = act->age_params.context; if (!(--nb_contexts)) break; } diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_flow_hw.c dpdk-22.11.5/drivers/net/mlx5/mlx5_flow_hw.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_flow_hw.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_flow_hw.c 2024-04-22 11:25:10.000000000 +0000 @@ -72,6 +72,10 @@ static __rte_always_inline uint32_t flow_hw_tx_tag_regc_mask(struct rte_eth_dev *dev); static __rte_always_inline uint32_t flow_hw_tx_tag_regc_value(struct rte_eth_dev *dev); +static void +flow_hw_age_count_release(struct mlx5_priv *priv, uint32_t queue, struct rte_flow_hw *flow, + struct rte_flow_error *error); + const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops; /* DR action flags with different table. */ @@ -2083,6 +2087,30 @@ } /** + * Release any actions allocated for the flow rule during actions construction. + * + * @param[in] flow + * Pointer to flow structure. + */ +static void +flow_hw_release_actions(struct rte_eth_dev *dev, + uint32_t queue, + struct rte_flow_hw *flow) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_mtr_pool *pool = priv->hws_mpool; + + if (flow->fate_type == MLX5_FLOW_FATE_JUMP) + flow_hw_jump_release(dev, flow->jump); + else if (flow->fate_type == MLX5_FLOW_FATE_QUEUE) + mlx5_hrxq_obj_release(dev, flow->hrxq); + if (mlx5_hws_cnt_id_valid(flow->cnt_id)) + flow_hw_age_count_release(priv, queue, flow, NULL); + if (flow->mtr_id) + mlx5_ipool_free(pool->idx_pool, flow->mtr_id); +} + +/** * Construct flow action array. 
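flow_hw_release_actions() gives the construction path a single error exit that returns whatever per-rule resources were already taken (jump action, queue object, counter, meter). The shape of that pattern, with illustrative types and no-op releasers:

#include <stddef.h>

struct rule_state {
    void *jump;   /* owned jump action, or NULL */
    void *hrxq;   /* owned queue object, or NULL */
};

static void release_jump(void *j) { (void)j; }
static void release_hrxq(void *h) { (void)h; }

static void
release_rule_state(struct rule_state *st)
{
    if (st->jump != NULL)
        release_jump(st->jump);
    else if (st->hrxq != NULL)
        release_hrxq(st->hrxq);
}

static int
construct_actions(struct rule_state *st, int step_ok)
{
    if (!step_ok)
        goto error;
    return 0;
error:
    release_rule_state(st); /* undo every acquisition so far */
    return -1;
}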
* * For action template contains dynamic actions, these actions need to @@ -2170,6 +2198,7 @@ struct mlx5_hrxq *hrxq; uint32_t ct_idx; cnt_id_t cnt_id; + uint32_t *cnt_queue; uint32_t mtr_id; action = &actions[act_data->action_src]; @@ -2190,7 +2219,7 @@ (dev, queue, action, table, it_idx, at->action_flags, job->flow, &rule_acts[act_data->action_dst])) - return -1; + goto error; break; case RTE_FLOW_ACTION_TYPE_VOID: break; @@ -2210,7 +2239,7 @@ jump = flow_hw_jump_action_register (dev, &table->cfg, jump_group, NULL); if (!jump) - return -1; + goto error; rule_acts[act_data->action_dst].action = (!!attr.group) ? jump->hws_action : jump->root_action; job->flow->jump = jump; @@ -2222,7 +2251,7 @@ ft_flag, action); if (!hrxq) - return -1; + goto error; rule_acts[act_data->action_dst].action = hrxq->action; job->flow->hrxq = hrxq; job->flow->fate_type = MLX5_FLOW_FATE_QUEUE; @@ -2232,19 +2261,19 @@ if (flow_hw_shared_action_get (dev, act_data, item_flags, &rule_acts[act_data->action_dst])) - return -1; + goto error; break; case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: enc_item = ((const struct rte_flow_action_vxlan_encap *) action->conf)->definition; if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) - return -1; + goto error; break; case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP: enc_item = ((const struct rte_flow_action_nvgre_encap *) action->conf)->definition; if (flow_dv_convert_encap_data(enc_item, buf, &encap_len, NULL)) - return -1; + goto error; break; case RTE_FLOW_ACTION_TYPE_RAW_ENCAP: raw_encap_data = @@ -2266,12 +2295,12 @@ hw_acts, action); if (ret) - return -1; + goto error; break; case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: port_action = action->conf; if (!priv->hw_vport[port_action->port_id]) - return -1; + goto error; rule_acts[act_data->action_dst].action = priv->hw_vport[port_action->port_id]; break; @@ -2286,7 +2315,7 @@ jump = flow_hw_jump_action_register (dev, &table->cfg, aso_mtr->fm.group, NULL); if (!jump) - return -1; + goto error; MLX5_ASSERT (!rule_acts[act_data->action_dst + 1].action); rule_acts[act_data->action_dst + 1].action = @@ -2295,7 +2324,7 @@ job->flow->jump = jump; job->flow->fate_type = MLX5_FLOW_FATE_JUMP; if (mlx5_aso_mtr_wait(priv->sh, MLX5_HW_INV_QUEUE, aso_mtr)) - return -1; + goto error; break; case RTE_FLOW_ACTION_TYPE_AGE: age = action->conf; @@ -2310,7 +2339,7 @@ job->flow->idx, error); if (age_idx == 0) - return -rte_errno; + goto error; job->flow->age_idx = age_idx; if (at->action_flags & MLX5_FLOW_ACTION_INDIRECT_COUNT) /* @@ -2321,10 +2350,10 @@ break; /* Fall-through. 
*/ case RTE_FLOW_ACTION_TYPE_COUNT: - ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, &queue, - &cnt_id, age_idx); + cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); + ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx); if (ret != 0) - return ret; + goto error; ret = mlx5_hws_cnt_pool_get_action_offset (priv->hws_cpool, cnt_id, @@ -2332,7 +2361,7 @@ &rule_acts[act_data->action_dst].counter.offset ); if (ret != 0) - return ret; + goto error; job->flow->cnt_id = cnt_id; break; case MLX5_RTE_FLOW_ACTION_TYPE_COUNT: @@ -2343,7 +2372,7 @@ &rule_acts[act_data->action_dst].counter.offset ); if (ret != 0) - return ret; + goto error; job->flow->cnt_id = act_data->shared_counter.id; break; case RTE_FLOW_ACTION_TYPE_CONNTRACK: @@ -2351,7 +2380,7 @@ ((uint32_t)(uintptr_t)action->conf); if (flow_hw_ct_compile(dev, queue, ct_idx, &rule_acts[act_data->action_dst])) - return -1; + goto error; break; case MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK: mtr_id = act_data->shared_meter.id & @@ -2359,7 +2388,7 @@ /* Find ASO object. */ aso_mtr = mlx5_ipool_get(pool->idx_pool, mtr_id); if (!aso_mtr) - return -1; + goto error; rule_acts[act_data->action_dst].action = pool->action; rule_acts[act_data->action_dst].aso_meter.offset = @@ -2377,7 +2406,7 @@ act_data->action_dst, action, rule_acts, &job->flow->mtr_id, MLX5_HW_INV_QUEUE); if (ret != 0) - return ret; + goto error; break; default: break; @@ -2410,6 +2439,11 @@ if (mlx5_hws_cnt_id_valid(hw_acts->cnt_id)) job->flow->cnt_id = hw_acts->cnt_id; return 0; + +error: + flow_hw_release_actions(dev, queue, job->flow); + rte_errno = EINVAL; + return -rte_errno; } static const struct rte_flow_item * @@ -2514,10 +2548,6 @@ uint32_t flow_idx; int ret; - if (unlikely((!dev->data->dev_started))) { - rte_errno = EINVAL; - goto error; - } if (unlikely(!priv->hw_q[queue].job_idx)) { rte_errno = ENOMEM; goto error; @@ -2556,10 +2586,8 @@ if (flow_hw_actions_construct(dev, job, &table->ats[action_template_index], pattern_template_index, actions, - rule_acts, queue, error)) { - rte_errno = EINVAL; + rule_acts, queue, error)) goto free; - } rule_items = flow_hw_get_rule_items(dev, table, items, pattern_template_index, job); if (!rule_items) @@ -2658,6 +2686,8 @@ struct rte_flow_hw *flow, struct rte_flow_error *error) { + uint32_t *cnt_queue; + if (mlx5_hws_cnt_is_shared(priv->hws_cpool, flow->cnt_id)) { if (flow->age_idx && !mlx5_hws_age_is_indirect(flow->age_idx)) { /* Remove this AGE parameter from indirect counter. */ @@ -2668,8 +2698,9 @@ } return; } + cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue); /* Put the counter first to reduce the race risk in BG thread. */ - mlx5_hws_cnt_pool_put(priv->hws_cpool, &queue, &flow->cnt_id); + mlx5_hws_cnt_pool_put(priv->hws_cpool, cnt_queue, &flow->cnt_id); flow->cnt_id = 0; if (flow->age_idx) { if (mlx5_hws_age_is_indirect(flow->age_idx)) { @@ -4316,7 +4347,6 @@ rm[set_vlan_vid_ix].conf)->vlan_vid != 0); const struct rte_flow_action_of_set_vlan_vid *conf = ra[set_vlan_vid_ix].conf; - rte_be16_t vid = masked ? conf->vlan_vid : 0; int width = mlx5_flow_item_field_width(dev, RTE_FLOW_FIELD_VLAN_ID, 0, NULL, &error); *spec = (typeof(*spec)) { @@ -4327,8 +4357,6 @@ }, .src = { .field = RTE_FLOW_FIELD_VALUE, - .level = vid, - .offset = 0, }, .width = width, }; @@ -4340,11 +4368,15 @@ }, .src = { .field = RTE_FLOW_FIELD_VALUE, - .level = masked ? 
(1U << width) - 1 : 0, - .offset = 0, }, .width = 0xffffffff, }; + if (masked) { + uint32_t mask_val = 0xffffffff; + + rte_memcpy(spec->src.value, &conf->vlan_vid, sizeof(conf->vlan_vid)); + rte_memcpy(mask->src.value, &mask_val, sizeof(mask_val)); + } ra[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; ra[set_vlan_vid_ix].conf = spec; rm[set_vlan_vid_ix].type = RTE_FLOW_ACTION_TYPE_MODIFY_FIELD; @@ -4371,8 +4403,6 @@ }, .src = { .field = RTE_FLOW_FIELD_VALUE, - .level = vid, - .offset = 0, }, .width = width, }; @@ -4381,6 +4411,7 @@ .conf = &conf }; + rte_memcpy(conf.src.value, &vid, sizeof(vid)); return flow_hw_modify_field_construct(job, act_data, hw_acts, &modify_action); } @@ -6287,6 +6318,72 @@ return flow_hw_table_create(dev, &cfg, &it, 1, &at, 1, error); } +/** + * Cleans up all template tables and pattern, and actions templates used for + * FDB control flow rules. + * + * @param dev + * Pointer to Ethernet device. + */ +static void +flow_hw_cleanup_ctrl_fdb_tables(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; + + if (!priv->hw_ctrl_fdb) + return; + hw_ctrl_fdb = priv->hw_ctrl_fdb; + /* Clean up templates used for LACP default miss table. */ + if (hw_ctrl_fdb->hw_lacp_rx_tbl) + claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_lacp_rx_tbl, NULL)); + if (hw_ctrl_fdb->lacp_rx_actions_tmpl) + claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->lacp_rx_actions_tmpl, + NULL)); + if (hw_ctrl_fdb->lacp_rx_items_tmpl) + claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->lacp_rx_items_tmpl, + NULL)); + /* Clean up templates used for default Tx metadata copy. */ + if (hw_ctrl_fdb->hw_tx_meta_cpy_tbl) + claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_tx_meta_cpy_tbl, NULL)); + if (hw_ctrl_fdb->tx_meta_actions_tmpl) + claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->tx_meta_actions_tmpl, + NULL)); + if (hw_ctrl_fdb->tx_meta_items_tmpl) + claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->tx_meta_items_tmpl, + NULL)); + /* Clean up templates used for default FDB jump rule. */ + if (hw_ctrl_fdb->hw_esw_zero_tbl) + claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_zero_tbl, NULL)); + if (hw_ctrl_fdb->jump_one_actions_tmpl) + claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->jump_one_actions_tmpl, + NULL)); + if (hw_ctrl_fdb->port_items_tmpl) + claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->port_items_tmpl, + NULL)); + /* Clean up templates used for default SQ miss flow rules - non-root table. */ + if (hw_ctrl_fdb->hw_esw_sq_miss_tbl) + claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_tbl, NULL)); + if (hw_ctrl_fdb->regc_sq_items_tmpl) + claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->regc_sq_items_tmpl, + NULL)); + if (hw_ctrl_fdb->port_actions_tmpl) + claim_zero(flow_hw_actions_template_destroy(dev, hw_ctrl_fdb->port_actions_tmpl, + NULL)); + /* Clean up templates used for default SQ miss flow rules - root table. */ + if (hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) + claim_zero(flow_hw_table_destroy(dev, hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, NULL)); + if (hw_ctrl_fdb->regc_jump_actions_tmpl) + claim_zero(flow_hw_actions_template_destroy(dev, + hw_ctrl_fdb->regc_jump_actions_tmpl, NULL)); + if (hw_ctrl_fdb->esw_mgr_items_tmpl) + claim_zero(flow_hw_pattern_template_destroy(dev, hw_ctrl_fdb->esw_mgr_items_tmpl, + NULL)); + /* Clean up templates structure for FDB control flow rules. 
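The VLAN VID fix stops encoding the immediate value in the modify-field level/offset members and instead copies it into the value buffer of the source field, which is what the MODIFY_FIELD API expects for RTE_FLOW_FIELD_VALUE. A sketch against the public structures:

#include <string.h>
#include <rte_flow.h>
#include <rte_byteorder.h>

static void
set_vid_immediate(struct rte_flow_action_modify_field *conf,
                  rte_be16_t vid)
{
    conf->src.field = RTE_FLOW_FIELD_VALUE;
    /* Immediate values live in src.value, not in level/offset. */
    memcpy(conf->src.value, &vid, sizeof(vid));
}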
*/ + mlx5_free(hw_ctrl_fdb); + priv->hw_ctrl_fdb = NULL; +} + /* * Create a table on the root group to for the LACP traffic redirecting. * @@ -6336,182 +6433,144 @@ * @return * 0 on success, negative values otherwise */ -static __rte_unused int +static int flow_hw_create_ctrl_tables(struct rte_eth_dev *dev, struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; - struct rte_flow_pattern_template *esw_mgr_items_tmpl = NULL; - struct rte_flow_pattern_template *regc_sq_items_tmpl = NULL; - struct rte_flow_pattern_template *port_items_tmpl = NULL; - struct rte_flow_pattern_template *tx_meta_items_tmpl = NULL; - struct rte_flow_pattern_template *lacp_rx_items_tmpl = NULL; - struct rte_flow_actions_template *regc_jump_actions_tmpl = NULL; - struct rte_flow_actions_template *port_actions_tmpl = NULL; - struct rte_flow_actions_template *jump_one_actions_tmpl = NULL; - struct rte_flow_actions_template *tx_meta_actions_tmpl = NULL; - struct rte_flow_actions_template *lacp_rx_actions_tmpl = NULL; + struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb; uint32_t xmeta = priv->sh->config.dv_xmeta_en; uint32_t repr_matching = priv->sh->config.repr_matching; - int ret; + MLX5_ASSERT(priv->hw_ctrl_fdb == NULL); + hw_ctrl_fdb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hw_ctrl_fdb), 0, SOCKET_ID_ANY); + if (!hw_ctrl_fdb) { + DRV_LOG(ERR, "port %u failed to allocate memory for FDB control flow templates", + dev->data->port_id); + rte_errno = ENOMEM; + goto err; + } + priv->hw_ctrl_fdb = hw_ctrl_fdb; /* Create templates and table for default SQ miss flow rules - root table. */ - esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); - if (!esw_mgr_items_tmpl) { + hw_ctrl_fdb->esw_mgr_items_tmpl = flow_hw_create_ctrl_esw_mgr_pattern_template(dev, error); + if (!hw_ctrl_fdb->esw_mgr_items_tmpl) { DRV_LOG(ERR, "port %u failed to create E-Switch Manager item" " template for control flows", dev->data->port_id); goto err; } - regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template(dev, error); - if (!regc_jump_actions_tmpl) { + hw_ctrl_fdb->regc_jump_actions_tmpl = flow_hw_create_ctrl_regc_jump_actions_template + (dev, error); + if (!hw_ctrl_fdb->regc_jump_actions_tmpl) { DRV_LOG(ERR, "port %u failed to create REG_C set and jump action template" " for control flows", dev->data->port_id); goto err; } - MLX5_ASSERT(priv->hw_esw_sq_miss_root_tbl == NULL); - priv->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table - (dev, esw_mgr_items_tmpl, regc_jump_actions_tmpl, error); - if (!priv->hw_esw_sq_miss_root_tbl) { + hw_ctrl_fdb->hw_esw_sq_miss_root_tbl = flow_hw_create_ctrl_sq_miss_root_table + (dev, hw_ctrl_fdb->esw_mgr_items_tmpl, hw_ctrl_fdb->regc_jump_actions_tmpl, + error); + if (!hw_ctrl_fdb->hw_esw_sq_miss_root_tbl) { DRV_LOG(ERR, "port %u failed to create table for default sq miss (root table)" " for control flows", dev->data->port_id); goto err; } /* Create templates and table for default SQ miss flow rules - non-root table. 
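flow_hw_cleanup_ctrl_fdb_tables() tears each group down in strict order: the template table first, then the actions template, then the pattern template it was built from. The same ordering with the public async-flow API, for one group:

#include <rte_flow.h>

static void
destroy_table_group(uint16_t port_id,
                    struct rte_flow_template_table *tbl,
                    struct rte_flow_actions_template *at,
                    struct rte_flow_pattern_template *pt)
{
    struct rte_flow_error err;

    /* The table references both templates, so it must go first. */
    if (tbl != NULL)
        rte_flow_template_table_destroy(port_id, tbl, &err);
    if (at != NULL)
        rte_flow_actions_template_destroy(port_id, at, &err);
    if (pt != NULL)
        rte_flow_pattern_template_destroy(port_id, pt, &err);
}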
*/ - regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); - if (!regc_sq_items_tmpl) { + hw_ctrl_fdb->regc_sq_items_tmpl = flow_hw_create_ctrl_regc_sq_pattern_template(dev, error); + if (!hw_ctrl_fdb->regc_sq_items_tmpl) { DRV_LOG(ERR, "port %u failed to create SQ item template for" " control flows", dev->data->port_id); goto err; } - port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error); - if (!port_actions_tmpl) { + hw_ctrl_fdb->port_actions_tmpl = flow_hw_create_ctrl_port_actions_template(dev, error); + if (!hw_ctrl_fdb->port_actions_tmpl) { DRV_LOG(ERR, "port %u failed to create port action template" " for control flows", dev->data->port_id); goto err; } - MLX5_ASSERT(priv->hw_esw_sq_miss_tbl == NULL); - priv->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table(dev, regc_sq_items_tmpl, - port_actions_tmpl, error); - if (!priv->hw_esw_sq_miss_tbl) { + hw_ctrl_fdb->hw_esw_sq_miss_tbl = flow_hw_create_ctrl_sq_miss_table + (dev, hw_ctrl_fdb->regc_sq_items_tmpl, hw_ctrl_fdb->port_actions_tmpl, + error); + if (!hw_ctrl_fdb->hw_esw_sq_miss_tbl) { DRV_LOG(ERR, "port %u failed to create table for default sq miss (non-root table)" " for control flows", dev->data->port_id); goto err; } /* Create templates and table for default FDB jump flow rules. */ - port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error); - if (!port_items_tmpl) { + hw_ctrl_fdb->port_items_tmpl = flow_hw_create_ctrl_port_pattern_template(dev, error); + if (!hw_ctrl_fdb->port_items_tmpl) { DRV_LOG(ERR, "port %u failed to create SQ item template for" " control flows", dev->data->port_id); goto err; } - jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template + hw_ctrl_fdb->jump_one_actions_tmpl = flow_hw_create_ctrl_jump_actions_template (dev, MLX5_HW_LOWEST_USABLE_GROUP, error); - if (!jump_one_actions_tmpl) { + if (!hw_ctrl_fdb->jump_one_actions_tmpl) { DRV_LOG(ERR, "port %u failed to create jump action template" " for control flows", dev->data->port_id); goto err; } - MLX5_ASSERT(priv->hw_esw_zero_tbl == NULL); - priv->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table(dev, port_items_tmpl, - jump_one_actions_tmpl, - error); - if (!priv->hw_esw_zero_tbl) { + hw_ctrl_fdb->hw_esw_zero_tbl = flow_hw_create_ctrl_jump_table + (dev, hw_ctrl_fdb->port_items_tmpl, hw_ctrl_fdb->jump_one_actions_tmpl, + error); + if (!hw_ctrl_fdb->hw_esw_zero_tbl) { DRV_LOG(ERR, "port %u failed to create table for default jump to group 1" " for control flows", dev->data->port_id); goto err; } /* Create templates and table for default Tx metadata copy flow rule. 
*/ if (!repr_matching && xmeta == MLX5_XMETA_MODE_META32_HWS) { - tx_meta_items_tmpl = + hw_ctrl_fdb->tx_meta_items_tmpl = flow_hw_create_tx_default_mreg_copy_pattern_template(dev, error); - if (!tx_meta_items_tmpl) { + if (!hw_ctrl_fdb->tx_meta_items_tmpl) { DRV_LOG(ERR, "port %u failed to Tx metadata copy pattern" " template for control flows", dev->data->port_id); goto err; } - tx_meta_actions_tmpl = + hw_ctrl_fdb->tx_meta_actions_tmpl = flow_hw_create_tx_default_mreg_copy_actions_template(dev, error); - if (!tx_meta_actions_tmpl) { + if (!hw_ctrl_fdb->tx_meta_actions_tmpl) { DRV_LOG(ERR, "port %u failed to Tx metadata copy actions" " template for control flows", dev->data->port_id); goto err; } - MLX5_ASSERT(priv->hw_tx_meta_cpy_tbl == NULL); - priv->hw_tx_meta_cpy_tbl = - flow_hw_create_tx_default_mreg_copy_table(dev, tx_meta_items_tmpl, - tx_meta_actions_tmpl, error); - if (!priv->hw_tx_meta_cpy_tbl) { + hw_ctrl_fdb->hw_tx_meta_cpy_tbl = + flow_hw_create_tx_default_mreg_copy_table + (dev, hw_ctrl_fdb->tx_meta_items_tmpl, + hw_ctrl_fdb->tx_meta_actions_tmpl, error); + if (!hw_ctrl_fdb->hw_tx_meta_cpy_tbl) { DRV_LOG(ERR, "port %u failed to create table for default" " Tx metadata copy flow rule", dev->data->port_id); goto err; } } /* Create LACP default miss table. */ - if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { - lacp_rx_items_tmpl = flow_hw_create_lacp_rx_pattern_template(dev, error); - if (!lacp_rx_items_tmpl) { + if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { + hw_ctrl_fdb->lacp_rx_items_tmpl = + flow_hw_create_lacp_rx_pattern_template(dev, error); + if (!hw_ctrl_fdb->lacp_rx_items_tmpl) { DRV_LOG(ERR, "port %u failed to create pattern template" " for LACP Rx traffic", dev->data->port_id); goto err; } - lacp_rx_actions_tmpl = flow_hw_create_lacp_rx_actions_template(dev, error); - if (!lacp_rx_actions_tmpl) { + hw_ctrl_fdb->lacp_rx_actions_tmpl = + flow_hw_create_lacp_rx_actions_template(dev, error); + if (!hw_ctrl_fdb->lacp_rx_actions_tmpl) { DRV_LOG(ERR, "port %u failed to create actions template" " for LACP Rx traffic", dev->data->port_id); goto err; } - priv->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table(dev, lacp_rx_items_tmpl, - lacp_rx_actions_tmpl, error); - if (!priv->hw_lacp_rx_tbl) { + hw_ctrl_fdb->hw_lacp_rx_tbl = flow_hw_create_lacp_rx_table + (dev, hw_ctrl_fdb->lacp_rx_items_tmpl, + hw_ctrl_fdb->lacp_rx_actions_tmpl, error); + if (!hw_ctrl_fdb->hw_lacp_rx_tbl) { DRV_LOG(ERR, "port %u failed to create template table for" " for LACP Rx traffic", dev->data->port_id); goto err; } } return 0; + err: - /* Do not overwrite the rte_errno. 
*/ - ret = -rte_errno; - if (ret == 0) - ret = rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Failed to create control tables."); - if (priv->hw_tx_meta_cpy_tbl) { - flow_hw_table_destroy(dev, priv->hw_tx_meta_cpy_tbl, NULL); - priv->hw_tx_meta_cpy_tbl = NULL; - } - if (priv->hw_esw_zero_tbl) { - flow_hw_table_destroy(dev, priv->hw_esw_zero_tbl, NULL); - priv->hw_esw_zero_tbl = NULL; - } - if (priv->hw_esw_sq_miss_tbl) { - flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_tbl, NULL); - priv->hw_esw_sq_miss_tbl = NULL; - } - if (priv->hw_esw_sq_miss_root_tbl) { - flow_hw_table_destroy(dev, priv->hw_esw_sq_miss_root_tbl, NULL); - priv->hw_esw_sq_miss_root_tbl = NULL; - } - if (lacp_rx_actions_tmpl) - flow_hw_actions_template_destroy(dev, lacp_rx_actions_tmpl, NULL); - if (tx_meta_actions_tmpl) - flow_hw_actions_template_destroy(dev, tx_meta_actions_tmpl, NULL); - if (jump_one_actions_tmpl) - flow_hw_actions_template_destroy(dev, jump_one_actions_tmpl, NULL); - if (port_actions_tmpl) - flow_hw_actions_template_destroy(dev, port_actions_tmpl, NULL); - if (regc_jump_actions_tmpl) - flow_hw_actions_template_destroy(dev, regc_jump_actions_tmpl, NULL); - if (lacp_rx_items_tmpl) - flow_hw_pattern_template_destroy(dev, lacp_rx_items_tmpl, NULL); - if (tx_meta_items_tmpl) - flow_hw_pattern_template_destroy(dev, tx_meta_items_tmpl, NULL); - if (port_items_tmpl) - flow_hw_pattern_template_destroy(dev, port_items_tmpl, NULL); - if (regc_sq_items_tmpl) - flow_hw_pattern_template_destroy(dev, regc_sq_items_tmpl, NULL); - if (esw_mgr_items_tmpl) - flow_hw_pattern_template_destroy(dev, esw_mgr_items_tmpl, NULL); - return ret; + flow_hw_cleanup_ctrl_fdb_tables(dev); + return -EINVAL; } static void @@ -7019,6 +7078,38 @@ } } +static int +flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr, + uint16_t nb_queue, + const struct rte_flow_queue_attr *queue_attr[], + struct rte_flow_error *error) +{ + uint32_t size; + unsigned int i; + + if (port_attr == NULL) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Port attributes must be non-NULL"); + + if (nb_queue == 0) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "At least one flow queue is required"); + + if (queue_attr == NULL) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Queue attributes must be non-NULL"); + + size = queue_attr[0]->size; + for (i = 1; i < nb_queue; ++i) { + if (queue_attr[i]->size != size) + return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "All flow queues must have the same size"); + } + + return 0; +} + /** * Configure port HWS resources. * @@ -7068,10 +7159,8 @@ int ret = 0; uint32_t action_flags; - if (!port_attr || !nb_queue || !queue_attr) { - rte_errno = EINVAL; - goto err; - } + if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error)) + return -rte_errno; /* In case re-configuring, release existing context at first. */ if (priv->dr_ctx) { /* */ @@ -7104,14 +7193,6 @@ /* Allocate the queue job descriptor LIFO. */ mem_size = sizeof(priv->hw_q[0]) * nb_q_updated; for (i = 0; i < nb_q_updated; i++) { - /* - * Check if the queues' size are all the same as the - * limitation from HWS layer. 
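flow_hw_validate_attributes() front-loads the checks that used to be scattered through flow_hw_configure(): non-NULL attributes, at least one queue, and one common queue size. The size check in isolation:

#include <errno.h>
#include <stdint.h>
#include <rte_flow.h>

static int
check_queue_attrs(const struct rte_flow_queue_attr *qa[], uint16_t n)
{
    uint16_t i;

    if (n == 0 || qa == NULL)
        return -EINVAL;
    for (i = 1; i < n; i++)
        if (qa[i]->size != qa[0]->size)
            return -EINVAL; /* all queues must share one size */
    return 0;
}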
- */ - if (_queue_attr[i]->size != _queue_attr[0]->size) { - rte_errno = EINVAL; - goto err; - } mem_size += (sizeof(struct mlx5_hw_q_job *) + sizeof(struct mlx5_hw_q_job) + sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN + @@ -7294,6 +7375,14 @@ priv->hws_strict_queue = 1; return 0; err: + priv->hws_strict_queue = 0; + flow_hw_destroy_vlan(dev); + if (priv->hws_age_req) + mlx5_hws_age_pool_destroy(priv); + if (priv->hws_cpool) { + mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); + priv->hws_cpool = NULL; + } if (priv->hws_ctpool) { flow_hw_ct_pool_destroy(dev, priv->hws_ctpool); priv->hws_ctpool = NULL; @@ -7302,34 +7391,44 @@ flow_hw_ct_mng_destroy(dev, priv->ct_mng); priv->ct_mng = NULL; } - if (priv->hws_age_req) - mlx5_hws_age_pool_destroy(priv); - if (priv->hws_cpool) { - mlx5_hws_cnt_pool_destroy(priv->sh, priv->hws_cpool); - priv->hws_cpool = NULL; - } + flow_hw_cleanup_ctrl_fdb_tables(dev); flow_hw_free_vport_actions(priv); + if (priv->hw_def_miss) { + mlx5dr_action_destroy(priv->hw_def_miss); + priv->hw_def_miss = NULL; + } + flow_hw_cleanup_tx_repr_tagging(dev); for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) { - if (priv->hw_drop[i]) + if (priv->hw_drop[i]) { mlx5dr_action_destroy(priv->hw_drop[i]); - if (priv->hw_tag[i]) + priv->hw_drop[i] = NULL; + } + if (priv->hw_tag[i]) { mlx5dr_action_destroy(priv->hw_tag[i]); + priv->hw_drop[i] = NULL; + } } - flow_hw_destroy_vlan(dev); - if (dr_ctx) + mlx5_flow_meter_uninit(dev); + flow_hw_cleanup_ctrl_rx_tables(dev); + if (dr_ctx) { claim_zero(mlx5dr_context_close(dr_ctx)); - for (i = 0; i < nb_q_updated; i++) { - rte_ring_free(priv->hw_q[i].indir_iq); - rte_ring_free(priv->hw_q[i].indir_cq); + priv->dr_ctx = NULL; + } + if (priv->hw_q) { + for (i = 0; i < nb_q_updated; i++) { + rte_ring_free(priv->hw_q[i].indir_iq); + rte_ring_free(priv->hw_q[i].indir_cq); + } + mlx5_free(priv->hw_q); + priv->hw_q = NULL; } - mlx5_free(priv->hw_q); - priv->hw_q = NULL; if (priv->acts_ipool) { mlx5_ipool_destroy(priv->acts_ipool); priv->acts_ipool = NULL; } if (_queue_attr) mlx5_free(_queue_attr); + priv->nb_queue = 0; /* Do not overwrite the internal errno information. 
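The reworked error path in flow_hw_configure() frees resources in reverse order of creation and nulls every pointer it releases, so a later device close cannot double-free. A sketch of the free-and-clear idiom it relies on:

#include <stddef.h>

static void
free_and_clear(void **p, void (*free_fn)(void *))
{
    if (*p != NULL) {
        free_fn(*p);
        *p = NULL; /* keeps repeated teardown safe */
    }
}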
*/ if (ret) return ret; @@ -7357,6 +7456,7 @@ return; flow_hw_rxq_flag_set(dev, false); flow_hw_flush_all_ctrl_flows(dev); + flow_hw_cleanup_ctrl_fdb_tables(dev); flow_hw_cleanup_tx_repr_tagging(dev); flow_hw_cleanup_ctrl_rx_tables(dev); while (!LIST_EMPTY(&priv->flow_hw_tbl_ongo)) { @@ -7381,6 +7481,8 @@ if (priv->hw_tag[i]) mlx5dr_action_destroy(priv->hw_tag[i]); } + if (priv->hw_def_miss) + mlx5dr_action_destroy(priv->hw_def_miss); flow_hw_destroy_vlan(dev); flow_hw_free_vport_actions(priv); if (priv->acts_ipool) { @@ -7407,8 +7509,6 @@ } mlx5_free(priv->hw_q); priv->hw_q = NULL; - claim_zero(mlx5dr_context_close(priv->dr_ctx)); - priv->dr_ctx = NULL; priv->nb_queue = 0; } @@ -7708,6 +7808,13 @@ "CT is not enabled"); return 0; } + if (dev->data->port_id >= MLX5_INDIRECT_ACT_CT_MAX_PORT) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "CT supports port indexes up to " + RTE_STR(MLX5_ACTION_CTX_CT_MAX_PORT)); + return 0; + } ct = mlx5_ipool_zmalloc(pool->cts, &ct_idx); if (!ct) { rte_flow_error_set(error, rte_errno, @@ -8526,6 +8633,10 @@ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "empty context"); + if (!priv->hws_age_req) + return rte_flow_error_set(error, ENOENT, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "No aging initialized"); if (priv->hws_strict_queue) { if (queue_id >= age_info->hw_q_age->nb_rings) return rte_flow_error_set(error, EINVAL, @@ -8949,8 +9060,9 @@ proxy_port_id, port_id); return 0; } - if (!proxy_priv->hw_esw_sq_miss_root_tbl || - !proxy_priv->hw_esw_sq_miss_tbl) { + if (!proxy_priv->hw_ctrl_fdb || + !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || + !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) { DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " "default flow tables were not created.", proxy_port_id, port_id); @@ -8982,7 +9094,8 @@ actions[2] = (struct rte_flow_action) { .type = RTE_FLOW_ACTION_TYPE_END, }; - ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_root_tbl, + ret = flow_hw_create_ctrl_flow(dev, proxy_dev, + proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl, items, 0, actions, 0, &flow_info, external); if (ret) { DRV_LOG(ERR, "Port %u failed to create root SQ miss flow rule for SQ %u, ret %d", @@ -9013,7 +9126,8 @@ .type = RTE_FLOW_ACTION_TYPE_END, }; flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS; - ret = flow_hw_create_ctrl_flow(dev, proxy_dev, proxy_priv->hw_esw_sq_miss_tbl, + ret = flow_hw_create_ctrl_flow(dev, proxy_dev, + proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl, items, 0, actions, 0, &flow_info, external); if (ret) { DRV_LOG(ERR, "Port %u failed to create HWS SQ miss flow rule for SQ %u, ret %d", @@ -9059,8 +9173,9 @@ proxy_priv = proxy_dev->data->dev_private; if (!proxy_priv->dr_ctx) return 0; - if (!proxy_priv->hw_esw_sq_miss_root_tbl || - !proxy_priv->hw_esw_sq_miss_tbl) + if (!proxy_priv->hw_ctrl_fdb || + !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl || + !proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl) return 0; cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows); while (cf != NULL) { @@ -9127,7 +9242,7 @@ proxy_port_id, port_id); return 0; } - if (!proxy_priv->hw_esw_zero_tbl) { + if (!proxy_priv->hw_ctrl_fdb || !proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl) { DRV_LOG(ERR, "Transfer proxy port (port %u) of port %u was configured, but " "default flow tables were not created.", proxy_port_id, port_id); @@ -9135,7 +9250,7 @@ return -rte_errno; } return flow_hw_create_ctrl_flow(dev, proxy_dev, - proxy_priv->hw_esw_zero_tbl, + 
proxy_priv->hw_ctrl_fdb->hw_esw_zero_tbl, items, 0, actions, 0, &flow_info, false); } @@ -9187,10 +9302,12 @@ }; MLX5_ASSERT(priv->master); - if (!priv->dr_ctx || !priv->hw_tx_meta_cpy_tbl) + if (!priv->dr_ctx || + !priv->hw_ctrl_fdb || + !priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl) return 0; return flow_hw_create_ctrl_flow(dev, dev, - priv->hw_tx_meta_cpy_tbl, + priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl, eth_all, 0, copy_reg_action, 0, &flow_info, false); } @@ -9282,11 +9399,11 @@ .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX, }; - MLX5_ASSERT(priv->master); - if (!priv->dr_ctx || !priv->hw_lacp_rx_tbl) + if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl) return 0; - return flow_hw_create_ctrl_flow(dev, dev, priv->hw_lacp_rx_tbl, eth_lacp, 0, - miss_action, 0, &flow_info, false); + return flow_hw_create_ctrl_flow(dev, dev, + priv->hw_ctrl_fdb->hw_lacp_rx_tbl, + eth_lacp, 0, miss_action, 0, &flow_info, false); } static uint32_t diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_flow_meter.c dpdk-22.11.5/drivers/net/mlx5/mlx5_flow_meter.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_flow_meter.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_flow_meter.c 2024-04-22 11:25:10.000000000 +0000 @@ -618,6 +618,7 @@ meter_profile_id); } +#if defined(HAVE_MLX5_HWS_SUPPORT) /** * Callback to add MTR profile with HWS. * @@ -707,6 +708,7 @@ memset(fmp, 0, sizeof(struct mlx5_flow_meter_profile)); return 0; } +#endif /** * Find policy by id. @@ -849,6 +851,7 @@ return 0; } +#if defined(HAVE_MLX5_HWS_SUPPORT) /** * Callback to check MTR policy action validate for HWS * @@ -885,6 +888,7 @@ } return 0; } +#endif static int __mlx5_flow_meter_policy_delete(struct rte_eth_dev *dev, @@ -1211,6 +1215,7 @@ &policy_idx); } +#if defined(HAVE_MLX5_HWS_SUPPORT) /** * Callback to delete MTR policy for HWS. * @@ -1547,7 +1552,7 @@ RTE_MTR_ERROR_TYPE_UNSPECIFIED, NULL, "Failed to create meter policy."); } - +#endif /** * Check meter validation. * @@ -1915,6 +1920,7 @@ NULL, "Failed to create devx meter."); } +#if defined(HAVE_MLX5_HWS_SUPPORT) /** * Create meter rules. * @@ -1998,6 +2004,7 @@ __atomic_add_fetch(&policy->ref_cnt, 1, __ATOMIC_RELAXED); return 0; } +#endif static int mlx5_flow_meter_params_flush(struct rte_eth_dev *dev, @@ -2482,6 +2489,7 @@ .stats_read = mlx5_flow_meter_stats_read, }; +#if defined(HAVE_MLX5_HWS_SUPPORT) static const struct rte_mtr_ops mlx5_flow_mtr_hws_ops = { .capabilities_get = mlx5_flow_mtr_cap_get, .meter_profile_add = mlx5_flow_meter_profile_hws_add, @@ -2500,6 +2508,7 @@ .stats_update = NULL, .stats_read = NULL, }; +#endif /** * Get meter operations. 
@@ -2515,12 +2524,16 @@ int mlx5_flow_meter_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) { +#if defined(HAVE_MLX5_HWS_SUPPORT) struct mlx5_priv *priv = dev->data->dev_private; if (priv->sh->config.dv_flow_en == 2) *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_hws_ops; else *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; +#else + *(const struct rte_mtr_ops **)arg = &mlx5_flow_mtr_ops; +#endif return 0; } @@ -2899,7 +2912,6 @@ struct mlx5_flow_meter_profile *fmp; struct mlx5_legacy_flow_meter *legacy_fm; struct mlx5_flow_meter_info *fm; - struct mlx5_flow_meter_policy *policy; struct mlx5_flow_meter_sub_policy *sub_policy; void *tmp; uint32_t i, mtr_idx, policy_idx; @@ -2967,15 +2979,20 @@ mlx5_l3t_destroy(priv->policy_idx_tbl); priv->policy_idx_tbl = NULL; } +#if defined(HAVE_MLX5_HWS_SUPPORT) if (priv->mtr_policy_arr) { + struct mlx5_flow_meter_policy *policy; + for (i = 0; i < priv->mtr_config.nb_meter_policies; i++) { policy = mlx5_flow_meter_policy_find(dev, i, &policy_idx); - if (policy->initialized) + if (policy->initialized) { mlx5_flow_meter_policy_hws_delete(dev, i, error); + } } } +#endif if (priv->mtr_profile_tbl) { MLX5_L3T_FOREACH(priv->mtr_profile_tbl, i, entry) { fmp = entry; @@ -2989,14 +3006,17 @@ mlx5_l3t_destroy(priv->mtr_profile_tbl); priv->mtr_profile_tbl = NULL; } +#if defined(HAVE_MLX5_HWS_SUPPORT) if (priv->mtr_profile_arr) { for (i = 0; i < priv->mtr_config.nb_meter_profiles; i++) { fmp = mlx5_flow_meter_profile_find(priv, i); - if (fmp->initialized) + if (fmp->initialized) { mlx5_flow_meter_profile_hws_delete(dev, i, error); + } } } +#endif /* Delete default policy table. */ mlx5_flow_destroy_def_policy(dev); if (priv->sh->refcnt == 1) diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_hws_cnt.c dpdk-22.11.5/drivers/net/mlx5/mlx5_hws_cnt.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_hws_cnt.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_hws_cnt.c 2024-04-22 11:25:10.000000000 +0000 @@ -25,28 +25,32 @@ __hws_cnt_id_load(struct mlx5_hws_cnt_pool *cpool) { uint32_t preload; - uint32_t q_num = cpool->cache->q_num; + uint32_t q_num; uint32_t cnt_num = mlx5_hws_cnt_pool_get_size(cpool); cnt_id_t cnt_id; uint32_t qidx, iidx = 0; struct rte_ring *qcache = NULL; - /* - * Counter ID order is important for tracking the max number of in used - * counter for querying, which means counter internal index order must - * be from zero to the number user configured, i.e: 0 - 8000000. - * Need to load counter ID in this order into the cache firstly, - * and then the global free list. - * In the end, user fetch the counter from minimal to the maximum. - */ - preload = RTE_MIN(cpool->cache->preload_sz, cnt_num / q_num); - for (qidx = 0; qidx < q_num; qidx++) { - for (; iidx < preload * (qidx + 1); iidx++) { - cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx); - qcache = cpool->cache->qcache[qidx]; - if (qcache) - rte_ring_enqueue_elem(qcache, &cnt_id, - sizeof(cnt_id)); + /* If the counter cache was disabled, only the free list must be prepopulated. */ + if (cpool->cache != NULL) { + q_num = cpool->cache->q_num; + /* + * Counter ID order is important for tracking the max number of in used + * counter for querying, which means counter internal index order must + * be from zero to the number user configured, i.e: 0 - 8000000. + * Need to load counter ID in this order into the cache firstly, + * and then the global free list. + * In the end, user fetch the counter from minimal to the maximum.
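Condensed, the new load order is: fill the per-queue caches in ascending counter-ID order only when a cache exists, then push every remaining ID onto the global free list. A sketch under that reading (the free_list ring name is assumed from mlx5_hws_cnt.h):

    if (cpool->cache != NULL) {
        preload = RTE_MIN(cpool->cache->preload_sz, cnt_num / q_num);
        for (qidx = 0; qidx < q_num; qidx++)
            for (; iidx < preload * (qidx + 1); iidx++) {
                cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx);
                if (cpool->cache->qcache[qidx] != NULL)
                    rte_ring_enqueue_elem(cpool->cache->qcache[qidx],
                                          &cnt_id, sizeof(cnt_id));
            }
    }
    /* IDs not preloaded into any queue cache go to the global free list. */
    for (; iidx < cnt_num; iidx++) {
        cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx);
        rte_ring_enqueue_elem(cpool->free_list, &cnt_id, sizeof(cnt_id));
    }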
+ */ + preload = RTE_MIN(cpool->cache->preload_sz, cnt_num / q_num); + for (qidx = 0; qidx < q_num; qidx++) { + for (; iidx < preload * (qidx + 1); iidx++) { + cnt_id = mlx5_hws_cnt_id_gen(cpool, iidx); + qcache = cpool->cache->qcache[qidx]; + if (qcache) + rte_ring_enqueue_elem(qcache, &cnt_id, + sizeof(cnt_id)); + } } } for (; iidx < cnt_num; iidx++) { @@ -333,6 +337,55 @@ return NULL; } +static bool +mlx5_hws_cnt_should_enable_cache(const struct mlx5_hws_cnt_pool_cfg *pcfg, + const struct mlx5_hws_cache_param *ccfg) +{ + /* + * Enable cache if and only if there are enough counters requested + * to populate all of the caches. + */ + return pcfg->request_num >= ccfg->q_num * ccfg->size; +} + +static struct mlx5_hws_cnt_pool_caches * +mlx5_hws_cnt_cache_init(const struct mlx5_hws_cnt_pool_cfg *pcfg, + const struct mlx5_hws_cache_param *ccfg) +{ + struct mlx5_hws_cnt_pool_caches *cache; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t qidx; + + /* If counter pool is big enough, setup the counter pool cache. */ + cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, + sizeof(*cache) + + sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) + * ccfg->q_num, 0, SOCKET_ID_ANY); + if (cache == NULL) + return NULL; + /* Store the necessary cache parameters. */ + cache->fetch_sz = ccfg->fetch_sz; + cache->preload_sz = ccfg->preload_sz; + cache->threshold = ccfg->threshold; + cache->q_num = ccfg->q_num; + for (qidx = 0; qidx < ccfg->q_num; qidx++) { + snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); + cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, + SOCKET_ID_ANY, + RING_F_SP_ENQ | RING_F_SC_DEQ | + RING_F_EXACT_SZ); + if (cache->qcache[qidx] == NULL) + goto error; + } + return cache; + +error: + while (qidx--) + rte_ring_free(cache->qcache[qidx]); + mlx5_free(cache); + return NULL; +} + struct mlx5_hws_cnt_pool * mlx5_hws_cnt_pool_init(struct mlx5_dev_ctx_shared *sh, const struct mlx5_hws_cnt_pool_cfg *pcfg, @@ -341,7 +394,6 @@ char mz_name[RTE_MEMZONE_NAMESIZE]; struct mlx5_hws_cnt_pool *cntp; uint64_t cnt_num = 0; - uint32_t qidx; MLX5_ASSERT(pcfg); MLX5_ASSERT(ccfg); @@ -351,17 +403,6 @@ return NULL; cntp->cfg = *pcfg; - cntp->cache = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, - sizeof(*cntp->cache) + - sizeof(((struct mlx5_hws_cnt_pool_caches *)0)->qcache[0]) - * ccfg->q_num, 0, SOCKET_ID_ANY); - if (cntp->cache == NULL) - goto error; - /* store the necessary cache parameters. */ - cntp->cache->fetch_sz = ccfg->fetch_sz; - cntp->cache->preload_sz = ccfg->preload_sz; - cntp->cache->threshold = ccfg->threshold; - cntp->cache->q_num = ccfg->q_num; if (pcfg->request_num > sh->hws_max_nb_counters) { DRV_LOG(ERR, "Counter number %u " "is greater than the maximum supported (%u).", @@ -408,13 +449,10 @@ DRV_LOG(ERR, "failed to create reuse list ring"); goto error; } - for (qidx = 0; qidx < ccfg->q_num; qidx++) { - snprintf(mz_name, sizeof(mz_name), "%s_qc/%x", pcfg->name, qidx); - cntp->cache->qcache[qidx] = rte_ring_create(mz_name, ccfg->size, - SOCKET_ID_ANY, - RING_F_SP_ENQ | RING_F_SC_DEQ | - RING_F_EXACT_SZ); - if (cntp->cache->qcache[qidx] == NULL) + /* Allocate counter cache only if needed. */ + if (mlx5_hws_cnt_should_enable_cache(pcfg, ccfg)) { + cntp->cache = mlx5_hws_cnt_cache_init(pcfg, ccfg); + if (cntp->cache == NULL) goto error; } /* Initialize the time for aging-out calculation. 
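The decision to build the cache at all reduces to a single comparison; a standalone restatement of the predicate added above (hypothetical free function, same logic):

    #include <stdbool.h>
    #include <stdint.h>

    /* Enable the per-queue cache only if the requested counter count can
     * populate every cache ring; otherwise preloading would drain the
     * global free list. */
    static bool
    should_enable_cache(uint32_t request_num, uint32_t q_num, uint32_t cache_size)
    {
        return request_num >= q_num * cache_size;
    }

For example, with 8 queues and 256-entry cache rings, fewer than 2048 requested counters now yields a cache-less pool, and all allocations go through the global rings.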
*/ diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_hws_cnt.h dpdk-22.11.5/drivers/net/mlx5/mlx5_hws_cnt.h --- dpdk-22.11.4/drivers/net/mlx5/mlx5_hws_cnt.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_hws_cnt.h 2024-04-22 11:25:10.000000000 +0000 @@ -533,6 +533,32 @@ return 0; } +/** + * Decide if the given queue can be used to perform counter allocation/deallocation + * based on counter configuration. + * + * @param[in] priv + * Pointer to the port private data structure. + * @param[in] queue + * Pointer to the queue index. + * + * @return + * @p queue if cache related to the queue can be used. NULL otherwise. + */ +static __rte_always_inline uint32_t * +mlx5_hws_cnt_get_queue(struct mlx5_priv *priv, uint32_t *queue) +{ + if (priv && priv->hws_cpool) { + /* Do not use queue cache if counter cache is disabled. */ + if (priv->hws_cpool->cache == NULL) + return NULL; + return queue; + } + /* This case should not be reached if counter pool was successfully configured. */ + MLX5_ASSERT(false); + return NULL; +} + static __rte_always_inline unsigned int mlx5_hws_cnt_pool_get_size(struct mlx5_hws_cnt_pool *cpool) { diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_rx.c dpdk-22.11.5/drivers/net/mlx5/mlx5_rx.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_rx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_rx.c 2024-04-22 11:25:10.000000000 +0000 @@ -601,7 +601,8 @@ * @param mprq * Indication if it is called from MPRQ. * @return - * 0 in case of empty CQE, MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, + * 0 in case of empty CQE, + * MLX5_REGULAR_ERROR_CQE_RET in case of error CQE, * MLX5_CRITICAL_ERROR_CQE_RET in case of error CQE lead to Rx queue reset, * otherwise the packet size in regular RxQ, * and striding byte count format in mprq case.
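A hypothetical call site for the helper above; the put signature is assumed from the surrounding header and the variable names are illustrative only:

    uint32_t queue = 0; /* e.g. the flow queue the caller was given */
    uint32_t *qptr = mlx5_hws_cnt_get_queue(priv, &queue);

    /* qptr degrades to NULL when the pool has no cache, which the pool
     * helpers take to mean "use the global rings directly". */
    mlx5_hws_cnt_pool_put(priv->hws_cpool, qptr, &cnt_id);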
@@ -675,6 +676,11 @@ if (ret == MLX5_RECOVERY_ERROR_RET || ret == MLX5_RECOVERY_COMPLETED_RET) return MLX5_CRITICAL_ERROR_CQE_RET; + if (!mprq && ret == MLX5_RECOVERY_IGNORE_RET) { + *skip_cnt = 1; + ++rxq->cq_ci; + return MLX5_ERROR_CQE_MASK; + } } else { return 0; } @@ -928,19 +934,18 @@ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe, &skip_cnt, false); if (unlikely(len & MLX5_ERROR_CQE_MASK)) { + /* We drop packets with non-critical errors */ + rte_mbuf_raw_free(rep); if (len == MLX5_CRITICAL_ERROR_CQE_RET) { - rte_mbuf_raw_free(rep); rq_ci = rxq->rq_ci << sges_n; break; } + /* Skip the specified number of error CQE packets */ rq_ci >>= sges_n; rq_ci += skip_cnt; rq_ci <<= sges_n; - idx = rq_ci & wqe_cnt; - wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx]; - seg = (*rxq->elts)[idx]; - cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; - len = len & ~MLX5_ERROR_CQE_MASK; + MLX5_ASSERT(!pkt); + continue; } if (len == 0) { rte_mbuf_raw_free(rep); diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_stats.c dpdk-22.11.5/drivers/net/mlx5/mlx5_stats.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_stats.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_stats.c 2024-04-22 11:25:10.000000000 +0000 @@ -39,24 +39,36 @@ unsigned int n) { struct mlx5_priv *priv = dev->data->dev_private; - unsigned int i; - uint64_t counters[n]; + uint64_t counters[MLX5_MAX_XSTATS]; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; + unsigned int i; + uint16_t stats_n = 0; + uint16_t stats_n_2nd = 0; uint16_t mlx5_stats_n = xstats_ctrl->mlx5_stats_n; + bool bond_master = (priv->master && priv->pf_bond >= 0); if (n >= mlx5_stats_n && stats) { - int stats_n; int ret; - stats_n = mlx5_os_get_stats_n(dev); - if (stats_n < 0) - return stats_n; - if (xstats_ctrl->stats_n != stats_n) + ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); + if (ret < 0) + return ret; + /* + * The number of statistics fetched via "ETH_SS_STATS" may vary because + * of the port configuration each time. This is also true between 2 + * ports. There might be a case that the numbers are the same even if + * configurations are different. + * It is not recommended to change the configuration without using + * RTE API. The port(traffic) restart may trigger another initialization + * to make sure the maps are correct.
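The bonding-aware trigger in this hunk can be condensed as follows (taken directly from the logic above; bond_master means the port is the master PF of a kernel bond):

    int ret;
    uint16_t stats_n = 0, stats_n_2nd = 0;
    bool bond_master = priv->master && priv->pf_bond >= 0;

    ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd);
    if (ret < 0)
        return ret;
    /* A count change on either bond member rebuilds the xstats map. */
    if (xstats_ctrl->stats_n != stats_n ||
        (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd))
        mlx5_os_stats_init(dev);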
+ */ + if (xstats_ctrl->stats_n != stats_n || + (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) mlx5_os_stats_init(dev); - ret = mlx5_os_read_dev_counters(dev, counters); - if (ret) + ret = mlx5_os_read_dev_counters(dev, bond_master, counters); + if (ret < 0) return ret; - for (i = 0; i != mlx5_stats_n; ++i) { + for (i = 0; i != mlx5_stats_n; i++) { stats[i].id = i; if (xstats_ctrl->info[i].dev) { uint64_t wrap_n; @@ -225,30 +237,32 @@ { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; - int stats_n; unsigned int i; uint64_t *counters; int ret; + uint16_t stats_n = 0; + uint16_t stats_n_2nd = 0; + bool bond_master = (priv->master && priv->pf_bond >= 0); - stats_n = mlx5_os_get_stats_n(dev); - if (stats_n < 0) { + ret = mlx5_os_get_stats_n(dev, bond_master, &stats_n, &stats_n_2nd); + if (ret < 0) { DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id, - strerror(-stats_n)); - return stats_n; + strerror(-ret)); + return ret; } - if (xstats_ctrl->stats_n != stats_n) + if (xstats_ctrl->stats_n != stats_n || + (bond_master && xstats_ctrl->stats_n_2nd != stats_n_2nd)) mlx5_os_stats_init(dev); - counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * - xstats_ctrl->mlx5_stats_n, 0, - SOCKET_ID_ANY); + /* Considering to use stack directly. */ + counters = mlx5_malloc(MLX5_MEM_SYS, sizeof(*counters) * xstats_ctrl->mlx5_stats_n, + 0, SOCKET_ID_ANY); if (!counters) { - DRV_LOG(WARNING, "port %u unable to allocate memory for xstats " - "counters", + DRV_LOG(WARNING, "port %u unable to allocate memory for xstats counters", dev->data->port_id); rte_errno = ENOMEM; return -rte_errno; } - ret = mlx5_os_read_dev_counters(dev, counters); + ret = mlx5_os_read_dev_counters(dev, bond_master, counters); if (ret) { DRV_LOG(ERR, "port %u cannot read device counters: %s", dev->data->port_id, strerror(rte_errno)); diff -Nru dpdk-22.11.4/drivers/net/mlx5/mlx5_trigger.c dpdk-22.11.5/drivers/net/mlx5/mlx5_trigger.c --- dpdk-22.11.4/drivers/net/mlx5/mlx5_trigger.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/mlx5_trigger.c 2024-04-22 11:25:10.000000000 +0000 @@ -1524,7 +1524,7 @@ } if (priv->isolated) return 0; - if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) + if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) if (mlx5_flow_hw_lacp_rx_flow(dev)) goto error; if (dev->data->promiscuous) @@ -1632,14 +1632,14 @@ DRV_LOG(INFO, "port %u FDB default rule is disabled", dev->data->port_id); } - if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) { + if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0 && priv->master) { ret = mlx5_flow_lacp_miss(dev); if (ret) DRV_LOG(INFO, "port %u LACP rule cannot be created - " "forward LACP to kernel.", dev->data->port_id); else - DRV_LOG(INFO, "LACP traffic will be missed in port %u." - , dev->data->port_id); + DRV_LOG(INFO, "LACP traffic will be missed in port %u.", + dev->data->port_id); } if (priv->isolated) return 0; diff -Nru dpdk-22.11.4/drivers/net/mlx5/windows/mlx5_ethdev_os.c dpdk-22.11.5/drivers/net/mlx5/windows/mlx5_ethdev_os.c --- dpdk-22.11.4/drivers/net/mlx5/windows/mlx5_ethdev_os.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mlx5/windows/mlx5_ethdev_os.c 2024-04-22 11:25:10.000000000 +0000 @@ -178,20 +178,29 @@ return -ENOTSUP; } -/** +/* * Query the number of statistics provided by ETHTOOL. * * @param dev * Pointer to Ethernet device. + * @param bond_master + * Indicate if the device is a bond master. 
+ * @param n_stats + * Pointer to number of stats to store. + * @param n_stats_sec + * Pointer to number of stats to store for the 2nd port of the bond. * * @return - * Number of statistics on success, negative errno value otherwise and - * rte_errno is set. + * 0 on success, negative errno value otherwise and rte_errno is set. */ int -mlx5_os_get_stats_n(struct rte_eth_dev *dev) +mlx5_os_get_stats_n(struct rte_eth_dev *dev, bool bond_master, + uint16_t *n_stats, uint16_t *n_stats_sec) { RTE_SET_USED(dev); + RTE_SET_USED(bond_master); + RTE_SET_USED(n_stats); + RTE_SET_USED(n_stats_sec); return -ENOTSUP; } @@ -221,6 +230,8 @@ * * @param dev * Pointer to Ethernet device. + * @param bond_master + * Indicate if the device is a bond master. * @param[out] stats * Counters table output buffer. * @@ -229,9 +240,10 @@ * rte_errno is set. */ int -mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats) +mlx5_os_read_dev_counters(struct rte_eth_dev *dev, bool bond_master, uint64_t *stats) { RTE_SET_USED(dev); + RTE_SET_USED(bond_master); RTE_SET_USED(stats); return -ENOTSUP; } diff -Nru dpdk-22.11.4/drivers/net/mvneta/mvneta_ethdev.c dpdk-22.11.5/drivers/net/mvneta/mvneta_ethdev.c --- dpdk-22.11.4/drivers/net/mvneta/mvneta_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mvneta/mvneta_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -198,7 +198,8 @@ RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV6, RTE_PTYPE_L4_TCP, - RTE_PTYPE_L4_UDP + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN }; return ptypes; diff -Nru dpdk-22.11.4/drivers/net/mvpp2/mrvl_ethdev.c dpdk-22.11.5/drivers/net/mvpp2/mrvl_ethdev.c --- dpdk-22.11.4/drivers/net/mvpp2/mrvl_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/mvpp2/mrvl_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -1777,7 +1777,8 @@ RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L2_ETHER_ARP, RTE_PTYPE_L4_TCP, - RTE_PTYPE_L4_UDP + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN }; return ptypes; diff -Nru dpdk-22.11.4/drivers/net/netvsc/hn_rxtx.c dpdk-22.11.5/drivers/net/netvsc/hn_rxtx.c --- dpdk-22.11.4/drivers/net/netvsc/hn_rxtx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/netvsc/hn_rxtx.c 2024-04-22 11:25:10.000000000 +0000 @@ -612,7 +612,9 @@ RTE_PTYPE_L4_MASK); if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) { - m->vlan_tci = info->vlan_info; + m->vlan_tci = RTE_VLAN_TCI_MAKE(NDIS_VLAN_INFO_ID(info->vlan_info), + NDIS_VLAN_INFO_PRI(info->vlan_info), + NDIS_VLAN_INFO_CFI(info->vlan_info)); m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN; /* NDIS always strips tag, put it back if necessary */ @@ -1332,7 +1334,9 @@ if (m->ol_flags & RTE_MBUF_F_TX_VLAN) { pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN); - *pi_data = m->vlan_tci; + *pi_data = NDIS_VLAN_INFO_MAKE(RTE_VLAN_TCI_ID(m->vlan_tci), + RTE_VLAN_TCI_PRI(m->vlan_tci), + RTE_VLAN_TCI_DEI(m->vlan_tci)); } if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) { diff -Nru dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower.c dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower.c --- dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower.c 2024-04-22 11:25:10.000000000 +0000 @@ -182,61 +182,6 @@ return 0; } -/* Reset and stop device. The device can not be restarted. 
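The RTE_PTYPE_UNKNOWN lines added to the mvneta and mvpp2 tables above (and to the pfe, tap and thunderx tables later in this diff) all repair the same contract: dev_supported_ptypes_get() returns a bare pointer with no length, and the ethdev layer walks the array until it hits RTE_PTYPE_UNKNOWN, so an unterminated table reads past its end. A minimal well-formed table:

    #include <rte_mbuf_ptype.h>

    /* Walked by rte_eth_dev_get_supported_ptypes() until the sentinel. */
    static const uint32_t ptypes[] = {
        RTE_PTYPE_L3_IPV4,
        RTE_PTYPE_L3_IPV6,
        RTE_PTYPE_L4_TCP,
        RTE_PTYPE_L4_UDP,
        RTE_PTYPE_UNKNOWN /* mandatory terminator */
    };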
*/ -static int -nfp_flower_pf_close(struct rte_eth_dev *dev) -{ - uint16_t i; - struct nfp_net_hw *hw; - struct nfp_pf_dev *pf_dev; - struct nfp_net_txq *this_tx_q; - struct nfp_net_rxq *this_rx_q; - struct nfp_flower_representor *repr; - struct nfp_app_fw_flower *app_fw_flower; - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - repr = (struct nfp_flower_representor *)dev->data->dev_private; - hw = repr->app_fw_flower->pf_hw; - pf_dev = hw->pf_dev; - app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv); - - /* - * We assume that the DPDK application is stopping all the - * threads/queues before calling the device close function. - */ - nfp_pf_repr_disable_queues(dev); - - /* Clear queues */ - for (i = 0; i < dev->data->nb_tx_queues; i++) { - this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i]; - nfp_net_reset_tx_queue(this_tx_q); - } - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i]; - nfp_net_reset_rx_queue(this_rx_q); - } - - /* Cancel possible impending LSC work here before releasing the port*/ - rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev); - - nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); - - /* Now it is safe to free all PF resources */ - PMD_DRV_LOG(INFO, "Freeing PF resources"); - nfp_cpp_area_free(pf_dev->ctrl_area); - nfp_cpp_area_free(pf_dev->hwqueues_area); - free(pf_dev->hwinfo); - free(pf_dev->sym_tbl); - nfp_cpp_free(pf_dev->cpp); - rte_free(app_fw_flower); - rte_free(pf_dev); - - return 0; -} - static const struct eth_dev_ops nfp_flower_pf_vnic_ops = { .dev_infos_get = nfp_net_infos_get, .link_update = nfp_net_link_update, @@ -244,7 +189,6 @@ .dev_start = nfp_flower_pf_start, .dev_stop = nfp_flower_pf_stop, - .dev_close = nfp_flower_pf_close, }; static inline void @@ -1221,6 +1165,22 @@ return ret; } +void +nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev) +{ + struct nfp_app_fw_flower *app_fw_flower; + + app_fw_flower = pf_dev->app_fw_priv; + nfp_flower_cleanup_ctrl_vnic(app_fw_flower->ctrl_hw); + nfp_cpp_area_free(app_fw_flower->ctrl_hw->ctrl_area); + nfp_cpp_area_free(pf_dev->ctrl_area); + rte_free(app_fw_flower->pf_hw); + nfp_flow_priv_uninit(pf_dev); + if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) + PMD_DRV_LOG(WARNING, "Failed to free switch domain for device"); + rte_free(app_fw_flower); +} + int nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp) { diff -Nru dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower.h dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower.h --- dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower.h 2024-04-22 11:25:10.000000000 +0000 @@ -85,6 +85,7 @@ } int nfp_init_app_fw_flower(struct nfp_pf_dev *pf_dev); +void nfp_uninit_app_fw_flower(struct nfp_pf_dev *pf_dev); int nfp_secondary_init_app_fw_flower(struct nfp_cpp *cpp); uint16_t nfp_flower_pf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); diff -Nru dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower_representor.c dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower_representor.c --- dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower_representor.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower_representor.c 2024-04-22 11:25:10.000000000 +0000 @@ -524,12 +524,125 @@ return sent; } +static int +nfp_flower_repr_uninit(struct rte_eth_dev *eth_dev) +{ + uint16_t index; + struct nfp_flower_representor *repr; + + repr = 
eth_dev->data->dev_private; + rte_ring_free(repr->ring); + + if (repr->repr_type == NFP_REPR_TYPE_PHYS_PORT) { + index = NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM(repr->port_id); + repr->app_fw_flower->phy_reprs[index] = NULL; + } else { + index = repr->vf_id; + repr->app_fw_flower->vf_reprs[index] = NULL; + } + + return 0; +} + +static int +nfp_flower_pf_repr_uninit(struct rte_eth_dev *eth_dev) +{ + struct nfp_flower_representor *repr = eth_dev->data->dev_private; + + repr->app_fw_flower->pf_repr = NULL; + + return 0; +} + +static void +nfp_flower_repr_free(struct nfp_flower_representor *repr, + enum nfp_repr_type repr_type) +{ + switch (repr_type) { + case NFP_REPR_TYPE_PHYS_PORT: + nfp_flower_repr_uninit(repr->eth_dev); + break; + case NFP_REPR_TYPE_PF: + nfp_flower_pf_repr_uninit(repr->eth_dev); + break; + case NFP_REPR_TYPE_VF: + nfp_flower_repr_uninit(repr->eth_dev); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported repr port type."); + break; + } +} + +/* Reset and stop device. The device can not be restarted. */ +static int +nfp_flower_repr_dev_close(struct rte_eth_dev *dev) +{ + uint16_t i; + struct nfp_net_hw *hw; + struct nfp_pf_dev *pf_dev; + struct nfp_net_txq *this_tx_q; + struct nfp_net_rxq *this_rx_q; + struct nfp_flower_representor *repr; + struct nfp_app_fw_flower *app_fw_flower; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + repr = dev->data->dev_private; + app_fw_flower = repr->app_fw_flower; + hw = app_fw_flower->pf_hw; + pf_dev = hw->pf_dev; + + /* + * We assume that the DPDK application is stopping all the + * threads/queues before calling the device close function. + */ + nfp_net_disable_queues(dev); + + /* Clear queues */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + this_tx_q = dev->data->tx_queues[i]; + nfp_net_reset_tx_queue(this_tx_q); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + this_rx_q = dev->data->rx_queues[i]; + nfp_net_reset_rx_queue(this_rx_q); + } + + if (pf_dev->app_fw_id != NFP_APP_FW_FLOWER_NIC) + return -EINVAL; + + nfp_flower_repr_free(repr, repr->repr_type); + + for (i = 0; i < MAX_FLOWER_VFS; i++) { + if (app_fw_flower->vf_reprs[i] != NULL) + return 0; + } + + for (i = 0; i < MAX_FLOWER_PHYPORTS; i++) { + if (app_fw_flower->phy_reprs[i] != NULL) + return 0; + } + + if (app_fw_flower->pf_repr != NULL) + return 0; + + /* Now it is safe to free all PF resources */ + nfp_uninit_app_fw_flower(pf_dev); + nfp_pf_uninit(pf_dev); + + return 0; +} + static const struct eth_dev_ops nfp_flower_pf_repr_dev_ops = { .dev_infos_get = nfp_flower_repr_dev_infos_get, .dev_start = nfp_flower_pf_start, .dev_configure = nfp_flower_repr_dev_configure, .dev_stop = nfp_flower_pf_stop, + .dev_close = nfp_flower_repr_dev_close, .rx_queue_setup = nfp_pf_repr_rx_queue_setup, .tx_queue_setup = nfp_pf_repr_tx_queue_setup, @@ -551,6 +664,7 @@ .dev_start = nfp_flower_repr_dev_start, .dev_configure = nfp_flower_repr_dev_configure, .dev_stop = nfp_flower_repr_dev_stop, + .dev_close = nfp_flower_repr_dev_close, .rx_queue_setup = nfp_flower_repr_rx_queue_setup, .tx_queue_setup = nfp_flower_repr_tx_queue_setup, @@ -640,6 +754,7 @@ repr->app_fw_flower->pf_repr = repr; repr->app_fw_flower->pf_hw->eth_dev = eth_dev; + repr->eth_dev = eth_dev; return 0; } @@ -731,6 +846,8 @@ app_fw_flower->vf_reprs[index] = repr; } + repr->eth_dev = eth_dev; + return 0; mac_cleanup: @@ -741,6 +858,35 @@ return ret; } +static void +nfp_flower_repr_free_all(struct nfp_app_fw_flower *app_fw_flower) +{ + uint32_t i; + struct nfp_flower_representor *repr; + + for (i = 0; i 
< MAX_FLOWER_VFS; i++) { + repr = app_fw_flower->vf_reprs[i]; + if (repr != NULL) { + nfp_flower_repr_free(repr, NFP_REPR_TYPE_VF); + app_fw_flower->vf_reprs[i] = NULL; + } + } + + for (i = 0; i < MAX_FLOWER_PHYPORTS; i++) { + repr = app_fw_flower->phy_reprs[i]; + if (repr != NULL) { + nfp_flower_repr_free(repr, NFP_REPR_TYPE_PHYS_PORT); + app_fw_flower->phy_reprs[i] = NULL; + } + } + + repr = app_fw_flower->pf_repr; + if (repr != NULL) { + nfp_flower_repr_free(repr, NFP_REPR_TYPE_PF); + app_fw_flower->pf_repr = NULL; + } +} + static int nfp_flower_repr_alloc(struct nfp_app_fw_flower *app_fw_flower) { @@ -816,7 +962,7 @@ } if (i < app_fw_flower->num_phyport_reprs) - return ret; + goto repr_free; /* * Now allocate eth_dev's for VF representors. @@ -845,9 +991,14 @@ } if (i < app_fw_flower->num_vf_reprs) - return ret; + goto repr_free; return 0; + +repr_free: + nfp_flower_repr_free_all(app_fw_flower); + + return ret; } int @@ -866,7 +1017,7 @@ /* Allocate a switch domain for the flower app */ if (app_fw_flower->switch_domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID && - rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id)) { + rte_eth_switch_domain_alloc(&app_fw_flower->switch_domain_id) != 0) { PMD_INIT_LOG(WARNING, "failed to allocate switch domain for device"); } @@ -908,8 +1059,15 @@ ret = nfp_flower_repr_alloc(app_fw_flower); if (ret != 0) { PMD_INIT_LOG(ERR, "representors allocation failed"); - return -EINVAL; + ret = -EINVAL; + goto domain_free; } return 0; + +domain_free: + if (rte_eth_switch_domain_free(app_fw_flower->switch_domain_id) != 0) + PMD_INIT_LOG(WARNING, "Failed to free switch domain for device"); + + return ret; } diff -Nru dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower_representor.h dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower_representor.h --- dpdk-22.11.4/drivers/net/nfp/flower/nfp_flower_representor.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/flower/nfp_flower_representor.h 2024-04-22 11:25:10.000000000 +0000 @@ -34,6 +34,7 @@ struct rte_ring *ring; struct rte_eth_link link; struct rte_eth_stats repr_stats; + struct rte_eth_dev *eth_dev; }; int nfp_flower_repr_create(struct nfp_app_fw_flower *app_fw_flower); diff -Nru dpdk-22.11.4/drivers/net/nfp/nfp_common.h dpdk-22.11.5/drivers/net/nfp/nfp_common.h --- dpdk-22.11.4/drivers/net/nfp/nfp_common.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/nfp_common.h 2024-04-22 11:25:10.000000000 +0000 @@ -450,6 +450,7 @@ int nfp_net_set_vxlan_port(struct nfp_net_hw *hw, size_t idx, uint16_t port); int nfp_net_check_dma_mask(struct nfp_net_hw *hw, char *name); void nfp_net_irq_unmask(struct rte_eth_dev *dev); +void nfp_pf_uninit(struct nfp_pf_dev *pf_dev); #define NFP_NET_DEV_PRIVATE_TO_HW(adapter)\ (&((struct nfp_net_adapter *)adapter)->hw) diff -Nru dpdk-22.11.4/drivers/net/nfp/nfp_ethdev.c dpdk-22.11.5/drivers/net/nfp/nfp_ethdev.c --- dpdk-22.11.4/drivers/net/nfp/nfp_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/nfp_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -264,6 +264,45 @@ hw->nfp_idx, 0); } +static void +nfp_cleanup_port_app_fw_nic(struct nfp_pf_dev *pf_dev, + uint8_t id) +{ + struct nfp_app_fw_nic *app_fw_nic; + + app_fw_nic = pf_dev->app_fw_priv; + if (app_fw_nic->ports[id] != NULL) + app_fw_nic->ports[id] = NULL; +} + +static void +nfp_uninit_app_fw_nic(struct nfp_pf_dev *pf_dev) +{ + nfp_cpp_area_release_free(pf_dev->ctrl_area); + rte_free(pf_dev->app_fw_priv); +} + +void +nfp_pf_uninit(struct nfp_pf_dev *pf_dev) +{ + 
nfp_cpp_area_release_free(pf_dev->hwqueues_area); + free(pf_dev->sym_tbl); + free(pf_dev->nfp_eth_table); + free(pf_dev->hwinfo); + nfp_cpp_free(pf_dev->cpp); + rte_free(pf_dev); +} + +static int +nfp_pf_secondary_uninit(struct nfp_pf_dev *pf_dev) +{ + free(pf_dev->sym_tbl); + nfp_cpp_free(pf_dev->cpp); + rte_free(pf_dev); + + return 0; +} + /* Reset and stop device. The device can not be restarted. */ static int nfp_net_close(struct rte_eth_dev *dev) @@ -274,8 +313,19 @@ struct nfp_app_fw_nic *app_fw_nic; int i; - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + /* + * In secondary process, a released eth device can be found by its name + * in shared memory. + * If the state of the eth device is RTE_ETH_DEV_UNUSED, it means the + * eth device has been released. + */ + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (dev->state == RTE_ETH_DEV_UNUSED) + return 0; + + nfp_pf_secondary_uninit(dev->process_private); return 0; + } PMD_INIT_LOG(DEBUG, "Close"); @@ -303,7 +353,11 @@ /* Only free PF resources after all physical ports have been closed */ /* Mark this port as unused and free device priv resources*/ nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff); - app_fw_nic->ports[hw->idx] = NULL; + + if (pf_dev->app_fw_id != NFP_APP_FW_CORE_NIC) + return -EINVAL; + + nfp_cleanup_port_app_fw_nic(pf_dev, hw->idx); for (i = 0; i < app_fw_nic->total_phyports; i++) { /* Check to see if ports are still in use */ @@ -311,26 +365,15 @@ return 0; } - /* Now it is safe to free all PF resources */ - PMD_INIT_LOG(INFO, "Freeing PF resources"); - nfp_cpp_area_free(pf_dev->ctrl_area); - nfp_cpp_area_free(pf_dev->hwqueues_area); - free(pf_dev->hwinfo); - free(pf_dev->sym_tbl); - nfp_cpp_free(pf_dev->cpp); - rte_free(app_fw_nic); - rte_free(pf_dev); - + /* Enable in nfp_net_start() */ rte_intr_disable(pci_dev->intr_handle); - /* unregister callback func from eal lib */ + /* Register in nfp_net_init() */ rte_intr_callback_unregister(pci_dev->intr_handle, nfp_net_dev_interrupt_handler, (void *)dev); - /* - * The ixgbe PMD disables the pcie master on the - * device. The i40e does not... 
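Putting the pieces above together, closing the last physical port now unwinds in this order (helper names as introduced by the patch; shown here only to make the ordering explicit):

    /* 1. Quiesce the interrupts that nfp_net_start() enabled. */
    rte_intr_disable(pci_dev->intr_handle);
    /* 2. Drop the callback that nfp_net_init() registered. */
    rte_intr_callback_unregister(pci_dev->intr_handle,
                                 nfp_net_dev_interrupt_handler, (void *)dev);
    /* 3. Release app-firmware resources: ctrl_area and app private data. */
    nfp_uninit_app_fw_nic(pf_dev);
    /* 4. Release PF-wide resources: hwqueues_area, sym_tbl, eth_table,
     *    hwinfo, the CPP handle and pf_dev itself. */
    nfp_pf_uninit(pf_dev);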
- */ + nfp_uninit_app_fw_nic(pf_dev); + nfp_pf_uninit(pf_dev); return 0; } @@ -920,10 +963,9 @@ struct rte_eth_dev *tmp_dev; tmp_dev = app_fw_nic->ports[i]->eth_dev; rte_eth_dev_release_port(tmp_dev); - app_fw_nic->ports[i] = NULL; } } - nfp_cpp_area_free(pf_dev->ctrl_area); + nfp_cpp_area_release_free(pf_dev->ctrl_area); app_cleanup: rte_free(app_fw_nic); @@ -1088,7 +1130,7 @@ return 0; hwqueues_cleanup: - nfp_cpp_area_free(pf_dev->hwqueues_area); + nfp_cpp_area_release_free(pf_dev->hwqueues_area); pf_cleanup: rte_free(pf_dev); sym_tbl_cleanup: diff -Nru dpdk-22.11.4/drivers/net/nfp/nfpcore/nfp_mutex.c dpdk-22.11.5/drivers/net/nfp/nfpcore/nfp_mutex.c --- dpdk-22.11.4/drivers/net/nfp/nfpcore/nfp_mutex.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/nfp/nfpcore/nfp_mutex.c 2024-04-22 11:25:10.000000000 +0000 @@ -151,7 +151,7 @@ if (tmp != key) return NFP_ERRPTR(EEXIST); - mutex = calloc(sizeof(*mutex), 1); + mutex = calloc(1, sizeof(*mutex)); if (!mutex) return NFP_ERRPTR(ENOMEM); diff -Nru dpdk-22.11.4/drivers/net/pfe/pfe_ethdev.c dpdk-22.11.5/drivers/net/pfe/pfe_ethdev.c --- dpdk-22.11.4/drivers/net/pfe/pfe_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/pfe/pfe_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -520,7 +520,8 @@ RTE_PTYPE_L3_IPV6_EXT, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, - RTE_PTYPE_L4_SCTP + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_UNKNOWN }; if (dev->rx_pkt_burst == pfe_recv_pkts || diff -Nru dpdk-22.11.4/drivers/net/tap/rte_eth_tap.c dpdk-22.11.5/drivers/net/tap/rte_eth_tap.c --- dpdk-22.11.4/drivers/net/tap/rte_eth_tap.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/tap/rte_eth_tap.c 2024-04-22 11:25:10.000000000 +0000 @@ -1862,6 +1862,7 @@ RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_SCTP, + RTE_PTYPE_UNKNOWN }; return ptypes; diff -Nru dpdk-22.11.4/drivers/net/tap/tap_flow.c dpdk-22.11.5/drivers/net/tap/tap_flow.c --- dpdk-22.11.4/drivers/net/tap/tap_flow.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/tap/tap_flow.c 2024-04-22 11:25:10.000000000 +0000 @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -1082,8 +1083,11 @@ } /* use flower filter type */ tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower"); - if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) - goto exit_item_not_supported; + if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0) { + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ACTION, + actions, "could not allocate netlink msg"); + goto exit_return_error; + } } for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { const struct tap_flow_items *token = NULL; @@ -1199,9 +1203,12 @@ if (action) goto exit_action_not_supported; action = 1; - if (!queue || - (queue->index > pmd->dev->data->nb_rx_queues - 1)) - goto exit_action_not_supported; + if (queue->index >= pmd->dev->data->nb_rx_queues) { + rte_flow_error_set(error, ERANGE, + RTE_FLOW_ERROR_TYPE_ACTION, actions, + "queue index out of range"); + goto exit_return_error; + } if (flow) { struct action_data adata = { .id = "skbedit", @@ -1227,7 +1234,7 @@ if (!pmd->rss_enabled) { err = rss_enable(pmd, attr, error); if (err) - goto exit_action_not_supported; + goto exit_return_error; } if (flow) err = rss_add_actions(flow, pmd, rss, error); @@ -1235,7 +1242,7 @@ goto exit_action_not_supported; } if (err) - goto exit_action_not_supported; + goto exit_return_error; } /* When fate is unknown, drop traffic.
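The recurring pattern in these tap_flow.c hunks is worth stating once: every validation failure now records an errno, the offending object and a message in the rte_flow_error before jumping to a common return of -rte_errno, instead of collapsing everything into "action not supported". Extracted from the queue-index check above:

    if (queue->index >= pmd->dev->data->nb_rx_queues) {
        rte_flow_error_set(error, ERANGE, RTE_FLOW_ERROR_TYPE_ACTION,
                           actions, "queue index out of range");
        return -rte_errno; /* via the new exit_return_error label */
    }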
*/ if (!action) { @@ -1258,6 +1265,7 @@ exit_action_not_supported: rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions, "action not supported"); +exit_return_error: return -rte_errno; } @@ -1290,9 +1298,7 @@ * In those rules, the handle (uint32_t) is the part that would identify * specifically each rule. * - * On 32-bit architectures, the handle can simply be the flow's pointer address. - * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently) - * unique handle. + * Use jhash of the flow pointer to make a unique handle. * * @param[in, out] flow * The flow that needs its handle set. @@ -1302,16 +1308,18 @@ { union { struct rte_flow *flow; - const void *key; - } tmp; - uint32_t handle = 0; - - tmp.flow = flow; - - if (sizeof(flow) > 4) - handle = rte_jhash(tmp.key, sizeof(flow), 1); - else - handle = (uintptr_t)flow; + uint32_t words[sizeof(flow) / sizeof(uint32_t)]; + } tmp = { + .flow = flow, + }; + uint32_t handle; + static uint64_t hash_seed; + + if (hash_seed == 0) + hash_seed = rte_rand(); + + handle = rte_jhash_32b(tmp.words, sizeof(flow) / sizeof(uint32_t), hash_seed); + /* must be at least 1 to avoid letting the kernel choose one for us */ if (!handle) handle = 1; diff -Nru dpdk-22.11.4/drivers/net/tap/tap_netlink.c dpdk-22.11.5/drivers/net/tap/tap_netlink.c --- dpdk-22.11.4/drivers/net/tap/tap_netlink.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/tap/tap_netlink.c 2024-04-22 11:25:10.000000000 +0000 @@ -72,7 +72,8 @@ #ifdef NETLINK_EXT_ACK /* Ask for extended ACK response. on older kernel will ignore request. */ - setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)); + if (setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one)) < 0) + TAP_LOG(NOTICE, "Unable to request netlink error information"); #endif if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) { diff -Nru dpdk-22.11.4/drivers/net/thunderx/base/nicvf_mbox.c dpdk-22.11.5/drivers/net/thunderx/base/nicvf_mbox.c --- dpdk-22.11.4/drivers/net/thunderx/base/nicvf_mbox.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/thunderx/base/nicvf_mbox.c 2024-04-22 11:25:10.000000000 +0000 @@ -485,3 +485,15 @@ mbx.msg.msg = NIC_MBOX_MSG_RESET_XCAST; nicvf_mbox_send_msg_to_pf(nic, &mbx); } + +int +nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; + mbx.xcast.mode = mode; + mbx.xcast.mac = mac; + + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} diff -Nru dpdk-22.11.4/drivers/net/thunderx/base/nicvf_mbox.h dpdk-22.11.5/drivers/net/thunderx/base/nicvf_mbox.h --- dpdk-22.11.4/drivers/net/thunderx/base/nicvf_mbox.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/thunderx/base/nicvf_mbox.h 2024-04-22 11:25:10.000000000 +0000 @@ -45,6 +45,8 @@ #define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ #define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ #define NIC_MBOX_MSG_RESET_XCAST 0xF2 /* Reset DCAM filtering mode */ +#define NIC_MBOX_MSG_ADD_MCAST 0xF3 /* ADD MAC to DCAM filters */ +#define NIC_MBOX_MSG_SET_XCAST 0xF4 /* Set MCAST/BCAST Rx mode */ #define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */ /* Get vNIC VF configuration */ @@ -190,6 +192,12 @@ }; +struct xcast { + uint8_t msg; + uint8_t mode; + uint64_t mac:48; +}; + struct nic_mbx { /* 128 bit shared memory between PF and each VF */ union { @@ -209,6 +217,7 @@ struct reset_stat_cfg reset_stat; struct set_link_state set_link; struct 
change_link_mode_msg mode; + struct xcast xcast; }; }; @@ -239,5 +248,6 @@ void nicvf_mbox_link_change(struct nicvf *nic); void nicvf_mbox_reset_xcast(struct nicvf *nic); int nicvf_mbox_change_mode(struct nicvf *nic, struct change_link_mode *cfg); +int nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac); #endif /* __THUNDERX_NICVF_MBOX__ */ diff -Nru dpdk-22.11.4/drivers/net/thunderx/nicvf_ethdev.c dpdk-22.11.5/drivers/net/thunderx/nicvf_ethdev.c --- dpdk-22.11.4/drivers/net/thunderx/nicvf_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/thunderx/nicvf_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -58,6 +58,10 @@ #define NICVF_QLM_MODE_SGMII 7 #define NICVF_QLM_MODE_XFI 12 +#define BCAST_ACCEPT 0x01 +#define CAM_ACCEPT (1 << 3) +#define BGX_MCAST_MODE(x) ((x) << 1) + enum nicvf_link_speed { NICVF_LINK_SPEED_SGMII, NICVF_LINK_SPEED_XAUI, @@ -392,12 +396,14 @@ RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_FRAG, + RTE_PTYPE_UNKNOWN }; static const uint32_t ptypes_tunnel[] = { RTE_PTYPE_TUNNEL_GRE, RTE_PTYPE_TUNNEL_GENEVE, RTE_PTYPE_TUNNEL_VXLAN, RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_UNKNOWN }; static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; @@ -2183,9 +2189,22 @@ nicvf_dev_close(dev); return 0; } + +static inline uint64_t ether_addr_to_u64(uint8_t *addr) +{ + uint64_t u = 0; + int i; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + u = u << 8 | addr[i]; + + return u; +} + static int nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) { + uint8_t dmac_ctrl_reg = 0; int ret; struct rte_pci_device *pci_dev; struct nicvf *nic = nicvf_pmd_priv(eth_dev); @@ -2307,6 +2326,15 @@ if (ret) { PMD_INIT_LOG(ERR, "Failed to set mac addr"); goto malloc_fail; + } + + /* set DMAC CTRL reg to allow MAC */ + dmac_ctrl_reg = BCAST_ACCEPT | BGX_MCAST_MODE(2) | CAM_ACCEPT; + ret = nicvf_mbox_set_xcast(nic, dmac_ctrl_reg, + ether_addr_to_u64(nic->mac_addr)); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to set mac addr"); + goto malloc_fail; } ret = nicvf_set_first_skip(eth_dev); diff -Nru dpdk-22.11.4/drivers/net/virtio/virtio_ethdev.c dpdk-22.11.5/drivers/net/virtio/virtio_ethdev.c --- dpdk-22.11.4/drivers/net/virtio/virtio_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/virtio/virtio_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -2237,8 +2237,6 @@ else eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; - eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; - /* Setting up rx_header size for the device */ if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) || virtio_with_feature(hw, VIRTIO_F_VERSION_1) || diff -Nru dpdk-22.11.4/drivers/net/vmxnet3/vmxnet3_ethdev.c dpdk-22.11.5/drivers/net/vmxnet3/vmxnet3_ethdev.c --- dpdk-22.11.4/drivers/net/vmxnet3/vmxnet3_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/net/vmxnet3/vmxnet3_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -257,6 +257,7 @@ vmxnet3_disable_intr(hw, i); } +#ifndef RTE_EXEC_ENV_FREEBSD /* * Enable all intrs used by the device */ @@ -280,6 +281,7 @@ vmxnet3_enable_intr(hw, i); } } +#endif /* * Gets tx data ring descriptor size. 
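The nicvf hunks above pack a MAC address into the new 48-bit xcast.mac mailbox bitfield with a simple MSB-first fold; the helper appears in the nicvf_ethdev.c hunk and is reproduced here on its own for clarity (const-qualified parameter added for the standalone form):

    #include <stdint.h>
    #include <rte_ether.h> /* RTE_ETHER_ADDR_LEN */

    /* 02:00:00:00:00:01 -> 0x020000000001 in the low 48 bits, which the
     * PF re-expands when programming the DCAM filter. */
    static inline uint64_t
    ether_addr_to_u64(const uint8_t *addr)
    {
        uint64_t u = 0;
        int i;

        for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
            u = (u << 8) | addr[i];
        return u;
    }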
@@ -1036,6 +1038,7 @@ /* Setting proper Rx Mode and issue Rx Mode Update command */ vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); +#ifndef RTE_EXEC_ENV_FREEBSD /* Setup interrupt callback */ rte_intr_callback_register(dev->intr_handle, vmxnet3_interrupt_handler, dev); @@ -1047,6 +1050,7 @@ /* enable all intrs */ vmxnet3_enable_all_intrs(hw); +#endif vmxnet3_process_events(dev); @@ -1822,11 +1826,13 @@ static int vmxnet3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { +#ifndef RTE_EXEC_ENV_FREEBSD struct vmxnet3_hw *hw = dev->data->dev_private; vmxnet3_enable_intr(hw, rte_intr_vec_list_index_get(dev->intr_handle, queue_id)); +#endif return 0; } diff -Nru dpdk-22.11.4/drivers/vdpa/mlx5/mlx5_vdpa_event.c dpdk-22.11.5/drivers/vdpa/mlx5/mlx5_vdpa_event.c --- dpdk-22.11.4/drivers/vdpa/mlx5/mlx5_vdpa_event.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/drivers/vdpa/mlx5/mlx5_vdpa_event.c 2024-04-22 11:25:10.000000000 +0000 @@ -244,22 +244,30 @@ return max; } +static void +mlx5_vdpa_drain_cq_one(struct mlx5_vdpa_priv *priv, + struct mlx5_vdpa_virtq *virtq) +{ + struct mlx5_vdpa_cq *cq = &virtq->eqp.cq; + + mlx5_vdpa_queue_complete(cq); + if (cq->cq_obj.cq) { + cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX); + virtq->eqp.qp_pi = 0; + if (!cq->armed) + mlx5_vdpa_cq_arm(priv, cq); + } +} + void mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv) { + struct mlx5_vdpa_virtq *virtq; unsigned int i; for (i = 0; i < priv->caps.max_num_virtio_queues; i++) { - struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq; - - mlx5_vdpa_queue_complete(cq); - if (cq->cq_obj.cq) { - cq->cq_obj.cqes[0].wqe_counter = - rte_cpu_to_be_16(UINT16_MAX); - priv->virtqs[i].eqp.qp_pi = 0; - if (!cq->armed) - mlx5_vdpa_cq_arm(priv, cq); - } + virtq = &priv->virtqs[i]; + mlx5_vdpa_drain_cq_one(priv, virtq); } } @@ -658,6 +666,7 @@ if (eqp->cq.cq_obj.cq != NULL && log_desc_n == eqp->cq.log_desc_n) { /* Reuse existing resources. */ eqp->cq.callfd = callfd; + mlx5_vdpa_drain_cq_one(priv, virtq); /* FW will set event qp to error state in q destroy. 
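For readability, the drain helper factored out above is shown in one piece; this is the patch's own logic, consolidated:

    static void
    mlx5_vdpa_drain_cq_one(struct mlx5_vdpa_priv *priv,
                           struct mlx5_vdpa_virtq *virtq)
    {
        struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;

        /* Consume any completions still pending on the event CQ. */
        mlx5_vdpa_queue_complete(cq);
        if (cq->cq_obj.cq) {
            /* Reset the shadow CQE and producer index for reuse. */
            cq->cq_obj.cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
            virtq->eqp.qp_pi = 0;
            if (!cq->armed)
                mlx5_vdpa_cq_arm(priv, cq);
        }
    }

Calling it from the event-QP reuse path (the hunk that follows) ensures a recycled event QP does not start out with stale completions.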
*/ if (reset && !mlx5_vdpa_qps2rst2rts(eqp)) rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)), diff -Nru dpdk-22.11.4/examples/ipsec-secgw/ipsec-secgw.c dpdk-22.11.5/examples/ipsec-secgw/ipsec-secgw.c --- dpdk-22.11.4/examples/ipsec-secgw/ipsec-secgw.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/ipsec-secgw/ipsec-secgw.c 2024-04-22 11:25:10.000000000 +0000 @@ -568,7 +568,7 @@ static inline void process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts, - uint8_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx) + uint16_t nb_pkts, uint16_t portid, struct rte_security_ctx *ctx) { struct ipsec_traffic traffic; @@ -695,8 +695,7 @@ struct rte_mbuf *pkts[MAX_PKT_BURST]; uint32_t lcore_id; uint64_t prev_tsc, diff_tsc, cur_tsc; - int32_t i, nb_rx; - uint16_t portid; + uint16_t i, nb_rx, portid; uint8_t queueid; struct lcore_conf *qconf; int32_t rc, socket_id; @@ -2057,10 +2056,10 @@ /* Register Rx callback if ptypes are not supported */ if (!ptype_supported && - !rte_eth_add_rx_callback(portid, queue, + !rte_eth_add_rx_callback(portid, rx_queueid, parse_ptype_cb, NULL)) { printf("Failed to add rx callback: port=%d, " - "queue=%d\n", portid, queue); + "rx_queueid=%d\n", portid, rx_queueid); } diff -Nru dpdk-22.11.4/examples/ipsec-secgw/ipsec.c dpdk-22.11.5/examples/ipsec-secgw/ipsec.c --- dpdk-22.11.4/examples/ipsec-secgw/ipsec.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/ipsec-secgw/ipsec.c 2024-04-22 11:25:10.000000000 +0000 @@ -110,10 +110,21 @@ if (cdev_id == RTE_CRYPTO_MAX_DEVS) cdev_id = ipsec_ctx->tbl[cdev_id_qp].id; else if (cdev_id != ipsec_ctx->tbl[cdev_id_qp].id) { - RTE_LOG(ERR, IPSEC, - "SA mapping to multiple cryptodevs is " - "not supported!"); - return -EINVAL; + struct rte_cryptodev_info dev_info_1, dev_info_2; + rte_cryptodev_info_get(cdev_id, &dev_info_1); + rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, + &dev_info_2); + if (dev_info_1.driver_id == dev_info_2.driver_id) { + RTE_LOG(WARNING, IPSEC, + "SA mapped to multiple cryptodevs for SPI %d\n", + sa->spi); + + } else { + RTE_LOG(WARNING, IPSEC, + "SA mapped to multiple cryptodevs of different types for SPI %d\n", + sa->spi); + + } } /* Store per core queue pair information */ @@ -758,6 +769,7 @@ continue; } + RTE_ASSERT(sa->cqp[ipsec_ctx->lcore_id] != NULL); enqueue_cop(sa->cqp[ipsec_ctx->lcore_id], &priv->cop); } } diff -Nru dpdk-22.11.4/examples/ipsec-secgw/ipsec_worker.h dpdk-22.11.5/examples/ipsec-secgw/ipsec_worker.h --- dpdk-22.11.4/examples/ipsec-secgw/ipsec_worker.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/ipsec-secgw/ipsec_worker.h 2024-04-22 11:25:10.000000000 +0000 @@ -472,7 +472,7 @@ static __rte_always_inline void route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], - uint8_t nb_pkts, uint64_t tx_offloads, bool ip_cksum) + uint32_t nb_pkts, uint64_t tx_offloads, bool ip_cksum) { uint32_t hop[MAX_PKT_BURST * 2]; uint32_t dst_ip[MAX_PKT_BURST * 2]; @@ -560,7 +560,7 @@ } static __rte_always_inline void -route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts) +route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint32_t nb_pkts) { int32_t hop[MAX_PKT_BURST * 2]; uint8_t dst_ip[MAX_PKT_BURST * 2][16]; diff -Nru dpdk-22.11.4/examples/ipsec-secgw/parser.c dpdk-22.11.5/examples/ipsec-secgw/parser.c --- dpdk-22.11.4/examples/ipsec-secgw/parser.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/ipsec-secgw/parser.c 2024-04-22 11:25:10.000000000 +0000 @@ -388,7 +388,7 @@ rc = parse_mac(res->mac, 
&mac); APP_CHECK(rc == 0, st, "invalid ether addr:%s", res->mac); rc = add_dst_ethaddr(res->port, &mac); - APP_CHECK(rc == 0, st, "invalid port numer:%hu", res->port); + APP_CHECK(rc == 0, st, "invalid port number:%hu", res->port); if (st->status < 0) return; } diff -Nru dpdk-22.11.4/examples/l3fwd/main.c dpdk-22.11.5/examples/l3fwd/main.c --- dpdk-22.11.4/examples/l3fwd/main.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/l3fwd/main.c 2024-04-22 11:25:10.000000000 +0000 @@ -1512,7 +1512,6 @@ l3fwd_lkp.main_loop = evt_rsrc->ops.fib_event_loop; else l3fwd_lkp.main_loop = evt_rsrc->ops.lpm_event_loop; - l3fwd_event_service_setup(); } else l3fwd_poll_resource_setup(); @@ -1543,6 +1542,11 @@ } } +#ifdef RTE_LIB_EVENTDEV + if (evt_rsrc->enabled) + l3fwd_event_service_setup(); +#endif + printf("\n"); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { diff -Nru dpdk-22.11.4/examples/packet_ordering/main.c dpdk-22.11.5/examples/packet_ordering/main.c --- dpdk-22.11.4/examples/packet_ordering/main.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/packet_ordering/main.c 2024-04-22 11:25:10.000000000 +0000 @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -427,8 +428,8 @@ * The mbufs are then passed to the worker threads via the rx_to_workers * ring. */ -static int -rx_thread(struct rte_ring *ring_out) +static __rte_always_inline int +rx_thread(struct rte_ring *ring_out, bool disable_reorder_flag) { uint32_t seqn = 0; uint16_t i, ret = 0; @@ -454,9 +455,11 @@ } app_stats.rx.rx_pkts += nb_rx_pkts; - /* mark sequence number */ - for (i = 0; i < nb_rx_pkts; ) - *rte_reorder_seqn(pkts[i++]) = seqn++; + /* mark sequence number if reorder is enabled */ + if (!disable_reorder_flag) { + for (i = 0; i < nb_rx_pkts;) + *rte_reorder_seqn(pkts[i++]) = seqn++; + } /* enqueue to rx_to_workers ring */ ret = rte_ring_enqueue_burst(ring_out, @@ -473,6 +476,18 @@ return 0; } +static __rte_noinline int +rx_thread_reorder(struct rte_ring *ring_out) +{ + return rx_thread(ring_out, false); +} + +static __rte_noinline int +rx_thread_reorder_disabled(struct rte_ring *ring_out) +{ + return rx_thread(ring_out, true); +} + /** * This thread takes bursts of packets from the rx_to_workers ring and * Changes the input port value to output port value. 
And feds it to @@ -772,8 +787,11 @@ (void *)&send_args, last_lcore_id); } - /* Start rx_thread() on the main core */ - rx_thread(rx_to_workers); + /* Start rx_thread_xxx() on the main core */ + if (disable_reorder) + rx_thread_reorder_disabled(rx_to_workers); + else + rx_thread_reorder(rx_to_workers); RTE_LCORE_FOREACH_WORKER(lcore_id) { if (rte_eal_wait_lcore(lcore_id) < 0) diff -Nru dpdk-22.11.4/examples/qos_sched/args.c dpdk-22.11.5/examples/qos_sched/args.c --- dpdk-22.11.4/examples/qos_sched/args.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/qos_sched/args.c 2024-04-22 11:25:10.000000000 +0000 @@ -141,8 +141,10 @@ n_tokens = rte_strsplit(string, strnlen(string, 32), tokens, n_vals, separator); - if (n_tokens > MAX_OPT_VALUES) + if (n_tokens > MAX_OPT_VALUES) { + free(string); return -1; + } for (i = 0; i < n_tokens; i++) opt_vals[i] = (uint32_t)atol(tokens[i]); diff -Nru dpdk-22.11.4/examples/vhost/main.c dpdk-22.11.5/examples/vhost/main.c --- dpdk-22.11.4/examples/vhost/main.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/examples/vhost/main.c 2024-04-22 11:25:10.000000000 +0000 @@ -259,6 +259,9 @@ char *dma_arg[RTE_MAX_VHOST_DEVICE]; int args_nr; + if (input == NULL) + return -1; + while (isblank(*addrs)) addrs++; if (*addrs == '\0') { diff -Nru dpdk-22.11.4/kernel/freebsd/nic_uio/nic_uio.c dpdk-22.11.5/kernel/freebsd/nic_uio/nic_uio.c --- dpdk-22.11.4/kernel/freebsd/nic_uio/nic_uio.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/kernel/freebsd/nic_uio/nic_uio.c 2024-04-22 11:25:10.000000000 +0000 @@ -78,10 +78,14 @@ uint32_t function; }; -static devclass_t nic_uio_devclass; - DEFINE_CLASS_0(nic_uio, nic_uio_driver, nic_uio_methods, sizeof(struct nic_uio_softc)); + +#if __FreeBSD_version < 1400000 +static devclass_t nic_uio_devclass; DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_devclass, nic_uio_modevent, 0); +#else +DRIVER_MODULE(nic_uio, pci, nic_uio_driver, nic_uio_modevent, 0); +#endif static int nic_uio_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr, diff -Nru dpdk-22.11.4/lib/bbdev/rte_bbdev.c dpdk-22.11.5/lib/bbdev/rte_bbdev.c --- dpdk-22.11.4/lib/bbdev/rte_bbdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/bbdev/rte_bbdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -1102,12 +1102,12 @@ intr_handle = dev->intr_handle; if (intr_handle == NULL) { - rte_bbdev_log(ERR, "Device %u intr handle unset\n", dev_id); + rte_bbdev_log(ERR, "Device %u intr handle unset", dev_id); return -ENOTSUP; } if (queue_id >= RTE_MAX_RXTX_INTR_VEC_ID) { - rte_bbdev_log(ERR, "Device %u queue_id %u is too big\n", + rte_bbdev_log(ERR, "Device %u queue_id %u is too big", dev_id, queue_id); return -ENOTSUP; } @@ -1116,7 +1116,7 @@ ret = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data); if (ret && (ret != -EEXIST)) { rte_bbdev_log(ERR, - "dev %u q %u int ctl error op %d epfd %d vec %u\n", + "dev %u q %u int ctl error op %d epfd %d vec %u", dev_id, queue_id, op, epfd, vec); return ret; } diff -Nru dpdk-22.11.4/lib/cfgfile/rte_cfgfile.c dpdk-22.11.5/lib/cfgfile/rte_cfgfile.c --- dpdk-22.11.4/lib/cfgfile/rte_cfgfile.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/cfgfile/rte_cfgfile.c 2024-04-22 11:25:10.000000000 +0000 @@ -135,7 +135,7 @@ unsigned int i; if (!params) { - CFG_LOG(ERR, "missing cfgfile parameters\n"); + CFG_LOG(ERR, "missing cfgfile parameters"); return -EINVAL; } @@ -148,7 +148,7 @@ } if (valid_comment == 0) { - CFG_LOG(ERR, "invalid comment characters %c\n", + CFG_LOG(ERR, "invalid comment characters %c", 
params->comment_character); return -ENOTSUP; } @@ -186,7 +186,7 @@ lineno++; if ((len >= sizeof(buffer) - 1) && (buffer[len-1] != '\n')) { CFG_LOG(ERR, " line %d - no \\n found on string. " - "Check if line too long\n", lineno); + "Check if line too long", lineno); goto error1; } /* skip parsing if comment character found */ @@ -207,7 +207,7 @@ char *end = memchr(buffer, ']', len); if (end == NULL) { CFG_LOG(ERR, - "line %d - no terminating ']' character found\n", + "line %d - no terminating ']' character found", lineno); goto error1; } @@ -223,7 +223,7 @@ split[1] = memchr(buffer, '=', len); if (split[1] == NULL) { CFG_LOG(ERR, - "line %d - no '=' character found\n", + "line %d - no '=' character found", lineno); goto error1; } @@ -247,7 +247,7 @@ if (!(flags & CFG_FLAG_EMPTY_VALUES) && (*split[1] == '\0')) { CFG_LOG(ERR, - "line %d - cannot use empty values\n", + "line %d - cannot use empty values", lineno); goto error1; } @@ -412,7 +412,7 @@ return 0; } - CFG_LOG(ERR, "entry name doesn't exist\n"); + CFG_LOG(ERR, "entry name doesn't exist"); return -EINVAL; } diff -Nru dpdk-22.11.4/lib/compressdev/rte_compressdev_pmd.c dpdk-22.11.5/lib/compressdev/rte_compressdev_pmd.c --- dpdk-22.11.4/lib/compressdev/rte_compressdev_pmd.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/compressdev/rte_compressdev_pmd.c 2024-04-22 11:25:10.000000000 +0000 @@ -100,12 +100,12 @@ struct rte_compressdev *compressdev; if (params->name[0] != '\0') { - COMPRESSDEV_LOG(INFO, "User specified device name = %s\n", + COMPRESSDEV_LOG(INFO, "User specified device name = %s", params->name); name = params->name; } - COMPRESSDEV_LOG(INFO, "Creating compressdev %s\n", name); + COMPRESSDEV_LOG(INFO, "Creating compressdev %s", name); COMPRESSDEV_LOG(INFO, "Init parameters - name: %s, socket id: %d", name, params->socket_id); diff -Nru dpdk-22.11.4/lib/cryptodev/rte_cryptodev.c dpdk-22.11.5/lib/cryptodev/rte_cryptodev.c --- dpdk-22.11.4/lib/cryptodev/rte_cryptodev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/cryptodev/rte_cryptodev.c 2024-04-22 11:25:10.000000000 +0000 @@ -1969,7 +1969,7 @@ } if (xforms == NULL) { - CDEV_LOG_ERR("Invalid xform\n"); + CDEV_LOG_ERR("Invalid xform"); rte_errno = EINVAL; return NULL; } @@ -2579,7 +2579,7 @@ int driver_id = -1; if (name == NULL) { - RTE_LOG(DEBUG, CRYPTODEV, "name pointer NULL"); + CDEV_LOG_DEBUG("name pointer NULL"); return -1; } diff -Nru dpdk-22.11.4/lib/cryptodev/rte_cryptodev.h dpdk-22.11.5/lib/cryptodev/rte_cryptodev.h --- dpdk-22.11.4/lib/cryptodev/rte_cryptodev.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/cryptodev/rte_cryptodev.h 2024-04-22 11:25:10.000000000 +0000 @@ -26,8 +26,6 @@ #include "rte_cryptodev_trace_fp.h" -extern const char **rte_cyptodev_names; - /* Logging Macros */ #define CDEV_LOG_ERR(...) 
\ diff -Nru dpdk-22.11.4/lib/dmadev/rte_dmadev.c dpdk-22.11.5/lib/dmadev/rte_dmadev.c --- dpdk-22.11.4/lib/dmadev/rte_dmadev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/dmadev/rte_dmadev.c 2024-04-22 11:25:10.000000000 +0000 @@ -710,7 +710,7 @@ return -EINVAL; if (vchan >= dev->data->dev_conf.nb_vchans) { - RTE_DMA_LOG(ERR, "Device %u vchan %u out of range\n", dev_id, vchan); + RTE_DMA_LOG(ERR, "Device %u vchan %u out of range", dev_id, vchan); return -EINVAL; } @@ -1011,7 +1011,7 @@ if (*end_param != '\0') RTE_DMA_LOG(WARNING, "Extra parameters passed to dmadev telemetry command, ignoring"); - buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); + buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); if (buf == NULL) return -ENOMEM; diff -Nru dpdk-22.11.4/lib/eal/common/eal_common_options.c dpdk-22.11.5/lib/eal/common/eal_common_options.c --- dpdk-22.11.4/lib/eal/common/eal_common_options.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/common/eal_common_options.c 2024-04-22 11:25:10.000000000 +0000 @@ -225,6 +225,8 @@ if (strcmp(argv[i], "--") == 0) break; eal_args[i] = strdup(argv[i]); + if (eal_args[i] == NULL) + goto error; } eal_args[i++] = NULL; /* always finish with NULL */ @@ -234,13 +236,31 @@ eal_app_args = calloc(argc - i + 1, sizeof(*eal_args)); if (eal_app_args == NULL) - return -1; + goto error; - for (j = 0; i < argc; j++, i++) + for (j = 0; i < argc; j++, i++) { eal_app_args[j] = strdup(argv[i]); + if (eal_app_args[j] == NULL) + goto error; + } eal_app_args[j] = NULL; return 0; + +error: + if (eal_app_args != NULL) { + i = 0; + while (eal_app_args[i] != NULL) + free(eal_app_args[i++]); + free(eal_app_args); + eal_app_args = NULL; + } + i = 0; + while (eal_args[i] != NULL) + free(eal_args[i++]); + free(eal_args); + eal_args = NULL; + return -1; } #endif @@ -2142,7 +2162,7 @@ struct internal_config *internal_conf = eal_get_internal_configuration(); if (internal_conf->max_simd_bitwidth.forced) { - RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled"); + RTE_LOG(NOTICE, EAL, "Cannot set max SIMD bitwidth - user runtime override enabled\n"); return -EPERM; } diff -Nru dpdk-22.11.4/lib/eal/linux/eal_dev.c dpdk-22.11.5/lib/eal/linux/eal_dev.c --- dpdk-22.11.4/lib/eal/linux/eal_dev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/linux/eal_dev.c 2024-04-22 11:25:10.000000000 +0000 @@ -182,6 +182,8 @@ i += 14; strlcpy(pci_slot_name, buf, sizeof(subsystem)); event->devname = strdup(pci_slot_name); + if (event->devname == NULL) + return -1; } for (; i < length; i++) { if (*buf == '\0') diff -Nru dpdk-22.11.4/lib/eal/linux/eal_hugepage_info.c dpdk-22.11.5/lib/eal/linux/eal_hugepage_info.c --- dpdk-22.11.4/lib/eal/linux/eal_hugepage_info.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/linux/eal_hugepage_info.c 2024-04-22 11:25:10.000000000 +0000 @@ -403,7 +403,7 @@ struct stat st; if (fstat(whd->file_fd, &st) < 0) - RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s", + RTE_LOG(DEBUG, EAL, "%s(): stat(\"%s\") failed: %s\n", __func__, whd->file_name, strerror(errno)); else (*total_size) += st.st_size; diff -Nru dpdk-22.11.4/lib/eal/linux/eal_interrupts.c dpdk-22.11.5/lib/eal/linux/eal_interrupts.c --- dpdk-22.11.4/lib/eal/linux/eal_interrupts.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/linux/eal_interrupts.c 2024-04-22 11:25:10.000000000 +0000 @@ -1542,7 +1542,7 @@ /* only check, initialization would be done in vdev driver.*/ if 
((uint64_t)rte_intr_efd_counter_size_get(intr_handle) > sizeof(union rte_intr_read_buffer)) { - RTE_LOG(ERR, EAL, "the efd_counter_size is oversized"); + RTE_LOG(ERR, EAL, "the efd_counter_size is oversized\n"); return -EINVAL; } } else { diff -Nru dpdk-22.11.4/lib/eal/meson.build dpdk-22.11.5/lib/eal/meson.build --- dpdk-22.11.4/lib/eal/meson.build 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/meson.build 2024-04-22 11:25:10.000000000 +0000 @@ -29,9 +29,6 @@ if dpdk_conf.has('RTE_USE_LIBBSD') ext_deps += libbsd endif -if dpdk_conf.has('RTE_HAS_LIBARCHIVE') - ext_deps += libarchive -endif if cc.has_function('getentropy', prefix : '#include ') cflags += '-DRTE_LIBEAL_USE_GETENTROPY' endif diff -Nru dpdk-22.11.4/lib/eal/windows/eal_memory.c dpdk-22.11.5/lib/eal/windows/eal_memory.c --- dpdk-22.11.4/lib/eal/windows/eal_memory.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/windows/eal_memory.c 2024-04-22 11:25:10.000000000 +0000 @@ -110,7 +110,7 @@ VirtualAlloc2_ptr = (VirtualAlloc2_type)( (void *)GetProcAddress(library, function)); if (VirtualAlloc2_ptr == NULL) { - RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")\n", + RTE_LOG_WIN32_ERR("GetProcAddress(\"%s\", \"%s\")", library_name, function); /* Contrary to the docs, Server 2016 is not supported. */ diff -Nru dpdk-22.11.4/lib/eal/x86/rte_cycles.c dpdk-22.11.5/lib/eal/x86/rte_cycles.c --- dpdk-22.11.4/lib/eal/x86/rte_cycles.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eal/x86/rte_cycles.c 2024-04-22 11:25:10.000000000 +0000 @@ -6,6 +6,10 @@ #include #include +#define x86_vendor_amd(t1, t2, t3) \ + ((t1 == 0x68747541) && /* htuA */ \ + (t2 == 0x444d4163) && /* DMAc */ \ + (t3 == 0x69746e65)) /* itne */ #include "eal_private.h" @@ -90,6 +94,18 @@ uint8_t mult, model; int32_t ret; +#ifdef RTE_TOOLCHAIN_MSVC + __cpuid(cpuinfo, 0); + a = cpuinfo[0]; + b = cpuinfo[1]; + c = cpuinfo[2]; + d = cpuinfo[3]; +#else + __cpuid(0, a, b, c, d); +#endif + if (x86_vendor_amd(b, c, d)) + return 0; + /* * Time Stamp Counter and Nominal Core Crystal Clock * Information Leaf diff -Nru dpdk-22.11.4/lib/ethdev/ethdev_driver.c dpdk-22.11.5/lib/ethdev/ethdev_driver.c --- dpdk-22.11.4/lib/ethdev/ethdev_driver.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/ethdev_driver.c 2024-04-22 11:25:10.000000000 +0000 @@ -465,7 +465,7 @@ pair = &args.pairs[i]; if (strcmp("representor", pair->key) == 0) { if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) { - RTE_LOG(ERR, EAL, "duplicated representor key: %s\n", + RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n", dargs); result = -1; goto parse_cleanup; @@ -691,7 +691,7 @@ if (info->ranges[i].controller != controller) continue; if (info->ranges[i].id_end < info->ranges[i].id_base) { - RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n", + RTE_ETHDEV_LOG(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %d\n", port_id, info->ranges[i].id_base, info->ranges[i].id_end, i); continue; diff -Nru dpdk-22.11.4/lib/ethdev/ethdev_pci.h dpdk-22.11.5/lib/ethdev/ethdev_pci.h --- dpdk-22.11.4/lib/ethdev/ethdev_pci.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/ethdev_pci.h 2024-04-22 11:25:10.000000000 +0000 @@ -31,7 +31,7 @@ struct rte_pci_device *pci_dev) { if ((eth_dev == NULL) || (pci_dev == NULL)) { - RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p", + RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p\n", (void *)eth_dev, (void *)pci_dev); return; } diff -Nru 
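The rte_cycles.c hunk above skips the CPUID crystal-clock calibration on AMD CPUs, returning 0 so the generic fallback path estimates the TSC frequency instead. CPUID leaf 0 reports the vendor string across EBX, EDX, ECX in little-endian words, so "AuthenticAMD" matches the constants 0x68747541, 0x69746e65, 0x444d4163, whose byte-reversed comments read "htuA", "itne", "DMAc". A standalone sketch (GCC/Clang on x86) that decodes the vendor string:

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            unsigned int a, b, c, d;
            char vendor[13];

            /* Leaf 0: vendor string in EBX, EDX, ECX (in that order). */
            __cpuid(0, a, b, c, d);
            memcpy(vendor, &b, 4);
            memcpy(vendor + 4, &d, 4);
            memcpy(vendor + 8, &c, 4);
            vendor[12] = '\0';
            printf("%s\n", vendor);  /* "AuthenticAMD", "GenuineIntel", ... */
            return 0;
    }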
dpdk-22.11.4/lib/ethdev/ethdev_private.c dpdk-22.11.5/lib/ethdev/ethdev_private.c --- dpdk-22.11.4/lib/ethdev/ethdev_private.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/ethdev_private.c 2024-04-22 11:25:10.000000000 +0000 @@ -184,7 +184,7 @@ RTE_DIM(eth_da->representor_ports)); done: if (str == NULL) - RTE_LOG(ERR, EAL, "wrong representor format: %s\n", str); + RTE_ETHDEV_LOG(ERR, "wrong representor format: %s\n", str); return str == NULL ? -1 : 0; } diff -Nru dpdk-22.11.4/lib/ethdev/rte_class_eth.c dpdk-22.11.5/lib/ethdev/rte_class_eth.c --- dpdk-22.11.4/lib/ethdev/rte_class_eth.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/rte_class_eth.c 2024-04-22 11:25:10.000000000 +0000 @@ -165,7 +165,7 @@ valid_keys = eth_params_keys; kvargs = rte_kvargs_parse(str, valid_keys); if (kvargs == NULL) { - RTE_LOG(ERR, EAL, "cannot parse argument list\n"); + RTE_ETHDEV_LOG(ERR, "cannot parse argument list\n"); rte_errno = EINVAL; return NULL; } diff -Nru dpdk-22.11.4/lib/ethdev/rte_ethdev.c dpdk-22.11.5/lib/ethdev/rte_ethdev.c --- dpdk-22.11.4/lib/ethdev/rte_ethdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/rte_ethdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -631,7 +631,7 @@ uint16_t pid; if (name == NULL) { - RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name"); + RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n"); return -EINVAL; } @@ -2097,41 +2097,41 @@ nb_rx_desc = cap.max_nb_desc; if (nb_rx_desc > cap.max_nb_desc) { RTE_ETHDEV_LOG(ERR, - "Invalid value for nb_rx_desc(=%hu), should be: <= %hu", + "Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n", nb_rx_desc, cap.max_nb_desc); return -EINVAL; } if (conf->peer_count > cap.max_rx_2_tx) { RTE_ETHDEV_LOG(ERR, - "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu", + "Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n", conf->peer_count, cap.max_rx_2_tx); return -EINVAL; } if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to use locked device memory for Rx queue, which is not supported"); + "Attempt to use locked device memory for Rx queue, which is not supported\n"); return -EINVAL; } if (conf->use_rte_memory && !cap.rx_cap.rte_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to use DPDK memory for Rx queue, which is not supported"); + "Attempt to use DPDK memory for Rx queue, which is not supported\n"); return -EINVAL; } if (conf->use_locked_device_memory && conf->use_rte_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to use mutually exclusive memory settings for Rx queue"); + "Attempt to use mutually exclusive memory settings for Rx queue\n"); return -EINVAL; } if (conf->force_memory && !conf->use_locked_device_memory && !conf->use_rte_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to force Rx queue memory settings, but none is set"); + "Attempt to force Rx queue memory settings, but none is set\n"); return -EINVAL; } if (conf->peer_count == 0) { RTE_ETHDEV_LOG(ERR, - "Invalid value for number of peers for Rx queue(=%u), should be: > 0", + "Invalid value for number of peers for Rx queue(=%u), should be: > 0\n", conf->peer_count); return -EINVAL; } @@ -2141,7 +2141,7 @@ count++; } if (count > cap.max_nb_queues) { - RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d", + RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d\n", cap.max_nb_queues); return -EINVAL; } @@ -2286,41 +2286,41 @@ nb_tx_desc = cap.max_nb_desc; if (nb_tx_desc > cap.max_nb_desc) { RTE_ETHDEV_LOG(ERR, - "Invalid 
value for nb_tx_desc(=%hu), should be: <= %hu", + "Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n", nb_tx_desc, cap.max_nb_desc); return -EINVAL; } if (conf->peer_count > cap.max_tx_2_rx) { RTE_ETHDEV_LOG(ERR, - "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu", + "Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n", conf->peer_count, cap.max_tx_2_rx); return -EINVAL; } if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to use locked device memory for Tx queue, which is not supported"); + "Attempt to use locked device memory for Tx queue, which is not supported\n"); return -EINVAL; } if (conf->use_rte_memory && !cap.tx_cap.rte_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to use DPDK memory for Tx queue, which is not supported"); + "Attempt to use DPDK memory for Tx queue, which is not supported\n"); return -EINVAL; } if (conf->use_locked_device_memory && conf->use_rte_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to use mutually exclusive memory settings for Tx queue"); + "Attempt to use mutually exclusive memory settings for Tx queue\n"); return -EINVAL; } if (conf->force_memory && !conf->use_locked_device_memory && !conf->use_rte_memory) { RTE_ETHDEV_LOG(ERR, - "Attempt to force Tx queue memory settings, but none is set"); + "Attempt to force Tx queue memory settings, but none is set\n"); return -EINVAL; } if (conf->peer_count == 0) { RTE_ETHDEV_LOG(ERR, - "Invalid value for number of peers for Tx queue(=%u), should be: > 0", + "Invalid value for number of peers for Tx queue(=%u), should be: > 0\n", conf->peer_count); return -EINVAL; } @@ -2330,7 +2330,7 @@ count++; } if (count > cap.max_nb_queues) { - RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d", + RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d\n", cap.max_nb_queues); return -EINVAL; } @@ -6152,7 +6152,7 @@ } if (reassembly_capa == NULL) { - RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL"); + RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n"); return -EINVAL; } @@ -6182,7 +6182,7 @@ } if (conf == NULL) { - RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL"); + RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n"); return -EINVAL; } @@ -6205,7 +6205,7 @@ if (dev->data->dev_configured == 0) { RTE_ETHDEV_LOG(ERR, "Device with port_id=%u is not configured.\n" - "Cannot set IP reassembly configuration", + "Cannot set IP reassembly configuration\n", port_id); return -EINVAL; } diff -Nru dpdk-22.11.4/lib/ethdev/rte_flow.c dpdk-22.11.5/lib/ethdev/rte_flow.c --- dpdk-22.11.4/lib/ethdev/rte_flow.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/rte_flow.c 2024-04-22 11:25:10.000000000 +0000 @@ -206,7 +206,7 @@ sizeof(struct rte_flow_action_of_push_mpls)), MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), MK_FLOW_ACTION(VXLAN_DECAP, 0), - MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)), + MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)), MK_FLOW_ACTION(NVGRE_DECAP, 0), MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)), MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)), diff -Nru dpdk-22.11.4/lib/ethdev/rte_flow.h dpdk-22.11.5/lib/ethdev/rte_flow.h --- dpdk-22.11.4/lib/ethdev/rte_flow.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/ethdev/rte_flow.h 2024-04-22 11:25:10.000000000 +0000 @@ -3138,7 +3138,7 @@ */ struct rte_flow_action_nvgre_encap { /** - * 
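The rte_flow.c one-liner above fixes a copy-paste in the flow action descriptor table: the NVGRE_ENCAP entry recorded sizeof(struct rte_flow_action_vxlan_encap). The two conf structs are currently layout-identical (a single definition pointer), so the bug was latent, but the recorded size is what generic helpers such as rte_flow_conv() use when copying per-action configuration, so each entry must name its own struct; the accompanying rte_flow.h hunk fixes the matching comment. A condensed sketch of the table pattern (simplified from rte_flow.c):

    #include <stddef.h>
    #include <rte_flow.h>

    struct flow_desc_data {
            const char *name;
            size_t size;    /* bytes of conf copied for this action type */
    };

    #define MK_FLOW_ACTION(t, s) \
            [RTE_FLOW_ACTION_TYPE_ ## t] = { .name = # t, .size = (s) }

    static const struct flow_desc_data action_descs[] = {
            MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
            MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
    };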
Encapsulating vxlan tunnel definition + * Encapsulating nvgre tunnel definition * (terminated by the END pattern item). */ struct rte_flow_item *definition; diff -Nru dpdk-22.11.4/lib/eventdev/eventdev_pmd.h dpdk-22.11.5/lib/eventdev/eventdev_pmd.h --- dpdk-22.11.4/lib/eventdev/eventdev_pmd.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/eventdev_pmd.h 2024-04-22 11:25:10.000000000 +0000 @@ -49,14 +49,14 @@ /* Macros to check for valid device */ #define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \ if (!rte_event_pmd_is_valid_dev((dev_id))) { \ - RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \ + RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \ return retval; \ } \ } while (0) #define RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, errno, retval) do { \ if (!rte_event_pmd_is_valid_dev((dev_id))) { \ - RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \ + RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \ rte_errno = errno; \ return retval; \ } \ @@ -64,7 +64,7 @@ #define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \ if (!rte_event_pmd_is_valid_dev((dev_id))) { \ - RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \ + RTE_EDEV_LOG_ERR("Invalid dev_id=%d", dev_id); \ return; \ } \ } while (0) diff -Nru dpdk-22.11.4/lib/eventdev/rte_event_crypto_adapter.c dpdk-22.11.5/lib/eventdev/rte_event_crypto_adapter.c --- dpdk-22.11.4/lib/eventdev/rte_event_crypto_adapter.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/rte_event_crypto_adapter.c 2024-04-22 11:25:10.000000000 +0000 @@ -126,7 +126,7 @@ /* Macros to check for valid adapter */ #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ if (!eca_valid_id(id)) { \ - RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \ + RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \ return retval; \ } \ } while (0) @@ -237,20 +237,28 @@ struct rte_crypto_op **ops = bufp->op_buffer; if (*tailp > *headp) + /* Flush ops from head pointer to (tail - head) OPs */ n = *tailp - *headp; else if (*tailp < *headp) + /* Circ buffer - Rollover. + * Flush OPs from head to max size of buffer. + * Rest of the OPs will be flushed in next iteration. + */ n = bufp->size - *headp; else { /* head == tail case */ /* when head == tail, * circ buff is either full(tail pointer roll over) or empty */ if (bufp->count != 0) { - /* circ buffer is full */ - n = bufp->count; + /* Circ buffer - FULL. + * Flush OPs from head to max size of buffer. + * Rest of the OPS will be flushed in next iteration. 
+ */ + n = bufp->size - *headp; } else { - /* circ buffer is empty */ + /* Circ buffer - Empty */ *nb_ops_flushed = 0; - return 0; /* buffer empty */ + return 0; } } @@ -298,7 +306,7 @@ dev_conf.nb_event_ports += 1; ret = rte_event_dev_configure(dev_id, &dev_conf); if (ret) { - RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id); + RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id); if (started) { if (rte_event_dev_start(dev_id)) return -EIO; @@ -308,7 +316,7 @@ ret = rte_event_port_setup(dev_id, port_id, port_conf); if (ret) { - RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id); + RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id); return ret; } @@ -392,7 +400,7 @@ sizeof(struct crypto_device_info), 0, socket_id); if (adapter->cdevs == NULL) { - RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n"); + RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices"); eca_circular_buffer_free(&adapter->ebuf); rte_free(adapter); return -ENOMEM; diff -Nru dpdk-22.11.4/lib/eventdev/rte_event_eth_rx_adapter.c dpdk-22.11.5/lib/eventdev/rte_event_eth_rx_adapter.c --- dpdk-22.11.4/lib/eventdev/rte_event_eth_rx_adapter.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/rte_event_eth_rx_adapter.c 2024-04-22 11:25:10.000000000 +0000 @@ -290,14 +290,14 @@ #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ if (!rxa_validate_id(id)) { \ - RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ + RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \ return retval; \ } \ } while (0) #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \ if (!rxa_validate_id(id)) { \ - RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ + RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \ ret = retval; \ goto error; \ } \ @@ -305,15 +305,15 @@ #define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \ if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \ - RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \ + RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token"); \ ret = retval; \ goto error; \ } \ } while (0) -#define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \ +#define RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(port_id, retval) do { \ if (!rte_eth_dev_is_valid_port(port_id)) { \ - RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \ + RTE_EDEV_LOG_ERR("Invalid port_id=%u", port_id); \ ret = retval; \ goto error; \ } \ @@ -1534,7 +1534,7 @@ dev_conf.nb_event_ports += 1; ret = rte_event_dev_configure(dev_id, &dev_conf); if (ret) { - RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", + RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id); if (started) { if (rte_event_dev_start(dev_id)) @@ -1545,7 +1545,7 @@ ret = rte_event_port_setup(dev_id, port_id, port_conf); if (ret) { - RTE_EDEV_LOG_ERR("failed to setup event port %u\n", + RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id); return ret; } @@ -1622,7 +1622,7 @@ if (!err) return 0; - RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err); + RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d", err); rte_free(rx_adapter->epoll_events); error: rte_ring_free(rx_adapter->intr_ring); @@ -1638,12 +1638,12 @@ err = pthread_cancel(rx_adapter->rx_intr_thread); if (err) - RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n", + RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d", err); err = pthread_join(rx_adapter->rx_intr_thread, NULL); if 
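The crypto-adapter hunk above tightens the circular op buffer flush: when head == tail with a non-zero count the ring is full, and the old code tried to flush bufp->count ops starting at *headp, which runs past the end of the backing array whenever head is non-zero. Like the wrapped case, a full ring must be drained in two contiguous chunks. A sketch of the corrected length computation (hypothetical helper; head, tail, count, and size play the same roles as in the hunk):

    /* Longest run that can be flushed with one contiguous copy. */
    static unsigned int
    ring_contig_len(unsigned int head, unsigned int tail,
                    unsigned int count, unsigned int size)
    {
            if (tail > head)
                    return tail - head;     /* no wrap-around */
            if (tail < head || count != 0)
                    return size - head;     /* wrapped, or full (head == tail) */
            return 0;                       /* empty (head == tail, count 0) */
    }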
(err) - RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err); + RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d", err); rte_free(rx_adapter->epoll_events); rte_ring_free(rx_adapter->intr_ring); @@ -1909,7 +1909,7 @@ if (rte_mbuf_dyn_rx_timestamp_register( &event_eth_rx_timestamp_dynfield_offset, &event_eth_rx_timestamp_dynflag) != 0) { - RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); + RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf"); return -rte_errno; } @@ -2439,7 +2439,7 @@ RTE_DIM(default_rss_key)); if (rx_adapter->eth_devices == NULL) { - RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n"); + RTE_EDEV_LOG_ERR("failed to get mem for eth devices"); rte_free(rx_adapter); return -ENOMEM; } @@ -3449,7 +3449,7 @@ /* Get Rx adapter stats */ if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id, &rx_adptr_stats)) { - RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n"); + RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats"); return -1; } @@ -3486,7 +3486,7 @@ /* Reset Rx adapter stats */ if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) { - RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n"); + RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats"); return -1; } @@ -3521,7 +3521,7 @@ /* Get device ID from parameter string */ eth_dev_id = strtoul(token, NULL, 10); - RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); token = strtok(NULL, ","); RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); @@ -3593,7 +3593,7 @@ /* Get device ID from parameter string */ eth_dev_id = strtoul(token, NULL, 10); - RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); token = strtok(NULL, ","); RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); @@ -3663,7 +3663,7 @@ /* Get device ID from parameter string */ eth_dev_id = strtoul(token, NULL, 10); - RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); token = strtok(NULL, ","); RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); @@ -3718,7 +3718,7 @@ /* Get device ID from parameter string */ eth_dev_id = strtoul(token, NULL, 10); - RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); + RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); token = strtok(NULL, ","); RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); diff -Nru dpdk-22.11.4/lib/eventdev/rte_event_eth_tx_adapter.c dpdk-22.11.5/lib/eventdev/rte_event_eth_tx_adapter.c --- dpdk-22.11.4/lib/eventdev/rte_event_eth_tx_adapter.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/rte_event_eth_tx_adapter.c 2024-04-22 11:25:10.000000000 +0000 @@ -330,7 +330,7 @@ ret = rte_event_port_setup(dev_id, port_id, pc); if (ret) { - RTE_EDEV_LOG_ERR("failed to setup event port %u\n", + RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id); if (started) { if (rte_event_dev_start(dev_id)) diff -Nru dpdk-22.11.4/lib/eventdev/rte_event_timer_adapter.c dpdk-22.11.5/lib/eventdev/rte_event_timer_adapter.c --- dpdk-22.11.4/lib/eventdev/rte_event_timer_adapter.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/rte_event_timer_adapter.c 2024-04-22 11:25:10.000000000 +0000 @@ -92,7 +92,7 @@ dev_conf.nb_event_ports += 1; ret = rte_event_dev_configure(dev_id, &dev_conf); if (ret < 0) { - EVTIM_LOG_ERR("failed 
to configure event dev %u\n", dev_id); + EVTIM_LOG_ERR("failed to configure event dev %u", dev_id); if (started) if (rte_event_dev_start(dev_id)) return -EIO; @@ -112,7 +112,7 @@ ret = rte_event_port_setup(dev_id, port_id, port_conf); if (ret < 0) { - EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n", + EVTIM_LOG_ERR("failed to setup event port %u on event dev %u", port_id, dev_id); return ret; } diff -Nru dpdk-22.11.4/lib/eventdev/rte_eventdev.c dpdk-22.11.5/lib/eventdev/rte_eventdev.c --- dpdk-22.11.4/lib/eventdev/rte_eventdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/rte_eventdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -932,7 +932,7 @@ dev = &rte_eventdevs[dev_id]; if (*dev->dev_ops->port_link == NULL) { - RTE_EDEV_LOG_ERR("Function not supported\n"); + RTE_EDEV_LOG_ERR("Function not supported"); rte_errno = ENOTSUP; return 0; } @@ -1263,8 +1263,8 @@ int ret; if (!nb_elem) { - RTE_LOG(ERR, EVENTDEV, - "Invalid number of elements=%d requested\n", nb_elem); + RTE_EDEV_LOG_ERR("Invalid number of elements=%d requested", + nb_elem); rte_errno = EINVAL; return NULL; } @@ -1279,7 +1279,7 @@ mp_ops_name = rte_mbuf_best_mempool_ops(); ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL); if (ret != 0) { - RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n"); + RTE_EDEV_LOG_ERR("error setting mempool handler"); goto err; } @@ -1834,7 +1834,7 @@ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); - buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); + buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); if (buf == NULL) return -ENOMEM; diff -Nru dpdk-22.11.4/lib/eventdev/rte_eventdev.h dpdk-22.11.5/lib/eventdev/rte_eventdev.h --- dpdk-22.11.4/lib/eventdev/rte_eventdev.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/eventdev/rte_eventdev.h 2024-04-22 11:25:10.000000000 +0000 @@ -507,9 +507,9 @@ struct rte_event_dev_config { uint32_t dequeue_timeout_ns; /**< rte_event_dequeue_burst() timeout on this device. - * This value should be in the range of *min_dequeue_timeout_ns* and - * *max_dequeue_timeout_ns* which previously provided in - * rte_event_dev_info_get() + * This value should be in the range of @ref rte_event_dev_info.min_dequeue_timeout_ns and + * @ref rte_event_dev_info.max_dequeue_timeout_ns returned by + * @ref rte_event_dev_info_get() * The value 0 is allowed, in which case, default dequeue timeout used. * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT */ @@ -517,40 +517,53 @@ /**< In a *closed system* this field is the limit on maximum number of * events that can be inflight in the eventdev at a given time. The * limit is required to ensure that the finite space in a closed system - * is not overwhelmed. The value cannot exceed the *max_num_events* - * as provided by rte_event_dev_info_get(). - * This value should be set to -1 for *open system*. + * is not exhausted. + * The value cannot exceed @ref rte_event_dev_info.max_num_events + * returned by rte_event_dev_info_get(). + * + * This value should be set to -1 for *open systems*, that is, + * those systems returning -1 in @ref rte_event_dev_info.max_num_events. + * + * @see rte_event_port_conf.new_event_threshold */ uint8_t nb_event_queues; /**< Number of event queues to configure on this device. - * This value cannot exceed the *max_event_queues* which previously - * provided in rte_event_dev_info_get() + * This value *includes* any single-link queue-port pairs to be used. 
+ * This value cannot exceed @ref rte_event_dev_info.max_event_queues + + * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs + * returned by rte_event_dev_info_get(). + * The number of non-single-link queues i.e. this value less + * *nb_single_link_event_port_queues* in this struct, cannot exceed + * @ref rte_event_dev_info.max_event_queues */ uint8_t nb_event_ports; /**< Number of event ports to configure on this device. - * This value cannot exceed the *max_event_ports* which previously - * provided in rte_event_dev_info_get() + * This value *includes* any single-link queue-port pairs to be used. + * This value cannot exceed @ref rte_event_dev_info.max_event_ports + + * @ref rte_event_dev_info.max_single_link_event_port_queue_pairs + * returned by rte_event_dev_info_get(). + * The number of non-single-link ports i.e. this value less + * *nb_single_link_event_port_queues* in this struct, cannot exceed + * @ref rte_event_dev_info.max_event_ports */ uint32_t nb_event_queue_flows; - /**< Number of flows for any event queue on this device. - * This value cannot exceed the *max_event_queue_flows* which previously - * provided in rte_event_dev_info_get() + /**< Max number of flows needed for a single event queue on this device. + * This value cannot exceed @ref rte_event_dev_info.max_event_queue_flows + * returned by rte_event_dev_info_get() */ uint32_t nb_event_port_dequeue_depth; - /**< Maximum number of events can be dequeued at a time from an - * event port by this device. - * This value cannot exceed the *max_event_port_dequeue_depth* - * which previously provided in rte_event_dev_info_get(). + /**< Max number of events that can be dequeued at a time from an event port on this device. + * This value cannot exceed @ref rte_event_dev_info.max_event_port_dequeue_depth + * returned by rte_event_dev_info_get(). * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. - * @see rte_event_port_setup() + * @see rte_event_port_setup() rte_event_dequeue_burst() */ uint32_t nb_event_port_enqueue_depth; - /**< Maximum number of events can be enqueued at a time from an - * event port by this device. - * This value cannot exceed the *max_event_port_enqueue_depth* - * which previously provided in rte_event_dev_info_get(). + /**< Maximum number of events can be enqueued at a time to an event port on this device. + * This value cannot exceed @ref rte_event_dev_info.max_event_port_enqueue_depth + * returned by rte_event_dev_info_get(). * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable. - * @see rte_event_port_setup() + * @see rte_event_port_setup() rte_event_enqueue_burst() */ uint32_t event_dev_cfg; /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/ @@ -560,7 +573,7 @@ * queues; this value cannot exceed *nb_event_ports* or * *nb_event_queues*. If the device has ports and queues that are * optimized for single-link usage, this field is a hint for how many - * to allocate; otherwise, regular event ports and queues can be used. + * to allocate; otherwise, regular event ports and queues will be used. */ }; @@ -1094,10 +1107,8 @@ * port and queue of the mbufs in the vector */ struct { - uint16_t port; - /* Ethernet device port id. */ - uint16_t queue; - /* Ethernet device queue id. */ + uint16_t port; /**< Ethernet device port id. */ + uint16_t queue; /**< Ethernet device queue id. */ }; }; /**< Union to hold common attributes of the vector array. */ @@ -1126,7 +1137,11 @@ * vector array can be an array of mbufs or pointers or opaque u64 * values. 
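The rte_eventdev.h hunks rework the rte_event_dev_config documentation: each limit now points at the exact @ref rte_event_dev_info field that bounds it, and the queue/port counts are spelled out as including single-link queue-port pairs. A usage sketch in that spirit, assuming dev_id names a valid device: read the limits, then configure within them.

    #include <rte_common.h>
    #include <rte_eventdev.h>

    static int
    configure_evdev(uint8_t dev_id)
    {
            struct rte_event_dev_info info;
            struct rte_event_dev_config cfg = {0};
            int ret;

            ret = rte_event_dev_info_get(dev_id, &info);
            if (ret < 0)
                    return ret;

            cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
            cfg.nb_events_limit = info.max_num_events;  /* -1 on open systems */
            cfg.nb_event_queues = RTE_MIN(2, info.max_event_queues);
            cfg.nb_event_ports = RTE_MIN(2, info.max_event_ports);
            cfg.nb_event_queue_flows = info.max_event_queue_flows;
            cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
            cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

            return rte_event_dev_configure(dev_id, &cfg);
    }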
*/ +#ifndef __DOXYGEN__ } __rte_aligned(16); +#else +}; +#endif /* Scheduler type definitions */ #define RTE_SCHED_TYPE_ORDERED 0 diff -Nru dpdk-22.11.4/lib/hash/rte_cuckoo_hash.h dpdk-22.11.5/lib/hash/rte_cuckoo_hash.h --- dpdk-22.11.4/lib/hash/rte_cuckoo_hash.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/hash/rte_cuckoo_hash.h 2024-04-22 11:25:10.000000000 +0000 @@ -29,17 +29,6 @@ #define RETURN_IF_TRUE(cond, retval) #endif -#if defined(RTE_LIBRTE_HASH_DEBUG) -#define ERR_IF_TRUE(cond, fmt, args...) do { \ - if (cond) { \ - RTE_LOG(ERR, HASH, fmt, ##args); \ - return; \ - } \ -} while (0) -#else -#define ERR_IF_TRUE(cond, fmt, args...) -#endif - #include #include diff -Nru dpdk-22.11.4/lib/lpm/rte_lpm6.c dpdk-22.11.5/lib/lpm/rte_lpm6.c --- dpdk-22.11.4/lib/lpm/rte_lpm6.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/lpm/rte_lpm6.c 2024-04-22 11:25:10.000000000 +0000 @@ -279,7 +279,7 @@ rules_tbl = rte_hash_create(&rule_hash_tbl_params); if (rules_tbl == NULL) { - RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)", + RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)\n", rte_strerror(rte_errno), rte_errno); goto fail_wo_unlock; } @@ -289,7 +289,7 @@ sizeof(uint32_t) * config->number_tbl8s, RTE_CACHE_LINE_SIZE); if (tbl8_pool == NULL) { - RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)", + RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)\n", rte_strerror(rte_errno), rte_errno); rte_errno = ENOMEM; goto fail_wo_unlock; @@ -300,7 +300,7 @@ sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s, RTE_CACHE_LINE_SIZE); if (tbl8_hdrs == NULL) { - RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)", + RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)\n", rte_strerror(rte_errno), rte_errno); rte_errno = ENOMEM; goto fail_wo_unlock; diff -Nru dpdk-22.11.4/lib/mempool/rte_mempool_ops.c dpdk-22.11.5/lib/mempool/rte_mempool_ops.c --- dpdk-22.11.4/lib/mempool/rte_mempool_ops.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/mempool/rte_mempool_ops.c 2024-04-22 11:25:10.000000000 +0000 @@ -46,7 +46,7 @@ if (strlen(h->name) >= sizeof(ops->name) - 1) { rte_spinlock_unlock(&rte_mempool_ops_table.sl); - RTE_LOG(DEBUG, EAL, "%s(): mempool_ops <%s>: name too long\n", + RTE_LOG(DEBUG, MEMPOOL, "%s(): mempool_ops <%s>: name too long\n", __func__, h->name); rte_errno = EEXIST; return -EEXIST; diff -Nru dpdk-22.11.4/lib/meson.build dpdk-22.11.5/lib/meson.build --- dpdk-22.11.4/lib/meson.build 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/meson.build 2024-04-22 11:25:10.000000000 +0000 @@ -176,7 +176,7 @@ if not build dpdk_libs_disabled += name - set_variable(name.underscorify() + '_disable_reason', reason) + set_variable('lib_' + name.underscorify() + '_disable_reason', reason) continue endif diff -Nru dpdk-22.11.4/lib/metrics/rte_metrics_telemetry.c dpdk-22.11.5/lib/metrics/rte_metrics_telemetry.c --- dpdk-22.11.4/lib/metrics/rte_metrics_telemetry.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/metrics/rte_metrics_telemetry.c 2024-04-22 11:25:10.000000000 +0000 @@ -363,7 +363,7 @@ } } if (j == num_metrics) { - METRICS_LOG_WARN("Invalid stat name %s\n", + METRICS_LOG_WARN("Invalid stat name %s", stat_names[i]); free(names); return -EINVAL; diff -Nru dpdk-22.11.4/lib/net/rte_ether.h dpdk-22.11.5/lib/net/rte_ether.h --- dpdk-22.11.4/lib/net/rte_ether.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/net/rte_ether.h 2024-04-22 11:25:10.000000000 +0000 @@ -46,6 +46,20 @@ 
#define RTE_ETHER_MIN_MTU 68 /**< Minimum MTU for IPv4 packets, see RFC 791. */ +/* VLAN header fields */ +#define RTE_VLAN_DEI_SHIFT 12 +#define RTE_VLAN_PRI_SHIFT 13 +#define RTE_VLAN_PRI_MASK 0xe000 /* Priority Code Point */ +#define RTE_VLAN_DEI_MASK 0x1000 /* Drop Eligible Indicator */ +#define RTE_VLAN_ID_MASK 0x0fff /* VLAN Identifier */ + +#define RTE_VLAN_TCI_ID(vlan_tci) ((vlan_tci) & RTE_VLAN_ID_MASK) +#define RTE_VLAN_TCI_PRI(vlan_tci) (((vlan_tci) & RTE_VLAN_PRI_MASK) >> RTE_VLAN_PRI_SHIFT) +#define RTE_VLAN_TCI_DEI(vlan_tci) (((vlan_tci) & RTE_VLAN_DEI_MASK) >> RTE_VLAN_DEI_SHIFT) +#define RTE_VLAN_TCI_MAKE(id, pri, dei) ((id) | \ + ((pri) << RTE_VLAN_PRI_SHIFT) | \ + ((dei) << RTE_VLAN_DEI_SHIFT)) + /** * Ethernet address: * A universally administered address is uniquely assigned to a device by its diff -Nru dpdk-22.11.4/lib/net/rte_ip.h dpdk-22.11.5/lib/net/rte_ip.h --- dpdk-22.11.4/lib/net/rte_ip.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/net/rte_ip.h 2024-04-22 11:25:10.000000000 +0000 @@ -420,11 +420,14 @@ { uint16_t raw_cksum; uint32_t cksum; + uint16_t len; - if (l4_off > m->pkt_len) - return 0; + if (unlikely(l4_off > m->pkt_len)) + return 0; /* invalid params, return a dummy value */ + + len = rte_be_to_cpu_16(ipv4_hdr->total_length) - (uint16_t)rte_ipv4_hdr_len(ipv4_hdr); - if (rte_raw_cksum_mbuf(m, l4_off, m->pkt_len - l4_off, &raw_cksum)) + if (rte_raw_cksum_mbuf(m, l4_off, len, &raw_cksum)) return 0; cksum = raw_cksum + rte_ipv4_phdr_cksum(ipv4_hdr, 0); @@ -650,10 +653,10 @@ uint16_t raw_cksum; uint32_t cksum; - if (l4_off > m->pkt_len) - return 0; + if (unlikely(l4_off > m->pkt_len)) + return 0; /* invalid params, return a dummy value */ - if (rte_raw_cksum_mbuf(m, l4_off, m->pkt_len - l4_off, &raw_cksum)) + if (rte_raw_cksum_mbuf(m, l4_off, rte_be_to_cpu_16(ipv6_hdr->payload_len), &raw_cksum)) return 0; cksum = raw_cksum + rte_ipv6_phdr_cksum(ipv6_hdr, 0); diff -Nru dpdk-22.11.4/lib/net/rte_net_crc.c dpdk-22.11.5/lib/net/rte_net_crc.c --- dpdk-22.11.4/lib/net/rte_net_crc.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/net/rte_net_crc.c 2024-04-22 11:25:10.000000000 +0000 @@ -179,7 +179,7 @@ max_simd_bitwidth >= RTE_VECT_SIMD_512) return handlers_avx512; #endif - NET_LOG(INFO, "Requirements not met, can't use AVX512\n"); + NET_LOG(INFO, "Requirements not met, can't use AVX512"); return NULL; } @@ -205,7 +205,7 @@ max_simd_bitwidth >= RTE_VECT_SIMD_128) return handlers_sse42; #endif - NET_LOG(INFO, "Requirements not met, can't use SSE\n"); + NET_LOG(INFO, "Requirements not met, can't use SSE"); return NULL; } @@ -231,7 +231,7 @@ max_simd_bitwidth >= RTE_VECT_SIMD_128) return handlers_neon; #endif - NET_LOG(INFO, "Requirements not met, can't use NEON\n"); + NET_LOG(INFO, "Requirements not met, can't use NEON"); return NULL; } diff -Nru dpdk-22.11.4/lib/node/ethdev_rx.c dpdk-22.11.5/lib/node/ethdev_rx.c --- dpdk-22.11.4/lib/node/ethdev_rx.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/node/ethdev_rx.c 2024-04-22 11:25:10.000000000 +0000 @@ -160,13 +160,13 @@ if (!l3_ipv4 || !l3_ipv6) { node_info("ethdev_rx", - "Enabling ptype callback for required ptypes on port %u\n", + "Enabling ptype callback for required ptypes on port %u", port); if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb, NULL)) { node_err("ethdev_rx", - "Failed to add rx ptype cb: port=%d, queue=%d\n", + "Failed to add rx ptype cb: port=%d, queue=%d", port, queue); return -EINVAL; } diff -Nru dpdk-22.11.4/lib/node/ip4_lookup.c 
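Two net-library fixes above deserve a note. The rte_ether.h hunk adds VLAN TCI helpers; the masks and shifts operate on host byte order, so a TCI lifted from a header needs rte_be_to_cpu_16() first. And the rte_ip.h hunks stop deriving the L4 checksum length from m->pkt_len - l4_off, using the IP header's own total_length/payload_len instead, so frames carrying trailing Ethernet padding no longer checksum the padding bytes. A usage sketch for the new TCI macros (host byte order assumed):

    #include <rte_common.h>
    #include <rte_ether.h>

    static void
    vlan_tci_demo(void)
    {
            uint16_t tci = RTE_VLAN_TCI_MAKE(100, 3, 0); /* VID 100, PCP 3, DEI 0 */
            uint16_t vid = RTE_VLAN_TCI_ID(tci);         /* 100 */
            uint16_t pri = RTE_VLAN_TCI_PRI(tci);        /* 3 */
            uint16_t dei = RTE_VLAN_TCI_DEI(tci);        /* 0 */

            RTE_SET_USED(vid);
            RTE_SET_USED(pri);
            RTE_SET_USED(dei);
    }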
dpdk-22.11.5/lib/node/ip4_lookup.c --- dpdk-22.11.4/lib/node/ip4_lookup.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/node/ip4_lookup.c 2024-04-22 11:25:10.000000000 +0000 @@ -143,7 +143,7 @@ ip, depth, val); if (ret < 0) { node_err("ip4_lookup", - "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d\n", + "Unable to add entry %s / %d nh (%x) to LPM table on sock %d, rc=%d", abuf, depth, val, socket, ret); return ret; } diff -Nru dpdk-22.11.4/lib/pipeline/rte_swx_pipeline_spec.c dpdk-22.11.5/lib/pipeline/rte_swx_pipeline_spec.c --- dpdk-22.11.4/lib/pipeline/rte_swx_pipeline_spec.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/pipeline/rte_swx_pipeline_spec.c 2024-04-22 11:25:10.000000000 +0000 @@ -2841,7 +2841,7 @@ } /* Memory allocation. */ - s = calloc(sizeof(struct pipeline_spec), 1); + s = calloc(1, sizeof(struct pipeline_spec)); if (!s) { if (err_line) *err_line = n_lines; @@ -4145,7 +4145,7 @@ } /* Memory allocation. */ - s = calloc(sizeof(struct pipeline_iospec), 1); + s = calloc(1, sizeof(struct pipeline_iospec)); if (!s) { if (err_line) *err_line = n_lines; diff -Nru dpdk-22.11.4/lib/power/guest_channel.c dpdk-22.11.5/lib/power/guest_channel.c --- dpdk-22.11.4/lib/power/guest_channel.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/power/guest_channel.c 2024-04-22 11:25:10.000000000 +0000 @@ -89,7 +89,7 @@ flags |= O_NONBLOCK; if (fcntl(fd, F_SETFL, flags) < 0) { RTE_LOG(ERR, GUEST_CHANNEL, "Failed on setting non-blocking mode for " - "file %s", fd_path); + "file %s\n", fd_path); goto error; } /* QEMU needs a delay after connection */ diff -Nru dpdk-22.11.4/lib/power/rte_power_pmd_mgmt.c dpdk-22.11.5/lib/power/rte_power_pmd_mgmt.c --- dpdk-22.11.4/lib/power/rte_power_pmd_mgmt.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/power/rte_power_pmd_mgmt.c 2024-04-22 11:25:10.000000000 +0000 @@ -684,7 +684,7 @@ rte_power_pmd_mgmt_set_pause_duration(unsigned int duration) { if (duration == 0) { - RTE_LOG(ERR, POWER, "Pause duration must be greater than 0, value unchanged"); + RTE_LOG(ERR, POWER, "Pause duration must be greater than 0, value unchanged\n"); return -EINVAL; } pause_duration = duration; @@ -707,7 +707,7 @@ } if (min > scale_freq_max[lcore]) { - RTE_LOG(ERR, POWER, "Invalid min frequency: Cannot be greater than max frequency"); + RTE_LOG(ERR, POWER, "Invalid min frequency: Cannot be greater than max frequency\n"); return -EINVAL; } scale_freq_min[lcore] = min; @@ -727,7 +727,7 @@ if (max == 0) max = UINT32_MAX; if (max < scale_freq_min[lcore]) { - RTE_LOG(ERR, POWER, "Invalid max frequency: Cannot be less than min frequency"); + RTE_LOG(ERR, POWER, "Invalid max frequency: Cannot be less than min frequency\n"); return -EINVAL; } diff -Nru dpdk-22.11.4/lib/rawdev/rte_rawdev.c dpdk-22.11.5/lib/rawdev/rte_rawdev.c --- dpdk-22.11.4/lib/rawdev/rte_rawdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/rawdev/rte_rawdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -656,7 +656,7 @@ if (!rte_rawdev_pmd_is_valid_dev(dev_id)) return -EINVAL; - buf = calloc(sizeof(char), RTE_TEL_MAX_SINGLE_STRING_LEN); + buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char)); if (buf == NULL) return -ENOMEM; diff -Nru dpdk-22.11.4/lib/rcu/rte_rcu_qsbr.c dpdk-22.11.5/lib/rcu/rte_rcu_qsbr.c --- dpdk-22.11.4/lib/rcu/rte_rcu_qsbr.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/rcu/rte_rcu_qsbr.c 2024-04-22 11:25:10.000000000 +0000 @@ -92,7 +92,7 @@ return 1; } - __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", + 
__RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", v->qsbr_cnt[thread_id].lock_cnt); id = thread_id & __RTE_QSBR_THRID_MASK; @@ -144,7 +144,7 @@ return 1; } - __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", + __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", v->qsbr_cnt[thread_id].lock_cnt); id = thread_id & __RTE_QSBR_THRID_MASK; diff -Nru dpdk-22.11.4/lib/rcu/rte_rcu_qsbr.h dpdk-22.11.5/lib/rcu/rte_rcu_qsbr.h --- dpdk-22.11.4/lib/rcu/rte_rcu_qsbr.h 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/rcu/rte_rcu_qsbr.h 2024-04-22 11:25:10.000000000 +0000 @@ -304,7 +304,7 @@ RTE_ASSERT(v != NULL && thread_id < v->max_threads); - __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", + __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", v->qsbr_cnt[thread_id].lock_cnt); /* Copy the current value of token. @@ -355,7 +355,7 @@ { RTE_ASSERT(v != NULL && thread_id < v->max_threads); - __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", + __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", v->qsbr_cnt[thread_id].lock_cnt); /* The reader can go offline only after the load of the @@ -432,7 +432,7 @@ 1, __ATOMIC_RELEASE); __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, WARNING, - "Lock counter %u. Nested locks?\n", + "Lock counter %u. Nested locks?", v->qsbr_cnt[thread_id].lock_cnt); #endif } @@ -486,7 +486,7 @@ RTE_ASSERT(v != NULL && thread_id < v->max_threads); - __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u\n", + __RTE_RCU_IS_LOCK_CNT_ZERO(v, thread_id, ERR, "Lock counter %u", v->qsbr_cnt[thread_id].lock_cnt); /* Acquire the changes to the shared data structure released diff -Nru dpdk-22.11.4/lib/regexdev/rte_regexdev.c dpdk-22.11.5/lib/regexdev/rte_regexdev.c --- dpdk-22.11.4/lib/regexdev/rte_regexdev.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/regexdev/rte_regexdev.c 2024-04-22 11:25:10.000000000 +0000 @@ -19,7 +19,7 @@ struct rte_regexdev_data data[RTE_MAX_REGEXDEV_DEVS]; } *rte_regexdev_shared_data; -int rte_regexdev_logtype; +RTE_LOG_REGISTER_DEFAULT(rte_regexdev_logtype, INFO); static uint16_t regexdev_find_free_dev(void) diff -Nru dpdk-22.11.4/lib/stack/rte_stack.c dpdk-22.11.5/lib/stack/rte_stack.c --- dpdk-22.11.4/lib/stack/rte_stack.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/stack/rte_stack.c 2024-04-22 11:25:10.000000000 +0000 @@ -56,7 +56,7 @@ int ret; if (flags & ~(RTE_STACK_F_LF)) { - STACK_LOG_ERR("Unsupported stack flags %#x\n", flags); + STACK_LOG_ERR("Unsupported stack flags %#x", flags); return NULL; } @@ -65,7 +65,7 @@ #endif #if !defined(RTE_STACK_LF_SUPPORTED) if (flags & RTE_STACK_F_LF) { - STACK_LOG_ERR("Lock-free stack is not supported on your platform\n"); + STACK_LOG_ERR("Lock-free stack is not supported on your platform"); rte_errno = ENOTSUP; return NULL; } @@ -82,7 +82,7 @@ te = rte_zmalloc("STACK_TAILQ_ENTRY", sizeof(*te), 0); if (te == NULL) { - STACK_LOG_ERR("Cannot reserve memory for tailq\n"); + STACK_LOG_ERR("Cannot reserve memory for tailq"); rte_errno = ENOMEM; return NULL; } @@ -92,7 +92,7 @@ mz = rte_memzone_reserve_aligned(mz_name, sz, socket_id, 0, __alignof__(*s)); if (mz == NULL) { - STACK_LOG_ERR("Cannot reserve stack memzone!\n"); + STACK_LOG_ERR("Cannot reserve stack memzone!"); rte_mcfg_tailq_write_unlock(); rte_free(te); return NULL; diff -Nru dpdk-22.11.4/lib/telemetry/telemetry.c dpdk-22.11.5/lib/telemetry/telemetry.c --- dpdk-22.11.4/lib/telemetry/telemetry.c 2024-01-23 15:08:10.000000000 +0000 +++ 
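The regexdev hunk above replaces a bare `int rte_regexdev_logtype;`, which was never registered with the logging subsystem, with RTE_LOG_REGISTER_DEFAULT, which both defines the variable and registers the logtype at its default level from a constructor, so the id is valid before main() without any explicit init call. A minimal sketch with a hypothetical component:

    #include <rte_log.h>

    RTE_LOG_REGISTER_DEFAULT(demo_logtype, INFO);

    #define DEMO_LOG(level, fmt, args...) \
            rte_log(RTE_LOG_ ## level, demo_logtype, "demo: " fmt "\n", ## args)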
dpdk-22.11.5/lib/telemetry/telemetry.c 2024-04-22 11:25:10.000000000 +0000 @@ -171,7 +171,11 @@ d->type != RTE_TEL_ARRAY_INT && d->type != RTE_TEL_ARRAY_STRING) return snprintf(out_buf, buf_len, "null"); - used = rte_tel_json_empty_array(out_buf, buf_len, 0); + if (d->type == RTE_TEL_DICT) + used = rte_tel_json_empty_obj(out_buf, buf_len, 0); + else + used = rte_tel_json_empty_array(out_buf, buf_len, 0); + if (d->type == RTE_TEL_ARRAY_U64) for (i = 0; i < d->data_len; i++) used = rte_tel_json_add_array_u64(out_buf, @@ -379,8 +383,8 @@ "{\"version\":\"%s\",\"pid\":%d,\"max_output_len\":%d}", telemetry_version, getpid(), MAX_OUTPUT_LEN); if (write(s, info_str, strlen(info_str)) < 0) { - close(s); - return NULL; + TMTY_LOG(ERR, "Socket write base info to client failed"); + goto exit; } /* receive data is not null terminated */ @@ -405,6 +409,7 @@ bytes = read(s, buffer, sizeof(buffer) - 1); } +exit: close(s); __atomic_sub_fetch(&v2_clients, 1, __ATOMIC_RELAXED); return NULL; diff -Nru dpdk-22.11.4/lib/vhost/vdpa.c dpdk-22.11.5/lib/vhost/vdpa.c --- dpdk-22.11.4/lib/vhost/vdpa.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/vhost/vdpa.c 2024-04-22 11:25:10.000000000 +0000 @@ -19,6 +19,7 @@ #include "rte_vdpa.h" #include "vdpa_driver.h" #include "vhost.h" +#include "iotlb.h" /** Double linked list of vDPA devices. */ TAILQ_HEAD(vdpa_device_list, rte_vdpa_device); @@ -191,17 +192,21 @@ if (unlikely(nr_descs > vq->size)) return -1; + vhost_user_iotlb_rd_lock(vq); desc_ring = (struct vring_desc *)(uintptr_t) vhost_iova_to_vva(dev, vq, vq->desc[desc_id].addr, &dlen, VHOST_ACCESS_RO); + vhost_user_iotlb_rd_unlock(vq); if (unlikely(!desc_ring)) return -1; if (unlikely(dlen < vq->desc[desc_id].len)) { + vhost_user_iotlb_rd_lock(vq); idesc = vhost_alloc_copy_ind_table(dev, vq, vq->desc[desc_id].addr, vq->desc[desc_id].len); + vhost_user_iotlb_rd_unlock(vq); if (unlikely(!idesc)) return -1; @@ -218,9 +223,12 @@ if (unlikely(nr_descs-- == 0)) goto fail; desc = desc_ring[desc_id]; - if (desc.flags & VRING_DESC_F_WRITE) + if (desc.flags & VRING_DESC_F_WRITE) { + vhost_user_iotlb_rd_lock(vq); vhost_log_write_iova(dev, vq, desc.addr, desc.len); + vhost_user_iotlb_rd_unlock(vq); + } desc_id = desc.next; } while (desc.flags & VRING_DESC_F_NEXT); diff -Nru dpdk-22.11.4/lib/vhost/vhost_crypto.c dpdk-22.11.5/lib/vhost/vhost_crypto.c --- dpdk-22.11.4/lib/vhost/vhost_crypto.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/vhost/vhost_crypto.c 2024-04-22 11:25:10.000000000 +0000 @@ -245,7 +245,7 @@ return ret; if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) { - VC_LOG_DBG("Invalid cipher key length\n"); + VC_LOG_DBG("Invalid cipher key length"); return -VIRTIO_CRYPTO_BADMSG; } @@ -301,7 +301,7 @@ return ret; if (param->cipher_key_len > VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH) { - VC_LOG_DBG("Invalid cipher key length\n"); + VC_LOG_DBG("Invalid cipher key length"); return -VIRTIO_CRYPTO_BADMSG; } @@ -321,7 +321,7 @@ return ret; if (param->auth_key_len > VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH) { - VC_LOG_DBG("Invalid auth key length\n"); + VC_LOG_DBG("Invalid auth key length"); return -VIRTIO_CRYPTO_BADMSG; } diff -Nru dpdk-22.11.4/lib/vhost/vhost_user.c dpdk-22.11.5/lib/vhost/vhost_user.c --- dpdk-22.11.4/lib/vhost/vhost_user.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/vhost/vhost_user.c 2024-04-22 11:25:10.000000000 +0000 @@ -2144,7 +2144,9 @@ vhost_user_iotlb_flush_all(vq); + rte_spinlock_lock(&vq->access_lock); vring_invalidate(dev, vq); + 
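The vdpa.c hunks above wrap every guest-address translation and dirty-log write in the per-virtqueue IOTLB read lock, so a concurrent IOTLB update cannot invalidate a mapping while it is in use; the adjoining vhost_user.c hunk applies the same discipline by taking the access_lock around vring_invalidate(). A generic sketch of the pattern using public locking APIs (the vhost code uses its own iotlb lock helpers):

    #include <stddef.h>
    #include <stdint.h>
    #include <rte_rwlock.h>

    static rte_rwlock_t map_lock = RTE_RWLOCK_INITIALIZER;

    /* Hold the read lock across the lookup *and* the use of the result;
     * writers take the write lock to update or invalidate the table. */
    static uint64_t
    lookup_and_use(const uint64_t *table, size_t idx)
    {
            uint64_t v;

            rte_rwlock_read_lock(&map_lock);
            v = table[idx];  /* stands in for vhost_iova_to_vva() + use */
            rte_rwlock_read_unlock(&map_lock);
            return v;
    }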
rte_spinlock_unlock(&vq->access_lock); return RTE_VHOST_MSG_RESULT_REPLY; } diff -Nru dpdk-22.11.4/lib/vhost/virtio_net.c dpdk-22.11.5/lib/vhost/virtio_net.c --- dpdk-22.11.4/lib/vhost/virtio_net.c 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/lib/vhost/virtio_net.c 2024-04-22 11:25:10.000000000 +0000 @@ -2862,7 +2862,6 @@ { uint16_t i; uint16_t avail_entries; - uint16_t dropped = 0; static bool allocerr_warned; /* @@ -2901,11 +2900,8 @@ update_shadow_used_ring_split(vq, head_idx, 0); - if (unlikely(buf_len <= dev->vhost_hlen)) { - dropped += 1; - i++; + if (unlikely(buf_len <= dev->vhost_hlen)) break; - } buf_len -= dev->vhost_hlen; @@ -2922,8 +2918,6 @@ buf_len, mbuf_pool->name); allocerr_warned = true; } - dropped += 1; - i++; break; } @@ -2934,27 +2928,21 @@ VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n"); allocerr_warned = true; } - dropped += 1; - i++; break; } - } - if (dropped) - rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1); - - vq->last_avail_idx += i; + if (unlikely(count != i)) + rte_pktmbuf_free_bulk(&pkts[i], count - i); - do_data_copy_dequeue(vq); - if (unlikely(i < count)) - vq->shadow_used_idx = i; if (likely(vq->shadow_used_idx)) { + vq->last_avail_idx += vq->shadow_used_idx; + do_data_copy_dequeue(vq); flush_shadow_used_ring_split(dev, vq); vhost_vring_call_split(dev, vq); } - return (i - dropped); + return i; } __rte_noinline diff -Nru dpdk-22.11.4/meson.build dpdk-22.11.5/meson.build --- dpdk-22.11.4/meson.build 2024-01-23 15:08:10.000000000 +0000 +++ dpdk-22.11.5/meson.build 2024-04-22 11:25:10.000000000 +0000 @@ -163,17 +163,17 @@ output_message = '\n=================\nContent Skipped\n=================\n' output_message += '\napps:\n\t' foreach app:dpdk_apps_disabled - reason = get_variable(app.underscorify() + '_disable_reason') + reason = get_variable('app_' + app.underscorify() + '_disable_reason') output_message += app + ':\t' + reason + '\n\t' endforeach output_message += '\nlibs:\n\t' foreach lib:dpdk_libs_disabled - reason = get_variable(lib.underscorify() + '_disable_reason') + reason = get_variable('lib_' + lib.underscorify() + '_disable_reason') output_message += lib + ':\t' + reason + '\n\t' endforeach output_message += '\ndrivers:\n\t' foreach drv:dpdk_drvs_disabled - reason = get_variable(drv.underscorify() + '_disable_reason') + reason = get_variable('drv_' + drv.underscorify() + '_disable_reason') output_message += drv + ':\t' + reason + '\n\t' endforeach message(output_message + '\n')
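Finally, the virtio_net.c rework above drops the separate dropped counter from the split-ring dequeue path: the loop now simply breaks at the first per-descriptor failure, bulk-frees the mbufs that were pre-allocated but never filled, and advances last_avail_idx by the shadow-used count, so the return value and the ring accounting can no longer drift apart. A condensed sketch of that error path (hypothetical helper):

    #include <rte_mbuf.h>

    /* 'done' packets were filled into pkts[]; release the unused tail in
     * one call and report the prefix that was actually dequeued. */
    static uint16_t
    finish_dequeue(struct rte_mbuf **pkts, uint16_t done, uint16_t count)
    {
            if (done != count)
                    rte_pktmbuf_free_bulk(&pkts[done], count - done);
            return done;
    }

The meson.build hunk that closes the diff pairs with the earlier lib/meson.build change: disabled apps, libs, and drivers now store their skip reason under app_/lib_/drv_ prefixed variable names, so an app and a driver sharing a name can no longer clobber each other's reason string.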