M: Shahaf Shuler <shahafs@mellanox.com>
M: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
T: git://dpdk.org/next/dpdk-next-net-mlx
+F: drivers/common/mlx5/
F: drivers/net/mlx5/
F: buildtools/options-ibverbs-static.sh
F: doc/guides/nics/mlx5.rst
DIRS-y += iavf
endif
+ifeq ($(CONFIG_RTE_LIBRTE_MLX5_PMD),y)
+DIRS-y += mlx5
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
# Copyright(c) 2018 Cavium, Inc
std_deps = ['eal']
-drivers = ['cpt', 'dpaax', 'iavf', 'mvep', 'octeontx', 'octeontx2', 'qat']
+drivers = ['cpt', 'dpaax', 'iavf', 'mlx5', 'mvep', 'octeontx', 'octeontx2', 'qat']
config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
driver_name_fmt = 'rte_common_@0@'
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 Mellanox Technologies, Ltd
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Library name.
+LIB = librte_common_mlx5.a
+# Recursive (=) assignment lets LIB_GLUE reference the two variables
+# defined just below it.
+LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
+LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
+LIB_GLUE_VERSION = 20.02.0
+
+# Sources.
+# mlx5_glue.c is linked into the library only when rdma-core is NOT
+# loaded at run time through dlopen; otherwise it goes into the
+# separate glue plug-in built further below.
+ifneq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c
+endif
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_devx_cmds.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_common.c
+
+# Install the glue shared object only in dlopen link mode.
+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
+endif
+
+# Basic CFLAGS.
+CFLAGS += -O3
+CFLAGS += -std=c11 -Wall -Wextra
+CFLAGS += -g
+CFLAGS += -I.
+CFLAGS += -D_BSD_SOURCE
+CFLAGS += -D_DEFAULT_SOURCE
+CFLAGS += -D_XOPEN_SOURCE=600
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-strict-prototypes
+# Select how rdma-core is linked: dlopen glue plug-in (name/version
+# passed to the C code as MLX5_GLUE*), static archive, or plain
+# shared linking against libibverbs/libmlx5.
+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
+CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
+CFLAGS_mlx5_glue.o += -fPIC
+LDLIBS += -ldl
+else ifeq ($(CONFIG_RTE_IBVERBS_LINK_STATIC),y)
+LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh)
+else
+LDLIBS += -libverbs -lmlx5
+endif
+
+LDLIBS += -lrte_eal
+
+# A few warnings cannot be avoided in external headers.
+CFLAGS += -Wno-error=cast-qual -DNDEBUG -UPEDANTIC
+
+EXPORT_MAP := rte_common_mlx5_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up mlx5_autoconf.h.
+
+# Export the toolchain settings so the auto-config-h.sh probes below
+# compile with the same flags as the library itself.
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+# Silence probe output unless a verbose build (V=1) is requested.
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+mlx5_autoconf.h.new: FORCE
+
+mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_MPLS_SUPPORT \
+ infiniband/verbs.h \
+ enum IBV_FLOW_SPEC_MPLS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \
+ infiniband/verbs.h \
+ enum IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_WQ_FLAG_RX_END_PADDING \
+ infiniband/verbs.h \
+ enum IBV_WQ_FLAG_RX_END_PADDING \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_SWP \
+ infiniband/mlx5dv.h \
+ type 'struct mlx5dv_sw_parsing_caps' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_MPW \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_PAD \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_FLOW_DV_SUPPORT \
+ infiniband/mlx5dv.h \
+ func mlx5dv_create_flow_action_packet_reformat \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_DR_DOMAIN_TYPE_NIC_RX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_ESWITCH \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_DR_DOMAIN_TYPE_FDB \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_VLAN \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dr_action_create_push_vlan \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_DEVX_PORT \
+ infiniband/mlx5dv.h \
+ func mlx5dv_query_devx_port \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVX_OBJ \
+ infiniband/mlx5dv.h \
+ func mlx5dv_devx_obj_create \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_FLOW_DEVX_COUNTERS \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_FLOW_ACTION_COUNTERS_DEVX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVX_ASYNC \
+ infiniband/mlx5dv.h \
+ func mlx5dv_devx_obj_query_async \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dr_action_create_dest_devx_tir \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dr_action_create_flow_meter \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5_DR_FLOW_DUMP \
+ infiniband/mlx5dv.h \
+ func mlx5dv_dump_dr_domain \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD \
+ infiniband/mlx5dv.h \
+ enum MLX5_MMAP_GET_NC_PAGES_CMD \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_25G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_50G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_ETHTOOL_LINK_MODE_100G \
+ /usr/include/linux/ethtool.h \
+ enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_COUNTERS_SET_V42 \
+ infiniband/verbs.h \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_COUNTERS_SET_V45 \
+ infiniband/verbs.h \
+ type 'struct ibv_counters_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NL_NLDEV \
+ rdma/rdma_netlink.h \
+ enum RDMA_NL_NLDEV \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_PORT_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_PORT_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_NAME \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_PORT_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_NDEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_NUM_VF \
+ linux/if_link.h \
+ enum IFLA_NUM_VF \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_EXT_MASK \
+ linux/if_link.h \
+ enum IFLA_EXT_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_SWITCH_ID \
+ linux/if_link.h \
+ enum IFLA_PHYS_SWITCH_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_PORT_NAME \
+ linux/if_link.h \
+ enum IFLA_PHYS_PORT_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
+ $(AUTOCONF_OUTPUT)
+
+# Create mlx5_autoconf.h or update it in case it differs from the new one.
+
+# When the freshly generated file is identical to the existing one,
+# cmp succeeds and the mv is skipped, preserving the header timestamp
+# and avoiding needless rebuilds of the objects depending on it.
+mlx5_autoconf.h: mlx5_autoconf.h.new
+	$Q [ -f '$@' ] && \
+		cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+		mv '$<' '$@'
+
+# Every object of this library depends on the generated header.
+$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h
+
+# Generate dependency plug-in for rdma-core when the PMD must not be linked
+# directly, so that applications do not inherit this dependency.
+
+ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
+
+# The glue shared object is a prerequisite of the library itself.
+$(LIB): $(LIB_GLUE)
+
+# When linking through the compiler driver, linker flags need the
+# -Wl, prefix applied by linkerprefix.
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
+$(LIB_GLUE): mlx5_glue.o
+	$Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
+		-Wl,-h,$(LIB_GLUE) \
+		-shared -o $@ $< -libverbs -lmlx5
+
+mlx5_glue.o: mlx5_autoconf.h
+
+endif
+
+# Remove the generated header and glue artifacts on "make clean".
+clean_mlx5: FORCE
+	$Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new
+	$Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*
+
+clean: clean_mlx5
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2019 Mellanox Technologies, Ltd
+
+# This driver relies on Linux-only interfaces (ibverbs/rdma-core).
+if not is_linux
+	build = false
+	reason = 'only supported on Linux'
+	subdir_done()
+endif
+build = true
+
+# With ibverbs_link=dlopen, rdma-core is loaded at run time through a
+# separate glue plug-in; its file name and version are passed down to
+# the C code as the MLX5_GLUE* macros.
+pmd_dlopen = (get_option('ibverbs_link') == 'dlopen')
+LIB_GLUE_BASE = 'librte_pmd_mlx5_glue.so'
+LIB_GLUE_VERSION = '20.02.0'
+LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION
+if pmd_dlopen
+	dpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1)
+	cflags += [
+		'-DMLX5_GLUE="@0@"'.format(LIB_GLUE),
+		'-DMLX5_GLUE_VERSION="@0@"'.format(LIB_GLUE_VERSION),
+	]
+endif
+
+# Locate rdma-core libraries: prefer pkg-config, fall back to a plain
+# library search; missing either library disables the build with a
+# recorded reason.
+libnames = [ 'mlx5', 'ibverbs' ]
+libs = []
+foreach libname:libnames
+	lib = dependency('lib' + libname, required:false)
+	if not lib.found()
+		lib = cc.find_library(libname, required:false)
+	endif
+	if lib.found()
+		libs += lib
+	else
+		build = false
+		reason = 'missing dependency, "' + libname + '"'
+	endif
+endforeach
+
+if build
+ allow_experimental_apis = true
+ deps += ['hash', 'pci', 'net', 'eal']
+ ext_deps += libs
+ sources = files(
+ 'mlx5_devx_cmds.c',
+ 'mlx5_common.c',
+ )
+ if not pmd_dlopen
+ sources += files('mlx5_glue.c')
+ endif
+ cflags_options = [
+ '-std=c11',
+ '-Wno-strict-prototypes',
+ '-D_BSD_SOURCE',
+ '-D_DEFAULT_SOURCE',
+ '-D_XOPEN_SOURCE=600'
+ ]
+ foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+ endforeach
+ if get_option('buildtype').contains('debug')
+ cflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]
+ else
+ cflags += [ '-DNDEBUG', '-UPEDANTIC' ]
+ endif
+ # To maintain the compatibility with the make build system
+ # mlx5_autoconf.h file is still generated.
+ # input array for meson member search:
+ # [ "MACRO to define if found", "header for the search",
+ # "symbol to search", "struct member to search" ]
+ has_member_args = [
+ [ 'HAVE_IBV_MLX5_MOD_SWP', 'infiniband/mlx5dv.h',
+ 'struct mlx5dv_sw_parsing_caps', 'sw_parsing_offloads' ],
+ [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V42', 'infiniband/verbs.h',
+ 'struct ibv_counter_set_init_attr', 'counter_set_id' ],
+ [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V45', 'infiniband/verbs.h',
+ 'struct ibv_counters_init_attr', 'comp_mask' ],
+ ]
+ # input array for meson symbol search:
+ # [ "MACRO to define if found", "header for the search",
+ # "symbol to search" ]
+ has_sym_args = [
+ [ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX' ],
+ [ 'HAVE_IBV_DEVICE_TUNNEL_SUPPORT', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS' ],
+ [ 'HAVE_IBV_MLX5_MOD_MPW', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED' ],
+ [ 'HAVE_IBV_MLX5_MOD_CQE_128B_COMP', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP' ],
+ [ 'HAVE_IBV_MLX5_MOD_CQE_128B_PAD', 'infiniband/mlx5dv.h',
+ 'MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD' ],
+ [ 'HAVE_IBV_FLOW_DV_SUPPORT', 'infiniband/mlx5dv.h',
+ 'mlx5dv_create_flow_action_packet_reformat' ],
+ [ 'HAVE_IBV_DEVICE_MPLS_SUPPORT', 'infiniband/verbs.h',
+ 'IBV_FLOW_SPEC_MPLS' ],
+ [ 'HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING', 'infiniband/verbs.h',
+ 'IBV_WQ_FLAGS_PCI_WRITE_END_PADDING' ],
+ [ 'HAVE_IBV_WQ_FLAG_RX_END_PADDING', 'infiniband/verbs.h',
+ 'IBV_WQ_FLAG_RX_END_PADDING' ],
+ [ 'HAVE_MLX5DV_DR_DEVX_PORT', 'infiniband/mlx5dv.h',
+ 'mlx5dv_query_devx_port' ],
+ [ 'HAVE_IBV_DEVX_OBJ', 'infiniband/mlx5dv.h',
+ 'mlx5dv_devx_obj_create' ],
+ [ 'HAVE_IBV_FLOW_DEVX_COUNTERS', 'infiniband/mlx5dv.h',
+ 'MLX5DV_FLOW_ACTION_COUNTERS_DEVX' ],
+ [ 'HAVE_IBV_DEVX_ASYNC', 'infiniband/mlx5dv.h',
+ 'mlx5dv_devx_obj_query_async' ],
+ [ 'HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dr_action_create_dest_devx_tir' ],
+ [ 'HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dr_action_create_flow_meter' ],
+ [ 'HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD', 'infiniband/mlx5dv.h',
+ 'MLX5_MMAP_GET_NC_PAGES_CMD' ],
+ [ 'HAVE_MLX5DV_DR', 'infiniband/mlx5dv.h',
+ 'MLX5DV_DR_DOMAIN_TYPE_NIC_RX' ],
+ [ 'HAVE_MLX5DV_DR_ESWITCH', 'infiniband/mlx5dv.h',
+ 'MLX5DV_DR_DOMAIN_TYPE_FDB' ],
+ [ 'HAVE_MLX5DV_DR_VLAN', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dr_action_create_push_vlan' ],
+ [ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseKR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseCR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseSR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseSR4_Full' ],
+ [ 'HAVE_SUPPORTED_40000baseLR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_40000baseLR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseKR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseKR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseCR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseCR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseSR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseSR4_Full' ],
+ [ 'HAVE_SUPPORTED_56000baseLR4_Full', 'linux/ethtool.h',
+ 'SUPPORTED_56000baseLR4_Full' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_25G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_25000baseCR_Full_BIT' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_50G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT' ],
+ [ 'HAVE_ETHTOOL_LINK_MODE_100G', 'linux/ethtool.h',
+ 'ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT' ],
+ [ 'HAVE_IFLA_NUM_VF', 'linux/if_link.h',
+ 'IFLA_NUM_VF' ],
+ [ 'HAVE_IFLA_EXT_MASK', 'linux/if_link.h',
+ 'IFLA_EXT_MASK' ],
+ [ 'HAVE_IFLA_PHYS_SWITCH_ID', 'linux/if_link.h',
+ 'IFLA_PHYS_SWITCH_ID' ],
+ [ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',
+ 'IFLA_PHYS_PORT_NAME' ],
+ [ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',
+ 'RDMA_NL_NLDEV' ],
+ [ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_CMD_GET' ],
+ [ 'HAVE_RDMA_NLDEV_CMD_PORT_GET', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_CMD_PORT_GET' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_DEV_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_DEV_INDEX' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_DEV_NAME', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_DEV_NAME' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_PORT_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_PORT_INDEX' ],
+ [ 'HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX', 'rdma/rdma_netlink.h',
+ 'RDMA_NLDEV_ATTR_NDEV_INDEX' ],
+ [ 'HAVE_MLX5_DR_FLOW_DUMP', 'infiniband/mlx5dv.h',
+ 'mlx5dv_dump_dr_domain'],
+ ]
+ config = configuration_data()
+ foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2],
+ dependencies: libs))
+ endforeach
+ foreach arg:has_member_args
+ file_prefix = '#include <' + arg[1] + '>'
+ config.set(arg[0], cc.has_member(arg[2], arg[3],
+ prefix : file_prefix, dependencies: libs))
+ endforeach
+ configure_file(output : 'mlx5_autoconf.h', configuration : config)
+endif
+# Build Glue Library
+# Built only in dlopen mode: a small shared object carrying the
+# rdma-core calls, installed next to the PMDs under a "-glue" suffix
+# so mlx5_glue_init() can locate it at run time.
+if pmd_dlopen and build
+	dlopen_name = 'mlx5_glue'
+	dlopen_lib_name = driver_name_fmt.format(dlopen_name)
+	dlopen_so_version = LIB_GLUE_VERSION
+	dlopen_sources = files('mlx5_glue.c')
+	dlopen_install_dir = [ eal_pmd_path + '-glue' ]
+	dlopen_includes = [global_inc]
+	dlopen_includes += include_directories(
+		'../../../lib/librte_eal/common/include/generic',
+	)
+	shared_lib = shared_library(
+		dlopen_lib_name,
+		dlopen_sources,
+		include_directories: dlopen_includes,
+		c_args: cflags,
+		dependencies: libs,
+		link_args: [
+			'-Wl,-export-dynamic',
+			'-Wl,-h,@0@'.format(LIB_GLUE),
+		],
+		soversion: dlopen_so_version,
+		install: true,
+		install_dir: dlopen_install_dir,
+	)
+endif
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#include <dlfcn.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_errno.h>
+
+#include "mlx5_common.h"
+#include "mlx5_common_utils.h"
+#include "mlx5_glue.h"
+
+
+int mlx5_common_logtype;
+
+
+#ifdef RTE_IBVERBS_LINK_DLOPEN
+
+/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param buf[out]
+ *   Output buffer, should be large enough otherwise NULL is returned.
+ * @param size
+ *   Size of @p out.
+ *
+ * @return
+ *   Pointer to @p buf or @p NULL in case suffix cannot be appended.
+ */
+static char *
+mlx5_glue_path(char *buf, size_t size)
+{
+	static const char *const bad[] = { "/", ".", "..", NULL };
+	const char *path = RTE_EAL_PMD_PATH;
+	size_t len = strlen(path);
+	size_t off;
+	int i;
+
+	/* Drop trailing '/' characters. */
+	while (len && path[len - 1] == '/')
+		--len;
+	/* Find the start of the last path component. */
+	for (off = len; off && path[off - 1] != '/'; --off)
+		;
+	/* Refuse to suffix "/", "." or ".." as the last component. */
+	for (i = 0; bad[i]; ++i)
+		if (!strncmp(path + off, bad[i], (int)(len - off)))
+			goto error;
+	i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+	if (i == -1 || (size_t)i >= size)
+		goto error;
+	return buf;
+error:
+	/* NOTE(review): message has no trailing "\n" — confirm this
+	 * matches the RTE_LOG newline convention used elsewhere.
+	 */
+	RTE_LOG(ERR, PMD, "unable to append \"-glue\" to last component of"
+		" RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"), please"
+		" re-configure DPDK");
+	return NULL;
+}
+#endif
+
+/**
+ * Initialization routine for run-time dependency on rdma-core.
+ *
+ * Registers the common log type, sets rdma-core related environment
+ * variables and, when built for dlopen linkage, locates and loads
+ * the glue plug-in, then resolves and validates its "mlx5_glue"
+ * symbol before calling fork_init().
+ */
+RTE_INIT_PRIO(mlx5_glue_init, CLASS)
+{
+	void *handle = NULL;
+
+	/* Initialize common log type. */
+	mlx5_common_logtype = rte_log_register("pmd.common.mlx5");
+	if (mlx5_common_logtype >= 0)
+		rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
+	/*
+	 * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
+	 * huge pages. Calling ibv_fork_init() during init allows
+	 * applications to use fork() safely for purposes other than
+	 * using this PMD, which is not supported in forked processes.
+	 */
+	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
+	/* Match the size of Rx completion entry to the size of a cacheline. */
+	if (RTE_CACHE_LINE_SIZE == 128)
+		setenv("MLX5_CQE_SIZE", "128", 0);
+	/*
+	 * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
+	 * cleanup all the Verbs resources even when the device was removed.
+	 */
+	setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
+	/* The glue initialization was done earlier by mlx5 common library. */
+#ifdef RTE_IBVERBS_LINK_DLOPEN
+	char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
+	const char *path[] = {
+		/*
+		 * A basic security check is necessary before trusting
+		 * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
+		 */
+		(geteuid() == getuid() && getegid() == getgid() ?
+		 getenv("MLX5_GLUE_PATH") : NULL),
+		/*
+		 * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+		 * variant, otherwise let dlopen() look up libraries on its
+		 * own.
+		 */
+		(*RTE_EAL_PMD_PATH ?
+		 mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
+	};
+	unsigned int i = 0;
+	void **sym;
+	const char *dlmsg;
+
+	/*
+	 * Each path[] entry may hold several candidate directories
+	 * separated by ':' or ';'; try them in order until one yields
+	 * a loadable glue object.
+	 */
+	while (!handle && i != RTE_DIM(path)) {
+		const char *end;
+		size_t len;
+		int ret;
+
+		if (!path[i]) {
+			++i;
+			continue;
+		}
+		end = strpbrk(path[i], ":;");
+		if (!end)
+			end = path[i] + strlen(path[i]);
+		len = end - path[i];
+		ret = 0;
+		do {
+			/*
+			 * name[] is a VLA re-sized on each pass: the first
+			 * pass (ret == 0) only measures the required length
+			 * through snprintf(), the second builds the name.
+			 */
+			char name[ret + 1];
+
+			ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
+				       (int)len, path[i],
+				       (!len || *(end - 1) == '/') ? "" : "/");
+			if (ret == -1)
+				break;
+			if (sizeof(name) != (size_t)ret + 1)
+				continue;
+			DRV_LOG(DEBUG, "Looking for rdma-core glue as "
+				"\"%s\"", name);
+			handle = dlopen(name, RTLD_LAZY);
+			break;
+		} while (1);
+		path[i] = end + 1;
+		if (!*end)
+			++i;
+	}
+	if (!handle) {
+		rte_errno = EINVAL;
+		dlmsg = dlerror();
+		if (dlmsg)
+			DRV_LOG(WARNING, "Cannot load glue library: %s", dlmsg);
+		goto glue_error;
+	}
+	sym = dlsym(handle, "mlx5_glue");
+	if (!sym || !*sym) {
+		rte_errno = EINVAL;
+		dlmsg = dlerror();
+		if (dlmsg)
+			DRV_LOG(ERR, "Cannot resolve glue symbol: %s", dlmsg);
+		goto glue_error;
+	}
+	mlx5_glue = *sym;
+#endif /* RTE_IBVERBS_LINK_DLOPEN */
+#ifndef NDEBUG
+	/* Glue structure must not contain any NULL pointers. */
+	{
+		unsigned int i;
+
+		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
+			assert(((const void *const *)mlx5_glue)[i]);
+	}
+#endif
+	/* The glue version must match the one compiled against. */
+	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
+		rte_errno = EINVAL;
+		DRV_LOG(ERR, "rdma-core glue \"%s\" mismatch: \"%s\" is "
+			"required", mlx5_glue->version, MLX5_GLUE_VERSION);
+		goto glue_error;
+	}
+	mlx5_glue->fork_init();
+	return;
+glue_error:
+	if (handle)
+		dlclose(handle);
+	DRV_LOG(WARNING, "Cannot initialize MLX5 common due to missing"
+		" run-time dependency on rdma-core libraries (libibverbs,"
+		" libmlx5)");
+	mlx5_glue = NULL;
+	return;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_COMMON_H_
+#define RTE_PMD_MLX5_COMMON_H_
+
+#include <assert.h>
+
+#include <rte_log.h>
+
+
+/*
+ * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
+ * manner.
+ */
+#define PMD_DRV_LOG_STRIP(a, b) a
+#define PMD_DRV_LOG_OPAREN (
+#define PMD_DRV_LOG_CPAREN )
+#define PMD_DRV_LOG_COMMA ,
+
+/* Return the file name part of a path (the text after the last '/'). */
+static inline const char *
+pmd_drv_log_basename(const char *s)
+{
+	const char *n = s;
+
+	/* Each time a '/' is passed, restart the result at the next char. */
+	while (*n)
+		if (*(n++) == '/')
+			s = n;
+	return s;
+}
+
+#define PMD_DRV_LOG___(level, type, name, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ type, \
+ RTE_FMT(name ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+/*
+ * When debugging is enabled (NDEBUG not defined), file, line and function
+ * information replace the driver name (MLX5_DRIVER_NAME) in log messages.
+ */
+#ifndef NDEBUG
+
+#define PMD_DRV_LOG__(level, type, name, ...) \
+ PMD_DRV_LOG___(level, type, name, "%s:%u: %s(): " __VA_ARGS__)
+#define PMD_DRV_LOG_(level, type, name, s, ...) \
+ PMD_DRV_LOG__(level, type, name,\
+ s "\n" PMD_DRV_LOG_COMMA \
+ pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \
+ __LINE__ PMD_DRV_LOG_COMMA \
+ __func__, \
+ __VA_ARGS__)
+
+#else /* NDEBUG */
+#define PMD_DRV_LOG__(level, type, name, ...) \
+ PMD_DRV_LOG___(level, type, name, __VA_ARGS__)
+#define PMD_DRV_LOG_(level, type, name, s, ...) \
+ PMD_DRV_LOG__(level, type, name, s "\n", __VA_ARGS__)
+
+#endif /* NDEBUG */
+
+/* claim_zero() does not perform any check when debugging is disabled. */
+#ifndef NDEBUG
+
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
+#define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
+
+#else /* NDEBUG */
+
+/* The expression is still evaluated for its side effects; only the
+ * assertion on its value is dropped.
+ */
+#define DEBUG(...) (void)0
+#define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
+
+#endif /* NDEBUG */
+
+/* Allocate a buffer on the stack and fill it with a printf format string. */
+#define MKSTR(name, ...) \
+ int mkstr_size_##name = snprintf(NULL, 0, "" __VA_ARGS__); \
+ char name[mkstr_size_##name + 1]; \
+ \
+ snprintf(name, sizeof(name), "" __VA_ARGS__)
+
+#endif /* RTE_PMD_MLX5_COMMON_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_COMMON_UTILS_H_
+#define RTE_PMD_MLX5_COMMON_UTILS_H_
+
+#include "mlx5_common.h"
+
+
+extern int mlx5_common_logtype;
+
+#define MLX5_COMMON_LOG_PREFIX "common_mlx5"
+/* Generic printf()-like logging macro with automatic line feed.
+ * The PMD_DRV_LOG_STRIP/OPAREN/CPAREN helpers from mlx5_common.h
+ * splice the format string and arguments in a C99-compliant way.
+ */
+#define DRV_LOG(level, ...) \
+	PMD_DRV_LOG_(level, mlx5_common_logtype, MLX5_COMMON_LOG_PREFIX, \
+		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
+		PMD_DRV_LOG_CPAREN)
+
+#endif /* RTE_PMD_MLX5_COMMON_UTILS_H_ */
--- /dev/null
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright 2018 Mellanox Technologies, Ltd */
+
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_malloc.h>
+
+#include "mlx5_prm.h"
+#include "mlx5_devx_cmds.h"
+#include "mlx5_common_utils.h"
+
+
+/**
+ * Allocate flow counters via devx interface.
+ *
+ * @param[in] ctx
+ *   ibv contexts returned from mlx5dv_open_device.
+ * @param bulk_n_128
+ *   Bulk counter numbers in 128 counters units.
+ *
+ * @return
+ *   Pointer to counter object on success, NULL otherwise and
+ *   rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)
+{
+	struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
+	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
+
+	if (!dcs) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(alloc_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
+	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
+					      sizeof(in), out, sizeof(out));
+	if (!dcs->obj) {
+		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
+		rte_errno = errno;
+		rte_free(dcs);
+		return NULL;
+	}
+	/* Record the counter id returned by the firmware command. */
+	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+	return dcs;
+}
+
+/**
+ * Query flow counters values.
+ *
+ * @param[in] dcs
+ *   devx object that was obtained from mlx5_devx_cmd_flow_counter_alloc.
+ * @param[in] clear
+ *   Whether hardware should clear the counters after the query or not.
+ * @param[in] n_counters
+ *   0 in case of 1 counter to read, otherwise the counter number to read.
+ * @param pkts
+ *   The number of packets that matched the flow.
+ * @param bytes
+ *   The number of bytes that matched the flow.
+ * @param mkey
+ *   The mkey key for batch query.
+ * @param addr
+ *   The address in the mkey range for batch query.
+ * @param cmd_comp
+ *   The completion object for asynchronous batch query.
+ * @param async_id
+ *   The ID to be returned in the asynchronous batch query response.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
+				 int clear, uint32_t n_counters,
+				 uint64_t *pkts, uint64_t *bytes,
+				 uint32_t mkey, void *addr,
+				 struct mlx5dv_devx_cmd_comp *cmd_comp,
+				 uint64_t async_id)
+{
+	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+			MLX5_ST_SZ_BYTES(traffic_counter);
+	/* NOTE(review): out_len counts bytes while the VLA element type
+	 * is uint32_t, so this over-allocates ~4x — harmless; confirm
+	 * whether that is intentional.
+	 */
+	uint32_t out[out_len];
+	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+	void *stats;
+	int rc;
+
+	MLX5_SET(query_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
+	MLX5_SET(query_flow_counter_in, in, clear, !!clear);
+
+	/* Batch mode: results are dumped to caller memory via mkey/addr. */
+	if (n_counters) {
+		MLX5_SET(query_flow_counter_in, in, num_of_counters,
+			 n_counters);
+		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
+		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
+		MLX5_SET64(query_flow_counter_in, in, address,
+			   (uint64_t)(uintptr_t)addr);
+	}
+	/* Synchronous query without a completion object, async with one. */
+	if (!cmd_comp)
+		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+					       out_len);
+	else
+		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
+						     out_len, async_id,
+						     cmd_comp);
+	if (rc) {
+		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
+		rte_errno = rc;
+		return -rc;
+	}
+	/* Single-counter synchronous query: parse the values from out[]. */
+	if (!n_counters) {
+		stats = MLX5_ADDR_OF(query_flow_counter_out,
+				     out, flow_statistics);
+		*pkts = MLX5_GET64(traffic_counter, stats, packets);
+		*bytes = MLX5_GET64(traffic_counter, stats, octets);
+	}
+	return 0;
+}
+
+/**
+ * Create a new mkey.
+ *
+ * @param[in] ctx
+ *   ibv contexts returned from mlx5dv_open_device.
+ * @param[in] attr
+ *   Attributes of the requested mkey.
+ *
+ * @return
+ *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
+ *   is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
+			  struct mlx5_devx_mkey_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_mkey_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+	void *mkc;
+	struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
+	size_t pgsize;
+	uint32_t translation_size;
+
+	if (!mkey) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	pgsize = sysconf(_SC_PAGESIZE);
+	/* NOTE(review): the aligned size is in bytes, not pages — confirm
+	 * the octword count (16 bytes per octword) is computed as intended.
+	 */
+	translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
+	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+		 translation_size);
+	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	/* Local read/write access with MTT translation mode. */
+	MLX5_SET(mkc, mkc, lw, 0x1);
+	MLX5_SET(mkc, mkc, lr, 0x1);
+	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(mkc, mkc, pd, attr->pd);
+	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
+	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
+	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
+	MLX5_SET64(mkc, mkc, len, attr->size);
+	MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
+	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+					       sizeof(out));
+	if (!mkey->obj) {
+		DRV_LOG(ERR, "Can't create mkey - error %d", errno);
+		rte_errno = errno;
+		rte_free(mkey);
+		return NULL;
+	}
+	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
+	/* Combine the mkey index with its variant byte (low byte of
+	 * umem_id), mirroring the mkey_7_0 field set above.
+	 */
+	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
+	return mkey;
+}
+
+/**
+ * Get status of devx command response.
+ * Mainly used for asynchronous commands.
+ *
+ * @param[in] out
+ *   The out response buffer.
+ *
+ * @return
+ *   0 on success, non-zero value otherwise (-EINVAL for a NULL buffer,
+ *   the raw devx status field otherwise).
+ */
+int
+mlx5_devx_get_out_command_status(void *out)
+{
+	int status;
+
+	if (!out)
+		return -EINVAL;
+	status = MLX5_GET(query_flow_counter_out, out, status);
+	if (status) {
+		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
+
+		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
+			syndrome);
+	}
+	return status;
+}
+
+/**
+ * Destroy any object allocated by a Devx API.
+ *
+ * @param[in] obj
+ *   Pointer to a general object. NULL is tolerated and treated as
+ *   success.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
+{
+	int ret;
+
+	if (!obj)
+		return 0;
+	ret = mlx5_glue->devx_obj_destroy(obj->obj);
+	/* The wrapper is freed regardless of the destroy result. */
+	rte_free(obj);
+	return ret;
+}
+
+/**
+ * Query NIC vport context.
+ * Fills minimal inline attribute.
+ *
+ * @param[in] ctx
+ *   ibv contexts returned from mlx5dv_open_device.
+ * @param[in] vport
+ *   vport index
+ * @param[out] attr
+ *   Attributes device values.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+static int
+mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,
+				      unsigned int vport,
+				      struct mlx5_hca_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+	void *vctx;
+	int status, syndrome, rc;
+
+	/* Query NIC vport context to determine inline mode. */
+	MLX5_SET(query_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+	/* A non-zero vport index targets another vport. */
+	if (vport)
+		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+	rc = mlx5_glue->devx_general_cmd(ctx,
+					 in, sizeof(in),
+					 out, sizeof(out));
+	if (rc)
+		goto error;
+	status = MLX5_GET(query_nic_vport_context_out, out, status);
+	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
+	if (status) {
+		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
+			"status %x, syndrome = %x",
+			status, syndrome);
+		/* NOTE(review): returns -1 without setting rte_errno —
+		 * confirm callers do not depend on rte_errno here.
+		 */
+		return -1;
+	}
+	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+			    nic_vport_context);
+	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
+					   min_wqe_inline_mode);
+	return 0;
+error:
+	/* Normalize positive error codes to a negative return value. */
+	rc = (rc > 0) ? -rc : rc;
+	return rc;
+}
+
+/**
+ * Query HCA attributes.
+ * Using those attributes we can check on run time if the device
+ * is having the required capabilities.
+ *
+ * Issues up to three QUERY_HCA_CAP commands (general caps, QOS caps,
+ * Ethernet offload caps) and may additionally query the NIC vport
+ * context for the inline mode.
+ *
+ * @param[in] ctx
+ *   ibv contexts returned from mlx5dv_open_device.
+ * @param[out] attr
+ *   Attributes device values.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
+			     struct mlx5_hca_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
+	void *hcattr;
+	int status, syndrome, rc;
+
+	/* First query: general device capabilities. */
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod,
+		 MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+		 MLX5_HCA_CAP_OPMOD_GET_CUR);
+
+	rc = mlx5_glue->devx_general_cmd(ctx,
+					 in, sizeof(in), out, sizeof(out));
+	if (rc)
+		goto error;
+	status = MLX5_GET(query_hca_cap_out, out, status);
+	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+	if (status) {
+		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
+			"status %x, syndrome = %x",
+			status, syndrome);
+		return -1;
+	}
+	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+	attr->flow_counter_bulk_alloc_bitmap =
+			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
+	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
+					    flow_counters_dump);
+	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
+	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
+	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
+						log_max_hairpin_queues);
+	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
+						    log_max_hairpin_wq_data_sz);
+	/*
+	 * NOTE(review): this reads the log_min_hairpin_wq_data_sz PRM field
+	 * into log_max_hairpin_num_packets — looks like the wrong field was
+	 * picked; confirm against the PRM capability layout before changing.
+	 */
+	attr->log_max_hairpin_num_packets = MLX5_GET
+		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
+	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
+	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
+					  eth_net_offloads);
+	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
+	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
+					       flex_parser_protocols);
+	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+	if (attr->qos.sup) {
+		/* Second query: QOS capabilities (reuses the in buffer). */
+		MLX5_SET(query_hca_cap_in, in, op_mod,
+			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
+			 MLX5_HCA_CAP_OPMOD_GET_CUR);
+		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
+						 out, sizeof(out));
+		if (rc)
+			goto error;
+		/*
+		 * Fix: re-read status/syndrome from this response. The
+		 * previous values belonged to the general capability query
+		 * (and were necessarily zero here), so a QOS query failure
+		 * was silently ignored.
+		 */
+		status = MLX5_GET(query_hca_cap_out, out, status);
+		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+		if (status) {
+			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
+				" status %x, syndrome = %x",
+				status, syndrome);
+			return -1;
+		}
+		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+		attr->qos.srtcm_sup =
+				MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+		attr->qos.log_max_flow_meter =
+				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
+		attr->qos.flow_meter_reg_c_ids =
+			MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
+		attr->qos.flow_meter_reg_share =
+			MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+	}
+	if (!attr->eth_net_offloads)
+		return 0;
+
+	/* Query HCA offloads for Ethernet protocol. */
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod,
+		 MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
+		 MLX5_HCA_CAP_OPMOD_GET_CUR);
+
+	rc = mlx5_glue->devx_general_cmd(ctx,
+					 in, sizeof(in),
+					 out, sizeof(out));
+	if (rc) {
+		/* Ethernet offload caps unknown - report them unsupported. */
+		attr->eth_net_offloads = 0;
+		goto error;
+	}
+	status = MLX5_GET(query_hca_cap_out, out, status);
+	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+	if (status) {
+		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
+			"status %x, syndrome = %x",
+			status, syndrome);
+		attr->eth_net_offloads = 0;
+		return -1;
+	}
+	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
+					 hcattr, wqe_vlan_insert);
+	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
+				 lro_cap);
+	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
+					hcattr, tunnel_lro_gre);
+	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
+					  hcattr, tunnel_lro_vxlan);
+	attr->lro_max_msg_sz_mode = MLX5_GET
+					(per_protocol_networking_offload_caps,
+					 hcattr, lro_max_msg_sz_mode);
+	for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
+		attr->lro_timer_supported_periods[i] =
+			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
+				 lro_timer_supported_periods[i]);
+	}
+	attr->tunnel_stateless_geneve_rx =
+			    MLX5_GET(per_protocol_networking_offload_caps,
+				     hcattr, tunnel_stateless_geneve_rx);
+	attr->geneve_max_opt_len =
+		    MLX5_GET(per_protocol_networking_offload_caps,
+			     hcattr, max_geneve_opt_len);
+	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
+					 hcattr, wqe_inline_mode);
+	attr->tunnel_stateless_gtp = MLX5_GET
+					(per_protocol_networking_offload_caps,
+					 hcattr, tunnel_stateless_gtp);
+	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return 0;
+	/* Inline mode comes from the vport context - query it if possible. */
+	if (attr->eth_virt) {
+		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
+		if (rc) {
+			attr->eth_virt = 0;
+			goto error;
+		}
+	}
+	return 0;
+error:
+	/* Normalize a positive errno-style return code to a negative value. */
+	rc = (rc > 0) ? -rc : rc;
+	return rc;
+}
+
+/**
+ * Query TIS transport domain from QP verbs object using DevX API.
+ *
+ * @param[in] qp
+ *   Pointer to verbs QP returned by ibv_create_qp .
+ * @param[in] tis_num
+ *   TIS number of TIS to query.
+ * @param[out] tis_td
+ *   Pointer to TIS transport domain variable, to be set by the routine.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
+			      uint32_t *tis_td)
+{
+	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
+	int rc;
+	void *tis_ctx;
+
+	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
+	MLX5_SET(query_tis_in, in, tisn, tis_num);
+	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
+	if (rc) {
+		DRV_LOG(ERR, "Failed to query QP using DevX");
+		/* Negate so the caller receives a negative value. */
+		return -rc;
+	} /* Fix: removed a stray ';' (empty statement) after this block. */
+	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
+	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
+	return 0;
+}
+
+/**
+ * Fill WQ data for DevX API command.
+ * Utility function for use when creating DevX objects containing a WQ.
+ *
+ * Copies every field of the host-side attributes structure verbatim into
+ * the firmware WQ context; no field is validated or derived here.
+ *
+ * @param[in] wq_ctx
+ *   Pointer to WQ context to fill with data.
+ * @param [in] wq_attr
+ *   Pointer to WQ attributes structure to fill in WQ context.
+ */
+static void
+devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
+{
+	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
+	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
+	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
+	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
+	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
+	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
+	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
+	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
+	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
+	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
+	/* 64-bit fields use the dedicated MLX5_SET64 accessor. */
+	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
+	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
+	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
+	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
+	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
+	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
+	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
+	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
+	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
+		 wq_attr->log_hairpin_num_packets);
+	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
+	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
+		 wq_attr->single_wqe_log_num_of_strides);
+	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
+	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
+		 wq_attr->single_stride_log_num_of_bytes);
+	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
+	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
+	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
+}
+
+/**
+ * Create RQ using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ * @param [in] rq_attr
+ *   Pointer to create RQ attributes structure.
+ * @param [in] socket
+ *   CPU socket ID for allocations.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
+			struct mlx5_devx_create_rq_attr *rq_attr,
+			int socket)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
+	void *rq_ctx, *wq_ctx;
+	struct mlx5_devx_wq_attr *wq_attr;
+	struct mlx5_devx_obj *rq = NULL;
+
+	/* Wrapper object is allocated on the caller-requested NUMA socket. */
+	rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
+	if (!rq) {
+		DRV_LOG(ERR, "Failed to allocate RQ data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Translate the host attributes into the firmware RQ context. */
+	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
+	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
+	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
+	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
+	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
+	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
+	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
+	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
+	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
+	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
+	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
+	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
+	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
+	/* The embedded WQ context is filled by the shared helper. */
+	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
+	wq_attr = &rq_attr->wq_attr;
+	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
+	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+					     out, sizeof(out));
+	if (!rq->obj) {
+		DRV_LOG(ERR, "Failed to create RQ using DevX");
+		rte_errno = errno;
+		rte_free(rq);
+		return NULL;
+	}
+	/* RQ number assigned by firmware becomes the object ID. */
+	rq->id = MLX5_GET(create_rq_out, out, rqn);
+	return rq;
+}
+
+/**
+ * Modify RQ using DevX API.
+ *
+ * Only the fields selected by rq_attr->modify_bitmask (plus state and the
+ * hairpin peer fields, which are set unconditionally) are programmed.
+ *
+ * @param[in] rq
+ *   Pointer to RQ object structure.
+ * @param [in] rq_attr
+ *   Pointer to modify RQ attributes structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
+			struct mlx5_devx_modify_rq_attr *rq_attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
+	void *rq_ctx, *wq_ctx;
+	int ret;
+
+	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
+	MLX5_SET(modify_rq_in, in, rqn, rq->id);
+	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
+	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
+	/* Each optional field is guarded by its bit in the modify bitmask. */
+	if (rq_attr->modify_bitmask &
+			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
+		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
+	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
+		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
+	if (rq_attr->modify_bitmask &
+			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
+		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
+	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
+	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
+	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
+		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
+		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
+	}
+	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
+					 out, sizeof(out));
+	if (ret) {
+		DRV_LOG(ERR, "Failed to modify RQ using DevX");
+		rte_errno = errno;
+		return -errno;
+	}
+	return ret;
+}
+
+/**
+ * Create TIR using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ * @param [in] tir_attr
+ *   Pointer to TIR attributes structure.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
+			 struct mlx5_devx_tir_attr *tir_attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+	void *tir_ctx, *outer, *inner;
+	struct mlx5_devx_obj *tir = NULL;
+	int i;
+
+	tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
+	if (!tir) {
+		DRV_LOG(ERR, "Failed to allocate TIR data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
+	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
+	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
+		 tir_attr->lro_timeout_period_usecs);
+	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
+	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
+	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
+	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
+	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
+		 tir_attr->tunneled_offload_en);
+	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
+	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
+	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
+	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
+	/* 10 matches the rx_hash_toeplitz_key[10] array in the attributes. */
+	for (i = 0; i < 10; i++) {
+		MLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],
+			 tir_attr->rx_hash_toeplitz_key[i]);
+	}
+	/* Hash field selection for the outer, then the inner headers. */
+	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
+	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
+		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
+	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
+		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
+	MLX5_SET(rx_hash_field_select, outer, selected_fields,
+		 tir_attr->rx_hash_field_selector_outer.selected_fields);
+	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
+	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
+		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
+	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
+		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
+	MLX5_SET(rx_hash_field_select, inner, selected_fields,
+		 tir_attr->rx_hash_field_selector_inner.selected_fields);
+	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+					      out, sizeof(out));
+	if (!tir->obj) {
+		DRV_LOG(ERR, "Failed to create TIR using DevX");
+		rte_errno = errno;
+		rte_free(tir);
+		return NULL;
+	}
+	tir->id = MLX5_GET(create_tir_out, out, tirn);
+	return tir;
+}
+
+/**
+ * Create RQT using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ * @param [in] rqt_attr
+ *   Pointer to RQT attributes structure.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
+			 struct mlx5_devx_rqt_attr *rqt_attr)
+{
+	uint32_t *in = NULL;
+	/* The in buffer is variable-sized: base struct + one dword per RQ. */
+	uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
+			 rqt_attr->rqt_actual_size * sizeof(uint32_t);
+	uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+	void *rqt_ctx;
+	struct mlx5_devx_obj *rqt = NULL;
+	int i;
+
+	in = rte_calloc(__func__, 1, inlen, 0);
+	if (!in) {
+		DRV_LOG(ERR, "Failed to allocate RQT IN data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
+	if (!rqt) {
+		DRV_LOG(ERR, "Failed to allocate RQT data");
+		rte_errno = ENOMEM;
+		rte_free(in);
+		return NULL;
+	}
+	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+	rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+	MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
+	MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
+	for (i = 0; i < rqt_attr->rqt_actual_size; i++)
+		MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
+	rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
+	/* The in buffer is no longer needed once the command was issued. */
+	rte_free(in);
+	if (!rqt->obj) {
+		DRV_LOG(ERR, "Failed to create RQT using DevX");
+		rte_errno = errno;
+		rte_free(rqt);
+		return NULL;
+	}
+	rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
+	return rqt;
+}
+
+/**
+ * Create SQ using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ * @param [in] sq_attr
+ *   Pointer to SQ attributes structure.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
+			struct mlx5_devx_create_sq_attr *sq_attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
+	void *sq_ctx;
+	void *wq_ctx;
+	struct mlx5_devx_wq_attr *wq_attr;
+	struct mlx5_devx_obj *sq = NULL;
+
+	sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
+	if (!sq) {
+		DRV_LOG(ERR, "Failed to allocate SQ data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Translate the host attributes into the firmware SQ context. */
+	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+	sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
+	MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
+	MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
+	MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
+	/*
+	 * Fix: program allow_multi_pkt_send_wqe from its own attribute.
+	 * It was previously (mis)copied from flush_in_error_en, leaving
+	 * sq_attr->allow_multi_pkt_send_wqe unused.
+	 */
+	MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
+		 sq_attr->allow_multi_pkt_send_wqe);
+	MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
+		 sq_attr->min_wqe_inline_mode);
+	MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
+	MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
+	MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
+	MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
+	MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
+	MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
+	MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
+		 sq_attr->packet_pacing_rate_limit_index);
+	MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
+	MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
+	/* The embedded WQ context is filled by the shared helper. */
+	wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
+	wq_attr = &sq_attr->wq_attr;
+	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
+	sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+					     out, sizeof(out));
+	if (!sq->obj) {
+		DRV_LOG(ERR, "Failed to create SQ using DevX");
+		rte_errno = errno;
+		rte_free(sq);
+		return NULL;
+	}
+	sq->id = MLX5_GET(create_sq_out, out, sqn);
+	return sq;
+}
+
+/**
+ * Modify SQ using DevX API.
+ *
+ * @param[in] sq
+ *   Pointer to SQ object structure.
+ * @param [in] sq_attr
+ *   Pointer to SQ attributes structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
+			struct mlx5_devx_modify_sq_attr *sq_attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+	void *ctx;
+	int ret;
+
+	/* Command header: opcode, target SQ number and its current state. */
+	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+	MLX5_SET(modify_sq_in, in, sqn, sq->id);
+	MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
+	/* New state and hairpin peer settings go into the SQ context. */
+	ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+	MLX5_SET(sqc, ctx, state, sq_attr->state);
+	MLX5_SET(sqc, ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
+	MLX5_SET(sqc, ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
+	ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
+					 out, sizeof(out));
+	if (ret) {
+		DRV_LOG(ERR, "Failed to modify SQ using DevX");
+		rte_errno = errno;
+		return -errno;
+	}
+	return ret;
+}
+
+/**
+ * Create TIS using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ * @param [in] tis_attr
+ *   Pointer to TIS attributes structure.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
+			 struct mlx5_devx_tis_attr *tis_attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
+	struct mlx5_devx_obj *tis = NULL;
+	void *tis_ctx;
+
+	tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
+	if (!tis) {
+		DRV_LOG(ERR, "Failed to allocate TIS object");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+	tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
+	MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
+		 tis_attr->strict_lag_tx_port_affinity);
+	/*
+	 * Fix: the second SET duplicated strict_lag_tx_port_affinity;
+	 * it was evidently meant to program the LAG TX port affinity
+	 * value, which was otherwise never used.
+	 */
+	MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
+		 tis_attr->lag_tx_port_affinity);
+	MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
+	MLX5_SET(tisc, tis_ctx, transport_domain,
+		 tis_attr->transport_domain);
+	tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+					      out, sizeof(out));
+	if (!tis->obj) {
+		DRV_LOG(ERR, "Failed to create TIS using DevX");
+		rte_errno = errno;
+		rte_free(tis);
+		return NULL;
+	}
+	tis->id = MLX5_GET(create_tis_out, out, tisn);
+	return tis;
+}
+
+/**
+ * Create transport domain using DevX API.
+ *
+ * @param[in] ctx
+ *   ibv_context returned from mlx5dv_open_device.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_td(struct ibv_context *ctx)
+{
+	uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+	struct mlx5_devx_obj *td = NULL;
+
+	td = rte_calloc(__func__, 1, sizeof(*td), 0);
+	if (!td) {
+		DRV_LOG(ERR, "Failed to allocate TD object");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(alloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+	td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+					     out, sizeof(out));
+	if (!td->obj) {
+		/* Fix: the message wrongly said "TIS" (copy-paste). */
+		DRV_LOG(ERR, "Failed to create TD using DevX");
+		rte_errno = errno;
+		rte_free(td);
+		return NULL;
+	}
+	td->id = MLX5_GET(alloc_transport_domain_out, out,
+			   transport_domain);
+	return td;
+}
+
+/**
+ * Dump all flows to file.
+ *
+ * @param[in] fdb_domain
+ *   FDB domain. May be NULL, in which case the FDB dump is skipped.
+ * @param[in] rx_domain
+ *   RX domain. Must not be NULL (asserted below).
+ * @param[in] tx_domain
+ *   TX domain. Must not be NULL (asserted below).
+ * @param[out] file
+ *   Pointer to file stream.
+ *
+ * @return
+ *   0 on success, a negative value otherwise
+ *   (-ENOTSUP when built without HAVE_MLX5_DR_FLOW_DUMP).
+ */
+int
+mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
+			void *rx_domain __rte_unused,
+			void *tx_domain __rte_unused, FILE *file __rte_unused)
+{
+	int ret = 0;
+
+#ifdef HAVE_MLX5_DR_FLOW_DUMP
+	if (fdb_domain) {
+		ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
+		if (ret)
+			return ret;
+	}
+	assert(rx_domain);
+	ret = mlx5_glue->dr_dump_domain(file, rx_domain);
+	if (ret)
+		return ret;
+	assert(tx_domain);
+	ret = mlx5_glue->dr_dump_domain(file, tx_domain);
+#else
+	ret = ENOTSUP;
+#endif
+	/* Glue returns positive errno-style codes; negate for the caller. */
+	return -ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2019 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_
+#define RTE_PMD_MLX5_DEVX_CMDS_H_
+
+#include "mlx5_glue.h"
+
+
+/* devX creation object */
+struct mlx5_devx_obj {
+	struct mlx5dv_devx_obj *obj; /* The DV object. */
+	int id; /* The object ID. */
+};
+
+/* Attributes used to create an indirect mkey via DevX. */
+struct mlx5_devx_mkey_attr {
+	uint64_t addr; /* Start address of the region to register. */
+	uint64_t size; /* Size of the region in bytes. */
+	uint32_t umem_id; /* Umem backing the region; low byte is mixed
+			   * into the returned mkey ID (see mkey_create).
+			   */
+	uint32_t pd; /* Protection domain number for the mkey. */
+};
+
+/* HCA qos attributes. */
+struct mlx5_hca_qos_attr {
+	uint32_t sup:1; /* Whether QOS is supported. */
+	uint32_t srtcm_sup:1; /* Whether srTCM mode is supported. */
+	uint32_t flow_meter_reg_share:1;
+	/* Whether reg_c share is supported. */
+	uint8_t log_max_flow_meter;
+	/* Power of the maximum supported meters. */
+	uint8_t flow_meter_reg_c_ids;
+	/* Bitmap of the reg_Cs available for flow meter to use. */
+
+};
+
+/* HCA supports this number of time periods for LRO. */
+#define MLX5_LRO_NUM_SUPP_PERIODS 4
+
+/* HCA attributes. Mirrors fields of the QUERY_HCA_CAP responses
+ * (see mlx5_devx_cmd_query_hca_attr).
+ */
+struct mlx5_hca_attr {
+	uint32_t eswitch_manager:1;
+	uint32_t flow_counters_dump:1;
+	uint8_t flow_counter_bulk_alloc_bitmap;
+	uint32_t eth_net_offloads:1;
+	uint32_t eth_virt:1;
+	uint32_t wqe_vlan_insert:1;
+	uint32_t wqe_inline_mode:2;
+	uint32_t vport_inline_mode:3;
+	uint32_t tunnel_stateless_geneve_rx:1;
+	uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
+	uint32_t tunnel_stateless_gtp:1;
+	uint32_t lro_cap:1;
+	uint32_t tunnel_lro_gre:1;
+	uint32_t tunnel_lro_vxlan:1;
+	uint32_t lro_max_msg_sz_mode:2;
+	uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
+	uint32_t flex_parser_protocols;
+	uint32_t hairpin:1;
+	uint32_t log_max_hairpin_queues:5;
+	uint32_t log_max_hairpin_wq_data_sz:5;
+	uint32_t log_max_hairpin_num_packets:5;
+	uint32_t vhca_id:16;
+	struct mlx5_hca_qos_attr qos;
+};
+
+/* WQ attributes; copied verbatim into the PRM WQ context by
+ * devx_cmd_fill_wq_data() when creating RQ/SQ objects.
+ */
+struct mlx5_devx_wq_attr {
+	uint32_t wq_type:4;
+	uint32_t wq_signature:1;
+	uint32_t end_padding_mode:2;
+	uint32_t cd_slave:1;
+	uint32_t hds_skip_first_sge:1;
+	uint32_t log2_hds_buf_size:3;
+	uint32_t page_offset:5;
+	uint32_t lwm:16;
+	uint32_t pd:24;
+	uint32_t uar_page:24;
+	uint64_t dbr_addr;
+	uint32_t hw_counter;
+	uint32_t sw_counter;
+	uint32_t log_wq_stride:4;
+	uint32_t log_wq_pg_sz:5;
+	uint32_t log_wq_sz:5;
+	uint32_t dbr_umem_valid:1;
+	uint32_t wq_umem_valid:1;
+	uint32_t log_hairpin_num_packets:5;
+	uint32_t log_hairpin_data_sz:5;
+	uint32_t single_wqe_log_num_of_strides:4;
+	uint32_t two_byte_shift_en:1;
+	uint32_t single_stride_log_num_of_bytes:3;
+	uint32_t dbr_umem_id;
+	uint32_t wq_umem_id;
+	uint64_t wq_umem_offset;
+};
+
+/* Create RQ attributes structure, used by create RQ operation. */
+struct mlx5_devx_create_rq_attr {
+	uint32_t rlky:1;
+	uint32_t delay_drop_en:1;
+	uint32_t scatter_fcs:1;
+	uint32_t vsd:1;
+	uint32_t mem_rq_type:4;
+	uint32_t state:4;
+	uint32_t flush_in_error_en:1;
+	uint32_t hairpin:1;
+	uint32_t user_index:24;
+	uint32_t cqn:24;
+	uint32_t counter_set_id:8;
+	uint32_t rmpn:24;
+	struct mlx5_devx_wq_attr wq_attr;
+};
+
+/* Modify RQ attributes structure, used by modify RQ operation.
+ * Optional fields take effect only when their bit is set in modify_bitmask.
+ */
+struct mlx5_devx_modify_rq_attr {
+	uint32_t rqn:24;
+	uint32_t rq_state:4; /* Current RQ state. */
+	uint32_t state:4; /* Required RQ state. */
+	uint32_t scatter_fcs:1;
+	uint32_t vsd:1;
+	uint32_t counter_set_id:8;
+	uint32_t hairpin_peer_sq:24;
+	uint32_t hairpin_peer_vhca:16;
+	uint64_t modify_bitmask;
+	uint32_t lwm:16; /* Contained WQ lwm. */
+};
+
+/* RX hash field selection, embedded in the TIR attributes. */
+struct mlx5_rx_hash_field_select {
+	uint32_t l3_prot_type:1;
+	uint32_t l4_prot_type:1;
+	uint32_t selected_fields:30;
+};
+
+/* TIR attributes structure, used by TIR operations. */
+struct mlx5_devx_tir_attr {
+	uint32_t disp_type:4;
+	uint32_t lro_timeout_period_usecs:16;
+	uint32_t lro_enable_mask:4;
+	uint32_t lro_max_msg_sz:8;
+	uint32_t inline_rqn:24;
+	uint32_t rx_hash_symmetric:1;
+	uint32_t tunneled_offload_en:1;
+	uint32_t indirect_table:24;
+	uint32_t rx_hash_fn:4;
+	uint32_t self_lb_block:2;
+	uint32_t transport_domain:24;
+	uint32_t rx_hash_toeplitz_key[10];
+	struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
+	struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
+};
+
+/* RQT attributes structure, used by RQT operations. */
+struct mlx5_devx_rqt_attr {
+	uint32_t rqt_max_size:16;
+	uint32_t rqt_actual_size:16;
+	uint32_t rq_list[]; /* RQ numbers; rqt_actual_size entries used. */
+};
+
+/* TIS attributes structure. */
+struct mlx5_devx_tis_attr {
+	uint32_t strict_lag_tx_port_affinity:1;
+	uint32_t tls_en:1;
+	uint32_t lag_tx_port_affinity:4;
+	uint32_t prio:4;
+	uint32_t transport_domain:24;
+};
+
+/* SQ attributes structure, used by SQ create operation. */
+struct mlx5_devx_create_sq_attr {
+	uint32_t rlky:1;
+	uint32_t cd_master:1;
+	uint32_t fre:1;
+	uint32_t flush_in_error_en:1;
+	uint32_t allow_multi_pkt_send_wqe:1;
+	uint32_t min_wqe_inline_mode:3;
+	uint32_t state:4;
+	uint32_t reg_umr:1;
+	uint32_t allow_swp:1;
+	uint32_t hairpin:1;
+	uint32_t user_index:24;
+	uint32_t cqn:24;
+	uint32_t packet_pacing_rate_limit_index:16;
+	uint32_t tis_lst_sz:16;
+	uint32_t tis_num:24;
+	struct mlx5_devx_wq_attr wq_attr;
+};
+
+/* SQ attributes structure, used by SQ modify operation. */
+struct mlx5_devx_modify_sq_attr {
+	uint32_t sq_state:4; /* Current SQ state. */
+	uint32_t state:4; /* Required SQ state. */
+	uint32_t hairpin_peer_rq:24;
+	uint32_t hairpin_peer_vhca:16;
+};
+
+/* mlx5_devx_cmds.c */
+
+/* Flow counter allocation and (possibly asynchronous) query. */
+struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,
+						       uint32_t bulk_sz);
+int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
+int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
+				     int clear, uint32_t n_counters,
+				     uint64_t *pkts, uint64_t *bytes,
+				     uint32_t mkey, void *addr,
+				     struct mlx5dv_devx_cmd_comp *cmd_comp,
+				     uint64_t async_id);
+/* Capability / status queries. */
+int mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
+				 struct mlx5_hca_attr *attr);
+struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
+					      struct mlx5_devx_mkey_attr *attr);
+int mlx5_devx_get_out_command_status(void *out);
+int mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
+				  uint32_t *tis_td);
+/* Object creation and modification. */
+struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
+				       struct mlx5_devx_create_rq_attr *rq_attr,
+				       int socket);
+int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
+			    struct mlx5_devx_modify_rq_attr *rq_attr);
+struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
+					   struct mlx5_devx_tir_attr *tir_attr);
+struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
+					   struct mlx5_devx_rqt_attr *rqt_attr);
+struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
+				      struct mlx5_devx_create_sq_attr *sq_attr);
+int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
+			    struct mlx5_devx_modify_sq_attr *sq_attr);
+struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
+					   struct mlx5_devx_tis_attr *tis_attr);
+struct mlx5_devx_obj *mlx5_devx_cmd_create_td(struct ibv_context *ctx);
+/* Debug helper. */
+int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
+			    FILE *file);
+#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <stdalign.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
+#include <rte_config.h>
+
+#include "mlx5_glue.h"
+
+/*
+ * Thin wrappers forwarding to the corresponding libibverbs entry points.
+ * They live in this separate glue unit so that the verbs library can be
+ * linked indirectly (presumably via dlopen when
+ * CONFIG_RTE_IBVERBS_LINK_DLOPEN is enabled - see LIB_GLUE in the
+ * Makefile; confirm against mlx5_glue.h).
+ */
+static int
+mlx5_glue_fork_init(void)
+{
+	return ibv_fork_init();
+}
+
+static struct ibv_pd *
+mlx5_glue_alloc_pd(struct ibv_context *context)
+{
+	return ibv_alloc_pd(context);
+}
+
+static int
+mlx5_glue_dealloc_pd(struct ibv_pd *pd)
+{
+	return ibv_dealloc_pd(pd);
+}
+
+static struct ibv_device **
+mlx5_glue_get_device_list(int *num_devices)
+{
+	return ibv_get_device_list(num_devices);
+}
+
+static void
+mlx5_glue_free_device_list(struct ibv_device **list)
+{
+	ibv_free_device_list(list);
+}
+
+static struct ibv_context *
+mlx5_glue_open_device(struct ibv_device *device)
+{
+	return ibv_open_device(device);
+}
+
+static int
+mlx5_glue_close_device(struct ibv_context *context)
+{
+	return ibv_close_device(context);
+}
+
+static int
+mlx5_glue_query_device(struct ibv_context *context,
+		       struct ibv_device_attr *device_attr)
+{
+	return ibv_query_device(context, device_attr);
+}
+
+/* Extended query and completion-channel/CQ wrappers; each simply forwards
+ * its arguments to the same-named ibv_* function.
+ */
+static int
+mlx5_glue_query_device_ex(struct ibv_context *context,
+			  const struct ibv_query_device_ex_input *input,
+			  struct ibv_device_attr_ex *attr)
+{
+	return ibv_query_device_ex(context, input, attr);
+}
+
+static int
+mlx5_glue_query_rt_values_ex(struct ibv_context *context,
+			     struct ibv_values_ex *values)
+{
+	return ibv_query_rt_values_ex(context, values);
+}
+
+static int
+mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,
+		     struct ibv_port_attr *port_attr)
+{
+	return ibv_query_port(context, port_num, port_attr);
+}
+
+static struct ibv_comp_channel *
+mlx5_glue_create_comp_channel(struct ibv_context *context)
+{
+	return ibv_create_comp_channel(context);
+}
+
+static int
+mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
+{
+	return ibv_destroy_comp_channel(channel);
+}
+
+static struct ibv_cq *
+mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
+		    struct ibv_comp_channel *channel, int comp_vector)
+{
+	return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
+}
+
+static int
+mlx5_glue_destroy_cq(struct ibv_cq *cq)
+{
+	return ibv_destroy_cq(cq);
+}
+
+static int
+mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
+		       void **cq_context)
+{
+	return ibv_get_cq_event(channel, cq, cq_context);
+}
+
+static void
+mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
+{
+	ibv_ack_cq_events(cq, nevents);
+}
+
+static struct ibv_rwq_ind_table *
+mlx5_glue_create_rwq_ind_table(struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr)
+{
+ return ibv_create_rwq_ind_table(context, init_attr);
+}
+
+static int
+mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
+{
+ return ibv_destroy_rwq_ind_table(rwq_ind_table);
+}
+
+static struct ibv_wq *
+mlx5_glue_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr)
+{
+ return ibv_create_wq(context, wq_init_attr);
+}
+
+static int
+mlx5_glue_destroy_wq(struct ibv_wq *wq)
+{
+ return ibv_destroy_wq(wq);
+}
+static int
+mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
+{
+ return ibv_modify_wq(wq, wq_attr);
+}
+
+static struct ibv_flow *
+mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
+{
+ return ibv_create_flow(qp, flow);
+}
+
+static int
+mlx5_glue_destroy_flow(struct ibv_flow *flow_id)
+{
+ return ibv_destroy_flow(flow_id);
+}
+
+/*
+ * Destroy a flow action created by one of the dv_create_flow_action_*
+ * wrappers below.
+ *
+ * Ownership differs per build flavor:
+ * - With DR (HAVE_MLX5DV_DR): 'action' is a mlx5dv_dr_action handle and
+ *   is destroyed directly by rdma-core.
+ * - Without DR but with DV flow support: 'action' is a heap-allocated
+ *   struct mlx5dv_flow_action_attr (see the malloc() in the creators
+ *   below); the embedded verbs action is destroyed unless the attr is a
+ *   TAG action (tags carry only a value, no verbs object), then the
+ *   wrapper struct itself is freed.
+ * - Without DV flow support: stub returning positive ENOTSUP.
+ */
+static int
+mlx5_glue_destroy_flow_action(void *action)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_destroy(action);
+#else
+ struct mlx5dv_flow_action_attr *attr = action;
+ int res = 0;
+ switch (attr->type) {
+ case MLX5DV_FLOW_ACTION_TAG:
+ break;
+ default:
+ res = ibv_destroy_flow_action(attr->action);
+ break;
+ }
+ free(action);
+ return res;
+#endif
+#else
+ (void)action;
+ return ENOTSUP;
+#endif
+}
+
+/* Plain forwarding wrappers for QP and MR verbs. */
+static struct ibv_qp *
+mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
+{
+ return ibv_create_qp(pd, qp_init_attr);
+}
+
+static struct ibv_qp *
+mlx5_glue_create_qp_ex(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex)
+{
+ return ibv_create_qp_ex(context, qp_init_attr_ex);
+}
+
+static int
+mlx5_glue_destroy_qp(struct ibv_qp *qp)
+{
+ return ibv_destroy_qp(qp);
+}
+
+static int
+mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+{
+ return ibv_modify_qp(qp, attr, attr_mask);
+}
+
+static struct ibv_mr *
+mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
+{
+ return ibv_reg_mr(pd, addr, length, access);
+}
+
+static int
+mlx5_glue_dereg_mr(struct ibv_mr *mr)
+{
+ return ibv_dereg_mr(mr);
+}
+
+/*
+ * Counter-set wrappers (older counters API, guarded by
+ * HAVE_IBV_DEVICE_COUNTERS_SET_V42). When the installed rdma-core
+ * lacks the API they degrade to stubs returning NULL/ENOTSUP.
+ *
+ * NOTE(review): unlike mlx5_glue_create_counters() below, this NULL
+ * return does not set errno — confirm callers do not rely on it.
+ */
+static struct ibv_counter_set *
+mlx5_glue_create_counter_set(struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)context;
+ (void)init_attr;
+ return NULL;
+#else
+ return ibv_create_counter_set(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)cs;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counter_set(cs);
+#endif
+}
+
+static int
+mlx5_glue_describe_counter_set(struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)context;
+ (void)counter_set_id;
+ (void)cs_desc;
+ return ENOTSUP;
+#else
+ return ibv_describe_counter_set(context, counter_set_id, cs_desc);
+#endif
+}
+
+static int
+mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+ (void)query_attr;
+ (void)cs_data;
+ return ENOTSUP;
+#else
+ return ibv_query_counter_set(query_attr, cs_data);
+#endif
+}
+
+/*
+ * Newer counters API (guarded by HAVE_IBV_DEVICE_COUNTERS_SET_V45);
+ * same stub pattern when unavailable, but here the NULL return sets
+ * errno = ENOTSUP.
+ */
+static struct ibv_counters *
+mlx5_glue_create_counters(struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)context;
+ (void)init_attr;
+ errno = ENOTSUP;
+ return NULL;
+#else
+ return ibv_create_counters(context, init_attr);
+#endif
+}
+
+static int
+mlx5_glue_destroy_counters(struct ibv_counters *counters)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ return ENOTSUP;
+#else
+ return ibv_destroy_counters(counters);
+#endif
+}
+
+static int
+mlx5_glue_attach_counters(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ (void)attr;
+ (void)flow;
+ return ENOTSUP;
+#else
+ return ibv_attach_counters_point_flow(counters, attr, flow);
+#endif
+}
+
+static int
+mlx5_glue_query_counters(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags)
+{
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+ (void)counters;
+ (void)counters_value;
+ (void)ncounters;
+ (void)flags;
+ return ENOTSUP;
+#else
+ return ibv_read_counters(counters, counters_value, ncounters, flags);
+#endif
+}
+
+/* Async event and misc verbs helpers — plain forwarding. */
+static void
+mlx5_glue_ack_async_event(struct ibv_async_event *event)
+{
+ ibv_ack_async_event(event);
+}
+
+static int
+mlx5_glue_get_async_event(struct ibv_context *context,
+ struct ibv_async_event *event)
+{
+ return ibv_get_async_event(context, event);
+}
+
+static const char *
+mlx5_glue_port_state_str(enum ibv_port_state port_state)
+{
+ return ibv_port_state_str(port_state);
+}
+
+static struct ibv_cq *
+mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)
+{
+ return ibv_cq_ex_to_cq(cq);
+}
+
+/*
+ * Direct Rules (mlx5dv_dr_*) wrappers. Each degrades to an ENOTSUP
+ * stub when the corresponding feature macro is not provided by the
+ * installed rdma-core: creators return NULL with errno = ENOTSUP,
+ * destroyers return the (positive) errno value.
+ */
+static void *
+mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_dest_table(tbl);
+#else
+ (void)tbl;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * Prefer the newer dest_ib_port API when available, otherwise fall
+ * back to the e-switch dest_vport variant.
+ */
+static void *
+mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)
+{
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ return mlx5dv_dr_action_create_dest_ib_port(domain, port);
+#else
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+ return mlx5dv_dr_action_create_dest_vport(domain, port);
+#else
+ (void)domain;
+ (void)port;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+#endif
+}
+
+/*
+ * NOTE(review): guarded by HAVE_MLX5DV_DR_ESWITCH rather than the base
+ * HAVE_MLX5DV_DR macro used by the other wrappers — confirm intended.
+ */
+static void *
+mlx5_glue_dr_create_flow_action_drop(void)
+{
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+ return mlx5dv_dr_action_create_drop();
+#else
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_push_vlan(struct mlx5dv_dr_domain *domain,
+ rte_be32_t vlan_tag)
+{
+#ifdef HAVE_MLX5DV_DR_VLAN
+ return mlx5dv_dr_action_create_push_vlan(domain, vlan_tag);
+#else
+ (void)domain;
+ (void)vlan_tag;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_action_pop_vlan(void)
+{
+#ifdef HAVE_MLX5DV_DR_VLAN
+ return mlx5dv_dr_action_create_pop_vlan();
+#else
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_flow_tbl(void *domain, uint32_t level)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_table_create(domain, level);
+#else
+ (void)domain;
+ (void)level;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_table_destroy(tbl);
+#else
+ (void)tbl;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_domain(struct ibv_context *ctx,
+ enum mlx5dv_dr_domain_type domain)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_domain_create(ctx, domain);
+#else
+ (void)ctx;
+ (void)domain;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_domain(void *domain)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_domain_destroy(domain);
+#else
+ (void)domain;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+/* mlx5dv (direct verbs) object creation wrappers. */
+static struct ibv_cq_ex *
+mlx5_glue_dv_create_cq(struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr)
+{
+ return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
+}
+
+/* Stub unless rdma-core supports striding RQ (multi-packet WQE RX). */
+static struct ibv_wq *
+mlx5_glue_dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr)
+{
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ (void)context;
+ (void)wq_attr;
+ (void)mlx5_wq_attr;
+ errno = ENOTSUP;
+ return NULL;
+#else
+ return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);
+#endif
+}
+
+static int
+mlx5_glue_dv_query_device(struct ibv_context *ctx,
+ struct mlx5dv_context *attrs_out)
+{
+ return mlx5dv_query_device(ctx, attrs_out);
+}
+
+static int
+mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type, void *attr)
+{
+ return mlx5dv_set_context_attr(ibv_ctx, type, attr);
+}
+
+static int
+mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
+{
+ return mlx5dv_init_obj(obj, obj_type);
+}
+
+/* Stub unless rdma-core supports tunnel offloads (mlx5dv_create_qp). */
+static struct ibv_qp *
+mlx5_glue_dv_create_qp(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
+#else
+ (void)context;
+ (void)qp_init_attr_ex;
+ (void)dv_qp_init_attr;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * Create a flow matcher. With DR the matcher lives in the given table
+ * and only selected fields of 'matcher_attr' are used; without DR the
+ * whole attr struct is handed to mlx5dv and 'tbl' is ignored.
+ */
+static void *
+mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)context;
+ return mlx5dv_dr_matcher_create(tbl, matcher_attr->priority,
+ matcher_attr->match_criteria_enable,
+ matcher_attr->match_mask);
+#else
+ (void)tbl;
+ return mlx5dv_create_flow_matcher(context, matcher_attr);
+#endif
+#else
+ (void)context;
+ (void)matcher_attr;
+ (void)tbl;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * Create a flow rule. The non-DR path copies the action attrs into a
+ * fixed stack array, hence the hard limit of 8 actions per rule; NULL
+ * is returned (without setting errno) when the limit is exceeded.
+ */
+static void *
+mlx5_glue_dv_create_flow(void *matcher,
+ void *match_value,
+ size_t num_actions,
+ void *actions[])
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_rule_create(matcher, match_value, num_actions,
+ (struct mlx5dv_dr_action **)actions);
+#else
+ struct mlx5dv_flow_action_attr actions_attr[8];
+
+ if (num_actions > 8)
+ return NULL;
+ for (size_t i = 0; i < num_actions; i++)
+ actions_attr[i] =
+ *((struct mlx5dv_flow_action_attr *)(actions[i]));
+ return mlx5dv_create_flow(matcher, match_value,
+ num_actions, actions_attr);
+#endif
+#else
+ (void)matcher;
+ (void)match_value;
+ (void)num_actions;
+ (void)actions;
+ return NULL;
+#endif
+}
+
+/*
+ * Flow-action creators. In the non-DR DV build each returns a
+ * heap-allocated struct mlx5dv_flow_action_attr; that allocation is
+ * released later by mlx5_glue_destroy_flow_action(). On malloc failure
+ * NULL is returned with errno left as set by malloc().
+ */
+static void *
+mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_flow_counter(counter_obj, offset);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ (void)offset;
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;
+ action->obj = counter_obj;
+ return action;
+#endif
+#else
+ (void)counter_obj;
+ (void)offset;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static void *
+mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_dest_ibv_qp(qp);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
+ action->obj = qp;
+ return action;
+#endif
+#else
+ (void)qp;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/* DevX TIR destination requires a DR-capable rdma-core; no DV fallback. */
+static void *
+mlx5_glue_dv_create_flow_action_dest_devx_tir(void *tir)
+{
+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
+ return mlx5dv_dr_action_create_dest_devx_tir(tir);
+#else
+ (void)tir;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * Header-modify action. The DR path uses (domain, flags); the DV path
+ * uses (ctx, ft_type) instead — each branch ignores the other pair.
+ */
+static void *
+mlx5_glue_dv_create_flow_action_modify_header
+ (struct ibv_context *ctx,
+ enum mlx5dv_flow_table_type ft_type,
+ void *domain, uint64_t flags,
+ size_t actions_sz,
+ uint64_t actions[])
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_action_create_modify_header(domain, flags, actions_sz,
+ (__be64 *)actions);
+#else
+ struct mlx5dv_flow_action_attr *action;
+
+ (void)domain;
+ (void)flags;
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ action->action = mlx5dv_create_flow_action_modify_header
+ (ctx, actions_sz, actions, ft_type);
+ return action;
+#endif
+#else
+ (void)ctx;
+ (void)ft_type;
+ (void)domain;
+ (void)flags;
+ (void)actions_sz;
+ (void)actions;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/* Packet reformat (encap/decap) action; same DR/DV split as above. */
+static void *
+mlx5_glue_dv_create_flow_action_packet_reformat
+ (struct ibv_context *ctx,
+ enum mlx5dv_flow_action_packet_reformat_type reformat_type,
+ enum mlx5dv_flow_table_type ft_type,
+ struct mlx5dv_dr_domain *domain,
+ uint32_t flags, size_t data_sz, void *data)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_action_create_packet_reformat(domain, flags,
+ reformat_type, data_sz,
+ data);
+#else
+ (void)domain;
+ (void)flags;
+ struct mlx5dv_flow_action_attr *action;
+
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ action->action = mlx5dv_create_flow_action_packet_reformat
+ (ctx, data_sz, data, reformat_type, ft_type);
+ return action;
+#endif
+#else
+ (void)ctx;
+ (void)reformat_type;
+ (void)ft_type;
+ (void)domain;
+ (void)flags;
+ (void)data_sz;
+ (void)data;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * Tag (flow mark) action. Note the unusual structure: there is no
+ * #else for !HAVE_IBV_FLOW_DV_SUPPORT — when DV support is missing the
+ * trailing three statements form the ENOTSUP stub, and when it is
+ * present they are dead code after the returns above.
+ */
+static void *
+mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_action_create_tag(tag);
+#else
+ struct mlx5dv_flow_action_attr *action;
+ action = malloc(sizeof(*action));
+ if (!action)
+ return NULL;
+ action->type = MLX5DV_FLOW_ACTION_TAG;
+ action->tag_value = tag;
+ return action;
+#endif
+#endif
+ (void)tag;
+ errno = ENOTSUP;
+ return NULL;
+}
+
+/* Flow-meter actions need both DR and the meter-action API. */
+static void *
+mlx5_glue_dv_create_flow_action_meter(struct mlx5dv_dr_flow_meter_attr *attr)
+{
+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
+ return mlx5dv_dr_action_create_flow_meter(attr);
+#else
+ (void)attr;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dv_modify_flow_action_meter(void *action,
+ struct mlx5dv_dr_flow_meter_attr *attr,
+ uint64_t modify_bits)
+{
+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
+ return mlx5dv_dr_action_modify_flow_meter(action, attr, modify_bits);
+#else
+ (void)action;
+ (void)attr;
+ (void)modify_bits;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+/*
+ * Destroy a flow rule: a DR rule handle with DR, otherwise the
+ * underlying ibv_flow created by mlx5dv_create_flow().
+ */
+static int
+mlx5_glue_dv_destroy_flow(void *flow_id)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_rule_destroy(flow_id);
+#else
+ return ibv_destroy_flow(flow_id);
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow_matcher(void *matcher)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_matcher_destroy(matcher);
+#else
+ return mlx5dv_destroy_flow_matcher(matcher);
+#endif
+#else
+ (void)matcher;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+/* Open a device with DevX command support enabled on the context. */
+static struct ibv_context *
+mlx5_glue_dv_open_device(struct ibv_device *device)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_open_device(device,
+ &(struct mlx5dv_context_attr){
+ .flags = MLX5DV_CONTEXT_FLAGS_DEVX,
+ });
+#else
+ (void)device;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * DevX object wrappers. NOTE(review): these stubs return the negative
+ * value -ENOTSUP, whereas e.g. mlx5_glue_devx_qp_query() below returns
+ * the positive errno — callers must cope with both conventions.
+ */
+static struct mlx5dv_devx_obj *
+mlx5_glue_devx_obj_create(struct ibv_context *ctx,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_create(ctx, in, inlen, out, outlen);
+#else
+ (void)ctx;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_destroy(struct mlx5dv_devx_obj *obj)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_destroy(obj);
+#else
+ (void)obj;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_query(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_query(obj, in, inlen, out, outlen);
+#else
+ (void)obj;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_obj_modify(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_obj_modify(obj, in, inlen, out, outlen);
+#else
+ (void)obj;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ return -ENOTSUP;
+#endif
+}
+
+static int
+mlx5_glue_devx_general_cmd(struct ibv_context *ctx,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_general_cmd(ctx, in, inlen, out, outlen);
+#else
+ (void)ctx;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ return -ENOTSUP;
+#endif
+}
+
+/*
+ * Create a DevX asynchronous command completion channel.
+ *
+ * Returns the channel on success; when the installed rdma-core lacks
+ * DevX async support, returns NULL with errno set to ENOTSUP.
+ *
+ * Fix: the stub previously did "errno = -ENOTSUP" — errno values are
+ * positive by definition, so callers testing errno == ENOTSUP never
+ * matched. Assign the positive constant.
+ */
+static struct mlx5dv_devx_cmd_comp *
+mlx5_glue_devx_create_cmd_comp(struct ibv_context *ctx)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ return mlx5dv_devx_create_cmd_comp(ctx);
+#else
+ (void)ctx;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/*
+ * Destroy a DevX asynchronous command completion channel.
+ *
+ * When DevX async support is compiled out, this is a no-op that sets
+ * errno to ENOTSUP (the function returns void, so errno is the only
+ * signal).
+ *
+ * Fix: the stub previously did "errno = -ENOTSUP"; errno values are
+ * positive, so store the positive constant.
+ */
+static void
+mlx5_glue_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ mlx5dv_devx_destroy_cmd_comp(cmd_comp);
+#else
+ (void)cmd_comp;
+ errno = ENOTSUP;
+#endif
+}
+
+/*
+ * Issue an asynchronous DevX object query; the result arrives later on
+ * 'cmd_comp' tagged with 'wr_id'. Stub returns -ENOTSUP when DevX
+ * async support is compiled out.
+ */
+static int
+mlx5_glue_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,
+ size_t inlen, size_t outlen, uint64_t wr_id,
+ struct mlx5dv_devx_cmd_comp *cmd_comp)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ return mlx5dv_devx_obj_query_async(obj, in, inlen, outlen, wr_id,
+ cmd_comp);
+#else
+ (void)obj;
+ (void)in;
+ (void)inlen;
+ (void)outlen;
+ (void)wr_id;
+ (void)cmd_comp;
+ return -ENOTSUP;
+#endif
+}
+
+/* Retrieve one completed async command response from the channel. */
+static int
+mlx5_glue_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,
+ struct mlx5dv_devx_async_cmd_hdr *cmd_resp,
+ size_t cmd_resp_len)
+{
+#ifdef HAVE_IBV_DEVX_ASYNC
+ return mlx5dv_devx_get_async_cmd_comp(cmd_comp, cmd_resp,
+ cmd_resp_len);
+#else
+ (void)cmd_comp;
+ (void)cmd_resp;
+ (void)cmd_resp_len;
+ return -ENOTSUP;
+#endif
+}
+
+/*
+ * Register a user memory region for DevX access.
+ *
+ * Returns the umem handle on success; when DevX object support is
+ * compiled out, returns NULL with errno set to ENOTSUP.
+ *
+ * Fix: the stub previously did "errno = -ENOTSUP" — errno values are
+ * positive by definition, so callers testing errno == ENOTSUP never
+ * matched. Assign the positive constant.
+ */
+static struct mlx5dv_devx_umem *
+mlx5_glue_devx_umem_reg(struct ibv_context *context, void *addr, size_t size,
+ uint32_t access)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_umem_reg(context, addr, size, access);
+#else
+ (void)context;
+ (void)addr;
+ (void)size;
+ (void)access;
+ errno = ENOTSUP;
+ return NULL;
+#endif
+}
+
+/* Unregister a DevX user memory region; stub returns -ENOTSUP. */
+static int
+mlx5_glue_devx_umem_dereg(struct mlx5dv_devx_umem *dv_devx_umem)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_umem_dereg(dv_devx_umem);
+#else
+ (void)dv_devx_umem;
+ return -ENOTSUP;
+#endif
+}
+
+/*
+ * Query a QP through DevX.
+ * NOTE(review): this stub returns positive errno while the devx_obj_*
+ * stubs above return -ENOTSUP — callers must handle both conventions.
+ */
+static int
+mlx5_glue_devx_qp_query(struct ibv_qp *qp,
+ const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_OBJ
+ return mlx5dv_devx_qp_query(qp, in, inlen, out, outlen);
+#else
+ (void)qp;
+ (void)in;
+ (void)inlen;
+ (void)out;
+ (void)outlen;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+/* Query DevX port information (e.g. vport metadata for E-Switch). */
+static int
+mlx5_glue_devx_port_query(struct ibv_context *ctx,
+ uint32_t port_num,
+ struct mlx5dv_devx_port *mlx5_devx_port)
+{
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ return mlx5dv_query_devx_port(ctx, port_num, mlx5_devx_port);
+#else
+ (void)ctx;
+ (void)port_num;
+ (void)mlx5_devx_port;
+ errno = ENOTSUP;
+ return errno;
+#endif
+}
+
+/* Dump a DR domain's flow state to 'file' for debugging. */
+static int
+mlx5_glue_dr_dump_domain(FILE *file, void *domain)
+{
+#ifdef HAVE_MLX5_DR_FLOW_DUMP
+ return mlx5dv_dump_dr_domain(file, domain);
+#else
+ RTE_SET_USED(file);
+ RTE_SET_USED(domain);
+ return -ENOTSUP;
+#endif
+}
+
+/*
+ * The exported glue function table: the single symbol through which
+ * consumers reach all of the wrappers above. Built as a cache-line
+ * aligned compound-literal constant; .version lets the loader verify
+ * that a dlopen()-ed glue library matches the expected ABI
+ * (MLX5_GLUE_VERSION).
+ */
+alignas(RTE_CACHE_LINE_SIZE)
+const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
+ .version = MLX5_GLUE_VERSION,
+ .fork_init = mlx5_glue_fork_init,
+ .alloc_pd = mlx5_glue_alloc_pd,
+ .dealloc_pd = mlx5_glue_dealloc_pd,
+ .get_device_list = mlx5_glue_get_device_list,
+ .free_device_list = mlx5_glue_free_device_list,
+ .open_device = mlx5_glue_open_device,
+ .close_device = mlx5_glue_close_device,
+ .query_device = mlx5_glue_query_device,
+ .query_device_ex = mlx5_glue_query_device_ex,
+ .query_rt_values_ex = mlx5_glue_query_rt_values_ex,
+ .query_port = mlx5_glue_query_port,
+ .create_comp_channel = mlx5_glue_create_comp_channel,
+ .destroy_comp_channel = mlx5_glue_destroy_comp_channel,
+ .create_cq = mlx5_glue_create_cq,
+ .destroy_cq = mlx5_glue_destroy_cq,
+ .get_cq_event = mlx5_glue_get_cq_event,
+ .ack_cq_events = mlx5_glue_ack_cq_events,
+ .create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,
+ .destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,
+ .create_wq = mlx5_glue_create_wq,
+ .destroy_wq = mlx5_glue_destroy_wq,
+ .modify_wq = mlx5_glue_modify_wq,
+ .create_flow = mlx5_glue_create_flow,
+ .destroy_flow = mlx5_glue_destroy_flow,
+ .destroy_flow_action = mlx5_glue_destroy_flow_action,
+ .create_qp = mlx5_glue_create_qp,
+ .create_qp_ex = mlx5_glue_create_qp_ex,
+ .destroy_qp = mlx5_glue_destroy_qp,
+ .modify_qp = mlx5_glue_modify_qp,
+ .reg_mr = mlx5_glue_reg_mr,
+ .dereg_mr = mlx5_glue_dereg_mr,
+ .create_counter_set = mlx5_glue_create_counter_set,
+ .destroy_counter_set = mlx5_glue_destroy_counter_set,
+ .describe_counter_set = mlx5_glue_describe_counter_set,
+ .query_counter_set = mlx5_glue_query_counter_set,
+ .create_counters = mlx5_glue_create_counters,
+ .destroy_counters = mlx5_glue_destroy_counters,
+ .attach_counters = mlx5_glue_attach_counters,
+ .query_counters = mlx5_glue_query_counters,
+ .ack_async_event = mlx5_glue_ack_async_event,
+ .get_async_event = mlx5_glue_get_async_event,
+ .port_state_str = mlx5_glue_port_state_str,
+ .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dr_create_flow_action_dest_flow_tbl =
+ mlx5_glue_dr_create_flow_action_dest_flow_tbl,
+ .dr_create_flow_action_dest_port =
+ mlx5_glue_dr_create_flow_action_dest_port,
+ .dr_create_flow_action_drop =
+ mlx5_glue_dr_create_flow_action_drop,
+ .dr_create_flow_action_push_vlan =
+ mlx5_glue_dr_create_flow_action_push_vlan,
+ .dr_create_flow_action_pop_vlan =
+ mlx5_glue_dr_create_flow_action_pop_vlan,
+ .dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,
+ .dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
+ .dr_create_domain = mlx5_glue_dr_create_domain,
+ .dr_destroy_domain = mlx5_glue_dr_destroy_domain,
+ .dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_create_wq = mlx5_glue_dv_create_wq,
+ .dv_query_device = mlx5_glue_dv_query_device,
+ .dv_set_context_attr = mlx5_glue_dv_set_context_attr,
+ .dv_init_obj = mlx5_glue_dv_init_obj,
+ .dv_create_qp = mlx5_glue_dv_create_qp,
+ .dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
+ .dv_create_flow = mlx5_glue_dv_create_flow,
+ .dv_create_flow_action_counter =
+ mlx5_glue_dv_create_flow_action_counter,
+ .dv_create_flow_action_dest_ibv_qp =
+ mlx5_glue_dv_create_flow_action_dest_ibv_qp,
+ .dv_create_flow_action_dest_devx_tir =
+ mlx5_glue_dv_create_flow_action_dest_devx_tir,
+ .dv_create_flow_action_modify_header =
+ mlx5_glue_dv_create_flow_action_modify_header,
+ .dv_create_flow_action_packet_reformat =
+ mlx5_glue_dv_create_flow_action_packet_reformat,
+ .dv_create_flow_action_tag = mlx5_glue_dv_create_flow_action_tag,
+ .dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,
+ .dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,
+ .dv_destroy_flow = mlx5_glue_dv_destroy_flow,
+ .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
+ .dv_open_device = mlx5_glue_dv_open_device,
+ .devx_obj_create = mlx5_glue_devx_obj_create,
+ .devx_obj_destroy = mlx5_glue_devx_obj_destroy,
+ .devx_obj_query = mlx5_glue_devx_obj_query,
+ .devx_obj_modify = mlx5_glue_devx_obj_modify,
+ .devx_general_cmd = mlx5_glue_devx_general_cmd,
+ .devx_create_cmd_comp = mlx5_glue_devx_create_cmd_comp,
+ .devx_destroy_cmd_comp = mlx5_glue_devx_destroy_cmd_comp,
+ .devx_obj_query_async = mlx5_glue_devx_obj_query_async,
+ .devx_get_async_cmd_comp = mlx5_glue_devx_get_async_cmd_comp,
+ .devx_umem_reg = mlx5_glue_devx_umem_reg,
+ .devx_umem_dereg = mlx5_glue_devx_umem_dereg,
+ .devx_qp_query = mlx5_glue_devx_qp_query,
+ .devx_port_query = mlx5_glue_devx_port_query,
+ .dr_dump_domain = mlx5_glue_dr_dump_domain,
+};
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef MLX5_GLUE_H_
+#define MLX5_GLUE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_byteorder.h>
+
+#include "mlx5_autoconf.h"
+
+/* Fallback so the version field below always has a definition. */
+#ifndef MLX5_GLUE_VERSION
+#define MLX5_GLUE_VERSION ""
+#endif
+
+/*
+ * Compatibility stubs: when the installed rdma-core predates a given
+ * feature, declare just enough placeholder types/enums/macros for the
+ * struct mlx5_glue prototypes below to compile. The corresponding
+ * wrappers in mlx5_glue.c are ENOTSUP stubs in those builds, so these
+ * types are never dereferenced.
+ */
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
+struct ibv_counter_set;
+struct ibv_counter_set_data;
+struct ibv_counter_set_description;
+struct ibv_counter_set_init_attr;
+struct ibv_query_counter_set_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
+struct ibv_counters;
+struct ibv_counters_init_attr;
+struct ibv_counter_attach_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+struct mlx5dv_qp_init_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+struct mlx5dv_wq_init_attr;
+#endif
+
+#ifndef HAVE_IBV_FLOW_DV_SUPPORT
+struct mlx5dv_flow_matcher;
+struct mlx5dv_flow_matcher_attr;
+struct mlx5dv_flow_action_attr;
+struct mlx5dv_flow_match_parameters;
+struct mlx5dv_dr_flow_meter_attr;
+struct ibv_flow_action;
+/* Dummy enums: prototypes pass these by value, so forward declarations
+ * are not enough — a complete (single-member) type is required.
+ */
+enum mlx5dv_flow_action_packet_reformat_type { packet_reformat_type = 0, };
+enum mlx5dv_flow_table_type { flow_table_type = 0, };
+#endif
+
+#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
+#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
+#endif
+
+#ifndef HAVE_IBV_DEVX_OBJ
+struct mlx5dv_devx_obj;
+struct mlx5dv_devx_umem { uint32_t umem_id; };
+#endif
+
+#ifndef HAVE_IBV_DEVX_ASYNC
+struct mlx5dv_devx_cmd_comp;
+struct mlx5dv_devx_async_cmd_hdr;
+#endif
+
+#ifndef HAVE_MLX5DV_DR
+enum mlx5dv_dr_domain_type { unused, };
+struct mlx5dv_dr_domain;
+#endif
+
+#ifndef HAVE_MLX5DV_DR_DEVX_PORT
+struct mlx5dv_devx_port;
+#endif
+
+#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER
+struct mlx5dv_dr_flow_meter_attr;
+#endif
+
+/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
+struct mlx5_glue {
+ const char *version;
+ int (*fork_init)(void);
+ struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
+ int (*dealloc_pd)(struct ibv_pd *pd);
+ struct ibv_device **(*get_device_list)(int *num_devices);
+ void (*free_device_list)(struct ibv_device **list);
+ struct ibv_context *(*open_device)(struct ibv_device *device);
+ int (*close_device)(struct ibv_context *context);
+ int (*query_device)(struct ibv_context *context,
+ struct ibv_device_attr *device_attr);
+ int (*query_device_ex)(struct ibv_context *context,
+ const struct ibv_query_device_ex_input *input,
+ struct ibv_device_attr_ex *attr);
+ int (*query_rt_values_ex)(struct ibv_context *context,
+ struct ibv_values_ex *values);
+ int (*query_port)(struct ibv_context *context, uint8_t port_num,
+ struct ibv_port_attr *port_attr);
+ struct ibv_comp_channel *(*create_comp_channel)
+ (struct ibv_context *context);
+ int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
+ struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
+ void *cq_context,
+ struct ibv_comp_channel *channel,
+ int comp_vector);
+ int (*destroy_cq)(struct ibv_cq *cq);
+ int (*get_cq_event)(struct ibv_comp_channel *channel,
+ struct ibv_cq **cq, void **cq_context);
+ void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
+ struct ibv_rwq_ind_table *(*create_rwq_ind_table)
+ (struct ibv_context *context,
+ struct ibv_rwq_ind_table_init_attr *init_attr);
+ int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
+ struct ibv_wq *(*create_wq)(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_init_attr);
+ int (*destroy_wq)(struct ibv_wq *wq);
+ int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
+ struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
+ struct ibv_flow_attr *flow);
+ int (*destroy_flow)(struct ibv_flow *flow_id);
+ int (*destroy_flow_action)(void *action);
+ struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
+ struct ibv_qp_init_attr *qp_init_attr);
+ struct ibv_qp *(*create_qp_ex)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex);
+ int (*destroy_qp)(struct ibv_qp *qp);
+ int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+ int attr_mask);
+ struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
+ size_t length, int access);
+ int (*dereg_mr)(struct ibv_mr *mr);
+ struct ibv_counter_set *(*create_counter_set)
+ (struct ibv_context *context,
+ struct ibv_counter_set_init_attr *init_attr);
+ int (*destroy_counter_set)(struct ibv_counter_set *cs);
+ int (*describe_counter_set)
+ (struct ibv_context *context,
+ uint16_t counter_set_id,
+ struct ibv_counter_set_description *cs_desc);
+ int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
+ struct ibv_counter_set_data *cs_data);
+ struct ibv_counters *(*create_counters)
+ (struct ibv_context *context,
+ struct ibv_counters_init_attr *init_attr);
+ int (*destroy_counters)(struct ibv_counters *counters);
+ int (*attach_counters)(struct ibv_counters *counters,
+ struct ibv_counter_attach_attr *attr,
+ struct ibv_flow *flow);
+ int (*query_counters)(struct ibv_counters *counters,
+ uint64_t *counters_value,
+ uint32_t ncounters,
+ uint32_t flags);
+ void (*ack_async_event)(struct ibv_async_event *event);
+ int (*get_async_event)(struct ibv_context *context,
+ struct ibv_async_event *event);
+ const char *(*port_state_str)(enum ibv_port_state port_state);
+ struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ void *(*dr_create_flow_action_dest_flow_tbl)(void *tbl);
+ void *(*dr_create_flow_action_dest_port)(void *domain,
+ uint32_t port);
+ void *(*dr_create_flow_action_drop)();
+ void *(*dr_create_flow_action_push_vlan)
+ (struct mlx5dv_dr_domain *domain,
+ rte_be32_t vlan_tag);
+ void *(*dr_create_flow_action_pop_vlan)();
+ void *(*dr_create_flow_tbl)(void *domain, uint32_t level);
+ int (*dr_destroy_flow_tbl)(void *tbl);
+ void *(*dr_create_domain)(struct ibv_context *ctx,
+ enum mlx5dv_dr_domain_type domain);
+ int (*dr_destroy_domain)(void *domain);
+ struct ibv_cq_ex *(*dv_create_cq)
+ (struct ibv_context *context,
+ struct ibv_cq_init_attr_ex *cq_attr,
+ struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ struct ibv_wq *(*dv_create_wq)
+ (struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr);
+ int (*dv_query_device)(struct ibv_context *ctx_in,
+ struct mlx5dv_context *attrs_out);
+ int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
+ enum mlx5dv_set_ctx_attr_type type,
+ void *attr);
+ int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+ struct ibv_qp *(*dv_create_qp)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr);
+ void *(*dv_create_flow_matcher)
+ (struct ibv_context *context,
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl);
+ void *(*dv_create_flow)(void *matcher, void *match_value,
+ size_t num_actions, void *actions[]);
+ void *(*dv_create_flow_action_counter)(void *obj, uint32_t offset);
+ void *(*dv_create_flow_action_dest_ibv_qp)(void *qp);
+ void *(*dv_create_flow_action_dest_devx_tir)(void *tir);
+ void *(*dv_create_flow_action_modify_header)
+ (struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,
+ void *domain, uint64_t flags, size_t actions_sz,
+ uint64_t actions[]);
+ void *(*dv_create_flow_action_packet_reformat)
+ (struct ibv_context *ctx,
+ enum mlx5dv_flow_action_packet_reformat_type reformat_type,
+ enum mlx5dv_flow_table_type ft_type,
+ struct mlx5dv_dr_domain *domain,
+ uint32_t flags, size_t data_sz, void *data);
+ void *(*dv_create_flow_action_tag)(uint32_t tag);
+ void *(*dv_create_flow_action_meter)
+ (struct mlx5dv_dr_flow_meter_attr *attr);
+ int (*dv_modify_flow_action_meter)(void *action,
+ struct mlx5dv_dr_flow_meter_attr *attr, uint64_t modify_bits);
+ int (*dv_destroy_flow)(void *flow);
+ int (*dv_destroy_flow_matcher)(void *matcher);
+ struct ibv_context *(*dv_open_device)(struct ibv_device *device);
+ struct mlx5dv_devx_obj *(*devx_obj_create)
+ (struct ibv_context *ctx,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_obj_destroy)(struct mlx5dv_devx_obj *obj);
+ int (*devx_obj_query)(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_obj_modify)(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_general_cmd)(struct ibv_context *context,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ struct mlx5dv_devx_cmd_comp *(*devx_create_cmd_comp)
+ (struct ibv_context *context);
+ void (*devx_destroy_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp);
+ int (*devx_obj_query_async)(struct mlx5dv_devx_obj *obj,
+ const void *in, size_t inlen,
+ size_t outlen, uint64_t wr_id,
+ struct mlx5dv_devx_cmd_comp *cmd_comp);
+ int (*devx_get_async_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp,
+ struct mlx5dv_devx_async_cmd_hdr *resp,
+ size_t cmd_resp_len);
+ struct mlx5dv_devx_umem *(*devx_umem_reg)(struct ibv_context *context,
+ void *addr, size_t size,
+ uint32_t access);
+ int (*devx_umem_dereg)(struct mlx5dv_devx_umem *dv_devx_umem);
+ int (*devx_qp_query)(struct ibv_qp *qp,
+ const void *in, size_t inlen,
+ void *out, size_t outlen);
+ int (*devx_port_query)(struct ibv_context *ctx,
+ uint32_t port_num,
+ struct mlx5dv_devx_port *mlx5_devx_port);
+ int (*dr_dump_domain)(FILE *file, void *domain);
+};
+
+extern const struct mlx5_glue *mlx5_glue; /* NOTE(review): needs exactly one definition in a .c file (e.g. mlx5_common.c) — confirm. */
+
+#endif /* MLX5_GLUE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_PRM_H_
+#define RTE_PMD_MLX5_PRM_H_
+
+#include <assert.h>
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+
+#include "mlx5_autoconf.h"
+
+/* RSS hash key size. */
+#define MLX5_RSS_HASH_KEY_LEN 40
+
+/* Get CQE owner bit. */
+#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
+
+/* Get CQE format. */
+#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)
+
+/* Get CQE opcode. */
+#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)
+
+/* Get CQE solicited event. */
+#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)
+
+/* Invalidate a CQE. */
+#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
+
+/* WQE Segment sizes in bytes. */
+#define MLX5_WSEG_SIZE 16u
+#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
+#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
+#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)
+
+/* WQE/WQEBB size in bytes. */
+#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)
+
+/*
+ * Max size of a WQE session.
+ * Absolute maximum size is 63 (MLX5_DSEG_MAX) segments,
+ * the WQE size field in Control Segment is 6 bits wide.
+ */
+#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)
+
+/*
+ * Default minimum number of Tx queues for inlining packets.
+ * If there are fewer queues than specified we assume we do
+ * not have enough CPU resources (cycles) to perform inlining,
+ * the PCIe throughput is not expected to be a bottleneck and
+ * inlining is disabled.
+ */
+#define MLX5_INLINE_MAX_TXQS 8u
+#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u
+
+/*
+ * Default packet length threshold to be inlined with
+ * enhanced MPW. If the packet length exceeds the threshold
+ * the data are not inlined. Should be aligned to a WQEBB
+ * boundary, accounting for the title Control and Ethernet
+ * segments.
+ */
+#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \
+ MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Maximal inline data length sent with enhanced MPW.
+ * Is based on maximal WQE size.
+ */
+#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+ MLX5_WQE_CSEG_SIZE - \
+ MLX5_WQE_ESEG_SIZE - \
+ MLX5_WQE_DSEG_SIZE + \
+ MLX5_DSEG_MIN_INLINE_SIZE)
+/*
+ * Minimal amount of packets to be sent with EMPW.
+ * This limits the minimal required size of sent EMPW.
+ * If there are not enough resources to build a minimal
+ * EMPW the sending loop exits.
+ */
+#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
+/*
+ * Maximal amount of packets to be sent with EMPW.
+ * This value is not recommended to exceed MLX5_TX_COMP_THRESH,
+ * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs
+ * without CQE generation request, being multiplied by
+ * MLX5_TX_COMP_MAX_CQE it may cause significant latency
+ * in tx burst routine at the moment of freeing multiple mbufs.
+ */
+#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
+#define MLX5_MPW_MAX_PACKETS 6
+#define MLX5_MPW_INLINE_MAX_PACKETS 2
+
+/*
+ * Default packet length threshold to be inlined with
+ * ordinary SEND. Inlining saves the MR key search
+ * and extra PCIe data fetch transaction, but eats the
+ * CPU cycles.
+ */
+#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
+ MLX5_ESEG_MIN_INLINE_SIZE - \
+ MLX5_WQE_CSEG_SIZE - \
+ MLX5_WQE_ESEG_SIZE - \
+ MLX5_WQE_DSEG_SIZE)
+/*
+ * Maximal inline data length sent with ordinary SEND.
+ * Is based on maximal WQE size.
+ */
+#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
+ MLX5_WQE_CSEG_SIZE - \
+ MLX5_WQE_ESEG_SIZE - \
+ MLX5_WQE_DSEG_SIZE + \
+ MLX5_ESEG_MIN_INLINE_SIZE)
+
+/* Missing in mlx5dv.h, so it is defined here. */
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29u
+
+/* CQE value to inform that VLAN is stripped. */
+#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
+
+/* IPv4 options. */
+#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
+
+/* IPv6 packet. */
+#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
+
+/* IPv4 packet. */
+#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
+
+/* TCP packet. */
+#define MLX5_CQE_RX_TCP_PACKET (1u << 4)
+
+/* UDP packet. */
+#define MLX5_CQE_RX_UDP_PACKET (1u << 5)
+
+/* IP is fragmented. */
+#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
+
+/* L2 header is valid. */
+#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
+
+/* L3 header is valid. */
+#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
+
+/* L4 header is valid. */
+#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
+
+/* Outer packet, 0 IPv4, 1 IPv6. */
+#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
+
+/* Tunnel packet bit in the CQE. */
+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
+
+/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */
+#define MLX5_CQE_LRO_PUSH_MASK 0x40
+
+/* Mask for L4 type in the CQE hdr_type_etc field. */
+#define MLX5_CQE_L4_TYPE_MASK 0x70
+
+/* The bit index of L4 type in CQE hdr_type_etc field. */
+#define MLX5_CQE_L4_TYPE_SHIFT 0x4
+
+/* L4 type to indicate TCP packet without acknowledgment. */
+#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3
+
+/* L4 type to indicate TCP packet with acknowledgment. */
+#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4
+
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
+/* VLAN insertion flag. */
+#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)
+
+/* Data inline segment flag. */
+#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
+
+/* Is flow mark valid. */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
+#else
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
+#endif
+
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
+/* Default mark mask for metadata legacy mode. */
+#define MLX5_FLOW_MARK_MASK 0xffffff
+
+/* Maximum number of DS in WQE. Limited by 6-bit field. */
+#define MLX5_DSEG_MAX 63
+
+/* The completion mode offset in the WQE control segment line 2. */
+#define MLX5_COMP_MODE_OFFSET 2
+
+/* Amount of data bytes in minimal inline data segment. */
+#define MLX5_DSEG_MIN_INLINE_SIZE 12u
+
+/* Amount of data bytes in minimal inline eth segment. */
+#define MLX5_ESEG_MIN_INLINE_SIZE 18u
+
+/* Amount of data bytes after eth data segment. */
+#define MLX5_ESEG_EXTRA_DATA_SIZE 32u
+
+/* The maximum log value of segments per RQ WQE. */
+#define MLX5_MAX_LOG_RQ_SEGS 5u
+
+/* The alignment needed for WQ buffer. */
+#define MLX5_WQE_BUF_ALIGNMENT 512
+
+/* Completion mode. */
+enum mlx5_completion_mode {
+ MLX5_COMP_ONLY_ERR = 0x0,
+ MLX5_COMP_ONLY_FIRST_ERR = 0x1,
+ MLX5_COMP_ALWAYS = 0x2,
+ MLX5_COMP_CQE_AND_EQE = 0x3,
+};
+
+/* MPW mode. */
+enum mlx5_mpw_mode {
+ MLX5_MPW_DISABLED,
+ MLX5_MPW,
+ MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
+};
+
+/* WQE Control segment. */
+struct mlx5_wqe_cseg {
+ uint32_t opcode;
+ uint32_t sq_ds;
+ uint32_t flags;
+ uint32_t misc;
+} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
+
+/* WQE Data Segment: either a pointer (lkey/address) or minimal-size inline data. */
+struct mlx5_wqe_dseg {
+ uint32_t bcount;
+ union {
+ uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+ struct {
+ uint32_t lkey;
+ uint64_t pbuf;
+ } __rte_packed;
+ };
+} __rte_packed;
+
+/* Subset of struct WQE Ethernet Segment. */
+struct mlx5_wqe_eseg {
+ union {
+ struct {
+ uint32_t swp_offs;
+ uint8_t cs_flags;
+ uint8_t swp_flags;
+ uint16_t mss;
+ uint32_t metadata;
+ uint16_t inline_hdr_sz;
+ union {
+ uint16_t inline_data;
+ uint16_t vlan_tag;
+ };
+ } __rte_packed;
+ struct {
+ uint32_t offsets;
+ uint32_t flags;
+ uint32_t flow_metadata;
+ uint32_t inline_hdr;
+ } __rte_packed;
+ };
+} __rte_packed;
+
+/* The title WQEBB, header of WQE. */
+struct mlx5_wqe {
+ union {
+ struct mlx5_wqe_cseg cseg;
+ uint32_t ctrl[4];
+ };
+ struct mlx5_wqe_eseg eseg;
+ union {
+ struct mlx5_wqe_dseg dseg[2];
+ uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
+ };
+} __rte_packed;
+
+/* WQE for Multi-Packet RQ. */
+struct mlx5_wqe_mprq {
+ struct mlx5_wqe_srq_next_seg next_seg;
+ struct mlx5_wqe_data_seg dseg;
+};
+
+#define MLX5_MPRQ_LEN_MASK 0x000ffff
+#define MLX5_MPRQ_LEN_SHIFT 0
+#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
+#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
+#define MLX5_MPRQ_FILLER_MASK 0x80000000
+#define MLX5_MPRQ_FILLER_SHIFT 31
+
+#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
+
+/* CQ element structure - should be equal to the cache line size */
+struct mlx5_cqe {
+#if (RTE_CACHE_LINE_SIZE == 128)
+ uint8_t padding[64];
+#endif
+ uint8_t pkt_info;
+ uint8_t rsvd0;
+ uint16_t wqe_id;
+ uint8_t lro_tcppsh_abort_dupack;
+ uint8_t lro_min_ttl;
+ uint16_t lro_tcp_win;
+ uint32_t lro_ack_seq_num;
+ uint32_t rx_hash_res;
+ uint8_t rx_hash_type;
+ uint8_t rsvd1[3];
+ uint16_t csum;
+ uint8_t rsvd2[6];
+ uint16_t hdr_type_etc;
+ uint16_t vlan_info;
+ uint8_t lro_num_seg;
+ uint8_t rsvd3[3];
+ uint32_t flow_table_metadata;
+ uint8_t rsvd4[4];
+ uint32_t byte_cnt;
+ uint64_t timestamp;
+ uint32_t sop_drop_qpn;
+ uint16_t wqe_counter;
+ uint8_t rsvd5;
+ uint8_t op_own;
+};
+
+/* Adding direct verbs to data-path. */
+
+/* CQ sequence number mask. */
+#define MLX5_CQ_SQN_MASK 0x3
+
+/* CQ sequence number index. */
+#define MLX5_CQ_SQN_OFFSET 28
+
+/* CQ doorbell index mask. */
+#define MLX5_CI_MASK 0xffffff
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_ARM_DB 1
+
+/* CQ doorbell offset. */
+#define MLX5_CQ_DOORBELL 0x20
+
+/* CQE format value. */
+#define MLX5_COMPRESSED 0x3
+
+/* Action type of header modification. */
+enum {
+ MLX5_MODIFICATION_TYPE_SET = 0x1,
+ MLX5_MODIFICATION_TYPE_ADD = 0x2,
+ MLX5_MODIFICATION_TYPE_COPY = 0x3,
+};
+
+/* The field of packet to be modified. */
+enum mlx5_modification_field {
+ MLX5_MODI_OUT_NONE = -1,
+ MLX5_MODI_OUT_SMAC_47_16 = 1,
+ MLX5_MODI_OUT_SMAC_15_0,
+ MLX5_MODI_OUT_ETHERTYPE,
+ MLX5_MODI_OUT_DMAC_47_16,
+ MLX5_MODI_OUT_DMAC_15_0,
+ MLX5_MODI_OUT_IP_DSCP,
+ MLX5_MODI_OUT_TCP_FLAGS,
+ MLX5_MODI_OUT_TCP_SPORT,
+ MLX5_MODI_OUT_TCP_DPORT,
+ MLX5_MODI_OUT_IPV4_TTL,
+ MLX5_MODI_OUT_UDP_SPORT,
+ MLX5_MODI_OUT_UDP_DPORT,
+ MLX5_MODI_OUT_SIPV6_127_96,
+ MLX5_MODI_OUT_SIPV6_95_64,
+ MLX5_MODI_OUT_SIPV6_63_32,
+ MLX5_MODI_OUT_SIPV6_31_0,
+ MLX5_MODI_OUT_DIPV6_127_96,
+ MLX5_MODI_OUT_DIPV6_95_64,
+ MLX5_MODI_OUT_DIPV6_63_32,
+ MLX5_MODI_OUT_DIPV6_31_0,
+ MLX5_MODI_OUT_SIPV4,
+ MLX5_MODI_OUT_DIPV4,
+ MLX5_MODI_OUT_FIRST_VID,
+ MLX5_MODI_IN_SMAC_47_16 = 0x31,
+ MLX5_MODI_IN_SMAC_15_0,
+ MLX5_MODI_IN_ETHERTYPE,
+ MLX5_MODI_IN_DMAC_47_16,
+ MLX5_MODI_IN_DMAC_15_0,
+ MLX5_MODI_IN_IP_DSCP,
+ MLX5_MODI_IN_TCP_FLAGS,
+ MLX5_MODI_IN_TCP_SPORT,
+ MLX5_MODI_IN_TCP_DPORT,
+ MLX5_MODI_IN_IPV4_TTL,
+ MLX5_MODI_IN_UDP_SPORT,
+ MLX5_MODI_IN_UDP_DPORT,
+ MLX5_MODI_IN_SIPV6_127_96,
+ MLX5_MODI_IN_SIPV6_95_64,
+ MLX5_MODI_IN_SIPV6_63_32,
+ MLX5_MODI_IN_SIPV6_31_0,
+ MLX5_MODI_IN_DIPV6_127_96,
+ MLX5_MODI_IN_DIPV6_95_64,
+ MLX5_MODI_IN_DIPV6_63_32,
+ MLX5_MODI_IN_DIPV6_31_0,
+ MLX5_MODI_IN_SIPV4,
+ MLX5_MODI_IN_DIPV4,
+ MLX5_MODI_OUT_IPV6_HOPLIMIT,
+ MLX5_MODI_IN_IPV6_HOPLIMIT,
+ MLX5_MODI_META_DATA_REG_A,
+ MLX5_MODI_META_DATA_REG_B = 0x50,
+ MLX5_MODI_META_REG_C_0,
+ MLX5_MODI_META_REG_C_1,
+ MLX5_MODI_META_REG_C_2,
+ MLX5_MODI_META_REG_C_3,
+ MLX5_MODI_META_REG_C_4,
+ MLX5_MODI_META_REG_C_5,
+ MLX5_MODI_META_REG_C_6,
+ MLX5_MODI_META_REG_C_7,
+ MLX5_MODI_OUT_TCP_SEQ_NUM,
+ MLX5_MODI_IN_TCP_SEQ_NUM,
+ MLX5_MODI_OUT_TCP_ACK_NUM,
+ MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
+};
+
+/* Total number of metadata reg_c's. */
+#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)
+
+enum modify_reg { /* Metadata registers, mirroring the MLX5_MODI_META_* fields above. */
+	REG_NONE = 0,
+	REG_A,
+	REG_B,
+	REG_C_0,
+	REG_C_1,
+	REG_C_2,
+	REG_C_3,
+	REG_C_4,
+	REG_C_5,
+	REG_C_6,
+	REG_C_7,
+};
+
+/* Modification sub command. */
+struct mlx5_modification_cmd {
+ union {
+ uint32_t data0;
+ struct {
+ unsigned int length:5;
+ unsigned int rsvd0:3;
+ unsigned int offset:5;
+ unsigned int rsvd1:3;
+ unsigned int field:12;
+ unsigned int action_type:4;
+ };
+ };
+ union {
+ uint32_t data1;
+ uint8_t data[4];
+ struct {
+ unsigned int rsvd2:8;
+ unsigned int dst_offset:5;
+ unsigned int rsvd3:3;
+ unsigned int dst_field:12;
+ unsigned int rsvd4:4;
+ };
+ };
+};
+
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
+ (&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
+ (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
+ __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
+#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
+ (__mlx5_bit_off(typ, fld) & 0xf))
+#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value to a struct */
+#define MLX5_SET(typ, p, fld, v) \
+ do { \
+ u32 _v = v; \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
+ __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | \
+ (((_v) & __mlx5_mask(typ, fld)) << \
+ __mlx5_dw_bit_off(typ, fld))); \
+ } while (0)
+
+#define MLX5_SET64(typ, p, fld, v) \
+ do { \
+ assert(__mlx5_bit_sz(typ, fld) == 64); \
+ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
+ rte_cpu_to_be_64(v); \
+ } while (0)
+
+#define MLX5_GET(typ, p, fld) \
+ ((rte_be_to_cpu_32(*((__be32 *)(p) +\
+ __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+ __mlx5_mask(typ, fld))
+#define MLX5_GET16(typ, p, fld) \
+ ((rte_be_to_cpu_16(*((__be16 *)(p) + \
+ __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
+ __mlx5_mask16(typ, fld))
+#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((__be64 *)(p) + \
+ __mlx5_64_off(typ, fld)))
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhci_port[0x4];
+ u8 source_sqn[0x18];
+ u8 reserved_at_20[0x10];
+ u8 source_port[0x10];
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
+ u8 gre_protocol[0x10];
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+ u8 vxlan_vni[0x18];
+ u8 reserved_at_b8[0x8];
+ u8 geneve_vni[0x18];
+ u8 reserved_at_e4[0x7];
+ u8 geneve_oam[0x1];
+ u8 reserved_at_e0[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+ u8 reserved_at_100[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+ u8 reserved_at_120[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+ u8 reserved_at_140[0xc0];
+};
+
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+ u8 dmac_47_16[0x20];
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
+ u8 frag[0x1];
+ u8 ip_version[0x4];
+ u8 tcp_flags[0x9];
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+ u8 reserved_at_c0[0x20];
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
+ u8 metadata_reg_c_7[0x20];
+ u8 metadata_reg_c_6[0x20];
+ u8 metadata_reg_c_5[0x20];
+ u8 metadata_reg_c_4[0x20];
+ u8 metadata_reg_c_3[0x20];
+ u8 metadata_reg_c_2[0x20];
+ u8 metadata_reg_c_1[0x20];
+ u8 metadata_reg_c_0[0x20];
+ u8 metadata_reg_a[0x20];
+ u8 metadata_reg_b[0x20];
+ u8 reserved_at_1c0[0x40];
+};
+
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+ u8 outer_tcp_seq_num[0x20];
+ u8 inner_tcp_ack_num[0x20];
+ u8 outer_tcp_ack_num[0x20];
+ u8 reserved_at_auto1[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_a8[0x10];
+ u8 icmp_header_data[0x20];
+ u8 icmpv6_header_data[0x20];
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+ u8 reserved_at_120[0x20];
+ u8 gtpu_teid[0x20];
+ u8 gtpu_msg_type[0x08];
+ u8 gtpu_msg_flags[0x08];
+ u8 reserved_at_170[0x90];
+};
+
+/* Flow matcher. */
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
+};
+
+enum {
+ MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT
+};
+
+enum {
+ MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+ MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_CREATE_TIR = 0x900,
+ MLX5_CMD_OP_CREATE_SQ = 0X904,
+ MLX5_CMD_OP_MODIFY_SQ = 0X905,
+ MLX5_CMD_OP_CREATE_RQ = 0x908,
+ MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
+};
+
+enum {
+ MLX5_MKC_ACCESS_MODE_MTT = 0x1,
+};
+
+/* Flow counters. */
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 flow_counter_id[0x20];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 flow_counter_id[0x20];
+ u8 reserved_at_40[0x18];
+ u8 flow_counter_bulk[0x8];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 flow_counter_id[0x20];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_query_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_traffic_counter_bits flow_statistics[];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x20];
+ u8 mkey[0x20];
+ u8 address[0x40];
+ u8 clear[0x1];
+ u8 dump_to_memory[0x1];
+ u8 num_of_counters[0x1e];
+ u8 flow_counter_id[0x20];
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_at_0[0x1];
+ u8 free[0x1];
+ u8 reserved_at_2[0x1];
+ u8 access_mode_4_2[0x3];
+ u8 reserved_at_6[0x7];
+ u8 relaxed_ordering_write[0x1];
+ u8 reserved_at_e[0x1];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode_1_0[0x2];
+ u8 reserved_at_18[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_at_40[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_at_63[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_at_66[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_at_120[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_at_1c0[0x1b];
+ u8 log_page_size[0x5];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_at_61[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_at_280[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 mkey_umem_id[0x20];
+
+ u8 mkey_umem_offset[0x40];
+
+ u8 reserved_at_380[0x500];
+
+ u8 klm_pas_mtt[][0x20];
+};
+
+enum {
+ MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,
+ MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
+};
+
+enum {
+ MLX5_HCA_CAP_OPMOD_GET_MAX = 0,
+ MLX5_HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum {
+ MLX5_CAP_INLINE_MODE_L2,
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
+enum {
+ MLX5_INLINE_MODE_NONE,
+ MLX5_INLINE_MODE_L2,
+ MLX5_INLINE_MODE_IP,
+ MLX5_INLINE_MODE_TCP_UDP,
+ MLX5_INLINE_MODE_RESERVED4,
+ MLX5_INLINE_MODE_INNER_L2,
+ MLX5_INLINE_MODE_INNER_IP,
+ MLX5_INLINE_MODE_INNER_TCP_UDP,
+};
+
+/* HCA bit masks indicating which Flex parser protocols are already enabled. */
+#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
+#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
+#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
+#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
+#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
+#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
+#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
+#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
+#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_at_0[0x30];
+ u8 vhca_id[0x10];
+ u8 reserved_at_40[0x40];
+ u8 log_max_srq_sz[0x8];
+ u8 log_max_qp_sz[0x8];
+ u8 reserved_at_90[0xb];
+ u8 log_max_qp[0x5];
+ u8 reserved_at_a0[0xb];
+ u8 log_max_srq[0x5];
+ u8 reserved_at_b0[0x10];
+ u8 reserved_at_c0[0x8];
+ u8 log_max_cq_sz[0x8];
+ u8 reserved_at_d0[0xb];
+ u8 log_max_cq[0x5];
+ u8 log_max_eq_sz[0x8];
+ u8 reserved_at_e8[0x2];
+ u8 log_max_mkey[0x6];
+ u8 reserved_at_f0[0x8];
+ u8 dump_fill_mkey[0x1];
+ u8 reserved_at_f9[0x3];
+ u8 log_max_eq[0x4];
+ u8 max_indirection[0x8];
+ u8 fixed_buffer_size[0x1];
+ u8 log_max_mrw_sz[0x7];
+ u8 force_teardown[0x1];
+ u8 reserved_at_111[0x1];
+ u8 log_max_bsf_list_size[0x6];
+ u8 umr_extended_translation_offset[0x1];
+ u8 null_mkey[0x1];
+ u8 log_max_klm_list_size[0x6];
+ u8 reserved_at_120[0xa];
+ u8 log_max_ra_req_dc[0x6];
+ u8 reserved_at_130[0xa];
+ u8 log_max_ra_res_dc[0x6];
+ u8 reserved_at_140[0xa];
+ u8 log_max_ra_req_qp[0x6];
+ u8 reserved_at_150[0xa];
+ u8 log_max_ra_res_qp[0x6];
+ u8 end_pad[0x1];
+ u8 cc_query_allowed[0x1];
+ u8 cc_modify_allowed[0x1];
+ u8 start_pad[0x1];
+ u8 cache_line_128byte[0x1];
+ u8 reserved_at_165[0xa];
+ u8 qcam_reg[0x1];
+ u8 gid_table_size[0x10];
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 retransmission_q_counters[0x1];
+ u8 debug[0x1];
+ u8 modify_rq_counter_set_id[0x1];
+ u8 rq_delay_drop[0x1];
+ u8 max_qp_cnt[0xa];
+ u8 pkey_table_size[0x10];
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 vnic_env_queue_counters[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 eswitch_manager[0x1];
+ u8 device_memory[0x1];
+ u8 mcam_reg[0x1];
+ u8 pcam_reg[0x1];
+ u8 local_ca_ack_delay[0x5];
+ u8 port_module_event[0x1];
+ u8 enhanced_error_q_counters[0x1];
+ u8 ports_check[0x1];
+ u8 reserved_at_1b3[0x1];
+ u8 disable_link_up[0x1];
+ u8 beacon_led[0x1];
+ u8 port_type[0x2];
+ u8 num_ports[0x8];
+ u8 reserved_at_1c0[0x1];
+ u8 pps[0x1];
+ u8 pps_modify[0x1];
+ u8 log_max_msg[0x5];
+ u8 reserved_at_1c8[0x4];
+ u8 max_tc[0x4];
+ u8 temp_warn_event[0x1];
+ u8 dcbx[0x1];
+ u8 general_notification_event[0x1];
+ u8 reserved_at_1d3[0x2];
+ u8 fpga[0x1];
+ u8 rol_s[0x1];
+ u8 rol_g[0x1];
+ u8 reserved_at_1d8[0x1];
+ u8 wol_s[0x1];
+ u8 wol_g[0x1];
+ u8 wol_a[0x1];
+ u8 wol_b[0x1];
+ u8 wol_m[0x1];
+ u8 wol_u[0x1];
+ u8 wol_p[0x1];
+ u8 stat_rate_support[0x10];
+ u8 reserved_at_1f0[0xc];
+ u8 cqe_version[0x4];
+ u8 compact_address_vector[0x1];
+ u8 striding_rq[0x1];
+ u8 reserved_at_202[0x1];
+ u8 ipoib_enhanced_offloads[0x1];
+ u8 ipoib_basic_offloads[0x1];
+ u8 reserved_at_205[0x1];
+ u8 repeated_block_disabled[0x1];
+ u8 umr_modify_entity_size_disabled[0x1];
+ u8 umr_modify_atomic_disabled[0x1];
+ u8 umr_indirect_mkey_disabled[0x1];
+ u8 umr_fence[0x2];
+ u8 reserved_at_20c[0x3];
+ u8 drain_sigerr[0x1];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+ u8 reserved_at_213[0x1];
+ u8 wq_signature[0x1];
+ u8 sctr_data_cqe[0x1];
+ u8 reserved_at_216[0x1];
+ u8 sho[0x1];
+ u8 tph[0x1];
+ u8 rf[0x1];
+ u8 dct[0x1];
+ u8 qos[0x1];
+ u8 eth_net_offloads[0x1];
+ u8 roce[0x1];
+ u8 atomic[0x1];
+ u8 reserved_at_21f[0x1];
+ u8 cq_oi[0x1];
+ u8 cq_resize[0x1];
+ u8 cq_moderation[0x1];
+ u8 reserved_at_223[0x3];
+ u8 cq_eq_remap[0x1];
+ u8 pg[0x1];
+ u8 block_lb_mc[0x1];
+ u8 reserved_at_229[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 cq_period_start_from_cqe[0x1];
+ u8 cd[0x1];
+ u8 reserved_at_22d[0x1];
+ u8 apm[0x1];
+ u8 vector_calc[0x1];
+ u8 umr_ptr_rlky[0x1];
+ u8 imaicl[0x1];
+ u8 reserved_at_232[0x4];
+ u8 qkv[0x1];
+ u8 pkv[0x1];
+ u8 set_deth_sqpn[0x1];
+ u8 reserved_at_239[0x3];
+ u8 xrc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+ u8 uar_4k[0x1];
+ u8 reserved_at_241[0x9];
+ u8 uar_sz[0x6];
+ u8 reserved_at_250[0x8];
+ u8 log_pg_sz[0x8];
+ u8 bf[0x1];
+ u8 driver_version[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_at_263[0x8];
+ u8 log_bf_reg_size[0x5];
+ u8 reserved_at_270[0xb];
+ u8 lag_master[0x1];
+ u8 num_lag_ports[0x4];
+ u8 reserved_at_280[0x10];
+ u8 max_wqe_sz_sq[0x10];
+ u8 reserved_at_2a0[0x10];
+ u8 max_wqe_sz_rq[0x10];
+ u8 max_flow_counter_31_16[0x10];
+ u8 max_wqe_sz_sq_dc[0x10];
+ u8 reserved_at_2e0[0x7];
+ u8 max_qp_mcg[0x19];
+ u8 reserved_at_300[0x10];
+ u8 flow_counter_bulk_alloc[0x08];
+ u8 log_max_mcg[0x8];
+ u8 reserved_at_320[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_at_328[0x3];
+ u8 log_max_pd[0x5];
+ u8 reserved_at_330[0xb];
+ u8 log_max_xrcd[0x5];
+ u8 nic_receive_steering_discard[0x1];
+ u8 receive_discard_vport_down[0x1];
+ u8 transmit_discard_vport_down[0x1];
+ u8 reserved_at_343[0x5];
+ u8 log_max_flow_counter_bulk[0x8];
+ u8 max_flow_counter_15_0[0x10];
+ u8 modify_tis[0x1];
+ u8 flow_counters_dump[0x1];
+ u8 reserved_at_360[0x1];
+ u8 log_max_rq[0x5];
+ u8 reserved_at_368[0x3];
+ u8 log_max_sq[0x5];
+ u8 reserved_at_370[0x3];
+ u8 log_max_tir[0x5];
+ u8 reserved_at_378[0x3];
+ u8 log_max_tis[0x5];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_at_381[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_at_388[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_at_390[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_at_398[0x3];
+ u8 log_max_tis_per_sq[0x5];
+ u8 ext_stride_num_range[0x1];
+ u8 reserved_at_3a1[0x2];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_at_3a8[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_at_3b0[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_at_3b8[0x3];
+ u8 log_min_stride_sz_sq[0x5];
+ u8 hairpin[0x1];
+ u8 reserved_at_3c1[0x2];
+ u8 log_max_hairpin_queues[0x5];
+ u8 reserved_at_3c8[0x3];
+ u8 log_max_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3d0[0x3];
+ u8 log_max_hairpin_num_packets[0x5];
+ u8 reserved_at_3d8[0x3];
+ u8 log_max_wq_sz[0x5];
+ u8 nic_vport_change_event[0x1];
+ u8 disable_local_lb_uc[0x1];
+ u8 disable_local_lb_mc[0x1];
+ u8 log_min_hairpin_wq_data_sz[0x5];
+ u8 reserved_at_3e8[0x3];
+ u8 log_max_vlan_list[0x5];
+ u8 reserved_at_3f0[0x3];
+ u8 log_max_current_mc_list[0x5];
+ u8 reserved_at_3f8[0x3];
+ u8 log_max_current_uc_list[0x5];
+ u8 general_obj_types[0x40];
+ u8 reserved_at_440[0x20];
+ u8 reserved_at_460[0x10];
+ u8 max_num_eqs[0x10];
+ u8 reserved_at_480[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_at_488[0x8];
+ u8 log_uar_page_sz[0x10];
+ u8 reserved_at_4a0[0x20];
+ u8 device_frequency_mhz[0x20];
+ u8 device_frequency_khz[0x20];
+ u8 reserved_at_500[0x20];
+ u8 num_of_uars_per_page[0x20];
+ u8 flex_parser_protocols[0x20];
+ u8 reserved_at_560[0x20];
+ u8 reserved_at_580[0x3c];
+ u8 mini_cqe_resp_stride_index[0x1];
+ u8 cqe_128_always[0x1];
+ u8 cqe_compression_128[0x1];
+ u8 cqe_compression[0x1];
+ u8 cqe_compression_timeout[0x10];
+ u8 cqe_compression_max_num[0x10];
+ u8 reserved_at_5e0[0x10];
+ u8 tag_matching[0x1];
+ u8 rndv_offload_rc[0x1];
+ u8 rndv_offload_dc[0x1];
+ u8 log_tag_matching_list_sz[0x5];
+ u8 reserved_at_5f8[0x3];
+ u8 log_max_xrq[0x5];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
+};
+
+struct mlx5_ifc_qos_cap_bits {
+ u8 packet_pacing[0x1];
+ u8 esw_scheduling[0x1];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 reserved_at_4[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 packet_pacing_typical_size[0x1];
+ u8 flow_meter_srtcm[0x1];
+ u8 reserved_at_8[0x8];
+ u8 log_max_flow_meter[0x8];
+ u8 flow_meter_reg_id[0x8];
+ u8 reserved_at_25[0x8];
+ u8 flow_meter_reg_share[0x1];
+ u8 reserved_at_2e[0x17];
+ u8 packet_pacing_max_rate[0x20];
+ u8 packet_pacing_min_rate[0x20];
+ u8 reserved_at_80[0x10];
+ u8 packet_pacing_rate_table_size[0x10];
+ u8 esw_element_type[0x10];
+ u8 esw_tsar_type[0x10];
+ u8 reserved_at_c0[0x10];
+ u8 max_qos_para_vport[0x10];
+ u8 max_tsar_bw_share[0x20];
+ u8 reserved_at_100[0x6e8];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 lro_max_msg_sz_mode[0x2];
+ u8 wqe_vlan_insert[0x1];
+ u8 self_lb_en_modifiable[0x1];
+ u8 self_lb_mc[0x1];
+ u8 self_lb_uc[0x1];
+ u8 max_lso_cap[0x5];
+ u8 multi_pkt_send_wqe[0x2];
+ u8 wqe_inline_mode[0x2];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reg_umr_sq[0x1];
+ u8 scatter_fcs[0x1];
+ u8 enhanced_multi_pkt_send_wqe[0x1];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
+ u8 tunnel_stateless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+ u8 swp[0x1];
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+ u8 reserved_at_23[0x8];
+ u8 tunnel_stateless_gtp[0x1];
+ u8 reserved_at_25[0x4];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
+ u8 max_geneve_opt_len[0x1];
+ u8 tunnel_stateless_geneve_rx[0x1];
+ u8 reserved_at_40[0x10];
+ u8 lro_min_mss_size[0x10];
+ u8 reserved_at_60[0x120];
+ u8 lro_timer_supported_periods[4][0x20];
+ u8 reserved_at_200[0x600];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits
+ per_protocol_networking_offload_caps;
+ struct mlx5_ifc_qos_cap_bits qos_cap;
+ u8 reserved_at_0[0x8000];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_at_0[0x10];
+ u8 mac_addr_47_32[0x10];
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_at_0[0x5];
+ u8 min_wqe_inline_mode[0x3];
+ u8 reserved_at_8[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
+ u8 roce_en[0x1];
+ u8 arm_change_event[0x1];
+ u8 reserved_at_21[0x1a];
+ u8 event_on_mtu[0x1];
+ u8 event_on_promisc_change[0x1];
+ u8 event_on_vlan_change[0x1];
+ u8 event_on_mc_address_change[0x1];
+ u8 event_on_uc_address_change[0x1];
+ u8 reserved_at_40[0xc];
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+ u8 reserved_at_60[0xd0];
+ u8 mtu[0x10];
+ u8 system_image_guid[0x40];
+ u8 port_guid[0x40];
+ u8 node_guid[0x40];
+ u8 reserved_at_200[0x140];
+ u8 qkey_violation_counter[0x10];
+ u8 reserved_at_350[0x430];
+ u8 promisc_uc[0x1];
+ u8 promisc_mc[0x1];
+ u8 promisc_all[0x1];
+ u8 reserved_at_783[0x2];
+ u8 allowed_list_type[0x3];
+ u8 reserved_at_788[0xc];
+ u8 allowed_list_size[0xc];
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+ u8 reserved_at_7e0[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+ u8 reserved_at_60[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_at_68[0x18];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 strict_lag_tx_port_affinity[0x1];
+ u8 reserved_at_1[0x3];
+ u8 lag_tx_port_affinity[0x04];
+ u8 reserved_at_8[0x4];
+ u8 prio[0x4];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x100];
+ u8 reserved_at_120[0x8];
+ u8 transport_domain[0x18];
+ u8 reserved_at_140[0x8];
+ u8 underlay_qpn[0x18];
+ u8 reserved_at_160[0x3a0];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 tisn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 transport_domain[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_at_8[0x18];
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_at_24[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+ u8 reserved_at_40[0x8];
+ u8 pd[0x18];
+ u8 reserved_at_60[0x8];
+ u8 uar_page[0x18];
+ u8 dbr_addr[0x40];
+ u8 hw_counter[0x20];
+ u8 sw_counter[0x20];
+ u8 reserved_at_100[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_at_110[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_at_118[0x3];
+ u8 log_wq_sz[0x5];
+ u8 dbr_umem_valid[0x1];
+ u8 wq_umem_valid[0x1];
+ u8 reserved_at_122[0x1];
+ u8 log_hairpin_num_packets[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_hairpin_data_sz[0x5];
+ u8 reserved_at_130[0x4];
+ u8 single_wqe_log_num_of_strides[0x4];
+ u8 two_byte_shift_en[0x1];
+ u8 reserved_at_139[0x4];
+ u8 single_stride_log_num_of_bytes[0x3];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+ u8 wq_umem_offset[0x40];
+ u8 reserved_at_1c0[0x440];
+};
+
+enum {
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlky[0x1];
+ u8 delay_drop_en[0x1];
+ u8 scatter_fcs[0x1];
+ u8 vsd[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_at_c[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
+ u8 reserved_at_20[0x8];
+ u8 user_index[0x18];
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+ u8 counter_set_id[0x8];
+ u8 reserved_at_68[0x18];
+ u8 reserved_at_80[0x8];
+ u8 rmpn[0x18];
+ u8 reserved_at_a0[0x8];
+ u8 hairpin_peer_sq[0x18];
+ u8 reserved_at_c0[0x10];
+ u8 hairpin_peer_vhca[0x10];
+ u8 reserved_at_e0[0xa0];
+ struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 rqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 tisn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+enum {
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 rq_state[0x4];
+ u8 reserved_at_44[0x4];
+ u8 rqn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 modify_bitmask[0x40];
+ u8 reserved_at_c0[0x40];
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_RX_HASH_FN_NONE = 0x0,
+ MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
+ MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
+};
+
+enum {
+ MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 = 0x0,
+ MLX5_LRO_MAX_MSG_SIZE_START_FROM_L2 = 0x1,
+};
+
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_at_0[0x20];
+ u8 disp_type[0x4];
+ u8 reserved_at_24[0x1c];
+ u8 reserved_at_40[0x40];
+ u8 reserved_at_80[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_msg_sz[0x8];
+ u8 reserved_at_a0[0x40];
+ u8 reserved_at_e0[0x8];
+ u8 inline_rqn[0x18];
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_at_101[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_at_103[0x5];
+ u8 indirect_table[0x18];
+ u8 rx_hash_fn[0x4];
+ u8 reserved_at_124[0x2];
+ u8 self_lb_block[0x2];
+ u8 transport_domain[0x18];
+ u8 rx_hash_toeplitz_key[10][0x20];
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+ u8 reserved_at_2c0[0x4c0];
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 tirn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_at_0[0xa0];
+ u8 reserved_at_a0[0x10];
+ u8 rqt_max_size[0x10];
+ u8 reserved_at_c0[0x10];
+ u8 rqt_actual_size[0x10];
+ u8 reserved_at_e0[0x6a0];
+ struct mlx5_ifc_rq_num_bits rq_num[];
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 rqtn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlky[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
+ u8 min_wqe_inline_mode[0x3];
+ u8 state[0x4];
+ u8 reg_umr[0x1];
+ u8 allow_swp[0x1];
+ u8 hairpin[0x1];
+ u8 reserved_at_f[0x11];
+ u8 reserved_at_20[0x8];
+ u8 user_index[0x18];
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+ u8 reserved_at_60[0x8];
+ u8 hairpin_peer_rq[0x18];
+ u8 reserved_at_80[0x10];
+ u8 hairpin_peer_vhca[0x10];
+ u8 reserved_at_a0[0x50];
+ u8 packet_pacing_rate_limit_index[0x10];
+ u8 tis_lst_sz[0x10];
+ u8 reserved_at_110[0x10];
+ u8 reserved_at_120[0x40];
+ u8 reserved_at_160[0x8];
+ u8 tis_num_0[0x18];
+ struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x8];
+ u8 sqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 sq_state[0x4];
+ u8 reserved_at_44[0x4];
+ u8 sqn[0x18];
+ u8 reserved_at_60[0x20];
+ u8 modify_bitmask[0x40];
+ u8 reserved_at_c0[0x40];
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 sqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0xc0];
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+enum {
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),
+ MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1]; // 00h
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_1[0x19];
+ u8 reserved_at_2[0x20]; // 04h
+ u8 reserved_at_3[0x3];
+ u8 cbs_exponent[0x5]; // 08h
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_4[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+ u8 reserved_at_5[0x20]; // 0Ch
+ u8 reserved_at_6[0x3];
+ u8 ebs_exponent[0x5]; // 10h
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_7[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+ u8 reserved_at_8[0x60]; // 14h-1Ch
+};
+
+/* CQE format mask. */
+#define MLX5E_CQE_FORMAT_MASK 0xc
+
+/* MPW opcode. */
+#define MLX5_OPC_MOD_MPW 0x01
+
+/* Compressed Rx CQE structure. */
+struct mlx5_mini_cqe8 {
+ union {
+ uint32_t rx_hash_result;
+ struct {
+ uint16_t checksum;
+ uint16_t stride_idx;
+ };
+ struct {
+ uint16_t wqe_counter;
+ uint8_t s_wqe_opcode;
+ uint8_t reserved;
+ } s_wqe_info;
+ };
+ uint32_t byte_cnt;
+};
+
+/* srTCM PRM flow meter parameters. */
+enum {
+ MLX5_FLOW_COLOR_RED = 0,
+ MLX5_FLOW_COLOR_YELLOW,
+ MLX5_FLOW_COLOR_GREEN,
+ MLX5_FLOW_COLOR_UNDEFINED,
+};
+
+/* Maximum value of srTCM metering parameters. */
+#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))
+#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)
+#define MLX5_SRTCM_EBS_MAX 0
+
+/* The bits meter color use. */
+#define MLX5_MTR_COLOR_BITS 8
+
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+ uint32_t ret;
+
+ /*
+ * Add one to the user value to differentiate un-marked flows from
+ * marked flows. If the ID is equal to MLX5_FLOW_MARK_DEFAULT, it
+ * remains untouched.
+ */
+ if (val != MLX5_FLOW_MARK_DEFAULT)
+ ++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+ * word, byte-swapped by the kernel on little-endian systems. In this
+ * case, right-shifting the resulting big-endian value ensures the
+ * least significant 24 bits are retained when converting it back.
+ */
+ ret = rte_cpu_to_be_32(val) >> 8;
+#else
+ ret = val;
+#endif
+ return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+ /*
+ * Subtract one from the retrieved value. It was added by
+ * mlx5_flow_mark_set() to distinguish unmarked flows.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ return (val >> 8) - 1;
+#else
+ return val - 1;
+#endif
+}
+
+#endif /* RTE_PMD_MLX5_PRM_H_ */
--- /dev/null
+DPDK_20.02 {
+ global:
+
+ mlx5_devx_cmd_create_rq;
+ mlx5_devx_cmd_create_rqt;
+ mlx5_devx_cmd_create_sq;
+ mlx5_devx_cmd_create_tir;
+ mlx5_devx_cmd_create_td;
+ mlx5_devx_cmd_create_tis;
+ mlx5_devx_cmd_destroy;
+ mlx5_devx_cmd_flow_counter_alloc;
+ mlx5_devx_cmd_flow_counter_query;
+ mlx5_devx_cmd_flow_dump;
+ mlx5_devx_cmd_mkey_create;
+ mlx5_devx_cmd_modify_rq;
+ mlx5_devx_cmd_modify_sq;
+ mlx5_devx_cmd_qp_query_tis_td;
+ mlx5_devx_cmd_query_hca_attr;
+ mlx5_devx_get_out_command_status;
+};
# Library name.
LIB = librte_pmd_mlx5.a
-LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
-LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
-LIB_GLUE_VERSION = 20.02.0
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
-ifneq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_glue.c
-endif
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow_verbs.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mp.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
-SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_devx_cmds.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_utils.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
-ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
-INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
-endif
-
# Basic CFLAGS.
CFLAGS += -O3
CFLAGS += -std=c11 -Wall -Wextra
CFLAGS += -g
-CFLAGS += -I.
+CFLAGS += -I$(RTE_SDK)/drivers/common/mlx5
+CFLAGS += -I$(RTE_SDK)/drivers/net/mlx5
+CFLAGS += -I$(BUILDDIR)/drivers/common/mlx5
CFLAGS += -D_BSD_SOURCE
CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
-ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
-CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
-CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
-CFLAGS_mlx5_glue.o += -fPIC
-LDLIBS += -ldl
-else ifeq ($(CONFIG_RTE_IBVERBS_LINK_STATIC),y)
-LDLIBS += $(shell $(RTE_SDK)/buildtools/options-ibverbs-static.sh)
-else
-LDLIBS += -libverbs -lmlx5
-endif
+LDLIBS += -lrte_common_mlx5
LDLIBS += -lm
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
CFLAGS += -Wno-error=cast-qual
EXPORT_MAP := rte_pmd_mlx5_version.map
+
# memseg walk is not part of stable API
CFLAGS += -DALLOW_EXPERIMENTAL_API
include $(RTE_SDK)/mk/rte.lib.mk
-# Generate and clean-up mlx5_autoconf.h.
-
-export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
-export AUTO_CONFIG_CFLAGS += -Wno-error
-
-ifndef V
-AUTOCONF_OUTPUT := >/dev/null
-endif
-
-mlx5_autoconf.h.new: FORCE
-
-mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
- $Q $(RM) -f -- '$@'
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
- infiniband/mlx5dv.h \
- enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
- infiniband/mlx5dv.h \
- enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_MPLS_SUPPORT \
- infiniband/verbs.h \
- enum IBV_FLOW_SPEC_MPLS \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \
- infiniband/verbs.h \
- enum IBV_WQ_FLAGS_PCI_WRITE_END_PADDING \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_WQ_FLAG_RX_END_PADDING \
- infiniband/verbs.h \
- enum IBV_WQ_FLAG_RX_END_PADDING \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_MLX5_MOD_SWP \
- infiniband/mlx5dv.h \
- type 'struct mlx5dv_sw_parsing_caps' \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_MLX5_MOD_MPW \
- infiniband/mlx5dv.h \
- enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
- infiniband/mlx5dv.h \
- enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_MLX5_MOD_CQE_128B_PAD \
- infiniband/mlx5dv.h \
- enum MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_FLOW_DV_SUPPORT \
- infiniband/mlx5dv.h \
- func mlx5dv_create_flow_action_packet_reformat \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5DV_DR \
- infiniband/mlx5dv.h \
- enum MLX5DV_DR_DOMAIN_TYPE_NIC_RX \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5DV_DR_ESWITCH \
- infiniband/mlx5dv.h \
- enum MLX5DV_DR_DOMAIN_TYPE_FDB \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5DV_DR_VLAN \
- infiniband/mlx5dv.h \
- func mlx5dv_dr_action_create_push_vlan \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5DV_DR_DEVX_PORT \
- infiniband/mlx5dv.h \
- func mlx5dv_query_devx_port \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVX_OBJ \
- infiniband/mlx5dv.h \
- func mlx5dv_devx_obj_create \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_FLOW_DEVX_COUNTERS \
- infiniband/mlx5dv.h \
- enum MLX5DV_FLOW_ACTION_COUNTERS_DEVX \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVX_ASYNC \
- infiniband/mlx5dv.h \
- func mlx5dv_devx_obj_query_async \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR \
- infiniband/mlx5dv.h \
- func mlx5dv_dr_action_create_dest_devx_tir \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER \
- infiniband/mlx5dv.h \
- func mlx5dv_dr_action_create_flow_meter \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5_DR_FLOW_DUMP \
- infiniband/mlx5dv.h \
- func mlx5dv_dump_dr_domain \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD \
- infiniband/mlx5dv.h \
- enum MLX5_MMAP_GET_NC_PAGES_CMD \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_ETHTOOL_LINK_MODE_25G \
- /usr/include/linux/ethtool.h \
- enum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_ETHTOOL_LINK_MODE_50G \
- /usr/include/linux/ethtool.h \
- enum ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_ETHTOOL_LINK_MODE_100G \
- /usr/include/linux/ethtool.h \
- enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_COUNTERS_SET_V42 \
- infiniband/verbs.h \
- type 'struct ibv_counter_set_init_attr' \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_COUNTERS_SET_V45 \
- infiniband/verbs.h \
- type 'struct ibv_counters_init_attr' \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NL_NLDEV \
- rdma/rdma_netlink.h \
- enum RDMA_NL_NLDEV \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NLDEV_CMD_GET \
- rdma/rdma_netlink.h \
- enum RDMA_NLDEV_CMD_GET \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NLDEV_CMD_PORT_GET \
- rdma/rdma_netlink.h \
- enum RDMA_NLDEV_CMD_PORT_GET \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \
- rdma/rdma_netlink.h \
- enum RDMA_NLDEV_ATTR_DEV_INDEX \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NLDEV_ATTR_DEV_NAME \
- rdma/rdma_netlink.h \
- enum RDMA_NLDEV_ATTR_DEV_NAME \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \
- rdma/rdma_netlink.h \
- enum RDMA_NLDEV_ATTR_PORT_INDEX \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \
- rdma/rdma_netlink.h \
- enum RDMA_NLDEV_ATTR_NDEV_INDEX \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IFLA_NUM_VF \
- linux/if_link.h \
- enum IFLA_NUM_VF \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IFLA_EXT_MASK \
- linux/if_link.h \
- enum IFLA_EXT_MASK \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IFLA_PHYS_SWITCH_ID \
- linux/if_link.h \
- enum IFLA_PHYS_SWITCH_ID \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_IFLA_PHYS_PORT_NAME \
- linux/if_link.h \
- enum IFLA_PHYS_PORT_NAME \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_40000baseKR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_40000baseKR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_40000baseCR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_40000baseCR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_40000baseSR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_40000baseSR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_40000baseLR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_40000baseLR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_56000baseKR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_56000baseKR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_56000baseCR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_56000baseCR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_56000baseSR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_56000baseSR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_SUPPORTED_56000baseLR4_Full \
- /usr/include/linux/ethtool.h \
- define SUPPORTED_56000baseLR4_Full \
- $(AUTOCONF_OUTPUT)
- $Q sh -- '$<' '$@' \
- HAVE_STATIC_ASSERT \
- /usr/include/assert.h \
- define static_assert \
- $(AUTOCONF_OUTPUT)
-
-# Create mlx5_autoconf.h or update it in case it differs from the new one.
-
-mlx5_autoconf.h: mlx5_autoconf.h.new
- $Q [ -f '$@' ] && \
- cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
- mv '$<' '$@'
-
-$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h
-
-# Generate dependency plug-in for rdma-core when the PMD must not be linked
-# directly, so that applications do not inherit this dependency.
-
-ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
-
-$(LIB): $(LIB_GLUE)
-
-ifeq ($(LINK_USING_CC),1)
-GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
-else
-GLUE_LDFLAGS := $(LDFLAGS)
-endif
-$(LIB_GLUE): mlx5_glue.o
- $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
- -Wl,-h,$(LIB_GLUE) \
- -shared -o $@ $< -libverbs -lmlx5
-
-mlx5_glue.o: mlx5_autoconf.h
-
-endif
-
-clean_mlx5: FORCE
- $Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new
- $Q rm -f -- mlx5_glue.o $(LIB_GLUE_BASE)*
-
-clean: clean_mlx5
reason = 'only supported on Linux'
subdir_done()
endif
-build = true
-pmd_dlopen = (get_option('ibverbs_link') == 'dlopen')
-LIB_GLUE_BASE = 'librte_pmd_mlx5_glue.so'
-LIB_GLUE_VERSION = '20.02.0'
-LIB_GLUE = LIB_GLUE_BASE + '.' + LIB_GLUE_VERSION
-if pmd_dlopen
- dpdk_conf.set('RTE_IBVERBS_LINK_DLOPEN', 1)
- cflags += [
- '-DMLX5_GLUE="@0@"'.format(LIB_GLUE),
- '-DMLX5_GLUE_VERSION="@0@"'.format(LIB_GLUE_VERSION),
- ]
+allow_experimental_apis = true
+deps += ['hash', 'common_mlx5']
+sources = files(
+ 'mlx5.c',
+ 'mlx5_ethdev.c',
+ 'mlx5_flow.c',
+ 'mlx5_flow_meter.c',
+ 'mlx5_flow_dv.c',
+ 'mlx5_flow_verbs.c',
+ 'mlx5_mac.c',
+ 'mlx5_mr.c',
+ 'mlx5_nl.c',
+ 'mlx5_rss.c',
+ 'mlx5_rxmode.c',
+ 'mlx5_rxq.c',
+ 'mlx5_rxtx.c',
+ 'mlx5_mp.c',
+ 'mlx5_stats.c',
+ 'mlx5_trigger.c',
+ 'mlx5_txq.c',
+ 'mlx5_vlan.c',
+ 'mlx5_utils.c',
+ 'mlx5_socket.c',
+)
+if (dpdk_conf.has('RTE_ARCH_X86_64')
+ or dpdk_conf.has('RTE_ARCH_ARM64')
+ or dpdk_conf.has('RTE_ARCH_PPC_64'))
+ sources += files('mlx5_rxtx_vec.c')
endif
-
-libnames = [ 'mlx5', 'ibverbs' ]
-libs = []
-foreach libname:libnames
- lib = dependency('lib' + libname, required:false)
- if not lib.found()
- lib = cc.find_library(libname, required:false)
- endif
- if lib.found()
- libs += [ lib ]
- else
- build = false
- reason = 'missing dependency, "' + libname + '"'
+cflags_options = [
+ '-std=c11',
+ '-Wno-strict-prototypes',
+ '-D_BSD_SOURCE',
+ '-D_DEFAULT_SOURCE',
+ '-D_XOPEN_SOURCE=600'
+]
+foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
endif
endforeach
-
-if build
- allow_experimental_apis = true
- deps += ['hash']
- ext_deps += libs
- sources = files(
- 'mlx5.c',
- 'mlx5_ethdev.c',
- 'mlx5_flow.c',
- 'mlx5_flow_meter.c',
- 'mlx5_flow_dv.c',
- 'mlx5_flow_verbs.c',
- 'mlx5_mac.c',
- 'mlx5_mr.c',
- 'mlx5_nl.c',
- 'mlx5_rss.c',
- 'mlx5_rxmode.c',
- 'mlx5_rxq.c',
- 'mlx5_rxtx.c',
- 'mlx5_mp.c',
- 'mlx5_stats.c',
- 'mlx5_trigger.c',
- 'mlx5_txq.c',
- 'mlx5_vlan.c',
- 'mlx5_devx_cmds.c',
- 'mlx5_utils.c',
- 'mlx5_socket.c',
- )
- if (dpdk_conf.has('RTE_ARCH_X86_64')
- or dpdk_conf.has('RTE_ARCH_ARM64')
- or dpdk_conf.has('RTE_ARCH_PPC_64'))
- sources += files('mlx5_rxtx_vec.c')
- endif
- if not pmd_dlopen
- sources += files('mlx5_glue.c')
- endif
- cflags_options = [
- '-std=c11',
- '-Wno-strict-prototypes',
- '-D_BSD_SOURCE',
- '-D_DEFAULT_SOURCE',
- '-D_XOPEN_SOURCE=600'
- ]
- foreach option:cflags_options
- if cc.has_argument(option)
- cflags += option
- endif
- endforeach
- if get_option('buildtype').contains('debug')
- cflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]
- else
- cflags += [ '-DNDEBUG', '-UPEDANTIC' ]
- endif
- # To maintain the compatibility with the make build system
- # mlx5_autoconf.h file is still generated.
- # input array for meson member search:
- # [ "MACRO to define if found", "header for the search",
- # "symbol to search", "struct member to search" ]
- has_member_args = [
- [ 'HAVE_IBV_MLX5_MOD_SWP', 'infiniband/mlx5dv.h',
- 'struct mlx5dv_sw_parsing_caps', 'sw_parsing_offloads' ],
- [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V42', 'infiniband/verbs.h',
- 'struct ibv_counter_set_init_attr', 'counter_set_id' ],
- [ 'HAVE_IBV_DEVICE_COUNTERS_SET_V45', 'infiniband/verbs.h',
- 'struct ibv_counters_init_attr', 'comp_mask' ],
- ]
- # input array for meson symbol search:
- # [ "MACRO to define if found", "header for the search",
- # "symbol to search" ]
- has_sym_args = [
- [ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',
- 'MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX' ],
- [ 'HAVE_IBV_DEVICE_TUNNEL_SUPPORT', 'infiniband/mlx5dv.h',
- 'MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS' ],
- [ 'HAVE_IBV_MLX5_MOD_MPW', 'infiniband/mlx5dv.h',
- 'MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED' ],
- [ 'HAVE_IBV_MLX5_MOD_CQE_128B_COMP', 'infiniband/mlx5dv.h',
- 'MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP' ],
- [ 'HAVE_IBV_MLX5_MOD_CQE_128B_PAD', 'infiniband/mlx5dv.h',
- 'MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD' ],
- [ 'HAVE_IBV_FLOW_DV_SUPPORT', 'infiniband/mlx5dv.h',
- 'mlx5dv_create_flow_action_packet_reformat' ],
- [ 'HAVE_IBV_DEVICE_MPLS_SUPPORT', 'infiniband/verbs.h',
- 'IBV_FLOW_SPEC_MPLS' ],
- [ 'HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING', 'infiniband/verbs.h',
- 'IBV_WQ_FLAGS_PCI_WRITE_END_PADDING' ],
- [ 'HAVE_IBV_WQ_FLAG_RX_END_PADDING', 'infiniband/verbs.h',
- 'IBV_WQ_FLAG_RX_END_PADDING' ],
- [ 'HAVE_MLX5DV_DR_DEVX_PORT', 'infiniband/mlx5dv.h',
- 'mlx5dv_query_devx_port' ],
- [ 'HAVE_IBV_DEVX_OBJ', 'infiniband/mlx5dv.h',
- 'mlx5dv_devx_obj_create' ],
- [ 'HAVE_IBV_FLOW_DEVX_COUNTERS', 'infiniband/mlx5dv.h',
- 'MLX5DV_FLOW_ACTION_COUNTERS_DEVX' ],
- [ 'HAVE_IBV_DEVX_ASYNC', 'infiniband/mlx5dv.h',
- 'mlx5dv_devx_obj_query_async' ],
- [ 'HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR', 'infiniband/mlx5dv.h',
- 'mlx5dv_dr_action_create_dest_devx_tir' ],
- [ 'HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER', 'infiniband/mlx5dv.h',
- 'mlx5dv_dr_action_create_flow_meter' ],
- [ 'HAVE_MLX5DV_MMAP_GET_NC_PAGES_CMD', 'infiniband/mlx5dv.h',
- 'MLX5_MMAP_GET_NC_PAGES_CMD' ],
- [ 'HAVE_MLX5DV_DR', 'infiniband/mlx5dv.h',
- 'MLX5DV_DR_DOMAIN_TYPE_NIC_RX' ],
- [ 'HAVE_MLX5DV_DR_ESWITCH', 'infiniband/mlx5dv.h',
- 'MLX5DV_DR_DOMAIN_TYPE_FDB' ],
- [ 'HAVE_MLX5DV_DR_VLAN', 'infiniband/mlx5dv.h',
- 'mlx5dv_dr_action_create_push_vlan' ],
- [ 'HAVE_SUPPORTED_40000baseKR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_40000baseKR4_Full' ],
- [ 'HAVE_SUPPORTED_40000baseCR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_40000baseCR4_Full' ],
- [ 'HAVE_SUPPORTED_40000baseSR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_40000baseSR4_Full' ],
- [ 'HAVE_SUPPORTED_40000baseLR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_40000baseLR4_Full' ],
- [ 'HAVE_SUPPORTED_56000baseKR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_56000baseKR4_Full' ],
- [ 'HAVE_SUPPORTED_56000baseCR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_56000baseCR4_Full' ],
- [ 'HAVE_SUPPORTED_56000baseSR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_56000baseSR4_Full' ],
- [ 'HAVE_SUPPORTED_56000baseLR4_Full', 'linux/ethtool.h',
- 'SUPPORTED_56000baseLR4_Full' ],
- [ 'HAVE_ETHTOOL_LINK_MODE_25G', 'linux/ethtool.h',
- 'ETHTOOL_LINK_MODE_25000baseCR_Full_BIT' ],
- [ 'HAVE_ETHTOOL_LINK_MODE_50G', 'linux/ethtool.h',
- 'ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT' ],
- [ 'HAVE_ETHTOOL_LINK_MODE_100G', 'linux/ethtool.h',
- 'ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT' ],
- [ 'HAVE_IFLA_NUM_VF', 'linux/if_link.h',
- 'IFLA_NUM_VF' ],
- [ 'HAVE_IFLA_EXT_MASK', 'linux/if_link.h',
- 'IFLA_EXT_MASK' ],
- [ 'HAVE_IFLA_PHYS_SWITCH_ID', 'linux/if_link.h',
- 'IFLA_PHYS_SWITCH_ID' ],
- [ 'HAVE_IFLA_PHYS_PORT_NAME', 'linux/if_link.h',
- 'IFLA_PHYS_PORT_NAME' ],
- [ 'HAVE_RDMA_NL_NLDEV', 'rdma/rdma_netlink.h',
- 'RDMA_NL_NLDEV' ],
- [ 'HAVE_RDMA_NLDEV_CMD_GET', 'rdma/rdma_netlink.h',
- 'RDMA_NLDEV_CMD_GET' ],
- [ 'HAVE_RDMA_NLDEV_CMD_PORT_GET', 'rdma/rdma_netlink.h',
- 'RDMA_NLDEV_CMD_PORT_GET' ],
- [ 'HAVE_RDMA_NLDEV_ATTR_DEV_INDEX', 'rdma/rdma_netlink.h',
- 'RDMA_NLDEV_ATTR_DEV_INDEX' ],
- [ 'HAVE_RDMA_NLDEV_ATTR_DEV_NAME', 'rdma/rdma_netlink.h',
- 'RDMA_NLDEV_ATTR_DEV_NAME' ],
- [ 'HAVE_RDMA_NLDEV_ATTR_PORT_INDEX', 'rdma/rdma_netlink.h',
- 'RDMA_NLDEV_ATTR_PORT_INDEX' ],
- [ 'HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX', 'rdma/rdma_netlink.h',
- 'RDMA_NLDEV_ATTR_NDEV_INDEX' ],
- [ 'HAVE_MLX5_DR_FLOW_DUMP', 'infiniband/mlx5dv.h',
- 'mlx5dv_dump_dr_domain'],
- ]
- config = configuration_data()
- foreach arg:has_sym_args
- config.set(arg[0], cc.has_header_symbol(arg[1], arg[2],
- dependencies: libs))
- endforeach
- foreach arg:has_member_args
- file_prefix = '#include <' + arg[1] + '>'
- config.set(arg[0], cc.has_member(arg[2], arg[3],
- prefix : file_prefix, dependencies: libs))
- endforeach
- configure_file(output : 'mlx5_autoconf.h', configuration : config)
-endif
-# Build Glue Library
-if pmd_dlopen and build
- dlopen_name = 'mlx5_glue'
- dlopen_lib_name = driver_name_fmt.format(dlopen_name)
- dlopen_so_version = LIB_GLUE_VERSION
- dlopen_sources = files('mlx5_glue.c')
- dlopen_install_dir = [ eal_pmd_path + '-glue' ]
- dlopen_includes = [global_inc]
- dlopen_includes += include_directories(
- '../../../lib/librte_eal/common/include/generic',
- )
- shared_lib = shared_library(
- dlopen_lib_name,
- dlopen_sources,
- include_directories: dlopen_includes,
- c_args: cflags,
- dependencies: libs,
- link_args: [
- '-Wl,-export-dynamic',
- '-Wl,-h,@0@'.format(LIB_GLUE),
- ],
- soversion: dlopen_so_version,
- install: true,
- install_dir: dlopen_install_dir,
- )
+if get_option('buildtype').contains('debug')
+ cflags += [ '-pedantic', '-UNDEBUG', '-DPEDANTIC' ]
+else
+ cflags += [ '-DNDEBUG', '-UPEDANTIC' ]
endif
#include <unistd.h>
#include <string.h>
#include <assert.h>
-#include <dlfcn.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_glue.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
-#include "mlx5_devx_cmds.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
RTE_PCI_DRV_PROBE_AGAIN,
};
-#ifdef RTE_IBVERBS_LINK_DLOPEN
-
-/**
- * Suffix RTE_EAL_PMD_PATH with "-glue".
- *
- * This function performs a sanity check on RTE_EAL_PMD_PATH before
- * suffixing its last component.
- *
- * @param buf[out]
- * Output buffer, should be large enough otherwise NULL is returned.
- * @param size
- * Size of @p out.
- *
- * @return
- * Pointer to @p buf or @p NULL in case suffix cannot be appended.
- */
-static char *
-mlx5_glue_path(char *buf, size_t size)
-{
- static const char *const bad[] = { "/", ".", "..", NULL };
- const char *path = RTE_EAL_PMD_PATH;
- size_t len = strlen(path);
- size_t off;
- int i;
-
- while (len && path[len - 1] == '/')
- --len;
- for (off = len; off && path[off - 1] != '/'; --off)
- ;
- for (i = 0; bad[i]; ++i)
- if (!strncmp(path + off, bad[i], (int)(len - off)))
- goto error;
- i = snprintf(buf, size, "%.*s-glue", (int)len, path);
- if (i == -1 || (size_t)i >= size)
- goto error;
- return buf;
-error:
- DRV_LOG(ERR,
- "unable to append \"-glue\" to last component of"
- " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
- " please re-configure DPDK");
- return NULL;
-}
-
-/**
- * Initialization routine for run-time dependency on rdma-core.
- */
-static int
-mlx5_glue_init(void)
-{
- char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
- const char *path[] = {
- /*
- * A basic security check is necessary before trusting
- * MLX5_GLUE_PATH, which may override RTE_EAL_PMD_PATH.
- */
- (geteuid() == getuid() && getegid() == getgid() ?
- getenv("MLX5_GLUE_PATH") : NULL),
- /*
- * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
- * variant, otherwise let dlopen() look up libraries on its
- * own.
- */
- (*RTE_EAL_PMD_PATH ?
- mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
- };
- unsigned int i = 0;
- void *handle = NULL;
- void **sym;
- const char *dlmsg;
-
- while (!handle && i != RTE_DIM(path)) {
- const char *end;
- size_t len;
- int ret;
-
- if (!path[i]) {
- ++i;
- continue;
- }
- end = strpbrk(path[i], ":;");
- if (!end)
- end = path[i] + strlen(path[i]);
- len = end - path[i];
- ret = 0;
- do {
- char name[ret + 1];
-
- ret = snprintf(name, sizeof(name), "%.*s%s" MLX5_GLUE,
- (int)len, path[i],
- (!len || *(end - 1) == '/') ? "" : "/");
- if (ret == -1)
- break;
- if (sizeof(name) != (size_t)ret + 1)
- continue;
- DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
- name);
- handle = dlopen(name, RTLD_LAZY);
- break;
- } while (1);
- path[i] = end + 1;
- if (!*end)
- ++i;
- }
- if (!handle) {
- rte_errno = EINVAL;
- dlmsg = dlerror();
- if (dlmsg)
- DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
- goto glue_error;
- }
- sym = dlsym(handle, "mlx5_glue");
- if (!sym || !*sym) {
- rte_errno = EINVAL;
- dlmsg = dlerror();
- if (dlmsg)
- DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
- goto glue_error;
- }
- mlx5_glue = *sym;
- return 0;
-glue_error:
- if (handle)
- dlclose(handle);
- DRV_LOG(WARNING,
- "cannot initialize PMD due to missing run-time dependency on"
- " rdma-core libraries (libibverbs, libmlx5)");
- return -rte_errno;
-}
-
-#endif
-
/**
* Driver initialization routine.
*/
mlx5_set_ptype_table();
mlx5_set_cksum_table();
mlx5_set_swp_types_table();
- /*
- * RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
- * huge pages. Calling ibv_fork_init() during init allows
- * applications to use fork() safely for purposes other than
- * using this PMD, which is not supported in forked processes.
- */
- setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
- /* Match the size of Rx completion entry to the size of a cacheline. */
- if (RTE_CACHE_LINE_SIZE == 128)
- setenv("MLX5_CQE_SIZE", "128", 0);
- /*
- * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
- * cleanup all the Verbs resources even when the device was removed.
- */
- setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
-#ifdef RTE_IBVERBS_LINK_DLOPEN
- if (mlx5_glue_init())
- return;
- assert(mlx5_glue);
-#endif
-#ifndef NDEBUG
- /* Glue structure must not contain any NULL pointers. */
- {
- unsigned int i;
-
- for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
- assert(((const void *const *)mlx5_glue)[i]);
- }
-#endif
- if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
- DRV_LOG(ERR,
- "rdma-core glue \"%s\" mismatch: \"%s\" is required",
- mlx5_glue->version, MLX5_GLUE_VERSION);
- return;
- }
- mlx5_glue->fork_init();
- rte_pci_register(&mlx5_driver);
+ if (mlx5_glue)
+ rte_pci_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
#include <rte_errno.h>
#include <rte_flow.h>
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_glue.h"
-#include "mlx5_prm.h"
-#include "mlx5_devx_cmds.h"
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
+++ /dev/null
-// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright 2018 Mellanox Technologies, Ltd */
-
-#include <unistd.h>
-
-#include <rte_flow_driver.h>
-#include <rte_malloc.h>
-
-#include "mlx5_prm.h"
-#include "mlx5_devx_cmds.h"
-#include "mlx5_utils.h"
-
-
-/**
- * Allocate flow counters via devx interface.
- *
- * @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
- * @param dcs
- * Pointer to counters properties structure to be filled by the routine.
- * @param bulk_n_128
- * Bulk counter numbers in 128 counters units.
- *
- * @return
- * Pointer to counter object on success, a negative value otherwise and
- * rte_errno is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)
-{
- struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
- uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
-
- if (!dcs) {
- rte_errno = ENOMEM;
- return NULL;
- }
- MLX5_SET(alloc_flow_counter_in, in, opcode,
- MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
- MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
- dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
- sizeof(in), out, sizeof(out));
- if (!dcs->obj) {
- DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
- rte_errno = errno;
- rte_free(dcs);
- return NULL;
- }
- dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
- return dcs;
-}
-
-/**
- * Query flow counters values.
- *
- * @param[in] dcs
- * devx object that was obtained from mlx5_devx_cmd_fc_alloc.
- * @param[in] clear
- * Whether hardware should clear the counters after the query or not.
- * @param[in] n_counters
- * 0 in case of 1 counter to read, otherwise the counter number to read.
- * @param pkts
- * The number of packets that matched the flow.
- * @param bytes
- * The number of bytes that matched the flow.
- * @param mkey
- * The mkey key for batch query.
- * @param addr
- * The address in the mkey range for batch query.
- * @param cmd_comp
- * The completion object for asynchronous batch query.
- * @param async_id
- * The ID to be returned in the asynchronous batch query response.
- *
- * @return
- * 0 on success, a negative value otherwise.
- */
-int
-mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
- int clear, uint32_t n_counters,
- uint64_t *pkts, uint64_t *bytes,
- uint32_t mkey, void *addr,
- struct mlx5dv_devx_cmd_comp *cmd_comp,
- uint64_t async_id)
-{
- int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter);
- uint32_t out[out_len];
- uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
- void *stats;
- int rc;
-
- MLX5_SET(query_flow_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_FLOW_COUNTER);
- MLX5_SET(query_flow_counter_in, in, op_mod, 0);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
- MLX5_SET(query_flow_counter_in, in, clear, !!clear);
-
- if (n_counters) {
- MLX5_SET(query_flow_counter_in, in, num_of_counters,
- n_counters);
- MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
- MLX5_SET(query_flow_counter_in, in, mkey, mkey);
- MLX5_SET64(query_flow_counter_in, in, address,
- (uint64_t)(uintptr_t)addr);
- }
- if (!cmd_comp)
- rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
- out_len);
- else
- rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
- out_len, async_id,
- cmd_comp);
- if (rc) {
- DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
- rte_errno = rc;
- return -rc;
- }
- if (!n_counters) {
- stats = MLX5_ADDR_OF(query_flow_counter_out,
- out, flow_statistics);
- *pkts = MLX5_GET64(traffic_counter, stats, packets);
- *bytes = MLX5_GET64(traffic_counter, stats, octets);
- }
- return 0;
-}
-
-/**
- * Create a new mkey.
- *
- * @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
- * @param[in] attr
- * Attributes of the requested mkey.
- *
- * @return
- * Pointer to Devx mkey on success, a negative value otherwise and rte_errno
- * is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
- struct mlx5_devx_mkey_attr *attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(create_mkey_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
- void *mkc;
- struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
- size_t pgsize;
- uint32_t translation_size;
-
- if (!mkey) {
- rte_errno = ENOMEM;
- return NULL;
- }
- pgsize = sysconf(_SC_PAGESIZE);
- translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
- MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- translation_size);
- MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- MLX5_SET(mkc, mkc, lw, 0x1);
- MLX5_SET(mkc, mkc, lr, 0x1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, attr->pd);
- MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
- MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
- MLX5_SET64(mkc, mkc, start_addr, attr->addr);
- MLX5_SET64(mkc, mkc, len, attr->size);
- MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
- mkey->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
- sizeof(out));
- if (!mkey->obj) {
- DRV_LOG(ERR, "Can't create mkey - error %d", errno);
- rte_errno = errno;
- rte_free(mkey);
- return NULL;
- }
- mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
- mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
- return mkey;
-}
-
-/**
- * Get status of devx command response.
- * Mainly used for asynchronous commands.
- *
- * @param[in] out
- * The out response buffer.
- *
- * @return
- * 0 on success, non-zero value otherwise.
- */
-int
-mlx5_devx_get_out_command_status(void *out)
-{
- int status;
-
- if (!out)
- return -EINVAL;
- status = MLX5_GET(query_flow_counter_out, out, status);
- if (status) {
- int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
-
- DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
- syndrome);
- }
- return status;
-}
-
-/**
- * Destroy any object allocated by a Devx API.
- *
- * @param[in] obj
- * Pointer to a general object.
- *
- * @return
- * 0 on success, a negative value otherwise.
- */
-int
-mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
-{
- int ret;
-
- if (!obj)
- return 0;
- ret = mlx5_glue->devx_obj_destroy(obj->obj);
- rte_free(obj);
- return ret;
-}
-
-/**
- * Query NIC vport context.
- * Fills minimal inline attribute.
- *
- * @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
- * @param[in] vport
- * vport index
- * @param[out] attr
- * Attributes device values.
- *
- * @return
- * 0 on success, a negative value otherwise.
- */
-static int
-mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,
- unsigned int vport,
- struct mlx5_hca_attr *attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
- void *vctx;
- int status, syndrome, rc;
-
- /* Query NIC vport context to determine inline mode. */
- MLX5_SET(query_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
- MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
- rc = mlx5_glue->devx_general_cmd(ctx,
- in, sizeof(in),
- out, sizeof(out));
- if (rc)
- goto error;
- status = MLX5_GET(query_nic_vport_context_out, out, status);
- syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
- if (status) {
- DRV_LOG(DEBUG, "Failed to query NIC vport context, "
- "status %x, syndrome = %x",
- status, syndrome);
- return -1;
- }
- vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
- nic_vport_context);
- attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
- min_wqe_inline_mode);
- return 0;
-error:
- rc = (rc > 0) ? -rc : rc;
- return rc;
-}
-
-/**
- * Query HCA attributes.
- * Using those attributes we can check on run time if the device
- * is having the required capabilities.
- *
- * @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
- * @param[out] attr
- * Attributes device values.
- *
- * @return
- * 0 on success, a negative value otherwise.
- */
-int
-mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
- struct mlx5_hca_attr *attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
- void *hcattr;
- int status, syndrome, rc;
-
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
- MLX5_HCA_CAP_OPMOD_GET_CUR);
-
- rc = mlx5_glue->devx_general_cmd(ctx,
- in, sizeof(in), out, sizeof(out));
- if (rc)
- goto error;
- status = MLX5_GET(query_hca_cap_out, out, status);
- syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
- if (status) {
- DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
- "status %x, syndrome = %x",
- status, syndrome);
- return -1;
- }
- hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
- attr->flow_counter_bulk_alloc_bitmap =
- MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
- attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
- flow_counters_dump);
- attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
- attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
- attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
- log_max_hairpin_queues);
- attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
- log_max_hairpin_wq_data_sz);
- attr->log_max_hairpin_num_packets = MLX5_GET
- (cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
- attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
- attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
- eth_net_offloads);
- attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
- attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
- flex_parser_protocols);
- attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
- if (attr->qos.sup) {
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
- MLX5_HCA_CAP_OPMOD_GET_CUR);
- rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
- out, sizeof(out));
- if (rc)
- goto error;
- if (status) {
- DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
- " status %x, syndrome = %x",
- status, syndrome);
- return -1;
- }
- hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
- attr->qos.srtcm_sup =
- MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
- attr->qos.log_max_flow_meter =
- MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
- attr->qos.flow_meter_reg_c_ids =
- MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
- attr->qos.flow_meter_reg_share =
- MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
- }
- if (!attr->eth_net_offloads)
- return 0;
-
- /* Query HCA offloads for Ethernet protocol. */
- memset(in, 0, sizeof(in));
- memset(out, 0, sizeof(out));
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
- MLX5_HCA_CAP_OPMOD_GET_CUR);
-
- rc = mlx5_glue->devx_general_cmd(ctx,
- in, sizeof(in),
- out, sizeof(out));
- if (rc) {
- attr->eth_net_offloads = 0;
- goto error;
- }
- status = MLX5_GET(query_hca_cap_out, out, status);
- syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
- if (status) {
- DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
- "status %x, syndrome = %x",
- status, syndrome);
- attr->eth_net_offloads = 0;
- return -1;
- }
- hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
- attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
- hcattr, wqe_vlan_insert);
- attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
- lro_cap);
- attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
- hcattr, tunnel_lro_gre);
- attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
- hcattr, tunnel_lro_vxlan);
- attr->lro_max_msg_sz_mode = MLX5_GET
- (per_protocol_networking_offload_caps,
- hcattr, lro_max_msg_sz_mode);
- for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
- attr->lro_timer_supported_periods[i] =
- MLX5_GET(per_protocol_networking_offload_caps, hcattr,
- lro_timer_supported_periods[i]);
- }
- attr->tunnel_stateless_geneve_rx =
- MLX5_GET(per_protocol_networking_offload_caps,
- hcattr, tunnel_stateless_geneve_rx);
- attr->geneve_max_opt_len =
- MLX5_GET(per_protocol_networking_offload_caps,
- hcattr, max_geneve_opt_len);
- attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
- hcattr, wqe_inline_mode);
- attr->tunnel_stateless_gtp = MLX5_GET
- (per_protocol_networking_offload_caps,
- hcattr, tunnel_stateless_gtp);
- if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
- return 0;
- if (attr->eth_virt) {
- rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
- if (rc) {
- attr->eth_virt = 0;
- goto error;
- }
- }
- return 0;
-error:
- rc = (rc > 0) ? -rc : rc;
- return rc;
-}
-
-/**
- * Query TIS transport domain from QP verbs object using DevX API.
- *
- * @param[in] qp
- * Pointer to verbs QP returned by ibv_create_qp .
- * @param[in] tis_num
- * TIS number of TIS to query.
- * @param[out] tis_td
- * Pointer to TIS transport domain variable, to be set by the routine.
- *
- * @return
- * 0 on success, a negative value otherwise.
- */
-int
-mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
- uint32_t *tis_td)
-{
- uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
- int rc;
- void *tis_ctx;
-
- MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
- MLX5_SET(query_tis_in, in, tisn, tis_num);
- rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
- if (rc) {
- DRV_LOG(ERR, "Failed to query QP using DevX");
- return -rc;
- };
- tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
- *tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
- return 0;
-}
-
-/**
- * Fill WQ data for DevX API command.
- * Utility function for use when creating DevX objects containing a WQ.
- *
- * @param[in] wq_ctx
- * Pointer to WQ context to fill with data.
- * @param [in] wq_attr
- * Pointer to WQ attributes structure to fill in WQ context.
- */
-static void
-devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
-{
- MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
- MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
- MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
- MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
- MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
- MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
- MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
- MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
- MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
- MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
- MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
- MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
- MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
- MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
- MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
- MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
- MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
- MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
- MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
- wq_attr->log_hairpin_num_packets);
- MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
- MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
- wq_attr->single_wqe_log_num_of_strides);
- MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
- MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
- wq_attr->single_stride_log_num_of_bytes);
- MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
- MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
- MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
-}
-
-/**
- * Create RQ using DevX API.
- *
- * @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- * @param [in] rq_attr
- * Pointer to create RQ attributes structure.
- * @param [in] socket
- * CPU socket ID for allocations.
- *
- * @return
- * The DevX object created, NULL otherwise and rte_errno is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
- struct mlx5_devx_create_rq_attr *rq_attr,
- int socket)
-{
- uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
- void *rq_ctx, *wq_ctx;
- struct mlx5_devx_wq_attr *wq_attr;
- struct mlx5_devx_obj *rq = NULL;
-
- rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
- if (!rq) {
- DRV_LOG(ERR, "Failed to allocate RQ data");
- rte_errno = ENOMEM;
- return NULL;
- }
- MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
- rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
- MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
- MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
- MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
- MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
- MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
- MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
- MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
- MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
- MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
- MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
- MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
- MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
- wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
- wq_attr = &rq_attr->wq_attr;
- devx_cmd_fill_wq_data(wq_ctx, wq_attr);
- rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
- out, sizeof(out));
- if (!rq->obj) {
- DRV_LOG(ERR, "Failed to create RQ using DevX");
- rte_errno = errno;
- rte_free(rq);
- return NULL;
- }
- rq->id = MLX5_GET(create_rq_out, out, rqn);
- return rq;
-}
-
-/**
- * Modify RQ using DevX API.
- *
- * @param[in] rq
- * Pointer to RQ object structure.
- * @param [in] rq_attr
- * Pointer to modify RQ attributes structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
- struct mlx5_devx_modify_rq_attr *rq_attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
- void *rq_ctx, *wq_ctx;
- int ret;
-
- MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
- MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
- MLX5_SET(modify_rq_in, in, rqn, rq->id);
- MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
- rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
- MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
- if (rq_attr->modify_bitmask &
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
- MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
- if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
- MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
- if (rq_attr->modify_bitmask &
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
- MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
- MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
- MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
- if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
- wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
- MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
- }
- ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
- out, sizeof(out));
- if (ret) {
- DRV_LOG(ERR, "Failed to modify RQ using DevX");
- rte_errno = errno;
- return -errno;
- }
- return ret;
-}
-
-/**
- * Create TIR using DevX API.
- *
- * @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- * @param [in] tir_attr
- * Pointer to TIR attributes structure.
- *
- * @return
- * The DevX object created, NULL otherwise and rte_errno is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
- struct mlx5_devx_tir_attr *tir_attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
- void *tir_ctx, *outer, *inner;
- struct mlx5_devx_obj *tir = NULL;
- int i;
-
- tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
- if (!tir) {
- DRV_LOG(ERR, "Failed to allocate TIR data");
- rte_errno = ENOMEM;
- return NULL;
- }
- MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
- tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
- MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
- MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
- tir_attr->lro_timeout_period_usecs);
- MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
- MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
- MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
- MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
- MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
- tir_attr->tunneled_offload_en);
- MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
- MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
- MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
- MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
- for (i = 0; i < 10; i++) {
- MLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],
- tir_attr->rx_hash_toeplitz_key[i]);
- }
- outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
- MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
- tir_attr->rx_hash_field_selector_outer.l3_prot_type);
- MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
- tir_attr->rx_hash_field_selector_outer.l4_prot_type);
- MLX5_SET(rx_hash_field_select, outer, selected_fields,
- tir_attr->rx_hash_field_selector_outer.selected_fields);
- inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
- MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
- tir_attr->rx_hash_field_selector_inner.l3_prot_type);
- MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
- tir_attr->rx_hash_field_selector_inner.l4_prot_type);
- MLX5_SET(rx_hash_field_select, inner, selected_fields,
- tir_attr->rx_hash_field_selector_inner.selected_fields);
- tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
- out, sizeof(out));
- if (!tir->obj) {
- DRV_LOG(ERR, "Failed to create TIR using DevX");
- rte_errno = errno;
- rte_free(tir);
- return NULL;
- }
- tir->id = MLX5_GET(create_tir_out, out, tirn);
- return tir;
-}
-
-/**
- * Create RQT using DevX API.
- *
- * @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- * @param [in] rqt_attr
- * Pointer to RQT attributes structure.
- *
- * @return
- * The DevX object created, NULL otherwise and rte_errno is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
- struct mlx5_devx_rqt_attr *rqt_attr)
-{
- uint32_t *in = NULL;
- uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
- rqt_attr->rqt_actual_size * sizeof(uint32_t);
- uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
- void *rqt_ctx;
- struct mlx5_devx_obj *rqt = NULL;
- int i;
-
- in = rte_calloc(__func__, 1, inlen, 0);
- if (!in) {
- DRV_LOG(ERR, "Failed to allocate RQT IN data");
- rte_errno = ENOMEM;
- return NULL;
- }
- rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
- if (!rqt) {
- DRV_LOG(ERR, "Failed to allocate RQT data");
- rte_errno = ENOMEM;
- rte_free(in);
- return NULL;
- }
- MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
- rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
- MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
- MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
- for (i = 0; i < rqt_attr->rqt_actual_size; i++)
- MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
- rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
- rte_free(in);
- if (!rqt->obj) {
- DRV_LOG(ERR, "Failed to create RQT using DevX");
- rte_errno = errno;
- rte_free(rqt);
- return NULL;
- }
- rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
- return rqt;
-}
-
-/**
- * Create SQ using DevX API.
- *
- * @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- * @param [in] sq_attr
- * Pointer to SQ attributes structure.
- * @param [in] socket
- * CPU socket ID for allocations.
- *
- * @return
- * The DevX object created, NULL otherwise and rte_errno is set.
- **/
-struct mlx5_devx_obj *
-mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
- struct mlx5_devx_create_sq_attr *sq_attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
- void *sq_ctx;
- void *wq_ctx;
- struct mlx5_devx_wq_attr *wq_attr;
- struct mlx5_devx_obj *sq = NULL;
-
- sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
- if (!sq) {
- DRV_LOG(ERR, "Failed to allocate SQ data");
- rte_errno = ENOMEM;
- return NULL;
- }
- MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
- sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
- MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
- MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
- MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
- MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
- MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
- sq_attr->flush_in_error_en);
- MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
- sq_attr->min_wqe_inline_mode);
- MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
- MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
- MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
- MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
- MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
- MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
- MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
- sq_attr->packet_pacing_rate_limit_index);
- MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
- MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
- wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
- wq_attr = &sq_attr->wq_attr;
- devx_cmd_fill_wq_data(wq_ctx, wq_attr);
- sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
- out, sizeof(out));
- if (!sq->obj) {
- DRV_LOG(ERR, "Failed to create SQ using DevX");
- rte_errno = errno;
- rte_free(sq);
- return NULL;
- }
- sq->id = MLX5_GET(create_sq_out, out, sqn);
- return sq;
-}
-
-/**
- * Modify SQ using DevX API.
- *
- * @param[in] sq
- * Pointer to SQ object structure.
- * @param [in] sq_attr
- * Pointer to SQ attributes structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
- struct mlx5_devx_modify_sq_attr *sq_attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
- void *sq_ctx;
- int ret;
-
- MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
- MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
- MLX5_SET(modify_sq_in, in, sqn, sq->id);
- sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
- MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
- MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
- MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
- ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
- out, sizeof(out));
- if (ret) {
- DRV_LOG(ERR, "Failed to modify SQ using DevX");
- rte_errno = errno;
- return -errno;
- }
- return ret;
-}
-
-/**
- * Create TIS using DevX API.
- *
- * @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- * @param [in] tis_attr
- * Pointer to TIS attributes structure.
- *
- * @return
- * The DevX object created, NULL otherwise and rte_errno is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
- struct mlx5_devx_tis_attr *tis_attr)
-{
- uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
- struct mlx5_devx_obj *tis = NULL;
- void *tis_ctx;
-
- tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
- if (!tis) {
- DRV_LOG(ERR, "Failed to allocate TIS object");
- rte_errno = ENOMEM;
- return NULL;
- }
- MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
- tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
- tis_attr->strict_lag_tx_port_affinity);
- MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
- tis_attr->strict_lag_tx_port_affinity);
- MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
- MLX5_SET(tisc, tis_ctx, transport_domain,
- tis_attr->transport_domain);
- tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
- out, sizeof(out));
- if (!tis->obj) {
- DRV_LOG(ERR, "Failed to create TIS using DevX");
- rte_errno = errno;
- rte_free(tis);
- return NULL;
- }
- tis->id = MLX5_GET(create_tis_out, out, tisn);
- return tis;
-}
-
-/**
- * Create transport domain using DevX API.
- *
- * @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- *
- * @return
- * The DevX object created, NULL otherwise and rte_errno is set.
- */
-struct mlx5_devx_obj *
-mlx5_devx_cmd_create_td(struct ibv_context *ctx)
-{
- uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
- uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
- struct mlx5_devx_obj *td = NULL;
-
- td = rte_calloc(__func__, 1, sizeof(*td), 0);
- if (!td) {
- DRV_LOG(ERR, "Failed to allocate TD object");
- rte_errno = ENOMEM;
- return NULL;
- }
- MLX5_SET(alloc_transport_domain_in, in, opcode,
- MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
- td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
- out, sizeof(out));
- if (!td->obj) {
- DRV_LOG(ERR, "Failed to create TIS using DevX");
- rte_errno = errno;
- rte_free(td);
- return NULL;
- }
- td->id = MLX5_GET(alloc_transport_domain_out, out,
- transport_domain);
- return td;
-}
-
-/**
- * Dump all flows to file.
- *
- * @param[in] fdb_domain
- * FDB domain.
- * @param[in] rx_domain
- * RX domain.
- * @param[in] tx_domain
- * TX domain.
- * @param[out] file
- * Pointer to file stream.
- *
- * @return
- * 0 on success, a nagative value otherwise.
- */
-int
-mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
- void *rx_domain __rte_unused,
- void *tx_domain __rte_unused, FILE *file __rte_unused)
-{
- int ret = 0;
-
-#ifdef HAVE_MLX5_DR_FLOW_DUMP
- if (fdb_domain) {
- ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
- if (ret)
- return ret;
- }
- assert(rx_domain);
- ret = mlx5_glue->dr_dump_domain(file, rx_domain);
- if (ret)
- return ret;
- assert(tx_domain);
- ret = mlx5_glue->dr_dump_domain(file, tx_domain);
-#else
- ret = ENOTSUP;
-#endif
- return -ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2019 Mellanox Technologies, Ltd
- */
-
-#ifndef RTE_PMD_MLX5_DEVX_CMDS_H_
-#define RTE_PMD_MLX5_DEVX_CMDS_H_
-
-#include "mlx5_glue.h"
-
-
-/* devX creation object */
-struct mlx5_devx_obj {
- struct mlx5dv_devx_obj *obj; /* The DV object. */
- int id; /* The object ID. */
-};
-
-struct mlx5_devx_mkey_attr {
- uint64_t addr;
- uint64_t size;
- uint32_t umem_id;
- uint32_t pd;
-};
-
-/* HCA qos attributes. */
-struct mlx5_hca_qos_attr {
- uint32_t sup:1; /* Whether QOS is supported. */
- uint32_t srtcm_sup:1; /* Whether srTCM mode is supported. */
- uint32_t flow_meter_reg_share:1;
- /* Whether reg_c share is supported. */
- uint8_t log_max_flow_meter;
- /* Power of the maximum supported meters. */
- uint8_t flow_meter_reg_c_ids;
- /* Bitmap of the reg_Cs available for flow meter to use. */
-
-};
-
-/* HCA supports this number of time periods for LRO. */
-#define MLX5_LRO_NUM_SUPP_PERIODS 4
-
-/* HCA attributes. */
-struct mlx5_hca_attr {
- uint32_t eswitch_manager:1;
- uint32_t flow_counters_dump:1;
- uint8_t flow_counter_bulk_alloc_bitmap;
- uint32_t eth_net_offloads:1;
- uint32_t eth_virt:1;
- uint32_t wqe_vlan_insert:1;
- uint32_t wqe_inline_mode:2;
- uint32_t vport_inline_mode:3;
- uint32_t tunnel_stateless_geneve_rx:1;
- uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
- uint32_t tunnel_stateless_gtp:1;
- uint32_t lro_cap:1;
- uint32_t tunnel_lro_gre:1;
- uint32_t tunnel_lro_vxlan:1;
- uint32_t lro_max_msg_sz_mode:2;
- uint32_t lro_timer_supported_periods[MLX5_LRO_NUM_SUPP_PERIODS];
- uint32_t flex_parser_protocols;
- uint32_t hairpin:1;
- uint32_t log_max_hairpin_queues:5;
- uint32_t log_max_hairpin_wq_data_sz:5;
- uint32_t log_max_hairpin_num_packets:5;
- uint32_t vhca_id:16;
- struct mlx5_hca_qos_attr qos;
-};
-
-struct mlx5_devx_wq_attr {
- uint32_t wq_type:4;
- uint32_t wq_signature:1;
- uint32_t end_padding_mode:2;
- uint32_t cd_slave:1;
- uint32_t hds_skip_first_sge:1;
- uint32_t log2_hds_buf_size:3;
- uint32_t page_offset:5;
- uint32_t lwm:16;
- uint32_t pd:24;
- uint32_t uar_page:24;
- uint64_t dbr_addr;
- uint32_t hw_counter;
- uint32_t sw_counter;
- uint32_t log_wq_stride:4;
- uint32_t log_wq_pg_sz:5;
- uint32_t log_wq_sz:5;
- uint32_t dbr_umem_valid:1;
- uint32_t wq_umem_valid:1;
- uint32_t log_hairpin_num_packets:5;
- uint32_t log_hairpin_data_sz:5;
- uint32_t single_wqe_log_num_of_strides:4;
- uint32_t two_byte_shift_en:1;
- uint32_t single_stride_log_num_of_bytes:3;
- uint32_t dbr_umem_id;
- uint32_t wq_umem_id;
- uint64_t wq_umem_offset;
-};
-
-/* Create RQ attributes structure, used by create RQ operation. */
-struct mlx5_devx_create_rq_attr {
- uint32_t rlky:1;
- uint32_t delay_drop_en:1;
- uint32_t scatter_fcs:1;
- uint32_t vsd:1;
- uint32_t mem_rq_type:4;
- uint32_t state:4;
- uint32_t flush_in_error_en:1;
- uint32_t hairpin:1;
- uint32_t user_index:24;
- uint32_t cqn:24;
- uint32_t counter_set_id:8;
- uint32_t rmpn:24;
- struct mlx5_devx_wq_attr wq_attr;
-};
-
-/* Modify RQ attributes structure, used by modify RQ operation. */
-struct mlx5_devx_modify_rq_attr {
- uint32_t rqn:24;
- uint32_t rq_state:4; /* Current RQ state. */
- uint32_t state:4; /* Required RQ state. */
- uint32_t scatter_fcs:1;
- uint32_t vsd:1;
- uint32_t counter_set_id:8;
- uint32_t hairpin_peer_sq:24;
- uint32_t hairpin_peer_vhca:16;
- uint64_t modify_bitmask;
- uint32_t lwm:16; /* Contained WQ lwm. */
-};
-
-struct mlx5_rx_hash_field_select {
- uint32_t l3_prot_type:1;
- uint32_t l4_prot_type:1;
- uint32_t selected_fields:30;
-};
-
-/* TIR attributes structure, used by TIR operations. */
-struct mlx5_devx_tir_attr {
- uint32_t disp_type:4;
- uint32_t lro_timeout_period_usecs:16;
- uint32_t lro_enable_mask:4;
- uint32_t lro_max_msg_sz:8;
- uint32_t inline_rqn:24;
- uint32_t rx_hash_symmetric:1;
- uint32_t tunneled_offload_en:1;
- uint32_t indirect_table:24;
- uint32_t rx_hash_fn:4;
- uint32_t self_lb_block:2;
- uint32_t transport_domain:24;
- uint32_t rx_hash_toeplitz_key[10];
- struct mlx5_rx_hash_field_select rx_hash_field_selector_outer;
- struct mlx5_rx_hash_field_select rx_hash_field_selector_inner;
-};
-
-/* RQT attributes structure, used by RQT operations. */
-struct mlx5_devx_rqt_attr {
- uint32_t rqt_max_size:16;
- uint32_t rqt_actual_size:16;
- uint32_t rq_list[];
-};
-
-/* TIS attributes structure. */
-struct mlx5_devx_tis_attr {
- uint32_t strict_lag_tx_port_affinity:1;
- uint32_t tls_en:1;
- uint32_t lag_tx_port_affinity:4;
- uint32_t prio:4;
- uint32_t transport_domain:24;
-};
-
-/* SQ attributes structure, used by SQ create operation. */
-struct mlx5_devx_create_sq_attr {
- uint32_t rlky:1;
- uint32_t cd_master:1;
- uint32_t fre:1;
- uint32_t flush_in_error_en:1;
- uint32_t allow_multi_pkt_send_wqe:1;
- uint32_t min_wqe_inline_mode:3;
- uint32_t state:4;
- uint32_t reg_umr:1;
- uint32_t allow_swp:1;
- uint32_t hairpin:1;
- uint32_t user_index:24;
- uint32_t cqn:24;
- uint32_t packet_pacing_rate_limit_index:16;
- uint32_t tis_lst_sz:16;
- uint32_t tis_num:24;
- struct mlx5_devx_wq_attr wq_attr;
-};
-
-/* SQ attributes structure, used by SQ modify operation. */
-struct mlx5_devx_modify_sq_attr {
- uint32_t sq_state:4;
- uint32_t state:4;
- uint32_t hairpin_peer_rq:24;
- uint32_t hairpin_peer_vhca:16;
-};
-
-/* mlx5_devx_cmds.c */
-
-struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,
- uint32_t bulk_sz);
-int mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj);
-int mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
- int clear, uint32_t n_counters,
- uint64_t *pkts, uint64_t *bytes,
- uint32_t mkey, void *addr,
- struct mlx5dv_devx_cmd_comp *cmd_comp,
- uint64_t async_id);
-int mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
- struct mlx5_hca_attr *attr);
-struct mlx5_devx_obj *mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
- struct mlx5_devx_mkey_attr *attr);
-int mlx5_devx_get_out_command_status(void *out);
-int mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
- uint32_t *tis_td);
-struct mlx5_devx_obj *mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
- struct mlx5_devx_create_rq_attr *rq_attr,
- int socket);
-int mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
- struct mlx5_devx_modify_rq_attr *rq_attr);
-struct mlx5_devx_obj *mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
- struct mlx5_devx_tir_attr *tir_attr);
-struct mlx5_devx_obj *mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
- struct mlx5_devx_rqt_attr *rqt_attr);
-struct mlx5_devx_obj *mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
- struct mlx5_devx_create_sq_attr *sq_attr);
-int mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
- struct mlx5_devx_modify_sq_attr *sq_attr);
-struct mlx5_devx_obj *mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
- struct mlx5_devx_tis_attr *tis_attr);
-struct mlx5_devx_obj *mlx5_devx_cmd_create_td(struct ibv_context *ctx);
-int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
- FILE *file);
-#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
#include <rte_rwlock.h>
#include <rte_cycles.h>
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+
#include "mlx5.h"
-#include "mlx5_glue.h"
-#include "mlx5_devx_cmds.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include <rte_malloc.h>
#include <rte_ip.h>
-#include "mlx5.h"
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+
#include "mlx5_defs.h"
+#include "mlx5.h"
#include "mlx5_flow.h"
-#include "mlx5_glue.h"
-#include "mlx5_devx_cmds.h"
-#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
/* Dev ops structure defined in mlx5.c */
#include <rte_alarm.h>
#include <rte_mtr.h>
+#include <mlx5_prm.h>
+
#include "mlx5.h"
-#include "mlx5_prm.h"
/* Private rte flow items. */
enum mlx5_rte_flow_item_type {
#include <rte_vxlan.h>
#include <rte_gtp.h>
-#include "mlx5.h"
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+
#include "mlx5_defs.h"
-#include "mlx5_glue.h"
-#include "mlx5_devx_cmds.h"
+#include "mlx5.h"
#include "mlx5_flow.h"
-#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
#include <rte_mtr.h>
#include <rte_mtr_driver.h>
+#include <mlx5_devx_cmds.h>
+
#include "mlx5.h"
#include "mlx5_flow.h"
#include <rte_malloc.h>
#include <rte_ip.h>
-#include "mlx5.h"
+#include <mlx5_glue.h>
+#include <mlx5_prm.h>
+
#include "mlx5_defs.h"
+#include "mlx5.h"
#include "mlx5_flow.h"
-#include "mlx5_glue.h"
-#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
#define VERBS_SPEC_INNER(item_flags) \
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd
- */
-
-#include <errno.h>
-#include <stdalign.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-
-/*
- * Not needed by this file; included to work around the lack of off_t
- * definition for mlx5dv.h with unpatched rdma-core versions.
- */
-#include <sys/types.h>
-
-/* Verbs headers do not support -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/mlx5dv.h>
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-#include <rte_config.h>
-
-#include "mlx5_autoconf.h"
-#include "mlx5_glue.h"
-
-static int
-mlx5_glue_fork_init(void)
-{
- return ibv_fork_init();
-}
-
-static struct ibv_pd *
-mlx5_glue_alloc_pd(struct ibv_context *context)
-{
- return ibv_alloc_pd(context);
-}
-
-static int
-mlx5_glue_dealloc_pd(struct ibv_pd *pd)
-{
- return ibv_dealloc_pd(pd);
-}
-
-static struct ibv_device **
-mlx5_glue_get_device_list(int *num_devices)
-{
- return ibv_get_device_list(num_devices);
-}
-
-static void
-mlx5_glue_free_device_list(struct ibv_device **list)
-{
- ibv_free_device_list(list);
-}
-
-static struct ibv_context *
-mlx5_glue_open_device(struct ibv_device *device)
-{
- return ibv_open_device(device);
-}
-
-static int
-mlx5_glue_close_device(struct ibv_context *context)
-{
- return ibv_close_device(context);
-}
-
-static int
-mlx5_glue_query_device(struct ibv_context *context,
- struct ibv_device_attr *device_attr)
-{
- return ibv_query_device(context, device_attr);
-}
-
-static int
-mlx5_glue_query_device_ex(struct ibv_context *context,
- const struct ibv_query_device_ex_input *input,
- struct ibv_device_attr_ex *attr)
-{
- return ibv_query_device_ex(context, input, attr);
-}
-
-static int
-mlx5_glue_query_rt_values_ex(struct ibv_context *context,
- struct ibv_values_ex *values)
-{
- return ibv_query_rt_values_ex(context, values);
-}
-
-static int
-mlx5_glue_query_port(struct ibv_context *context, uint8_t port_num,
- struct ibv_port_attr *port_attr)
-{
- return ibv_query_port(context, port_num, port_attr);
-}
-
-static struct ibv_comp_channel *
-mlx5_glue_create_comp_channel(struct ibv_context *context)
-{
- return ibv_create_comp_channel(context);
-}
-
-static int
-mlx5_glue_destroy_comp_channel(struct ibv_comp_channel *channel)
-{
- return ibv_destroy_comp_channel(channel);
-}
-
-static struct ibv_cq *
-mlx5_glue_create_cq(struct ibv_context *context, int cqe, void *cq_context,
- struct ibv_comp_channel *channel, int comp_vector)
-{
- return ibv_create_cq(context, cqe, cq_context, channel, comp_vector);
-}
-
-static int
-mlx5_glue_destroy_cq(struct ibv_cq *cq)
-{
- return ibv_destroy_cq(cq);
-}
-
-static int
-mlx5_glue_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq,
- void **cq_context)
-{
- return ibv_get_cq_event(channel, cq, cq_context);
-}
-
-static void
-mlx5_glue_ack_cq_events(struct ibv_cq *cq, unsigned int nevents)
-{
- ibv_ack_cq_events(cq, nevents);
-}
-
-static struct ibv_rwq_ind_table *
-mlx5_glue_create_rwq_ind_table(struct ibv_context *context,
- struct ibv_rwq_ind_table_init_attr *init_attr)
-{
- return ibv_create_rwq_ind_table(context, init_attr);
-}
-
-static int
-mlx5_glue_destroy_rwq_ind_table(struct ibv_rwq_ind_table *rwq_ind_table)
-{
- return ibv_destroy_rwq_ind_table(rwq_ind_table);
-}
-
-static struct ibv_wq *
-mlx5_glue_create_wq(struct ibv_context *context,
- struct ibv_wq_init_attr *wq_init_attr)
-{
- return ibv_create_wq(context, wq_init_attr);
-}
-
-static int
-mlx5_glue_destroy_wq(struct ibv_wq *wq)
-{
- return ibv_destroy_wq(wq);
-}
-static int
-mlx5_glue_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr)
-{
- return ibv_modify_wq(wq, wq_attr);
-}
-
-static struct ibv_flow *
-mlx5_glue_create_flow(struct ibv_qp *qp, struct ibv_flow_attr *flow)
-{
- return ibv_create_flow(qp, flow);
-}
-
-static int
-mlx5_glue_destroy_flow(struct ibv_flow *flow_id)
-{
- return ibv_destroy_flow(flow_id);
-}
-
-static int
-mlx5_glue_destroy_flow_action(void *action)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_action_destroy(action);
-#else
- struct mlx5dv_flow_action_attr *attr = action;
- int res = 0;
- switch (attr->type) {
- case MLX5DV_FLOW_ACTION_TAG:
- break;
- default:
- res = ibv_destroy_flow_action(attr->action);
- break;
- }
- free(action);
- return res;
-#endif
-#else
- (void)action;
- return ENOTSUP;
-#endif
-}
-
-static struct ibv_qp *
-mlx5_glue_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *qp_init_attr)
-{
- return ibv_create_qp(pd, qp_init_attr);
-}
-
-static struct ibv_qp *
-mlx5_glue_create_qp_ex(struct ibv_context *context,
- struct ibv_qp_init_attr_ex *qp_init_attr_ex)
-{
- return ibv_create_qp_ex(context, qp_init_attr_ex);
-}
-
-static int
-mlx5_glue_destroy_qp(struct ibv_qp *qp)
-{
- return ibv_destroy_qp(qp);
-}
-
-static int
-mlx5_glue_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
-{
- return ibv_modify_qp(qp, attr, attr_mask);
-}
-
-static struct ibv_mr *
-mlx5_glue_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
-{
- return ibv_reg_mr(pd, addr, length, access);
-}
-
-static int
-mlx5_glue_dereg_mr(struct ibv_mr *mr)
-{
- return ibv_dereg_mr(mr);
-}
-
-static struct ibv_counter_set *
-mlx5_glue_create_counter_set(struct ibv_context *context,
- struct ibv_counter_set_init_attr *init_attr)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
- (void)context;
- (void)init_attr;
- return NULL;
-#else
- return ibv_create_counter_set(context, init_attr);
-#endif
-}
-
-static int
-mlx5_glue_destroy_counter_set(struct ibv_counter_set *cs)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
- (void)cs;
- return ENOTSUP;
-#else
- return ibv_destroy_counter_set(cs);
-#endif
-}
-
-static int
-mlx5_glue_describe_counter_set(struct ibv_context *context,
- uint16_t counter_set_id,
- struct ibv_counter_set_description *cs_desc)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
- (void)context;
- (void)counter_set_id;
- (void)cs_desc;
- return ENOTSUP;
-#else
- return ibv_describe_counter_set(context, counter_set_id, cs_desc);
-#endif
-}
-
-static int
-mlx5_glue_query_counter_set(struct ibv_query_counter_set_attr *query_attr,
- struct ibv_counter_set_data *cs_data)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
- (void)query_attr;
- (void)cs_data;
- return ENOTSUP;
-#else
- return ibv_query_counter_set(query_attr, cs_data);
-#endif
-}
-
-static struct ibv_counters *
-mlx5_glue_create_counters(struct ibv_context *context,
- struct ibv_counters_init_attr *init_attr)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
- (void)context;
- (void)init_attr;
- errno = ENOTSUP;
- return NULL;
-#else
- return ibv_create_counters(context, init_attr);
-#endif
-}
-
-static int
-mlx5_glue_destroy_counters(struct ibv_counters *counters)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
- (void)counters;
- return ENOTSUP;
-#else
- return ibv_destroy_counters(counters);
-#endif
-}
-
-static int
-mlx5_glue_attach_counters(struct ibv_counters *counters,
- struct ibv_counter_attach_attr *attr,
- struct ibv_flow *flow)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
- (void)counters;
- (void)attr;
- (void)flow;
- return ENOTSUP;
-#else
- return ibv_attach_counters_point_flow(counters, attr, flow);
-#endif
-}
-
-static int
-mlx5_glue_query_counters(struct ibv_counters *counters,
- uint64_t *counters_value,
- uint32_t ncounters,
- uint32_t flags)
-{
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
- (void)counters;
- (void)counters_value;
- (void)ncounters;
- (void)flags;
- return ENOTSUP;
-#else
- return ibv_read_counters(counters, counters_value, ncounters, flags);
-#endif
-}
-
-static void
-mlx5_glue_ack_async_event(struct ibv_async_event *event)
-{
- ibv_ack_async_event(event);
-}
-
-static int
-mlx5_glue_get_async_event(struct ibv_context *context,
- struct ibv_async_event *event)
-{
- return ibv_get_async_event(context, event);
-}
-
-static const char *
-mlx5_glue_port_state_str(enum ibv_port_state port_state)
-{
- return ibv_port_state_str(port_state);
-}
-
-static struct ibv_cq *
-mlx5_glue_cq_ex_to_cq(struct ibv_cq_ex *cq)
-{
- return ibv_cq_ex_to_cq(cq);
-}
-
-static void *
-mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)
-{
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_action_create_dest_table(tbl);
-#else
- (void)tbl;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)
-{
-#ifdef HAVE_MLX5DV_DR_DEVX_PORT
- return mlx5dv_dr_action_create_dest_ib_port(domain, port);
-#else
-#ifdef HAVE_MLX5DV_DR_ESWITCH
- return mlx5dv_dr_action_create_dest_vport(domain, port);
-#else
- (void)domain;
- (void)port;
- errno = ENOTSUP;
- return NULL;
-#endif
-#endif
-}
-
-static void *
-mlx5_glue_dr_create_flow_action_drop(void)
-{
-#ifdef HAVE_MLX5DV_DR_ESWITCH
- return mlx5dv_dr_action_create_drop();
-#else
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dr_create_flow_action_push_vlan(struct mlx5dv_dr_domain *domain,
- rte_be32_t vlan_tag)
-{
-#ifdef HAVE_MLX5DV_DR_VLAN
- return mlx5dv_dr_action_create_push_vlan(domain, vlan_tag);
-#else
- (void)domain;
- (void)vlan_tag;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dr_create_flow_action_pop_vlan(void)
-{
-#ifdef HAVE_MLX5DV_DR_VLAN
- return mlx5dv_dr_action_create_pop_vlan();
-#else
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dr_create_flow_tbl(void *domain, uint32_t level)
-{
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_table_create(domain, level);
-#else
- (void)domain;
- (void)level;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static int
-mlx5_glue_dr_destroy_flow_tbl(void *tbl)
-{
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_table_destroy(tbl);
-#else
- (void)tbl;
- errno = ENOTSUP;
- return errno;
-#endif
-}
-
-static void *
-mlx5_glue_dr_create_domain(struct ibv_context *ctx,
- enum mlx5dv_dr_domain_type domain)
-{
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_domain_create(ctx, domain);
-#else
- (void)ctx;
- (void)domain;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static int
-mlx5_glue_dr_destroy_domain(void *domain)
-{
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_domain_destroy(domain);
-#else
- (void)domain;
- errno = ENOTSUP;
- return errno;
-#endif
-}
-
-static struct ibv_cq_ex *
-mlx5_glue_dv_create_cq(struct ibv_context *context,
- struct ibv_cq_init_attr_ex *cq_attr,
- struct mlx5dv_cq_init_attr *mlx5_cq_attr)
-{
- return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
-}
-
-static struct ibv_wq *
-mlx5_glue_dv_create_wq(struct ibv_context *context,
- struct ibv_wq_init_attr *wq_attr,
- struct mlx5dv_wq_init_attr *mlx5_wq_attr)
-{
-#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- (void)context;
- (void)wq_attr;
- (void)mlx5_wq_attr;
- errno = ENOTSUP;
- return NULL;
-#else
- return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);
-#endif
-}
-
-static int
-mlx5_glue_dv_query_device(struct ibv_context *ctx,
- struct mlx5dv_context *attrs_out)
-{
- return mlx5dv_query_device(ctx, attrs_out);
-}
-
-static int
-mlx5_glue_dv_set_context_attr(struct ibv_context *ibv_ctx,
- enum mlx5dv_set_ctx_attr_type type, void *attr)
-{
- return mlx5dv_set_context_attr(ibv_ctx, type, attr);
-}
-
-static int
-mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
-{
- return mlx5dv_init_obj(obj, obj_type);
-}
-
-static struct ibv_qp *
-mlx5_glue_dv_create_qp(struct ibv_context *context,
- struct ibv_qp_init_attr_ex *qp_init_attr_ex,
- struct mlx5dv_qp_init_attr *dv_qp_init_attr)
-{
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
-#else
- (void)context;
- (void)qp_init_attr_ex;
- (void)dv_qp_init_attr;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
- struct mlx5dv_flow_matcher_attr *matcher_attr,
- void *tbl)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- (void)context;
- return mlx5dv_dr_matcher_create(tbl, matcher_attr->priority,
- matcher_attr->match_criteria_enable,
- matcher_attr->match_mask);
-#else
- (void)tbl;
- return mlx5dv_create_flow_matcher(context, matcher_attr);
-#endif
-#else
- (void)context;
- (void)matcher_attr;
- (void)tbl;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow(void *matcher,
- void *match_value,
- size_t num_actions,
- void *actions[])
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_rule_create(matcher, match_value, num_actions,
- (struct mlx5dv_dr_action **)actions);
-#else
- struct mlx5dv_flow_action_attr actions_attr[8];
-
- if (num_actions > 8)
- return NULL;
- for (size_t i = 0; i < num_actions; i++)
- actions_attr[i] =
- *((struct mlx5dv_flow_action_attr *)(actions[i]));
- return mlx5dv_create_flow(matcher, match_value,
- num_actions, actions_attr);
-#endif
-#else
- (void)matcher;
- (void)match_value;
- (void)num_actions;
- (void)actions;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_action_create_flow_counter(counter_obj, offset);
-#else
- struct mlx5dv_flow_action_attr *action;
-
- (void)offset;
- action = malloc(sizeof(*action));
- if (!action)
- return NULL;
- action->type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;
- action->obj = counter_obj;
- return action;
-#endif
-#else
- (void)counter_obj;
- (void)offset;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_action_create_dest_ibv_qp(qp);
-#else
- struct mlx5dv_flow_action_attr *action;
-
- action = malloc(sizeof(*action));
- if (!action)
- return NULL;
- action->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
- action->obj = qp;
- return action;
-#endif
-#else
- (void)qp;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_dest_devx_tir(void *tir)
-{
-#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
- return mlx5dv_dr_action_create_dest_devx_tir(tir);
-#else
- (void)tir;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_modify_header
- (struct ibv_context *ctx,
- enum mlx5dv_flow_table_type ft_type,
- void *domain, uint64_t flags,
- size_t actions_sz,
- uint64_t actions[])
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- (void)ctx;
- (void)ft_type;
- return mlx5dv_dr_action_create_modify_header(domain, flags, actions_sz,
- (__be64 *)actions);
-#else
- struct mlx5dv_flow_action_attr *action;
-
- (void)domain;
- (void)flags;
- action = malloc(sizeof(*action));
- if (!action)
- return NULL;
- action->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
- action->action = mlx5dv_create_flow_action_modify_header
- (ctx, actions_sz, actions, ft_type);
- return action;
-#endif
-#else
- (void)ctx;
- (void)ft_type;
- (void)domain;
- (void)flags;
- (void)actions_sz;
- (void)actions;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_packet_reformat
- (struct ibv_context *ctx,
- enum mlx5dv_flow_action_packet_reformat_type reformat_type,
- enum mlx5dv_flow_table_type ft_type,
- struct mlx5dv_dr_domain *domain,
- uint32_t flags, size_t data_sz, void *data)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- (void)ctx;
- (void)ft_type;
- return mlx5dv_dr_action_create_packet_reformat(domain, flags,
- reformat_type, data_sz,
- data);
-#else
- (void)domain;
- (void)flags;
- struct mlx5dv_flow_action_attr *action;
-
- action = malloc(sizeof(*action));
- if (!action)
- return NULL;
- action->type = MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
- action->action = mlx5dv_create_flow_action_packet_reformat
- (ctx, data_sz, data, reformat_type, ft_type);
- return action;
-#endif
-#else
- (void)ctx;
- (void)reformat_type;
- (void)ft_type;
- (void)domain;
- (void)flags;
- (void)data_sz;
- (void)data;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_action_create_tag(tag);
-#else
- struct mlx5dv_flow_action_attr *action;
- action = malloc(sizeof(*action));
- if (!action)
- return NULL;
- action->type = MLX5DV_FLOW_ACTION_TAG;
- action->tag_value = tag;
- return action;
-#endif
-#endif
- (void)tag;
- errno = ENOTSUP;
- return NULL;
-}
-
-static void *
-mlx5_glue_dv_create_flow_action_meter(struct mlx5dv_dr_flow_meter_attr *attr)
-{
-#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
- return mlx5dv_dr_action_create_flow_meter(attr);
-#else
- (void)attr;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static int
-mlx5_glue_dv_modify_flow_action_meter(void *action,
- struct mlx5dv_dr_flow_meter_attr *attr,
- uint64_t modify_bits)
-{
-#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER)
- return mlx5dv_dr_action_modify_flow_meter(action, attr, modify_bits);
-#else
- (void)action;
- (void)attr;
- (void)modify_bits;
- errno = ENOTSUP;
- return errno;
-#endif
-}
-
-static int
-mlx5_glue_dv_destroy_flow(void *flow_id)
-{
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_rule_destroy(flow_id);
-#else
- return ibv_destroy_flow(flow_id);
-#endif
-}
-
-static int
-mlx5_glue_dv_destroy_flow_matcher(void *matcher)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-#ifdef HAVE_MLX5DV_DR
- return mlx5dv_dr_matcher_destroy(matcher);
-#else
- return mlx5dv_destroy_flow_matcher(matcher);
-#endif
-#else
- (void)matcher;
- errno = ENOTSUP;
- return errno;
-#endif
-}
-
-static struct ibv_context *
-mlx5_glue_dv_open_device(struct ibv_device *device)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_open_device(device,
- &(struct mlx5dv_context_attr){
- .flags = MLX5DV_CONTEXT_FLAGS_DEVX,
- });
-#else
- (void)device;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static struct mlx5dv_devx_obj *
-mlx5_glue_devx_obj_create(struct ibv_context *ctx,
- const void *in, size_t inlen,
- void *out, size_t outlen)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_obj_create(ctx, in, inlen, out, outlen);
-#else
- (void)ctx;
- (void)in;
- (void)inlen;
- (void)out;
- (void)outlen;
- errno = ENOTSUP;
- return NULL;
-#endif
-}
-
-static int
-mlx5_glue_devx_obj_destroy(struct mlx5dv_devx_obj *obj)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_obj_destroy(obj);
-#else
- (void)obj;
- return -ENOTSUP;
-#endif
-}
-
-static int
-mlx5_glue_devx_obj_query(struct mlx5dv_devx_obj *obj,
- const void *in, size_t inlen,
- void *out, size_t outlen)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_obj_query(obj, in, inlen, out, outlen);
-#else
- (void)obj;
- (void)in;
- (void)inlen;
- (void)out;
- (void)outlen;
- return -ENOTSUP;
-#endif
-}
-
-static int
-mlx5_glue_devx_obj_modify(struct mlx5dv_devx_obj *obj,
- const void *in, size_t inlen,
- void *out, size_t outlen)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_obj_modify(obj, in, inlen, out, outlen);
-#else
- (void)obj;
- (void)in;
- (void)inlen;
- (void)out;
- (void)outlen;
- return -ENOTSUP;
-#endif
-}
-
-static int
-mlx5_glue_devx_general_cmd(struct ibv_context *ctx,
- const void *in, size_t inlen,
- void *out, size_t outlen)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_general_cmd(ctx, in, inlen, out, outlen);
-#else
- (void)ctx;
- (void)in;
- (void)inlen;
- (void)out;
- (void)outlen;
- return -ENOTSUP;
-#endif
-}
-
-static struct mlx5dv_devx_cmd_comp *
-mlx5_glue_devx_create_cmd_comp(struct ibv_context *ctx)
-{
-#ifdef HAVE_IBV_DEVX_ASYNC
- return mlx5dv_devx_create_cmd_comp(ctx);
-#else
- (void)ctx;
- errno = -ENOTSUP;
- return NULL;
-#endif
-}
-
-static void
-mlx5_glue_devx_destroy_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp)
-{
-#ifdef HAVE_IBV_DEVX_ASYNC
- mlx5dv_devx_destroy_cmd_comp(cmd_comp);
-#else
- (void)cmd_comp;
- errno = -ENOTSUP;
-#endif
-}
-
-static int
-mlx5_glue_devx_obj_query_async(struct mlx5dv_devx_obj *obj, const void *in,
- size_t inlen, size_t outlen, uint64_t wr_id,
- struct mlx5dv_devx_cmd_comp *cmd_comp)
-{
-#ifdef HAVE_IBV_DEVX_ASYNC
- return mlx5dv_devx_obj_query_async(obj, in, inlen, outlen, wr_id,
- cmd_comp);
-#else
- (void)obj;
- (void)in;
- (void)inlen;
- (void)outlen;
- (void)wr_id;
- (void)cmd_comp;
- return -ENOTSUP;
-#endif
-}
-
-static int
-mlx5_glue_devx_get_async_cmd_comp(struct mlx5dv_devx_cmd_comp *cmd_comp,
- struct mlx5dv_devx_async_cmd_hdr *cmd_resp,
- size_t cmd_resp_len)
-{
-#ifdef HAVE_IBV_DEVX_ASYNC
- return mlx5dv_devx_get_async_cmd_comp(cmd_comp, cmd_resp,
- cmd_resp_len);
-#else
- (void)cmd_comp;
- (void)cmd_resp;
- (void)cmd_resp_len;
- return -ENOTSUP;
-#endif
-}
-
-static struct mlx5dv_devx_umem *
-mlx5_glue_devx_umem_reg(struct ibv_context *context, void *addr, size_t size,
- uint32_t access)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_umem_reg(context, addr, size, access);
-#else
- (void)context;
- (void)addr;
- (void)size;
- (void)access;
- errno = -ENOTSUP;
- return NULL;
-#endif
-}
-
-static int
-mlx5_glue_devx_umem_dereg(struct mlx5dv_devx_umem *dv_devx_umem)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_umem_dereg(dv_devx_umem);
-#else
- (void)dv_devx_umem;
- return -ENOTSUP;
-#endif
-}
-
-static int
-mlx5_glue_devx_qp_query(struct ibv_qp *qp,
- const void *in, size_t inlen,
- void *out, size_t outlen)
-{
-#ifdef HAVE_IBV_DEVX_OBJ
- return mlx5dv_devx_qp_query(qp, in, inlen, out, outlen);
-#else
- (void)qp;
- (void)in;
- (void)inlen;
- (void)out;
- (void)outlen;
- errno = ENOTSUP;
- return errno;
-#endif
-}
-
-static int
-mlx5_glue_devx_port_query(struct ibv_context *ctx,
- uint32_t port_num,
- struct mlx5dv_devx_port *mlx5_devx_port)
-{
-#ifdef HAVE_MLX5DV_DR_DEVX_PORT
- return mlx5dv_query_devx_port(ctx, port_num, mlx5_devx_port);
-#else
- (void)ctx;
- (void)port_num;
- (void)mlx5_devx_port;
- errno = ENOTSUP;
- return errno;
-#endif
-}
-
-static int
-mlx5_glue_dr_dump_domain(FILE *file, void *domain)
-{
-#ifdef HAVE_MLX5_DR_FLOW_DUMP
- return mlx5dv_dump_dr_domain(file, domain);
-#else
- RTE_SET_USED(file);
- RTE_SET_USED(domain);
- return -ENOTSUP;
-#endif
-}
-
-alignas(RTE_CACHE_LINE_SIZE)
-const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
- .version = MLX5_GLUE_VERSION,
- .fork_init = mlx5_glue_fork_init,
- .alloc_pd = mlx5_glue_alloc_pd,
- .dealloc_pd = mlx5_glue_dealloc_pd,
- .get_device_list = mlx5_glue_get_device_list,
- .free_device_list = mlx5_glue_free_device_list,
- .open_device = mlx5_glue_open_device,
- .close_device = mlx5_glue_close_device,
- .query_device = mlx5_glue_query_device,
- .query_device_ex = mlx5_glue_query_device_ex,
- .query_rt_values_ex = mlx5_glue_query_rt_values_ex,
- .query_port = mlx5_glue_query_port,
- .create_comp_channel = mlx5_glue_create_comp_channel,
- .destroy_comp_channel = mlx5_glue_destroy_comp_channel,
- .create_cq = mlx5_glue_create_cq,
- .destroy_cq = mlx5_glue_destroy_cq,
- .get_cq_event = mlx5_glue_get_cq_event,
- .ack_cq_events = mlx5_glue_ack_cq_events,
- .create_rwq_ind_table = mlx5_glue_create_rwq_ind_table,
- .destroy_rwq_ind_table = mlx5_glue_destroy_rwq_ind_table,
- .create_wq = mlx5_glue_create_wq,
- .destroy_wq = mlx5_glue_destroy_wq,
- .modify_wq = mlx5_glue_modify_wq,
- .create_flow = mlx5_glue_create_flow,
- .destroy_flow = mlx5_glue_destroy_flow,
- .destroy_flow_action = mlx5_glue_destroy_flow_action,
- .create_qp = mlx5_glue_create_qp,
- .create_qp_ex = mlx5_glue_create_qp_ex,
- .destroy_qp = mlx5_glue_destroy_qp,
- .modify_qp = mlx5_glue_modify_qp,
- .reg_mr = mlx5_glue_reg_mr,
- .dereg_mr = mlx5_glue_dereg_mr,
- .create_counter_set = mlx5_glue_create_counter_set,
- .destroy_counter_set = mlx5_glue_destroy_counter_set,
- .describe_counter_set = mlx5_glue_describe_counter_set,
- .query_counter_set = mlx5_glue_query_counter_set,
- .create_counters = mlx5_glue_create_counters,
- .destroy_counters = mlx5_glue_destroy_counters,
- .attach_counters = mlx5_glue_attach_counters,
- .query_counters = mlx5_glue_query_counters,
- .ack_async_event = mlx5_glue_ack_async_event,
- .get_async_event = mlx5_glue_get_async_event,
- .port_state_str = mlx5_glue_port_state_str,
- .cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
- .dr_create_flow_action_dest_flow_tbl =
- mlx5_glue_dr_create_flow_action_dest_flow_tbl,
- .dr_create_flow_action_dest_port =
- mlx5_glue_dr_create_flow_action_dest_port,
- .dr_create_flow_action_drop =
- mlx5_glue_dr_create_flow_action_drop,
- .dr_create_flow_action_push_vlan =
- mlx5_glue_dr_create_flow_action_push_vlan,
- .dr_create_flow_action_pop_vlan =
- mlx5_glue_dr_create_flow_action_pop_vlan,
- .dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,
- .dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
- .dr_create_domain = mlx5_glue_dr_create_domain,
- .dr_destroy_domain = mlx5_glue_dr_destroy_domain,
- .dv_create_cq = mlx5_glue_dv_create_cq,
- .dv_create_wq = mlx5_glue_dv_create_wq,
- .dv_query_device = mlx5_glue_dv_query_device,
- .dv_set_context_attr = mlx5_glue_dv_set_context_attr,
- .dv_init_obj = mlx5_glue_dv_init_obj,
- .dv_create_qp = mlx5_glue_dv_create_qp,
- .dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
- .dv_create_flow = mlx5_glue_dv_create_flow,
- .dv_create_flow_action_counter =
- mlx5_glue_dv_create_flow_action_counter,
- .dv_create_flow_action_dest_ibv_qp =
- mlx5_glue_dv_create_flow_action_dest_ibv_qp,
- .dv_create_flow_action_dest_devx_tir =
- mlx5_glue_dv_create_flow_action_dest_devx_tir,
- .dv_create_flow_action_modify_header =
- mlx5_glue_dv_create_flow_action_modify_header,
- .dv_create_flow_action_packet_reformat =
- mlx5_glue_dv_create_flow_action_packet_reformat,
- .dv_create_flow_action_tag = mlx5_glue_dv_create_flow_action_tag,
- .dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,
- .dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,
- .dv_destroy_flow = mlx5_glue_dv_destroy_flow,
- .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
- .dv_open_device = mlx5_glue_dv_open_device,
- .devx_obj_create = mlx5_glue_devx_obj_create,
- .devx_obj_destroy = mlx5_glue_devx_obj_destroy,
- .devx_obj_query = mlx5_glue_devx_obj_query,
- .devx_obj_modify = mlx5_glue_devx_obj_modify,
- .devx_general_cmd = mlx5_glue_devx_general_cmd,
- .devx_create_cmd_comp = mlx5_glue_devx_create_cmd_comp,
- .devx_destroy_cmd_comp = mlx5_glue_devx_destroy_cmd_comp,
- .devx_obj_query_async = mlx5_glue_devx_obj_query_async,
- .devx_get_async_cmd_comp = mlx5_glue_devx_get_async_cmd_comp,
- .devx_umem_reg = mlx5_glue_devx_umem_reg,
- .devx_umem_dereg = mlx5_glue_devx_umem_dereg,
- .devx_qp_query = mlx5_glue_devx_qp_query,
- .devx_port_query = mlx5_glue_devx_port_query,
- .dr_dump_domain = mlx5_glue_dr_dump_domain,
-};
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd
- */
-
-#ifndef MLX5_GLUE_H_
-#define MLX5_GLUE_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "rte_byteorder.h"
-
-/* Verbs headers do not support -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/mlx5dv.h>
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-#ifndef MLX5_GLUE_VERSION
-#define MLX5_GLUE_VERSION ""
-#endif
-
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V42
-struct ibv_counter_set;
-struct ibv_counter_set_data;
-struct ibv_counter_set_description;
-struct ibv_counter_set_init_attr;
-struct ibv_query_counter_set_attr;
-#endif
-
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_V45
-struct ibv_counters;
-struct ibv_counters_init_attr;
-struct ibv_counter_attach_attr;
-#endif
-
-#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-struct mlx5dv_qp_init_attr;
-#endif
-
-#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
-struct mlx5dv_wq_init_attr;
-#endif
-
-#ifndef HAVE_IBV_FLOW_DV_SUPPORT
-struct mlx5dv_flow_matcher;
-struct mlx5dv_flow_matcher_attr;
-struct mlx5dv_flow_action_attr;
-struct mlx5dv_flow_match_parameters;
-struct mlx5dv_dr_flow_meter_attr;
-struct ibv_flow_action;
-enum mlx5dv_flow_action_packet_reformat_type { packet_reformat_type = 0, };
-enum mlx5dv_flow_table_type { flow_table_type = 0, };
-#endif
-
-#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
-#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
-#endif
-
-#ifndef HAVE_IBV_DEVX_OBJ
-struct mlx5dv_devx_obj;
-struct mlx5dv_devx_umem { uint32_t umem_id; };
-#endif
-
-#ifndef HAVE_IBV_DEVX_ASYNC
-struct mlx5dv_devx_cmd_comp;
-struct mlx5dv_devx_async_cmd_hdr;
-#endif
-
-#ifndef HAVE_MLX5DV_DR
-enum mlx5dv_dr_domain_type { unused, };
-struct mlx5dv_dr_domain;
-#endif
-
-#ifndef HAVE_MLX5DV_DR_DEVX_PORT
-struct mlx5dv_devx_port;
-#endif
-
-#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER
-struct mlx5dv_dr_flow_meter_attr;
-#endif
-
-/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
-struct mlx5_glue {
- const char *version;
- int (*fork_init)(void);
- struct ibv_pd *(*alloc_pd)(struct ibv_context *context);
- int (*dealloc_pd)(struct ibv_pd *pd);
- struct ibv_device **(*get_device_list)(int *num_devices);
- void (*free_device_list)(struct ibv_device **list);
- struct ibv_context *(*open_device)(struct ibv_device *device);
- int (*close_device)(struct ibv_context *context);
- int (*query_device)(struct ibv_context *context,
- struct ibv_device_attr *device_attr);
- int (*query_device_ex)(struct ibv_context *context,
- const struct ibv_query_device_ex_input *input,
- struct ibv_device_attr_ex *attr);
- int (*query_rt_values_ex)(struct ibv_context *context,
- struct ibv_values_ex *values);
- int (*query_port)(struct ibv_context *context, uint8_t port_num,
- struct ibv_port_attr *port_attr);
- struct ibv_comp_channel *(*create_comp_channel)
- (struct ibv_context *context);
- int (*destroy_comp_channel)(struct ibv_comp_channel *channel);
- struct ibv_cq *(*create_cq)(struct ibv_context *context, int cqe,
- void *cq_context,
- struct ibv_comp_channel *channel,
- int comp_vector);
- int (*destroy_cq)(struct ibv_cq *cq);
- int (*get_cq_event)(struct ibv_comp_channel *channel,
- struct ibv_cq **cq, void **cq_context);
- void (*ack_cq_events)(struct ibv_cq *cq, unsigned int nevents);
- struct ibv_rwq_ind_table *(*create_rwq_ind_table)
- (struct ibv_context *context,
- struct ibv_rwq_ind_table_init_attr *init_attr);
- int (*destroy_rwq_ind_table)(struct ibv_rwq_ind_table *rwq_ind_table);
- struct ibv_wq *(*create_wq)(struct ibv_context *context,
- struct ibv_wq_init_attr *wq_init_attr);
- int (*destroy_wq)(struct ibv_wq *wq);
- int (*modify_wq)(struct ibv_wq *wq, struct ibv_wq_attr *wq_attr);
- struct ibv_flow *(*create_flow)(struct ibv_qp *qp,
- struct ibv_flow_attr *flow);
- int (*destroy_flow)(struct ibv_flow *flow_id);
- int (*destroy_flow_action)(void *action);
- struct ibv_qp *(*create_qp)(struct ibv_pd *pd,
- struct ibv_qp_init_attr *qp_init_attr);
- struct ibv_qp *(*create_qp_ex)
- (struct ibv_context *context,
- struct ibv_qp_init_attr_ex *qp_init_attr_ex);
- int (*destroy_qp)(struct ibv_qp *qp);
- int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr,
- int attr_mask);
- struct ibv_mr *(*reg_mr)(struct ibv_pd *pd, void *addr,
- size_t length, int access);
- int (*dereg_mr)(struct ibv_mr *mr);
- struct ibv_counter_set *(*create_counter_set)
- (struct ibv_context *context,
- struct ibv_counter_set_init_attr *init_attr);
- int (*destroy_counter_set)(struct ibv_counter_set *cs);
- int (*describe_counter_set)
- (struct ibv_context *context,
- uint16_t counter_set_id,
- struct ibv_counter_set_description *cs_desc);
- int (*query_counter_set)(struct ibv_query_counter_set_attr *query_attr,
- struct ibv_counter_set_data *cs_data);
- struct ibv_counters *(*create_counters)
- (struct ibv_context *context,
- struct ibv_counters_init_attr *init_attr);
- int (*destroy_counters)(struct ibv_counters *counters);
- int (*attach_counters)(struct ibv_counters *counters,
- struct ibv_counter_attach_attr *attr,
- struct ibv_flow *flow);
- int (*query_counters)(struct ibv_counters *counters,
- uint64_t *counters_value,
- uint32_t ncounters,
- uint32_t flags);
- void (*ack_async_event)(struct ibv_async_event *event);
- int (*get_async_event)(struct ibv_context *context,
- struct ibv_async_event *event);
- const char *(*port_state_str)(enum ibv_port_state port_state);
- struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
- void *(*dr_create_flow_action_dest_flow_tbl)(void *tbl);
- void *(*dr_create_flow_action_dest_port)(void *domain,
- uint32_t port);
- void *(*dr_create_flow_action_drop)();
- void *(*dr_create_flow_action_push_vlan)
- (struct mlx5dv_dr_domain *domain,
- rte_be32_t vlan_tag);
- void *(*dr_create_flow_action_pop_vlan)();
- void *(*dr_create_flow_tbl)(void *domain, uint32_t level);
- int (*dr_destroy_flow_tbl)(void *tbl);
- void *(*dr_create_domain)(struct ibv_context *ctx,
- enum mlx5dv_dr_domain_type domain);
- int (*dr_destroy_domain)(void *domain);
- struct ibv_cq_ex *(*dv_create_cq)
- (struct ibv_context *context,
- struct ibv_cq_init_attr_ex *cq_attr,
- struct mlx5dv_cq_init_attr *mlx5_cq_attr);
- struct ibv_wq *(*dv_create_wq)
- (struct ibv_context *context,
- struct ibv_wq_init_attr *wq_attr,
- struct mlx5dv_wq_init_attr *mlx5_wq_attr);
- int (*dv_query_device)(struct ibv_context *ctx_in,
- struct mlx5dv_context *attrs_out);
- int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
- enum mlx5dv_set_ctx_attr_type type,
- void *attr);
- int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
- struct ibv_qp *(*dv_create_qp)
- (struct ibv_context *context,
- struct ibv_qp_init_attr_ex *qp_init_attr_ex,
- struct mlx5dv_qp_init_attr *dv_qp_init_attr);
- void *(*dv_create_flow_matcher)
- (struct ibv_context *context,
- struct mlx5dv_flow_matcher_attr *matcher_attr,
- void *tbl);
- void *(*dv_create_flow)(void *matcher, void *match_value,
- size_t num_actions, void *actions[]);
- void *(*dv_create_flow_action_counter)(void *obj, uint32_t offset);
- void *(*dv_create_flow_action_dest_ibv_qp)(void *qp);
- void *(*dv_create_flow_action_dest_devx_tir)(void *tir);
- void *(*dv_create_flow_action_modify_header)
- (struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,
- void *domain, uint64_t flags, size_t actions_sz,
- uint64_t actions[]);
- void *(*dv_create_flow_action_packet_reformat)
- (struct ibv_context *ctx,
- enum mlx5dv_flow_action_packet_reformat_type reformat_type,
- enum mlx5dv_flow_table_type ft_type,
- struct mlx5dv_dr_domain *domain,
- uint32_t flags, size_t data_sz, void *data);
- void *(*dv_create_flow_action_tag)(uint32_t tag);
- void *(*dv_create_flow_action_meter)
- (struct mlx5dv_dr_flow_meter_attr *attr);
- int (*dv_modify_flow_action_meter)(void *action,
- struct mlx5dv_dr_flow_meter_attr *attr, uint64_t modify_bits);
- int (*dv_destroy_flow)(void *flow);
- int (*dv_destroy_flow_matcher)(void *matcher);
- struct ibv_context *(*dv_open_device)(struct ibv_device *device);
- struct mlx5dv_devx_obj *(*devx_obj_create)
- (struct ibv_context *ctx,
- const void *in, size_t inlen,
- void *out, size_t outlen);
- int (*devx_obj_destroy)(struct mlx5dv_devx_obj *obj);
- int (*devx_obj_query)(struct mlx5dv_devx_obj *obj,
- const void *in, size_t inlen,
- void *out, size_t outlen);
- int (*devx_obj_modify)(struct mlx5dv_devx_obj *obj,
- const void *in, size_t inlen,
- void *out, size_t outlen);
- int (*devx_general_cmd)(struct ibv_context *context,
- const void *in, size_t inlen,
- void *out, size_t outlen);
- struct mlx5dv_devx_cmd_comp *(*devx_create_cmd_comp)
- (struct ibv_context *context);
- void (*devx_destroy_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp);
- int (*devx_obj_query_async)(struct mlx5dv_devx_obj *obj,
- const void *in, size_t inlen,
- size_t outlen, uint64_t wr_id,
- struct mlx5dv_devx_cmd_comp *cmd_comp);
- int (*devx_get_async_cmd_comp)(struct mlx5dv_devx_cmd_comp *cmd_comp,
- struct mlx5dv_devx_async_cmd_hdr *resp,
- size_t cmd_resp_len);
- struct mlx5dv_devx_umem *(*devx_umem_reg)(struct ibv_context *context,
- void *addr, size_t size,
- uint32_t access);
- int (*devx_umem_dereg)(struct mlx5dv_devx_umem *dv_devx_umem);
- int (*devx_qp_query)(struct ibv_qp *qp,
- const void *in, size_t inlen,
- void *out, size_t outlen);
- int (*devx_port_query)(struct ibv_context *ctx,
- uint32_t port_num,
- struct mlx5dv_devx_port *mlx5_devx_port);
- int (*dr_dump_domain)(FILE *file, void *domain);
-};
-
-const struct mlx5_glue *mlx5_glue;
-
-#endif /* MLX5_GLUE_H_ */
#include <rte_ethdev_driver.h>
#include <rte_common.h>
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
-#include "mlx5_defs.h"
/**
* Get MAC address by querying netdevice.
#include <rte_rwlock.h>
#include <rte_bus_pci.h>
+#include <mlx5_glue.h>
+
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
-#include "mlx5_glue.h"
struct mr_find_contig_memsegs_data {
uintptr_t addr;
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox Technologies, Ltd
- */
-
-#ifndef RTE_PMD_MLX5_PRM_H_
-#define RTE_PMD_MLX5_PRM_H_
-
-#include <assert.h>
-
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/mlx5dv.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-#include <rte_vect.h>
-#include "mlx5_autoconf.h"
-
-/* RSS hash key size. */
-#define MLX5_RSS_HASH_KEY_LEN 40
-
-/* Get CQE owner bit. */
-#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
-
-/* Get CQE format. */
-#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2)
-
-/* Get CQE opcode. */
-#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4)
-
-/* Get CQE solicited event. */
-#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1)
-
-/* Invalidate a CQE. */
-#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
-
-/* WQE Segment sizes in bytes. */
-#define MLX5_WSEG_SIZE 16u
-#define MLX5_WQE_CSEG_SIZE sizeof(struct mlx5_wqe_cseg)
-#define MLX5_WQE_DSEG_SIZE sizeof(struct mlx5_wqe_dseg)
-#define MLX5_WQE_ESEG_SIZE sizeof(struct mlx5_wqe_eseg)
-
-/* WQE/WQEBB size in bytes. */
-#define MLX5_WQE_SIZE sizeof(struct mlx5_wqe)
-
-/*
- * Max size of a WQE session.
- * Absolute maximum size is 63 (MLX5_DSEG_MAX) segments,
- * the WQE size field in Control Segment is 6 bits wide.
- */
-#define MLX5_WQE_SIZE_MAX (60 * MLX5_WSEG_SIZE)
-
-/*
- * Default minimum number of Tx queues for inlining packets.
- * If there are less queues as specified we assume we have
- * no enough CPU resources (cycles) to perform inlining,
- * the PCIe throughput is not supposed as bottleneck and
- * inlining is disabled.
- */
-#define MLX5_INLINE_MAX_TXQS 8u
-#define MLX5_INLINE_MAX_TXQS_BLUEFIELD 16u
-
-/*
- * Default packet length threshold to be inlined with
- * enhanced MPW. If packet length exceeds the threshold
- * the data are not inlined. Should be aligned in WQEBB
- * boundary with accounting the title Control and Ethernet
- * segments.
- */
-#define MLX5_EMPW_DEF_INLINE_LEN (4u * MLX5_WQE_SIZE + \
- MLX5_DSEG_MIN_INLINE_SIZE)
-/*
- * Maximal inline data length sent with enhanced MPW.
- * Is based on maximal WQE size.
- */
-#define MLX5_EMPW_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
- MLX5_WQE_CSEG_SIZE - \
- MLX5_WQE_ESEG_SIZE - \
- MLX5_WQE_DSEG_SIZE + \
- MLX5_DSEG_MIN_INLINE_SIZE)
-/*
- * Minimal amount of packets to be sent with EMPW.
- * This limits the minimal required size of sent EMPW.
- * If there are no enough resources to built minimal
- * EMPW the sending loop exits.
- */
-#define MLX5_EMPW_MIN_PACKETS (2u + 3u * 4u)
-/*
- * Maximal amount of packets to be sent with EMPW.
- * This value is not recommended to exceed MLX5_TX_COMP_THRESH,
- * otherwise there might be up to MLX5_EMPW_MAX_PACKETS mbufs
- * without CQE generation request, being multiplied by
- * MLX5_TX_COMP_MAX_CQE it may cause significant latency
- * in tx burst routine at the moment of freeing multiple mbufs.
- */
-#define MLX5_EMPW_MAX_PACKETS MLX5_TX_COMP_THRESH
-#define MLX5_MPW_MAX_PACKETS 6
-#define MLX5_MPW_INLINE_MAX_PACKETS 2
-
-/*
- * Default packet length threshold to be inlined with
- * ordinary SEND. Inlining saves the MR key search
- * and extra PCIe data fetch transaction, but eats the
- * CPU cycles.
- */
-#define MLX5_SEND_DEF_INLINE_LEN (5U * MLX5_WQE_SIZE + \
- MLX5_ESEG_MIN_INLINE_SIZE - \
- MLX5_WQE_CSEG_SIZE - \
- MLX5_WQE_ESEG_SIZE - \
- MLX5_WQE_DSEG_SIZE)
-/*
- * Maximal inline data length sent with ordinary SEND.
- * Is based on maximal WQE size.
- */
-#define MLX5_SEND_MAX_INLINE_LEN (MLX5_WQE_SIZE_MAX - \
- MLX5_WQE_CSEG_SIZE - \
- MLX5_WQE_ESEG_SIZE - \
- MLX5_WQE_DSEG_SIZE + \
- MLX5_ESEG_MIN_INLINE_SIZE)
-
-/* Missed in mlv5dv.h, should define here. */
-#define MLX5_OPCODE_ENHANCED_MPSW 0x29u
-
-/* CQE value to inform that VLAN is stripped. */
-#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
-
-/* IPv4 options. */
-#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
-
-/* IPv6 packet. */
-#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
-
-/* IPv4 packet. */
-#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
-
-/* TCP packet. */
-#define MLX5_CQE_RX_TCP_PACKET (1u << 4)
-
-/* UDP packet. */
-#define MLX5_CQE_RX_UDP_PACKET (1u << 5)
-
-/* IP is fragmented. */
-#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
-
-/* L2 header is valid. */
-#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
-
-/* L3 header is valid. */
-#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
-
-/* L4 header is valid. */
-#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
-
-/* Outer packet, 0 IPv4, 1 IPv6. */
-#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
-
-/* Tunnel packet bit in the CQE. */
-#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
-
-/* Mask for LRO push flag in the CQE lro_tcppsh_abort_dupack field. */
-#define MLX5_CQE_LRO_PUSH_MASK 0x40
-
-/* Mask for L4 type in the CQE hdr_type_etc field. */
-#define MLX5_CQE_L4_TYPE_MASK 0x70
-
-/* The bit index of L4 type in CQE hdr_type_etc field. */
-#define MLX5_CQE_L4_TYPE_SHIFT 0x4
-
-/* L4 type to indicate TCP packet without acknowledgment. */
-#define MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK 0x3
-
-/* L4 type to indicate TCP packet with acknowledgment. */
-#define MLX5_L4_HDR_TYPE_TCP_WITH_ACL 0x4
-
-/* Inner L3 checksum offload (Tunneled packets only). */
-#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
-
-/* Inner L4 checksum offload (Tunneled packets only). */
-#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
-
-/* Outer L4 type is TCP. */
-#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)
-
-/* Outer L4 type is UDP. */
-#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)
-
-/* Outer L3 type is IPV4. */
-#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
-
-/* Outer L3 type is IPV6. */
-#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
-
-/* Inner L4 type is TCP. */
-#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
-
-/* Inner L4 type is UDP. */
-#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
-
-/* Inner L3 type is IPV4. */
-#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
-
-/* Inner L3 type is IPV6. */
-#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
-
-/* VLAN insertion flag. */
-#define MLX5_ETH_WQE_VLAN_INSERT (1u << 31)
-
-/* Data inline segment flag. */
-#define MLX5_ETH_WQE_DATA_INLINE (1u << 31)
-
-/* Is flow mark valid. */
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
-#else
-#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
-#endif
-
-/* INVALID is used by packets matching no flow rules. */
-#define MLX5_FLOW_MARK_INVALID 0
-
-/* Maximum allowed value to mark a packet. */
-#define MLX5_FLOW_MARK_MAX 0xfffff0
-
-/* Default mark value used when none is provided. */
-#define MLX5_FLOW_MARK_DEFAULT 0xffffff
-
-/* Default mark mask for metadata legacy mode. */
-#define MLX5_FLOW_MARK_MASK 0xffffff
-
-/* Maximum number of DS in WQE. Limited by 6-bit field. */
-#define MLX5_DSEG_MAX 63
-
-/* The completion mode offset in the WQE control segment line 2. */
-#define MLX5_COMP_MODE_OFFSET 2
-
-/* Amount of data bytes in minimal inline data segment. */
-#define MLX5_DSEG_MIN_INLINE_SIZE 12u
-
-/* Amount of data bytes in minimal inline eth segment. */
-#define MLX5_ESEG_MIN_INLINE_SIZE 18u
-
-/* Amount of data bytes after eth data segment. */
-#define MLX5_ESEG_EXTRA_DATA_SIZE 32u
-
-/* The maximum log value of segments per RQ WQE. */
-#define MLX5_MAX_LOG_RQ_SEGS 5u
-
-/* The alignment needed for WQ buffer. */
-#define MLX5_WQE_BUF_ALIGNMENT 512
-
-/* Completion mode. */
-enum mlx5_completion_mode {
- MLX5_COMP_ONLY_ERR = 0x0,
- MLX5_COMP_ONLY_FIRST_ERR = 0x1,
- MLX5_COMP_ALWAYS = 0x2,
- MLX5_COMP_CQE_AND_EQE = 0x3,
-};
-
-/* MPW mode. */
-enum mlx5_mpw_mode {
- MLX5_MPW_DISABLED,
- MLX5_MPW,
- MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
-};
-
-/* WQE Control segment. */
-struct mlx5_wqe_cseg {
- uint32_t opcode;
- uint32_t sq_ds;
- uint32_t flags;
- uint32_t misc;
-} __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
-
-/* Header of data segment. Minimal size Data Segment */
-struct mlx5_wqe_dseg {
- uint32_t bcount;
- union {
- uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
- struct {
- uint32_t lkey;
- uint64_t pbuf;
- } __rte_packed;
- };
-} __rte_packed;
-
-/* Subset of struct WQE Ethernet Segment. */
-struct mlx5_wqe_eseg {
- union {
- struct {
- uint32_t swp_offs;
- uint8_t cs_flags;
- uint8_t swp_flags;
- uint16_t mss;
- uint32_t metadata;
- uint16_t inline_hdr_sz;
- union {
- uint16_t inline_data;
- uint16_t vlan_tag;
- };
- } __rte_packed;
- struct {
- uint32_t offsets;
- uint32_t flags;
- uint32_t flow_metadata;
- uint32_t inline_hdr;
- } __rte_packed;
- };
-} __rte_packed;
-
-/* The title WQEBB, header of WQE. */
-struct mlx5_wqe {
- union {
- struct mlx5_wqe_cseg cseg;
- uint32_t ctrl[4];
- };
- struct mlx5_wqe_eseg eseg;
- union {
- struct mlx5_wqe_dseg dseg[2];
- uint8_t data[MLX5_ESEG_EXTRA_DATA_SIZE];
- };
-} __rte_packed;
-
-/* WQE for Multi-Packet RQ. */
-struct mlx5_wqe_mprq {
- struct mlx5_wqe_srq_next_seg next_seg;
- struct mlx5_wqe_data_seg dseg;
-};
-
-#define MLX5_MPRQ_LEN_MASK 0x000ffff
-#define MLX5_MPRQ_LEN_SHIFT 0
-#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
-#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
-#define MLX5_MPRQ_FILLER_MASK 0x80000000
-#define MLX5_MPRQ_FILLER_SHIFT 31
-
-#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
-
-/* CQ element structure - should be equal to the cache line size */
-struct mlx5_cqe {
-#if (RTE_CACHE_LINE_SIZE == 128)
- uint8_t padding[64];
-#endif
- uint8_t pkt_info;
- uint8_t rsvd0;
- uint16_t wqe_id;
- uint8_t lro_tcppsh_abort_dupack;
- uint8_t lro_min_ttl;
- uint16_t lro_tcp_win;
- uint32_t lro_ack_seq_num;
- uint32_t rx_hash_res;
- uint8_t rx_hash_type;
- uint8_t rsvd1[3];
- uint16_t csum;
- uint8_t rsvd2[6];
- uint16_t hdr_type_etc;
- uint16_t vlan_info;
- uint8_t lro_num_seg;
- uint8_t rsvd3[3];
- uint32_t flow_table_metadata;
- uint8_t rsvd4[4];
- uint32_t byte_cnt;
- uint64_t timestamp;
- uint32_t sop_drop_qpn;
- uint16_t wqe_counter;
- uint8_t rsvd5;
- uint8_t op_own;
-};
-
-/* Adding direct verbs to data-path. */
-
-/* CQ sequence number mask. */
-#define MLX5_CQ_SQN_MASK 0x3
-
-/* CQ sequence number index. */
-#define MLX5_CQ_SQN_OFFSET 28
-
-/* CQ doorbell index mask. */
-#define MLX5_CI_MASK 0xffffff
-
-/* CQ doorbell offset. */
-#define MLX5_CQ_ARM_DB 1
-
-/* CQ doorbell offset*/
-#define MLX5_CQ_DOORBELL 0x20
-
-/* CQE format value. */
-#define MLX5_COMPRESSED 0x3
-
-/* Action type of header modification. */
-enum {
- MLX5_MODIFICATION_TYPE_SET = 0x1,
- MLX5_MODIFICATION_TYPE_ADD = 0x2,
- MLX5_MODIFICATION_TYPE_COPY = 0x3,
-};
-
-/* The field of packet to be modified. */
-enum mlx5_modification_field {
- MLX5_MODI_OUT_NONE = -1,
- MLX5_MODI_OUT_SMAC_47_16 = 1,
- MLX5_MODI_OUT_SMAC_15_0,
- MLX5_MODI_OUT_ETHERTYPE,
- MLX5_MODI_OUT_DMAC_47_16,
- MLX5_MODI_OUT_DMAC_15_0,
- MLX5_MODI_OUT_IP_DSCP,
- MLX5_MODI_OUT_TCP_FLAGS,
- MLX5_MODI_OUT_TCP_SPORT,
- MLX5_MODI_OUT_TCP_DPORT,
- MLX5_MODI_OUT_IPV4_TTL,
- MLX5_MODI_OUT_UDP_SPORT,
- MLX5_MODI_OUT_UDP_DPORT,
- MLX5_MODI_OUT_SIPV6_127_96,
- MLX5_MODI_OUT_SIPV6_95_64,
- MLX5_MODI_OUT_SIPV6_63_32,
- MLX5_MODI_OUT_SIPV6_31_0,
- MLX5_MODI_OUT_DIPV6_127_96,
- MLX5_MODI_OUT_DIPV6_95_64,
- MLX5_MODI_OUT_DIPV6_63_32,
- MLX5_MODI_OUT_DIPV6_31_0,
- MLX5_MODI_OUT_SIPV4,
- MLX5_MODI_OUT_DIPV4,
- MLX5_MODI_OUT_FIRST_VID,
- MLX5_MODI_IN_SMAC_47_16 = 0x31,
- MLX5_MODI_IN_SMAC_15_0,
- MLX5_MODI_IN_ETHERTYPE,
- MLX5_MODI_IN_DMAC_47_16,
- MLX5_MODI_IN_DMAC_15_0,
- MLX5_MODI_IN_IP_DSCP,
- MLX5_MODI_IN_TCP_FLAGS,
- MLX5_MODI_IN_TCP_SPORT,
- MLX5_MODI_IN_TCP_DPORT,
- MLX5_MODI_IN_IPV4_TTL,
- MLX5_MODI_IN_UDP_SPORT,
- MLX5_MODI_IN_UDP_DPORT,
- MLX5_MODI_IN_SIPV6_127_96,
- MLX5_MODI_IN_SIPV6_95_64,
- MLX5_MODI_IN_SIPV6_63_32,
- MLX5_MODI_IN_SIPV6_31_0,
- MLX5_MODI_IN_DIPV6_127_96,
- MLX5_MODI_IN_DIPV6_95_64,
- MLX5_MODI_IN_DIPV6_63_32,
- MLX5_MODI_IN_DIPV6_31_0,
- MLX5_MODI_IN_SIPV4,
- MLX5_MODI_IN_DIPV4,
- MLX5_MODI_OUT_IPV6_HOPLIMIT,
- MLX5_MODI_IN_IPV6_HOPLIMIT,
- MLX5_MODI_META_DATA_REG_A,
- MLX5_MODI_META_DATA_REG_B = 0x50,
- MLX5_MODI_META_REG_C_0,
- MLX5_MODI_META_REG_C_1,
- MLX5_MODI_META_REG_C_2,
- MLX5_MODI_META_REG_C_3,
- MLX5_MODI_META_REG_C_4,
- MLX5_MODI_META_REG_C_5,
- MLX5_MODI_META_REG_C_6,
- MLX5_MODI_META_REG_C_7,
- MLX5_MODI_OUT_TCP_SEQ_NUM,
- MLX5_MODI_IN_TCP_SEQ_NUM,
- MLX5_MODI_OUT_TCP_ACK_NUM,
- MLX5_MODI_IN_TCP_ACK_NUM = 0x5C,
-};
-
-/* Total number of metadata reg_c's. */
-#define MLX5_MREG_C_NUM (MLX5_MODI_META_REG_C_7 - MLX5_MODI_META_REG_C_0 + 1)
-
-enum modify_reg {
- REG_NONE = 0,
- REG_A,
- REG_B,
- REG_C_0,
- REG_C_1,
- REG_C_2,
- REG_C_3,
- REG_C_4,
- REG_C_5,
- REG_C_6,
- REG_C_7,
-};
-
-/* Modification sub command. */
-struct mlx5_modification_cmd {
- union {
- uint32_t data0;
- struct {
- unsigned int length:5;
- unsigned int rsvd0:3;
- unsigned int offset:5;
- unsigned int rsvd1:3;
- unsigned int field:12;
- unsigned int action_type:4;
- };
- };
- union {
- uint32_t data1;
- uint8_t data[4];
- struct {
- unsigned int rsvd2:8;
- unsigned int dst_offset:5;
- unsigned int rsvd3:3;
- unsigned int dst_field:12;
- unsigned int rsvd4:4;
- };
- };
-};
-
-typedef uint32_t u32;
-typedef uint16_t u16;
-typedef uint8_t u8;
-
-#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
-#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned int)(unsigned long) \
- (&(__mlx5_nullp(typ)->fld)))
-#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - \
- (__mlx5_bit_off(typ, fld) & 0x1f))
-#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
-#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
-#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << \
- __mlx5_dw_bit_off(typ, fld))
-#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
-#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
-#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - \
- (__mlx5_bit_off(typ, fld) & 0xf))
-#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
-#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
-#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
-#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
-
-/* insert a value to a struct */
-#define MLX5_SET(typ, p, fld, v) \
- do { \
- u32 _v = v; \
- *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
- rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
- __mlx5_dw_off(typ, fld))) & \
- (~__mlx5_dw_mask(typ, fld))) | \
- (((_v) & __mlx5_mask(typ, fld)) << \
- __mlx5_dw_bit_off(typ, fld))); \
- } while (0)
-
-#define MLX5_SET64(typ, p, fld, v) \
- do { \
- assert(__mlx5_bit_sz(typ, fld) == 64); \
- *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
- rte_cpu_to_be_64(v); \
- } while (0)
-
-#define MLX5_GET(typ, p, fld) \
- ((rte_be_to_cpu_32(*((__be32 *)(p) +\
- __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
- __mlx5_mask(typ, fld))
-#define MLX5_GET16(typ, p, fld) \
- ((rte_be_to_cpu_16(*((__be16 *)(p) + \
- __mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
- __mlx5_mask16(typ, fld))
-#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((__be64 *)(p) + \
- __mlx5_64_off(typ, fld)))
-#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
-
-struct mlx5_ifc_fte_match_set_misc_bits {
- u8 gre_c_present[0x1];
- u8 reserved_at_1[0x1];
- u8 gre_k_present[0x1];
- u8 gre_s_present[0x1];
- u8 source_vhci_port[0x4];
- u8 source_sqn[0x18];
- u8 reserved_at_20[0x10];
- u8 source_port[0x10];
- u8 outer_second_prio[0x3];
- u8 outer_second_cfi[0x1];
- u8 outer_second_vid[0xc];
- u8 inner_second_prio[0x3];
- u8 inner_second_cfi[0x1];
- u8 inner_second_vid[0xc];
- u8 outer_second_cvlan_tag[0x1];
- u8 inner_second_cvlan_tag[0x1];
- u8 outer_second_svlan_tag[0x1];
- u8 inner_second_svlan_tag[0x1];
- u8 reserved_at_64[0xc];
- u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
- u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
- u8 geneve_vni[0x18];
- u8 reserved_at_e4[0x7];
- u8 geneve_oam[0x1];
- u8 reserved_at_e0[0xc];
- u8 outer_ipv6_flow_label[0x14];
- u8 reserved_at_100[0xc];
- u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0xa];
- u8 geneve_opt_len[0x6];
- u8 geneve_protocol_type[0x10];
- u8 reserved_at_140[0xc0];
-};
-
-struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_at_0[0x60];
- u8 ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
- u8 ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
- struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
- struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_at_0[0x80];
-};
-
-struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
- u8 smac_47_16[0x20];
- u8 smac_15_0[0x10];
- u8 ethertype[0x10];
- u8 dmac_47_16[0x20];
- u8 dmac_15_0[0x10];
- u8 first_prio[0x3];
- u8 first_cfi[0x1];
- u8 first_vid[0xc];
- u8 ip_protocol[0x8];
- u8 ip_dscp[0x6];
- u8 ip_ecn[0x2];
- u8 cvlan_tag[0x1];
- u8 svlan_tag[0x1];
- u8 frag[0x1];
- u8 ip_version[0x4];
- u8 tcp_flags[0x9];
- u8 tcp_sport[0x10];
- u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x20];
- u8 udp_sport[0x10];
- u8 udp_dport[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
-};
-
-struct mlx5_ifc_fte_match_mpls_bits {
- u8 mpls_label[0x14];
- u8 mpls_exp[0x3];
- u8 mpls_s_bos[0x1];
- u8 mpls_ttl[0x8];
-};
-
-struct mlx5_ifc_fte_match_set_misc2_bits {
- struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
- struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
- struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
- struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
- u8 metadata_reg_c_7[0x20];
- u8 metadata_reg_c_6[0x20];
- u8 metadata_reg_c_5[0x20];
- u8 metadata_reg_c_4[0x20];
- u8 metadata_reg_c_3[0x20];
- u8 metadata_reg_c_2[0x20];
- u8 metadata_reg_c_1[0x20];
- u8 metadata_reg_c_0[0x20];
- u8 metadata_reg_a[0x20];
- u8 metadata_reg_b[0x20];
- u8 reserved_at_1c0[0x40];
-};
-
-struct mlx5_ifc_fte_match_set_misc3_bits {
- u8 inner_tcp_seq_num[0x20];
- u8 outer_tcp_seq_num[0x20];
- u8 inner_tcp_ack_num[0x20];
- u8 outer_tcp_ack_num[0x20];
- u8 reserved_at_auto1[0x8];
- u8 outer_vxlan_gpe_vni[0x18];
- u8 outer_vxlan_gpe_next_protocol[0x8];
- u8 outer_vxlan_gpe_flags[0x8];
- u8 reserved_at_a8[0x10];
- u8 icmp_header_data[0x20];
- u8 icmpv6_header_data[0x20];
- u8 icmp_type[0x8];
- u8 icmp_code[0x8];
- u8 icmpv6_type[0x8];
- u8 icmpv6_code[0x8];
- u8 reserved_at_120[0x20];
- u8 gtpu_teid[0x20];
- u8 gtpu_msg_type[0x08];
- u8 gtpu_msg_flags[0x08];
- u8 reserved_at_170[0x90];
-};
-
-/* Flow matcher. */
-struct mlx5_ifc_fte_match_param_bits {
- struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
- struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
- struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
- struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
-};
-
-enum {
- MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
- MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
- MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
- MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
- MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT
-};
-
-enum {
- MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
- MLX5_CMD_OP_CREATE_MKEY = 0x200,
- MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
- MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
- MLX5_CMD_OP_CREATE_TIR = 0x900,
- MLX5_CMD_OP_CREATE_SQ = 0X904,
- MLX5_CMD_OP_MODIFY_SQ = 0X905,
- MLX5_CMD_OP_CREATE_RQ = 0x908,
- MLX5_CMD_OP_MODIFY_RQ = 0x909,
- MLX5_CMD_OP_CREATE_TIS = 0x912,
- MLX5_CMD_OP_QUERY_TIS = 0x915,
- MLX5_CMD_OP_CREATE_RQT = 0x916,
- MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
- MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
-};
-
-enum {
- MLX5_MKC_ACCESS_MODE_MTT = 0x1,
-};
-
-/* Flow counters. */
-struct mlx5_ifc_alloc_flow_counter_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 flow_counter_id[0x20];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_flow_counter_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 flow_counter_id[0x20];
- u8 reserved_at_40[0x18];
- u8 flow_counter_bulk[0x8];
-};
-
-struct mlx5_ifc_dealloc_flow_counter_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_flow_counter_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 flow_counter_id[0x20];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_traffic_counter_bits {
- u8 packets[0x40];
- u8 octets[0x40];
-};
-
-struct mlx5_ifc_query_flow_counter_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
- struct mlx5_ifc_traffic_counter_bits flow_statistics[];
-};
-
-struct mlx5_ifc_query_flow_counter_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0x20];
- u8 mkey[0x20];
- u8 address[0x40];
- u8 clear[0x1];
- u8 dump_to_memory[0x1];
- u8 num_of_counters[0x1e];
- u8 flow_counter_id[0x20];
-};
-
-struct mlx5_ifc_mkc_bits {
- u8 reserved_at_0[0x1];
- u8 free[0x1];
- u8 reserved_at_2[0x1];
- u8 access_mode_4_2[0x3];
- u8 reserved_at_6[0x7];
- u8 relaxed_ordering_write[0x1];
- u8 reserved_at_e[0x1];
- u8 small_fence_on_rdma_read_response[0x1];
- u8 umr_en[0x1];
- u8 a[0x1];
- u8 rw[0x1];
- u8 rr[0x1];
- u8 lw[0x1];
- u8 lr[0x1];
- u8 access_mode_1_0[0x2];
- u8 reserved_at_18[0x8];
-
- u8 qpn[0x18];
- u8 mkey_7_0[0x8];
-
- u8 reserved_at_40[0x20];
-
- u8 length64[0x1];
- u8 bsf_en[0x1];
- u8 sync_umr[0x1];
- u8 reserved_at_63[0x2];
- u8 expected_sigerr_count[0x1];
- u8 reserved_at_66[0x1];
- u8 en_rinval[0x1];
- u8 pd[0x18];
-
- u8 start_addr[0x40];
-
- u8 len[0x40];
-
- u8 bsf_octword_size[0x20];
-
- u8 reserved_at_120[0x80];
-
- u8 translations_octword_size[0x20];
-
- u8 reserved_at_1c0[0x1b];
- u8 log_page_size[0x5];
-
- u8 reserved_at_1e0[0x20];
-};
-
-struct mlx5_ifc_create_mkey_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x8];
- u8 mkey_index[0x18];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_mkey_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 reserved_at_40[0x20];
-
- u8 pg_access[0x1];
- u8 reserved_at_61[0x1f];
-
- struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
-
- u8 reserved_at_280[0x80];
-
- u8 translations_octword_actual_size[0x20];
-
- u8 mkey_umem_id[0x20];
-
- u8 mkey_umem_offset[0x40];
-
- u8 reserved_at_380[0x500];
-
- u8 klm_pas_mtt[][0x20];
-};
-
-enum {
- MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0 << 1,
- MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS = 0x1 << 1,
- MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP = 0xc << 1,
-};
-
-enum {
- MLX5_HCA_CAP_OPMOD_GET_MAX = 0,
- MLX5_HCA_CAP_OPMOD_GET_CUR = 1,
-};
-
-enum {
- MLX5_CAP_INLINE_MODE_L2,
- MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
- MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
-};
-
-enum {
- MLX5_INLINE_MODE_NONE,
- MLX5_INLINE_MODE_L2,
- MLX5_INLINE_MODE_IP,
- MLX5_INLINE_MODE_TCP_UDP,
- MLX5_INLINE_MODE_RESERVED4,
- MLX5_INLINE_MODE_INNER_L2,
- MLX5_INLINE_MODE_INNER_IP,
- MLX5_INLINE_MODE_INNER_TCP_UDP,
-};
-
-/* HCA bit masks indicating which Flex parser protocols are already enabled. */
-#define MLX5_HCA_FLEX_IPV4_OVER_VXLAN_ENABLED (1UL << 0)
-#define MLX5_HCA_FLEX_IPV6_OVER_VXLAN_ENABLED (1UL << 1)
-#define MLX5_HCA_FLEX_IPV6_OVER_IP_ENABLED (1UL << 2)
-#define MLX5_HCA_FLEX_GENEVE_ENABLED (1UL << 3)
-#define MLX5_HCA_FLEX_CW_MPLS_OVER_GRE_ENABLED (1UL << 4)
-#define MLX5_HCA_FLEX_CW_MPLS_OVER_UDP_ENABLED (1UL << 5)
-#define MLX5_HCA_FLEX_P_BIT_VXLAN_GPE_ENABLED (1UL << 6)
-#define MLX5_HCA_FLEX_VXLAN_GPE_ENABLED (1UL << 7)
-#define MLX5_HCA_FLEX_ICMP_ENABLED (1UL << 8)
-#define MLX5_HCA_FLEX_ICMPV6_ENABLED (1UL << 9)
-
-struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x30];
- u8 vhca_id[0x10];
- u8 reserved_at_40[0x40];
- u8 log_max_srq_sz[0x8];
- u8 log_max_qp_sz[0x8];
- u8 reserved_at_90[0xb];
- u8 log_max_qp[0x5];
- u8 reserved_at_a0[0xb];
- u8 log_max_srq[0x5];
- u8 reserved_at_b0[0x10];
- u8 reserved_at_c0[0x8];
- u8 log_max_cq_sz[0x8];
- u8 reserved_at_d0[0xb];
- u8 log_max_cq[0x5];
- u8 log_max_eq_sz[0x8];
- u8 reserved_at_e8[0x2];
- u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0x8];
- u8 dump_fill_mkey[0x1];
- u8 reserved_at_f9[0x3];
- u8 log_max_eq[0x4];
- u8 max_indirection[0x8];
- u8 fixed_buffer_size[0x1];
- u8 log_max_mrw_sz[0x7];
- u8 force_teardown[0x1];
- u8 reserved_at_111[0x1];
- u8 log_max_bsf_list_size[0x6];
- u8 umr_extended_translation_offset[0x1];
- u8 null_mkey[0x1];
- u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
- u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0xa];
- u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0xa];
- u8 log_max_ra_req_qp[0x6];
- u8 reserved_at_150[0xa];
- u8 log_max_ra_res_qp[0x6];
- u8 end_pad[0x1];
- u8 cc_query_allowed[0x1];
- u8 cc_modify_allowed[0x1];
- u8 start_pad[0x1];
- u8 cache_line_128byte[0x1];
- u8 reserved_at_165[0xa];
- u8 qcam_reg[0x1];
- u8 gid_table_size[0x10];
- u8 out_of_seq_cnt[0x1];
- u8 vport_counters[0x1];
- u8 retransmission_q_counters[0x1];
- u8 debug[0x1];
- u8 modify_rq_counter_set_id[0x1];
- u8 rq_delay_drop[0x1];
- u8 max_qp_cnt[0xa];
- u8 pkey_table_size[0x10];
- u8 vport_group_manager[0x1];
- u8 vhca_group_manager[0x1];
- u8 ib_virt[0x1];
- u8 eth_virt[0x1];
- u8 vnic_env_queue_counters[0x1];
- u8 ets[0x1];
- u8 nic_flow_table[0x1];
- u8 eswitch_manager[0x1];
- u8 device_memory[0x1];
- u8 mcam_reg[0x1];
- u8 pcam_reg[0x1];
- u8 local_ca_ack_delay[0x5];
- u8 port_module_event[0x1];
- u8 enhanced_error_q_counters[0x1];
- u8 ports_check[0x1];
- u8 reserved_at_1b3[0x1];
- u8 disable_link_up[0x1];
- u8 beacon_led[0x1];
- u8 port_type[0x2];
- u8 num_ports[0x8];
- u8 reserved_at_1c0[0x1];
- u8 pps[0x1];
- u8 pps_modify[0x1];
- u8 log_max_msg[0x5];
- u8 reserved_at_1c8[0x4];
- u8 max_tc[0x4];
- u8 temp_warn_event[0x1];
- u8 dcbx[0x1];
- u8 general_notification_event[0x1];
- u8 reserved_at_1d3[0x2];
- u8 fpga[0x1];
- u8 rol_s[0x1];
- u8 rol_g[0x1];
- u8 reserved_at_1d8[0x1];
- u8 wol_s[0x1];
- u8 wol_g[0x1];
- u8 wol_a[0x1];
- u8 wol_b[0x1];
- u8 wol_m[0x1];
- u8 wol_u[0x1];
- u8 wol_p[0x1];
- u8 stat_rate_support[0x10];
- u8 reserved_at_1f0[0xc];
- u8 cqe_version[0x4];
- u8 compact_address_vector[0x1];
- u8 striding_rq[0x1];
- u8 reserved_at_202[0x1];
- u8 ipoib_enhanced_offloads[0x1];
- u8 ipoib_basic_offloads[0x1];
- u8 reserved_at_205[0x1];
- u8 repeated_block_disabled[0x1];
- u8 umr_modify_entity_size_disabled[0x1];
- u8 umr_modify_atomic_disabled[0x1];
- u8 umr_indirect_mkey_disabled[0x1];
- u8 umr_fence[0x2];
- u8 reserved_at_20c[0x3];
- u8 drain_sigerr[0x1];
- u8 cmdif_checksum[0x2];
- u8 sigerr_cqe[0x1];
- u8 reserved_at_213[0x1];
- u8 wq_signature[0x1];
- u8 sctr_data_cqe[0x1];
- u8 reserved_at_216[0x1];
- u8 sho[0x1];
- u8 tph[0x1];
- u8 rf[0x1];
- u8 dct[0x1];
- u8 qos[0x1];
- u8 eth_net_offloads[0x1];
- u8 roce[0x1];
- u8 atomic[0x1];
- u8 reserved_at_21f[0x1];
- u8 cq_oi[0x1];
- u8 cq_resize[0x1];
- u8 cq_moderation[0x1];
- u8 reserved_at_223[0x3];
- u8 cq_eq_remap[0x1];
- u8 pg[0x1];
- u8 block_lb_mc[0x1];
- u8 reserved_at_229[0x1];
- u8 scqe_break_moderation[0x1];
- u8 cq_period_start_from_cqe[0x1];
- u8 cd[0x1];
- u8 reserved_at_22d[0x1];
- u8 apm[0x1];
- u8 vector_calc[0x1];
- u8 umr_ptr_rlky[0x1];
- u8 imaicl[0x1];
- u8 reserved_at_232[0x4];
- u8 qkv[0x1];
- u8 pkv[0x1];
- u8 set_deth_sqpn[0x1];
- u8 reserved_at_239[0x3];
- u8 xrc[0x1];
- u8 ud[0x1];
- u8 uc[0x1];
- u8 rc[0x1];
- u8 uar_4k[0x1];
- u8 reserved_at_241[0x9];
- u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
- u8 log_pg_sz[0x8];
- u8 bf[0x1];
- u8 driver_version[0x1];
- u8 pad_tx_eth_packet[0x1];
- u8 reserved_at_263[0x8];
- u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0xb];
- u8 lag_master[0x1];
- u8 num_lag_ports[0x4];
- u8 reserved_at_280[0x10];
- u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0x10];
- u8 max_wqe_sz_rq[0x10];
- u8 max_flow_counter_31_16[0x10];
- u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_at_2e0[0x7];
- u8 max_qp_mcg[0x19];
- u8 reserved_at_300[0x10];
- u8 flow_counter_bulk_alloc[0x08];
- u8 log_max_mcg[0x8];
- u8 reserved_at_320[0x3];
- u8 log_max_transport_domain[0x5];
- u8 reserved_at_328[0x3];
- u8 log_max_pd[0x5];
- u8 reserved_at_330[0xb];
- u8 log_max_xrcd[0x5];
- u8 nic_receive_steering_discard[0x1];
- u8 receive_discard_vport_down[0x1];
- u8 transmit_discard_vport_down[0x1];
- u8 reserved_at_343[0x5];
- u8 log_max_flow_counter_bulk[0x8];
- u8 max_flow_counter_15_0[0x10];
- u8 modify_tis[0x1];
- u8 flow_counters_dump[0x1];
- u8 reserved_at_360[0x1];
- u8 log_max_rq[0x5];
- u8 reserved_at_368[0x3];
- u8 log_max_sq[0x5];
- u8 reserved_at_370[0x3];
- u8 log_max_tir[0x5];
- u8 reserved_at_378[0x3];
- u8 log_max_tis[0x5];
- u8 basic_cyclic_rcv_wqe[0x1];
- u8 reserved_at_381[0x2];
- u8 log_max_rmp[0x5];
- u8 reserved_at_388[0x3];
- u8 log_max_rqt[0x5];
- u8 reserved_at_390[0x3];
- u8 log_max_rqt_size[0x5];
- u8 reserved_at_398[0x3];
- u8 log_max_tis_per_sq[0x5];
- u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
- u8 log_max_stride_sz_rq[0x5];
- u8 reserved_at_3a8[0x3];
- u8 log_min_stride_sz_rq[0x5];
- u8 reserved_at_3b0[0x3];
- u8 log_max_stride_sz_sq[0x5];
- u8 reserved_at_3b8[0x3];
- u8 log_min_stride_sz_sq[0x5];
- u8 hairpin[0x1];
- u8 reserved_at_3c1[0x2];
- u8 log_max_hairpin_queues[0x5];
- u8 reserved_at_3c8[0x3];
- u8 log_max_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3d0[0x3];
- u8 log_max_hairpin_num_packets[0x5];
- u8 reserved_at_3d8[0x3];
- u8 log_max_wq_sz[0x5];
- u8 nic_vport_change_event[0x1];
- u8 disable_local_lb_uc[0x1];
- u8 disable_local_lb_mc[0x1];
- u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x3];
- u8 log_max_vlan_list[0x5];
- u8 reserved_at_3f0[0x3];
- u8 log_max_current_mc_list[0x5];
- u8 reserved_at_3f8[0x3];
- u8 log_max_current_uc_list[0x5];
- u8 general_obj_types[0x40];
- u8 reserved_at_440[0x20];
- u8 reserved_at_460[0x10];
- u8 max_num_eqs[0x10];
- u8 reserved_at_480[0x3];
- u8 log_max_l2_table[0x5];
- u8 reserved_at_488[0x8];
- u8 log_uar_page_sz[0x10];
- u8 reserved_at_4a0[0x20];
- u8 device_frequency_mhz[0x20];
- u8 device_frequency_khz[0x20];
- u8 reserved_at_500[0x20];
- u8 num_of_uars_per_page[0x20];
- u8 flex_parser_protocols[0x20];
- u8 reserved_at_560[0x20];
- u8 reserved_at_580[0x3c];
- u8 mini_cqe_resp_stride_index[0x1];
- u8 cqe_128_always[0x1];
- u8 cqe_compression_128[0x1];
- u8 cqe_compression[0x1];
- u8 cqe_compression_timeout[0x10];
- u8 cqe_compression_max_num[0x10];
- u8 reserved_at_5e0[0x10];
- u8 tag_matching[0x1];
- u8 rndv_offload_rc[0x1];
- u8 rndv_offload_dc[0x1];
- u8 log_tag_matching_list_sz[0x5];
- u8 reserved_at_5f8[0x3];
- u8 log_max_xrq[0x5];
- u8 affiliate_nic_vport_criteria[0x8];
- u8 native_port_num[0x8];
- u8 num_vhca_ports[0x8];
- u8 reserved_at_618[0x6];
- u8 sw_owner_id[0x1];
- u8 reserved_at_61f[0x1e1];
-};
-
-struct mlx5_ifc_qos_cap_bits {
- u8 packet_pacing[0x1];
- u8 esw_scheduling[0x1];
- u8 esw_bw_share[0x1];
- u8 esw_rate_limit[0x1];
- u8 reserved_at_4[0x1];
- u8 packet_pacing_burst_bound[0x1];
- u8 packet_pacing_typical_size[0x1];
- u8 flow_meter_srtcm[0x1];
- u8 reserved_at_8[0x8];
- u8 log_max_flow_meter[0x8];
- u8 flow_meter_reg_id[0x8];
- u8 reserved_at_25[0x8];
- u8 flow_meter_reg_share[0x1];
- u8 reserved_at_2e[0x17];
- u8 packet_pacing_max_rate[0x20];
- u8 packet_pacing_min_rate[0x20];
- u8 reserved_at_80[0x10];
- u8 packet_pacing_rate_table_size[0x10];
- u8 esw_element_type[0x10];
- u8 esw_tsar_type[0x10];
- u8 reserved_at_c0[0x10];
- u8 max_qos_para_vport[0x10];
- u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x6e8];
-};
-
-struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
- u8 csum_cap[0x1];
- u8 vlan_cap[0x1];
- u8 lro_cap[0x1];
- u8 lro_psh_flag[0x1];
- u8 lro_time_stamp[0x1];
- u8 lro_max_msg_sz_mode[0x2];
- u8 wqe_vlan_insert[0x1];
- u8 self_lb_en_modifiable[0x1];
- u8 self_lb_mc[0x1];
- u8 self_lb_uc[0x1];
- u8 max_lso_cap[0x5];
- u8 multi_pkt_send_wqe[0x2];
- u8 wqe_inline_mode[0x2];
- u8 rss_ind_tbl_cap[0x4];
- u8 reg_umr_sq[0x1];
- u8 scatter_fcs[0x1];
- u8 enhanced_multi_pkt_send_wqe[0x1];
- u8 tunnel_lso_const_out_ip_id[0x1];
- u8 tunnel_lro_gre[0x1];
- u8 tunnel_lro_vxlan[0x1];
- u8 tunnel_stateless_gre[0x1];
- u8 tunnel_stateless_vxlan[0x1];
- u8 swp[0x1];
- u8 swp_csum[0x1];
- u8 swp_lso[0x1];
- u8 reserved_at_23[0x8];
- u8 tunnel_stateless_gtp[0x1];
- u8 reserved_at_25[0x4];
- u8 max_vxlan_udp_ports[0x8];
- u8 reserved_at_38[0x6];
- u8 max_geneve_opt_len[0x1];
- u8 tunnel_stateless_geneve_rx[0x1];
- u8 reserved_at_40[0x10];
- u8 lro_min_mss_size[0x10];
- u8 reserved_at_60[0x120];
- u8 lro_timer_supported_periods[4][0x20];
- u8 reserved_at_200[0x600];
-};
-
-union mlx5_ifc_hca_cap_union_bits {
- struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
- struct mlx5_ifc_per_protocol_networking_offload_caps_bits
- per_protocol_networking_offload_caps;
- struct mlx5_ifc_qos_cap_bits qos_cap;
- u8 reserved_at_0[0x8000];
-};
-
-struct mlx5_ifc_query_hca_cap_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
- union mlx5_ifc_hca_cap_union_bits capability;
-};
-
-struct mlx5_ifc_query_hca_cap_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_mac_address_layout_bits {
- u8 reserved_at_0[0x10];
- u8 mac_addr_47_32[0x10];
- u8 mac_addr_31_0[0x20];
-};
-
-struct mlx5_ifc_nic_vport_context_bits {
- u8 reserved_at_0[0x5];
- u8 min_wqe_inline_mode[0x3];
- u8 reserved_at_8[0x15];
- u8 disable_mc_local_lb[0x1];
- u8 disable_uc_local_lb[0x1];
- u8 roce_en[0x1];
- u8 arm_change_event[0x1];
- u8 reserved_at_21[0x1a];
- u8 event_on_mtu[0x1];
- u8 event_on_promisc_change[0x1];
- u8 event_on_vlan_change[0x1];
- u8 event_on_mc_address_change[0x1];
- u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xc];
- u8 affiliation_criteria[0x4];
- u8 affiliated_vhca_id[0x10];
- u8 reserved_at_60[0xd0];
- u8 mtu[0x10];
- u8 system_image_guid[0x40];
- u8 port_guid[0x40];
- u8 node_guid[0x40];
- u8 reserved_at_200[0x140];
- u8 qkey_violation_counter[0x10];
- u8 reserved_at_350[0x430];
- u8 promisc_uc[0x1];
- u8 promisc_mc[0x1];
- u8 promisc_all[0x1];
- u8 reserved_at_783[0x2];
- u8 allowed_list_type[0x3];
- u8 reserved_at_788[0xc];
- u8 allowed_list_size[0xc];
- struct mlx5_ifc_mac_address_layout_bits permanent_address;
- u8 reserved_at_7e0[0x20];
-};
-
-struct mlx5_ifc_query_nic_vport_context_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
- struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
-};
-
-struct mlx5_ifc_query_nic_vport_context_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
- u8 vport_number[0x10];
- u8 reserved_at_60[0x5];
- u8 allowed_list_type[0x3];
- u8 reserved_at_68[0x18];
-};
-
-struct mlx5_ifc_tisc_bits {
- u8 strict_lag_tx_port_affinity[0x1];
- u8 reserved_at_1[0x3];
- u8 lag_tx_port_affinity[0x04];
- u8 reserved_at_8[0x4];
- u8 prio[0x4];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x100];
- u8 reserved_at_120[0x8];
- u8 transport_domain[0x18];
- u8 reserved_at_140[0x8];
- u8 underlay_qpn[0x18];
- u8 reserved_at_160[0x3a0];
-};
-
-struct mlx5_ifc_query_tis_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
- struct mlx5_ifc_tisc_bits tis_context;
-};
-
-struct mlx5_ifc_query_tis_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
- u8 tisn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_transport_domain_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
- u8 transport_domain[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_transport_domain_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
-};
-
-enum {
- MLX5_WQ_TYPE_LINKED_LIST = 0x0,
- MLX5_WQ_TYPE_CYCLIC = 0x1,
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
- MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ = 0x3,
-};
-
-enum {
- MLX5_WQ_END_PAD_MODE_NONE = 0x0,
- MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
-};
-
-struct mlx5_ifc_wq_bits {
- u8 wq_type[0x4];
- u8 wq_signature[0x1];
- u8 end_padding_mode[0x2];
- u8 cd_slave[0x1];
- u8 reserved_at_8[0x18];
- u8 hds_skip_first_sge[0x1];
- u8 log2_hds_buf_size[0x3];
- u8 reserved_at_24[0x7];
- u8 page_offset[0x5];
- u8 lwm[0x10];
- u8 reserved_at_40[0x8];
- u8 pd[0x18];
- u8 reserved_at_60[0x8];
- u8 uar_page[0x18];
- u8 dbr_addr[0x40];
- u8 hw_counter[0x20];
- u8 sw_counter[0x20];
- u8 reserved_at_100[0xc];
- u8 log_wq_stride[0x4];
- u8 reserved_at_110[0x3];
- u8 log_wq_pg_sz[0x5];
- u8 reserved_at_118[0x3];
- u8 log_wq_sz[0x5];
- u8 dbr_umem_valid[0x1];
- u8 wq_umem_valid[0x1];
- u8 reserved_at_122[0x1];
- u8 log_hairpin_num_packets[0x5];
- u8 reserved_at_128[0x3];
- u8 log_hairpin_data_sz[0x5];
- u8 reserved_at_130[0x4];
- u8 single_wqe_log_num_of_strides[0x4];
- u8 two_byte_shift_en[0x1];
- u8 reserved_at_139[0x4];
- u8 single_stride_log_num_of_bytes[0x3];
- u8 dbr_umem_id[0x20];
- u8 wq_umem_id[0x20];
- u8 wq_umem_offset[0x40];
- u8 reserved_at_1c0[0x440];
-};
-
-enum {
- MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
- MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
-};
-
-enum {
- MLX5_RQC_STATE_RST = 0x0,
- MLX5_RQC_STATE_RDY = 0x1,
- MLX5_RQC_STATE_ERR = 0x3,
-};
-
-struct mlx5_ifc_rqc_bits {
- u8 rlky[0x1];
- u8 delay_drop_en[0x1];
- u8 scatter_fcs[0x1];
- u8 vsd[0x1];
- u8 mem_rq_type[0x4];
- u8 state[0x4];
- u8 reserved_at_c[0x1];
- u8 flush_in_error_en[0x1];
- u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
- u8 reserved_at_20[0x8];
- u8 user_index[0x18];
- u8 reserved_at_40[0x8];
- u8 cqn[0x18];
- u8 counter_set_id[0x8];
- u8 reserved_at_68[0x18];
- u8 reserved_at_80[0x8];
- u8 rmpn[0x18];
- u8 reserved_at_a0[0x8];
- u8 hairpin_peer_sq[0x18];
- u8 reserved_at_c0[0x10];
- u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0];
- struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */
-};
-
-struct mlx5_ifc_create_rq_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
- u8 rqn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_rq_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0xc0];
- struct mlx5_ifc_rqc_bits ctx;
-};
-
-struct mlx5_ifc_modify_rq_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_create_tis_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
- u8 tisn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_tis_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0xc0];
- struct mlx5_ifc_tisc_bits ctx;
-};
-
-enum {
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM = 1ULL << 0,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
-};
-
-struct mlx5_ifc_modify_rq_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 rq_state[0x4];
- u8 reserved_at_44[0x4];
- u8 rqn[0x18];
- u8 reserved_at_60[0x20];
- u8 modify_bitmask[0x40];
- u8 reserved_at_c0[0x40];
- struct mlx5_ifc_rqc_bits ctx;
-};
-
-enum {
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
-};
-
-struct mlx5_ifc_rx_hash_field_select_bits {
- u8 l3_prot_type[0x1];
- u8 l4_prot_type[0x1];
- u8 selected_fields[0x1e];
-};
-
-enum {
- MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
- MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
-};
-
-enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
-};
-
-enum {
- MLX5_RX_HASH_FN_NONE = 0x0,
- MLX5_RX_HASH_FN_INVERTED_XOR8 = 0x1,
- MLX5_RX_HASH_FN_TOEPLITZ = 0x2,
-};
-
-enum {
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST = 0x1,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST = 0x2,
-};
-
-enum {
- MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 = 0x0,
- MLX5_LRO_MAX_MSG_SIZE_START_FROM_L2 = 0x1,
-};
-
-struct mlx5_ifc_tirc_bits {
- u8 reserved_at_0[0x20];
- u8 disp_type[0x4];
- u8 reserved_at_24[0x1c];
- u8 reserved_at_40[0x40];
- u8 reserved_at_80[0x4];
- u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
- u8 lro_max_msg_sz[0x8];
- u8 reserved_at_a0[0x40];
- u8 reserved_at_e0[0x8];
- u8 inline_rqn[0x18];
- u8 rx_hash_symmetric[0x1];
- u8 reserved_at_101[0x1];
- u8 tunneled_offload_en[0x1];
- u8 reserved_at_103[0x5];
- u8 indirect_table[0x18];
- u8 rx_hash_fn[0x4];
- u8 reserved_at_124[0x2];
- u8 self_lb_block[0x2];
- u8 transport_domain[0x18];
- u8 rx_hash_toeplitz_key[10][0x20];
- struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
- struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
- u8 reserved_at_2c0[0x4c0];
-};
-
-struct mlx5_ifc_create_tir_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
- u8 tirn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_tir_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0xc0];
- struct mlx5_ifc_tirc_bits ctx;
-};
-
-struct mlx5_ifc_rq_num_bits {
- u8 reserved_at_0[0x8];
- u8 rq_num[0x18];
-};
-
-struct mlx5_ifc_rqtc_bits {
- u8 reserved_at_0[0xa0];
- u8 reserved_at_a0[0x10];
- u8 rqt_max_size[0x10];
- u8 reserved_at_c0[0x10];
- u8 rqt_actual_size[0x10];
- u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[];
-};
-
-struct mlx5_ifc_create_rqt_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
- u8 rqtn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-struct mlx5_ifc_create_rqt_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0xc0];
- struct mlx5_ifc_rqtc_bits rqt_context;
-};
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-enum {
- MLX5_SQC_STATE_RST = 0x0,
- MLX5_SQC_STATE_RDY = 0x1,
- MLX5_SQC_STATE_ERR = 0x3,
-};
-
-struct mlx5_ifc_sqc_bits {
- u8 rlky[0x1];
- u8 cd_master[0x1];
- u8 fre[0x1];
- u8 flush_in_error_en[0x1];
- u8 allow_multi_pkt_send_wqe[0x1];
- u8 min_wqe_inline_mode[0x3];
- u8 state[0x4];
- u8 reg_umr[0x1];
- u8 allow_swp[0x1];
- u8 hairpin[0x1];
- u8 reserved_at_f[0x11];
- u8 reserved_at_20[0x8];
- u8 user_index[0x18];
- u8 reserved_at_40[0x8];
- u8 cqn[0x18];
- u8 reserved_at_60[0x8];
- u8 hairpin_peer_rq[0x18];
- u8 reserved_at_80[0x10];
- u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_a0[0x50];
- u8 packet_pacing_rate_limit_index[0x10];
- u8 tis_lst_sz[0x10];
- u8 reserved_at_110[0x10];
- u8 reserved_at_120[0x40];
- u8 reserved_at_160[0x8];
- u8 tis_num_0[0x18];
- struct mlx5_ifc_wq_bits wq;
-};
-
-struct mlx5_ifc_query_sq_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
- u8 sqn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_modify_sq_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_sq_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 sq_state[0x4];
- u8 reserved_at_44[0x4];
- u8 sqn[0x18];
- u8 reserved_at_60[0x20];
- u8 modify_bitmask[0x40];
- u8 reserved_at_c0[0x40];
- struct mlx5_ifc_sqc_bits ctx;
-};
-
-struct mlx5_ifc_create_sq_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
- u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
- u8 sqn[0x18];
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_sq_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
- u8 reserved_at_40[0xc0];
- struct mlx5_ifc_sqc_bits ctx;
-};
-
-enum {
- MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE = (1ULL << 0),
- MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CBS = (1ULL << 1),
- MLX5_FLOW_METER_OBJ_MODIFY_FIELD_CIR = (1ULL << 2),
- MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EBS = (1ULL << 3),
- MLX5_FLOW_METER_OBJ_MODIFY_FIELD_EIR = (1ULL << 4),
-};
-
-struct mlx5_ifc_flow_meter_parameters_bits {
- u8 valid[0x1]; // 00h
- u8 bucket_overflow[0x1];
- u8 start_color[0x2];
- u8 both_buckets_on_green[0x1];
- u8 meter_mode[0x2];
- u8 reserved_at_1[0x19];
- u8 reserved_at_2[0x20]; //04h
- u8 reserved_at_3[0x3];
- u8 cbs_exponent[0x5]; // 08h
- u8 cbs_mantissa[0x8];
- u8 reserved_at_4[0x3];
- u8 cir_exponent[0x5];
- u8 cir_mantissa[0x8];
- u8 reserved_at_5[0x20]; // 0Ch
- u8 reserved_at_6[0x3];
- u8 ebs_exponent[0x5]; // 10h
- u8 ebs_mantissa[0x8];
- u8 reserved_at_7[0x3];
- u8 eir_exponent[0x5];
- u8 eir_mantissa[0x8];
- u8 reserved_at_8[0x60]; // 14h-1Ch
-};
-
-/* CQE format mask. */
-#define MLX5E_CQE_FORMAT_MASK 0xc
-
-/* MPW opcode. */
-#define MLX5_OPC_MOD_MPW 0x01
-
-/* Compressed Rx CQE structure. */
-struct mlx5_mini_cqe8 {
- union {
- uint32_t rx_hash_result;
- struct {
- uint16_t checksum;
- uint16_t stride_idx;
- };
- struct {
- uint16_t wqe_counter;
- uint8_t s_wqe_opcode;
- uint8_t reserved;
- } s_wqe_info;
- };
- uint32_t byte_cnt;
-};
-
-/* srTCM PRM flow meter parameters. */
-enum {
- MLX5_FLOW_COLOR_RED = 0,
- MLX5_FLOW_COLOR_YELLOW,
- MLX5_FLOW_COLOR_GREEN,
- MLX5_FLOW_COLOR_UNDEFINED,
-};
-
-/* Maximum value of srTCM metering parameters. */
-#define MLX5_SRTCM_CBS_MAX (0xFF * (1ULL << 0x1F))
-#define MLX5_SRTCM_CIR_MAX (8 * (1ULL << 30) * 0xFF)
-#define MLX5_SRTCM_EBS_MAX 0
-
-/* The bits meter color use. */
-#define MLX5_MTR_COLOR_BITS 8
-
-/**
- * Convert a user mark to flow mark.
- *
- * @param val
- * Mark value to convert.
- *
- * @return
- * Converted mark value.
- */
-static inline uint32_t
-mlx5_flow_mark_set(uint32_t val)
-{
- uint32_t ret;
-
- /*
- * Add one to the user value to differentiate un-marked flows from
- * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it
- * remains untouched.
- */
- if (val != MLX5_FLOW_MARK_DEFAULT)
- ++val;
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- /*
- * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
- * word, byte-swapped by the kernel on little-endian systems. In this
- * case, left-shifting the resulting big-endian value ensures the
- * least significant 24 bits are retained when converting it back.
- */
- ret = rte_cpu_to_be_32(val) >> 8;
-#else
- ret = val;
-#endif
- return ret;
-}
-
-/**
- * Convert a mark to user mark.
- *
- * @param val
- * Mark value to convert.
- *
- * @return
- * Converted mark value.
- */
-static inline uint32_t
-mlx5_flow_mark_get(uint32_t val)
-{
- /*
- * Subtract one from the retrieved value. It was added by
- * mlx5_flow_mark_set() to distinguish unmarked flows.
- */
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- return (val >> 8) - 1;
-#else
- return val - 1;
-#endif
-}
-
-#endif /* RTE_PMD_MLX5_PRM_H_ */
#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
-#include "mlx5.h"
#include "mlx5_defs.h"
+#include "mlx5.h"
#include "mlx5_rxtx.h"
/**
#include <rte_debug.h>
#include <rte_io.h>
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_glue.h"
#include "mlx5_flow.h"
-#include "mlx5_devx_cmds.h"
+
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
#include <rte_cycles.h>
#include <rte_flow.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_devx_cmds.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
#include <rte_bus_pci.h>
#include <rte_malloc.h>
+#include <mlx5_glue.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10
#include <rte_mempool.h>
#include <rte_prefetch.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
#if defined RTE_ARCH_X86_64
#include "mlx5_rxtx_vec_sse.h"
#include <rte_common.h>
#include <rte_mbuf.h>
+#include <mlx5_prm.h>
+
#include "mlx5_autoconf.h"
-#include "mlx5_prm.h"
/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
#include <rte_mempool.h>
#include <rte_prefetch.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#include <rte_mempool.h>
#include <rte_prefetch.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
#pragma GCC diagnostic ignored "-Wcast-qual"
#include <rte_mempool.h>
#include <rte_prefetch.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#include <rte_common.h>
#include <rte_malloc.h>
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
-#include "mlx5_defs.h"
static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
{
#include <rte_ethdev_driver.h>
#include <rte_common.h>
-#include "mlx5_utils.h"
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+
#include "mlx5_defs.h"
+#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_glue.h"
-#include "mlx5_devx_cmds.h"
/**
* Allocate TX queue elements.
#include <assert.h>
#include <errno.h>
+#include <mlx5_common.h>
+
#include "mlx5_defs.h"
+
/*
* Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
* Otherwise there would be a type conflict between stdbool and altivec.
/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
-/*
- * Helper macros to work around __VA_ARGS__ limitations in a C99 compliant
- * manner.
- */
-#define PMD_DRV_LOG_STRIP(a, b) a
-#define PMD_DRV_LOG_OPAREN (
-#define PMD_DRV_LOG_CPAREN )
-#define PMD_DRV_LOG_COMMA ,
-
-/* Return the file name part of a path. */
-static inline const char *
-pmd_drv_log_basename(const char *s)
-{
- const char *n = s;
-
- while (*n)
- if (*(n++) == '/')
- s = n;
- return s;
-}
-
extern int mlx5_logtype;
-#define PMD_DRV_LOG___(level, ...) \
- rte_log(RTE_LOG_ ## level, \
- mlx5_logtype, \
- RTE_FMT(MLX5_DRIVER_NAME ": " \
- RTE_FMT_HEAD(__VA_ARGS__,), \
- RTE_FMT_TAIL(__VA_ARGS__,)))
-
-/*
- * When debugging is enabled (NDEBUG not defined), file, line and function
- * information replace the driver name (MLX5_DRIVER_NAME) in log messages.
- */
-#ifndef NDEBUG
-
-#define PMD_DRV_LOG__(level, ...) \
- PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
-#define PMD_DRV_LOG_(level, s, ...) \
- PMD_DRV_LOG__(level, \
- s "\n" PMD_DRV_LOG_COMMA \
- pmd_drv_log_basename(__FILE__) PMD_DRV_LOG_COMMA \
- __LINE__ PMD_DRV_LOG_COMMA \
- __func__, \
- __VA_ARGS__)
-
-#else /* NDEBUG */
-#define PMD_DRV_LOG__(level, ...) \
- PMD_DRV_LOG___(level, __VA_ARGS__)
-#define PMD_DRV_LOG_(level, s, ...) \
- PMD_DRV_LOG__(level, s "\n", __VA_ARGS__)
-
-#endif /* NDEBUG */
-
/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
- PMD_DRV_LOG_(level, \
+ PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \
__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
PMD_DRV_LOG_CPAREN)
-/* claim_zero() does not perform any check when debugging is disabled. */
-#ifndef NDEBUG
-
-#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
-
-#else /* NDEBUG */
-
-#define DEBUG(...) (void)0
-#define claim_zero(...) (__VA_ARGS__)
-#define claim_nonzero(...) (__VA_ARGS__)
-
-#endif /* NDEBUG */
-
#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
(((val) & (from)) / ((from) / (to))) : \
(((val) & (from)) * ((to) / (from))))
-/* Allocate a buffer on the stack and fill it with a printf format string. */
-#define MKSTR(name, ...) \
- int mkstr_size_##name = snprintf(NULL, 0, "" __VA_ARGS__); \
- char name[mkstr_size_##name + 1]; \
- \
- snprintf(name, sizeof(name), "" __VA_ARGS__)
-
/**
* Return logarithm of the nearest power of two above input value.
*
#include <rte_ethdev_driver.h>
#include <rte_common.h>
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+
#include "mlx5.h"
#include "mlx5_autoconf.h"
-#include "mlx5_glue.h"
-#include "mlx5_devx_cmds.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
_LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += -lrte_pmd_lio
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_MEMIF) += -lrte_pmd_memif
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_common_mlx5
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5
ifeq ($(CONFIG_RTE_IBVERBS_LINK_DLOPEN),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -ldl