#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
+#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include "mlx5_defs.h"
*/
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
+/*
+ * Device parameter to enable Tx scheduling on timestamps
+ * and specify the packet pacing granularity in nanoseconds.
+ */
+#define MLX5_TX_PP "tx_pp"
+
+/*
+ * Device parameter to specify skew in nanoseconds on Tx datapath,
+ * it represents the time between the SQ starting to process a WQE and
+ * the actual packet data appearing on the wire.
+ */
+#define MLX5_TX_SKEW "tx_skew"
+
/*
* Device parameter to enable hardware Tx vector.
* Deprecated, ignored (no vectorized Tx routines anymore).
/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"
+/* Device parameter to let the user manage the lacp traffic of bonded device */
+#define MLX5_LACP_BY_USER "lacp_by_user"
+
/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"
/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"
+/* The default memory allocator used in PMD. */
+#define MLX5_SYS_MEM_EN "sys_mem_en"
+
static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
/* Shared memory between primary and secondary processes. */
/* Process local data for secondary processes. */
static struct mlx5_local_data mlx5_local_data;
-/** Driver-specific log messages type. */
-int mlx5_logtype;
-static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
+ LIST_HEAD_INITIALIZER();
+static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
+static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
{
.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
.type = "mlx5_hrxq_ipool",
},
{
- .size = sizeof(struct mlx5_flow_handle),
+ /*
+ * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
+	 * It is set at run time according to the PCI function configuration.
+ */
+ .size = 0,
.trunk_size = 64,
.grow_trunk = 3,
.grow_shift = 2,
memset(&sh->cmng, 0, sizeof(sh->cmng));
TAILQ_INIT(&sh->cmng.flow_counters);
for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+ sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
+ sh->cmng.ccont[i].max_id = -1;
+ sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
+ TAILQ_INIT(&sh->cmng.ccont[i].counters);
+ rte_spinlock_init(&sh->cmng.ccont[i].csl);
}
}
*/
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
- const struct mlx5_dev_config *config __rte_unused)
+ const struct mlx5_dev_config *config)
{
uint8_t i;
+ struct mlx5_indexed_pool_config cfg;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- /*
- * While DV is supported, user chooses the verbs mode,
- * the mlx5 flow handle size is different with the
- * MLX5_FLOW_HANDLE_VERBS_SIZE.
- */
- if (!config->dv_flow_en)
- mlx5_ipool_cfg[MLX5_IPOOL_MLX5_FLOW].size =
- MLX5_FLOW_HANDLE_VERBS_SIZE;
-#endif
for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
+ cfg = mlx5_ipool_cfg[i];
+ switch (i) {
+ default:
+ break;
+ /*
+ * Set MLX5_IPOOL_MLX5_FLOW ipool size
+ * according to PCI function flow configuration.
+ */
+ case MLX5_IPOOL_MLX5_FLOW:
+ cfg.size = config->dv_flow_en ?
+ sizeof(struct mlx5_flow_handle) :
+ MLX5_FLOW_HANDLE_VERBS_SIZE;
+ break;
+ }
if (config->reclaim_mode)
- mlx5_ipool_cfg[i].release_mem_en = 1;
- sh->ipool[i] = mlx5_ipool_create(&mlx5_ipool_cfg[i]);
+ cfg.release_mem_en = 1;
+ sh->ipool[i] = mlx5_ipool_create(&cfg);
}
}
mlx5_ipool_destroy(sh->ipool[i]);
}
+/*
+ * Check whether the dynamic flex parser for eCPRI has already been created.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   true if the parser object exists, false otherwise.
+ */
+bool
+mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct mlx5_flex_parser_profiles *prf =
+				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+
+	/* The parser object, once created, is held until device close. */
+	return prf->obj != NULL;
+}
+
+/*
+ * Allocation of a flex parser for eCPRI. Once created, this parser related
+ * resources will be held until the device is closed.
+ *
+ * On failure no parser object is kept: if sample ID query fails, the newly
+ * created node is destroyed so that mlx5_flex_parser_ecpri_exist() never
+ * reports a half-initialized parser.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_profiles *prf =
+				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+	struct mlx5_devx_graph_node_attr node = {
+		.modify_field_select = 0,
+	};
+	uint32_t ids[8];
+	int ret;
+
+	if (!priv->config.hca_attr.parse_graph_flex_node) {
+		DRV_LOG(ERR, "Dynamic flex parser is not supported "
+			"for device %s.", priv->dev_data->name);
+		return -ENOTSUP;
+	}
+	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
+	/* 8 bytes now: 4B common header + 4B message body header. */
+	node.header_length_base_value = 0x8;
+	/* After MAC layer: Ether / VLAN. */
+	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
+	/* Type of compared condition should be 0xAEFE in the L2 layer. */
+	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
+	/* Sample #0: type in common header. */
+	node.sample[0].flow_match_sample_en = 1;
+	/* Fixed offset. */
+	node.sample[0].flow_match_sample_offset_mode = 0x0;
+	/* Only the 2nd byte will be used. */
+	node.sample[0].flow_match_sample_field_base_offset = 0x0;
+	/* Sample #1: message payload. */
+	node.sample[1].flow_match_sample_en = 1;
+	/* Fixed offset. */
+	node.sample[1].flow_match_sample_offset_mode = 0x0;
+	/*
+	 * Only the first two bytes will be used right now, the offset
+	 * starts after the 4-byte (one DW) common header.
+	 */
+	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
+	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+	if (!prf->obj) {
+		DRV_LOG(ERR, "Failed to create flex parser node object.");
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+	prf->num = 2;
+	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to query sample IDs.");
+		/* Do not keep a dangling parser object on failure. */
+		mlx5_devx_cmd_destroy(prf->obj);
+		prf->obj = NULL;
+		return (rte_errno == 0) ? -ENODEV : -rte_errno;
+	}
+	prf->offset[0] = 0x0;
+	prf->offset[1] = sizeof(uint32_t);
+	prf->ids[0] = ids[0];
+	prf->ids[1] = ids[1];
+	return 0;
+}
+
+/*
+ * Release the flex parser node created for eCPRI, together with its
+ * input / output arcs and DW samples, so the resources can be reused.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ */
+static void
+mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_profiles *prf =
+				&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
+
+	/* Nothing to do when the parser was never created. */
+	if (prf->obj != NULL) {
+		mlx5_devx_cmd_destroy(prf->obj);
+		prf->obj = NULL;
+	}
+}
+
/**
- * Allocate shared IB device context. If there is multiport device the
+ * Allocate shared device context. If there is multiport device the
* master and representors will share this context, if there is single
- * port dedicated IB device, the context will be used by only given
+ * port dedicated device, the context will be used by only given
* port due to unification.
*
- * Routine first searches the context for the specified IB device name,
+ * Routine first searches the context for the specified device name,
* if found the shared context assumed and reference counter is incremented.
* If no context found the new one is created and initialized with specified
- * IB device context and parameters.
+ * device context and parameters.
*
* @param[in] spawn
- * Pointer to the IB device attributes (name, port, etc).
+ * Pointer to the device attributes (name, port, etc).
* @param[in] config
* Pointer to device configuration structure.
*
* otherwise NULL and rte_errno is set.
*/
struct mlx5_dev_ctx_shared *
-mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
- const struct mlx5_dev_config *config)
+mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+ const struct mlx5_dev_config *config)
{
struct mlx5_dev_ctx_shared *sh;
int err = 0;
MLX5_ASSERT(spawn);
/* Secondary process should not create the shared context. */
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- pthread_mutex_lock(&mlx5_ibv_list_mutex);
+ pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
/* Search for IB context by device name. */
- LIST_FOREACH(sh, &mlx5_ibv_list, next) {
- if (!strcmp(sh->ibdev_name, spawn->ibv_dev->name)) {
+ LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
+ if (!strcmp(sh->ibdev_name,
+ mlx5_os_get_dev_device_name(spawn->phys_dev))) {
sh->refcnt++;
goto exit;
}
sh = rte_zmalloc("ethdev shared ib context",
sizeof(struct mlx5_dev_ctx_shared) +
spawn->max_port *
- sizeof(struct mlx5_ibv_shared_port),
+ sizeof(struct mlx5_dev_shared_port),
RTE_CACHE_LINE_SIZE);
if (!sh) {
DRV_LOG(ERR, "shared context allocation failure");
err = ENOMEM;
goto error;
}
+ sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
+ if (!sh->tx_uar) {
+ DRV_LOG(ERR, "Failed to allocate DevX UAR.");
+ err = ENOMEM;
+ goto error;
+ }
}
sh->flow_id_pool = mlx5_flow_id_pool_alloc
((1 << HAIRPIN_FLOW_ID_BITS) - 1);
err = ENOMEM;
goto error;
}
+#ifndef RTE_ARCH_64
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&sh->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&sh->uar_lock[i]);
+#endif
/*
* Once the device is added to the list of memory event
* callback, its global MR cache table cannot be expanded
err = rte_errno;
goto error;
}
+ mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
+ &sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
+ sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
+ if (!sh->cnt_id_tbl) {
+ err = rte_errno;
+ goto error;
+ }
mlx5_flow_aging_init(sh);
mlx5_flow_counters_mng_init(sh);
mlx5_flow_ipool_create(sh, config);
sh, mem_event_cb);
rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
/* Add context to the global device list. */
- LIST_INSERT_HEAD(&mlx5_ibv_list, sh, next);
+ LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
exit:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_destroy(&sh->txpp.mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
+ if (sh->cnt_id_tbl) {
+ mlx5_l3t_destroy(sh->cnt_id_tbl);
+ sh->cnt_id_tbl = NULL;
+ }
+ if (sh->tx_uar) {
+ mlx5_glue->devx_free_uar(sh->tx_uar);
+ sh->tx_uar = NULL;
+ }
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
* Pointer to mlx5_dev_ctx_shared object to free
*/
void
-mlx5_free_shared_ibctx(struct mlx5_dev_ctx_shared *sh)
+mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
- pthread_mutex_lock(&mlx5_ibv_list_mutex);
+ pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Check the object presence in the list. */
struct mlx5_dev_ctx_shared *lctx;
- LIST_FOREACH(lctx, &mlx5_ibv_list, next)
+ LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
if (lctx == sh)
break;
MLX5_ASSERT(lctx);
mlx5_flow_counters_mng_close(sh);
mlx5_flow_ipool_destroy(sh);
mlx5_os_dev_shared_handler_uninstall(sh);
+ if (sh->cnt_id_tbl) {
+ mlx5_l3t_destroy(sh->cnt_id_tbl);
+ sh->cnt_id_tbl = NULL;
+ }
+ if (sh->tx_uar) {
+ mlx5_glue->devx_free_uar(sh->tx_uar);
+ sh->tx_uar = NULL;
+ }
if (sh->pd)
claim_zero(mlx5_glue->dealloc_pd(sh->pd));
if (sh->tis)
claim_zero(mlx5_glue->close_device(sh->ctx));
if (sh->flow_id_pool)
mlx5_flow_id_pool_release(sh->flow_id_pool);
+ pthread_mutex_destroy(&sh->txpp.mutex);
rte_free(sh);
exit:
- pthread_mutex_unlock(&mlx5_ibv_list_mutex);
+ pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}
/**
snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
if (!sh->flow_tbls) {
- DRV_LOG(ERR, "flow tables with hash creation failed.\n");
+ DRV_LOG(ERR, "flow tables with hash creation failed.");
err = ENOMEM;
return err;
}
rte_wmb();
/* Disable datapath on secondary process. */
mlx5_mp_req_stop_rxtx(dev);
+ /* Free the eCPRI flex parser resource. */
+ mlx5_flex_parser_ecpri_release(dev);
if (priv->rxqs != NULL) {
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
	 * mlx5_nl_mac_addr_flush() uses ibdev_path for retrieving
* ifindex if Netlink fails.
*/
- mlx5_free_shared_ibctx(priv->sh);
+ mlx5_free_shared_dev_ctx(priv->sh);
if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
unsigned int c = 0;
uint16_t port_id;
dev->data->mac_addrs = NULL;
}
-const struct eth_dev_ops mlx5_dev_ops = {
- .dev_configure = mlx5_dev_configure,
- .dev_start = mlx5_dev_start,
- .dev_stop = mlx5_dev_stop,
- .dev_set_link_down = mlx5_set_link_down,
- .dev_set_link_up = mlx5_set_link_up,
- .dev_close = mlx5_dev_close,
- .promiscuous_enable = mlx5_promiscuous_enable,
- .promiscuous_disable = mlx5_promiscuous_disable,
- .allmulticast_enable = mlx5_allmulticast_enable,
- .allmulticast_disable = mlx5_allmulticast_disable,
- .link_update = mlx5_link_update,
- .stats_get = mlx5_stats_get,
- .stats_reset = mlx5_stats_reset,
- .xstats_get = mlx5_xstats_get,
- .xstats_reset = mlx5_xstats_reset,
- .xstats_get_names = mlx5_xstats_get_names,
- .fw_version_get = mlx5_fw_version_get,
- .dev_infos_get = mlx5_dev_infos_get,
- .read_clock = mlx5_read_clock,
- .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
- .vlan_filter_set = mlx5_vlan_filter_set,
- .rx_queue_setup = mlx5_rx_queue_setup,
- .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
- .tx_queue_setup = mlx5_tx_queue_setup,
- .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
- .rx_queue_release = mlx5_rx_queue_release,
- .tx_queue_release = mlx5_tx_queue_release,
- .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
- .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
- .mac_addr_remove = mlx5_mac_addr_remove,
- .mac_addr_add = mlx5_mac_addr_add,
- .mac_addr_set = mlx5_mac_addr_set,
- .set_mc_addr_list = mlx5_set_mc_addr_list,
- .mtu_set = mlx5_dev_set_mtu,
- .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
- .vlan_offload_set = mlx5_vlan_offload_set,
- .reta_update = mlx5_dev_rss_reta_update,
- .reta_query = mlx5_dev_rss_reta_query,
- .rss_hash_update = mlx5_rss_hash_update,
- .rss_hash_conf_get = mlx5_rss_hash_conf_get,
- .filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
- .rxq_info_get = mlx5_rxq_info_get,
- .txq_info_get = mlx5_txq_info_get,
- .rx_burst_mode_get = mlx5_rx_burst_mode_get,
- .tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .rx_queue_count = mlx5_rx_queue_count,
- .rx_queue_intr_enable = mlx5_rx_intr_enable,
- .rx_queue_intr_disable = mlx5_rx_intr_disable,
- .is_removed = mlx5_is_removed,
- .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
- .get_module_info = mlx5_get_module_info,
- .get_module_eeprom = mlx5_get_module_eeprom,
- .hairpin_cap_get = mlx5_hairpin_cap_get,
- .mtr_ops_get = mlx5_flow_meter_ops_get,
-};
-
-/* Available operations from secondary process. */
-const struct eth_dev_ops mlx5_dev_sec_ops = {
- .stats_get = mlx5_stats_get,
- .stats_reset = mlx5_stats_reset,
- .xstats_get = mlx5_xstats_get,
- .xstats_reset = mlx5_xstats_reset,
- .xstats_get_names = mlx5_xstats_get_names,
- .fw_version_get = mlx5_fw_version_get,
- .dev_infos_get = mlx5_dev_infos_get,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
- .rxq_info_get = mlx5_rxq_info_get,
- .txq_info_get = mlx5_txq_info_get,
- .rx_burst_mode_get = mlx5_rx_burst_mode_get,
- .tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .get_module_info = mlx5_get_module_info,
- .get_module_eeprom = mlx5_get_module_eeprom,
-};
-
-/* Available operations in flow isolated mode. */
-const struct eth_dev_ops mlx5_dev_ops_isolate = {
- .dev_configure = mlx5_dev_configure,
- .dev_start = mlx5_dev_start,
- .dev_stop = mlx5_dev_stop,
- .dev_set_link_down = mlx5_set_link_down,
- .dev_set_link_up = mlx5_set_link_up,
- .dev_close = mlx5_dev_close,
- .promiscuous_enable = mlx5_promiscuous_enable,
- .promiscuous_disable = mlx5_promiscuous_disable,
- .allmulticast_enable = mlx5_allmulticast_enable,
- .allmulticast_disable = mlx5_allmulticast_disable,
- .link_update = mlx5_link_update,
- .stats_get = mlx5_stats_get,
- .stats_reset = mlx5_stats_reset,
- .xstats_get = mlx5_xstats_get,
- .xstats_reset = mlx5_xstats_reset,
- .xstats_get_names = mlx5_xstats_get_names,
- .fw_version_get = mlx5_fw_version_get,
- .dev_infos_get = mlx5_dev_infos_get,
- .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
- .vlan_filter_set = mlx5_vlan_filter_set,
- .rx_queue_setup = mlx5_rx_queue_setup,
- .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
- .tx_queue_setup = mlx5_tx_queue_setup,
- .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
- .rx_queue_release = mlx5_rx_queue_release,
- .tx_queue_release = mlx5_tx_queue_release,
- .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
- .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
- .mac_addr_remove = mlx5_mac_addr_remove,
- .mac_addr_add = mlx5_mac_addr_add,
- .mac_addr_set = mlx5_mac_addr_set,
- .set_mc_addr_list = mlx5_set_mc_addr_list,
- .mtu_set = mlx5_dev_set_mtu,
- .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
- .vlan_offload_set = mlx5_vlan_offload_set,
- .filter_ctrl = mlx5_dev_filter_ctrl,
- .rx_descriptor_status = mlx5_rx_descriptor_status,
- .tx_descriptor_status = mlx5_tx_descriptor_status,
- .rxq_info_get = mlx5_rxq_info_get,
- .txq_info_get = mlx5_txq_info_get,
- .rx_burst_mode_get = mlx5_rx_burst_mode_get,
- .tx_burst_mode_get = mlx5_tx_burst_mode_get,
- .rx_queue_intr_enable = mlx5_rx_intr_enable,
- .rx_queue_intr_disable = mlx5_rx_intr_disable,
- .is_removed = mlx5_is_removed,
- .get_module_info = mlx5_get_module_info,
- .get_module_eeprom = mlx5_get_module_eeprom,
- .hairpin_cap_get = mlx5_hairpin_cap_get,
- .mtr_ops_get = mlx5_flow_meter_ops_get,
-};
-
/**
* Verify and store value for device argument.
*
mlx5_args_check(const char *key, const char *val, void *opaque)
{
struct mlx5_dev_config *config = opaque;
- unsigned long tmp;
+ unsigned long mod;
+ signed long tmp;
/* No-op, port representors are processed in mlx5_dev_spawn(). */
if (!strcmp(MLX5_REPRESENTOR, key))
return 0;
errno = 0;
- tmp = strtoul(val, NULL, 0);
+ tmp = strtol(val, NULL, 0);
if (errno) {
rte_errno = errno;
DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
return -rte_errno;
}
+ if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
+ /* Negative values are acceptable for some keys only. */
+ rte_errno = EINVAL;
+ DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
+ return -rte_errno;
+ }
+ mod = tmp >= 0 ? tmp : -tmp;
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
config->cqe_comp = !!tmp;
} else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
config->txq_inline_mpw = tmp;
} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
+ } else if (strcmp(MLX5_TX_PP, key) == 0) {
+ if (!mod) {
+ DRV_LOG(ERR, "Zero Tx packet pacing parameter");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ config->tx_pp = tmp;
+ } else if (strcmp(MLX5_TX_SKEW, key) == 0) {
+ config->tx_skew = tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
config->rx_vec_en = !!tmp;
} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
return -rte_errno;
}
config->dv_xmeta_en = tmp;
+ } else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
+ config->lacp_by_user = !!tmp;
} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
config->mr_ext_memseg_en = !!tmp;
} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
return -rte_errno;
}
config->reclaim_mode = tmp;
+ } else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
+ config->sys_mem_en = !!tmp;
} else {
DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TX_DB_NC,
+ MLX5_TX_PP,
+ MLX5_TX_SKEW,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
MLX5_L3_VXLAN_EN,
MLX5_DV_ESW_EN,
MLX5_DV_FLOW_EN,
MLX5_DV_XMETA_EN,
+ MLX5_LACP_BY_USER,
MLX5_MR_EXT_MEMSEG_EN,
MLX5_REPRESENTOR,
MLX5_MAX_DUMP_FILES_NUM,
MLX5_CLASS_ARG_NAME,
MLX5_HP_BUF_SIZE,
MLX5_RECLAIM_MEM,
+ MLX5_SYS_MEM_EN,
NULL,
};
struct rte_kvargs *kvlist;
DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
}
-/**
- * Allocate page of door-bells and register it using DevX API.
- *
- * @param [in] dev
- * Pointer to Ethernet device.
- *
- * @return
- * Pointer to new page on success, NULL otherwise.
- */
-static struct mlx5_devx_dbr_page *
-mlx5_alloc_dbr_page(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_devx_dbr_page *page;
-
- /* Allocate space for door-bell page and management data. */
- page = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_devx_dbr_page),
- RTE_CACHE_LINE_SIZE, dev->device->numa_node);
- if (!page) {
- DRV_LOG(ERR, "port %u cannot allocate dbr page",
- dev->data->port_id);
- return NULL;
- }
- /* Register allocated memory. */
- page->umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, page->dbrs,
- MLX5_DBR_PAGE_SIZE, 0);
- if (!page->umem) {
- DRV_LOG(ERR, "port %u cannot umem reg dbr page",
- dev->data->port_id);
- rte_free(page);
- return NULL;
- }
- return page;
-}
-
-/**
- * Find the next available door-bell, allocate new page if needed.
- *
- * @param [in] dev
- * Pointer to Ethernet device.
- * @param [out] dbr_page
- * Door-bell page containing the page data.
- *
- * @return
- * Door-bell address offset on success, a negative error value otherwise.
- */
-int64_t
-mlx5_get_dbr(struct rte_eth_dev *dev, struct mlx5_devx_dbr_page **dbr_page)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_devx_dbr_page *page = NULL;
- uint32_t i, j;
-
- LIST_FOREACH(page, &priv->dbrpgs, next)
- if (page->dbr_count < MLX5_DBR_PER_PAGE)
- break;
- if (!page) { /* No page with free door-bell exists. */
- page = mlx5_alloc_dbr_page(dev);
- if (!page) /* Failed to allocate new page. */
- return (-1);
- LIST_INSERT_HEAD(&priv->dbrpgs, page, next);
- }
- /* Loop to find bitmap part with clear bit. */
- for (i = 0;
- i < MLX5_DBR_BITMAP_SIZE && page->dbr_bitmap[i] == UINT64_MAX;
- i++)
- ; /* Empty. */
- /* Find the first clear bit. */
- MLX5_ASSERT(i < MLX5_DBR_BITMAP_SIZE);
- j = rte_bsf64(~page->dbr_bitmap[i]);
- page->dbr_bitmap[i] |= (UINT64_C(1) << j);
- page->dbr_count++;
- *dbr_page = page;
- return (((i * 64) + j) * sizeof(uint64_t));
-}
-
-/**
- * Release a door-bell record.
- *
- * @param [in] dev
- * Pointer to Ethernet device.
- * @param [in] umem_id
- * UMEM ID of page containing the door-bell record to release.
- * @param [in] offset
- * Offset of door-bell record in page.
- *
- * @return
- * 0 on success, a negative error value otherwise.
- */
-int32_t
-mlx5_release_dbr(struct rte_eth_dev *dev, uint32_t umem_id, uint64_t offset)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_devx_dbr_page *page = NULL;
- int ret = 0;
-
- LIST_FOREACH(page, &priv->dbrpgs, next)
- /* Find the page this address belongs to. */
- if (mlx5_os_get_umem_id(page->umem) == umem_id)
- break;
- if (!page)
- return -EINVAL;
- page->dbr_count--;
- if (!page->dbr_count) {
- /* Page not used, free it and remove from list. */
- LIST_REMOVE(page, next);
- if (page->umem)
- ret = -mlx5_glue->devx_umem_dereg(page->umem);
- rte_free(page);
- } else {
- /* Mark in bitmap that this door-bell is not in use. */
- offset /= MLX5_DBR_SIZE;
- int i = offset / 64;
- int j = offset % 64;
-
- page->dbr_bitmap[i] &= ~(UINT64_C(1) << j);
- }
- return ret;
-}
-
int
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
{
static const char *const dynf_names[] = {
RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
- RTE_MBUF_DYNFLAG_METADATA_NAME
+ RTE_MBUF_DYNFLAG_METADATA_NAME,
+ RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
};
unsigned int i;
RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
+ },
{
.vendor_id = 0
}
.remove = mlx5_pci_remove,
.dma_map = mlx5_dma_map,
.dma_unmap = mlx5_dma_unmap,
- .drv_flags = RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_INTR_RMV |
- RTE_PCI_DRV_PROBE_AGAIN,
+ .drv_flags = PCI_DRV_FLAGS,
};
+/* Initialize driver log type. */
+RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)
+
/**
* Driver initialization routine.
*/
RTE_INIT(rte_mlx5_pmd_init)
{
- /* Initialize driver log type. */
- mlx5_logtype = rte_log_register("pmd.net.mlx5");
- if (mlx5_logtype >= 0)
- rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
-
/* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
mlx5_set_cksum_table();