device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
device_attr->max_sge = attr_ex.orig_attr.max_sge;
device_attr->max_cq = attr_ex.orig_attr.max_cq;
+ device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
+ device_attr->max_mr = attr_ex.orig_attr.max_mr;
+ device_attr->max_pd = attr_ex.orig_attr.max_pd;
device_attr->max_qp = attr_ex.orig_attr.max_qp;
+ device_attr->max_srq = attr_ex.orig_attr.max_srq;
+ device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
device_attr->max_rwq_indirection_table_size =
attr_ex.rss_caps.max_rwq_indirection_table_size;
+/*
+ * Verbs buffer allocator callback (registered via
+ * MLX5DV_CTX_ATTR_BUF_ALLOCATORS) used by rdma-core for data-plane
+ * resources.  NOTE(review): with this change the opaque @data argument
+ * is the shared device context (struct mlx5_dev_ctx_shared), not the
+ * per-port private data, so the per-queue socket lookup below is
+ * dropped in favor of the shared context's NUMA node.
+ *
+ * @param size  Number of bytes to allocate.
+ * @param data  Opaque context pointer; now struct mlx5_dev_ctx_shared *.
+ * @return Pointer to page-aligned memory, or NULL with rte_errno set.
+ */
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
{
- struct mlx5_priv *priv = data;
+ struct mlx5_dev_ctx_shared *sh = data;
void *ret;
- unsigned int socket = SOCKET_ID_ANY;
+ /* Align to the system memory page size; (size_t)-1 means lookup failed. */
size_t alignment = rte_mem_page_size();
if (alignment == (size_t)-1) {
DRV_LOG(ERR, "Failed to get mem page size");
return NULL;
}
- if (priv->verbs_alloc_ctx.type == MLX5_VERBS_ALLOC_TYPE_TX_QUEUE) {
- const struct mlx5_txq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
-
- socket = ctrl->socket;
- } else if (priv->verbs_alloc_ctx.type ==
- MLX5_VERBS_ALLOC_TYPE_RX_QUEUE) {
- const struct mlx5_rxq_ctrl *ctrl = priv->verbs_alloc_ctx.obj;
-
- socket = ctrl->socket;
- }
MLX5_ASSERT(data != NULL);
+ /* Allocate on the device's NUMA node; report OOM through rte_errno. */
- ret = mlx5_malloc(0, size, alignment, socket);
+ ret = mlx5_malloc(0, size, alignment, sh->numa_node);
if (!ret && size)
rte_errno = ENOMEM;
return ret;
goto error;
/* The resources below are only valid with DV support. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ /* Init port id action cache list. */
+ snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
+ mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
+ flow_dv_port_id_create_cb,
+ flow_dv_port_id_match_cb,
+ flow_dv_port_id_remove_cb);
+ /* Init push vlan action cache list. */
+ snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
+ mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
+ flow_dv_push_vlan_create_cb,
+ flow_dv_push_vlan_match_cb,
+ flow_dv_push_vlan_remove_cb);
+ /* Init sample action cache list. */
+ snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
+ mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
+ flow_dv_sample_create_cb,
+ flow_dv_sample_match_cb,
+ flow_dv_sample_remove_cb);
+ /* Init dest array action cache list. */
+ snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
+ mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
+ flow_dv_dest_array_create_cb,
+ flow_dv_dest_array_match_cb,
+ flow_dv_dest_array_remove_cb);
/* Create tags hash list table. */
snprintf(s, sizeof(s), "%s_tags", sh->ibdev_name);
sh->tag_table = mlx5_hlist_create(s, MLX5_TAGS_HLIST_ARRAY_SIZE, 0,
MLX5_HLIST_WRITE_MOST,
- flow_dv_tag_create_cb, NULL,
+ flow_dv_tag_create_cb,
+ flow_dv_tag_match_cb,
flow_dv_tag_remove_cb);
if (!sh->tag_table) {
DRV_LOG(ERR, "tags with hash creation failed.");
snprintf(s, sizeof(s), "%s_encaps_decaps", sh->ibdev_name);
sh->encaps_decaps = mlx5_hlist_create(s,
MLX5_FLOW_ENCAP_DECAP_HTABLE_SZ,
- 0, 0, NULL, NULL, NULL);
+ 0, MLX5_HLIST_DIRECT_KEY |
+ MLX5_HLIST_WRITE_MOST,
+ flow_dv_encap_decap_create_cb,
+ flow_dv_encap_decap_match_cb,
+ flow_dv_encap_decap_remove_cb);
if (!sh->encaps_decaps) {
DRV_LOG(ERR, "encap decap hash creation failed");
err = ENOMEM;
goto error;
}
+ sh->encaps_decaps->ctx = sh;
#endif
#ifdef HAVE_MLX5DV_DR
void *domain;
err = errno;
goto error;
}
- pthread_mutex_init(&sh->dv_mutex, NULL);
sh->tx_domain = domain;
#ifdef HAVE_MLX5DV_DR_ESWITCH
if (priv->config.dv_esw_en) {
mlx5_glue->destroy_flow_action(sh->pop_vlan_action);
sh->pop_vlan_action = NULL;
}
- pthread_mutex_destroy(&sh->dv_mutex);
#endif /* HAVE_MLX5DV_DR */
if (sh->default_miss_action)
mlx5_glue->destroy_flow_action
mlx5_release_tunnel_hub(sh, priv->dev_port);
sh->tunnel_hub = NULL;
}
+ mlx5_cache_list_destroy(&sh->port_id_action_list);
+ mlx5_cache_list_destroy(&sh->push_vlan_action_list);
mlx5_free_table_hash_list(priv);
}
rte_errno = ENOMEM;
return NULL;
}
- eth_dev->device = dpdk_dev;
+ priv = eth_dev->data->dev_private;
+ if (priv->sh->bond_dev != UINT16_MAX)
+ /* For bonding port, use primary PCI device. */
+ eth_dev->device =
+ rte_eth_devices[priv->sh->bond_dev].device;
+ else
+ eth_dev->device = dpdk_dev;
eth_dev->dev_ops = &mlx5_os_dev_sec_ops;
eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status;
eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status;
goto error;
}
/* Check relax ordering support. */
- if (config->hca_attr.relaxed_ordering_write &&
- config->hca_attr.relaxed_ordering_read &&
- !haswell_broadwell_cpu)
- sh->cmng.relaxed_ordering = 1;
+ if (!haswell_broadwell_cpu) {
+ sh->cmng.relaxed_ordering_write =
+ config->hca_attr.relaxed_ordering_write;
+ sh->cmng.relaxed_ordering_read =
+ config->hca_attr.relaxed_ordering_read;
+ } else {
+ sh->cmng.relaxed_ordering_read = 0;
+ sh->cmng.relaxed_ordering_write = 0;
+ }
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
DRV_LOG(WARNING, "No available register for"
" meter.");
} else {
- priv->mtr_color_reg = ffs(reg_c_mask) - 1 +
- REG_C_0;
+ /*
+ * The meter color register is used by the
+ * flow-hit feature as well.
+ * The flow-hit feature must use REG_C_3
+ * Prefer REG_C_3 if it is available.
+ */
+ if (reg_c_mask & (1 << (REG_C_3 - REG_C_0)))
+ priv->mtr_color_reg = REG_C_3;
+ else
+ priv->mtr_color_reg = ffs(reg_c_mask)
+ - 1 + REG_C_0;
priv->mtr_en = 1;
priv->mtr_reg_share =
config->hca_attr.qos.flow_meter_reg_share;
}
}
#endif
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
+ if (config->hca_attr.flow_hit_aso &&
+ priv->mtr_color_reg == REG_C_3) {
+ sh->flow_hit_aso_en = 1;
+ err = mlx5_flow_aso_age_mng_init(sh);
+ if (err) {
+ err = -err;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "Flow Hit ASO is supported.");
+ }
+#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE)
if (config->hca_attr.log_max_ft_sampler_num > 0 &&
config->dv_flow_en) {
eth_dev->data->dev_private = priv;
priv->dev_data = eth_dev->data;
eth_dev->data->mac_addrs = priv->mac;
- eth_dev->device = dpdk_dev;
+ if (spawn->pf_bond < 0) {
+ eth_dev->device = dpdk_dev;
+ } else {
+ /* Use primary bond PCI as device. */
+ if (sh->bond_dev == UINT16_MAX) {
+ sh->bond_dev = eth_dev->data->port_id;
+ eth_dev->device = dpdk_dev;
+ } else {
+ eth_dev->device = rte_eth_devices[sh->bond_dev].device;
+ }
+ }
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Configure the first MAC address by default. */
if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
(void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
.alloc = &mlx5_alloc_verbs_buf,
.free = &mlx5_free_verbs_buf,
- .data = priv,
+ .data = sh,
}));
/* Bring Ethernet device up. */
DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
err = ENOTSUP;
goto error;
}
+ mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
+ mlx5_hrxq_create_cb,
+ mlx5_hrxq_match_cb,
+ mlx5_hrxq_remove_cb);
/* Query availability of metadata reg_c's. */
err = mlx5_flow_discover_mreg_c(eth_dev);
if (err < 0) {
priv->mreg_cp_tbl = mlx5_hlist_create(MLX5_FLOW_MREG_HNAME,
MLX5_FLOW_MREG_HTABLE_SZ,
0, 0,
- NULL, NULL, NULL);
+ flow_dv_mreg_create_cb,
+ flow_dv_mreg_match_cb,
+ flow_dv_mreg_remove_cb);
if (!priv->mreg_cp_tbl) {
err = ENOMEM;
goto error;
}
+ priv->mreg_cp_tbl->ctx = eth_dev;
}
+ rte_spinlock_init(&priv->shared_act_sl);
mlx5_flow_counter_mode_config(eth_dev);
+ if (priv->config.dv_flow_en)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
return eth_dev;
error:
if (priv) {
mlx5_drop_action_destroy(eth_dev);
if (own_domain_id)
claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ mlx5_cache_list_destroy(&priv->hrxqs);
mlx5_free(priv);
if (eth_dev != NULL)
eth_dev->data->dev_private = NULL;
(list[ns].ifindex,
&list[ns].info);
}
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
if (!ret && bd >= 0) {
switch (list[ns].info.name_type) {
case MLX5_PHYS_PORT_NAME_TYPE_UPLINK:
}
continue;
}
+#endif
if (!ret && (list[ns].info.representor ^
list[ns].info.master))
ns++;
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF:
case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX6DXVF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTXVF:
dev_config_vf = 1;
break;
default:
DRV_LOG(DEBUG, "DevX is NOT supported");
err = 0;
}
+ if (!err && sh->ctx) {
+ /* Hint libmlx5 to use PMD allocator for data plane resources */
+ mlx5_glue->dv_set_context_attr(sh->ctx,
+ MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
+ .alloc = &mlx5_alloc_verbs_buf,
+ .free = &mlx5_free_verbs_buf,
+ .data = sh,
+ }));
+ }
return err;
}