}
}
+/**
+ * DV flow counter mode detect and config. Chooses fallback (non-DevX-async)
+ * counter management when the required DevX capabilities are missing.
+ *
+ * @param dev
+ *   Pointer to rte_eth_dev structure.
+ */
+void
+mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+ bool fallback;
+
+#ifndef HAVE_IBV_DEVX_ASYNC
+ fallback = true;
+#else
+ fallback = false;
+ if (!sh->cdev->config.devx || !priv->config.dv_flow_en ||
+ !hca_attr->flow_counters_dump ||
+ !(hca_attr->flow_counter_bulk_alloc_bitmap & 0x4) ||
+ (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP))
+ fallback = true;
+#endif
+ if (fallback)
+ DRV_LOG(INFO, "Use fall-back DV counter management. Flow "
+ "counter dump:%d, bulk_alloc_bitmap:0x%hhx.",
+ hca_attr->flow_counters_dump,
+ hca_attr->flow_counter_bulk_alloc_bitmap);
+ /* Initialize fallback mode only on the port that initializes sh. */
+ if (sh->refcnt == 1)
+ sh->cmng.counter_fallback = fallback;
+ else if (fallback != sh->cmng.counter_fallback)
+ DRV_LOG(WARNING, "Port %d in sh has different fallback mode "
+ "with others:%d.", PORT_ID(priv), fallback);
+#endif
+}
+
/**
* Initialize the counters management structure.
*
uint32_t ids[8];
int ret;
- if (!priv->config.hca_attr.parse_graph_flex_node) {
+ if (!priv->sh->cdev->config.hca_attr.parse_graph_flex_node) {
DRV_LOG(ERR, "Dynamic flex parser is not supported "
"for device %s.", priv->dev_data->name);
return -ENOTSUP;
return 0;
}
+/**
+ * Configure realtime timestamp format (sets config->rt_timestamp flag).
+ *
+ * @param sh
+ *   Pointer to mlx5_dev_ctx_shared object.
+ * @param config
+ *   Device configuration parameters, updated in place.
+ * @param hca_attr
+ *   Pointer to DevX HCA capabilities structure.
+ */
+void
+mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_dev_config *config,
+ struct mlx5_hca_attr *hca_attr)
+{
+ uint32_t dw_cnt = MLX5_ST_SZ_DW(register_mtutc);
+ uint32_t reg[dw_cnt];
+ int ret = ENOTSUP;
+
+ if (hca_attr->access_register_user)
+ ret = mlx5_devx_cmd_register_read(sh->cdev->ctx,
+ MLX5_REGISTER_ID_MTUTC, 0,
+ reg, dw_cnt);
+ if (!ret) {
+ uint32_t ts_mode;
+
+ /* MTUTC register is read successfully. */
+ ts_mode = MLX5_GET(register_mtutc, reg, time_stamp_mode);
+ if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME)
+ config->rt_timestamp = 1;
+ } else {
+ /* MTUTC unreadable; infer real-time mode from device frequency. */
+ if (hca_attr->dev_freq_khz == (NS_PER_S / MS_PER_S))
+ config->rt_timestamp = 1;
+ }
+}
+
/**
* Allocate shared device context. If there is multiport device the
* master and representors will share this context, if there is single
MLX5_ASSERT(spawn->max_port);
sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
sizeof(struct mlx5_dev_ctx_shared) +
- spawn->max_port *
- sizeof(struct mlx5_dev_shared_port),
+ spawn->max_port * sizeof(struct mlx5_dev_shared_port),
RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!sh) {
- DRV_LOG(ERR, "shared context allocation failure");
- rte_errno = ENOMEM;
+ DRV_LOG(ERR, "Shared context allocation failure.");
+ rte_errno = ENOMEM;
goto exit;
}
pthread_mutex_init(&sh->txpp.mutex, NULL);
sh->numa_node = spawn->cdev->dev->numa_node;
sh->cdev = spawn->cdev;
- sh->devx = sh->cdev->config.devx;
+ sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
- err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
+ err = mlx5_os_capabilities_prepare(sh);
if (err) {
- DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
+ DRV_LOG(ERR, "Fail to configure device capabilities.");
goto error;
}
sh->refcnt = 1;
strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->cdev->ctx),
sizeof(sh->ibdev_path) - 1);
/*
- * Setting port_id to max unallowed value means
- * there is no interrupt subhandler installed for
- * the given port index i.
+ * Setting port_id to max unallowed value means there is no interrupt
+ * subhandler installed for the given port index i.
*/
for (i = 0; i < sh->max_port; i++) {
sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
}
- if (sh->devx) {
+ if (sh->cdev->config.devx) {
sh->td = mlx5_devx_cmd_create_td(sh->cdev->ctx);
if (!sh->td) {
DRV_LOG(ERR, "TD allocation failure");
- err = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
if (mlx5_setup_tis(sh)) {
DRV_LOG(ERR, "TIS allocation failure");
- err = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
err = mlx5_rxtx_uars_prepare(sh);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
return sh;
error:
+ err = rte_errno;
pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
- if (sh->td)
- claim_zero(mlx5_devx_cmd_destroy(sh->td));
+ mlx5_rxtx_uars_release(sh);
i = 0;
do {
if (sh->tis[i])
claim_zero(mlx5_devx_cmd_destroy(sh->tis[i]));
} while (++i < (uint32_t)sh->bond.n_port);
- mlx5_rxtx_uars_release(sh);
+ if (sh->td)
+ claim_zero(mlx5_devx_cmd_destroy(sh->td));
mlx5_free(sh);
- MLX5_ASSERT(err > 0);
rte_errno = err;
return NULL;
}
* Only primary process handles async device events.
**/
mlx5_flow_counters_mng_close(sh);
+ if (sh->ct_mng)
+ mlx5_flow_aso_ct_mng_close(sh);
if (sh->aso_age_mng) {
mlx5_flow_aso_age_mng_close(sh);
sh->aso_age_mng = NULL;
mlx5_action_handle_flush(dev);
mlx5_flow_meter_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
rte_wmb();
/* Disable datapath on secondary process. */
mlx5_mp_os_req_stop_rxtx(dev);
if (priv->mreg_cp_tbl)
mlx5_hlist_destroy(priv->mreg_cp_tbl);
mlx5_mprq_free_mp(dev);
- if (priv->sh->ct_mng)
- mlx5_flow_aso_ct_mng_close(priv->sh);
mlx5_os_free_shared_dr(priv);
if (priv->rss_conf.rss_key != NULL)
mlx5_free(priv->rss_conf.rss_key);
} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
config->mprq.enabled = !!tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
- config->mprq.stride_num_n = tmp;
+ config->mprq.log_stride_num = tmp;
} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
- config->mprq.stride_size_n = tmp;
+ config->mprq.log_stride_size = tmp;
} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
config->mprq.max_memcpy_len = tmp;
} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
struct mlx5_dev_config *config)
{
+ struct mlx5_hca_attr *hca_attr = &spawn->cdev->config.hca_attr;
+
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
if (spawn->pci_dev != NULL) {
}
goto exit;
}
- if (config->hca_attr.eth_net_offloads) {
+ if (hca_attr->eth_net_offloads) {
/* We have DevX enabled, inline mode queried successfully. */
- switch (config->hca_attr.wqe_inline_mode) {
+ switch (hca_attr->wqe_inline_mode) {
case MLX5_CAP_INLINE_MODE_L2:
/* outer L2 header must be inlined. */
config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
/* No inline data are required by NIC. */
config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
config->hw_vlan_insert =
- config->hca_attr.wqe_vlan_insert;
+ hca_attr->wqe_vlan_insert;
DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
goto exit;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
/* inline mode is defined by NIC vport context. */
- if (!config->hca_attr.eth_virt)
+ if (!hca_attr->eth_virt)
break;
- switch (config->hca_attr.vport_inline_mode) {
+ switch (hca_attr->vport_inline_mode) {
case MLX5_INLINE_MODE_NONE:
config->txq_inline_min =
MLX5_INLINE_HSIZE_NONE;
}
/**
- * Comparison callback to sort device data.
+ * Check sibling device configurations.
*
- * This is meant to be used with qsort().
+ * Sibling devices sharing the Infiniband device context should have compatible
+ * configurations. This regards representors and bonding device.
*
- * @param a[in]
- * Pointer to pointer to first data object.
- * @param b[in]
- * Pointer to pointer to second data object.
+ * @param sh
+ * Shared device context.
+ * @param config
+ * Configuration of the device is going to be created.
+ * @param dpdk_dev
+ * Backing DPDK device.
*
* @return
- * 0 if both objects are equal, less than 0 if the first argument is less
- * than the second, greater than 0 otherwise.
+ * 0 on success, EINVAL otherwise
*/
int
-mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
+mlx5_dev_check_sibling_config(struct mlx5_dev_ctx_shared *sh,
struct mlx5_dev_config *config,
struct rte_device *dpdk_dev)
{
- struct mlx5_dev_ctx_shared *sh = priv->sh;
struct mlx5_dev_config *sh_conf = NULL;
uint16_t port_id;
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
- if (opriv && opriv != priv && opriv->sh == sh) {
+ if (opriv && opriv->sh == sh) {
sh_conf = &opriv->config;
break;
}