rxq->mprq_repl = buf;
}
DRV_LOG(DEBUG,
- "port %u Rx queue %u allocated and configured %u segments",
+ "port %u MPRQ queue %u allocated and configured %u segments",
rxq->port_id, rxq->idx, wqe_n);
return 0;
error:
(*rxq->mprq_bufs)[i]);
(*rxq->mprq_bufs)[i] = NULL;
}
- DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything",
rxq->port_id, rxq->idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
- unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
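+ /*
+ * For MPRQ, allocate one simple mbuf per stride
+ * (elts_n WQEs * strd_num_n strides) for vector processing.
+ */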
+ unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
+ (1 << rxq_ctrl->rxq.elts_n);
unsigned int i;
int err;
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
DRV_LOG(DEBUG,
- "port %u Rx queue %u allocated and configured %u segments"
+ "port %u SPRQ queue %u allocated and configured %u segments"
" (max %u packets)",
PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
elts_n / (1 << rxq_ctrl->rxq.sges_n));
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
+ int ret = 0;
+
+ /*
+ * For MPRQ we need to allocate both MPRQ buffers
+ * for WQEs and simple mbufs for vector processing.
+ */
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ ret = rxq_alloc_elts_mprq(rxq_ctrl);
+ if (ret)
+ return ret;
+ return rxq_alloc_elts_sprq(rxq_ctrl);
}
/**
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
uint16_t i;
- DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
- rxq->port_id, rxq->idx);
+ DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %u WRs",
+ rxq->port_id, rxq->idx, (1u << rxq->elts_n));
if (rxq->mprq_bufs == NULL)
return;
- MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
if ((*rxq->mprq_bufs)[i] != NULL)
mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
- const uint16_t q_n = (1 << rxq->elts_n);
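+ /* For MPRQ, the elts array holds one simple mbuf per stride. */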
+ const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
+ (1 << rxq->elts_n);
const uint16_t q_mask = q_n - 1;
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
uint16_t i;
- DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
- PORT_ID(rxq_ctrl->priv), rxq->idx);
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
+ PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
if (rxq->elts == NULL)
return;
/**
- * Some mbuf in the Ring belongs to the application. They cannot be
- * freed.
+ * Some mbufs in the ring belong to the application.
+ * They cannot be freed.
*/
if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
rxq->rq_pi = rxq->rq_ci;
}
- for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ for (i = 0; i != q_n; ++i) {
if ((*rxq->elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq->elts)[i]);
(*rxq->elts)[i] = NULL;
static void
rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
{
+ /*
+ * For MPRQ we need to free both the MPRQ buffers
+ * for WQEs and the simple mbufs for vector processing.
+ */
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
rxq_free_elts_mprq(rxq_ctrl);
- else
- rxq_free_elts_sprq(rxq_ctrl);
+ rxq_free_elts_sprq(rxq_ctrl);
}
/**
return -rte_errno;
}
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+ return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
cqe->op_own = MLX5_CQE_INVALIDATE;
}
/* Resync CQE and WQE (WQ in RESET state). */
- rte_cio_wmb();
+ rte_io_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
- rte_cio_wmb();
+ rte_io_wmb();
*rxq->rq_db = rte_cpu_to_be_32(0);
- rte_cio_wmb();
+ rte_io_wmb();
}
/**
int ret;
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- ret = priv->obj_ops->rxq_obj_modify(rxq_ctrl->obj, false);
+ ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
strerror(errno));
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
int ret;
- if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
+ if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
DRV_LOG(ERR, "Hairpin queue can't be stopped");
rte_errno = EINVAL;
return -EINVAL;
* The routine pointer depends on the process
* type, should perform check there.
*/
- if (pkt_burst == mlx5_rx_burst) {
+ if (pkt_burst == mlx5_rx_burst_vec) {
DRV_LOG(ERR, "Rx queue stop is not supported "
"for vectorized Rx");
rte_errno = EINVAL;
rte_errno = errno;
return ret;
}
- rte_cio_wmb();
+ rte_io_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
- rte_cio_wmb();
- /* Reset RQ consumer before moving queue to READY state. */
+ rte_io_wmb();
+ /* Reset RQ consumer before moving queue to READY state. */
*rxq->rq_db = rte_cpu_to_be_32(0);
- rte_cio_wmb();
- ret = priv->obj_ops->rxq_obj_modify(rxq_ctrl->obj, true);
+ rte_io_wmb();
+ ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
if (ret) {
DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s",
strerror(errno));
{
int ret;
- if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_HAIRPIN) {
+ if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
DRV_LOG(ERR, "Hairpin queue can't be started");
rte_errno = EINVAL;
return -EINVAL;
if (!rxq_ctrl->obj)
goto error;
if (rxq_ctrl->irq) {
- ret = priv->obj_ops->rxq_event_get(rxq_ctrl->obj);
+ ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
if (ret < 0)
goto error;
rxq_ctrl->rxq.cq_arm_sn++;
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
- rte_atomic16_set(&buf->refcnt, 1);
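+ /* Relaxed ordering is enough: the buffer is not yet shared. */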
+ __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
for (j = 0; j != strd_n; ++j) {
shinfo = &buf->shinfos[j];
shinfo->free_cb = mlx5_mprq_buf_free_cb;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
- unsigned int mprq_stride_nums;
- unsigned int mprq_stride_size;
- unsigned int mprq_stride_cap;
struct mlx5_dev_config *config = &priv->config;
- /*
- * Always allocate extra slots, even if eventually
- * the vector Rx will not be used.
- */
- uint16_t desc_n =
- desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
- const int mprq_en = mlx5_check_mprq_support(dev) > 0;
unsigned int max_rx_pkt_len = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
dev->data->dev_conf.rxmode.max_rx_pkt_len;
RTE_PKTMBUF_HEADROOM;
unsigned int max_lro_size = 0;
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
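+ /*
+ * Compute MPRQ geometry up front: the number of MPRQ buffers
+ * (desc >> mprq_stride_nums) is needed to size the control
+ * structure allocation below.
+ */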
+ const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+ unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
+ config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
+ unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
+ (1U << config->mprq.max_stride_size_n) ?
+ log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
+ unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
+ (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
+ (config->mprq.stride_size_n ?
+ (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
+ /*
+ * Always allocate extra slots, even if eventually
+ * the vector Rx will not be used.
+ */
+ uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
if (non_scatter_min_mbuf_size > mb_len && !(offloads &
DEV_RX_OFFLOAD_SCATTER)) {
rte_errno = ENOSPC;
return NULL;
}
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
- desc_n * sizeof(struct rte_mbuf *), 0, socket);
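+ /*
+ * Allocate the control structure together with both element
+ * arrays: desc_n simple mbuf pointers and
+ * (desc >> mprq_stride_nums) MPRQ buffer pointers.
+ */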
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
+ (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
+ 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
- mprq_stride_nums = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
- mprq_stride_size = non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ?
- log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
- mprq_stride_cap = (config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
- (config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
/*
* This Rx queue can be configured as a Multi-Packet RQ if all of the
* following conditions are met:
mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ /* Configure Rx timestamp. */
tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.timestamp_rx_flag = 0;
+ if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register(
+ &tmpl->rxq.timestamp_offset,
+ &tmpl->rxq.timestamp_rx_flag) != 0) {
+ DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
+ goto error;
+ }
/* Configure VLAN stripping. */
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.mp = mp;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.rq_repl_thresh =
- MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
+ MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
tmpl->rxq.elts =
- (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+ (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
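+ /* The MPRQ buffer array follows the simple mbuf elements. */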
+ tmpl->rxq.mprq_bufs =
+ (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
#ifndef RTE_ARCH_64
tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
#endif
tmpl->rxq.idx = idx;
- rte_atomic32_inc(&tmpl->refcnt);
+ __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
- rte_atomic32_inc(&tmpl->refcnt);
+ __atomic_add_fetch(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
}
if (rxq_data) {
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- rte_atomic32_inc(&rxq_ctrl->refcnt);
+ __atomic_add_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
}
return rxq_ctrl;
}
if (!(*priv->rxqs)[idx])
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- if (!rte_atomic32_dec_and_test(&rxq_ctrl->refcnt))
+ if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
return 1;
if (rxq_ctrl->obj) {
- priv->obj_ops->rxq_obj_release(rxq_ctrl->obj);
+ priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
LIST_REMOVE(rxq_ctrl->obj, next);
mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;
}
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
rxq_free_elts(rxq_ctrl);
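+ /* Free the control structure only when no references remain. */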
+ if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ LIST_REMOVE(rxq_ctrl, next);
+ mlx5_free(rxq_ctrl);
+ (*priv->rxqs)[idx] = NULL;
}
- LIST_REMOVE(rxq_ctrl, next);
- mlx5_free(rxq_ctrl);
- (*priv->rxqs)[idx] = NULL;
return 0;
}
unsigned int i;
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
- priv->obj_ops->ind_table_destroy(ind_tbl);
+ priv->obj_ops.ind_table_destroy(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
goto error;
ind_tbl->queues[i] = queues[i];
}
- ret = priv->obj_ops->ind_table_new(dev, n, ind_tbl);
+ ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
if (ret < 0)
goto error;
rte_atomic32_inc(&ind_tbl->refcnt);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
- priv->obj_ops->hrxq_destroy(hrxq);
+ priv->obj_ops.hrxq_destroy(hrxq);
mlx5_ind_table_obj_release(dev, hrxq->ind_table);
ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
hrxq_idx, hrxq, next);
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
- ret = priv->obj_ops->hrxq_new(dev, hrxq, tunnel);
+ ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
if (ret < 0) {
rte_errno = errno;
goto error;
return 0;
}
-/**
- * Verify the Rx Queue list is empty
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * The number of object not released.
- */
-int
-mlx5_hrxq_verify(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- uint32_t idx;
- int ret = 0;
-
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
- hrxq, next) {
- DRV_LOG(DEBUG,
- "port %u hash Rx queue %p still referenced",
- dev->data->port_id, (void *)hrxq);
- ++ret;
- }
- return ret;
-}
-
-/**
- * Create a drop Rx queue Verbs/DevX object.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_rxq_obj *
-mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_context *ctx = priv->sh->ctx;
- struct ibv_cq *cq;
- struct ibv_wq *wq = NULL;
- struct mlx5_rxq_obj *rxq;
-
- if (priv->drop_queue.rxq)
- return priv->drop_queue.rxq;
- cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
- if (!cq) {
- DEBUG("port %u cannot allocate CQ for drop queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- wq = mlx5_glue->create_wq(ctx,
- &(struct ibv_wq_init_attr){
- .wq_type = IBV_WQT_RQ,
- .max_wr = 1,
- .max_sge = 1,
- .pd = priv->sh->pd,
- .cq = cq,
- });
- if (!wq) {
- DEBUG("port %u cannot allocate WQ for drop queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
- if (!rxq) {
- DEBUG("port %u cannot allocate drop Rx queue memory",
- dev->data->port_id);
- rte_errno = ENOMEM;
- goto error;
- }
- rxq->ibv_cq = cq;
- rxq->wq = wq;
- priv->drop_queue.rxq = rxq;
- return rxq;
-error:
- if (wq)
- claim_zero(mlx5_glue->destroy_wq(wq));
- if (cq)
- claim_zero(mlx5_glue->destroy_cq(cq));
- return NULL;
-}
-
-/**
- * Release a drop Rx queue Verbs/DevX object.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static void
-mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
-
- if (rxq->wq)
- claim_zero(mlx5_glue->destroy_wq(rxq->wq));
- if (rxq->ibv_cq)
- claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
- mlx5_free(rxq);
- priv->drop_queue.rxq = NULL;
-}
-
-/**
- * Create a drop indirection table.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
- */
-static struct mlx5_ind_table_obj *
-mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_obj *ind_tbl;
- struct mlx5_rxq_obj *rxq;
- struct mlx5_ind_table_obj tmpl;
-
- rxq = mlx5_rxq_obj_drop_new(dev);
- if (!rxq)
- return NULL;
- tmpl.ind_table = mlx5_glue->create_rwq_ind_table
- (priv->sh->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = 0,
- .ind_tbl = (struct ibv_wq **)&rxq->wq,
- .comp_mask = 0,
- });
- if (!tmpl.ind_table) {
- DEBUG("port %u cannot allocate indirection table for drop"
- " queue",
- dev->data->port_id);
- rte_errno = errno;
- goto error;
- }
- ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl), 0,
- SOCKET_ID_ANY);
- if (!ind_tbl) {
- rte_errno = ENOMEM;
- goto error;
- }
- ind_tbl->ind_table = tmpl.ind_table;
- return ind_tbl;
-error:
- mlx5_rxq_obj_drop_release(dev);
- return NULL;
-}
-
-/**
- * Release a drop indirection table.
- *
- * @param dev
- * Pointer to Ethernet device.
- */
-static void
-mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
-
- claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
- mlx5_rxq_obj_drop_release(dev);
- mlx5_free(ind_tbl);
- priv->drop_queue.hrxq->ind_table = NULL;
-}
-
/**
* Create a drop Rx Hash queue.
*
* Pointer to Ethernet device.
*
* @return
- * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
-mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+mlx5_drop_action_create(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_obj *ind_tbl = NULL;
- struct ibv_qp *qp = NULL;
struct mlx5_hrxq *hrxq = NULL;
+ int ret;
if (priv->drop_queue.hrxq) {
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
- "port %u cannot allocate memory for drop queue",
+ "Port %u cannot allocate memory for drop queue.",
dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
priv->drop_queue.hrxq = hrxq;
- ind_tbl = mlx5_ind_table_obj_drop_new(dev);
- if (!ind_tbl)
- goto error;
- hrxq->ind_table = ind_tbl;
- qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
- &(struct ibv_qp_init_attr_ex){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_QP_INIT_ATTR_PD |
- IBV_QP_INIT_ATTR_IND_TABLE |
- IBV_QP_INIT_ATTR_RX_HASH,
- .rx_hash_conf = (struct ibv_rx_hash_conf){
- .rx_hash_function =
- IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = 0,
- },
- .rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd
- });
- if (!qp) {
- DEBUG("port %u cannot allocate QP for drop queue",
- dev->data->port_id);
- rte_errno = errno;
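+ /*
+ * Only the indirection table container is allocated here; the
+ * underlying Verbs/DevX objects are created by the
+ * obj_ops.drop_action_create() callback below.
+ */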
+ hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
+ 0, SOCKET_ID_ANY);
+ if (!hrxq->ind_table) {
+ rte_errno = ENOMEM;
goto error;
}
- hrxq->qp = qp;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
- if (!hrxq->action) {
- rte_errno = errno;
+ ret = priv->obj_ops.drop_action_create(dev);
+ if (ret < 0)
goto error;
- }
-#endif
rte_atomic32_set(&hrxq->refcnt, 1);
return hrxq;
error:
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- if (hrxq && hrxq->action)
- mlx5_glue->destroy_flow_action(hrxq->action);
-#endif
- if (qp)
- claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- if (ind_tbl)
- mlx5_ind_table_obj_drop_release(dev);
if (hrxq) {
+ if (hrxq->ind_table)
+ mlx5_free(hrxq->ind_table);
priv->drop_queue.hrxq = NULL;
mlx5_free(hrxq);
}
* Pointer to Ethernet device.
*/
void
-mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+mlx5_drop_action_destroy(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- mlx5_glue->destroy_flow_action(hrxq->action);
-#endif
- claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_obj_drop_release(dev);
+ priv->obj_ops.drop_action_destroy(dev);
+ mlx5_free(priv->drop_queue.rxq);
+ mlx5_free(hrxq->ind_table);
mlx5_free(hrxq);
+ priv->drop_queue.rxq = NULL;
priv->drop_queue.hrxq = NULL;
}
}
+/**
+ * Verify the hash Rx queue list is empty.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The number of objects not released.
+ */
+int
+mlx5_hrxq_verify(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ uint32_t idx;
+ int ret = 0;
+
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
+ hrxq, next) {
+ DRV_LOG(DEBUG,
+ "port %u hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
+ ++ret;
+ }
+ return ret;
+}
/**
* Set the Rx queue timestamp conversion parameters