X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=d3bc3ee9c3d0296a17fd6ebe10371fc4a0a43f1a;hb=23820a79891b3b087b25e3d7eb0fda2ab9104598;hp=3e82f71e34189245daf3a11f7c2c8e7de2d3c301;hpb=6bb506cc2c2d877a10275c223e94e8cf99d24872;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 3e82f71e34..d3bc3ee9c3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -93,6 +93,7 @@ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
 
 /**
  * Check whether Multi-Packet RQ is enabled for the device.
+ * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -123,6 +124,21 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
 	return n == priv->rxqs_n;
 }
 
+/**
+ * Check whether LRO is supported and enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 if disabled, 1 if enabled.
+ */
+inline int
+mlx5_lro_on(struct rte_eth_dev *dev)
+{
+	return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
+}
+
 /**
  * Allocate RX queue elements for Multi-Packet RQ.
  *
@@ -386,14 +402,19 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
 /**
  * Returns the per-port supported offloads.
  *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
  * @return
  *   Supported Rx offloads.
  */
 uint64_t
-mlx5_get_rx_port_offloads(void)
+mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
 {
 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
 
+	if (MLX5_LRO_SUPPORTED(dev))
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -512,7 +533,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
 }
 
 /**
- * Get an Rx queue Verbs object.
+ * Get an Rx queue Verbs/DevX object.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -520,10 +541,10 @@ mlx5_rx_queue_release(void *dpdk_rxq)
  *   Queue index in DPDK Rx queue array
  *
  * @return
- *   The Verbs object if it exists.
+ *   The Verbs/DevX object if it exists.
  */
-static struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
@@ -534,35 +555,35 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 	if (!rxq_data)
 		return NULL;
 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-	if (rxq_ctrl->ibv)
-		rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
-	return rxq_ctrl->ibv;
+	if (rxq_ctrl->obj)
+		rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
+	return rxq_ctrl->obj;
 }
 
 /**
- * Release an Rx verbs queue object.
+ * Release an Rx verbs/DevX queue object.
  *
- * @param rxq_ibv
- *   Verbs Rx queue object.
+ * @param rxq_obj
+ *   Verbs/DevX Rx queue object.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
 */
 static int
-mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
 {
-	assert(rxq_ibv);
-	assert(rxq_ibv->wq);
-	assert(rxq_ibv->cq);
-	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
-		rxq_free_elts(rxq_ibv->rxq_ctrl);
-		claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
-		claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
-		if (rxq_ibv->channel)
+	assert(rxq_obj);
+	assert(rxq_obj->wq);
+	assert(rxq_obj->cq);
+	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
+		rxq_free_elts(rxq_obj->rxq_ctrl);
+		claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+		claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+		if (rxq_obj->channel)
 			claim_zero(mlx5_glue->destroy_comp_channel
-				   (rxq_ibv->channel));
-		LIST_REMOVE(rxq_ibv, next);
-		rte_free(rxq_ibv);
+				   (rxq_obj->channel));
+		LIST_REMOVE(rxq_obj, next);
+		rte_free(rxq_obj);
 		return 0;
 	}
 	return 1;
@@ -601,14 +622,14 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 	}
 	intr_handle->type = RTE_INTR_HANDLE_EXT;
 	for (i = 0; i != n; ++i) {
-		/* This rxq ibv must not be released in this function. */
-		struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
+		/* This rxq obj must not be released in this function. */
+		struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
 		int fd;
 		int flags;
 		int rc;
 
 		/* Skip queues that cannot request interrupts. */
-		if (!rxq_ibv || !rxq_ibv->channel) {
+		if (!rxq_obj || !rxq_obj->channel) {
 			/* Use invalid intr_vec[] index to disable entry. */
 			intr_handle->intr_vec[i] =
 				RTE_INTR_VEC_RXTX_OFFSET +
@@ -625,7 +646,7 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
 			rte_errno = ENOMEM;
 			return -rte_errno;
 		}
-		fd = rxq_ibv->channel->fd;
+		fd = rxq_obj->channel->fd;
 		flags = fcntl(fd, F_GETFL);
 		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 		if (rc < 0) {
@@ -681,8 +702,8 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
 		 */
 		rxq_data = (*priv->rxqs)[i];
 		rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-		if (rxq_ctrl->ibv)
-			mlx5_rxq_ibv_release(rxq_ctrl->ibv);
+		if (rxq_ctrl->obj)
+			mlx5_rxq_obj_release(rxq_ctrl->obj);
 	}
 free:
 	rte_intr_free_epoll_fd(intr_handle);
@@ -742,15 +763,15 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	}
 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
 	if (rxq_ctrl->irq) {
-		struct mlx5_rxq_ibv *rxq_ibv;
+		struct mlx5_rxq_obj *rxq_obj;
 
-		rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
-		if (!rxq_ibv) {
+		rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+		if (!rxq_obj) {
 			rte_errno = EINVAL;
 			return -rte_errno;
 		}
 		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
-		mlx5_rxq_ibv_release(rxq_ibv);
+		mlx5_rxq_obj_release(rxq_obj);
 	}
 	return 0;
 }
@@ -772,7 +793,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq_data;
 	struct mlx5_rxq_ctrl *rxq_ctrl;
-	struct mlx5_rxq_ibv *rxq_ibv = NULL;
+	struct mlx5_rxq_obj *rxq_obj = NULL;
 	struct ibv_cq *ev_cq;
 	void *ev_ctx;
 	int ret;
@@ -785,24 +806,24 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
 	if (!rxq_ctrl->irq)
 		return 0;
-	rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
-	if (!rxq_ibv) {
+	rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+	if (!rxq_obj) {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
-	if (ret || ev_cq != rxq_ibv->cq) {
+	ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
+	if (ret || ev_cq != rxq_obj->cq) {
 		rte_errno = EINVAL;
 		goto exit;
 	}
 	rxq_data->cq_arm_sn++;
-	mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
-	mlx5_rxq_ibv_release(rxq_ibv);
+	mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
+	mlx5_rxq_obj_release(rxq_obj);
 	return 0;
 exit:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	if (rxq_ibv)
-		mlx5_rxq_ibv_release(rxq_ibv);
+	if (rxq_obj)
+		mlx5_rxq_obj_release(rxq_obj);
 	DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
 		dev->data->port_id, rx_queue_id);
 	rte_errno = ret; /* Restore rte_errno. */
@@ -810,7 +831,7 @@ exit:
 }
 
 /**
- * Create the Rx queue Verbs object.
+ * Create the Rx queue Verbs/DevX object.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -818,10 +839,10 @@ exit:
  *   Queue index in DPDK Rx queue array
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
+struct mlx5_rxq_obj *
+mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
@@ -839,11 +860,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 			struct mlx5dv_wq_init_attr mlx5;
 #endif
 		} wq;
-		struct ibv_cq_ex cq_attr;
 	} attr;
 	unsigned int cqe_n;
 	unsigned int wqe_n = 1 << rxq_data->elts_n;
-	struct mlx5_rxq_ibv *tmpl = NULL;
+	struct mlx5_rxq_obj *tmpl = NULL;
 	struct mlx5dv_cq cq_info;
 	struct mlx5dv_rwq rwq;
 	int ret = 0;
@@ -1042,7 +1062,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
 	DRV_LOG(DEBUG, "port %u rxq %u updated with %p",
 		dev->data->port_id, idx, (void *)&tmpl);
 	rte_atomic32_inc(&tmpl->refcnt);
-	LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	return tmpl;
 error:
@@ -1063,24 +1083,24 @@ error:
 }
 
 /**
- * Verify the Verbs Rx queue list is empty
+ * Verify the Rx queue objects list is empty
  *
  * @param dev
  *   Pointer to Ethernet device.
  *
  * @return
- *   The number of object not released.
+ *   The number of objects not released.
  */
 int
-mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret = 0;
-	struct mlx5_rxq_ibv *rxq_ibv;
+	struct mlx5_rxq_obj *rxq_obj;
 
-	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
-		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
-			dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
+	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
+			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
@@ -1122,7 +1142,7 @@ mlx5_mprq_free_mp(struct rte_eth_dev *dev)
 		dev->data->port_id, mp->name);
 	/*
 	 * If a buffer in the pool has been externally attached to a mbuf and it
-	 * is still in use by application, destroying the Rx qeueue can spoil
+	 * is still in use by application, destroying the Rx queue can spoil
	 * the packet. It is unlikely to happen but if application dynamically
 	 * creates and destroys with holding Rx packets, this can happen.
 	 *
@@ -1412,7 +1432,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	tmpl->rxq.crc_present = 0;
 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
-			tmpl->rxq.crc_present = 1;
+			/*
+			 * RQs used for LRO-enabled TIRs should not be
+			 * configured to scatter the FCS.
+			 */
+			if (mlx5_lro_on(dev))
+				DRV_LOG(WARNING,
+					"port %u CRC stripping has been "
+					"disabled but will still be performed "
+					"by hardware, because LRO is enabled",
+					dev->data->port_id);
+			else
+				tmpl->rxq.crc_present = 1;
 		} else {
 			DRV_LOG(WARNING,
 				"port %u CRC stripping has been disabled but will"
@@ -1471,7 +1502,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
 		rxq_ctrl = container_of((*priv->rxqs)[idx],
					struct mlx5_rxq_ctrl,
					rxq);
-		mlx5_rxq_ibv_get(dev, idx);
+		mlx5_rxq_obj_get(dev, idx);
 		rte_atomic32_inc(&rxq_ctrl->refcnt);
 	}
 	return rxq_ctrl;
@@ -1498,8 +1529,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 		return 0;
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
 	assert(rxq_ctrl->priv);
-	if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
-		rxq_ctrl->ibv = NULL;
+	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
+		rxq_ctrl->obj = NULL;
 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
 		mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq_ctrl, next);
@@ -1545,14 +1576,14 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
  *   Number of queues in the array.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
		       uint32_t queues_n)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
 		log2above(queues_n) :
 		log2above(priv->config.ind_table_max_size);
@@ -1571,7 +1602,7 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
 
 		if (!rxq)
 			goto error;
-		wq[i] = rxq->ibv->wq;
+		wq[i] = rxq->obj->wq;
 		ind_tbl->queues[i] = queues[i];
 	}
 	ind_tbl->queues_n = queues_n;
@@ -1611,12 +1642,12 @@ error:
  * @return
  *   An indirection table if found.
  */
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
		       uint32_t queues_n)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
 		if ((ind_tbl->queues_n == queues_n) &&
@@ -1647,8 +1678,8 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
  *   1 while a reference on it exists, 0 when freed.
  */
 static int
-mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
-			   struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+			   struct mlx5_ind_table_obj *ind_tbl)
 {
 	unsigned int i;
 
@@ -1675,15 +1706,15 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
  *   The number of object not released.
 */
 int
-mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	int ret = 0;
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
 		DRV_LOG(DEBUG,
-			"port %u Verbs indirection table %p still referenced",
+			"port %u indirection table obj %p still referenced",
			dev->data->port_id, (void *)ind_tbl);
 		++ret;
 	}
 	return ret;
@@ -1710,7 +1741,7 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
  *   Tunnel type.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
 struct mlx5_hrxq *
 mlx5_hrxq_new(struct rte_eth_dev *dev,
@@ -1721,7 +1752,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	struct ibv_qp *qp;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
 	struct mlx5dv_qp_init_attr qp_init_attr;
@@ -1729,9 +1760,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	int err;
 	queues_n = hash_fields ? queues_n : 1;
-	ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
-		ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return NULL;
 	}
@@ -1813,7 +1844,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
 	return hrxq;
 error:
 	err = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_ind_table_ibv_release(dev, ind_tbl);
+	mlx5_ind_table_obj_release(dev, ind_tbl);
 	if (qp)
 		claim_zero(mlx5_glue->destroy_qp(qp));
 	rte_errno = err; /* Restore rte_errno. */
@@ -1847,7 +1878,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 
 	queues_n = hash_fields ? queues_n : 1;
 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
-		struct mlx5_ind_table_ibv *ind_tbl;
+		struct mlx5_ind_table_obj *ind_tbl;
 
 		if (hrxq->rss_key_len != rss_key_len)
 			continue;
@@ -1855,11 +1886,11 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
 			continue;
 		if (hrxq->hash_fields != hash_fields)
 			continue;
-		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			continue;
 		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_ibv_release(dev, ind_tbl);
+			mlx5_ind_table_obj_release(dev, ind_tbl);
 			continue;
 		}
 		rte_atomic32_inc(&hrxq->refcnt);
@@ -1887,12 +1918,12 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-		mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
 		LIST_REMOVE(hrxq, next);
 		rte_free(hrxq);
 		return 0;
 	}
-	claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
 	return 1;
 }
 
@@ -1906,7 +1937,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
  *   The number of object not released.
 */
 int
-mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -1914,7 +1945,7 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
 		DRV_LOG(DEBUG,
-			"port %u Verbs hash Rx queue %p still referenced",
+			"port %u hash Rx queue %p still referenced",
			dev->data->port_id, (void *)hrxq);
 		++ret;
 	}
 	return ret;
@@ -1922,22 +1953,22 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
 }
 
 /**
- * Create a drop Rx queue Verbs object.
+ * Create a drop Rx queue Verbs/DevX object.
  *
  * @param dev
  *   Pointer to Ethernet device.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct ibv_context *ctx = priv->sh->ctx;
 	struct ibv_cq *cq;
 	struct ibv_wq *wq = NULL;
-	struct mlx5_rxq_ibv *rxq;
+	struct mlx5_rxq_obj *rxq;
 
 	if (priv->drop_queue.rxq)
 		return priv->drop_queue.rxq;
@@ -1982,19 +2013,19 @@ error:
 }
 
 /**
- * Release a drop Rx queue Verbs object.
+ * Release a drop Rx queue Verbs/DevX object.
  *
  * @param dev
  *   Pointer to Ethernet device.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
 static void
-mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
 
 	if (rxq->wq)
 		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
@@ -2011,17 +2042,17 @@ mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
  *   Pointer to Ethernet device.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
-	struct mlx5_rxq_ibv *rxq;
-	struct mlx5_ind_table_ibv tmpl;
+	struct mlx5_ind_table_obj *ind_tbl;
+	struct mlx5_rxq_obj *rxq;
+	struct mlx5_ind_table_obj tmpl;
 
-	rxq = mlx5_rxq_ibv_drop_new(dev);
+	rxq = mlx5_rxq_obj_drop_new(dev);
 	if (!rxq)
 		return NULL;
 	tmpl.ind_table = mlx5_glue->create_rwq_ind_table
@@ -2046,7 +2077,7 @@ mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
 	ind_tbl->ind_table = tmpl.ind_table;
 	return ind_tbl;
 error:
-	mlx5_rxq_ibv_drop_release(dev);
+	mlx5_rxq_obj_drop_release(dev);
 	return NULL;
 }
 
@@ -2057,13 +2088,13 @@ error:
  *   Pointer to Ethernet device.
 */
 static void
-mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+	struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
 
 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
-	mlx5_rxq_ibv_drop_release(dev);
+	mlx5_rxq_obj_drop_release(dev);
 	rte_free(ind_tbl);
 	priv->drop_queue.hrxq->ind_table = NULL;
 }
@@ -2075,13 +2106,13 @@ mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
  *   Pointer to Ethernet device.
  *
  * @return
- *   The Verbs object initialised, NULL otherwise and rte_errno is set.
+ *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
  */
 struct mlx5_hrxq *
 mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_ibv *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
 	struct ibv_qp *qp;
 	struct mlx5_hrxq *hrxq;
 
@@ -2089,7 +2120,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
 		return priv->drop_queue.hrxq;
 	}
-	ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+	ind_tbl = mlx5_ind_table_obj_drop_new(dev);
 	if (!ind_tbl)
 		return NULL;
 	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
@@ -2137,7 +2168,7 @@ mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
 	return hrxq;
 error:
 	if (ind_tbl)
-		mlx5_ind_table_ibv_drop_release(dev);
+		mlx5_ind_table_obj_drop_release(dev);
 	return NULL;
 }
 
@@ -2158,7 +2189,7 @@ mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
 	mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-	mlx5_ind_table_ibv_drop_release(dev);
+	mlx5_ind_table_obj_drop_release(dev);
 	rte_free(hrxq);
 	priv->drop_queue.hrxq = NULL;
 }
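
Illustration (not part of the patch above): the mlx5_get_rx_port_offloads() hunk changes the function to take the device pointer so that DEV_RX_OFFLOAD_TCP_LRO is advertised only when the port actually supports LRO, the same support check that the new mlx5_lro_on() helper combines with MLX5_LRO_ENABLED(). The standalone sketch below shows only that capability-gating pattern; the device structure, flag values and function names here are simplified placeholders, not the DPDK/mlx5 definitions.

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the DEV_RX_OFFLOAD_* flags. */
    #define RX_OFFLOAD_VLAN_FILTER (UINT64_C(1) << 0)
    #define RX_OFFLOAD_TCP_LRO     (UINT64_C(1) << 1)

    /* Simplified stand-in for struct rte_eth_dev. */
    struct toy_dev {
            bool lro_supported; /* what MLX5_LRO_SUPPORTED(dev) would answer */
    };

    /* Report per-port Rx offload capabilities, gating LRO on device support. */
    static uint64_t
    toy_get_rx_port_offloads(const struct toy_dev *dev)
    {
            uint64_t offloads = RX_OFFLOAD_VLAN_FILTER;

            if (dev->lro_supported)
                    offloads |= RX_OFFLOAD_TCP_LRO;
            return offloads;
    }

    int
    main(void)
    {
            struct toy_dev dev = { .lro_supported = true };

            printf("rx offload capa: 0x%" PRIx64 "\n",
                   toy_get_rx_port_offloads(&dev));
            return 0;
    }

Advertising the flag only when the hardware can honour it lets the ethdev layer reject LRO configuration requests on ports that never report the capability.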