}
/**
- * Clean up a Rx queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param rxq
- * Pointer to Rx queue structure.
- */
-void
-mlx4_rxq_cleanup(struct rxq *rxq)
-{
- DEBUG("cleaning up %p", (void *)rxq);
- mlx4_rxq_free_elts(rxq);
- if (rxq->qp != NULL)
- claim_zero(ibv_destroy_qp(rxq->qp));
- if (rxq->cq != NULL)
- claim_zero(ibv_destroy_cq(rxq->cq));
- if (rxq->channel != NULL)
- claim_zero(ibv_destroy_comp_channel(rxq->channel));
- if (rxq->mr != NULL)
- claim_zero(ibv_dereg_mr(rxq->mr));
- memset(rxq, 0, sizeof(*rxq));
-}
-
-/**
- * Configure a Rx queue.
+ * DPDK callback to configure a Rx queue.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param rxq
- * Pointer to Rx queue structure.
+ * @param idx
+ * Rx queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
*   NUMA socket on which memory must be allocated.
* @param[in] conf
*   Thresholds parameters.
* @param mp
*   Memory pool for buffer allocations.
*
* @return
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx4_rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+int
+mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
struct priv *priv = dev->data->dev_private;
- struct rxq tmpl = {
- .priv = priv,
- .mp = mp,
- .socket = socket
- };
- struct ibv_qp_attr mod;
- struct ibv_qp_init_attr qp_init;
- struct ibv_recv_wr *bad_wr;
- unsigned int mb_len;
+ uint32_t mb_len = rte_pktmbuf_data_room_size(mp);
+ struct rte_flow_error error;
+ struct rxq *rxq;
int ret;
(void)conf; /* Thresholds configuration (ignored). */
- mb_len = rte_pktmbuf_data_room_size(mp);
- if (desc == 0) {
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+ if (idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
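+ /* Live reconfiguration is not supported; an existing queue must be released first. */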
+ rxq = dev->data->rx_queues[idx];
+ if (rxq) {
+ rte_errno = EEXIST;
+ ERROR("%p: Rx queue %u already configured, release it first",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ if (!desc) {
rte_errno = EINVAL;
ERROR("%p: invalid number of Rx descriptors", (void *)dev);
- goto error;
+ return -rte_errno;
+ }
+ /* Allocate and initialize Rx queue. */
+ rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
+ if (!rxq) {
+ rte_errno = ENOMEM;
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ return -rte_errno;
}
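+ /* The compound literal zero-initializes every field not listed below. */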
+ *rxq = (struct rxq){
+ .priv = priv,
+ .mp = mp,
+ .port_id = dev->data->port_id,
+ .stats.idx = idx,
+ .socket = socket,
+ };
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
    (mb_len - RTE_PKTMBUF_HEADROOM)) {
	;
}
/* Use the entire Rx mempool as the memory region. */
- tmpl.mr = mlx4_mp2mr(priv->pd, mp);
- if (tmpl.mr == NULL) {
+ rxq->mr = mlx4_mp2mr(priv->pd, mp);
+ if (!rxq->mr) {
rte_errno = EINVAL;
ERROR("%p: MR creation failure: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
if (dev->data->dev_conf.intr_conf.rxq) {
- tmpl.channel = ibv_create_comp_channel(priv->ctx);
- if (tmpl.channel == NULL) {
+ rxq->channel = ibv_create_comp_channel(priv->ctx);
+ if (rxq->channel == NULL) {
rte_errno = ENOMEM;
ERROR("%p: Rx interrupt completion channel creation"
" failure: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- if (mlx4_fd_set_non_blocking(tmpl.channel->fd) < 0) {
+ if (mlx4_fd_set_non_blocking(rxq->channel->fd) < 0) {
ERROR("%p: unable to make Rx interrupt completion"
" channel non-blocking: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
}
- tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0);
- if (tmpl.cq == NULL) {
+ rxq->cq = ibv_create_cq(priv->ctx, desc, NULL, rxq->channel, 0);
+ if (!rxq->cq) {
rte_errno = ENOMEM;
ERROR("%p: CQ creation failure: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- qp_init = (struct ibv_qp_init_attr){
- /* CQ to be associated with the send queue. */
- .send_cq = tmpl.cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = tmpl.cq,
- .cap = {
- /* Max number of outstanding WRs. */
- .max_recv_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
- .max_recv_sge = 1,
- },
- .qp_type = IBV_QPT_RAW_PACKET,
- };
- tmpl.qp = ibv_create_qp(priv->pd, &qp_init);
- if (tmpl.qp == NULL) {
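+ /* Create a raw packet QP, capping the number of Rx WRs to the device limit. */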
+ rxq->qp = ibv_create_qp
+ (priv->pd,
+ &(struct ibv_qp_init_attr){
+ .send_cq = rxq->cq,
+ .recv_cq = rxq->cq,
+ .cap = {
+ .max_recv_wr =
+ RTE_MIN(priv->device_attr.max_qp_wr,
+ desc),
+ .max_recv_sge = 1,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ });
+ if (!rxq->qp) {
rte_errno = errno ? errno : EINVAL;
ERROR("%p: QP creation failure: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- mod = (struct ibv_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE | IBV_QP_PORT);
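+ /* Transition the QP to the INIT state, bound to the physical port. */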
+ ret = ibv_modify_qp
+ (rxq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_INIT,
+ .port_num = priv->port,
+ },
+ IBV_QP_STATE | IBV_QP_PORT);
if (ret) {
rte_errno = ret;
ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- ret = mlx4_rxq_alloc_elts(&tmpl, desc);
+ ret = mlx4_rxq_alloc_elts(rxq, desc);
if (ret) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- ret = ibv_post_recv(tmpl.qp, &(*tmpl.elts)[0].wr, &bad_wr);
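+ /* Post the Rx WRs (chained by mlx4_rxq_alloc_elts); the compound literal stands in for a bad_wr variable. */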
+ ret = ibv_post_recv(rxq->qp, &(*rxq->elts)[0].wr,
+ &(struct ibv_recv_wr *){ NULL });
if (ret) {
rte_errno = ret;
- ERROR("%p: ibv_post_recv() failed for WR %p: %s",
+ ERROR("%p: ibv_post_recv() failed: %s",
(void *)dev,
- (void *)bad_wr,
strerror(rte_errno));
goto error;
}
- mod = (struct ibv_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_modify_qp(tmpl.qp, &mod, IBV_QP_STATE);
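+ /* A receive-only QP is ready once it reaches the RTR state. */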
+ ret = ibv_modify_qp
+ (rxq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR,
+ },
+ IBV_QP_STATE);
if (ret) {
rte_errno = ret;
ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- /* Save port ID. */
- tmpl.port_id = dev->data->port_id;
- DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
- /* Clean up rxq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq);
- mlx4_rxq_cleanup(rxq);
- *rxq = tmpl;
- DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
- return 0;
+ DEBUG("%p: adding Rx queue %p to list", (void *)dev, (void *)rxq);
+ dev->data->rx_queues[idx] = rxq;
+ /* Enable associated flows. */
+ ret = mlx4_flow_sync(priv, &error);
+ if (!ret)
+ return 0;
+ ERROR("cannot re-attach flow rules to queue %u"
+ " (code %d, \"%s\"), flow error type %d, cause %p, message: %s",
+ idx, -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
error:
+ dev->data->rx_queues[idx] = NULL;
ret = rte_errno;
- mlx4_rxq_cleanup(&tmpl);
+ mlx4_rx_queue_release(rxq);
rte_errno = ret;
assert(rte_errno > 0);
return -rte_errno;
}
-/**
- * DPDK callback to configure a Rx queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param idx
- * Rx queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-int
-mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
-{
- struct priv *priv = dev->data->dev_private;
- struct rxq *rxq = dev->data->rx_queues[idx];
- int ret;
-
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
- if (idx >= dev->data->nb_rx_queues) {
- rte_errno = EOVERFLOW;
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, dev->data->nb_rx_queues);
- return -rte_errno;
- }
- if (rxq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)rxq);
- if (priv->started) {
- rte_errno = EEXIST;
- return -rte_errno;
- }
- dev->data->rx_queues[idx] = NULL;
- /* Disable associated flows. */
- mlx4_flow_sync(priv, NULL);
- mlx4_rxq_cleanup(rxq);
- } else {
- rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
- if (rxq == NULL) {
- rte_errno = ENOMEM;
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- return -rte_errno;
- }
- }
- ret = mlx4_rxq_setup(dev, rxq, desc, socket, conf, mp);
- if (ret) {
- rte_free(rxq);
- } else {
- struct rte_flow_error error;
-
- rxq->stats.idx = idx;
- DEBUG("%p: adding Rx queue %p to list",
- (void *)dev, (void *)rxq);
- dev->data->rx_queues[idx] = rxq;
- /* Re-enable associated flows. */
- ret = mlx4_flow_sync(priv, &error);
- if (ret) {
- ERROR("cannot re-attach flow rules to queue %u"
- " (code %d, \"%s\"), flow error type %d,"
- " cause %p, message: %s", idx,
- -ret, strerror(-ret), error.type, error.cause,
- error.message ? error.message : "(unspecified)");
- dev->data->rx_queues[idx] = NULL;
- mlx4_rxq_cleanup(rxq);
- rte_free(rxq);
- return ret;
- }
- }
- return ret;
-}
-
/**
* DPDK callback to release a Rx queue.
*
break;
}
mlx4_flow_sync(priv, NULL);
- mlx4_rxq_cleanup(rxq);
+ mlx4_rxq_free_elts(rxq);
+ if (rxq->qp)
+ claim_zero(ibv_destroy_qp(rxq->qp));
+ if (rxq->cq)
+ claim_zero(ibv_destroy_cq(rxq->cq));
+ if (rxq->channel)
+ claim_zero(ibv_destroy_comp_channel(rxq->channel));
+ if (rxq->mr)
+ claim_zero(ibv_dereg_mr(rxq->mr));
rte_free(rxq);
}
rte_free(elts);
}
-/**
- * Clean up a Tx queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param txq
- * Pointer to Tx queue structure.
- */
-void
-mlx4_txq_cleanup(struct txq *txq)
-{
- size_t i;
-
- DEBUG("cleaning up %p", (void *)txq);
- mlx4_txq_free_elts(txq);
- if (txq->qp != NULL)
- claim_zero(ibv_destroy_qp(txq->qp));
- if (txq->cq != NULL)
- claim_zero(ibv_destroy_cq(txq->cq));
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (txq->mp2mr[i].mp == NULL)
- break;
- assert(txq->mp2mr[i].mr != NULL);
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
- }
- memset(txq, 0, sizeof(*txq));
-}
-
struct txq_mp2mr_mbuf_check_data {
int ret;
};
}
/**
- * Configure a Tx queue.
+ * DPDK callback to configure a Tx queue.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param txq
- * Pointer to Tx queue structure.
+ * @param idx
+ * Tx queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
*   NUMA socket on which memory must be allocated.
* @param[in] conf
*   Thresholds parameters.
*
* @return
* 0 on success, negative errno value otherwise and rte_errno is set.
*/
-static int
-mlx4_txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
+int
+mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
{
struct priv *priv = dev->data->dev_private;
- struct txq tmpl = {
- .priv = priv,
- .socket = socket
- };
- union {
- struct ibv_qp_init_attr init;
- struct ibv_qp_attr mod;
- } attr;
+ struct ibv_qp_init_attr qp_init_attr;
+ struct txq *txq;
int ret;
(void)conf; /* Thresholds configuration (ignored). */
- if (priv == NULL) {
- rte_errno = EINVAL;
- goto error;
+ DEBUG("%p: configuring queue %u for %u descriptors",
+ (void *)dev, idx, desc);
+ if (idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ ERROR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+ txq = dev->data->tx_queues[idx];
+ if (txq) {
+ rte_errno = EEXIST;
+ DEBUG("%p: Tx queue %u already configured, release it first",
+ (void *)dev, idx);
+ return -rte_errno;
}
- if (desc == 0) {
+ if (!desc) {
rte_errno = EINVAL;
ERROR("%p: invalid number of Tx descriptors", (void *)dev);
- goto error;
+ return -rte_errno;
}
- /* MRs will be registered in mp2mr[] later. */
- tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
- if (tmpl.cq == NULL) {
+ /* Allocate and initialize Tx queue. */
+ txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
+ if (!txq) {
+ rte_errno = ENOMEM;
+ ERROR("%p: unable to allocate queue index %u",
+ (void *)dev, idx);
+ return -rte_errno;
+ }
+ *txq = (struct txq){
+ .priv = priv,
+ .stats.idx = idx,
+ .socket = socket,
+ };
+ txq->cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+ if (!txq->cq) {
rte_errno = ENOMEM;
ERROR("%p: CQ creation failure: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.max_sge);
- attr.init = (struct ibv_qp_init_attr){
- /* CQ to be associated with the send queue. */
- .send_cq = tmpl.cq,
- /* CQ to be associated with the receive queue. */
- .recv_cq = tmpl.cq,
+ qp_init_attr = (struct ibv_qp_init_attr){
+ .send_cq = txq->cq,
+ .recv_cq = txq->cq,
.cap = {
- /* Max number of outstanding WRs. */
- .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
- priv->device_attr.max_qp_wr :
- desc),
- /* Max number of scatter/gather elements in a WR. */
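+ /* Max number of outstanding WRs, capped to the device limit. */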
+ .max_send_wr =
+ RTE_MIN(priv->device_attr.max_qp_wr, desc),
.max_send_sge = 1,
.max_inline_data = MLX4_PMD_MAX_INLINE,
},
.qp_type = IBV_QPT_RAW_PACKET,
- /*
- * Do *NOT* enable this, completions events are managed per
- * Tx burst.
- */
+ /* Do not generate completion events by default. */
.sq_sig_all = 0,
};
- tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
- if (tmpl.qp == NULL) {
+ txq->qp = ibv_create_qp(priv->pd, &qp_init_attr);
+ if (!txq->qp) {
rte_errno = errno ? errno : EINVAL;
ERROR("%p: QP creation failure: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- /* ibv_create_qp() updates this value. */
- tmpl.max_inline = attr.init.cap.max_inline_data;
- attr.mod = (struct ibv_qp_attr){
- /* Move the QP to this state. */
- .qp_state = IBV_QPS_INIT,
- /* Primary port number. */
- .port_num = priv->port
- };
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE | IBV_QP_PORT);
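+ /* Save the actual max_inline_data value updated by ibv_create_qp(). */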
+ txq->max_inline = qp_init_attr.cap.max_inline_data;
+ ret = ibv_modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_INIT,
+ .port_num = priv->port,
+ },
+ IBV_QP_STATE | IBV_QP_PORT);
if (ret) {
rte_errno = ret;
ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- ret = mlx4_txq_alloc_elts(&tmpl, desc);
+ ret = mlx4_txq_alloc_elts(txq, desc);
if (ret) {
ERROR("%p: TXQ allocation failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- attr.mod = (struct ibv_qp_attr){
- .qp_state = IBV_QPS_RTR
- };
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
+ ret = ibv_modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTR,
+ },
+ IBV_QP_STATE);
if (ret) {
rte_errno = ret;
ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- attr.mod.qp_state = IBV_QPS_RTS;
- ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
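+ /* Unlike Rx QPs, Tx QPs must also be moved to the RTS state. */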
+ ret = ibv_modify_qp
+ (txq->qp,
+ &(struct ibv_qp_attr){
+ .qp_state = IBV_QPS_RTS,
+ },
+ IBV_QP_STATE);
if (ret) {
rte_errno = ret;
ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
(void *)dev, strerror(rte_errno));
goto error;
}
- /* Clean up txq in case we're reinitializing it. */
- DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
- mlx4_txq_cleanup(txq);
- *txq = tmpl;
- DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
/* Pre-register known mempools. */
rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
+ DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
+ dev->data->tx_queues[idx] = txq;
return 0;
error:
+ dev->data->tx_queues[idx] = NULL;
ret = rte_errno;
- mlx4_txq_cleanup(&tmpl);
+ mlx4_tx_queue_release(txq);
rte_errno = ret;
assert(rte_errno > 0);
return -rte_errno;
}
-/**
- * DPDK callback to configure a Tx queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param idx
- * Tx queue index.
- * @param desc
- * Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-int
-mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_txconf *conf)
-{
- struct priv *priv = dev->data->dev_private;
- struct txq *txq = dev->data->tx_queues[idx];
- int ret;
-
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
- if (idx >= dev->data->nb_tx_queues) {
- rte_errno = EOVERFLOW;
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, dev->data->nb_tx_queues);
- return -rte_errno;
- }
- if (txq != NULL) {
- DEBUG("%p: reusing already allocated queue index %u (%p)",
- (void *)dev, idx, (void *)txq);
- if (priv->started) {
- rte_errno = EEXIST;
- return -rte_errno;
- }
- dev->data->tx_queues[idx] = NULL;
- mlx4_txq_cleanup(txq);
- } else {
- txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket);
- if (txq == NULL) {
- rte_errno = ENOMEM;
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- return -rte_errno;
- }
- }
- ret = mlx4_txq_setup(dev, txq, desc, socket, conf);
- if (ret) {
- rte_free(txq);
- } else {
- txq->stats.idx = idx;
- DEBUG("%p: adding Tx queue %p to list",
- (void *)dev, (void *)txq);
- dev->data->tx_queues[idx] = txq;
- }
- return ret;
-}
-
/**
* DPDK callback to release a Tx queue.
*
priv->dev->data->tx_queues[i] = NULL;
break;
}
- mlx4_txq_cleanup(txq);
+ mlx4_txq_free_elts(txq);
+ if (txq->qp)
+ claim_zero(ibv_destroy_qp(txq->qp));
+ if (txq->cq)
+ claim_zero(ibv_destroy_cq(txq->cq));
+ for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
+ if (!txq->mp2mr[i].mp)
+ break;
+ assert(txq->mp2mr[i].mr);
+ claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+ }
rte_free(txq);
}