/**
* Check whether Multi-Packet RQ is enabled for the device.
+ * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
*
* @param dev
* Pointer to Ethernet device.
return n == priv->rxqs_n;
}
+/**
+ * Check whether LRO is supported and enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 if disabled, 1 if enabled.
+ */
+inline int
+mlx5_lro_on(struct rte_eth_dev *dev)
+{
+ return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
+}
+
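A minimal usage sketch (illustrative only, not part of the patch; it assumes the mlx5_lro_on() helper above): LRO-dependent configuration paths can bail out early when the combined support/enable check fails.

/* Hypothetical caller, for illustration only. */
static void
example_apply_lro_settings(struct rte_eth_dev *dev)
{
	if (!mlx5_lro_on(dev))
		return; /* LRO unsupported or not requested: nothing to do. */
	/* LRO-specific Rx setup would follow here. */
}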
/**
* Allocate RX queue elements for Multi-Packet RQ.
*
rxq_free_elts_sprq(rxq_ctrl);
}
-/**
- * Clean up a RX queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param rxq_ctrl
- * Pointer to RX queue structure.
- */
-void
-mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
- if (rxq_ctrl->ibv)
- mlx5_rxq_ibv_release(rxq_ctrl->ibv);
- memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
-}
-
/**
* Returns the per-queue supported offloads.
*
/**
* Returns the per-port supported offloads.
*
+ * @param dev
+ * Pointer to Ethernet device.
+ *
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_get_rx_port_offloads(void)
+mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
+ if (MLX5_LRO_SUPPORTED(dev))
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
return offloads;
}
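Because the port-level capability set is now device-dependent, an application is expected to check the reported DEV_RX_OFFLOAD_TCP_LRO bit before requesting LRO. A hedged sketch using standard ethdev calls (the helper name is hypothetical):

#include <errno.h>
#include <rte_ethdev.h>

/* Hypothetical application-side helper, for illustration only. */
static int
example_request_lro(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO))
		return -ENOTSUP; /* Port does not report LRO support. */
	conf->rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	return 0;
}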
+/**
+ * Verify if the queue can be released.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * 1 if the queue can be released,
+ * 0 if the queue cannot be released because there are still references to it,
+ * negative errno otherwise and rte_errno is set if the queue does not exist.
+ */
+static int
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
+ return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+}
+
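A sketch of the kind of caller this guard serves (hypothetical function; mlx5_rxq_release() is the real release path used elsewhere in this file):

/* Hypothetical caller, for illustration only. */
static void
example_try_release(struct rte_eth_dev *dev, uint16_t idx)
{
	if (mlx5_rxq_releasable(dev, idx) != 1) {
		DRV_LOG(WARNING, "port %u Rx queue %u still in use",
			dev->data->port_id, idx);
		return;
	}
	mlx5_rxq_release(dev, idx);
}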
/**
*
* @param dev
mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
}
+/**
+ * Get an Rx queue Verbs/DevX object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The Verbs/DevX object if it exists.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (idx >= priv->rxqs_n)
+ return NULL;
+ if (!rxq_data)
+ return NULL;
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->obj)
+ rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
+ return rxq_ctrl->obj;
+}
+
+/**
+ * Release an Rx queue Verbs/DevX object.
+ *
+ * @param rxq_obj
+ * Verbs/DevX Rx queue object.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ assert(rxq_obj);
+ assert(rxq_obj->wq);
+ assert(rxq_obj->cq);
+ if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
+ rxq_free_elts(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+ claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+ if (rxq_obj->channel)
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (rxq_obj->channel));
+ LIST_REMOVE(rxq_obj, next);
+ rte_free(rxq_obj);
+ return 0;
+ }
+ return 1;
+}
+
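The object is reference counted: every successful mlx5_rxq_obj_get() must be paired with an mlx5_rxq_obj_release(), exactly as the interrupt handlers further below do. A condensed sketch of the pattern (hypothetical wrapper):

/* Hypothetical wrapper showing the get/release pairing. */
static void
example_use_rxq_obj(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, idx);

	if (!rxq_obj)
		return;
	/* ... use rxq_obj->cq or rxq_obj->channel here ... */
	mlx5_rxq_obj_release(rxq_obj); /* Balance the reference from get(). */
}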
/**
* Allocate queue vector and fill epoll fd list for Rx interrupts.
*
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
- /* This rxq ibv must not be released in this function. */
- struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
+ /* This rxq obj must not be released in this function. */
+ struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
int fd;
int flags;
int rc;
/* Skip queues that cannot request interrupts. */
- if (!rxq_ibv || !rxq_ibv->channel) {
+ if (!rxq_obj || !rxq_obj->channel) {
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
rte_errno = ENOMEM;
return -rte_errno;
}
- fd = rxq_ibv->channel->fd;
+ fd = rxq_obj->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
continue;
/**
* Need to access the queue directly to release the reference
- * kept in priv_rx_intr_vec_enable().
+ * kept in mlx5_rx_intr_vec_enable().
*/
rxq_data = (*priv->rxqs)[i];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- mlx5_rxq_ibv_release(rxq_ctrl->ibv);
+ if (rxq_ctrl->obj)
+ mlx5_rxq_obj_release(rxq_ctrl->obj);
}
free:
rte_intr_free_epoll_fd(intr_handle);
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->irq) {
- struct mlx5_rxq_ibv *rxq_ibv;
+ struct mlx5_rxq_obj *rxq_obj;
- rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
- if (!rxq_ibv) {
+ rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+ if (!rxq_obj) {
rte_errno = EINVAL;
return -rte_errno;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
- mlx5_rxq_ibv_release(rxq_ibv);
+ mlx5_rxq_obj_release(rxq_obj);
}
return 0;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
- struct mlx5_rxq_ibv *rxq_ibv = NULL;
+ struct mlx5_rxq_obj *rxq_obj = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
int ret;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
return 0;
- rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
- if (!rxq_ibv) {
+ rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+ if (!rxq_obj) {
rte_errno = EINVAL;
return -rte_errno;
}
- ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != rxq_ibv->cq) {
+ ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_obj->cq) {
rte_errno = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
- mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
+ mlx5_rxq_obj_release(rxq_obj);
return 0;
exit:
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (rxq_ibv)
- mlx5_rxq_ibv_release(rxq_ibv);
+ if (rxq_obj)
+ mlx5_rxq_obj_release(rxq_obj);
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
rte_errno = ret; /* Restore rte_errno. */
}
/**
- * Create the Rx queue Verbs object.
+ * Create a CQ Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param cqe_n
+ * Number of CQEs in CQ.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct ibv_cq *
+mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
+ struct mlx5_rxq_data *rxq_data,
+ unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
+{
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq_attr;
+
+ cq_attr.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = cqe_n,
+ .channel = rxq_obj->channel,
+ .comp_mask = 0,
+ };
+ cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
+ .comp_mask = 0,
+ };
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+ cq_attr.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ cq_attr.mlx5.cqe_comp_res_format =
+ mlx5_rxq_mprq_enabled(rxq_data) ?
+ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
+ cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
+ /*
+ * For vectorized Rx, it must not be doubled in order to
+ * make cq_ci and rq_ci aligned.
+ */
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
+ cq_attr.ibv.cqe *= 2;
+ } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for HW"
+ " timestamp",
+ dev->data->port_id);
+ }
+#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
+ if (priv->config.cqe_pad) {
+ cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
+ cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+ }
+#endif
+ return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
+ &cq_attr.ibv,
+ &cq_attr.mlx5));
+}
+
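The cqe_n argument is derived by the caller from the ring geometry, as mlx5_rxq_obj_new() does below; a worked sketch (illustrative values: wqe_n = 512 with 64 strides per WQE gives 32767 CQEs for MPRQ, 511 otherwise):

/* Illustrative sizing helper mirroring mlx5_rxq_obj_new() below: worst
 * case is one completion per stride for MPRQ, one per WQE otherwise. */
static unsigned int
example_cqe_n(unsigned int wqe_n, unsigned int strd_num_n, int mprq_en)
{
	return mprq_en ? wqe_n * (1u << strd_num_n) - 1 : wqe_n - 1;
}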
+/**
+ * Create the Rx queue Verbs/DevX object.
*
* @param dev
* Pointer to Ethernet device.
* Queue index in DPDK Rx queue array
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
+struct mlx5_rxq_obj *
+mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
- union {
- struct {
- struct ibv_cq_init_attr_ex ibv;
- struct mlx5dv_cq_init_attr mlx5;
- } cq;
- struct {
- struct ibv_wq_init_attr ibv;
+ struct {
+ struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- struct mlx5dv_wq_init_attr mlx5;
+ struct mlx5dv_wq_init_attr mlx5;
#endif
- } wq;
- struct ibv_cq_ex cq_attr;
- } attr;
+ } wq_attr;
unsigned int cqe_n;
unsigned int wqe_n = 1 << rxq_data->elts_n;
- struct mlx5_rxq_ibv *tmpl;
+ struct mlx5_rxq_obj *tmpl = NULL;
struct mlx5dv_cq cq_info;
struct mlx5dv_rwq rwq;
- unsigned int i;
int ret = 0;
struct mlx5dv_obj obj;
struct mlx5_dev_config *config = &priv->config;
const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
assert(rxq_data);
- assert(!rxq_ctrl->ibv);
+ assert(!rxq_ctrl->obj);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
priv->verbs_alloc_ctx.obj = rxq_ctrl;
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
else
cqe_n = wqe_n - 1;
- attr.cq.ibv = (struct ibv_cq_init_attr_ex){
- .cqe = cqe_n,
- .channel = tmpl->channel,
- .comp_mask = 0,
- };
- attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
- .comp_mask = 0,
- };
- if (config->cqe_comp && !rxq_data->hw_timestamp) {
- attr.cq.mlx5.comp_mask |=
- MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- attr.cq.mlx5.cqe_comp_res_format =
- mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
- MLX5DV_CQE_RES_FORMAT_HASH;
-#else
- attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
-#endif
- /*
- * For vectorized Rx, it must not be doubled in order to
- * make cq_ci and rq_ci aligned.
- */
- if (mlx5_rxq_check_vec_support(rxq_data) < 0)
- attr.cq.ibv.cqe *= 2;
- } else if (config->cqe_comp && rxq_data->hw_timestamp) {
- DRV_LOG(DEBUG,
- "port %u Rx CQE compression is disabled for HW"
- " timestamp",
- dev->data->port_id);
- }
-#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- if (config->cqe_pad) {
- attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
- attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
- }
-#endif
- tmpl->cq = mlx5_glue->cq_ex_to_cq
- (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv,
- &attr.cq.mlx5));
- if (tmpl->cq == NULL) {
+ tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
+ if (!tmpl->cq) {
DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
dev->data->port_id, idx);
rte_errno = ENOMEM;
dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
- attr.wq.ibv = (struct ibv_wq_init_attr){
+ wq_attr.ibv = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
/* Max number of outstanding WRs. */
};
/* By default, FCS (CRC) is stripped by hardware. */
if (rxq_data->crc_present) {
- attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
- attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
if (config->hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
- attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
- attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
- attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
- attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
+ wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
.comp_mask = 0,
};
if (mprq_en) {
struct mlx5dv_striding_rq_init_attr *mprq_attr =
- &attr.wq.mlx5.striding_rq_attrs;
+ &wq_attr.mlx5.striding_rq_attrs;
- attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
};
}
- tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv,
- &attr.wq.mlx5);
+ tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
+ &wq_attr.mlx5);
#else
- tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv);
+ tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
if (tmpl->wq == NULL) {
DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
* Make sure number of WRs*SGEs match expectations since a queue
* cannot allocate more than "desc" buffers.
*/
- if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
- attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
+ if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+ wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
DRV_LOG(ERR,
"port %u Rx queue %u requested %u*%u but got %u*%u"
" WRs*SGEs",
dev->data->port_id, idx,
wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
- attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
+ wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
rte_errno = EINVAL;
goto error;
}
}
/* Fill the rings. */
rxq_data->wqes = rwq.buf;
- for (i = 0; (i != wqe_n); ++i) {
- volatile struct mlx5_wqe_data_seg *scat;
- uintptr_t addr;
- uint32_t byte_count;
-
- if (mprq_en) {
- struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
-
- scat = &((volatile struct mlx5_wqe_mprq *)
- rxq_data->wqes)[i].dseg;
- addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
- byte_count = (1 << rxq_data->strd_sz_n) *
- (1 << rxq_data->strd_num_n);
- } else {
- struct rte_mbuf *buf = (*rxq_data->elts)[i];
-
- scat = &((volatile struct mlx5_wqe_data_seg *)
- rxq_data->wqes)[i];
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- byte_count = DATA_LEN(buf);
- }
- /* scat->addr must be able to store a pointer. */
- assert(sizeof(scat->addr) >= sizeof(uintptr_t));
- *scat = (struct mlx5_wqe_data_seg){
- .addr = rte_cpu_to_be_64(addr),
- .byte_count = rte_cpu_to_be_32(byte_count),
- .lkey = mlx5_rx_addr2mr(rxq_data, addr),
- };
- }
rxq_data->rq_db = rwq.dbrec;
rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
- rxq_data->cq_ci = 0;
- rxq_data->consumed_strd = 0;
- rxq_data->rq_pi = 0;
- rxq_data->zip = (struct rxq_zip){
- .ai = 0,
- };
rxq_data->cq_db = cq_info.dbrec;
rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
rxq_data->cq_uar = cq_info.cq_uar;
rxq_data->cqn = cq_info.cqn;
rxq_data->cq_arm_sn = 0;
- /* Update doorbell counter. */
- rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
- rte_wmb();
- *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
+ mlx5_rxq_initialize(rxq_data);
+ rxq_data->cq_ci = 0;
DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->wq)
- claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
- if (tmpl->cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
- if (tmpl->channel)
- claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
- rte_errno = ret; /* Restore rte_errno. */
- return NULL;
-}
-
-/**
- * Get an Rx queue Verbs object.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array
- *
- * @return
- * The Verbs object if it exists.
- */
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- if (idx >= priv->rxqs_n)
- return NULL;
- if (!rxq_data)
- return NULL;
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- if (rxq_ctrl->ibv) {
- rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- }
- return rxq_ctrl->ibv;
-}
-
-/**
- * Release an Rx verbs queue object.
- *
- * @param rxq_ibv
- * Verbs Rx queue object.
- *
- * @return
- * 1 while a reference on it exists, 0 when freed.
- */
-int
-mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
-{
- assert(rxq_ibv);
- assert(rxq_ibv->wq);
- assert(rxq_ibv->cq);
- if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
- rxq_free_elts(rxq_ibv->rxq_ctrl);
- claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
- claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
- if (rxq_ibv->channel)
+ if (tmpl) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->wq)
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+ if (tmpl->cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
+ if (tmpl->channel)
claim_zero(mlx5_glue->destroy_comp_channel
- (rxq_ibv->channel));
- LIST_REMOVE(rxq_ibv, next);
- rte_free(rxq_ibv);
- return 0;
+ (tmpl->channel));
+ rte_free(tmpl);
+ rte_errno = ret; /* Restore rte_errno. */
}
- return 1;
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return NULL;
}
/**
- * Verify the Verbs Rx queue list is empty
+ * Verify the Rx queue object list is empty
*
* @param dev
* Pointer to Ethernet device.
*
* @return
- * The number of object not released.
+ * The number of objects not released.
*/
int
-mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
- struct mlx5_rxq_ibv *rxq_ibv;
+ struct mlx5_rxq_obj *rxq_obj;
- LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
- dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
+ LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+ DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
+ dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
++ret;
}
return ret;
}
-/**
- * Return true if a single reference exists on the object.
- *
- * @param rxq_ibv
- * Verbs Rx queue object.
- */
-int
-mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
-{
- assert(rxq_ibv);
- return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
-}
-
/**
* Callback function to initialize mbufs for Multi-Packet RQ.
*/
dev->data->port_id, mp->name);
/*
* If a buffer in the pool has been externally attached to a mbuf and it
- * is still in use by application, destroying the Rx qeueue can spoil
+ * is still in use by application, destroying the Rx queue can spoil
* the packet. It is unlikely to happen but if application dynamically
* creates and destroys with holding Rx packets, this can happen.
*
return -rte_errno;
}
}
- snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init, NULL,
dev->device->numa_node, 0);
tmpl->rxq.crc_present = 0;
if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
- tmpl->rxq.crc_present = 1;
+ /*
+ * RQs used for LRO-enabled TIRs should not be
+ * configured to scatter the FCS.
+ */
+ if (mlx5_lro_on(dev))
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been "
+ "disabled but will still be performed "
+ "by hardware, because LRO is enabled",
+ dev->data->port_id);
+ else
+ tmpl->rxq.crc_present = 1;
} else {
DRV_LOG(WARNING,
"port %u CRC stripping has been disabled but will"
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
rxq);
- mlx5_rxq_ibv_get(dev, idx);
+ mlx5_rxq_obj_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
}
return rxq_ctrl;
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
assert(rxq_ctrl->priv);
- if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
- rxq_ctrl->ibv = NULL;
+ if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
+ rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
return 1;
}
-/**
- * Verify if the queue can be released.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param idx
- * RX queue index.
- *
- * @return
- * 1 if the queue can be released, negative errno otherwise and rte_errno is
- * set.
- */
-int
-mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- if (!(*priv->rxqs)[idx]) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
-}
-
/**
* Verify the Rx Queue list is empty
*
* Number of queues in the array.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
log2above(priv->config.ind_table_max_size);
if (!rxq)
goto error;
- wq[i] = rxq->ibv->wq;
+ wq[i] = rxq->obj->wq;
ind_tbl->queues[i] = queues[i];
}
ind_tbl->queues_n = queues_n;
* @return
* An indirection table if found.
*/
-struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
* @return
* 1 while a reference on it exists, 0 when freed.
*/
-int
-mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_ibv *ind_tbl)
+static int
+mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
{
unsigned int i;
* The number of objects not released.
*/
int
-mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DRV_LOG(DEBUG,
- "port %u Verbs indirection table %p still referenced",
+ "port %u indirection table obj %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
* Tunnel type.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_new(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct ibv_qp *qp;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
struct mlx5dv_qp_init_attr qp_init_attr;
int err;
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
rte_errno = err; /* Restore rte_errno. */
queues_n = hash_fields ? queues_n : 1;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
- claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
return 1;
}
* The number of objects not released.
*/
int
-mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
DRV_LOG(DEBUG,
- "port %u Verbs hash Rx queue %p still referenced",
+ "port %u hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
++ret;
}
}
/**
- * Create a drop Rx queue Verbs object.
+ * Create a drop Rx queue Verbs/DevX object.
*
* @param dev
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct ibv_context *ctx = priv->sh->ctx;
struct ibv_cq *cq;
struct ibv_wq *wq = NULL;
- struct mlx5_rxq_ibv *rxq;
+ struct mlx5_rxq_obj *rxq;
if (priv->drop_queue.rxq)
return priv->drop_queue.rxq;
}
/**
- * Release a drop Rx queue Verbs object.
+ * Release a drop Rx queue Verbs/DevX object.
*
* @param dev
* Pointer to Ethernet device.
*
- * @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-void
-mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+static void
+mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+ struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
if (rxq->wq)
claim_zero(mlx5_glue->destroy_wq(rxq->wq));
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
- struct mlx5_rxq_ibv *rxq;
- struct mlx5_ind_table_ibv tmpl;
+ struct mlx5_ind_table_obj *ind_tbl;
+ struct mlx5_rxq_obj *rxq;
+ struct mlx5_ind_table_obj tmpl;
- rxq = mlx5_rxq_ibv_drop_new(dev);
+ rxq = mlx5_rxq_obj_drop_new(dev);
if (!rxq)
return NULL;
tmpl.ind_table = mlx5_glue->create_rwq_ind_table
ind_tbl->ind_table = tmpl.ind_table;
return ind_tbl;
error:
- mlx5_rxq_ibv_drop_release(dev);
+ mlx5_rxq_obj_drop_release(dev);
return NULL;
}
* @param dev
* Pointer to Ethernet device.
*/
-void
-mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+static void
+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+ struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
- mlx5_rxq_ibv_drop_release(dev);
+ mlx5_rxq_obj_drop_release(dev);
rte_free(ind_tbl);
priv->drop_queue.hrxq->ind_table = NULL;
}
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct ibv_qp *qp;
struct mlx5_hrxq *hrxq;
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
- ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ ind_tbl = mlx5_ind_table_obj_drop_new(dev);
if (!ind_tbl)
return NULL;
qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
return hrxq;
error:
if (ind_tbl)
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
return NULL;
}
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
- claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_drop_release(dev);
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ mlx5_ind_table_obj_drop_release(dev);
rte_free(hrxq);
priv->drop_queue.hrxq = NULL;
}