#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
+#include "mlx5_flow.h"
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
struct mlx5_priv *priv = dev->data->dev_private;
uint16_t i;
uint16_t n = 0;
+ uint16_t n_ibv = 0;
if (mlx5_check_mprq_support(dev) < 0)
return 0;
/* All the configured queues should be enabled. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+ (rxq, struct mlx5_rxq_ctrl, rxq);
- if (!rxq)
+ if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
+ n_ibv++;
if (mlx5_rxq_mprq_enabled(rxq))
++n;
}
/* Multi-Packet RQ can't be partially configured. */
- assert(n == 0 || n == priv->rxqs_n);
- return n == priv->rxqs_n;
+ assert(n == 0 || n == n_ibv);
+ return n == n_ibv;
}
/**
}
DRV_LOG(DEBUG,
"port %u Rx queue %u allocated and configured %u segments",
- rxq->port_id, rxq_ctrl->idx, wqe_n);
+ rxq->port_id, rxq->idx, wqe_n);
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
(*rxq->mprq_bufs)[i] = NULL;
}
DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
- rxq->port_id, rxq_ctrl->idx);
+ rxq->port_id, rxq->idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
DRV_LOG(DEBUG,
"port %u Rx queue %u allocated and configured %u segments"
" (max %u packets)",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
uint16_t i;
DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
- rxq->port_id, rxq_ctrl->idx);
+ rxq->port_id, rxq->idx);
if (rxq->mprq_bufs == NULL)
return;
assert(mlx5_rxq_check_vec_support(rxq) < 0);
uint16_t i;
DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ PORT_ID(rxq_ctrl->priv), rxq->idx);
if (rxq->elts == NULL)
return;
/**
rxq_free_elts_sprq(rxq_ctrl);
}
-/**
- * Clean up a RX queue.
- *
- * Destroy objects, free allocated memory and reset the structure for reuse.
- *
- * @param rxq_ctrl
- * Pointer to RX queue structure.
- */
-void
-mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
-{
- DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
- if (rxq_ctrl->ibv)
- mlx5_rxq_ibv_release(rxq_ctrl->ibv);
- memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
-}
-
/**
* Returns the per-queue supported offloads.
*
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_JUMBO_FRAME);
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH);
if (config->hw_fcs_strip)
offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
DEV_RX_OFFLOAD_TCP_CKSUM);
if (config->hw_vlan_strip)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (MLX5_LRO_SUPPORTED(dev))
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
return offloads;
}
}
/**
+ * Verify if the queue can be released.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * 1 if the queue can be released
+ * 0 if the queue cannot be released because references to it still exist,
+ * a negative errno value otherwise and rte_errno is set if the queue doesn't exist.
+ */
+static int
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
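+ /* The queue is releasable when only the initial reference remains. */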
+ return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+}
+
+/**
+ * Rx queue presetup checks.
*
* @param dev
* Pointer to Ethernet device structure.
* RX queue index.
* @param desc
* Number of descriptors to configure in queue.
- * @param socket
- * NUMA socket on which memory must be allocated.
- * @param[in] conf
- * Thresholds parameters.
- * @param mp
- * Memory pool for buffer allocations.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+static int
+mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
return -rte_errno;
}
mlx5_rxq_release(dev, idx);
+ return 0;
+}
+
+/**
+ * DPDK callback to configure a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int res;
+
+ res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+ if (res)
+ return res;
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
return 0;
}
+/**
+ * DPDK callback to configure a Rx hairpin queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param hairpin_conf
+ * Hairpin configuration parameters.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc,
+ const struct rte_eth_hairpin_conf *hairpin_conf)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ int res;
+
+ res = mlx5_rx_queue_pre_setup(dev, idx, desc);
+ if (res)
+ return res;
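+ /*
+ * Only a single hairpin peer, on the same port and bound to a valid
+ * Tx queue, is currently supported.
+ */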
+ if (hairpin_conf->peer_count != 1 ||
+ hairpin_conf->peers[0].port != dev->data->port_id ||
+ hairpin_conf->peers[0].queue >= priv->txqs_n) {
+ DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u:"
+ " invalid hairpin configuration", dev->data->port_id,
+ idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+ if (!rxq_ctrl) {
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ dev->data->port_id, idx);
+ (*priv->rxqs)[idx] = &rxq_ctrl->rxq;
+ return 0;
+}
+
/**
* DPDK callback to release a RX queue.
*
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
- if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+ if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx))
rte_panic("port %u Rx queue %u is still used by a flow and"
" cannot be removed\n",
- PORT_ID(priv), rxq_ctrl->idx);
- mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
+ PORT_ID(priv), rxq->idx);
+ mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx);
+}
+
+/**
+ * Get an Rx queue Verbs/DevX object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The Verbs/DevX object if it exists.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (idx >= priv->rxqs_n)
+ return NULL;
+ if (!rxq_data)
+ return NULL;
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->obj)
+ rte_atomic32_inc(&rxq_ctrl->obj->refcnt);
+ return rxq_ctrl->obj;
+}
+
+/**
+ * Release the resources allocated for an RQ DevX object.
+ *
+ * @param rxq_ctrl
+ * Pointer to the Rx queue control structure owning the DevX RQ.
+ */
+static void
+rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ if (rxq_ctrl->rxq.wqes) {
+ rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
+ rxq_ctrl->rxq.wqes = NULL;
+ }
+ if (rxq_ctrl->wq_umem) {
+ mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
+ rxq_ctrl->wq_umem = NULL;
+ }
+}
+
+/**
+ * Release Rx hairpin related resources.
+ *
+ * @param rxq_obj
+ * Hairpin Rx queue object.
+ */
+static void
+rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
+
+ assert(rxq_obj);
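+ /* Move the hairpin RQ back to reset state before destroying it. */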
+ rq_attr.state = MLX5_RQC_STATE_RST;
+ rq_attr.rq_state = MLX5_RQC_STATE_RDY;
+ mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
+ claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
+}
+
+/**
+ * Release an Rx verbs/DevX queue object.
+ *
+ * @param rxq_obj
+ * Verbs/DevX Rx queue object.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
+{
+ assert(rxq_obj);
+ if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
+ switch (rxq_obj->type) {
+ case MLX5_RXQ_OBJ_TYPE_IBV:
+ assert(rxq_obj->wq);
+ assert(rxq_obj->cq);
+ rxq_free_elts(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+ claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+ break;
+ case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
+ assert(rxq_obj->cq);
+ assert(rxq_obj->rq);
+ rxq_free_elts(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
+ rxq_release_rq_resources(rxq_obj->rxq_ctrl);
+ claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+ break;
+ case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
+ assert(rxq_obj->rq);
+ rxq_obj_hairpin_release(rxq_obj);
+ break;
+ }
+ if (rxq_obj->channel)
+ claim_zero(mlx5_glue->destroy_comp_channel
+ (rxq_obj->channel));
+ LIST_REMOVE(rxq_obj, next);
+ rte_free(rxq_obj);
+ return 0;
+ }
+ return 1;
}
/**
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
- /* This rxq ibv must not be released in this function. */
- struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
+ /* This rxq obj must not be released in this function. */
+ struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
int fd;
int flags;
int rc;
/* Skip queues that cannot request interrupts. */
- if (!rxq_ibv || !rxq_ibv->channel) {
+ if (!rxq_obj || !rxq_obj->channel) {
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
rte_errno = ENOMEM;
return -rte_errno;
}
- fd = rxq_ibv->channel->fd;
+ fd = rxq_obj->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
continue;
/**
* Need to access directly the queue to release the reference
- * kept in priv_rx_intr_vec_enable().
+ * kept in mlx5_rx_intr_vec_enable().
*/
rxq_data = (*priv->rxqs)[i];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- mlx5_rxq_ibv_release(rxq_ctrl->ibv);
+ if (rxq_ctrl->obj)
+ mlx5_rxq_obj_release(rxq_ctrl->obj);
}
free:
rte_intr_free_epoll_fd(intr_handle);
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->irq) {
- struct mlx5_rxq_ibv *rxq_ibv;
+ struct mlx5_rxq_obj *rxq_obj;
- rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
- if (!rxq_ibv) {
+ rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+ if (!rxq_obj) {
rte_errno = EINVAL;
return -rte_errno;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
- mlx5_rxq_ibv_release(rxq_ibv);
+ mlx5_rxq_obj_release(rxq_obj);
}
return 0;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
- struct mlx5_rxq_ibv *rxq_ibv = NULL;
+ struct mlx5_rxq_obj *rxq_obj = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
int ret;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
return 0;
- rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
- if (!rxq_ibv) {
+ rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id);
+ if (!rxq_obj) {
rte_errno = EINVAL;
return -rte_errno;
}
- ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
- if (ret || ev_cq != rxq_ibv->cq) {
+ ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != rxq_obj->cq) {
rte_errno = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
- mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
+ mlx5_rxq_obj_release(rxq_obj);
return 0;
exit:
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (rxq_ibv)
- mlx5_rxq_ibv_release(rxq_ibv);
+ if (rxq_obj)
+ mlx5_rxq_obj_release(rxq_obj);
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
rte_errno = ret; /* Restore rte_errno. */
}
/**
- * Create the Rx queue Verbs object.
+ * Create a CQ Verbs object.
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * Queue index in DPDK Rx queue array
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param cqe_n
+ * Number of CQEs in CQ.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
*
* @return
* The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
+static struct ibv_cq *
+mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
+ struct mlx5_rxq_data *rxq_data,
+ unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- struct ibv_wq_attr mod;
- union {
- struct {
- struct ibv_cq_init_attr_ex ibv;
- struct mlx5dv_cq_init_attr mlx5;
- } cq;
- struct {
- struct ibv_wq_init_attr ibv;
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- struct mlx5dv_wq_init_attr mlx5;
-#endif
- } wq;
- struct ibv_cq_ex cq_attr;
- } attr;
- unsigned int cqe_n;
- unsigned int wqe_n = 1 << rxq_data->elts_n;
- struct mlx5_rxq_ibv *tmpl;
- struct mlx5dv_cq cq_info;
- struct mlx5dv_rwq rwq;
- unsigned int i;
- int ret = 0;
- struct mlx5dv_obj obj;
- struct mlx5_dev_config *config = &priv->config;
- const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq_attr;
- assert(rxq_data);
- assert(!rxq_ctrl->ibv);
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
- priv->verbs_alloc_ctx.obj = rxq_ctrl;
- tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
- rxq_ctrl->socket);
- if (!tmpl) {
- DRV_LOG(ERR,
- "port %u Rx queue %u cannot allocate verbs resources",
- dev->data->port_id, rxq_ctrl->idx);
- rte_errno = ENOMEM;
- goto error;
- }
- tmpl->rxq_ctrl = rxq_ctrl;
- if (rxq_ctrl->irq) {
- tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
- if (!tmpl->channel) {
- DRV_LOG(ERR, "port %u: comp channel creation failure",
- dev->data->port_id);
- rte_errno = ENOMEM;
- goto error;
- }
- }
- if (mprq_en)
- cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
- else
- cqe_n = wqe_n - 1;
- attr.cq.ibv = (struct ibv_cq_init_attr_ex){
+ cq_attr.ibv = (struct ibv_cq_init_attr_ex){
.cqe = cqe_n,
- .channel = tmpl->channel,
+ .channel = rxq_obj->channel,
.comp_mask = 0,
};
- attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
+ cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (config->cqe_comp && !rxq_data->hw_timestamp) {
- attr.cq.mlx5.comp_mask |=
- MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+ !rxq_data->lro) {
+ cq_attr.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- attr.cq.mlx5.cqe_comp_res_format =
- mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
- MLX5DV_CQE_RES_FORMAT_HASH;
+ cq_attr.mlx5.cqe_comp_res_format =
+ mlx5_rxq_mprq_enabled(rxq_data) ?
+ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
#else
- attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+ cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
*/
if (mlx5_rxq_check_vec_support(rxq_data) < 0)
- attr.cq.ibv.cqe *= 2;
- } else if (config->cqe_comp && rxq_data->hw_timestamp) {
+ cq_attr.ibv.cqe *= 2;
+ } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
DRV_LOG(DEBUG,
"port %u Rx CQE compression is disabled for HW"
" timestamp",
dev->data->port_id);
+ } else if (priv->config.cqe_comp && rxq_data->lro) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for LRO",
+ dev->data->port_id);
}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
- if (config->cqe_pad) {
- attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
- attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+ if (priv->config.cqe_pad) {
+ cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
+ cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
}
#endif
- tmpl->cq = mlx5_glue->cq_ex_to_cq
- (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
- &attr.cq.mlx5));
- if (tmpl->cq == NULL) {
- DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
- }
- DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
- DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_sge);
- attr.wq.ibv = (struct ibv_wq_init_attr){
+ return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
+ &cq_attr.ibv,
+ &cq_attr.mlx5));
+}
+
+/**
+ * Create a WQ Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ * @param wqe_n
+ * Number of WQEs in WQ.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct ibv_wq *
+mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv,
+ struct mlx5_rxq_data *rxq_data, uint16_t idx,
+ unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj)
+{
+ struct {
+ struct ibv_wq_init_attr ibv;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ struct mlx5dv_wq_init_attr mlx5;
+#endif
+ } wq_attr;
+
+ wq_attr.ibv = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
/* Max number of outstanding WRs. */
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
.pd = priv->sh->pd,
- .cq = tmpl->cq,
- .comp_mask =
- IBV_WQ_FLAGS_CVLAN_STRIPPING |
- 0,
+ .cq = rxq_obj->cq,
+ .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
.create_flags = (rxq_data->vlan_strip ?
- IBV_WQ_FLAGS_CVLAN_STRIPPING :
- 0),
+ IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
};
/* By default, FCS (CRC) is stripped by hardware. */
if (rxq_data->crc_present) {
- attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
- attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
- if (config->hw_padding) {
+ if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
- attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
- attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
- attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
- attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
+ wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
+ wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
.comp_mask = 0,
};
- if (mprq_en) {
+ if (mlx5_rxq_mprq_enabled(rxq_data)) {
struct mlx5dv_striding_rq_init_attr *mprq_attr =
- &attr.wq.mlx5.striding_rq_attrs;
+ &wq_attr.mlx5.striding_rq_attrs;
- attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
};
}
- tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
- &attr.wq.mlx5);
+ rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
+ &wq_attr.mlx5);
#else
- tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
+ rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
- if (tmpl->wq == NULL) {
- DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
- dev->data->port_id, idx);
- rte_errno = ENOMEM;
- goto error;
+ if (rxq_obj->wq) {
+ /*
+ * Make sure number of WRs*SGEs match expectations since a queue
+ * cannot allocate more than "desc" buffers.
+ */
+ if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+ wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u requested %u*%u but got"
+ " %u*%u WRs*SGEs",
+ dev->data->port_id, idx,
+ wqe_n >> rxq_data->sges_n,
+ (1 << rxq_data->sges_n),
+ wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
+ claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+ rxq_obj->wq = NULL;
+ rte_errno = EINVAL;
+ }
}
- /*
- * Make sure number of WRs*SGEs match expectations since a queue
- * cannot allocate more than "desc" buffers.
- */
- if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
- attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
- DRV_LOG(ERR,
- "port %u Rx queue %u requested %u*%u but got %u*%u"
- " WRs*SGEs",
- dev->data->port_id, idx,
- wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
- attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
- rte_errno = EINVAL;
- goto error;
+ return rxq_obj->wq;
+}
+
+/**
+ * Fill common fields of create RQ attributes structure.
+ *
+ * @param rxq_data
+ * Pointer to Rx queue data.
+ * @param cqn
+ * CQ number to use with this RQ.
+ * @param rq_attr
+ * RQ attributes structure to fill.
+ */
+static void
+mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
+ struct mlx5_devx_create_rq_attr *rq_attr)
+{
+ rq_attr->state = MLX5_RQC_STATE_RST;
+ rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
+ rq_attr->cqn = cqn;
+ rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
+}
+
+/**
+ * Fill common fields of DevX WQ attributes structure.
+ *
+ * @param priv
+ * Pointer to device private data.
+ * @param rxq_ctrl
+ * Pointer to Rx queue control structure.
+ * @param wq_attr
+ * WQ attributes structure to fill.
+ */
+static void
+mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
+ struct mlx5_devx_wq_attr *wq_attr)
+{
+ wq_attr->end_padding_mode = priv->config.cqe_pad ?
+ MLX5_WQ_END_PAD_MODE_ALIGN :
+ MLX5_WQ_END_PAD_MODE_NONE;
+ wq_attr->pd = priv->sh->pdn;
+ wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
+ wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
+ wq_attr->dbr_umem_valid = 1;
+ wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
+ wq_attr->wq_umem_valid = 1;
+}
+
+/**
+ * Create a RQ object using DevX.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ * @param cqn
+ * CQ number to use with this RQ.
+ *
+ * @return
+ * The DevX object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_obj *
+mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_devx_create_rq_attr rq_attr;
+ uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
+ uint32_t wq_size = 0;
+ uint32_t wqe_size = 0;
+ uint32_t log_wqe_size = 0;
+ void *buf = NULL;
+ struct mlx5_devx_obj *rq;
+
+ memset(&rq_attr, 0, sizeof(rq_attr));
+ /* Fill RQ attributes. */
+ rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
+ rq_attr.flush_in_error_en = 1;
+ mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
+ /* Fill WQ attributes for this RQ. */
+ if (mlx5_rxq_mprq_enabled(rxq_data)) {
+ rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
+ /*
+ * Number of strides in each WQE:
+ * 512*2^single_wqe_log_num_of_strides.
+ */
+ rq_attr.wq_attr.single_wqe_log_num_of_strides =
+ rxq_data->strd_num_n -
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
+ rq_attr.wq_attr.single_stride_log_num_of_bytes =
+ rxq_data->strd_sz_n -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ wqe_size = sizeof(struct mlx5_wqe_mprq);
+ } else {
+ rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+ wqe_size = sizeof(struct mlx5_wqe_data_seg);
+ }
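+ /* Each WQE holds 2^sges_n segments, scale the WQ stride and depth. */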
+ log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
+ rq_attr.wq_attr.log_wq_stride = log_wqe_size;
+ rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
+ /* Calculate and allocate WQ memory space. */
+ wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
+ wq_size = wqe_n * wqe_size;
+ buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT,
+ rxq_ctrl->socket);
+ if (!buf)
+ return NULL;
+ rxq_data->wqes = buf;
+ rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
+ buf, wq_size, 0);
+ if (!rxq_ctrl->wq_umem) {
+ rte_free(buf);
+ return NULL;
}
- /* Change queue state to ready. */
- mod = (struct ibv_wq_attr){
- .attr_mask = IBV_WQ_ATTR_STATE,
- .wq_state = IBV_WQS_RDY,
- };
- ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
- if (ret) {
+ mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
+ rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
+ if (!rq)
+ rxq_release_rq_resources(rxq_ctrl);
+ return rq;
+}
+
+/**
+ * Create the Rx hairpin queue object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * Queue index in DPDK Rx queue array
+ *
+ * @return
+ * The hairpin DevX object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_devx_create_rq_attr attr = { 0 };
+ struct mlx5_rxq_obj *tmpl = NULL;
+ int ret = 0;
+
+ assert(rxq_data);
+ assert(!rxq_ctrl->obj);
+ tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
DRV_LOG(ERR,
- "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
- dev->data->port_id, idx);
- rte_errno = ret;
- goto error;
- }
- obj.cq.in = tmpl->cq;
- obj.cq.out = &cq_info;
- obj.rwq.in = tmpl->wq;
- obj.rwq.out = &rwq;
- ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
- if (ret) {
- rte_errno = ret;
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
goto error;
}
- if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
+ tmpl->rxq_ctrl = rxq_ctrl;
+ attr.hairpin = 1;
+ /* Workaround for hairpin startup */
+ attr.wq_attr.log_hairpin_num_packets = log2above(32);
+ /* Workaround for packets larger than 1KB */
+ attr.wq_attr.log_hairpin_data_sz =
+ priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+ rxq_ctrl->socket);
+ if (!tmpl->rq) {
DRV_LOG(ERR,
- "port %u wrong MLX5_CQE_SIZE environment variable"
- " value: it should be set to %u",
- dev->data->port_id, RTE_CACHE_LINE_SIZE);
- rte_errno = EINVAL;
+ "port %u Rx hairpin queue %u can't create rq object",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
- /* Fill the rings. */
- rxq_data->wqes = rwq.buf;
- for (i = 0; (i != wqe_n); ++i) {
- volatile struct mlx5_wqe_data_seg *scat;
- uintptr_t addr;
- uint32_t byte_count;
-
- if (mprq_en) {
- struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
-
- scat = &((volatile struct mlx5_wqe_mprq *)
- rxq_data->wqes)[i].dseg;
- addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
- byte_count = (1 << rxq_data->strd_sz_n) *
- (1 << rxq_data->strd_num_n);
- } else {
- struct rte_mbuf *buf = (*rxq_data->elts)[i];
-
- scat = &((volatile struct mlx5_wqe_data_seg *)
- rxq_data->wqes)[i];
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- byte_count = DATA_LEN(buf);
- }
- /* scat->addr must be able to store a pointer. */
- assert(sizeof(scat->addr) >= sizeof(uintptr_t));
- *scat = (struct mlx5_wqe_data_seg){
- .addr = rte_cpu_to_be_64(addr),
- .byte_count = rte_cpu_to_be_32(byte_count),
- .lkey = mlx5_rx_addr2mr(rxq_data, addr),
- };
- }
- rxq_data->rq_db = rwq.dbrec;
- rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
- rxq_data->cq_ci = 0;
- rxq_data->consumed_strd = 0;
- rxq_data->rq_pi = 0;
- rxq_data->zip = (struct rxq_zip){
- .ai = 0,
- };
- rxq_data->cq_db = cq_info.dbrec;
- rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
- rxq_data->cq_uar = cq_info.cq_uar;
- rxq_data->cqn = cq_info.cqn;
- rxq_data->cq_arm_sn = 0;
- /* Update doorbell counter. */
- rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
- rte_wmb();
- *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- if (tmpl->wq)
- claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
- if (tmpl->cq)
- claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
- if (tmpl->channel)
- claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
- priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ if (tmpl->rq)
+ mlx5_devx_cmd_destroy(tmpl->rq);
rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
/**
- * Get an Rx queue Verbs object.
+ * Create the Rx queue Verbs/DevX object.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
+ * @param type
+ * Type of Rx queue object to create.
*
* @return
- * The Verbs object if it exists.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
+struct mlx5_rxq_obj *
+mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
+ enum mlx5_rxq_obj_type type)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ struct ibv_wq_attr mod;
+ unsigned int cqe_n;
+ unsigned int wqe_n = 1 << rxq_data->elts_n;
+ struct mlx5_rxq_obj *tmpl = NULL;
+ struct mlx5dv_cq cq_info;
+ struct mlx5dv_rwq rwq;
+ int ret = 0;
+ struct mlx5dv_obj obj;
- if (idx >= priv->rxqs_n)
- return NULL;
- if (!rxq_data)
- return NULL;
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- if (rxq_ctrl->ibv) {
- rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
+ assert(rxq_data);
+ assert(!rxq_ctrl->obj);
+ if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+ return mlx5_rxq_obj_hairpin_new(dev, idx);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
+ priv->verbs_alloc_ctx.obj = rxq_ctrl;
+ tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
+ rxq_ctrl->socket);
+ if (!tmpl) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_data->idx);
+ rte_errno = ENOMEM;
+ goto error;
}
- return rxq_ctrl->ibv;
-}
-
-/**
- * Release an Rx verbs queue object.
- *
- * @param rxq_ibv
- * Verbs Rx queue object.
- *
- * @return
- * 1 while a reference on it exists, 0 when freed.
- */
-int
-mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
-{
- assert(rxq_ibv);
- assert(rxq_ibv->wq);
- assert(rxq_ibv->cq);
- if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
- rxq_free_elts(rxq_ibv->rxq_ctrl);
- claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
- claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq));
- if (rxq_ibv->channel)
+ tmpl->type = type;
+ tmpl->rxq_ctrl = rxq_ctrl;
+ if (rxq_ctrl->irq) {
+ tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
+ if (!tmpl->channel) {
+ DRV_LOG(ERR, "port %u: comp channel creation failure",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ }
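+ /* With MPRQ every stride may produce its own CQE, size the CQ for it. */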
+ if (mlx5_rxq_mprq_enabled(rxq_data))
+ cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ else
+ cqe_n = wqe_n - 1;
+ tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
+ if (!tmpl->cq) {
+ DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ obj.cq.in = tmpl->cq;
+ obj.cq.out = &cq_info;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
+ if (ret) {
+ rte_errno = ret;
+ goto error;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
+ dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+ /* Allocate door-bell for types created with DevX. */
+ if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
+ struct mlx5_devx_dbr_page *dbr_page;
+ int64_t dbr_offset;
+
+ dbr_offset = mlx5_get_dbr(dev, &dbr_page);
+ if (dbr_offset < 0)
+ goto error;
+ rxq_ctrl->dbr_offset = dbr_offset;
+ rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
+ rxq_ctrl->dbr_umem_id_valid = 1;
+ rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
+ (uintptr_t)rxq_ctrl->dbr_offset);
+ }
+ if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+ tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
+ tmpl);
+ if (!tmpl->wq) {
+ DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ mod = (struct ibv_wq_attr){
+ .attr_mask = IBV_WQ_ATTR_STATE,
+ .wq_state = IBV_WQS_RDY,
+ };
+ ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u WQ state to IBV_WQS_RDY"
+ " failed", dev->data->port_id, idx);
+ rte_errno = ret;
+ goto error;
+ }
+ obj.rwq.in = tmpl->wq;
+ obj.rwq.out = &rwq;
+ ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
+ if (ret) {
+ rte_errno = ret;
+ goto error;
+ }
+ rxq_data->wqes = rwq.buf;
+ rxq_data->rq_db = rwq.dbrec;
+ } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+ struct mlx5_devx_modify_rq_attr rq_attr;
+
+ memset(&rq_attr, 0, sizeof(rq_attr));
+ tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
+ if (!tmpl->rq) {
+ DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ /* Change queue state to ready. */
+ rq_attr.rq_state = MLX5_RQC_STATE_RST;
+ rq_attr.state = MLX5_RQC_STATE_RDY;
+ ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
+ if (ret)
+ goto error;
+ }
+ /* Fill the rings. */
+ rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
+ rxq_data->cq_db = cq_info.dbrec;
+ rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+ rxq_data->cq_uar = cq_info.cq_uar;
+ rxq_data->cqn = cq_info.cqn;
+ rxq_data->cq_arm_sn = 0;
+ mlx5_rxq_initialize(rxq_data);
+ rxq_data->cq_ci = 0;
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return tmpl;
+error:
+ if (tmpl) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
+ claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+ else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
+ claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
+ if (tmpl->cq)
+ claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
+ if (tmpl->channel)
claim_zero(mlx5_glue->destroy_comp_channel
- (rxq_ibv->channel));
- LIST_REMOVE(rxq_ibv, next);
- rte_free(rxq_ibv);
- return 0;
+ (tmpl->channel));
+ rte_free(tmpl);
+ rte_errno = ret; /* Restore rte_errno. */
}
- return 1;
+ if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
+ rxq_release_rq_resources(rxq_ctrl);
+ priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ return NULL;
}
/**
- * Verify the Verbs Rx queue list is empty
+ * Verify the Rx queue objects list is empty
*
* @param dev
* Pointer to Ethernet device.
*
* @return
- * The number of object not released.
+ * The number of objects not released.
*/
int
-mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
- struct mlx5_rxq_ibv *rxq_ibv;
+ struct mlx5_rxq_obj *rxq_obj;
- LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
- dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+ LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+ DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
+ dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
++ret;
}
return ret;
}
-/**
- * Return true if a single reference exists on the object.
- *
- * @param rxq_ibv
- * Verbs Rx queue object.
- */
-int
-mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
-{
- assert(rxq_ibv);
- return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
-}
-
/**
* Callback function to initialize mbufs for Multi-Packet RQ.
*/
static inline void
-mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
void *_m, unsigned int i __rte_unused)
{
struct mlx5_mprq_buf *buf = _m;
+ struct rte_mbuf_ext_shared_info *shinfo;
+ unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
+ unsigned int j;
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
rte_atomic16_set(&buf->refcnt, 1);
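+ /* Register the external buffer free callback for every stride. */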
+ for (j = 0; j != strd_n; ++j) {
+ shinfo = &buf->shinfos[j];
+ shinfo->free_cb = mlx5_mprq_buf_free_cb;
+ shinfo->fcb_opaque = buf;
+ }
}
/**
dev->data->port_id, mp->name);
/*
* If a buffer in the pool has been externally attached to a mbuf and it
- * is still in use by application, destroying the Rx qeueue can spoil
+ * is still in use by application, destroying the Rx queue can spoil
* the packet. It is unlikely to happen but if application dynamically
* creates and destroys with holding Rx packets, this can happen.
*
unsigned int strd_num_n = 0;
unsigned int strd_sz_n = 0;
unsigned int i;
+ unsigned int n_ibv = 0;
if (!mlx5_mprq_enabled(dev))
return 0;
/* Count the total number of descriptors configured. */
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+ (rxq, struct mlx5_rxq_ctrl, rxq);
- if (rxq == NULL)
+ if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
+ n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
if (strd_num_n < rxq->strd_num_n)
}
assert(strd_num_n && strd_sz_n);
buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
- obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
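+ /*
+ * An object holds the buffer descriptor, the stride memory, one
+ * shared info per stride and room for the mbuf head-room.
+ */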
+ obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
+ sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
/*
* Received packets can be either memcpy'd or externally referenced. In
* case that the packet is attached to an mbuf as an external buffer, as
* this Mempool gets available again.
*/
desc *= 4;
- obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+ obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
/*
* rte_mempool_create_empty() has sanity check to refuse large cache
* size compared to the number of elements.
return -rte_errno;
}
}
- snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
- 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+ 0, NULL, NULL, mlx5_mprq_buf_init,
+ (void *)(uintptr_t)(1 << strd_num_n),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
/* Set mempool for each Rx queue. */
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl = container_of
+ (rxq, struct mlx5_rxq_ctrl, rxq);
- if (rxq == NULL)
+ if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
continue;
rxq->mprq_mp = mp;
}
return 0;
}
+#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
+ sizeof(struct rte_vlan_hdr) * 2 + \
+ sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+ sizeof(struct rte_tcp_hdr) + \
+ MAX_TCP_OPTION_SIZE))
+
+/**
+ * Adjust the maximum LRO message size.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ * @param max_lro_size
+ * The maximum size for LRO packet.
+ */
+static void
+mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
+ uint32_t max_lro_size)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
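+ /*
+ * If the device counts the LRO limit from the L4 header, subtract
+ * the maximal L2/L3 header length from the requested size.
+ */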
+ if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+ MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
+ MLX5_MAX_TCP_HDR_OFFSET)
+ max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
+ max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
+ assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
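+ /* The device takes the limit in MLX5_LRO_SEG_CHUNK_SIZE units. */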
+ max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
+ if (priv->max_lro_msg_size)
+ priv->max_lro_msg_size =
+ RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
+ else
+ priv->max_lro_msg_size = max_lro_size;
+ DRV_LOG(DEBUG,
+ "port %u Rx Queue %u max LRO message size adjusted to %u bytes",
+ dev->data->port_id, idx,
+ priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
+}
+
/**
* Create a DPDK Rx queue.
*
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int mprq_stride_size;
struct mlx5_dev_config *config = &priv->config;
+ unsigned int strd_headroom_en;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
+ unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
const int mprq_en = mlx5_check_mprq_support(dev) > 0;
-
+ unsigned int max_rx_pkt_len = lro_on_queue ?
+ dev->data->dev_conf.rxmode.max_lro_pkt_size :
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+ RTE_PKTMBUF_HEADROOM;
+ unsigned int max_lro_size = 0;
+ unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+
+ if (non_scatter_min_mbuf_size > mb_len && !(offloads &
+ DEV_RX_OFFLOAD_SCATTER)) {
+ DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
+ " configured and no enough mbuf space(%u) to contain "
+ "the maximum RX packet length(%u) with head-room(%u)",
+ dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+ RTE_PKTMBUF_HEADROOM);
+ rte_errno = ENOSPC;
+ return NULL;
+ }
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
rte_errno = ENOMEM;
return NULL;
}
+ tmpl->type = MLX5_RXQ_TYPE_STANDARD;
if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
MLX5_MR_BTREE_CACHE_N, socket)) {
/* rte_errno is already set. */
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
+ /*
+ * An LRO packet may consume all the stride memory, hence we cannot
+ * guarantee head-room near the packet memory in the stride.
+ * In this case scatter is certainly enabled and an empty mbuf may be
+ * prepended to provide the head-room.
+ */
+ if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
+ non_scatter_min_mbuf_size > mb_len) {
+ strd_headroom_en = 0;
+ mprq_stride_size = RTE_MIN(max_rx_pkt_len,
+ 1u << config->mprq.max_stride_size_n);
+ } else {
+ strd_headroom_en = 1;
+ mprq_stride_size = non_scatter_min_mbuf_size;
+ }
/*
* This Rx queue can be configured as a Multi-Packet RQ if all of the
* following conditions are met:
* stride.
* Otherwise, enable Rx scatter if necessary.
*/
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- mprq_stride_size =
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- sizeof(struct rte_mbuf_ext_shared_info) +
- RTE_PKTMBUF_HEADROOM;
if (mprq_en &&
desc > (1U << config->mprq.stride_num_n) &&
mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
config->mprq.min_stride_size_n);
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
- tmpl->rxq.mprq_max_memcpy_len =
- RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
+ tmpl->rxq.strd_headroom_en = strd_headroom_en;
+ tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
+ max_lro_size = RTE_MIN(max_rx_pkt_len,
+ (1u << tmpl->rxq.strd_num_n) *
+ (1u << tmpl->rxq.strd_sz_n));
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
- } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ } else if (max_rx_pkt_len <= first_mb_free_size) {
tmpl->rxq.sges_n = 0;
+ max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
- unsigned int size =
- RTE_PKTMBUF_HEADROOM +
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
+ if (lro_on_queue && first_mb_free_size <
+ MLX5_MAX_LRO_HEADER_FIX) {
+ DRV_LOG(ERR, "Not enough space in the first segment(%u)"
+ " to include the max header size(%u) for LRO",
+ first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+ rte_errno = ENOTSUP;
+ goto error;
+ }
/*
* Determine the number of SGEs needed for a full packet
* and round it to the next power of two.
*/
sges_n = log2above((size / mb_len) + !!(size % mb_len));
- tmpl->rxq.sges_n = sges_n;
- /* Make sure rxq.sges_n did not overflow. */
- size = mb_len * (1 << tmpl->rxq.sges_n);
- size -= RTE_PKTMBUF_HEADROOM;
- if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- dev->data->port_id,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
- rte_errno = EOVERFLOW;
+ " requested maximum packet size %u, the maximum"
+ " supported are %u", dev->data->port_id,
+ 1 << sges_n, max_rx_pkt_len,
+ 1u << MLX5_MAX_LOG_RQ_SEGS);
+ rte_errno = ENOTSUP;
goto error;
}
- } else {
- DRV_LOG(WARNING,
- "port %u the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered mode has"
- " not been requested",
- dev->data->port_id,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
+ tmpl->rxq.sges_n = sges_n;
+ max_lro_size = max_rx_pkt_len;
}
if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
rte_errno = EINVAL;
goto error;
}
+ mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
+ tmpl->rxq.lro = lro_on_queue;
if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
- tmpl->rxq.crc_present = 1;
+ /*
+ * RQs used for LRO-enabled TIRs should not be
+ * configured to scatter the FCS.
+ */
+ if (lro_on_queue)
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been "
+ "disabled but will still be performed "
+ "by hardware, because LRO is enabled",
+ dev->data->port_id);
+ else
+ tmpl->rxq.crc_present = 1;
} else {
DRV_LOG(WARNING,
"port %u CRC stripping has been disabled but will"
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
tmpl->rxq.mp = mp;
- tmpl->rxq.stats.idx = idx;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.rq_repl_thresh =
MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
#ifndef RTE_ARCH_64
tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
#endif
- tmpl->idx = idx;
+ tmpl->rxq.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
return NULL;
}
+/**
+ * Create a DPDK Rx hairpin queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param hairpin_conf
+ * The hairpin binding configuration.
+ *
+ * @return
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ const struct rte_eth_hairpin_conf *hairpin_conf)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ctrl *tmpl;
+
+ tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY);
+ if (!tmpl) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
+ tmpl->socket = SOCKET_ID_ANY;
+ tmpl->rxq.rss_hash = 0;
+ tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->priv = priv;
+ tmpl->rxq.mp = NULL;
+ tmpl->rxq.elts_n = log2above(desc);
+ tmpl->rxq.elts = NULL;
+ tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
+ tmpl->hairpin_conf = *hairpin_conf;
+ tmpl->rxq.idx = idx;
+ rte_atomic32_inc(&tmpl->refcnt);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
+ return tmpl;
+}
+
/**
* Get a Rx queue.
*
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* A pointer to the queue if it exists, NULL otherwise.
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
rxq);
- mlx5_rxq_ibv_get(dev, idx);
+ mlx5_rxq_obj_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
}
return rxq_ctrl;
* @param dev
* Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
*
* @return
* 1 while a reference on it exists, 0 when freed.
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
assert(rxq_ctrl->priv);
- if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
- rxq_ctrl->ibv = NULL;
+ if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
+ rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
- mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ if (rxq_ctrl->dbr_umem_id_valid)
+ claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+ rxq_ctrl->dbr_offset));
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
}
/**
- * Verify if the queue can be released.
+ * Verify the Rx Queue list is empty
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * TX queue index.
*
* @return
- * 1 if the queue can be released, negative errno otherwise and rte_errno is
- * set.
+ * The number of objects not released.
*/
int
-mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
+ int ret = 0;
- if (!(*priv->rxqs)[idx]) {
- rte_errno = EINVAL;
- return -rte_errno;
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+ DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->rxq.idx);
+ ++ret;
}
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
+ return ret;
}
/**
- * Verify the Rx Queue list is empty
+ * Get a Rx queue type.
*
* @param dev
* Pointer to Ethernet device.
+ * @param idx
+ * Rx queue index.
*
* @return
- * The number of object not released.
+ * The Rx queue type.
*/
-int
-mlx5_rxq_verify(struct rte_eth_dev *dev)
+enum mlx5_rxq_type
+mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
- int ret = 0;
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
- LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
- DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
- dev->data->port_id, rxq_ctrl->idx);
- ++ret;
+ if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
+ rxq_ctrl = container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl,
+ rxq);
+ return rxq_ctrl->type;
}
- return ret;
+ return MLX5_RXQ_TYPE_UNDEFINED;
}
/**
* Number of queues in the array.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n, enum mlx5_ind_tbl_type type)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
- const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
- struct ibv_wq *wq[1 << wq_n];
- unsigned int i;
- unsigned int j;
+ struct mlx5_ind_table_obj *ind_tbl;
+ unsigned int i = 0, j = 0, k = 0;
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
rte_errno = ENOMEM;
return NULL;
}
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
-
- if (!rxq)
+ ind_tbl->type = type;
+ if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
+ const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+ struct ibv_wq *wq[1 << wq_n];
+
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
+ queues[i]);
+ if (!rxq)
+ goto error;
+ wq[i] = rxq->obj->wq;
+ ind_tbl->queues[i] = queues[i];
+ }
+ ind_tbl->queues_n = queues_n;
+ /* Finalise indirection table. */
+ k = i; /* Retain value of i for use in error case. */
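+ /* Pad the table to a power-of-two size by repeating the queues. */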
+ for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j)
+ wq[k] = wq[j];
+ ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->sh->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = wq_n,
+ .ind_tbl = wq,
+ .comp_mask = 0,
+ });
+ if (!ind_tbl->ind_table) {
+ rte_errno = errno;
goto error;
- wq[i] = rxq->ibv->wq;
- ind_tbl->queues[i] = queues[i];
- }
- ind_tbl->queues_n = queues_n;
- /* Finalise indirection table. */
- for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
- wq[i] = wq[j];
- ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
- (priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = wq_n,
- .ind_tbl = wq,
- .comp_mask = 0,
- });
- if (!ind_tbl->ind_table) {
- rte_errno = errno;
- goto error;
+ }
+ } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
+ struct mlx5_devx_rqt_attr *rqt_attr = NULL;
+ const unsigned int rqt_n =
+ 1 << (rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size));
+
+ rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
+ rqt_n * sizeof(uint32_t), 0);
+ if (!rqt_attr) {
+ DRV_LOG(ERR, "port %u cannot allocate RQT resources",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
+ rqt_attr->rqt_actual_size = rqt_n;
+ for (i = 0; i != queues_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
+ queues[i]);
+ if (!rxq)
+ goto error;
+ rqt_attr->rq_list[i] = rxq->obj->rq->id;
+ ind_tbl->queues[i] = queues[i];
+ }
+ k = i; /* Retain value of i for use in error case. */
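+ /* Pad the RQ list to the RQT size by repeating the queues. */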
+ for (j = 0; k != rqt_n; ++k, ++j)
+ rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
+ ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
+ rqt_attr);
+ rte_free(rqt_attr);
+ if (!ind_tbl->rqt) {
+ DRV_LOG(ERR, "port %u cannot create DevX RQT",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ ind_tbl->queues_n = queues_n;
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
return ind_tbl;
error:
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
rte_free(ind_tbl);
DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
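Both branches above finalise the table the same way: the hardware indirection table must hold a power-of-two number of entries (either queues_n when it is already a power of two, or ind_table_max_size otherwise), so after the configured queues are written the remaining slots are filled by wrapping around those queues. A standalone sketch of that fill pattern, with purely illustrative names and sizes:

/* Illustrative only: not driver code. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t queues[] = { 0, 1, 2 };	/* configured Rx queue indices */
	unsigned int queues_n = 3;
	unsigned int tbl_n = 4;			/* power-of-two table size */
	uint16_t tbl[4];
	unsigned int i, j, k;

	for (i = 0; i != queues_n; ++i)
		tbl[i] = queues[i];
	k = i;	/* wrap the configured queues around the remaining slots */
	for (j = 0; k != tbl_n; ++k, ++j)
		tbl[k] = tbl[j];
	for (i = 0; i != tbl_n; ++i)
		printf("entry %u -> queue %u\n", i, (unsigned int)tbl[i]);
	return 0;	/* prints queues 0, 1, 2, 0 */
}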
* @return
* An indirection table if found.
*/
-struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
* @return
* 1 while a reference on it exists, 0 when freed.
*/
-int
-mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_ibv *ind_tbl)
+static int
+mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
{
unsigned int i;
- if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
- claim_zero(mlx5_glue->destroy_rwq_ind_table
- (ind_tbl->ind_table));
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
+ if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
+ claim_zero(mlx5_glue->destroy_rwq_ind_table
+ (ind_tbl->ind_table));
+ else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
+ claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
+ }
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
* The number of objects not released.
*/
int
-mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DRV_LOG(DEBUG,
- "port %u Verbs indirection table %p still referenced",
+ "port %u indirection table obj %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
* Tunnel type.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_new(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
- struct mlx5_ind_table_ibv *ind_tbl;
- struct ibv_qp *qp;
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- struct mlx5dv_qp_init_attr qp_init_attr;
-#endif
+ struct ibv_qp *qp = NULL;
+ struct mlx5_ind_table_obj *ind_tbl;
int err;
+ struct mlx5_devx_obj *tir = NULL;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
- if (!ind_tbl)
- ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ if (!ind_tbl) {
+ enum mlx5_ind_tbl_type type;
+
+ type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
+ MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX;
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type);
+ }
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
}
+ if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- memset(&qp_init_attr, 0, sizeof(qp_init_attr));
- if (tunnel) {
- qp_init_attr.comp_mask =
+ struct mlx5dv_qp_init_attr qp_init_attr;
+
+ memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+ if (tunnel) {
+ qp_init_attr.comp_mask =
MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
- qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
- }
+ qp_init_attr.create_flags =
+ MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+ }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- if (dev->data->dev_conf.lpbk_mode) {
- /* Allow packet sent from NIC loop back w/o source MAC check. */
- qp_init_attr.comp_mask |=
+ if (dev->data->dev_conf.lpbk_mode) {
+ /*
+ * Allow packets sent from the NIC to loop
+ * back without a source MAC check.
+ */
+ qp_init_attr.comp_mask |=
MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
- qp_init_attr.create_flags |=
+ qp_init_attr.create_flags |=
MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
- }
+ }
#endif
- qp = mlx5_glue->dv_create_qp
- (priv->ctx,
- &(struct ibv_qp_init_attr_ex){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_QP_INIT_ATTR_PD |
- IBV_QP_INIT_ATTR_IND_TABLE |
- IBV_QP_INIT_ATTR_RX_HASH,
- .rx_hash_conf = (struct ibv_rx_hash_conf){
- .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_key_len,
- .rx_hash_key = (void *)(uintptr_t)rss_key,
- .rx_hash_fields_mask = hash_fields,
- },
- .rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd,
- },
- &qp_init_attr);
+ qp = mlx5_glue->dv_create_qp
+ (priv->sh->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len,
+ .rx_hash_key =
+ (void *)(uintptr_t)rss_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->sh->pd,
+ },
+ &qp_init_attr);
#else
- qp = mlx5_glue->create_qp_ex
- (priv->ctx,
- &(struct ibv_qp_init_attr_ex){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_QP_INIT_ATTR_PD |
- IBV_QP_INIT_ATTR_IND_TABLE |
- IBV_QP_INIT_ATTR_RX_HASH,
- .rx_hash_conf = (struct ibv_rx_hash_conf){
- .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_key_len,
- .rx_hash_key = (void *)(uintptr_t)rss_key,
- .rx_hash_fields_mask = hash_fields,
- },
- .rwq_ind_tbl = ind_tbl->ind_table,
- .pd = priv->sh->pd,
- });
+ qp = mlx5_glue->create_qp_ex
+ (priv->sh->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len,
+ .rx_hash_key =
+ (void *)(uintptr_t)rss_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->sh->pd,
+ });
#endif
- if (!qp) {
- rte_errno = errno;
- goto error;
+ if (!qp) {
+ rte_errno = errno;
+ goto error;
+ }
+ } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
+ struct mlx5_devx_tir_attr tir_attr;
+ struct mlx5_rx_hash_field_select *rx_hash_field_select;
+ uint32_t i;
+ uint32_t lro = 1;
+
+ /* Enable TIR LRO only if all the queues were configured for it. */
+ for (i = 0; i < queues_n; ++i) {
+ if (!(*priv->rxqs)[queues[i]]->lro) {
+ lro = 0;
+ break;
+ }
+ }
+ memset(&tir_attr, 0, sizeof(tir_attr));
+ tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+ tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ tir_attr.tunneled_offload_en = !!tunnel;
+ /* Translate hash_fields bitmap to PRM format. */
+ rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
+ &tir_attr.rx_hash_field_selector_inner :
+ &tir_attr.rx_hash_field_selector_outer;
+#else
+ rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
+#endif
+ /* 1 bit: 0: IPv4, 1: IPv6. */
+ rx_hash_field_select->l3_prot_type =
+ !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
+ /* 1 bit: 0: TCP, 1: UDP. */
+ rx_hash_field_select->l4_prot_type =
+ !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
+ /* Bitmask which sets which fields to use in RX Hash. */
+ rx_hash_field_select->selected_fields =
+ ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
+ ((!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP) |
+ ((!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
+ ((!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
+ if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+ tir_attr.transport_domain = priv->sh->td->id;
+ else
+ tir_attr.transport_domain = priv->sh->tdn;
+ memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
+ tir_attr.indirect_table = ind_tbl->rqt->id;
+ if (dev->data->dev_conf.lpbk_mode)
+ tir_attr.self_lb_block =
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ if (lro) {
+ tir_attr.lro_timeout_period_usecs =
+ priv->config.lro.timeout;
+ tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
+ tir_attr.lro_enable_mask =
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
+ }
+ tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+ if (!tir) {
+ DRV_LOG(ERR, "port %u cannot create DevX TIR",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
}
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
if (!hrxq)
goto error;
hrxq->ind_table = ind_tbl;
- hrxq->qp = qp;
+ if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
+ hrxq->qp = qp;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ hrxq->action =
+ mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
+ if (!hrxq->action) {
+ rte_errno = errno;
+ goto error;
+ }
+#endif
+ } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
+ hrxq->tir = tir;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
+ (hrxq->tir->obj);
+ if (!hrxq->action) {
+ rte_errno = errno;
+ goto error;
+ }
+#endif
+ }
hrxq->rss_key_len = rss_key_len;
hrxq->hash_fields = hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
+ else if (tir)
+ claim_zero(mlx5_devx_cmd_destroy(tir));
rte_errno = err; /* Restore rte_errno. */
return NULL;
}
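In the DevX branch above, the Verbs hash_fields bitmap is translated into the PRM TIR layout: one bit picks IPv4 vs. IPv6, one picks TCP vs. UDP, and a small bitmask selects which addresses and ports feed the Toeplitz hash. A standalone sketch of that translation for outer IPv4/TCP hashing on both addresses and both ports; the bit positions below are placeholders standing in for the MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_* values, not the real PRM constants:

/* Illustrative only: placeholder bit positions, not driver code. */
#include <stdint.h>
#include <stdio.h>

enum { SEL_SRC_IP = 0, SEL_DST_IP = 1, SEL_L4_SPORT = 2, SEL_L4_DPORT = 3 };

int
main(void)
{
	int ipv6 = 0, udp = 0;			/* outer IPv4 + TCP */
	int src_ip = 1, dst_ip = 1;		/* hash on both addresses */
	int sport = 1, dport = 1;		/* hash on both ports */
	uint8_t l3_prot_type = !!ipv6;		/* 0: IPv4, 1: IPv6 */
	uint8_t l4_prot_type = !!udp;		/* 0: TCP,  1: UDP  */
	uint8_t selected_fields =
		((!!src_ip) << SEL_SRC_IP) | ((!!dst_ip) << SEL_DST_IP) |
		((!!sport) << SEL_L4_SPORT) | ((!!dport) << SEL_L4_DPORT);

	printf("l3_prot_type=%u l4_prot_type=%u selected_fields=0x%x\n",
	       (unsigned int)l3_prot_type, (unsigned int)l4_prot_type,
	       (unsigned int)selected_fields);
	return 0;	/* l3_prot_type=0 l4_prot_type=0 selected_fields=0xf */
}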
queues_n = hash_fields ? queues_n : 1;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
- claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+ if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
+ claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
return 1;
}
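All the release helpers above follow the same reference-count convention documented in their headers: the call returns 1 while somebody still holds a reference and 0 once the underlying Verbs/DevX objects have been destroyed. A standalone sketch of that convention (names are illustrative, not driver API):

/* Illustrative only: the get/release counting convention, not driver code. */
#include <stdio.h>

struct obj {
	int refcnt;
	int created;	/* stands in for the Verbs/DevX resources */
};

static struct obj *
obj_get(struct obj *o)
{
	if (!o->refcnt)
		o->created = 1;	/* first user creates the resources */
	o->refcnt++;
	return o;
}

static int
obj_release(struct obj *o)
{
	if (--o->refcnt == 0) {
		o->created = 0;	/* last user destroys the resources */
		return 0;	/* freed */
	}
	return 1;		/* still referenced */
}

int
main(void)
{
	struct obj o = { 0, 0 };

	obj_get(&o);
	obj_get(&o);
	printf("%d\n", obj_release(&o));	/* 1: still referenced */
	printf("%d\n", obj_release(&o));	/* 0: freed */
	return 0;
}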
* The number of objects not released.
*/
int
-mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
+mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
DRV_LOG(DEBUG,
- "port %u Verbs hash Rx queue %p still referenced",
+ "port %u hash Rx queue %p still referenced",
dev->data->port_id, (void *)hrxq);
++ret;
}
}
/**
- * Create a drop Rx queue Verbs object.
+ * Create a drop Rx queue Verbs/DevX object.
*
* @param dev
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv *
-mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_rxq_obj *
+mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
struct ibv_cq *cq;
struct ibv_wq *wq = NULL;
- struct mlx5_rxq_ibv *rxq;
+ struct mlx5_rxq_obj *rxq;
if (priv->drop_queue.rxq)
return priv->drop_queue.rxq;
- cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
if (!cq) {
DEBUG("port %u cannot allocate CQ for drop queue",
dev->data->port_id);
rte_errno = errno;
goto error;
}
- wq = mlx5_glue->create_wq(priv->ctx,
+ wq = mlx5_glue->create_wq(ctx,
&(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
}
/**
- * Release a drop Rx queue Verbs object.
+ * Release a drop Rx queue Verbs/DevX object.
*
* @param dev
* Pointer to Ethernet device.
*
- * @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-void
-mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+static void
+mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+ struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
if (rxq->wq)
claim_zero(mlx5_glue->destroy_wq(rxq->wq));
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
- struct mlx5_rxq_ibv *rxq;
- struct mlx5_ind_table_ibv tmpl;
+ struct mlx5_ind_table_obj *ind_tbl;
+ struct mlx5_rxq_obj *rxq;
+ struct mlx5_ind_table_obj tmpl;
- rxq = mlx5_rxq_ibv_drop_new(dev);
+ rxq = mlx5_rxq_obj_drop_new(dev);
if (!rxq)
return NULL;
tmpl.ind_table = mlx5_glue->create_rwq_ind_table
- (priv->ctx,
+ (priv->sh->ctx,
&(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
.ind_tbl = &rxq->wq,
ind_tbl->ind_table = tmpl.ind_table;
return ind_tbl;
error:
- mlx5_rxq_ibv_drop_release(dev);
+ mlx5_rxq_obj_drop_release(dev);
return NULL;
}
* @param dev
* Pointer to Ethernet device.
*/
-void
-mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+static void
+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+ struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
- mlx5_rxq_ibv_drop_release(dev);
+ mlx5_rxq_obj_drop_release(dev);
rte_free(ind_tbl);
priv->drop_queue.hrxq->ind_table = NULL;
}
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
- struct ibv_qp *qp;
- struct mlx5_hrxq *hrxq;
+ struct mlx5_ind_table_obj *ind_tbl = NULL;
+ struct ibv_qp *qp = NULL;
+ struct mlx5_hrxq *hrxq = NULL;
if (priv->drop_queue.hrxq) {
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
- ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ if (!hrxq) {
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ priv->drop_queue.hrxq = hrxq;
+ ind_tbl = mlx5_ind_table_obj_drop_new(dev);
if (!ind_tbl)
- return NULL;
- qp = mlx5_glue->create_qp_ex(priv->ctx,
+ goto error;
+ hrxq->ind_table = ind_tbl;
+ qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
&(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
rte_errno = errno;
goto error;
}
- hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
- if (!hrxq) {
- DRV_LOG(WARNING,
- "port %u cannot allocate memory for drop queue",
- dev->data->port_id);
- rte_errno = ENOMEM;
+ hrxq->qp = qp;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
+ if (!hrxq->action) {
+ rte_errno = errno;
goto error;
}
- hrxq->ind_table = ind_tbl;
- hrxq->qp = qp;
- priv->drop_queue.hrxq = hrxq;
+#endif
rte_atomic32_set(&hrxq->refcnt, 1);
return hrxq;
error:
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ if (hrxq && hrxq->action)
+ mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+ if (qp)
+ claim_zero(mlx5_glue->destroy_qp(qp));
if (ind_tbl)
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
+ if (hrxq) {
+ priv->drop_queue.hrxq = NULL;
+ rte_free(hrxq);
+ }
return NULL;
}
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
rte_free(hrxq);
priv->drop_queue.hrxq = NULL;
}