diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 88a42d02f4..9009eb8d18 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -4,38 +4,30 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
 #include <fcntl.h>
 #include <sys/queue.h>
 
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#include <infiniband/mlx5dv.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
 #include <rte_mbuf.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_debug.h>
 #include <rte_io.h>
+#include <rte_eal_paging.h>
+
+#include <mlx5_glue.h>
+#include <mlx5_malloc.h>
+#include "mlx5_defs.h"
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_glue.h"
+
 
 /* Default RSS hash key also used for ConnectX-3. */
 uint8_t rss_hash_default_key[] = {
@@ -57,74 +49,25 @@ static_assert(MLX5_RSS_HASH_KEY_LEN ==
 	      "wrong RSS default key size.");
 
 /**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-
-	if (priv->config.mprq.enabled &&
-	    priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
-		return 1;
-	return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ * Calculate the number of CQEs in CQ for the Rx queue.
  *
- * @param rxq
+ * @param rxq_data
  *   Pointer to receive queue structure.
  *
  * @return
- *   0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
-	return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 if disabled, otherwise enabled.
+ *   Number of CQEs in CQ.
  */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
+unsigned int
+mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	uint16_t i;
-	uint16_t n = 0;
-	uint16_t n_ibv = 0;
-
-	if (mlx5_check_mprq_support(dev) < 0)
-		return 0;
-	/* All the configured queues should be enabled. */
-	for (i = 0; i < priv->rxqs_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-		struct mlx5_rxq_ctrl *rxq_ctrl = container_of
-			(rxq, struct mlx5_rxq_ctrl, rxq);
+	unsigned int cqe_n;
+	unsigned int wqe_n = 1 << rxq_data->elts_n;
 
-		if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
-			continue;
-		n_ibv++;
-		if (mlx5_rxq_mprq_enabled(rxq))
-			++n;
-	}
-	/* Multi-Packet RQ can't be partially configured. 
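*/

/*
 * Worked example for the new mlx5_rxq_cqe_num() helper added by this
 * hunk (hypothetical values, not taken from the patch): elts_n = 4
 * gives wqe_n = 1 << 4 = 16 WQEs. In MPRQ mode with strd_num_n = 6
 * every WQE carries 64 strides and each stride may complete in its own
 * CQE, so the CQ needs wqe_n * 64 - 1 = 1023 entries; in SPRQ mode it
 * is simply wqe_n - 1 = 15.
 */

/*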
*/ - assert(n == 0 || n == n_ibv); - return n == n_ibv; + if (mlx5_rxq_mprq_enabled(rxq_data)) + cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + else + cqe_n = wqe_n - 1; + return cqe_n; } /** @@ -159,7 +102,7 @@ rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) rxq->mprq_repl = buf; } DRV_LOG(DEBUG, - "port %u Rx queue %u allocated and configured %u segments", + "port %u MPRQ queue %u allocated and configured %u segments", rxq->port_id, rxq->idx, wqe_n); return 0; error: @@ -171,7 +114,7 @@ error: (*rxq->mprq_bufs)[i]); (*rxq->mprq_bufs)[i] = NULL; } - DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything", rxq->port_id, rxq->idx); rte_errno = err; /* Restore rte_errno. */ return -rte_errno; @@ -190,15 +133,18 @@ static int rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; - unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; + unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) : + (1 << rxq_ctrl->rxq.elts_n); unsigned int i; int err; /* Iterate on segments. */ for (i = 0; (i != elts_n); ++i) { + struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n]; struct rte_mbuf *buf; - buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); + buf = rte_pktmbuf_alloc(seg->mp); if (buf == NULL) { DRV_LOG(ERR, "port %u empty mbuf pool", PORT_ID(rxq_ctrl->priv)); @@ -206,17 +152,15 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) goto error; } /* Headroom is reserved by rte_pktmbuf_alloc(). */ - assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); + MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); /* Buffer is supposed to be empty. */ - assert(rte_pktmbuf_data_len(buf) == 0); - assert(rte_pktmbuf_pkt_len(buf) == 0); - assert(!buf->next); - /* Only the first segment keeps headroom. */ - if (i % sges_n) - SET_DATA_OFF(buf, 0); + MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0); + MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0); + MLX5_ASSERT(!buf->next); + SET_DATA_OFF(buf, seg->offset); PORT(buf) = rxq_ctrl->rxq.port_id; - DATA_LEN(buf) = rte_pktmbuf_tailroom(buf); - PKT_LEN(buf) = DATA_LEN(buf); + DATA_LEN(buf) = seg->length; + PKT_LEN(buf) = seg->length; NB_SEGS(buf) = 1; (*rxq_ctrl->rxq.elts)[i] = buf; } @@ -224,6 +168,9 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; + struct rte_pktmbuf_pool_private *priv = + (struct rte_pktmbuf_pool_private *) + rte_mempool_get_priv(rxq_ctrl->rxq.mp); int j; /* Initialize default rearm_data for vPMD. */ @@ -231,19 +178,21 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) rte_mbuf_refcnt_set(mbuf_init, 1); mbuf_init->nb_segs = 1; mbuf_init->port = rxq->port_id; + if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) + mbuf_init->ol_flags = EXT_ATTACHED_MBUF; /* * prevent compiler reordering: * rearm_data covers previous fields. */ rte_compiler_barrier(); rxq->mbuf_initializer = - *(uint64_t *)&mbuf_init->rearm_data; + *(rte_xmm_t *)&mbuf_init->rearm_data; /* Padding with a fake mbuf for vectorized Rx. 
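 * The vector Rx path processes MLX5_VPMD_DESCS_PER_LOOP descriptors
 * per iteration and may read that far past the ring tail, so the extra
 * entries are pointed at a per-queue fake mbuf; the last batch then
 * needs no bounds checks.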
*/ for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; } DRV_LOG(DEBUG, - "port %u Rx queue %u allocated and configured %u segments" + "port %u SPRQ queue %u allocated and configured %u segments" " (max %u packets)", PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); @@ -256,7 +205,7 @@ error: rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); (*rxq_ctrl->rxq.elts)[i] = NULL; } - DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything", PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx); rte_errno = err; /* Restore rte_errno. */ return -rte_errno; @@ -274,8 +223,15 @@ error: int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) { - return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? - rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl); + int ret = 0; + + /** + * For MPRQ we need to allocate both MPRQ buffers + * for WQEs and simple mbufs for vector processing. + */ + if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) + ret = rxq_alloc_elts_mprq(rxq_ctrl); + return (ret || rxq_alloc_elts_sprq(rxq_ctrl)); } /** @@ -290,11 +246,10 @@ rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; uint16_t i; - DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs", - rxq->port_id, rxq->idx); + DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs", + rxq->port_id, rxq->idx, (1u << rxq->elts_n)); if (rxq->mprq_bufs == NULL) return; - assert(mlx5_rxq_check_vec_support(rxq) < 0); for (i = 0; (i != (1u << rxq->elts_n)); ++i) { if ((*rxq->mprq_bufs)[i] != NULL) mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]); @@ -316,25 +271,29 @@ static void rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; - const uint16_t q_n = (1 << rxq->elts_n); + const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + (1 << rxq->elts_n) * (1 << rxq->strd_num_n) : + (1 << rxq->elts_n); const uint16_t q_mask = q_n - 1; - uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); + uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq->elts_ci : rxq->rq_ci; + uint16_t used = q_n - (elts_ci - rxq->rq_pi); uint16_t i; - DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs", - PORT_ID(rxq_ctrl->priv), rxq->idx); + DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs", + PORT_ID(rxq_ctrl->priv), rxq->idx, q_n); if (rxq->elts == NULL) return; /** - * Some mbuf in the Ring belongs to the application. They cannot be - * freed. + * Some mbuf in the Ring belongs to the application. + * They cannot be freed. */ if (mlx5_rxq_check_vec_support(rxq) > 0) { for (i = 0; i < used; ++i) - (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; - rxq->rq_pi = rxq->rq_ci; + (*rxq->elts)[(elts_ci + i) & q_mask] = NULL; + rxq->rq_pi = elts_ci; } - for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + for (i = 0; i != q_n; ++i) { if ((*rxq->elts)[i] != NULL) rte_pktmbuf_free_seg((*rxq->elts)[i]); (*rxq->elts)[i] = NULL; @@ -350,10 +309,13 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) static void rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) { + /* + * For MPRQ we need to allocate both MPRQ buffers + * for WQEs and simple mbufs for vector processing. 
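+	 * Correspondingly, both kinds of elements have to be released
+	 * here: rxq_free_elts_mprq() drops the MPRQ buffers when MPRQ
+	 * is enabled, and rxq_free_elts_sprq() always runs to drop the
+	 * mbufs.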
+ */ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) rxq_free_elts_mprq(rxq_ctrl); - else - rxq_free_elts_sprq(rxq_ctrl); + rxq_free_elts_sprq(rxq_ctrl); } /** @@ -375,9 +337,10 @@ mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_RSS_HASH); + if (!config->mprq.enabled) + offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT; if (config->hw_fcs_strip) offloads |= DEV_RX_OFFLOAD_KEEP_CRC; - if (config->hw_csum) offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -428,7 +391,217 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) return -rte_errno; } rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); + return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1); +} + +/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */ +static void +rxq_sync_cq(struct mlx5_rxq_data *rxq) +{ + const uint16_t cqe_n = 1 << rxq->cqe_n; + const uint16_t cqe_mask = cqe_n - 1; + volatile struct mlx5_cqe *cqe; + int ret, i; + + i = cqe_n; + do { + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; + ret = check_cqe(cqe, cqe_n, rxq->cq_ci); + if (ret == MLX5_CQE_STATUS_HW_OWN) + break; + if (ret == MLX5_CQE_STATUS_ERR) { + rxq->cq_ci++; + continue; + } + MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN); + if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) { + rxq->cq_ci++; + continue; + } + /* Compute the next non compressed CQE. */ + rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt); + + } while (--i); + /* Move all CQEs to HW ownership, including possible MiniCQEs. */ + for (i = 0; i < cqe_n; i++) { + cqe = &(*rxq->cqes)[i]; + cqe->op_own = MLX5_CQE_INVALIDATE; + } + /* Resync CQE and WQE (WQ in RESET state). */ + rte_io_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + rte_io_wmb(); + *rxq->rq_db = rte_cpu_to_be_32(0); + rte_io_wmb(); +} + +/** + * Rx queue stop. Device queue goes to the RESET state, + * all involved mbufs are freed from WQ. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + int ret; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST); + if (ret) { + DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s", + strerror(errno)); + rte_errno = errno; + return ret; + } + /* Remove all processes CQEs. */ + rxq_sync_cq(rxq); + /* Free all involved mbufs. */ + rxq_free_elts(rxq_ctrl); + /* Set the actual queue state. */ + dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +/** + * Rx queue stop. Device queue goes to the RESET state, + * all involved mbufs are freed from WQ. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
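+ *
+ * A sketch of the intended call flow (names as used in this patch):
+ *
+ *   rte_eth_dev_rx_queue_stop(port_id, idx)
+ *     -> mlx5_rx_queue_stop()             burst-mode/process checks
+ *        -> mlx5_rx_queue_stop_primary()  RDY->RST, rxq_sync_cq(),
+ *                                         rxq_free_elts()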
+ */ +int +mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx) +{ + eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; + int ret; + + if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) { + DRV_LOG(ERR, "Hairpin queue can't be stopped"); + rte_errno = EINVAL; + return -EINVAL; + } + if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED) + return 0; + /* + * Vectorized Rx burst requires the CQ and RQ indices + * synchronized, that might be broken on RQ restart + * and cause Rx malfunction, so queue stopping is + * not supported if vectorized Rx burst is engaged. + * The routine pointer depends on the process + * type, should perform check there. + */ + if (pkt_burst == mlx5_rx_burst_vec) { + DRV_LOG(ERR, "Rx queue stop is not supported " + "for vectorized Rx"); + rte_errno = EINVAL; + return -EINVAL; + } + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + ret = mlx5_mp_os_req_queue_control(dev, idx, + MLX5_MP_REQ_QUEUE_RX_STOP); + } else { + ret = mlx5_rx_queue_stop_primary(dev, idx); + } + return ret; +} + +/** + * Rx queue start. Device queue goes to the ready state, + * all required mbufs are allocated and WQ is replenished. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq, struct mlx5_rxq_ctrl, rxq); + int ret; + + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* Allocate needed buffers. */ + ret = rxq_alloc_elts(rxq_ctrl); + if (ret) { + DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ"); + rte_errno = errno; + return ret; + } + rte_io_wmb(); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + rte_io_wmb(); + /* Reset RQ consumer before moving queue to READY state. */ + *rxq->rq_db = rte_cpu_to_be_32(0); + rte_io_wmb(); + ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY); + if (ret) { + DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s", + strerror(errno)); + rte_errno = errno; + return ret; + } + /* Reinitialize RQ - set WQEs. */ + mlx5_rxq_initialize(rxq); + rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; + /* Set actual queue state. */ + dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; +} + +/** + * Rx queue start. Device queue goes to the ready state, + * all required mbufs are allocated and WQ is replenished. + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx) +{ + int ret; + + if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) { + DRV_LOG(ERR, "Hairpin queue can't be started"); + rte_errno = EINVAL; + return -EINVAL; + } + if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + ret = mlx5_mp_os_req_queue_control(dev, idx, + MLX5_MP_REQ_QUEUE_RX_START); + } else { + ret = mlx5_rx_queue_start_primary(dev, idx); + } + return ret; } /** @@ -445,19 +618,19 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc) +mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc) { struct mlx5_priv *priv = dev->data->dev_private; - if (!rte_is_power_of_2(desc)) { - desc = 1 << log2above(desc); + if (!rte_is_power_of_2(*desc)) { + *desc = 1 << log2above(*desc); DRV_LOG(WARNING, "port %u increased number of descriptors in Rx queue %u" " to the next power of two (%d)", - dev->data->port_id, idx, desc); + dev->data->port_id, idx, *desc); } DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", - dev->data->port_id, idx, desc); + dev->data->port_id, idx, *desc); if (idx >= priv->rxqs_n) { DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", dev->data->port_id, idx, priv->rxqs_n); @@ -501,12 +674,40 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); + struct rte_eth_rxseg_split *rx_seg = + (struct rte_eth_rxseg_split *)conf->rx_seg; + struct rte_eth_rxseg_split rx_single = {.mp = mp}; + uint16_t n_seg = conf->rx_nseg; int res; - res = mlx5_rx_queue_pre_setup(dev, idx, desc); + if (mp) { + /* + * The parameters should be checked on rte_eth_dev layer. + * If mp is specified it means the compatible configuration + * without buffer split feature tuning. + */ + rx_seg = &rx_single; + n_seg = 1; + } + if (n_seg > 1) { + uint64_t offloads = conf->offloads | + dev->data->dev_conf.rxmode.offloads; + + /* The offloads should be checked on rte_eth_dev layer. */ + MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER); + if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { + DRV_LOG(ERR, "port %u queue index %u split " + "offload not configured", + dev->data->port_id, idx); + rte_errno = ENOSPC; + return -rte_errno; + } + MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG); + } + res = mlx5_rx_queue_pre_setup(dev, idx, &desc); if (res) return res; - rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg); if (!rxq_ctrl) { DRV_LOG(ERR, "port %u unable to allocate queue index %u", dev->data->port_id, idx); @@ -544,18 +745,38 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, container_of(rxq, struct mlx5_rxq_ctrl, rxq); int res; - res = mlx5_rx_queue_pre_setup(dev, idx, desc); + res = mlx5_rx_queue_pre_setup(dev, idx, &desc); if (res) return res; - if (hairpin_conf->peer_count != 1 || - hairpin_conf->peers[0].port != dev->data->port_id || - hairpin_conf->peers[0].queue >= priv->txqs_n) { - DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u " - " invalid hairpind configuration", dev->data->port_id, - idx); + if (hairpin_conf->peer_count != 1) { rte_errno = EINVAL; + DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u" + " peer count is %u", dev->data->port_id, + idx, hairpin_conf->peer_count); return -rte_errno; } + if (hairpin_conf->peers[0].port == dev->data->port_id) { + if (hairpin_conf->peers[0].queue >= priv->txqs_n) { + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue" + " index %u, Tx %u is larger than %u", + dev->data->port_id, idx, + hairpin_conf->peers[0].queue, priv->txqs_n); + return -rte_errno; + } + } else { + if (hairpin_conf->manual_bind == 0 || + hairpin_conf->tx_explicit == 0) { + rte_errno = EINVAL; + DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue" + " index %u peer port %u with attributes %u 
%u", + dev->data->port_id, idx, + hairpin_conf->peers[0].port, + hairpin_conf->manual_bind, + hairpin_conf->tx_explicit); + return -rte_errno; + } + } rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf); if (!rxq_ctrl) { DRV_LOG(ERR, "port %u unable to allocate queue index %u", @@ -593,114 +814,6 @@ mlx5_rx_queue_release(void *dpdk_rxq) mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx); } -/** - * Get an Rx queue Verbs/DevX object. - * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Rx queue array - * - * @return - * The Verbs/DevX object if it exists. - */ -static struct mlx5_rxq_obj * -mlx5_rxq_obj_get(struct rte_eth_dev *dev, uint16_t idx) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; - struct mlx5_rxq_ctrl *rxq_ctrl; - - if (idx >= priv->rxqs_n) - return NULL; - if (!rxq_data) - return NULL; - rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - if (rxq_ctrl->obj) - rte_atomic32_inc(&rxq_ctrl->obj->refcnt); - return rxq_ctrl->obj; -} - -/** - * Release the resources allocated for an RQ DevX object. - * - * @param rxq_ctrl - * DevX Rx queue object. - */ -static void -rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl) -{ - if (rxq_ctrl->rxq.wqes) { - rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes); - rxq_ctrl->rxq.wqes = NULL; - } - if (rxq_ctrl->wq_umem) { - mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem); - rxq_ctrl->wq_umem = NULL; - } -} - -/** - * Release an Rx hairpin related resources. - * - * @param rxq_obj - * Hairpin Rx queue object. - */ -static void -rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj) -{ - struct mlx5_devx_modify_rq_attr rq_attr = { 0 }; - - assert(rxq_obj); - rq_attr.state = MLX5_RQC_STATE_RST; - rq_attr.rq_state = MLX5_RQC_STATE_RDY; - mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr); - claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); -} - -/** - * Release an Rx verbs/DevX queue object. - * - * @param rxq_obj - * Verbs/DevX Rx queue object. - * - * @return - * 1 while a reference on it exists, 0 when freed. - */ -static int -mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj) -{ - assert(rxq_obj); - if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) - assert(rxq_obj->wq); - assert(rxq_obj->cq); - if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) { - switch (rxq_obj->type) { - case MLX5_RXQ_OBJ_TYPE_IBV: - rxq_free_elts(rxq_obj->rxq_ctrl); - claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq)); - claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq)); - break; - case MLX5_RXQ_OBJ_TYPE_DEVX_RQ: - rxq_free_elts(rxq_obj->rxq_ctrl); - claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); - rxq_release_rq_resources(rxq_obj->rxq_ctrl); - claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq)); - break; - case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN: - rxq_obj_hairpin_release(rxq_obj); - break; - } - if (rxq_obj->channel) - claim_zero(mlx5_glue->destroy_comp_channel - (rxq_obj->channel)); - LIST_REMOVE(rxq_obj, next); - rte_free(rxq_obj); - return 0; - } - return 1; -} - /** * Allocate queue vector and fill epoll fd list for Rx interrupts. * @@ -720,10 +833,15 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) unsigned int count = 0; struct rte_intr_handle *intr_handle = dev->intr_handle; + /* Representor shares dev->intr_handle with PF. 
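+	 * Allocating or freeing the interrupt vector here would clobber
+	 * the PF state, so both the enable path below and the disable
+	 * path return early for representors.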
*/ + if (priv->representor) + return 0; if (!dev->data->dev_conf.intr_conf.rxq) return 0; mlx5_rx_intr_vec_disable(dev); - intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); + intr_handle->intr_vec = mlx5_malloc(0, + n * sizeof(intr_handle->intr_vec[0]), + 0, SOCKET_ID_ANY); if (intr_handle->intr_vec == NULL) { DRV_LOG(ERR, "port %u failed to allocate memory for interrupt" @@ -735,17 +853,20 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { /* This rxq obj must not be released in this function. */ - struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i); - int fd; - int flags; + struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i); + struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL; int rc; /* Skip queues that cannot request interrupts. */ - if (!rxq_obj || !rxq_obj->channel) { + if (!rxq_obj || (!rxq_obj->ibv_channel && + !rxq_obj->devx_channel)) { /* Use invalid intr_vec[] index to disable entry. */ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID; + /* Decrease the rxq_ctrl's refcnt */ + if (rxq_ctrl) + mlx5_rxq_release(dev, i); continue; } if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { @@ -758,21 +879,19 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) rte_errno = ENOMEM; return -rte_errno; } - fd = rxq_obj->channel->fd; - flags = fcntl(fd, F_GETFL); - rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); + rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd); if (rc < 0) { rte_errno = errno; DRV_LOG(ERR, "port %u failed to make Rx interrupt file" " descriptor %d non-blocking for queue index" " %d", - dev->data->port_id, fd, i); + dev->data->port_id, rxq_obj->fd, i); mlx5_rx_intr_vec_disable(dev); return -rte_errno; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; - intr_handle->efds[count] = fd; + intr_handle->efds[count] = rxq_obj->fd; count++; } if (!count) @@ -797,14 +916,14 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); + /* Representor shares dev->intr_handle with PF. */ + if (priv->representor) + return; if (!dev->data->dev_conf.intr_conf.rxq) return; if (!intr_handle->intr_vec) goto free; for (i = 0; i != n; ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; - struct mlx5_rxq_data *rxq_data; - if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID) continue; @@ -812,15 +931,12 @@ mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) * Need to access directly the queue to release the reference * kept in mlx5_rx_intr_vec_enable(). 
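	 * Entries tagged with RTE_INTR_VEC_RXTX_OFFSET +
	 * RTE_MAX_RXTX_INTR_VEC_ID never took a reference (the queue
	 * could not request interrupts) and are skipped here.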
*/ - rxq_data = (*priv->rxqs)[i]; - rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - if (rxq_ctrl->obj) - mlx5_rxq_obj_release(rxq_ctrl->obj); + mlx5_rxq_release(dev, i); } free: rte_intr_free_epoll_fd(intr_handle); if (intr_handle->intr_vec) - free(intr_handle->intr_vec); + mlx5_free(intr_handle->intr_vec); intr_handle->nb_efd = 0; intr_handle->intr_vec = NULL; } @@ -844,7 +960,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK; doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK); doorbell = (uint64_t)doorbell_hi << 32; - doorbell |= rxq->cqn; + doorbell |= rxq->cqn; rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); mlx5_uar_write64(rte_cpu_to_be_64(doorbell), cq_db_reg, rxq->uar_lock_cq); @@ -864,28 +980,23 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - rxq_data = (*priv->rxqs)[rx_queue_id]; - if (!rxq_data) { - rte_errno = EINVAL; - return -rte_errno; - } - rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id); + if (!rxq_ctrl) + goto error; if (rxq_ctrl->irq) { - struct mlx5_rxq_obj *rxq_obj; - - rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id); - if (!rxq_obj) { - rte_errno = EINVAL; - return -rte_errno; + if (!rxq_ctrl->obj) { + mlx5_rxq_release(dev, rx_queue_id); + goto error; } - mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); - mlx5_rxq_obj_release(rxq_obj); + mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn); } + mlx5_rxq_release(dev, rx_queue_id); return 0; +error: + rte_errno = EINVAL; + return -rte_errno; } /** @@ -903,587 +1014,40 @@ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - struct mlx5_rxq_obj *rxq_obj = NULL; - struct ibv_cq *ev_cq; - void *ev_ctx; - int ret; + int ret = 0; - rxq_data = (*priv->rxqs)[rx_queue_id]; - if (!rxq_data) { + rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id); + if (!rxq_ctrl) { rte_errno = EINVAL; return -rte_errno; } - rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - if (!rxq_ctrl->irq) - return 0; - rxq_obj = mlx5_rxq_obj_get(dev, rx_queue_id); - if (!rxq_obj) { - rte_errno = EINVAL; - return -rte_errno; + if (!rxq_ctrl->obj) + goto error; + if (rxq_ctrl->irq) { + ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj); + if (ret < 0) + goto error; + rxq_ctrl->rxq.cq_arm_sn++; } - ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx); - if (ret || ev_cq != rxq_obj->cq) { - rte_errno = EINVAL; - goto exit; - } - rxq_data->cq_arm_sn++; - mlx5_glue->ack_cq_events(rxq_obj->cq, 1); - mlx5_rxq_obj_release(rxq_obj); + mlx5_rxq_release(dev, rx_queue_id); return 0; -exit: - ret = rte_errno; /* Save rte_errno before cleanup. */ - if (rxq_obj) - mlx5_rxq_obj_release(rxq_obj); - DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d", - dev->data->port_id, rx_queue_id); - rte_errno = ret; /* Restore rte_errno. */ - return -rte_errno; -} - -/** - * Create a CQ Verbs object. - * - * @param dev - * Pointer to Ethernet device. - * @param priv - * Pointer to device private data. - * @param rxq_data - * Pointer to Rx queue data. - * @param cqe_n - * Number of CQEs in CQ. - * @param rxq_obj - * Pointer to Rx queue object data. 
- * - * @return - * The Verbs object initialised, NULL otherwise and rte_errno is set. - */ -static struct ibv_cq * -mlx5_ibv_cq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv, - struct mlx5_rxq_data *rxq_data, - unsigned int cqe_n, struct mlx5_rxq_obj *rxq_obj) -{ - struct { - struct ibv_cq_init_attr_ex ibv; - struct mlx5dv_cq_init_attr mlx5; - } cq_attr; - - cq_attr.ibv = (struct ibv_cq_init_attr_ex){ - .cqe = cqe_n, - .channel = rxq_obj->channel, - .comp_mask = 0, - }; - cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){ - .comp_mask = 0, - }; - if (priv->config.cqe_comp && !rxq_data->hw_timestamp && - !rxq_data->lro) { - cq_attr.mlx5.comp_mask |= - MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; -#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT - cq_attr.mlx5.cqe_comp_res_format = - mlx5_rxq_mprq_enabled(rxq_data) ? - MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX : - MLX5DV_CQE_RES_FORMAT_HASH; -#else - cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; -#endif - /* - * For vectorized Rx, it must not be doubled in order to - * make cq_ci and rq_ci aligned. - */ - if (mlx5_rxq_check_vec_support(rxq_data) < 0) - cq_attr.ibv.cqe *= 2; - } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) { - DRV_LOG(DEBUG, - "port %u Rx CQE compression is disabled for HW" - " timestamp", - dev->data->port_id); - } else if (priv->config.cqe_comp && rxq_data->lro) { - DRV_LOG(DEBUG, - "port %u Rx CQE compression is disabled for LRO", - dev->data->port_id); - } -#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD - if (priv->config.cqe_pad) { - cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS; - cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD; - } -#endif - return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx, - &cq_attr.ibv, - &cq_attr.mlx5)); -} - -/** - * Create a WQ Verbs object. - * - * @param dev - * Pointer to Ethernet device. - * @param priv - * Pointer to device private data. - * @param rxq_data - * Pointer to Rx queue data. - * @param idx - * Queue index in DPDK Rx queue array - * @param wqe_n - * Number of WQEs in WQ. - * @param rxq_obj - * Pointer to Rx queue object data. - * - * @return - * The Verbs object initialised, NULL otherwise and rte_errno is set. - */ -static struct ibv_wq * -mlx5_ibv_wq_new(struct rte_eth_dev *dev, struct mlx5_priv *priv, - struct mlx5_rxq_data *rxq_data, uint16_t idx, - unsigned int wqe_n, struct mlx5_rxq_obj *rxq_obj) -{ - struct { - struct ibv_wq_init_attr ibv; -#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT - struct mlx5dv_wq_init_attr mlx5; -#endif - } wq_attr; - - wq_attr.ibv = (struct ibv_wq_init_attr){ - .wq_context = NULL, /* Could be useful in the future. */ - .wq_type = IBV_WQT_RQ, - /* Max number of outstanding WRs. */ - .max_wr = wqe_n >> rxq_data->sges_n, - /* Max number of scatter/gather elements in a WR. */ - .max_sge = 1 << rxq_data->sges_n, - .pd = priv->sh->pd, - .cq = rxq_obj->cq, - .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0, - .create_flags = (rxq_data->vlan_strip ? - IBV_WQ_FLAGS_CVLAN_STRIPPING : 0), - }; - /* By default, FCS (CRC) is stripped by hardware. 
*/ - if (rxq_data->crc_present) { - wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; - wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; - } - if (priv->config.hw_padding) { -#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) - wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; - wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; -#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) - wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING; - wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; -#endif - } -#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT - wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){ - .comp_mask = 0, - }; - if (mlx5_rxq_mprq_enabled(rxq_data)) { - struct mlx5dv_striding_rq_init_attr *mprq_attr = - &wq_attr.mlx5.striding_rq_attrs; - - wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ; - *mprq_attr = (struct mlx5dv_striding_rq_init_attr){ - .single_stride_log_num_of_bytes = rxq_data->strd_sz_n, - .single_wqe_log_num_of_strides = rxq_data->strd_num_n, - .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT, - }; - } - rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv, - &wq_attr.mlx5); -#else - rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv); -#endif - if (rxq_obj->wq) { - /* - * Make sure number of WRs*SGEs match expectations since a queue - * cannot allocate more than "desc" buffers. - */ - if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) || - wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) { - DRV_LOG(ERR, - "port %u Rx queue %u requested %u*%u but got" - " %u*%u WRs*SGEs", - dev->data->port_id, idx, - wqe_n >> rxq_data->sges_n, - (1 << rxq_data->sges_n), - wq_attr.ibv.max_wr, wq_attr.ibv.max_sge); - claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq)); - rxq_obj->wq = NULL; - rte_errno = EINVAL; - } - } - return rxq_obj->wq; -} - -/** - * Fill common fields of create RQ attributes structure. - * - * @param rxq_data - * Pointer to Rx queue data. - * @param cqn - * CQ number to use with this RQ. - * @param rq_attr - * RQ attributes structure to fill.. - */ -static void -mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn, - struct mlx5_devx_create_rq_attr *rq_attr) -{ - rq_attr->state = MLX5_RQC_STATE_RST; - rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1; - rq_attr->cqn = cqn; - rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0; -} - -/** - * Fill common fields of DevX WQ attributes structure. - * - * @param priv - * Pointer to device private data. - * @param rxq_ctrl - * Pointer to Rx queue control structure. - * @param wq_attr - * WQ attributes structure to fill.. - */ -static void -mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl, - struct mlx5_devx_wq_attr *wq_attr) -{ - wq_attr->end_padding_mode = priv->config.cqe_pad ? - MLX5_WQ_END_PAD_MODE_ALIGN : - MLX5_WQ_END_PAD_MODE_NONE; - wq_attr->pd = priv->sh->pdn; - wq_attr->dbr_addr = rxq_ctrl->dbr_offset; - wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id; - wq_attr->dbr_umem_valid = 1; - wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id; - wq_attr->wq_umem_valid = 1; -} - -/** - * Create a RQ object using DevX. - * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Rx queue array - * @param cqn - * CQ number to use with this RQ. - * - * @return - * The DevX object initialised, NULL otherwise and rte_errno is set. 
- */ -static struct mlx5_devx_obj * -mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - struct mlx5_devx_create_rq_attr rq_attr; - uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n); - uint32_t wq_size = 0; - uint32_t wqe_size = 0; - uint32_t log_wqe_size = 0; - void *buf = NULL; - struct mlx5_devx_obj *rq; - - memset(&rq_attr, 0, sizeof(rq_attr)); - /* Fill RQ attributes. */ - rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE; - rq_attr.flush_in_error_en = 1; - mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr); - /* Fill WQ attributes for this RQ. */ - if (mlx5_rxq_mprq_enabled(rxq_data)) { - rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ; - /* - * Number of strides in each WQE: - * 512*2^single_wqe_log_num_of_strides. - */ - rq_attr.wq_attr.single_wqe_log_num_of_strides = - rxq_data->strd_num_n - - MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; - /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */ - rq_attr.wq_attr.single_stride_log_num_of_bytes = - rxq_data->strd_sz_n - - MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; - wqe_size = sizeof(struct mlx5_wqe_mprq); - } else { - rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; - wqe_size = sizeof(struct mlx5_wqe_data_seg); - } - log_wqe_size = log2above(wqe_size) + rxq_data->sges_n; - rq_attr.wq_attr.log_wq_stride = log_wqe_size; - rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n; - /* Calculate and allocate WQ memory space. */ - wqe_size = 1 << log_wqe_size; /* round up power of two.*/ - wq_size = wqe_n * wqe_size; - buf = rte_calloc_socket(__func__, 1, wq_size, MLX5_WQE_BUF_ALIGNMENT, - rxq_ctrl->socket); - if (!buf) - return NULL; - rxq_data->wqes = buf; - rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, - buf, wq_size, 0); - if (!rxq_ctrl->wq_umem) { - rte_free(buf); - return NULL; - } - mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr); - rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket); - if (!rq) - rxq_release_rq_resources(rxq_ctrl); - return rq; -} - -/** - * Create the Rx hairpin queue object. - * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Rx queue array - * - * @return - * The hairpin DevX object initialised, NULL otherwise and rte_errno is set. 
- */ -static struct mlx5_rxq_obj * -mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - struct mlx5_devx_create_rq_attr attr = { 0 }; - struct mlx5_rxq_obj *tmpl = NULL; - int ret = 0; - - assert(rxq_data); - assert(!rxq_ctrl->obj); - tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, - rxq_ctrl->socket); - if (!tmpl) { - DRV_LOG(ERR, - "port %u Rx queue %u cannot allocate verbs resources", - dev->data->port_id, rxq_data->idx); - rte_errno = ENOMEM; - goto error; - } - tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN; - tmpl->rxq_ctrl = rxq_ctrl; - attr.hairpin = 1; - /* Workaround for hairpin startup */ - attr.wq_attr.log_hairpin_num_packets = log2above(32); - /* Workaround for packets larger than 1KB */ - attr.wq_attr.log_hairpin_data_sz = - priv->config.hca_attr.log_max_hairpin_wq_data_sz; - tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr, - rxq_ctrl->socket); - if (!tmpl->rq) { - DRV_LOG(ERR, - "port %u Rx hairpin queue %u can't create rq object", - dev->data->port_id, idx); - rte_errno = errno; - goto error; - } - DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, - idx, (void *)&tmpl); - rte_atomic32_inc(&tmpl->refcnt); - LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next); - priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; - return tmpl; error: - ret = rte_errno; /* Save rte_errno before cleanup. */ - if (tmpl->rq) - mlx5_devx_cmd_destroy(tmpl->rq); - rte_errno = ret; /* Restore rte_errno. */ - return NULL; -} - -/** - * Create the Rx queue Verbs/DevX object. - * - * @param dev - * Pointer to Ethernet device. - * @param idx - * Queue index in DPDK Rx queue array - * @param type - * Type of Rx queue object to create. - * - * @return - * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. - */ -struct mlx5_rxq_obj * -mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx, - enum mlx5_rxq_obj_type type) -{ - struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - struct ibv_wq_attr mod; - unsigned int cqe_n; - unsigned int wqe_n = 1 << rxq_data->elts_n; - struct mlx5_rxq_obj *tmpl = NULL; - struct mlx5dv_cq cq_info; - struct mlx5dv_rwq rwq; - int ret = 0; - struct mlx5dv_obj obj; - - assert(rxq_data); - assert(!rxq_ctrl->obj); - if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) - return mlx5_rxq_obj_hairpin_new(dev, idx); - priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE; - priv->verbs_alloc_ctx.obj = rxq_ctrl; - tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, - rxq_ctrl->socket); - if (!tmpl) { - DRV_LOG(ERR, - "port %u Rx queue %u cannot allocate verbs resources", - dev->data->port_id, rxq_data->idx); - rte_errno = ENOMEM; - goto error; - } - tmpl->type = type; - tmpl->rxq_ctrl = rxq_ctrl; - if (rxq_ctrl->irq) { - tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx); - if (!tmpl->channel) { - DRV_LOG(ERR, "port %u: comp channel creation failure", - dev->data->port_id); - rte_errno = ENOMEM; - goto error; - } - } - if (mlx5_rxq_mprq_enabled(rxq_data)) - cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + /** + * The ret variable may be EAGAIN which means the get_event function was + * called before receiving one. 
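+	 * EAGAIN is therefore passed through in rte_errno without the
+	 * warning below, while any other reason is reported as EINVAL.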
+ */ + if (ret < 0) + rte_errno = errno; else - cqe_n = wqe_n - 1; - tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl); - if (!tmpl->cq) { - DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure", - dev->data->port_id, idx); - rte_errno = ENOMEM; - goto error; - } - obj.cq.in = tmpl->cq; - obj.cq.out = &cq_info; - ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ); - if (ret) { - rte_errno = ret; - goto error; - } - if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - DRV_LOG(ERR, - "port %u wrong MLX5_CQE_SIZE environment variable" - " value: it should be set to %u", - dev->data->port_id, RTE_CACHE_LINE_SIZE); rte_errno = EINVAL; - goto error; - } - DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d", - dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr); - DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d", - dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge); - /* Allocate door-bell for types created with DevX. */ - if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) { - struct mlx5_devx_dbr_page *dbr_page; - int64_t dbr_offset; - - dbr_offset = mlx5_get_dbr(dev, &dbr_page); - if (dbr_offset < 0) - goto error; - rxq_ctrl->dbr_offset = dbr_offset; - rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id; - rxq_ctrl->dbr_umem_id_valid = 1; - rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + - (uintptr_t)rxq_ctrl->dbr_offset); - } - if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) { - tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n, - tmpl); - if (!tmpl->wq) { - DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure", - dev->data->port_id, idx); - rte_errno = ENOMEM; - goto error; - } - /* Change queue state to ready. */ - mod = (struct ibv_wq_attr){ - .attr_mask = IBV_WQ_ATTR_STATE, - .wq_state = IBV_WQS_RDY, - }; - ret = mlx5_glue->modify_wq(tmpl->wq, &mod); - if (ret) { - DRV_LOG(ERR, - "port %u Rx queue %u WQ state to IBV_WQS_RDY" - " failed", dev->data->port_id, idx); - rte_errno = ret; - goto error; - } - obj.rwq.in = tmpl->wq; - obj.rwq.out = &rwq; - ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ); - if (ret) { - rte_errno = ret; - goto error; - } - rxq_data->wqes = rwq.buf; - rxq_data->rq_db = rwq.dbrec; - } else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) { - struct mlx5_devx_modify_rq_attr rq_attr; - - memset(&rq_attr, 0, sizeof(rq_attr)); - tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn); - if (!tmpl->rq) { - DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure", - dev->data->port_id, idx); - rte_errno = ENOMEM; - goto error; - } - /* Change queue state to ready. */ - rq_attr.rq_state = MLX5_RQC_STATE_RST; - rq_attr.state = MLX5_RQC_STATE_RDY; - ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr); - if (ret) - goto error; - } - /* Fill the rings. */ - rxq_data->cqe_n = log2above(cq_info.cqe_cnt); - rxq_data->cq_db = cq_info.dbrec; - rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf; - rxq_data->cq_uar = cq_info.cq_uar; - rxq_data->cqn = cq_info.cqn; - rxq_data->cq_arm_sn = 0; - mlx5_rxq_initialize(rxq_data); - rxq_data->cq_ci = 0; - DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, - idx, (void *)&tmpl); - rte_atomic32_inc(&tmpl->refcnt); - LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next); - priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; - return tmpl; -error: - if (tmpl) { - ret = rte_errno; /* Save rte_errno before cleanup. 
*/ - if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq) - claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); - else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq) - claim_zero(mlx5_devx_cmd_destroy(tmpl->rq)); - if (tmpl->cq) - claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); - if (tmpl->channel) - claim_zero(mlx5_glue->destroy_comp_channel - (tmpl->channel)); - rte_free(tmpl); - rte_errno = ret; /* Restore rte_errno. */ - } - if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) - rxq_release_rq_resources(rxq_ctrl); - priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; - return NULL; + ret = rte_errno; /* Save rte_errno before cleanup. */ + mlx5_rxq_release(dev, rx_queue_id); + if (ret != EAGAIN) + DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d", + dev->data->port_id, rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -1524,7 +1088,7 @@ mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg, memset(_m, 0, sizeof(*buf)); buf->mp = mp; - rte_atomic16_set(&buf->refcnt, 1); + __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED); for (j = 0; j != strd_n; ++j) { shinfo = &buf->shinfos[j]; shinfo->free_cb = mlx5_mprq_buf_free_cb; @@ -1626,7 +1190,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) if (strd_sz_n < rxq->strd_sz_n) strd_sz_n = rxq->strd_sz_n; } - assert(strd_num_n && strd_sz_n); + MLX5_ASSERT(strd_num_n && strd_sz_n); buf_len = (1 << strd_num_n) * (1 << strd_sz_n); obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) * sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM; @@ -1715,11 +1279,14 @@ exit: * * @param dev * Pointer to Ethernet device. + * @param idx + * RX queue index. * @param max_lro_size * The maximum size for LRO packet. */ static void -mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size) +mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx, + uint32_t max_lro_size) { struct mlx5_priv *priv = dev->data->dev_private; @@ -1728,13 +1295,17 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size) MLX5_MAX_TCP_HDR_OFFSET) max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET; max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE); - assert(max_lro_size >= 256u); - max_lro_size /= 256u; + MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE); + max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE; if (priv->max_lro_msg_size) priv->max_lro_msg_size = RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size); else priv->max_lro_msg_size = max_lro_size; + DRV_LOG(DEBUG, + "port %u Rx Queue %u max LRO message size adjusted to %u bytes", + dev->data->port_id, idx, + priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE); } /** @@ -1755,24 +1326,15 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size) struct mlx5_rxq_ctrl * mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) + const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; - unsigned int mb_len = rte_pktmbuf_data_room_size(mp); - unsigned int mprq_stride_size; + unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp); struct mlx5_dev_config *config = &priv->config; - unsigned int strd_headroom_en; - /* - * Always allocate extra slots, even if eventually - * the vector Rx will not be used. 
- */ - uint16_t desc_n = - desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO); - const int mprq_en = mlx5_check_mprq_support(dev) > 0; unsigned int max_rx_pkt_len = lro_on_queue ? dev->data->dev_conf.rxmode.max_lro_pkt_size : dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -1780,24 +1342,108 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, RTE_PKTMBUF_HEADROOM; unsigned int max_lro_size = 0; unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM; + const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 && + !rx_seg[0].offset && !rx_seg[0].length; + unsigned int mprq_stride_nums = config->mprq.stride_num_n ? + config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N; + unsigned int mprq_stride_size = non_scatter_min_mbuf_size <= + (1U << config->mprq.max_stride_size_n) ? + log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N; + unsigned int mprq_stride_cap = (config->mprq.stride_num_n ? + (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) * + (config->mprq.stride_size_n ? + (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size)); + /* + * Always allocate extra slots, even if eventually + * the vector Rx will not be used. + */ + uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + const struct rte_eth_rxseg_split *qs_seg = rx_seg; + unsigned int tail_len; + + tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, + sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) + + (!!mprq_en) * + (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *), + 0, socket); + if (!tmpl) { + rte_errno = ENOMEM; + return NULL; + } + MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG); + /* + * Build the array of actual buffer offsets and lengths. + * Pad with the buffers from the last memory pool if + * needed to handle max size packets, replace zero length + * with the buffer length from the pool. + */ + tail_len = max_rx_pkt_len; + do { + struct mlx5_eth_rxseg *hw_seg = + &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n]; + uint32_t buf_len, offset, seg_len; - if (non_scatter_min_mbuf_size > mb_len && !(offloads & - DEV_RX_OFFLOAD_SCATTER)) { + /* + * For the buffers beyond descriptions offset is zero, + * the first buffer contains head room. + */ + buf_len = rte_pktmbuf_data_room_size(qs_seg->mp); + offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) + + (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM); + /* + * For the buffers beyond descriptions the length is + * pool buffer length, zero lengths are replaced with + * pool buffer length either. + */ + seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len : + qs_seg->length ? + qs_seg->length : + (buf_len - offset); + /* Check is done in long int, now overflows. */ + if (buf_len < seg_len + offset) { + DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length " + "%u/%u can't be satisfied", + dev->data->port_id, idx, + qs_seg->length, qs_seg->offset); + rte_errno = EINVAL; + goto error; + } + if (seg_len > tail_len) + seg_len = buf_len - offset; + if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) { + DRV_LOG(ERR, + "port %u too many SGEs (%u) needed to handle" + " requested maximum packet size %u, the maximum" + " supported are %u", dev->data->port_id, + tmpl->rxq.rxseg_n, max_rx_pkt_len, + MLX5_MAX_RXQ_NSEG); + rte_errno = ENOTSUP; + goto error; + } + /* Build the actual scattering element in the queue object. 
*/ + hw_seg->mp = qs_seg->mp; + MLX5_ASSERT(offset <= UINT16_MAX); + MLX5_ASSERT(seg_len <= UINT16_MAX); + hw_seg->offset = (uint16_t)offset; + hw_seg->length = (uint16_t)seg_len; + /* + * Advance the segment descriptor, the padding is the based + * on the attributes of the last descriptor. + */ + if (tmpl->rxq.rxseg_n < n_seg) + qs_seg++; + tail_len -= RTE_MIN(tail_len, seg_len); + } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n)); + MLX5_ASSERT(tmpl->rxq.rxseg_n && + tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG); + if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) { DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not" " configured and no enough mbuf space(%u) to contain " "the maximum RX packet length(%u) with head-room(%u)", dev->data->port_id, idx, mb_len, max_rx_pkt_len, RTE_PKTMBUF_HEADROOM); rte_errno = ENOSPC; - return NULL; - } - tmpl = rte_calloc_socket("RXQ", 1, - sizeof(*tmpl) + - desc_n * sizeof(struct rte_mbuf *), - 0, socket); - if (!tmpl) { - rte_errno = ENOMEM; - return NULL; + goto error; } tmpl->type = MLX5_RXQ_TYPE_STANDARD; if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh, @@ -1808,42 +1454,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, tmpl->socket = socket; if (dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; - /* - * LRO packet may consume all the stride memory, hence we cannot - * guaranty head-room near the packet memory in the stride. - * In this case scatter is, for sure, enabled and an empty mbuf may be - * added in the start for the head-room. - */ - if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 && - non_scatter_min_mbuf_size > mb_len) { - strd_headroom_en = 0; - mprq_stride_size = RTE_MIN(max_rx_pkt_len, - 1u << config->mprq.max_stride_size_n); - } else { - strd_headroom_en = 1; - mprq_stride_size = non_scatter_min_mbuf_size; - } /* * This Rx queue can be configured as a Multi-Packet RQ if all of the * following conditions are met: * - MPRQ is enabled. * - The number of descs is more than the number of strides. - * - max_rx_pkt_len plus overhead is less than the max size of a - * stride. + * - max_rx_pkt_len plus overhead is less than the max size + * of a stride or mprq_stride_size is specified by a user. + * Need to make sure that there are enough strides to encap + * the maximum packet size in case mprq_stride_size is set. * Otherwise, enable Rx scatter if necessary. */ - if (mprq_en && - desc > (1U << config->mprq.stride_num_n) && - mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) { + if (mprq_en && desc > (1U << mprq_stride_nums) && + (non_scatter_min_mbuf_size <= + (1U << config->mprq.max_stride_size_n) || + (config->mprq.stride_size_n && + non_scatter_min_mbuf_size <= mprq_stride_cap))) { /* TODO: Rx scatter isn't supported yet. */ tmpl->rxq.sges_n = 0; /* Trim the number of descs needed. */ - desc >>= config->mprq.stride_num_n; - tmpl->rxq.strd_num_n = config->mprq.stride_num_n; - tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size), - config->mprq.min_stride_size_n); + desc >>= mprq_stride_nums; + tmpl->rxq.strd_num_n = config->mprq.stride_num_n ? + config->mprq.stride_num_n : mprq_stride_nums; + tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ? 
+ config->mprq.stride_size_n : mprq_stride_size; tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; - tmpl->rxq.strd_headroom_en = strd_headroom_en; + tmpl->rxq.strd_scatter_en = + !!(offloads & DEV_RX_OFFLOAD_SCATTER); tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size, config->mprq.max_memcpy_len); max_lro_size = RTE_MIN(max_rx_pkt_len, @@ -1854,11 +1491,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, " strd_num_n = %u, strd_sz_n = %u", dev->data->port_id, idx, tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); - } else if (max_rx_pkt_len <= first_mb_free_size) { + } else if (tmpl->rxq.rxseg_n == 1) { + MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size); tmpl->rxq.sges_n = 0; max_lro_size = max_rx_pkt_len; } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { - unsigned int size = non_scatter_min_mbuf_size; unsigned int sges_n; if (lro_on_queue && first_mb_free_size < @@ -1873,7 +1510,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, * Determine the number of SGEs needed for a full packet * and round it to the next power of two. */ - sges_n = log2above((size / mb_len) + !!(size % mb_len)); + sges_n = log2above(tmpl->rxq.rxseg_n); if (sges_n > MLX5_MAX_LOG_RQ_SEGS) { DRV_LOG(ERR, "port %u too many SGEs (%u) needed to handle" @@ -1887,14 +1524,24 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, tmpl->rxq.sges_n = sges_n; max_lro_size = max_rx_pkt_len; } - if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) + if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) DRV_LOG(WARNING, - "port %u MPRQ is requested but cannot be enabled" - " (requested: desc = %u, stride_sz = %u," - " supported: min_stride_num = %u, max_stride_sz = %u).", - dev->data->port_id, desc, mprq_stride_size, - (1 << config->mprq.stride_num_n), - (1 << config->mprq.max_stride_size_n)); + "port %u MPRQ is requested but cannot be enabled\n" + " (requested: pkt_sz = %u, desc_num = %u," + " rxq_num = %u, stride_sz = %u, stride_num = %u\n" + " supported: min_rxqs_num = %u," + " min_stride_sz = %u, max_stride_sz = %u).", + dev->data->port_id, non_scatter_min_mbuf_size, + desc, priv->rxqs_n, + config->mprq.stride_size_n ? + (1U << config->mprq.stride_size_n) : + (1U << mprq_stride_size), + config->mprq.stride_num_n ? + (1U << config->mprq.stride_num_n) : + (1U << mprq_stride_nums), + config->mprq.min_rxqs_num, + (1U << config->mprq.min_stride_size_n), + (1U << config->mprq.max_stride_size_n)); DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { @@ -1907,10 +1554,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, rte_errno = EINVAL; goto error; } - mlx5_max_lro_msg_size_adjust(dev, max_lro_size); + mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size); /* Toggle RX checksum offload if hardware supports it. */ tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM); + /* Configure Rx timestamp. */ tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP); + tmpl->rxq.timestamp_rx_flag = 0; + if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register( + &tmpl->rxq.timestamp_offset, + &tmpl->rxq.timestamp_rx_flag) != 0) { + DRV_LOG(ERR, "Cannot register Rx timestamp field/flag"); + goto error; + } /* Configure VLAN stripping. */ tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. 
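 */

/*
 * Worked example for the MPRQ branch above, with hypothetical numbers:
 * desc = 512 and no stride devargs make mprq_stride_nums default to
 * MLX5_MPRQ_STRIDE_NUM_N (e.g. 6), so desc >>= 6 leaves 8 multi-packet
 * WQEs of 64 strides each, and a 2048B minimal mbuf size selects
 * log2above(2048) = 11, i.e. 2KB strides.
 */

/*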
*/ @@ -1949,21 +1604,24 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; - tmpl->rxq.mp = mp; + tmpl->rxq.mp = rx_seg[0].mp; tmpl->rxq.elts_n = log2above(desc); tmpl->rxq.rq_repl_thresh = - MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n); + MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n); tmpl->rxq.elts = - (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); + (struct rte_mbuf *(*)[desc_n])(tmpl + 1); + tmpl->rxq.mprq_bufs = + (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n); #ifndef RTE_ARCH_64 - tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq; + tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq; #endif tmpl->rxq.idx = idx; - rte_atomic32_inc(&tmpl->refcnt); + __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: - rte_free(tmpl); + mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh); + mlx5_free(tmpl); return NULL; } @@ -1989,7 +1647,8 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; - tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl), 0, SOCKET_ID_ANY); + tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0, + SOCKET_ID_ANY); if (!tmpl) { rte_errno = ENOMEM; return NULL; @@ -2005,7 +1664,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 }; tmpl->hairpin_conf = *hairpin_conf; tmpl->rxq.idx = idx; - rte_atomic32_inc(&tmpl->refcnt); + __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; } @@ -2025,14 +1684,12 @@ struct mlx5_rxq_ctrl * mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; - if ((*priv->rxqs)[idx]) { - rxq_ctrl = container_of((*priv->rxqs)[idx], - struct mlx5_rxq_ctrl, - rxq); - mlx5_rxq_obj_get(dev, idx); - rte_atomic32_inc(&rxq_ctrl->refcnt); + if (rxq_data) { + rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED); } return rxq_ctrl; } @@ -2057,21 +1714,26 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) if (!(*priv->rxqs)[idx]) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - assert(rxq_ctrl->priv); - if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj)) + if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1) + return 1; + if (rxq_ctrl->obj) { + priv->obj_ops.rxq_obj_release(rxq_ctrl->obj); + LIST_REMOVE(rxq_ctrl->obj, next); + mlx5_free(rxq_ctrl->obj); rxq_ctrl->obj = NULL; - if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { - if (rxq_ctrl->dbr_umem_id_valid) - claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id, - rxq_ctrl->dbr_offset)); + } + if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) { + rxq_free_elts(rxq_ctrl); + dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED; + } + if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) { if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh); LIST_REMOVE(rxq_ctrl, next); - rte_free(rxq_ctrl); + mlx5_free(rxq_ctrl); (*priv->rxqs)[idx] = NULL; - return 0; } - return 1; + return 0; } /** @@ -2115,7 +1777,7 @@ mlx5_rxq_get_type(struct 
rte_eth_dev *dev, uint16_t idx) struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; - if ((*priv->rxqs)[idx]) { + if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) { rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); @@ -2124,114 +1786,56 @@ mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx) return MLX5_RXQ_TYPE_UNDEFINED; } -/** - * Create an indirection table. +/* + * Get a Rx hairpin queue configuration. * * @param dev * Pointer to Ethernet device. - * @param queues - * Queues entering in the indirection table. - * @param queues_n - * Number of queues in the array. + * @param idx + * Rx queue index. * * @return - * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + * Pointer to the configuration if a hairpin RX queue, otherwise NULL. */ -static struct mlx5_ind_table_obj * -mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues, - uint32_t queues_n, enum mlx5_ind_tbl_type type) +const struct rte_eth_hairpin_conf * +mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_ind_table_obj *ind_tbl; - unsigned int i = 0, j = 0, k = 0; + struct mlx5_rxq_ctrl *rxq_ctrl = NULL; - ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + - queues_n * sizeof(uint16_t), 0); - if (!ind_tbl) { - rte_errno = ENOMEM; - return NULL; + if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) { + rxq_ctrl = container_of((*priv->rxqs)[idx], + struct mlx5_rxq_ctrl, + rxq); + if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) + return &rxq_ctrl->hairpin_conf; } - ind_tbl->type = type; - if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) { - const unsigned int wq_n = rte_is_power_of_2(queues_n) ? - log2above(queues_n) : - log2above(priv->config.ind_table_max_size); - struct ibv_wq *wq[1 << wq_n]; - - for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, - queues[i]); - if (!rxq) - goto error; - wq[i] = rxq->obj->wq; - ind_tbl->queues[i] = queues[i]; - } - ind_tbl->queues_n = queues_n; - /* Finalise indirection table. */ - k = i; /* Retain value of i for use in error case. */ - for (j = 0; k != (unsigned int)(1 << wq_n); ++k, ++j) - wq[k] = wq[j]; - ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table - (priv->sh->ctx, - &(struct ibv_rwq_ind_table_init_attr){ - .log_ind_tbl_size = wq_n, - .ind_tbl = wq, - .comp_mask = 0, - }); - if (!ind_tbl->ind_table) { - rte_errno = errno; - goto error; - } - } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */ - struct mlx5_devx_rqt_attr *rqt_attr = NULL; - const unsigned int rqt_n = - 1 << (rte_is_power_of_2(queues_n) ? - log2above(queues_n) : - log2above(priv->config.ind_table_max_size)); - - rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) + - rqt_n * sizeof(uint32_t), 0); - if (!rqt_attr) { - DRV_LOG(ERR, "port %u cannot allocate RQT resources", - dev->data->port_id); - rte_errno = ENOMEM; - goto error; - } - rqt_attr->rqt_max_size = priv->config.ind_table_max_size; - rqt_attr->rqt_actual_size = rqt_n; - for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, - queues[i]); - if (!rxq) - goto error; - rqt_attr->rq_list[i] = rxq->obj->rq->id; - ind_tbl->queues[i] = queues[i]; - } - k = i; /* Retain value of i for use in error case. 
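
/*
 * The reference-counting idiom this patch switches to, shown on a toy
 * object (a sketch, not driver code): rte_atomic32_inc()/dec_and_test()
 * become GCC __atomic builtins, and the driver treats these counters as
 * simple lifetime tallies, hence the relaxed memory ordering.
 */
#include <stdint.h>
#include <stdlib.h>

struct toy_obj {
	uint32_t refcnt;
};

static void
toy_obj_get(struct toy_obj *o)
{
	__atomic_fetch_add(&o->refcnt, 1, __ATOMIC_RELAXED);
}

static int /* Returns 1 while references remain, 0 once freed. */
toy_obj_put(struct toy_obj *o)
{
	if (__atomic_sub_fetch(&o->refcnt, 1, __ATOMIC_RELAXED) != 0)
		return 1;
	free(o);
	return 0;
}
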
 */
-		for (j = 0; k != rqt_n; ++k, ++j)
-			rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
-		ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
-							rqt_attr);
-		rte_free(rqt_attr);
-		if (!ind_tbl->rqt) {
-			DRV_LOG(ERR, "port %u cannot create DevX RQT",
-				dev->data->port_id);
-			rte_errno = errno;
-			goto error;
-		}
-		ind_tbl->queues_n = queues_n;
-	}
-	rte_atomic32_inc(&ind_tbl->refcnt);
-	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
-	return ind_tbl;
-error:
-	for (j = 0; j < i; j++)
-		mlx5_rxq_release(dev, ind_tbl->queues[j]);
-	rte_free(ind_tbl);
-	DEBUG("port %u cannot create indirection table", dev->data->port_id);
 	return NULL;
 }
 
+/**
+ * Match queues listed in arguments to queues contained in indirection table
+ * object.
+ *
+ * @param ind_tbl
+ *   Pointer to indirection table to match.
+ * @param queues
+ *   Queues to match to queues in the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   1 if all queues in the indirection table match, 0 otherwise.
+ */
+static int
+mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
+				const uint16_t *queues, uint32_t queues_n)
+{
+	return (ind_tbl->queues_n == queues_n) &&
+	       (!memcmp(ind_tbl->queues, queues,
+			ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
+}
+
 /**
  * Get an indirection table.
  *
@@ -2245,7 +1849,7 @@ error:
  * @return
  *   An indirection table if found.
  */
-static struct mlx5_ind_table_obj *
+struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 		       uint32_t queues_n)
 {
@@ -2262,7 +1866,7 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 	if (ind_tbl) {
 		unsigned int i;
 
-		rte_atomic32_inc(&ind_tbl->refcnt);
+		__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
 		for (i = 0; i != ind_tbl->queues_n; ++i)
 			mlx5_rxq_get(dev, ind_tbl->queues[i]);
 	}
@@ -2276,28 +1880,28 @@ mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
  *   Pointer to Ethernet device.
  * @param ind_table
  *   Indirection table to release.
+ * @param standalone
+ *   Indirection table for standalone queue.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
-static int
+int
 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
-			   struct mlx5_ind_table_obj *ind_tbl)
+			   struct mlx5_ind_table_obj *ind_tbl,
+			   bool standalone)
 {
+	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 
-	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
-		if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV)
-			claim_zero(mlx5_glue->destroy_rwq_ind_table
-							(ind_tbl->ind_table));
-		else if (ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX)
-			claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
-	}
+	if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
+		priv->obj_ops.ind_table_destroy(ind_tbl);
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
-	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
-		LIST_REMOVE(ind_tbl, next);
-		rte_free(ind_tbl);
+	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
+		if (!standalone)
+			LIST_REMOVE(ind_tbl, next);
+		mlx5_free(ind_tbl);
 		return 0;
 	}
 	return 1;
@@ -2329,458 +1933,457 @@ mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
 }
 
 /**
- * Create an Rx Hash queue.
+ * Set up the fields of an indirection table structure.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param rss_key
- *   RSS key for the Rx hash queue.
- * @param rss_key_len
- *   RSS key length.
- * @param hash_fields
- *   Verbs protocol hash field to make the RSS on.
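
/*
 * Sketch of the RQT sizing rule that survives this refactor (helper name
 * is hypothetical; rte_is_power_of_2() and log2above() come from
 * rte_common.h and mlx5_utils.h): the table size is a power of two, a
 * non-power-of-two queue list falls back to the configured maximum, and
 * the tail is filled by wrapping around the given queues, exactly like
 * the removed loop above. Assumes queues_n <= 1U << max_size_log.
 */
static unsigned int
rqt_fill(uint32_t *rq_list, unsigned int queues_n, unsigned int max_size_log)
{
	unsigned int rqt_n = 1U << (rte_is_power_of_2(queues_n) ?
				    log2above(queues_n) : max_size_log);
	unsigned int j, k;

	for (j = 0, k = queues_n; k != rqt_n; ++k, ++j)
		rq_list[k] = rq_list[j];
	return rqt_n;
}
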
- * @param queues - * Queues entering in hash queue. In case of empty hash_fields only the - * first queue index will be taken for the indirection table. - * @param queues_n - * Number of queues. - * @param tunnel - * Tunnel type. + * @param ind_table + * Indirection table to modify. * * @return - * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ -struct mlx5_hrxq * -mlx5_hrxq_new(struct rte_eth_dev *dev, - const uint8_t *rss_key, uint32_t rss_key_len, - uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n, - int tunnel __rte_unused) +int +mlx5_ind_table_obj_setup(struct rte_eth_dev *dev, + struct mlx5_ind_table_obj *ind_tbl) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_hrxq *hrxq; - struct ibv_qp *qp = NULL; - struct mlx5_ind_table_obj *ind_tbl; - int err; - struct mlx5_devx_obj *tir = NULL; - struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]]; - struct mlx5_rxq_ctrl *rxq_ctrl = - container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - - queues_n = hash_fields ? queues_n : 1; - ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); - if (!ind_tbl) { - enum mlx5_ind_tbl_type type; - - type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ? - MLX5_IND_TBL_TYPE_IBV : MLX5_IND_TBL_TYPE_DEVX; - ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, type); - } - if (!ind_tbl) { - rte_errno = ENOMEM; - return NULL; - } - if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) { -#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - struct mlx5dv_qp_init_attr qp_init_attr; - - memset(&qp_init_attr, 0, sizeof(qp_init_attr)); - if (tunnel) { - qp_init_attr.comp_mask = - MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; - qp_init_attr.create_flags = - MLX5DV_QP_CREATE_TUNNEL_OFFLOADS; - } -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - if (dev->data->dev_conf.lpbk_mode) { - /* - * Allow packet sent from NIC loop back - * w/o source MAC check. - */ - qp_init_attr.comp_mask |= - MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; - qp_init_attr.create_flags |= - MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC; - } -#endif - qp = mlx5_glue->dv_create_qp - (priv->sh->ctx, - &(struct ibv_qp_init_attr_ex){ - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = - IBV_QP_INIT_ATTR_PD | - IBV_QP_INIT_ATTR_IND_TABLE | - IBV_QP_INIT_ATTR_RX_HASH, - .rx_hash_conf = (struct ibv_rx_hash_conf){ - .rx_hash_function = - IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_key_len, - .rx_hash_key = - (void *)(uintptr_t)rss_key, - .rx_hash_fields_mask = hash_fields, - }, - .rwq_ind_tbl = ind_tbl->ind_table, - .pd = priv->sh->pd, - }, - &qp_init_attr); -#else - qp = mlx5_glue->create_qp_ex - (priv->sh->ctx, - &(struct ibv_qp_init_attr_ex){ - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = - IBV_QP_INIT_ATTR_PD | - IBV_QP_INIT_ATTR_IND_TABLE | - IBV_QP_INIT_ATTR_RX_HASH, - .rx_hash_conf = (struct ibv_rx_hash_conf){ - .rx_hash_function = - IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_key_len, - .rx_hash_key = - (void *)(uintptr_t)rss_key, - .rx_hash_fields_mask = hash_fields, - }, - .rwq_ind_tbl = ind_tbl->ind_table, - .pd = priv->sh->pd, - }); -#endif - if (!qp) { - rte_errno = errno; - goto error; - } - } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */ - struct mlx5_devx_tir_attr tir_attr; - uint32_t i; - uint32_t lro = 1; - - /* Enable TIR LRO only if all the queues were configured for. 
*/ - for (i = 0; i < queues_n; ++i) { - if (!(*priv->rxqs)[queues[i]]->lro) { - lro = 0; - break; - } - } - memset(&tir_attr, 0, sizeof(tir_attr)); - tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; - tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ; - memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields, - sizeof(uint64_t)); - if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) - tir_attr.transport_domain = priv->sh->td->id; - else - tir_attr.transport_domain = priv->sh->tdn; - memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len); - tir_attr.indirect_table = ind_tbl->rqt->id; - if (dev->data->dev_conf.lpbk_mode) - tir_attr.self_lb_block = - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; - if (lro) { - tir_attr.lro_timeout_period_usecs = - priv->config.lro.timeout; - tir_attr.lro_max_msg_sz = priv->max_lro_msg_size; - tir_attr.lro_enable_mask = - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; - } - tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr); - if (!tir) { - DRV_LOG(ERR, "port %u cannot create DevX TIR", - dev->data->port_id); - rte_errno = errno; + uint32_t queues_n = ind_tbl->queues_n; + uint16_t *queues = ind_tbl->queues; + unsigned int i, j; + int ret = 0, err; + const unsigned int n = rte_is_power_of_2(queues_n) ? + log2above(queues_n) : + log2above(priv->config.ind_table_max_size); + + for (i = 0; i != queues_n; ++i) { + if (!mlx5_rxq_get(dev, queues[i])) { + ret = -rte_errno; goto error; } } - hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0); - if (!hrxq) + ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl); + if (ret) goto error; - hrxq->ind_table = ind_tbl; - if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) { - hrxq->qp = qp; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - hrxq->action = - mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp); - if (!hrxq->action) { - rte_errno = errno; - goto error; - } -#endif - } else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */ - hrxq->tir = tir; -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir - (hrxq->tir->obj); - if (!hrxq->action) { - rte_errno = errno; - goto error; - } -#endif - } - hrxq->rss_key_len = rss_key_len; - hrxq->hash_fields = hash_fields; - memcpy(hrxq->rss_key, rss_key, rss_key_len); - rte_atomic32_inc(&hrxq->refcnt); - LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - return hrxq; + __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED); + return 0; error: - err = rte_errno; /* Save rte_errno before cleanup. */ - mlx5_ind_table_obj_release(dev, ind_tbl); - if (qp) - claim_zero(mlx5_glue->destroy_qp(qp)); - else if (tir) - claim_zero(mlx5_devx_cmd_destroy(tir)); - rte_errno = err; /* Restore rte_errno. */ - return NULL; + err = rte_errno; + for (j = 0; j < i; j++) + mlx5_rxq_release(dev, ind_tbl->queues[j]); + rte_errno = err; + DRV_LOG(DEBUG, "Port %u cannot setup indirection table.", + dev->data->port_id); + return ret; } /** - * Get an Rx Hash queue. + * Create an indirection table. * * @param dev * Pointer to Ethernet device. - * @param rss_conf - * RSS configuration for the Rx hash queue. * @param queues - * Queues entering in hash queue. In case of empty hash_fields only the - * first queue index will be taken for the indirection table. + * Queues entering in the indirection table. * @param queues_n - * Number of queues. + * Number of queues in the array. + * @param standalone + * Indirection table for Standalone queue. * * @return - * An hash Rx queue on success. 
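
/*
 * The acquire-then-unwind idiom used by mlx5_ind_table_obj_setup() above
 * (and by mlx5_ind_table_obj_modify() further down), reduced to its bare
 * shape. A sketch only; the helper name is hypothetical.
 */
static int
acquire_rxq_refs(struct rte_eth_dev *dev, const uint16_t *queues, uint32_t n)
{
	uint32_t i, j;
	int err;

	for (i = 0; i != n; ++i) {
		if (!mlx5_rxq_get(dev, queues[i]))
			goto error;
	}
	return 0;
error:
	err = rte_errno; /* Preserve the failure cause across the unwind. */
	for (j = 0; j < i; j++)
		mlx5_rxq_release(dev, queues[j]);
	rte_errno = err;
	return -rte_errno;
}
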
+ *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
  */
-struct mlx5_hrxq *
-mlx5_hrxq_get(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
+		       uint32_t queues_n, bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-
-	queues_n = hash_fields ? queues_n : 1;
-	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
-		struct mlx5_ind_table_obj *ind_tbl;
+	struct mlx5_ind_table_obj *ind_tbl;
+	int ret;
 
-		if (hrxq->rss_key_len != rss_key_len)
-			continue;
-		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
-			continue;
-		if (hrxq->hash_fields != hash_fields)
-			continue;
-		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
-		if (!ind_tbl)
-			continue;
-		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_obj_release(dev, ind_tbl);
-			continue;
-		}
-		rte_atomic32_inc(&hrxq->refcnt);
-		return hrxq;
+	ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
+			      queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		return NULL;
 	}
-	return NULL;
+	ind_tbl->queues_n = queues_n;
+	ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+	memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+	ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+	if (ret < 0) {
+		mlx5_free(ind_tbl);
+		return NULL;
+	}
+	if (!standalone)
+		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	return ind_tbl;
 }
 
 /**
- * Release the hash Rx queue.
+ * Modify an indirection table.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param hrxq
- *   Pointer to Hash Rx queue to release.
+ * @param ind_table
+ *   Indirection table to modify.
+ * @param queues
+ *   Replacement queues for the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ * @param standalone
+ *   Indirection table for standalone queue.
 *
 * @return
- *   1 while a reference on it exists, 0 when freed.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
 int
-mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+			  struct mlx5_ind_table_obj *ind_tbl,
+			  uint16_t *queues, const uint32_t queues_n,
+			  bool standalone)
 {
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		mlx5_glue->destroy_flow_action(hrxq->action);
-#endif
-		if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
-			claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-		else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
-			claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-		LIST_REMOVE(hrxq, next);
-		rte_free(hrxq);
-		return 0;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	unsigned int i, j;
+	int ret = 0, err;
+	const unsigned int n = rte_is_power_of_2(queues_n) ?
			       log2above(queues_n) :
			       log2above(priv->config.ind_table_max_size);
+
+	MLX5_ASSERT(standalone);
+	RTE_SET_USED(standalone);
+	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
+		/*
+		 * Modification of indirection tables having more than one
+		 * reference is unsupported. Intended for standalone
+		 * indirection tables only.
+		 */
+		DRV_LOG(DEBUG,
+			"Port %u cannot modify indirection table (refcnt > 1).",
+			dev->data->port_id);
+		rte_errno = EINVAL;
+		return -rte_errno;
 	}
-	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
-	return 1;
+	for (i = 0; i != queues_n; ++i) {
+		if (!mlx5_rxq_get(dev, queues[i])) {
+			ret = -rte_errno;
+			goto error;
+		}
+	}
+	MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+	ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+	if (ret)
+		goto error;
+	for (j = 0; j < ind_tbl->queues_n; j++)
+		mlx5_rxq_release(dev, ind_tbl->queues[j]);
+	ind_tbl->queues_n = queues_n;
+	ind_tbl->queues = queues;
+	return 0;
+error:
+	err = rte_errno;
+	for (j = 0; j < i; j++)
+		mlx5_rxq_release(dev, ind_tbl->queues[j]);
+	rte_errno = err;
+	DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
+		dev->data->port_id);
+	return ret;
 }
 
 /**
- * Verify the Rx Queue list is empty
+ * Match an Rx Hash queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
+ * @param list
+ *   Cache list pointer.
+ * @param entry
+ *   Hash queue entry pointer.
+ * @param cb_ctx
+ *   Context of the callback function.
  *
 * @return
- *   The number of object not released.
+ *   0 on match, non-zero otherwise.
 */
 int
-mlx5_hrxq_verify(struct rte_eth_dev *dev)
+mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+		   struct mlx5_cache_entry *entry,
+		   void *cb_ctx)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq;
-	int ret = 0;
+	struct rte_eth_dev *dev = list->ctx;
+	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+	struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+	struct mlx5_ind_table_obj *ind_tbl;
 
-	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
-		DRV_LOG(DEBUG,
-			"port %u hash Rx queue %p still referenced",
-			dev->data->port_id, (void *)hrxq);
-		++ret;
-	}
-	return ret;
+	if (hrxq->rss_key_len != rss_desc->key_len ||
+	    memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+	    hrxq->hash_fields != rss_desc->hash_fields)
+		return 1;
+	ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
+					 rss_desc->queue_num);
+	if (ind_tbl)
+		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+	return ind_tbl != hrxq->ind_table;
 }
 
 /**
- * Create a drop Rx queue Verbs/DevX object.
+ * Modify an Rx Hash queue configuration.
  *
  * @param dev
  *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Index to Hash Rx queue to modify.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash field to make the RSS on.
+ * @param queues
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
+ * @param queues_n
+ *   Number of queues.
 *
 * @return
- *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
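
/*
 * Distilled form of mlx5_hrxq_match_cb() above (sketch; the struct
 * fields are taken from the code, the helper itself is hypothetical).
 * Note the inversion: the cache list expects 0 for a match, so the
 * callback effectively returns the negation of this predicate.
 */
static int
hrxq_is_hit(const struct mlx5_hrxq *h, const struct mlx5_flow_rss_desc *d,
	    const struct mlx5_ind_table_obj *tbl)
{
	return h->rss_key_len == d->key_len &&
	       !memcmp(h->rss_key, d->key, d->key_len) &&
	       h->hash_fields == d->hash_fields &&
	       h->ind_table == tbl;
}
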
 */
-static struct mlx5_rxq_obj *
-mlx5_rxq_obj_drop_new(struct rte_eth_dev *dev)
+int
+mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
+		 const uint8_t *rss_key, uint32_t rss_key_len,
+		 uint64_t hash_fields,
+		 const uint16_t *queues, uint32_t queues_n)
 {
+	int err;
+	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct ibv_context *ctx = priv->sh->ctx;
-	struct ibv_cq *cq;
-	struct ibv_wq *wq = NULL;
-	struct mlx5_rxq_obj *rxq;
-
-	if (priv->drop_queue.rxq)
-		return priv->drop_queue.rxq;
-	cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
-	if (!cq) {
-		DEBUG("port %u cannot allocate CQ for drop queue",
-		      dev->data->port_id);
-		rte_errno = errno;
-		goto error;
+	struct mlx5_hrxq *hrxq =
+		mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	int ret;
+
+	if (!hrxq) {
+		rte_errno = EINVAL;
+		return -rte_errno;
 	}
-	wq = mlx5_glue->create_wq(ctx,
-		 &(struct ibv_wq_init_attr){
-			.wq_type = IBV_WQT_RQ,
-			.max_wr = 1,
-			.max_sge = 1,
-			.pd = priv->sh->pd,
-			.cq = cq,
-		 });
-	if (!wq) {
-		DEBUG("port %u cannot allocate WQ for drop queue",
-		      dev->data->port_id);
-		rte_errno = errno;
-		goto error;
+	/* Validations. */
+	if (hrxq->rss_key_len != rss_key_len) {
+		/* rss_key_len is fixed at 40 bytes and not supposed to change. */
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	queues_n = hash_fields ? queues_n : 1;
+	if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
+					    queues, queues_n)) {
+		ind_tbl = hrxq->ind_table;
+	} else {
+		if (hrxq->standalone) {
+			/*
+			 * Replacement of the indirection table is unsupported
+			 * for standalone hrxq objects (used by shared RSS).
+			 */
+			rte_errno = ENOTSUP;
+			return -rte_errno;
+		}
+		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+		if (!ind_tbl)
+			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+							 hrxq->standalone);
 	}
-	rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
-	if (!rxq) {
-		DEBUG("port %u cannot allocate drop Rx queue memory",
-		      dev->data->port_id);
+	if (!ind_tbl) {
 		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	MLX5_ASSERT(priv->obj_ops.hrxq_modify);
+	ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key,
+					hash_fields, ind_tbl);
+	if (ret) {
+		rte_errno = errno;
 		goto error;
 	}
-	rxq->cq = cq;
-	rxq->wq = wq;
-	priv->drop_queue.rxq = rxq;
-	return rxq;
+	if (ind_tbl != hrxq->ind_table) {
+		MLX5_ASSERT(!hrxq->standalone);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
+		hrxq->ind_table = ind_tbl;
+	}
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	return 0;
 error:
-	if (wq)
-		claim_zero(mlx5_glue->destroy_wq(wq));
-	if (cq)
-		claim_zero(mlx5_glue->destroy_cq(cq));
-	return NULL;
+	err = rte_errno;
+	if (ind_tbl != hrxq->ind_table) {
+		MLX5_ASSERT(!hrxq->standalone);
+		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+	}
+	rte_errno = err;
+	return -rte_errno;
+}
+
+static void
+__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	mlx5_glue->destroy_flow_action(hrxq->action);
+#endif
+	priv->obj_ops.hrxq_destroy(hrxq);
+	if (!hrxq->standalone) {
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
+	}
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
 }
 
 /**
- * Release a drop Rx queue Verbs/DevX object.
+ * Release the hash Rx queue.
  *
  * @param dev
  *   Pointer to Ethernet device.
+ * @param hrxq
+ *   Index to Hash Rx queue to release.
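
/*
 * Hypothetical caller (sketch): refresh only the RSS key of an existing
 * hash Rx queue, keeping its queue set. mlx5_hrxq_modify() above rejects
 * any key-length change, so the full MLX5_RSS_HASH_KEY_LEN (40 bytes) is
 * always passed.
 */
static int
refresh_rss_key(struct rte_eth_dev *dev, uint32_t hrxq_idx,
		const uint8_t *new_key, uint64_t hash_fields,
		const uint16_t *queues, uint32_t queues_n)
{
	return mlx5_hrxq_modify(dev, hrxq_idx, new_key, MLX5_RSS_HASH_KEY_LEN,
				hash_fields, queues, queues_n);
}
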
* - * @return - * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + * @param list + * Cache list pointer. + * @param entry + * Hash queue entry pointer. */ -static void -mlx5_rxq_obj_drop_release(struct rte_eth_dev *dev) +void +mlx5_hrxq_remove_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry) +{ + struct rte_eth_dev *dev = list->ctx; + struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry); + + __mlx5_hrxq_remove(dev, hrxq); +} + +static struct mlx5_hrxq * +__mlx5_hrxq_create(struct rte_eth_dev *dev, + struct mlx5_flow_rss_desc *rss_desc) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq; + const uint8_t *rss_key = rss_desc->key; + uint32_t rss_key_len = rss_desc->key_len; + bool standalone = !!rss_desc->shared_rss; + const uint16_t *queues = + standalone ? rss_desc->const_q : rss_desc->queue; + uint32_t queues_n = rss_desc->queue_num; + struct mlx5_hrxq *hrxq = NULL; + uint32_t hrxq_idx = 0; + struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl; + int ret; - if (rxq->wq) - claim_zero(mlx5_glue->destroy_wq(rxq->wq)); - if (rxq->cq) - claim_zero(mlx5_glue->destroy_cq(rxq->cq)); - rte_free(rxq); - priv->drop_queue.rxq = NULL; + queues_n = rss_desc->hash_fields ? queues_n : 1; + if (!ind_tbl) + ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); + if (!ind_tbl) + ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, + standalone); + if (!ind_tbl) + return NULL; + hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); + if (!hrxq) + goto error; + hrxq->standalone = standalone; + hrxq->idx = hrxq_idx; + hrxq->ind_table = ind_tbl; + hrxq->rss_key_len = rss_key_len; + hrxq->hash_fields = rss_desc->hash_fields; + memcpy(hrxq->rss_key, rss_key, rss_key_len); + ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel); + if (ret < 0) + goto error; + return hrxq; +error: + if (!rss_desc->ind_tbl) + mlx5_ind_table_obj_release(dev, ind_tbl, standalone); + if (hrxq) + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); + return NULL; +} + +/** + * Create an Rx Hash queue. + * + * @param list + * Cache list pointer. + * @param entry + * Hash queue entry pointer. + * @param cb_ctx + * Context of the callback function. + * + * @return + * queue entry on success, NULL otherwise. + */ +struct mlx5_cache_entry * +mlx5_hrxq_create_cb(struct mlx5_cache_list *list, + struct mlx5_cache_entry *entry __rte_unused, + void *cb_ctx) +{ + struct rte_eth_dev *dev = list->ctx; + struct mlx5_flow_cb_ctx *ctx = cb_ctx; + struct mlx5_flow_rss_desc *rss_desc = ctx->data; + struct mlx5_hrxq *hrxq; + + hrxq = __mlx5_hrxq_create(dev, rss_desc); + return hrxq ? &hrxq->entry : NULL; } /** - * Create a drop indirection table. + * Get an Rx Hash queue. * * @param dev * Pointer to Ethernet device. + * @param rss_desc + * RSS configuration for the Rx hash queue. * * @return - * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + * An hash Rx queue index on success. 
*/ -static struct mlx5_ind_table_obj * -mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev) +uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, + struct mlx5_flow_rss_desc *rss_desc) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_ind_table_obj *ind_tbl; - struct mlx5_rxq_obj *rxq; - struct mlx5_ind_table_obj tmpl; + struct mlx5_hrxq *hrxq; + struct mlx5_cache_entry *entry; + struct mlx5_flow_cb_ctx ctx = { + .data = rss_desc, + }; - rxq = mlx5_rxq_obj_drop_new(dev); - if (!rxq) - return NULL; - tmpl.ind_table = mlx5_glue->create_rwq_ind_table - (priv->sh->ctx, - &(struct ibv_rwq_ind_table_init_attr){ - .log_ind_tbl_size = 0, - .ind_tbl = &rxq->wq, - .comp_mask = 0, - }); - if (!tmpl.ind_table) { - DEBUG("port %u cannot allocate indirection table for drop" - " queue", - dev->data->port_id); - rte_errno = errno; - goto error; - } - ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0); - if (!ind_tbl) { - rte_errno = ENOMEM; - goto error; + if (rss_desc->shared_rss) { + hrxq = __mlx5_hrxq_create(dev, rss_desc); + } else { + entry = mlx5_cache_register(&priv->hrxqs, &ctx); + if (!entry) + return 0; + hrxq = container_of(entry, typeof(*hrxq), entry); } - ind_tbl->ind_table = tmpl.ind_table; - return ind_tbl; -error: - mlx5_rxq_obj_drop_release(dev); - return NULL; + if (hrxq) + return hrxq->idx; + return 0; } /** - * Release a drop indirection table. + * Release the hash Rx queue. * * @param dev * Pointer to Ethernet device. + * @param hrxq_idx + * Index to Hash Rx queue to release. + * + * @return + * 1 while a reference on it exists, 0 when freed. */ -static void -mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev) +int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table; + struct mlx5_hrxq *hrxq; - claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table)); - mlx5_rxq_obj_drop_release(dev); - rte_free(ind_tbl); - priv->drop_queue.hrxq->ind_table = NULL; + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); + if (!hrxq) + return 0; + if (!hrxq->standalone) + return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry); + __mlx5_hrxq_remove(dev, hrxq); + return 0; } /** @@ -2790,78 +2393,42 @@ mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev) * Pointer to Ethernet device. * * @return - * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set. + * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set. 
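
/*
 * Pairing sketch (hypothetical caller): every successful mlx5_hrxq_get()
 * is balanced by mlx5_hrxq_release() on the same index; 0 means failure.
 * Shared-RSS (standalone) queues bypass the cache list in both paths.
 */
static int
with_hrxq(struct rte_eth_dev *dev, struct mlx5_flow_rss_desc *rss_desc)
{
	uint32_t idx = mlx5_hrxq_get(dev, rss_desc);

	if (!idx)
		return -rte_errno; /* rte_errno assumed set by the failure. */
	/* ... reference the hrxq index from flow rules here ... */
	return mlx5_hrxq_release(dev, idx);
}
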
 */
 struct mlx5_hrxq *
-mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+mlx5_drop_action_create(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ind_table_obj *ind_tbl = NULL;
-	struct ibv_qp *qp = NULL;
 	struct mlx5_hrxq *hrxq = NULL;
+	int ret;
 
-	if (priv->drop_queue.hrxq) {
-		rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+	if (priv->drop_queue.hrxq)
 		return priv->drop_queue.hrxq;
-	}
-	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
 	if (!hrxq) {
 		DRV_LOG(WARNING,
-			"port %u cannot allocate memory for drop queue",
+			"Port %u cannot allocate memory for drop queue.",
 			dev->data->port_id);
 		rte_errno = ENOMEM;
 		goto error;
 	}
 	priv->drop_queue.hrxq = hrxq;
-	ind_tbl = mlx5_ind_table_obj_drop_new(dev);
-	if (!ind_tbl)
-		goto error;
-	hrxq->ind_table = ind_tbl;
-	qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
-		 &(struct ibv_qp_init_attr_ex){
-			.qp_type = IBV_QPT_RAW_PACKET,
-			.comp_mask =
-				IBV_QP_INIT_ATTR_PD |
-				IBV_QP_INIT_ATTR_IND_TABLE |
-				IBV_QP_INIT_ATTR_RX_HASH,
-			.rx_hash_conf = (struct ibv_rx_hash_conf){
-				.rx_hash_function =
-					IBV_RX_HASH_FUNC_TOEPLITZ,
-				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
-				.rx_hash_key = rss_hash_default_key,
-				.rx_hash_fields_mask = 0,
-				},
-			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->sh->pd
-		 });
-	if (!qp) {
-		DEBUG("port %u cannot allocate QP for drop queue",
-		      dev->data->port_id);
-		rte_errno = errno;
+	hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
+				      0, SOCKET_ID_ANY);
+	if (!hrxq->ind_table) {
+		rte_errno = ENOMEM;
 		goto error;
 	}
-	hrxq->qp = qp;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
-	if (!hrxq->action) {
-		rte_errno = errno;
+	ret = priv->obj_ops.drop_action_create(dev);
+	if (ret < 0)
 		goto error;
-	}
-#endif
-	rte_atomic32_set(&hrxq->refcnt, 1);
 	return hrxq;
 error:
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	if (hrxq && hrxq->action)
-		mlx5_glue->destroy_flow_action(hrxq->action);
-#endif
-	if (qp)
-		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-	if (ind_tbl)
-		mlx5_ind_table_obj_drop_release(dev);
 	if (hrxq) {
+		if (hrxq->ind_table)
+			mlx5_free(hrxq->ind_table);
 		priv->drop_queue.hrxq = NULL;
-		rte_free(hrxq);
+		mlx5_free(hrxq);
 	}
 	return NULL;
 }
@@ -2873,18 +2440,57 @@ error:
  *   Pointer to Ethernet device.
  */
 void
-mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+mlx5_drop_action_destroy(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
 
-	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		mlx5_glue->destroy_flow_action(hrxq->action);
-#endif
-		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-		mlx5_ind_table_obj_drop_release(dev);
-		rte_free(hrxq);
-		priv->drop_queue.hrxq = NULL;
+	if (!priv->drop_queue.hrxq)
+		return;
+	priv->obj_ops.drop_action_destroy(dev);
+	mlx5_free(priv->drop_queue.rxq);
+	mlx5_free(hrxq->ind_table);
+	mlx5_free(hrxq);
+	priv->drop_queue.rxq = NULL;
+	priv->drop_queue.hrxq = NULL;
+}
+
+/**
+ * Verify the Rx Queue list is empty.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+uint32_t
+mlx5_hrxq_verify(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	return mlx5_cache_list_get_entry_num(&priv->hrxqs);
+}
+
+/**
+ * Set the Rx queue timestamp conversion parameters.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ */ +void +mlx5_rxq_timestamp_set(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_dev_ctx_shared *sh = priv->sh; + struct mlx5_rxq_data *data; + unsigned int i; + + for (i = 0; i != priv->rxqs_n; ++i) { + if (!(*priv->rxqs)[i]) + continue; + data = (*priv->rxqs)[i]; + data->sh = sh; + data->rt_timestamp = priv->config.rt_timestamp; } }
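
/*
 * Reader-side counterpart of the timestamp registration done in
 * mlx5_rxq_new() (sketch; RTE_MBUF_DYNFIELD and rte_mbuf_timestamp_t are
 * provided by rte_mbuf_dyn.h): fetch the Rx timestamp of a received mbuf
 * through the dynamic field offset and flag stored in the Rx queue.
 */
static inline uint64_t
read_rx_timestamp(const struct rte_mbuf *m, int ts_offset, uint64_t ts_flag)
{
	if (!(m->ol_flags & ts_flag))
		return 0; /* No timestamp was set for this packet. */
	return *RTE_MBUF_DYNFIELD(m, ts_offset, const rte_mbuf_timestamp_t *);
}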