X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=a00cb129800f4f5dc6c32bcbc4702a0eb6a79616;hb=be11774d453ba9fee825c1055b6602cef4f0afb9;hp=f81de251fd0bac6eddf6c52681c151cb404bdd64;hpb=d4a405186b73a868bee962a5a941a058a1e59f3f;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index f81de251fd..a00cb12980 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -52,10 +52,79 @@ uint8_t rss_hash_default_key[] = { }; /* Length of the default RSS hash key. */ -const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); +static_assert(MLX5_RSS_HASH_KEY_LEN == + (unsigned int)sizeof(rss_hash_default_key), + "wrong RSS default key size."); /** - * Allocate RX queue elements. + * Check whether Multi-Packet RQ can be enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 1 if supported, negative errno value if not. + */ +inline int +mlx5_check_mprq_support(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + + if (priv->config.mprq.enabled && + priv->rxqs_n >= priv->config.mprq.min_rxqs_num) + return 1; + return -ENOTSUP; +} + +/** + * Check whether Multi-Packet RQ is enabled for the Rx queue. + * + * @param rxq + * Pointer to receive queue structure. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq) +{ + return rxq->strd_num_n > 0; +} + +/** + * Check whether Multi-Packet RQ is enabled for the device. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * 0 if disabled, otherwise enabled. + */ +inline int +mlx5_mprq_enabled(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t i; + uint16_t n = 0; + + if (mlx5_check_mprq_support(dev) < 0) + return 0; + /* All the configured queues should be enabled. */ + for (i = 0; i < priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (!rxq) + continue; + if (mlx5_rxq_mprq_enabled(rxq)) + ++n; + } + /* Multi-Packet RQ can't be partially configured. */ + assert(n == 0 || n == priv->rxqs_n); + return n == priv->rxqs_n; +} + +/** + * Allocate RX queue elements for Multi-Packet RQ. * * @param rxq_ctrl * Pointer to RX queue structure. @@ -63,8 +132,58 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); * @return * 0 on success, a negative errno value otherwise and rte_errno is set. */ -int -rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +static int +rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + unsigned int wqe_n = 1 << rxq->elts_n; + unsigned int i; + int err; + + /* Iterate on segments. */ + for (i = 0; i <= wqe_n; ++i) { + struct mlx5_mprq_buf *buf; + + if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) { + DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id); + rte_errno = ENOMEM; + goto error; + } + if (i < wqe_n) + (*rxq->mprq_bufs)[i] = buf; + else + rxq->mprq_repl = buf; + } + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments", + rxq->port_id, rxq->idx, wqe_n); + return 0; +error: + err = rte_errno; /* Save rte_errno before cleanup. */ + wqe_n = i; + for (i = 0; (i != wqe_n); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + rte_mempool_put(rxq->mprq_mp, + (*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + rxq->port_id, rxq->idx); + rte_errno = err; /* Restore rte_errno. 
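+ *
+ * A note on the idiom above: helpers in this file report failure by
+ * returning -rte_errno, while rte_errno itself keeps the cause. Since the
+ * cleanup loop may overwrite rte_errno, the value is saved before cleanup
+ * and restored afterwards. Minimal sketch of the same pattern
+ * (illustrative only, release_resource() is a placeholder):
+ *
+ *   err = rte_errno;         // save the original failure cause
+ *   release_resource();      // cleanup may clobber rte_errno
+ *   rte_errno = err;         // restore it for the caller
+ *   return -rte_errno;       // negative errno, driver-wide convention
+ *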
*/ + return -rte_errno; +} + +/** + * Allocate RX queue elements for Single-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; @@ -78,7 +197,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { DRV_LOG(ERR, "port %u empty mbuf pool", - rxq_ctrl->priv->dev->data->port_id); + PORT_ID(rxq_ctrl->priv)); rte_errno = ENOMEM; goto error; } @@ -122,7 +241,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) DRV_LOG(DEBUG, "port %u Rx queue %u allocated and configured %u segments" " (max %u packets)", - rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n, + PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); return 0; error: @@ -134,19 +253,63 @@ error: (*rxq_ctrl->rxq.elts)[i] = NULL; } DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", - rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx); + PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx); rte_errno = err; /* Restore rte_errno. */ return -rte_errno; } /** - * Free RX queue elements. + * Allocate RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + * + * @return + * 0 on success, errno value on failure. + */ +int +rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? + rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl); +} + +/** + * Free RX queue elements for Multi-Packet RQ. * * @param rxq_ctrl * Pointer to RX queue structure. */ static void -rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; + uint16_t i; + + DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs", + rxq->port_id, rxq->idx); + if (rxq->mprq_bufs == NULL) + return; + assert(mlx5_rxq_check_vec_support(rxq) < 0); + for (i = 0; (i != (1u << rxq->elts_n)); ++i) { + if ((*rxq->mprq_bufs)[i] != NULL) + mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]); + (*rxq->mprq_bufs)[i] = NULL; + } + if (rxq->mprq_repl != NULL) { + mlx5_mprq_buf_free(rxq->mprq_repl); + rxq->mprq_repl = NULL; + } +} + +/** + * Free RX queue elements for Single-Packet RQ. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; const uint16_t q_n = (1 << rxq->elts_n); @@ -155,7 +318,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) uint16_t i; DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs", - rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx); + PORT_ID(rxq_ctrl->priv), rxq->idx); if (rxq->elts == NULL) return; /** @@ -174,6 +337,21 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) } } +/** + * Free RX queue elements. + * + * @param rxq_ctrl + * Pointer to RX queue structure. + */ +static void +rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) +{ + if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) + rxq_free_elts_mprq(rxq_ctrl); + else + rxq_free_elts_sprq(rxq_ctrl); +} + /** * Clean up a RX queue. 
* @@ -186,7 +364,7 @@ void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u", - rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx); + PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx); if (rxq_ctrl->ibv) mlx5_rxq_ibv_release(rxq_ctrl->ibv); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); @@ -204,14 +382,15 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_dev_config *config = &priv->config; uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_JUMBO_FRAME); if (config->hw_fcs_strip) - offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + if (config->hw_csum) offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | @@ -236,32 +415,6 @@ mlx5_get_rx_port_offloads(void) return offloads; } -/** - * Checks if the per-queue offload configuration is valid. - * - * @param dev - * Pointer to Ethernet device. - * @param offloads - * Per-queue offloads configuration. - * - * @return - * 1 if the configuration is valid, 0 otherwise. - */ -static int -mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) -{ - uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; - uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev); - uint64_t port_supp_offloads = mlx5_get_rx_port_offloads(); - - if ((offloads & (queue_supp_offloads | port_supp_offloads)) != - offloads) - return 0; - if (((port_offloads ^ offloads) & port_supp_offloads)) - return 0; - return 1; -} - /** * * @param dev @@ -285,7 +438,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); @@ -305,18 +458,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, rte_errno = EOVERFLOW; return -rte_errno; } - if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) { - DRV_LOG(ERR, - "port %u Rx queue offloads 0x%" PRIx64 " don't match" - " port offloads 0x%" PRIx64 " or supported offloads 0x%" - PRIx64, - dev->data->port_id, conf->offloads, - dev->data->dev_conf.rxmode.offloads, - (mlx5_get_rx_port_offloads() | - mlx5_get_rx_queue_offloads(dev))); - rte_errno = ENOTSUP; - return -rte_errno; - } if (!mlx5_rxq_releasable(dev, idx)) { DRV_LOG(ERR, "port %u unable to release queue index %u", dev->data->port_id, idx); @@ -348,17 +489,17 @@ mlx5_rx_queue_release(void *dpdk_rxq) { struct mlx5_rxq_data *rxq = (struct mlx5_rxq_data *)dpdk_rxq; struct mlx5_rxq_ctrl *rxq_ctrl; - struct priv *priv; + struct mlx5_priv *priv; if (rxq == NULL) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx)) + if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.idx)) rte_panic("port %u Rx queue %u is still used by a flow and" - " cannot be removed\n", priv->dev->data->port_id, - rxq_ctrl->idx); - mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx); + " cannot be removed\n", + PORT_ID(priv), rxq->idx); + mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.idx); } /** @@ -373,14 +514,14 @@ mlx5_rx_queue_release(void *dpdk_rxq) 
int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); unsigned int count = 0; - struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + struct rte_intr_handle *intr_handle = dev->intr_handle; - if (!priv->dev->data->dev_conf.intr_conf.rxq) + if (!dev->data->dev_conf.intr_conf.rxq) return 0; mlx5_rx_intr_vec_disable(dev); intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); @@ -451,13 +592,13 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - struct rte_intr_handle *intr_handle = priv->dev->intr_handle; + struct mlx5_priv *priv = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); - if (!priv->dev->data->dev_conf.intr_conf.rxq) + if (!dev->data->dev_conf.intr_conf.rxq) return; if (!intr_handle->intr_vec) goto free; @@ -505,7 +646,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) doorbell = (uint64_t)doorbell_hi << 32; doorbell |= rxq->cqn; rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); - rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg); + mlx5_uar_write64(rte_cpu_to_be_64(doorbell), + cq_db_reg, rxq->uar_lock_cq); } /** @@ -522,7 +664,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; @@ -560,7 +702,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; struct mlx5_rxq_ibv *rxq_ibv = NULL; @@ -588,6 +730,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) } rxq_data->cq_arm_sn++; mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); + mlx5_rxq_ibv_release(rxq_ibv); return 0; exit: ret = rte_errno; /* Save rte_errno before cleanup. 
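 *
 * The enable/disable pair above backs the generic ethdev Rx interrupt
 * API. A hedged sketch of the application-side sequence it serves
 * (PORT/QUEUE are placeholders):
 *
 *   rte_eth_dev_rx_intr_ctl_q(PORT, QUEUE, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_eth_dev_rx_intr_enable(PORT, QUEUE);  // arms the CQ (mlx5_arm_cq)
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *   rte_eth_dev_rx_intr_disable(PORT, QUEUE); // acks the CQ event as above
 *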
*/ @@ -613,7 +756,7 @@ exit: struct mlx5_rxq_ibv * mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); @@ -623,10 +766,16 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) struct ibv_cq_init_attr_ex ibv; struct mlx5dv_cq_init_attr mlx5; } cq; - struct ibv_wq_init_attr wq; + struct { + struct ibv_wq_init_attr ibv; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + struct mlx5dv_wq_init_attr mlx5; +#endif + } wq; struct ibv_cq_ex cq_attr; } attr; - unsigned int cqe_n = (1 << rxq_data->elts_n) - 1; + unsigned int cqe_n; + unsigned int wqe_n = 1 << rxq_data->elts_n; struct mlx5_rxq_ibv *tmpl; struct mlx5dv_cq cq_info; struct mlx5dv_rwq rwq; @@ -634,6 +783,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) int ret = 0; struct mlx5dv_obj obj; struct mlx5_dev_config *config = &priv->config; + const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data); assert(rxq_data); assert(!rxq_ctrl->ibv); @@ -644,23 +794,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (!tmpl) { DRV_LOG(ERR, "port %u Rx queue %u cannot allocate verbs resources", - dev->data->port_id, rxq_ctrl->idx); + dev->data->port_id, rxq_data->idx); rte_errno = ENOMEM; goto error; } tmpl->rxq_ctrl = rxq_ctrl; - /* Use the entire RX mempool as the memory region. */ - tmpl->mr = mlx5_mr_get(dev, rxq_data->mp); - if (!tmpl->mr) { - tmpl->mr = mlx5_mr_new(dev, rxq_data->mp); - if (!tmpl->mr) { - DRV_LOG(ERR, "port %u: memeroy region creation failure", - dev->data->port_id); - goto error; - } - } if (rxq_ctrl->irq) { - tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx); + tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx); if (!tmpl->channel) { DRV_LOG(ERR, "port %u: comp channel creation failure", dev->data->port_id); @@ -668,6 +808,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) goto error; } } + if (mprq_en) + cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; + else + cqe_n = wqe_n - 1; attr.cq.ibv = (struct ibv_cq_init_attr_ex){ .cqe = cqe_n, .channel = tmpl->channel, @@ -679,7 +823,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) if (config->cqe_comp && !rxq_data->hw_timestamp) { attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attr.cq.mlx5.cqe_comp_res_format = + mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX : + MLX5DV_CQE_RES_FORMAT_HASH; +#else attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; +#endif /* * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
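 *
 * Worked example for the cqe_n computed above (illustrative numbers):
 * with MPRQ every stride may complete as its own CQE, so wqe_n = 64
 * WQEs of 1 << strd_num_n = 64 strides each need
 * cqe_n = 64 * 64 - 1 = 4095 CQ entries, whereas a Single-Packet RQ of
 * 64 WQEs only needs wqe_n - 1 = 63.
 *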
@@ -692,8 +842,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) " timestamp", dev->data->port_id); } +#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD + if (config->cqe_pad) { + attr.cq.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS; + attr.cq.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD; + } +#endif tmpl->cq = mlx5_glue->cq_ex_to_cq - (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv, + (mlx5_glue->dv_create_cq(priv->sh->ctx, &attr.cq.ibv, &attr.cq.mlx5)); if (tmpl->cq == NULL) { DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure", @@ -701,18 +857,18 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) rte_errno = ENOMEM; goto error; } - DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d", - dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr); - DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d", - dev->data->port_id, priv->device_attr.orig_attr.max_sge); - attr.wq = (struct ibv_wq_init_attr){ + DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d", + dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d", + dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge); + attr.wq.ibv = (struct ibv_wq_init_attr){ .wq_context = NULL, /* Could be useful in the future. */ .wq_type = IBV_WQT_RQ, /* Max number of outstanding WRs. */ - .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n, + .max_wr = wqe_n >> rxq_data->sges_n, /* Max number of scatter/gather elements in a WR. */ .max_sge = 1 << rxq_data->sges_n, - .pd = priv->pd, + .pd = priv->sh->pd, .cq = tmpl->cq, .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | @@ -723,16 +879,38 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) }; /* By default, FCS (CRC) is stripped by hardware. */ if (rxq_data->crc_present) { - attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; - attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; + attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } -#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING if (config->hw_padding) { - attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; - attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; +#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING) + attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; +#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING) + attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING; + attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; +#endif + } +#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT + attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){ + .comp_mask = 0, + }; + if (mprq_en) { + struct mlx5dv_striding_rq_init_attr *mprq_attr = + &attr.wq.mlx5.striding_rq_attrs; + + attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ; + *mprq_attr = (struct mlx5dv_striding_rq_init_attr){ + .single_stride_log_num_of_bytes = rxq_data->strd_sz_n, + .single_wqe_log_num_of_strides = rxq_data->strd_num_n, + .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT, + }; } + tmpl->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &attr.wq.ibv, + &attr.wq.mlx5); +#else + tmpl->wq = mlx5_glue->create_wq(priv->sh->ctx, &attr.wq.ibv); #endif - tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); if (tmpl->wq == NULL) { DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure", dev->data->port_id, idx); @@ -743,16 +921,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) * Make sure number of WRs*SGEs match expectations since a queue * cannot allocate more than "desc" buffers. 
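 *
 * Worked example (illustrative): desc = 512 with sges_n = 2 requests
 * max_wr = 512 >> 2 = 128 WRs of max_sge = 1 << 2 = 4 segments each,
 * i.e. exactly 128 * 4 = 512 buffers. If the provider rounded either
 * value up, the check below fails with EINVAL instead of posting more
 * buffers than were allocated.
 *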
*/ - if (((int)attr.wq.max_wr != - ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) || - ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) { + if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) || + attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) { DRV_LOG(ERR, "port %u Rx queue %u requested %u*%u but got %u*%u" " WRs*SGEs", dev->data->port_id, idx, - ((1 << rxq_data->elts_n) >> rxq_data->sges_n), - (1 << rxq_data->sges_n), - attr.wq.max_wr, attr.wq.max_sge); + wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n), + attr.wq.ibv.max_wr, attr.wq.ibv.max_sge); rte_errno = EINVAL; goto error; } @@ -787,25 +963,40 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) goto error; } /* Fill the rings. */ - rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[]) - (uintptr_t)rwq.buf; - for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) { - struct rte_mbuf *buf = (*rxq_data->elts)[i]; - volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i]; - + rxq_data->wqes = rwq.buf; + for (i = 0; (i != wqe_n); ++i) { + volatile struct mlx5_wqe_data_seg *scat; + uintptr_t addr; + uint32_t byte_count; + + if (mprq_en) { + struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i]; + + scat = &((volatile struct mlx5_wqe_mprq *) + rxq_data->wqes)[i].dseg; + addr = (uintptr_t)mlx5_mprq_buf_addr(buf); + byte_count = (1 << rxq_data->strd_sz_n) * + (1 << rxq_data->strd_num_n); + } else { + struct rte_mbuf *buf = (*rxq_data->elts)[i]; + + scat = &((volatile struct mlx5_wqe_data_seg *) + rxq_data->wqes)[i]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + byte_count = DATA_LEN(buf); + } /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, - uintptr_t)), - .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), - .lkey = tmpl->mr->lkey, + .addr = rte_cpu_to_be_64(addr), + .byte_count = rte_cpu_to_be_32(byte_count), + .lkey = mlx5_rx_addr2mr(rxq_data, addr), }; } rxq_data->rq_db = rwq.dbrec; rxq_data->cqe_n = log2above(cq_info.cqe_cnt); rxq_data->cq_ci = 0; - rxq_data->rq_ci = 0; + rxq_data->consumed_strd = 0; rxq_data->rq_pi = 0; rxq_data->zip = (struct rxq_zip){ .ai = 0, @@ -816,14 +1007,12 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) rxq_data->cqn = cq_info.cqn; rxq_data->cq_arm_sn = 0; /* Update doorbell counter. */ - rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n; + rxq_data->rq_ci = wqe_n >> rxq_data->sges_n; rte_wmb(); *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, idx, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", - dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; @@ -835,8 +1024,6 @@ error: claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); if (tmpl->channel) claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); - if (tmpl->mr) - mlx5_mr_release(tmpl->mr); priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; rte_errno = ret; /* Restore rte_errno. 
*/
 	return NULL;
 }
 
@@ -856,7 +1043,7 @@ error:
 struct mlx5_rxq_ibv *
 mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl;
 
@@ -866,11 +1053,7 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 		return NULL;
 	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
 	if (rxq_ctrl->ibv) {
-		mlx5_mr_get(dev, rxq_data->mp);
 		rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
-		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-			dev->data->port_id, rxq_ctrl->idx,
-			rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
 	}
 	return rxq_ctrl->ibv;
 }
@@ -887,18 +1070,9 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
 int
 mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
 {
-	int ret;
-
 	assert(rxq_ibv);
 	assert(rxq_ibv->wq);
 	assert(rxq_ibv->cq);
-	assert(rxq_ibv->mr);
-	ret = mlx5_mr_release(rxq_ibv->mr);
-	if (!ret)
-		rxq_ibv->mr = NULL;
-	DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
-		rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
-		rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
 	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
 		rxq_free_elts(rxq_ibv->rxq_ctrl);
 		claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
@@ -925,13 +1099,13 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
 int
 mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	int ret = 0;
 	struct mlx5_rxq_ibv *rxq_ibv;
 
 	LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
 		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
-			dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+			dev->data->port_id, rxq_ibv->rxq_ctrl->rxq.idx);
 		++ret;
 	}
 	return ret;
@@ -950,13 +1124,189 @@ mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
 	return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
 }
 
+/**
+ * Callback function to initialize mbufs for Multi-Packet RQ.
+ */
+static inline void
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+		   void *_m, unsigned int i __rte_unused)
+{
+	struct mlx5_mprq_buf *buf = _m;
+
+	memset(_m, 0, sizeof(*buf));
+	buf->mp = mp;
+	rte_atomic16_set(&buf->refcnt, 1);
+}
+
+/**
+ * Free mempool of Multi-Packet RQ.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_free_mp(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_mempool *mp = priv->mprq_mp;
+	unsigned int i;
+
+	if (mp == NULL)
+		return 0;
+	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
+		dev->data->port_id, mp->name);
+	/*
+	 * If a buffer in the pool has been externally attached to a mbuf and
+	 * it is still in use by the application, destroying the Rx queue can
+	 * spoil the packet. It is unlikely to happen but if the application
+	 * dynamically creates and destroys queues while holding Rx packets,
+	 * this can happen.
+	 *
+	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
+	 * RQ isn't provided by the application but managed by the PMD.
+	 */
+	if (!rte_mempool_full(mp)) {
+		DRV_LOG(ERR,
+			"port %u mempool for Multi-Packet RQ is still in use",
+			dev->data->port_id);
+		rte_errno = EBUSY;
+		return -rte_errno;
+	}
+	rte_mempool_free(mp);
+	/* Unset mempool for each Rx queue. 
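+ *
+ * Ownership rule behind the rte_mempool_full() test above (hedged
+ * summary): a mlx5_mprq_buf taken from this pool is posted to the RQ and
+ * may later be attached to one or more mbufs as an external buffer;
+ * mlx5_mprq_buf_free() returns it only once the last reference is gone.
+ * A pool that is not full therefore proves that some packet still points
+ * into one of its buffers:
+ *
+ *   rte_mempool_get() -> post to RQ -> attach to mbufs (refcnt grows)
+ *   last mbuf freed -> mlx5_mprq_buf_free() -> back into the pool
+ *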
*/
+	for (i = 0; i != priv->rxqs_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+		if (rxq == NULL)
+			continue;
+		rxq->mprq_mp = NULL;
+	}
+	priv->mprq_mp = NULL;
+	return 0;
+}
+
+/**
+ * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
+ * mempool. If already allocated, reuse it if there are enough elements.
+ * Otherwise, resize it.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct rte_mempool *mp = priv->mprq_mp;
+	char name[RTE_MEMPOOL_NAMESIZE];
+	unsigned int desc = 0;
+	unsigned int buf_len;
+	unsigned int obj_num;
+	unsigned int obj_size;
+	unsigned int strd_num_n = 0;
+	unsigned int strd_sz_n = 0;
+	unsigned int i;
+
+	if (!mlx5_mprq_enabled(dev))
+		return 0;
+	/* Count the total number of descriptors configured. */
+	for (i = 0; i != priv->rxqs_n; ++i) {
+		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+		if (rxq == NULL)
+			continue;
+		desc += 1 << rxq->elts_n;
+		/* Get the max number of strides. */
+		if (strd_num_n < rxq->strd_num_n)
+			strd_num_n = rxq->strd_num_n;
+		/* Get the max size of a stride. */
+		if (strd_sz_n < rxq->strd_sz_n)
+			strd_sz_n = rxq->strd_sz_n;
+	}
+	assert(strd_num_n && strd_sz_n);
+	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
+	obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+	/*
+	 * Received packets can be either memcpy'd or externally referenced.
+	 * When a packet is attached to an mbuf as an external buffer, it
+	 * isn't possible to predict how the buffers will be queued by the
+	 * application, so there is no way to pre-allocate exactly the number
+	 * of buffers needed; instead, enough buffers must be prepared
+	 * speculatively.
+	 *
+	 * In the data path, if this mempool is depleted, the PMD will memcpy
+	 * received packets into buffers provided by the application (rxq->mp)
+	 * until this mempool becomes available again.
+	 */
+	desc *= 4;
+	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+	/*
+	 * rte_mempool_create_empty() has a sanity check to refuse a large
+	 * cache size compared to the number of elements.
+	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
+	 * constant number 2 instead.
+	 */
+	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
+	/* Check whether a mempool is already allocated and if it can be reused. */
+	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
+		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
+			dev->data->port_id, mp->name);
+		/* Reuse. */
+		goto exit;
+	} else if (mp != NULL) {
+		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
+			dev->data->port_id, mp->name);
+		/*
+		 * If it fails to free, the mempool may still be in use and
+		 * there is no choice but to keep using the existing one. On
+		 * buffer underrun, packets will be memcpy'd instead of
+		 * attached as external buffers.
+		 */
+		if (mlx5_mprq_free_mp(dev)) {
+			if (mp->elt_size >= obj_size)
+				goto exit;
+			else
+				return -rte_errno;
+		}
+	}
+	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
+	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
+				0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+				dev->device->numa_node, 0);
+	if (mp == NULL) {
+		DRV_LOG(ERR,
+			"port %u failed to allocate a mempool for"
+			" Multi-Packet RQ, count=%u, size=%u",
+			dev->data->port_id, obj_num, obj_size);
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	priv->mprq_mp = mp;
+exit:
+	/* Set mempool for each Rx queue. 
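+ *
+ * Sizing example for the formulas above (illustrative numbers): four
+ * queues of 512 descriptors with strd_num_n = 6 and strd_sz_n = 11 give
+ *
+ *   buf_len  = (1 << 6) * (1 << 11) = 128 KiB
+ *   obj_size = 128 KiB + sizeof(struct mlx5_mprq_buf)
+ *   desc     = (4 * 512) * 4 = 8192   // x4 speculative headroom
+ *   obj_num  = 8192 + MLX5_MPRQ_MP_CACHE_SZ * 4
+ *
+ * i.e. the shared pool trades memory for not having to track how long
+ * the application keeps externally attached mbufs.
+ *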
*/ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *rxq = (*priv->rxqs)[i]; + + if (rxq == NULL) + continue; + rxq->mprq_mp = mp; + } + DRV_LOG(INFO, "port %u Multi-Packet RQ is configured", + dev->data->port_id); + return 0; +} + /** * Create a DPDK Rx queue. * * @param dev * Pointer to Ethernet device. * @param idx - * TX queue index. + * RX queue index. * @param desc * Number of descriptors to configure in queue. * @param socket @@ -970,16 +1320,20 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + unsigned int mprq_stride_size; struct mlx5_dev_config *config = &priv->config; /* * Always allocate extra slots, even if eventually * the vector Rx will not be used. */ - const uint16_t desc_n = + uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; + uint64_t offloads = conf->offloads | + dev->data->dev_conf.rxmode.offloads; + const int mprq_en = mlx5_check_mprq_support(dev) > 0; tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl) + @@ -989,15 +1343,51 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, rte_errno = ENOMEM; return NULL; } + if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh, + MLX5_MR_BTREE_CACHE_N, socket)) { + /* rte_errno is already set. */ + goto error; + } tmpl->socket = socket; - if (priv->dev->data->dev_conf.intr_conf.rxq) + if (dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; - /* Enable scattered packets support for this queue if necessary. */ + /* + * This Rx queue can be configured as a Multi-Packet RQ if all of the + * following conditions are met: + * - MPRQ is enabled. + * - The number of descs is more than the number of strides. + * - max_rx_pkt_len plus overhead is less than the max size of a + * stride. + * Otherwise, enable Rx scatter if necessary. + */ assert(mb_len >= RTE_PKTMBUF_HEADROOM); - if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= - (mb_len - RTE_PKTMBUF_HEADROOM)) { + mprq_stride_size = + dev->data->dev_conf.rxmode.max_rx_pkt_len + + sizeof(struct rte_mbuf_ext_shared_info) + + RTE_PKTMBUF_HEADROOM; + if (mprq_en && + desc > (1U << config->mprq.stride_num_n) && + mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) { + /* TODO: Rx scatter isn't supported yet. */ + tmpl->rxq.sges_n = 0; + /* Trim the number of descs needed. 
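+		 *
+		 * Worked example (illustrative): max_rx_pkt_len = 1500 makes
+		 * mprq_stride_size = 1500 +
+		 * sizeof(struct rte_mbuf_ext_shared_info) +
+		 * RTE_PKTMBUF_HEADROOM, a bit over 1.6 KB, so strd_sz_n
+		 * becomes log2above() of that, i.e. 11 (2 KiB strides),
+		 * unless min_stride_size_n is larger. With stride_num_n = 6,
+		 * a request for desc = 4096 is trimmed to 4096 >> 6 = 64
+		 * multi-packet WQEs below.
+		 *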
*/ + desc >>= config->mprq.stride_num_n; + tmpl->rxq.strd_num_n = config->mprq.stride_num_n; + tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size), + config->mprq.min_stride_size_n); + tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; + tmpl->rxq.mprq_max_memcpy_len = + RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM, + config->mprq.max_memcpy_len); + DRV_LOG(DEBUG, + "port %u Rx queue %u: Multi-Packet RQ is enabled" + " strd_num_n = %u, strd_sz_n = %u", + dev->data->port_id, idx, + tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); + } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= + (mb_len - RTE_PKTMBUF_HEADROOM)) { tmpl->rxq.sges_n = 0; - } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { + } else if (offloads & DEV_RX_OFFLOAD_SCATTER) { unsigned int size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -1031,6 +1421,14 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, dev->data->dev_conf.rxmode.max_rx_pkt_len, mb_len - RTE_PKTMBUF_HEADROOM); } + if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) + DRV_LOG(WARNING, + "port %u MPRQ is requested but cannot be enabled" + " (requested: desc = %u, stride_sz = %u," + " supported: min_stride_num = %u, max_stride_sz = %u).", + dev->data->port_id, desc, mprq_stride_size, + (1 << config->mprq.stride_num_n), + (1 << config->mprq.max_stride_size_n)); DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { @@ -1044,22 +1442,22 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, goto error; } /* Toggle RX checksum offload if hardware supports it. */ - tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM); - tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP); + tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP); /* Configure VLAN stripping. */ - tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. 
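 *
 * DEV_RX_OFFLOAD_KEEP_CRC inverts the polarity of the old CRC_STRIP
 * flag: stripping is now the default and the application opts into
 * keeping the FCS. Hedged application-side sketch:
 *
 *   struct rte_eth_conf conf = { 0 };
 *
 *   conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * crc_present is then set below only if the firmware supports keeping
 * the FCS (config->hw_fcs_strip).
 *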
*/ - if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { - tmpl->rxq.crc_present = 0; - } else if (config->hw_fcs_strip) { - tmpl->rxq.crc_present = 1; - } else { - DRV_LOG(WARNING, - "port %u CRC stripping has been disabled but will" - " still be performed by hardware, make sure MLNX_OFED" - " and firmware are up to date", - dev->data->port_id); - tmpl->rxq.crc_present = 0; + tmpl->rxq.crc_present = 0; + if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) { + if (config->hw_fcs_strip) { + tmpl->rxq.crc_present = 1; + } else { + DRV_LOG(WARNING, + "port %u CRC stripping has been disabled but will" + " still be performed by hardware, make sure MLNX_OFED" + " and firmware are up to date", + dev->data->port_id); + } } DRV_LOG(DEBUG, "port %u CRC stripping is %s, %u bytes will be subtracted from" @@ -1073,14 +1471,16 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; tmpl->rxq.mp = mp; - tmpl->rxq.stats.idx = idx; tmpl->rxq.elts_n = log2above(desc); + tmpl->rxq.rq_repl_thresh = + MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n); tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); - tmpl->idx = idx; +#ifndef RTE_ARCH_64 + tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq; +#endif + tmpl->rxq.idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, - idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: @@ -1094,7 +1494,7 @@ error: * @param dev * Pointer to Ethernet device. * @param idx - * TX queue index. + * RX queue index. * * @return * A pointer to the queue if it exists, NULL otherwise. @@ -1102,7 +1502,7 @@ error: struct mlx5_rxq_ctrl * mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; if ((*priv->rxqs)[idx]) { @@ -1111,9 +1511,6 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) rxq); mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", - dev->data->port_id, rxq_ctrl->idx, - rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; } @@ -1124,7 +1521,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) * @param dev * Pointer to Ethernet device. * @param idx - * TX queue index. + * RX queue index. * * @return * 1 while a reference on it exists, 0 when freed. @@ -1132,7 +1529,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) @@ -1141,9 +1538,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) assert(rxq_ctrl->priv); if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) rxq_ctrl->ibv = NULL; - DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, - rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { + mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh); LIST_REMOVE(rxq_ctrl, next); rte_free(rxq_ctrl); (*priv->rxqs)[idx] = NULL; @@ -1158,7 +1554,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) * @param dev * Pointer to Ethernet device. * @param idx - * TX queue index. + * RX queue index. 
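+ *
+ * Usage sketch (hedged): every successful mlx5_rxq_get() must be paired
+ * with one mlx5_rxq_release(); the control structure is freed only once
+ * the reference count drops to zero:
+ *
+ *   struct mlx5_rxq_ctrl *ctrl = mlx5_rxq_get(dev, idx);
+ *
+ *   if (ctrl != NULL) {
+ *           // ... use the queue ...
+ *           mlx5_rxq_release(dev, idx);
+ *   }
+ *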
* * @return * 1 if the queue can be released, negative errno otherwise and rte_errno is @@ -1167,7 +1563,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) { @@ -1190,13 +1586,13 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) int mlx5_rxq_verify(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced", - dev->data->port_id, rxq_ctrl->idx); + dev->data->port_id, rxq_ctrl->rxq.idx); ++ret; } return ret; @@ -1219,7 +1615,7 @@ struct mlx5_ind_table_ibv * mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues, uint32_t queues_n) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; const unsigned int wq_n = rte_is_power_of_2(queues_n) ? log2above(queues_n) : @@ -1247,7 +1643,7 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues, for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j) wq[i] = wq[j]; ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table - (priv->ctx, + (priv->sh->ctx, &(struct ibv_rwq_ind_table_init_attr){ .log_ind_tbl_size = wq_n, .ind_tbl = wq, @@ -1259,14 +1655,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues, } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", - dev->data->port_id, (void *)ind_tbl, - rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DRV_LOG(DEBUG, "port %u cannot create indirection table", - dev->data->port_id); + DEBUG("port %u cannot create indirection table", dev->data->port_id); return NULL; } @@ -1287,7 +1679,7 @@ struct mlx5_ind_table_ibv * mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues, uint32_t queues_n) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { @@ -1301,9 +1693,6 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues, unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", - dev->data->port_id, (void *)ind_tbl, - rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) mlx5_rxq_get(dev, ind_tbl->queues[i]); } @@ -1327,9 +1716,6 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, { unsigned int i; - DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", - ((struct priv *)dev->data->dev_private)->port, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) claim_zero(mlx5_glue->destroy_rwq_ind_table (ind_tbl->ind_table)); @@ -1355,7 +1741,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev, int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; int ret = 0; @@ -1385,9 +1771,7 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev) * @param queues_n * Number of queues. 
* @param tunnel - * Tunnel type, implies tunnel offloading like inner checksum if available. - * @param rss_level - * RSS hash on tunnel level. + * Tunnel type. * * @return * The Verbs object initialised, NULL otherwise and rte_errno is set. @@ -1397,16 +1781,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, const uint16_t *queues, uint32_t queues_n, - uint32_t tunnel, uint32_t rss_level) + int tunnel __rte_unused) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; struct mlx5_ind_table_ibv *ind_tbl; struct ibv_qp *qp; - int err; #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT - struct mlx5dv_qp_init_attr qp_init_attr = {0}; + struct mlx5dv_qp_init_attr qp_init_attr; #endif + int err; queues_n = hash_fields ? queues_n : 1; ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); @@ -1416,18 +1800,24 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, rte_errno = ENOMEM; return NULL; } - if (!rss_key_len) { - rss_key_len = rss_hash_default_key_len; - rss_key = rss_hash_default_key; - } #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + memset(&qp_init_attr, 0, sizeof(qp_init_attr)); if (tunnel) { qp_init_attr.comp_mask = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS; } +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + if (dev->data->dev_conf.lpbk_mode) { + /* Allow packet sent from NIC loop back w/o source MAC check. */ + qp_init_attr.comp_mask |= + MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS; + qp_init_attr.create_flags |= + MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC; + } +#endif qp = mlx5_glue->dv_create_qp - (priv->ctx, + (priv->sh->ctx, &(struct ibv_qp_init_attr_ex){ .qp_type = IBV_QPT_RAW_PACKET, .comp_mask = @@ -1436,22 +1826,17 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, IBV_QP_INIT_ATTR_RX_HASH, .rx_hash_conf = (struct ibv_rx_hash_conf){ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_key_len ? rss_key_len : - rss_hash_default_key_len, - .rx_hash_key = rss_key ? - (void *)(uintptr_t)rss_key : - rss_hash_default_key, - .rx_hash_fields_mask = hash_fields | - (tunnel && rss_level > 1 ? - (uint32_t)IBV_RX_HASH_INNER : 0), + .rx_hash_key_len = rss_key_len, + .rx_hash_key = (void *)(uintptr_t)rss_key, + .rx_hash_fields_mask = hash_fields, }, .rwq_ind_tbl = ind_tbl->ind_table, - .pd = priv->pd, + .pd = priv->sh->pd, }, &qp_init_attr); #else qp = mlx5_glue->create_qp_ex - (priv->ctx, + (priv->sh->ctx, &(struct ibv_qp_init_attr_ex){ .qp_type = IBV_QPT_RAW_PACKET, .comp_mask = @@ -1460,15 +1845,12 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, IBV_QP_INIT_ATTR_RX_HASH, .rx_hash_conf = (struct ibv_rx_hash_conf){ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = rss_key_len ? rss_key_len : - rss_hash_default_key_len, - .rx_hash_key = rss_key ? 
- (void *)(uintptr_t)rss_key : - rss_hash_default_key, + .rx_hash_key_len = rss_key_len, + .rx_hash_key = (void *)(uintptr_t)rss_key, .rx_hash_fields_mask = hash_fields, }, .rwq_ind_tbl = ind_tbl->ind_table, - .pd = priv->pd, + .pd = priv->sh->pd, }); #endif if (!qp) { @@ -1482,14 +1864,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, hrxq->qp = qp; hrxq->rss_key_len = rss_key_len; hrxq->hash_fields = hash_fields; - hrxq->tunnel = tunnel; - hrxq->rss_level = rss_level; memcpy(hrxq->rss_key, rss_key, rss_key_len); +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp); + if (!hrxq->action) { + rte_errno = errno; + goto error; + } +#endif rte_atomic32_inc(&hrxq->refcnt); LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next); - DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", - dev->data->port_id, (void *)hrxq, - rte_atomic32_read(&hrxq->refcnt)); return hrxq; error: err = rte_errno; /* Save rte_errno before cleanup. */ @@ -1512,10 +1896,6 @@ error: * first queue index will be taken for the indirection table. * @param queues_n * Number of queues. - * @param tunnel - * Tunnel type, implies tunnel offloading like inner checksum if available. - * @param rss_level - * RSS hash on tunnel level * * @return * An hash Rx queue on success. @@ -1524,10 +1904,9 @@ struct mlx5_hrxq * mlx5_hrxq_get(struct rte_eth_dev *dev, const uint8_t *rss_key, uint32_t rss_key_len, uint64_t hash_fields, - const uint16_t *queues, uint32_t queues_n, - uint32_t tunnel, uint32_t rss_level) + const uint16_t *queues, uint32_t queues_n) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; queues_n = hash_fields ? queues_n : 1; @@ -1540,10 +1919,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, continue; if (hrxq->hash_fields != hash_fields) continue; - if (hrxq->tunnel != tunnel) - continue; - if (hrxq->rss_level != rss_level) - continue; ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n); if (!ind_tbl) continue; @@ -1552,9 +1927,6 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, continue; } rte_atomic32_inc(&hrxq->refcnt); - DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", - dev->data->port_id, (void *)hrxq, - rte_atomic32_read(&hrxq->refcnt)); return hrxq; } return NULL; @@ -1574,10 +1946,10 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) { - DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d", - ((struct priv *)dev->data->dev_private)->port, - (void *)hrxq, rte_atomic32_read(&hrxq->refcnt)); if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + mlx5_glue->destroy_flow_action(hrxq->action); +#endif claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); mlx5_ind_table_ibv_release(dev, hrxq->ind_table); LIST_REMOVE(hrxq, next); @@ -1600,7 +1972,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_hrxq *hrxq; int ret = 0; @@ -1612,3 +1984,246 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev) } return ret; } + +/** + * Create a drop Rx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
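+ *
+ * @note
+ *   The drop queue is a per-port singleton: the first call creates a
+ *   one-entry CQ/WQ pair and caches it in priv->drop_queue.rxq, later
+ *   calls return the cached object. Hedged usage sketch:
+ *
+ *     struct mlx5_rxq_ibv *rxq = mlx5_rxq_ibv_drop_new(dev);
+ *
+ *     if (rxq == NULL)
+ *             return -rte_errno; // rte_errno set by the callee
+ *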
+ */ +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; + struct ibv_cq *cq; + struct ibv_wq *wq = NULL; + struct mlx5_rxq_ibv *rxq; + + if (priv->drop_queue.rxq) + return priv->drop_queue.rxq; + cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0); + if (!cq) { + DEBUG("port %u cannot allocate CQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + wq = mlx5_glue->create_wq(ctx, + &(struct ibv_wq_init_attr){ + .wq_type = IBV_WQT_RQ, + .max_wr = 1, + .max_sge = 1, + .pd = priv->sh->pd, + .cq = cq, + }); + if (!wq) { + DEBUG("port %u cannot allocate WQ for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0); + if (!rxq) { + DEBUG("port %u cannot allocate drop Rx queue memory", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + rxq->cq = cq; + rxq->wq = wq; + priv->drop_queue.rxq = rxq; + return rxq; +error: + if (wq) + claim_zero(mlx5_glue->destroy_wq(wq)); + if (cq) + claim_zero(mlx5_glue->destroy_cq(cq)); + return NULL; +} + +/** + * Release a drop Rx queue Verbs object. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +void +mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq; + + if (rxq->wq) + claim_zero(mlx5_glue->destroy_wq(rxq->wq)); + if (rxq->cq) + claim_zero(mlx5_glue->destroy_cq(rxq->cq)); + rte_free(rxq); + priv->drop_queue.rxq = NULL; +} + +/** + * Create a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. + */ +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl; + struct mlx5_rxq_ibv *rxq; + struct mlx5_ind_table_ibv tmpl; + + rxq = mlx5_rxq_ibv_drop_new(dev); + if (!rxq) + return NULL; + tmpl.ind_table = mlx5_glue->create_rwq_ind_table + (priv->sh->ctx, + &(struct ibv_rwq_ind_table_init_attr){ + .log_ind_tbl_size = 0, + .ind_tbl = &rxq->wq, + .comp_mask = 0, + }); + if (!tmpl.ind_table) { + DEBUG("port %u cannot allocate indirection table for drop" + " queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0); + if (!ind_tbl) { + rte_errno = ENOMEM; + goto error; + } + ind_tbl->ind_table = tmpl.ind_table; + return ind_tbl; +error: + mlx5_rxq_ibv_drop_release(dev); + return NULL; +} + +/** + * Release a drop indirection table. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table; + + claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table)); + mlx5_rxq_ibv_drop_release(dev); + rte_free(ind_tbl); + priv->drop_queue.hrxq->ind_table = NULL; +} + +/** + * Create a drop Rx Hash queue. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
+ */ +struct mlx5_hrxq * +mlx5_hrxq_drop_new(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_ind_table_ibv *ind_tbl; + struct ibv_qp *qp; + struct mlx5_hrxq *hrxq; + + if (priv->drop_queue.hrxq) { + rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt); + return priv->drop_queue.hrxq; + } + ind_tbl = mlx5_ind_table_ibv_drop_new(dev); + if (!ind_tbl) + return NULL; + qp = mlx5_glue->create_qp_ex(priv->sh->ctx, + &(struct ibv_qp_init_attr_ex){ + .qp_type = IBV_QPT_RAW_PACKET, + .comp_mask = + IBV_QP_INIT_ATTR_PD | + IBV_QP_INIT_ATTR_IND_TABLE | + IBV_QP_INIT_ATTR_RX_HASH, + .rx_hash_conf = (struct ibv_rx_hash_conf){ + .rx_hash_function = + IBV_RX_HASH_FUNC_TOEPLITZ, + .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN, + .rx_hash_key = rss_hash_default_key, + .rx_hash_fields_mask = 0, + }, + .rwq_ind_tbl = ind_tbl->ind_table, + .pd = priv->sh->pd + }); + if (!qp) { + DEBUG("port %u cannot allocate QP for drop queue", + dev->data->port_id); + rte_errno = errno; + goto error; + } + hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0); + if (!hrxq) { + DRV_LOG(WARNING, + "port %u cannot allocate memory for drop queue", + dev->data->port_id); + rte_errno = ENOMEM; + goto error; + } + hrxq->ind_table = ind_tbl; + hrxq->qp = qp; +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp); + if (!hrxq->action) { + rte_errno = errno; + goto error; + } +#endif + priv->drop_queue.hrxq = hrxq; + rte_atomic32_set(&hrxq->refcnt, 1); + return hrxq; +error: + if (ind_tbl) + mlx5_ind_table_ibv_drop_release(dev); + return NULL; +} + +/** + * Release a drop hash Rx queue. + * + * @param dev + * Pointer to Ethernet device. + */ +void +mlx5_hrxq_drop_release(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq; + + if (rte_atomic32_dec_and_test(&hrxq->refcnt)) { +#ifdef HAVE_IBV_FLOW_DV_SUPPORT + mlx5_glue->destroy_flow_action(hrxq->action); +#endif + claim_zero(mlx5_glue->destroy_qp(hrxq->qp)); + mlx5_ind_table_ibv_drop_release(dev); + rte_free(hrxq); + priv->drop_queue.hrxq = NULL; + } +}