net/mlx5: remove port info from shareable Rx queue
author Xueming Li <xuemingl@nvidia.com>
Thu, 4 Nov 2021 12:33:16 +0000 (20:33 +0800)
committer Raslan Darawsheh <rasland@nvidia.com>
Thu, 4 Nov 2021 21:55:47 +0000 (22:55 +0100)
To prepare for shared Rx queues, remove the port info from the shareable
Rx queue control structure.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/mlx5_devx.c
drivers/net/mlx5/mlx5_rx.c
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx_vec.c
drivers/net/mlx5/mlx5_trigger.c

index 443252d..8b3651f 100644 (file)
@@ -918,7 +918,7 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
        }
        rxq->rxq_ctrl = rxq_ctrl;
        rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
-       rxq_ctrl->priv = priv;
+       rxq_ctrl->sh = priv->sh;
        rxq_ctrl->obj = rxq;
        rxq_data = &rxq_ctrl->rxq;
        /* Create CQ using DevX API. */
index 258a645..d41905a 100644 (file)
@@ -118,15 +118,7 @@ int
 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
        struct mlx5_rxq_data *rxq = rx_queue;
-       struct mlx5_rxq_ctrl *rxq_ctrl =
-                       container_of(rxq, struct mlx5_rxq_ctrl, rxq);
-       struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
 
-       if (dev->rx_pkt_burst == NULL ||
-           dev->rx_pkt_burst == removed_rx_burst) {
-               rte_errno = ENOTSUP;
-               return -rte_errno;
-       }
        if (offset >= (1 << rxq->cqe_n)) {
                rte_errno = EINVAL;
                return -rte_errno;
@@ -438,10 +430,10 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
                sm.is_wq = 1;
                sm.queue_id = rxq->idx;
                sm.state = IBV_WQS_RESET;
-               if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
+               if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
                        return -1;
                if (rxq_ctrl->dump_file_n <
-                   rxq_ctrl->priv->config.max_dump_files_num) {
+                   RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
                        MKSTR(err_str, "Unexpected CQE error syndrome "
                              "0x%02x CQN = %u RQN = %u wqe_counter = %u"
                              " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
@@ -478,8 +470,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
                        sm.is_wq = 1;
                        sm.queue_id = rxq->idx;
                        sm.state = IBV_WQS_RDY;
-                       if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
-                                                   &sm))
+                       if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
                                return -1;
                        if (vec) {
                                const uint32_t elts_n =
index b219182..c04c0c7 100644 (file)
 /* Support tunnel matching. */
 #define MLX5_FLOW_TUNNEL 10
 
+#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
+#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
+#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))
+
 /* First entry must be NULL for comparison. */
 #define mlx5_mr_btree_len(bt) ((bt)->len - 1)
 
@@ -152,7 +156,6 @@ struct mlx5_rxq_ctrl {
        LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
        struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
        struct mlx5_dev_ctx_shared *sh; /* Shared context. */
-       struct mlx5_priv *priv; /* Back pointer to private data. */
        enum mlx5_rxq_type type; /* Rxq type. */
        unsigned int socket; /* CPU socket ID for allocations. */
        uint32_t share_group; /* Group ID of shared RXQ. */
@@ -318,7 +321,7 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
         */
        rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
        mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
-       return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache,
+       return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
                                     mr_ctrl, mp, addr);
 }
 
index 7b637fd..5a20966 100644 (file)
@@ -148,8 +148,14 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
 
                buf = rte_pktmbuf_alloc(seg->mp);
                if (buf == NULL) {
-                       DRV_LOG(ERR, "port %u empty mbuf pool",
-                               PORT_ID(rxq_ctrl->priv));
+                       if (rxq_ctrl->share_group == 0)
+                               DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
+                                       RXQ_PORT_ID(rxq_ctrl),
+                                       rxq_ctrl->rxq.idx);
+                       else
+                               DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
+                                       rxq_ctrl->share_group,
+                                       rxq_ctrl->share_qid);
                        rte_errno = ENOMEM;
                        goto error;
                }
@@ -193,11 +199,16 @@ rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
                for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
                        (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
        }
-       DRV_LOG(DEBUG,
-               "port %u SPRQ queue %u allocated and configured %u segments"
-               " (max %u packets)",
-               PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
-               elts_n / (1 << rxq_ctrl->rxq.sges_n));
+       if (rxq_ctrl->share_group == 0)
+               DRV_LOG(DEBUG,
+                       "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+                       RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
+                       elts_n / (1 << rxq_ctrl->rxq.sges_n));
+       else
+               DRV_LOG(DEBUG,
+                       "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+                       rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
+                       elts_n / (1 << rxq_ctrl->rxq.sges_n));
        return 0;
 error:
        err = rte_errno; /* Save rte_errno before cleanup. */
@@ -207,8 +218,12 @@ error:
                        rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
                (*rxq_ctrl->rxq.elts)[i] = NULL;
        }
-       DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
-               PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
+       if (rxq_ctrl->share_group == 0)
+               DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
+                       RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
+       else
+               DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
+                       rxq_ctrl->share_group, rxq_ctrl->share_qid);
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -284,8 +299,12 @@ rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
        uint16_t used = q_n - (elts_ci - rxq->rq_pi);
        uint16_t i;
 
-       DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
-               PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
+       if (rxq_ctrl->share_group == 0)
+               DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
+                       RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
+       else
+               DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
+                       rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
        if (rxq->elts == NULL)
                return;
        /**
@@ -1630,7 +1649,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
                (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
        tmpl->rxq.port_id = dev->data->port_id;
        tmpl->sh = priv->sh;
-       tmpl->priv = priv;
        tmpl->rxq.mp = rx_seg[0].mp;
        tmpl->rxq.elts_n = log2above(desc);
        tmpl->rxq.rq_repl_thresh =
@@ -1690,7 +1708,6 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
        tmpl->rxq.rss_hash = 0;
        tmpl->rxq.port_id = dev->data->port_id;
        tmpl->sh = priv->sh;
-       tmpl->priv = priv;
        tmpl->rxq.mp = NULL;
        tmpl->rxq.elts_n = log2above(desc);
        tmpl->rxq.elts = NULL;
index ecd273e..5116818 100644 (file)
@@ -550,7 +550,7 @@ mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
        struct mlx5_rxq_ctrl *ctrl =
                container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 
-       if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
+       if (!RXQ_PORT(ctrl)->config.rx_vec_en || rxq->sges_n != 0)
                return -ENOTSUP;
        if (rxq->lro)
                return -ENOTSUP;
index a124f74..caafdf2 100644 (file)
@@ -131,9 +131,11 @@ mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
  *   0 on success, (-1) on failure and rte_errno is set.
  */
 static int
-mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+mlx5_rxq_mempool_register(struct rte_eth_dev *dev,
+                         struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-       struct mlx5_priv *priv = rxq_ctrl->priv;
+       struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_dev_ctx_shared *sh = rxq_ctrl->sh;
        struct rte_mempool *mp;
        uint32_t s;
        int ret = 0;
@@ -148,9 +150,8 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
        }
        for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
                mp = rxq_ctrl->rxq.rxseg[s].mp;
-               ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
-                                              priv->sh->cdev->pd, mp,
-                                              &priv->mp_id);
+               ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache,
+                                              sh->cdev->pd, mp, &priv->mp_id);
                if (ret < 0 && rte_errno != EEXIST)
                        return ret;
                rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
@@ -213,7 +214,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                         * the implicit registration is enabled or not,
                         * Rx mempool destruction is tracked to free MRs.
                         */
-                       if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+                       if (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)
                                goto error;
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)