net/mlx5: move Rx queue hairpin info to private data
authorXueming Li <xuemingl@nvidia.com>
Thu, 4 Nov 2021 12:33:15 +0000 (20:33 +0800)
committerRaslan Darawsheh <rasland@nvidia.com>
Thu, 4 Nov 2021 21:55:47 +0000 (22:55 +0100)
Hairpin info of an Rx queue can't be shared between ports, so move it from the shared queue control structure to the per-port private queue data.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_trigger.c

index eccfbf1..b219182 100644 (file)
@@ -162,8 +162,6 @@ struct mlx5_rxq_ctrl {
        uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
        uint32_t wqn; /* WQ number. */
        uint16_t dump_file_n; /* Number of dump files. */
-       struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
-       uint32_t hairpin_status; /* Hairpin binding status. */
 };
 
 /* RX queue private data. */
@@ -173,6 +171,8 @@ struct mlx5_rxq_priv {
        struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
        LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
+       struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
+       uint32_t hairpin_status; /* Hairpin binding status. */
 };
 
 /* mlx5_rxq.c */
index 8071ddb..7b637fd 100644 (file)
@@ -1695,8 +1695,8 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
        tmpl->rxq.elts_n = log2above(desc);
        tmpl->rxq.elts = NULL;
        tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
-       tmpl->hairpin_conf = *hairpin_conf;
        tmpl->rxq.idx = idx;
+       rxq->hairpin_conf = *hairpin_conf;
        mlx5_rxq_ref(dev, idx);
        LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
        return tmpl;
@@ -1913,14 +1913,11 @@ const struct rte_eth_hairpin_conf *
 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+       struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
 
-       if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
-               rxq_ctrl = container_of((*priv->rxqs)[idx],
-                                       struct mlx5_rxq_ctrl,
-                                       rxq);
-               if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
-                       return &rxq_ctrl->hairpin_conf;
+       if (idx < priv->rxqs_n && rxq != NULL) {
+               if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+                       return &rxq->hairpin_conf;
        }
        return NULL;
 }
index e5d74d2..a124f74 100644 (file)
@@ -324,7 +324,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                }
                rxq_ctrl = rxq->ctrl;
                if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
-                   rxq_ctrl->hairpin_conf.peers[0].queue != i) {
+                   rxq->hairpin_conf.peers[0].queue != i) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
                                "Rx queue %d", dev->data->port_id,
@@ -354,7 +354,7 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                if (ret)
                        goto error;
                /* Qs with auto-bind will be destroyed directly. */
-               rxq_ctrl->hairpin_status = 1;
+               rxq->hairpin_status = 1;
                txq_ctrl->hairpin_status = 1;
                mlx5_txq_release(dev, i);
        }
@@ -457,9 +457,9 @@ mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
                }
                peer_info->qp_id = rxq_ctrl->obj->rq->id;
                peer_info->vhca_id = priv->config.hca_attr.vhca_id;
-               peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
-               peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
-               peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
+               peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
+               peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
+               peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
        }
        return 0;
 }
@@ -581,20 +581,20 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
                                dev->data->port_id, cur_queue);
                        return -rte_errno;
                }
-               if (rxq_ctrl->hairpin_status != 0) {
+               if (rxq->hairpin_status != 0) {
                        DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
                                dev->data->port_id, cur_queue);
                        return 0;
                }
                if (peer_info->tx_explicit !=
-                   rxq_ctrl->hairpin_conf.tx_explicit) {
+                   rxq->hairpin_conf.tx_explicit) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
                                " mismatch", dev->data->port_id, cur_queue);
                        return -rte_errno;
                }
                if (peer_info->manual_bind !=
-                   rxq_ctrl->hairpin_conf.manual_bind) {
+                   rxq->hairpin_conf.manual_bind) {
                        rte_errno = EINVAL;
                        DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
                                " mismatch", dev->data->port_id, cur_queue);
@@ -606,7 +606,7 @@ mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
                rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
                ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
                if (ret == 0)
-                       rxq_ctrl->hairpin_status = 1;
+                       rxq->hairpin_status = 1;
        }
        return ret;
 }
@@ -688,7 +688,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
                                dev->data->port_id, cur_queue);
                        return -rte_errno;
                }
-               if (rxq_ctrl->hairpin_status == 0) {
+               if (rxq->hairpin_status == 0) {
                        DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
                                dev->data->port_id, cur_queue);
                        return 0;
@@ -703,7 +703,7 @@ mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
                rq_attr.rq_state = MLX5_SQC_STATE_RST;
                ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
                if (ret == 0)
-                       rxq_ctrl->hairpin_status = 0;
+                       rxq->hairpin_status = 0;
        }
        return ret;
 }
@@ -1041,7 +1041,7 @@ mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
                        rxq_ctrl = rxq->ctrl;
                        if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
                                continue;
-                       pp = rxq_ctrl->hairpin_conf.peers[0].port;
+                       pp = rxq->hairpin_conf.peers[0].port;
                        if (pp >= RTE_MAX_ETHPORTS) {
                                rte_errno = ERANGE;
                                DRV_LOG(ERR, "port %hu queue %u peer port "