net/mlx5: fix Rx queue completion index consistency
author Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Fri, 6 Nov 2020 17:16:10 +0000 (17:16 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 13 Nov 2020 18:43:25 +0000 (19:43 +0100)
The Rx queue completion consumer index got a temporarily
wrong value pointing into the middle of a compressed CQE
session. If the application crashed at that moment, the
next queue restart caused handling of wrong CQEs pointed
to by the index and loss of consumer index synchronization,
which made reliable queue restart impossible.

Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling")
Cc: stable@dpdk.org
Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.c

index c04746a..f299fbc 100644 (file)
@@ -622,7 +622,7 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
        rte_io_wmb();
        *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
        rte_io_wmb();
-       /* Reset RQ consumer before moving queue ro READY state. */
+       /* Reset RQ consumer before moving queue to READY state. */
        *rxq->rq_db = rte_cpu_to_be_32(0);
        rte_io_wmb();
        ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
index 402e7d1..a5829f0 100644 (file)
@@ -1181,6 +1181,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                } else {
                        int ret;
                        int8_t op_own;
+                       uint32_t cq_ci;
 
                        ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
                        if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
@@ -1194,14 +1195,19 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                        return 0;
                                }
                        }
-                       ++rxq->cq_ci;
+                       /*
+                        * Introduce the local variable to have queue cq_ci
+                        * index in queue structure always consistent with
+                        * actual CQE boundary (not pointing to the middle
+                        * of compressed CQE session).
+                        */
+                       cq_ci = rxq->cq_ci + 1;
                        op_own = cqe->op_own;
                        if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
                                volatile struct mlx5_mini_cqe8 (*mc)[8] =
                                        (volatile struct mlx5_mini_cqe8 (*)[8])
                                        (uintptr_t)(&(*rxq->cqes)
-                                               [rxq->cq_ci &
-                                                cqe_cnt].pkt_info);
+                                               [cq_ci & cqe_cnt].pkt_info);
 
                                /* Fix endianness. */
                                zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
@@ -1214,10 +1220,9 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                 * 7 CQEs after the initial CQE instead of 8
                                 * for subsequent ones.
                                 */
-                               zip->ca = rxq->cq_ci;
+                               zip->ca = cq_ci;
                                zip->na = zip->ca + 7;
                                /* Compute the next non compressed CQE. */
-                               --rxq->cq_ci;
                                zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
                                /* Get packet size to return. */
                                len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
@@ -1233,6 +1238,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                        ++idx;
                                }
                        } else {
+                               rxq->cq_ci = cq_ci;
                                len = rte_be_to_cpu_32(cqe->byte_cnt);
                        }
                }