net/mlx5: check Tx queue size overflow
author Yongseok Koh <yskoh@mellanox.com>
Wed, 1 May 2019 01:01:43 +0000 (18:01 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 3 May 2019 16:45:23 +0000 (18:45 +0200)
If Tx packet inlining is enabled, the rdma-core library has to allocate
a Tx WQ large enough to support it. It is better for the PMD to
calculate the required WQ size from the queue parameters and return an
error with an appropriate message if it exceeds the device capability.

Cc: stable@dpdk.org
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
drivers/net/mlx5/mlx5_txq.c

index 4d55fd4..b281c45 100644
@@ -678,6 +678,27 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
        return ret;
 }
 
+/**
+ * Calculate the total number of WQEBB for Tx queue.
+ *
+ * Simplified version of calc_sq_size() in rdma-core.
+ *
+ * @param txq_ctrl
+ *   Pointer to Tx queue control structure.
+ *
+ * @return
+ *   The number of WQEBB.
+ */
+static int
+txq_calc_wqebb_cnt(struct mlx5_txq_ctrl *txq_ctrl)
+{
+       unsigned int wqe_size;
+       const unsigned int desc = 1 << txq_ctrl->txq.elts_n;
+
+       wqe_size = MLX5_WQE_SIZE + txq_ctrl->max_inline_data;
+       return rte_align32pow2(wqe_size * desc) / MLX5_WQE_SIZE;
+}
+
 /**
  * Set Tx queue parameters from device configuration.
  *
@@ -824,10 +845,16 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        tmpl->txq.port_id = dev->data->port_id;
        tmpl->txq.idx = idx;
        txq_set_params(tmpl);
-       DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
-               dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
-       DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
-               dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+       if (txq_calc_wqebb_cnt(tmpl) >
+           priv->sh->device_attr.orig_attr.max_qp_wr) {
+               DRV_LOG(ERR,
+                       "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
+                       " try smaller queue size",
+                       dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
+                       priv->sh->device_attr.orig_attr.max_qp_wr);
+               rte_errno = ENOMEM;
+               goto error;
+       }
        tmpl->txq.elts =
                (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
        rte_atomic32_inc(&tmpl->refcnt);
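
For illustration only, a minimal standalone sketch of the check introduced by
this patch, assuming a 64-byte WQEBB (MLX5_WQE_SIZE) and made-up values for
the queue size, inline data length and device limit; align32pow2() here is a
local stand-in for DPDK's rte_align32pow2().

#include <stdio.h>
#include <stdint.h>

/* Round up to the next power of two, like rte_align32pow2(). */
static uint32_t
align32pow2(uint32_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	return x + 1;
}

int
main(void)
{
	const unsigned int wqebb_size = 64;       /* MLX5_WQE_SIZE (assumed) */
	const unsigned int desc = 1024;           /* 1 << elts_n, example queue size */
	const unsigned int max_inline_data = 192; /* example inline setting */
	const int max_qp_wr = 32768;              /* example device_attr.max_qp_wr */

	/* 64 + 192 = 256 bytes per WQE. */
	unsigned int wqe_size = wqebb_size + max_inline_data;
	/* align32pow2(256 * 1024) / 64 = 262144 / 64 = 4096 WQEBBs. */
	int wqebb_cnt = align32pow2(wqe_size * desc) / wqebb_size;

	if (wqebb_cnt > max_qp_wr)
		printf("Tx WQEBB count (%d) exceeds the limit (%d), "
		       "try smaller queue size\n", wqebb_cnt, max_qp_wr);
	else
		printf("Tx WQEBB count %d fits within limit %d\n",
		       wqebb_cnt, max_qp_wr);
	return 0;
}

With these example numbers the queue fits; raising desc or max_inline_data far
enough would trip the check, which in the patch sets rte_errno = ENOMEM and
fails queue creation instead of letting rdma-core reject the WQ later.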