net/mlx5: handle MPRQ incompatibility with external buffers
author     Alexander Kozyrev <akozyrev@nvidia.com>
           Fri, 11 Mar 2022 23:08:25 +0000 (01:08 +0200)
committer  Raslan Darawsheh <rasland@nvidia.com>
           Thu, 21 Apr 2022 10:47:18 +0000 (12:47 +0200)
Multi-Packet Rx queue (MPRQ) uses PMD-managed buffers to store packets.
These buffers are externally attached to user mbufs. This conflicts
with the feature that allows an application to use its own user-managed,
externally attached buffers. Fall back to Single-Packet Rx queue (SPRQ)
in case a mempool with pinned external buffers is configured. This
limitation is already documented in the mlx5 guide.

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
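
For context, a hedged application-side sketch of the configuration this
patch guards against: an Rx mempool created over pinned external buffers
with rte_pktmbuf_pool_create_extbuf(). Such a pool carries
RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF in its private flags, which the driver
now checks in the diff below. The pool name, sizes, and the helper itself
are illustrative assumptions, not part of this commit.

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative sizes, not taken from this commit. */
#define EXT_NB_MBUFS 4096u
#define EXT_BUF_SIZE 2048u

static struct rte_mempool *
create_pinned_extbuf_pool(int socket)
{
	/* One contiguous external memory area backs all mbuf data buffers. */
	struct rte_pktmbuf_extmem ext_mem = {
		.buf_len = (size_t)EXT_NB_MBUFS * EXT_BUF_SIZE,
		.elt_size = EXT_BUF_SIZE,
	};

	ext_mem.buf_ptr = rte_malloc_socket("ext_area", ext_mem.buf_len,
					    RTE_CACHE_LINE_SIZE, socket);
	if (ext_mem.buf_ptr == NULL)
		return NULL;
	ext_mem.buf_iova = rte_malloc_virt2iova(ext_mem.buf_ptr);

	/* Mbufs from this pool get RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF set,
	 * which mlx5_rx_queue_setup() now detects to disable MPRQ. */
	return rte_pktmbuf_pool_create_extbuf("ext_pool", EXT_NB_MBUFS,
					      256, 0, EXT_BUF_SIZE, socket,
					      &ext_mem, 1);
}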
drivers/net/mlx5/mlx5_rx.h
drivers/net/mlx5/mlx5_rxq.c

diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index acebe3348ce3f4aceddbc0f86c45e561deaff199..5bf88b61811791dcfe5878cc54bd018d7d4a0641 100644
@@ -209,7 +209,7 @@ struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
                                   const struct rte_eth_rxseg_split *rx_seg,
-                                  uint16_t n_seg);
+                                  uint16_t n_seg, bool is_extmem);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
        (struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f16795bac373d798f502a02fa5b0fa7bcedabb01..925544ae3dd29e3ec29cbf6bf8499cf04841dba0 100644
@@ -840,6 +840,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        int res;
        uint64_t offloads = conf->offloads |
                            dev->data->dev_conf.rxmode.offloads;
+       bool is_extmem = false;
 
        if (mp) {
                /*
@@ -849,6 +850,8 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                 */
                rx_seg = &rx_single;
                n_seg = 1;
+               is_extmem = rte_pktmbuf_priv_flags(mp) &
+                           RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF;
        }
        if (n_seg > 1) {
                /* The offloads should be checked on rte_eth_dev layer. */
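
As a standalone restatement of the detection added above (a hypothetical
helper; the driver performs the check inline): rte_pktmbuf_priv_flags()
reads the flags stored in the pktmbuf pool's private area.

#include <stdbool.h>
#include <rte_mbuf.h>

/* Hypothetical helper mirroring the inline check added above: true when
 * the mempool's mbufs use pinned external data buffers. */
static bool
mp_has_pinned_extbuf(struct rte_mempool *mp)
{
	return (rte_pktmbuf_priv_flags(mp) &
		RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
}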
@@ -912,7 +915,7 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        }
        if (rxq_ctrl == NULL) {
                rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
-                                       n_seg);
+                                       n_seg, is_extmem);
                if (rxq_ctrl == NULL) {
                        DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
                                dev->data->port_id, idx);
@@ -1548,7 +1551,8 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
  *   Log number of strides to configure for this queue.
  * @param actual_log_stride_size
  *   Log stride size to configure for this queue.
- *
+ * @param is_extmem
+ *   Is external pinned memory pool used.
  * @return
  *   0 if Multi-Packet RQ is supported, otherwise -1.
  */
@@ -1556,7 +1560,8 @@ static int
 mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                  bool rx_seg_en, uint32_t min_mbuf_size,
                  uint32_t *actual_log_stride_num,
-                 uint32_t *actual_log_stride_size)
+                 uint32_t *actual_log_stride_size,
+                 bool is_extmem)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_port_config *config = &priv->config;
@@ -1575,7 +1580,7 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                log_max_stride_size);
        uint32_t log_stride_wqe_size;
 
-       if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+       if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en || is_extmem)
                goto unsupport;
        /* Checks if chosen number of strides is in supported range. */
        if (config->mprq.log_stride_num > log_max_stride_num ||
@@ -1641,7 +1646,7 @@ unsupport:
                        " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
                        "  supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
                        " min_stride_sz = %u, max_stride_sz = %u).\n"
-                       "Rx segment is %senable.",
+                       "Rx segment is %senabled. External mempool is %sused.",
                        dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
                        RTE_BIT32(config->mprq.log_stride_size),
                        RTE_BIT32(config->mprq.log_stride_num),
@@ -1649,7 +1654,7 @@ unsupport:
                        RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
                        RTE_BIT32(dev_cap->mprq.log_min_stride_size),
                        RTE_BIT32(dev_cap->mprq.log_max_stride_size),
-                       rx_seg_en ? "" : "not ");
+                       rx_seg_en ? "" : "not ", is_extmem ? "" : "not ");
        return -1;
 }
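
In sum, the MPRQ gate now has three conditions. A simplified model of the
decision (a sketch, not the driver code verbatim):

/* Sketch of the eligibility test in mlx5_mprq_prepare(): MPRQ is kept
 * only when the device supports it, Rx buffer split is not requested,
 * and the mempool does not hold pinned external buffers; any failure
 * makes mlx5_rxq_new() fall back to SPRQ. */
static bool
mprq_eligible(bool mprq_supported, bool rx_seg_en, bool is_extmem)
{
	return mprq_supported && !rx_seg_en && !is_extmem;
}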
 
@@ -1671,7 +1676,8 @@ unsupport:
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
-            const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
+            const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg,
+            bool is_extmem)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
@@ -1694,7 +1700,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
                                               non_scatter_min_mbuf_size,
                                               &mprq_log_actual_stride_num,
-                                              &mprq_log_actual_stride_size);
+                                              &mprq_log_actual_stride_size,
+                                              is_extmem);
        /*
         * Always allocate extra slots, even if eventually
         * the vector Rx will not be used.