net/mlx5: support Rx buffer split description
author Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Mon, 26 Oct 2020 11:55:00 +0000 (11:55 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 3 Nov 2020 22:35:02 +0000 (23:35 +0100)
A routine is added to set up an Rx queue with an extended receive
buffer description. It allows the application to specify the desired
segment lengths, the data offsets within each buffer, and a dedicated
memory pool for every segment.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h
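
For context, a minimal sketch of the application side of this API is
shown below. The pool names (hdr_pool, pay_pool), the 128-byte split
point, and the helper itself are illustrative assumptions, not part of
this patch; passing a NULL mempool together with a populated rx_seg
array is what steers rte_eth_rx_queue_setup() into the buffer split
path added here.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Hypothetical helper: split each packet after 128 bytes, headers
 * into hdr_pool, the remaining payload into pay_pool. */
static int
setup_split_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		unsigned int socket, struct rte_mempool *hdr_pool,
		struct rte_mempool *pay_pool)
{
	union rte_eth_rxseg rx_useg[2];
	struct rte_eth_rxconf rxconf;

	memset(rx_useg, 0, sizeof(rx_useg));
	memset(&rxconf, 0, sizeof(rxconf));
	rx_useg[0].split.mp = hdr_pool;  /* first segment: headers */
	rx_useg[0].split.length = 128;   /* split point */
	rx_useg[1].split.mp = pay_pool;  /* second segment: payload */
	rx_useg[1].split.length = 0;     /* 0: pool's default data room */
	rxconf.rx_seg = rx_useg;
	rxconf.rx_nseg = RTE_DIM(rx_useg);
	/* Buffer split requires scattered Rx; the PMD checks both. */
	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
			  DEV_RX_OFFLOAD_SCATTER;
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      socket, &rxconf, NULL);
}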

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 72dac7119f53f1fa05c23363e7f4fd1304394aeb..0149338c8fb64d5bb5cb1623ee479142db5371d6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
        uint32_t supported:1; /* Whether LRO is supported. */
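
For reference, MLX5_MAX_LOG_RQ_SEGS comes from
drivers/net/mlx5/mlx5_defs.h; at the time of this commit it expands as
sketched below (shown for orientation only, not part of the diff):

/* drivers/net/mlx5/mlx5_defs.h (DPDK 20.11): */
#define MLX5_MAX_LOG_RQ_SEGS 5u
/* So MLX5_MAX_RXQ_NSEG == (1u << 5u) == 32 segments per Rx queue. */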
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index fc7d097191356e898f1165aeca0118c5cd0c2997..747abc96fafa420dd930bb1375032ce4b1987336 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -744,12 +744,40 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+       struct rte_eth_rxseg_split *rx_seg =
+                               (struct rte_eth_rxseg_split *)conf->rx_seg;
+       struct rte_eth_rxseg_split rx_single = {.mp = mp};
+       uint16_t n_seg = conf->rx_nseg;
        int res;
 
+       if (mp) {
+               /*
+                * The parameters should be checked on rte_eth_dev layer.
+                * If mp is specified it means the compatible configuration
+                * without buffer split feature tuning.
+                */
+               rx_seg = &rx_single;
+               n_seg = 1;
+       }
+       if (n_seg > 1) {
+               uint64_t offloads = conf->offloads |
+                                   dev->data->dev_conf.rxmode.offloads;
+
+               /* The offloads should be checked on rte_eth_dev layer. */
+               MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+               if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+                       DRV_LOG(ERR, "port %u queue index %u split "
+                                    "offload not configured",
+                                    dev->data->port_id, idx);
+                       rte_errno = ENOSPC;
+                       return -rte_errno;
+               }
+               MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+       }
        res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
        if (res)
                return res;
-       rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+       rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
        if (!rxq_ctrl) {
                DRV_LOG(ERR, "port %u unable to allocate queue index %u",
                        dev->data->port_id, idx);
@@ -1342,11 +1370,11 @@ mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
             unsigned int socket, const struct rte_eth_rxconf *conf,
-            struct rte_mempool *mp)
+            const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_ctrl *tmpl;
-       unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+       unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
        struct mlx5_dev_config *config = &priv->config;
        uint64_t offloads = conf->offloads |
                           dev->data->dev_conf.rxmode.offloads;
@@ -1358,7 +1386,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                                        RTE_PKTMBUF_HEADROOM;
        unsigned int max_lro_size = 0;
        unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
-       const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+       const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+                           !rx_seg[0].offset && !rx_seg[0].length;
        unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
                config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
        unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
@@ -1552,7 +1581,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
        tmpl->rxq.port_id = dev->data->port_id;
        tmpl->priv = priv;
-       tmpl->rxq.mp = mp;
+       tmpl->rxq.mp = rx_seg[0].mp;
        tmpl->rxq.elts_n = log2above(desc);
        tmpl->rxq.rq_repl_thresh =
                MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
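
The mp branch above preserves the legacy contract: a conventional call
such as the sketch below (rxconf_legacy and pool are illustrative) is
rewritten internally into the one-entry rx_single descriptor with
n_seg == 1, which is also the only shape for which MPRQ stays eligible
after this change:

/* Legacy single-pool setup: non-NULL mempool, no rx_seg array.
 * The PMD builds rx_single = {.mp = mp} and proceeds with n_seg == 1,
 * so existing applications are unaffected. */
ret = rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket,
			     &rxconf_legacy, pool);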
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 2ca0a4797988a3e66c1d95f651cdc6e8fc94200c..9618ad0bf0652c3cc8c5a54fd8bac986e1ab87ea 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -101,6 +101,13 @@ enum mlx5_rqx_code {
        MLX5_RXQ_CODE_DROPPED,
 };
 
+struct mlx5_eth_rxseg {
+       struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+       uint16_t length; /**< Segment data length, configures split point. */
+       uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+       uint32_t reserved; /**< Reserved field. */
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
        unsigned int csum:1; /* Enable checksum offloading. */
@@ -160,6 +167,9 @@ struct mlx5_rxq_data {
        uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
        uint64_t flow_meta_mask;
        int32_t flow_meta_offset;
+       uint32_t rxseg_n; /* Number of split segment descriptions. */
+       struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+       /* Buffer split segment descriptions - sizes, offsets, pools. */
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -323,7 +333,8 @@ int mlx5_rxq_obj_verify(struct rte_eth_dev *dev);
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
                                   uint16_t desc, unsigned int socket,
                                   const struct rte_eth_rxconf *conf,
-                                  struct rte_mempool *mp);
+                                  const struct rte_eth_rxseg_split *rx_seg,
+                                  uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
        (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         const struct rte_eth_hairpin_conf *hairpin_conf);
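
The new rxseg_n/rxseg fields give the datapath a per-queue copy of the
split layout. The code that fills them is not part of this excerpt; the
loop below is only a hedged sketch of what mlx5_rxq_new() could do with
the rx_seg array (variable names are illustrative):

/* Illustrative sketch: record the split layout in the queue data. */
uint32_t i;

tmpl->rxq.rxseg_n = n_seg;
for (i = 0; i < n_seg; ++i) {
	tmpl->rxq.rxseg[i].mp = rx_seg[i].mp;
	tmpl->rxq.rxseg[i].length = rx_seg[i].length;
	tmpl->rxq.rxseg[i].offset = rx_seg[i].offset;
}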