From bd41389e35ee3ed29cdee851efc8433f151e5928 Mon Sep 17 00:00:00 2001
From: Matan Azrad
Date: Mon, 29 Jul 2019 11:53:25 +0000
Subject: [PATCH] net/mlx5: allow LRO in regular Rx queue

LRO support was implemented only for MPRQ, hence the MPRQ Rx burst was
selected implicitly whenever LRO was configured on the port.

The current MPRQ support suffers from poor memory utilization: the PMD
allocates an external mempool for the packet data in addition to the
user mempool. Moreover, the user may get packet data addresses from a
mempool that was not configured by the application.

Even though MPRQ gives the best Rx performance in most cases, because
of the above facts it is better to remove the automatic MPRQ selection
when LRO is configured.

Move MPRQ to be selected only when the user forces it through the PMD
arguments, including the LRO case.

Allow the LRO offload using the regular RQ with the regular Rx burst
function.

Signed-off-by: Matan Azrad
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.c          |  4 +---
 drivers/net/mlx5/mlx5_ethdev.c   |  6 ------
 drivers/net/mlx5/mlx5_prm.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c      | 27 ++++++++++++++-------------
 drivers/net/mlx5/mlx5_rxtx.h     |  4 ++--
 drivers/net/mlx5/mlx5_rxtx_vec.c |  2 ++
 6 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ad0883dee9..a490bf2632 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1856,7 +1856,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 		if (priv->counter_fallback)
 			DRV_LOG(INFO, "Use fall-back DV counter management\n");
 		/* Check for LRO support. */
-		if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+		if (config.dest_tir && config.hca_attr.lro_cap) {
 			/* TBD check tunnel lro caps. */
 			config.lro.supported = config.hca_attr.lro_cap;
 			DRV_LOG(DEBUG, "Device supports LRO");
@@ -1869,8 +1869,6 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 				config.hca_attr.lro_timer_supported_periods[0];
 			DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
 				config.lro.timeout);
-			config.mprq.enabled = 1;
-			DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
 		}
 	}
 	if (config.mprq.enabled && mprq) {
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index e627909abe..9d11831181 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -432,12 +432,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
 		DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
 			dev->data->port_id, priv->rxqs_n, rxqs_n);
 		priv->rxqs_n = rxqs_n;
-		/*
-		 * WHen using LRO, MPRQ is implicitly enabled.
-		 * Adjust threshold value to ensure MPRQ can be enabled.
-		 */
-		if (lro_on && priv->config.mprq.min_rxqs_num > priv->rxqs_n)
-			priv->config.mprq.min_rxqs_num = priv->rxqs_n;
 		/*
 		 * If the requested number of RX queues is not a power of two,
 		 * use the maximum indirection table size for better balancing.
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 0716bbd808..6ea634541f 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -237,6 +237,9 @@
 /* Amount of data bytes after eth data segment. */
 #define MLX5_ESEG_EXTRA_DATA_SIZE 32u
 
+/* The maximum log value of segments per RQ WQE. */
+#define MLX5_MAX_LOG_RQ_SEGS 5u
+
 /* Completion mode. */
 enum mlx5_completion_mode {
 	MLX5_COMP_ONLY_ERR = 0x0,
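
Note: the new MLX5_MAX_LOG_RQ_SEGS caps a regular RQ WQE at 2^5 = 32
scatter-gather segments, and the mlx5_rxq.c change below validates the
computed log segment count against it up front instead of detecting the
bit-field overflow after the fact. The following standalone sketch shows
that computation; the mbuf data room, headroom, and packet length values
are illustrative assumptions, and log2above() mirrors the PMD's helper
of the same name:

/* Standalone sketch: how many SGEs a regular Rx queue needs for a given
 * maximum packet length, and the new validity check.  The sizes below
 * are illustrative assumptions, not values from the patch.
 */
#include <stdio.h>

#define MLX5_MAX_LOG_RQ_SEGS 5u /* Max log2 of segments per RQ WQE. */

/* Smallest n such that 2^n >= v; mirrors the PMD's log2above() helper. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	unsigned int max_rx_pkt_len = 9000; /* E.g. a jumbo frame. */
	unsigned int headroom = 128;        /* Typical RTE_PKTMBUF_HEADROOM. */
	unsigned int mb_len = 2048;         /* Mbuf data room, assumed. */
	unsigned int size = max_rx_pkt_len + headroom;
	unsigned int sges_n;

	/* Buffers needed to hold the packet, rounded up to a power of two. */
	sges_n = log2above((size / mb_len) + !!(size % mb_len));
	if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
		printf("too many SGEs: %u > %u\n",
		       1u << sges_n, 1u << MLX5_MAX_LOG_RQ_SEGS);
		return 1;
	}
	printf("sges_n = %u -> %u segments per WQE\n", sges_n, 1u << sges_n);
	return 0;
}
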
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 5e541566b3..ad5b0a95a3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -93,7 +93,6 @@ mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
 
 /**
  * Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -1607,6 +1606,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
 							RTE_PKTMBUF_HEADROOM;
+	unsigned int max_lro_size = 0;
 
 	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
 						    DEV_RX_OFFLOAD_SCATTER)) {
@@ -1672,8 +1672,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		tmpl->rxq.strd_headroom_en = strd_headroom_en;
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
 			    RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
-		mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
-			(1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
+		max_lro_size = RTE_MIN(max_rx_pkt_len,
+				       (1u << tmpl->rxq.strd_num_n) *
+				       (1u << tmpl->rxq.strd_sz_n));
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
 			" strd_num_n = %u, strd_sz_n = %u",
@@ -1681,6 +1682,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
 	} else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
+		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
@@ -1690,20 +1692,18 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		 * and round it to the next power of two.
 		 */
 		sges_n = log2above((size / mb_len) + !!(size % mb_len));
-		tmpl->rxq.sges_n = sges_n;
-		/* Make sure rxq.sges_n did not overflow. */
-		size = mb_len * (1 << tmpl->rxq.sges_n);
-		size -= RTE_PKTMBUF_HEADROOM;
-		if (size < max_rx_pkt_len) {
+		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-				" requested maximum packet size %u",
-				dev->data->port_id,
-				1 << sges_n,
-				max_rx_pkt_len);
-			rte_errno = EOVERFLOW;
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				1 << sges_n, max_rx_pkt_len,
+				1u << MLX5_MAX_LOG_RQ_SEGS);
+			rte_errno = ENOTSUP;
 			goto error;
 		}
+		tmpl->rxq.sges_n = sges_n;
+		max_lro_size = max_rx_pkt_len;
 	}
 	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
 		DRV_LOG(WARNING,
@@ -1725,6 +1725,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 		rte_errno = EINVAL;
 		goto error;
 	}
+	mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
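
Note: each RQ layout above (MPRQ, single segment, scattered
multi-segment) now records the largest LRO message it can deliver in
max_lro_size, and mlx5_max_lro_msg_size_adjust() is applied once, after
validation, for all of them. A minimal sketch of that adjustment
pattern, under the assumption that the port-wide limit is simply the
minimum over all configured queues (the struct and field names here are
hypothetical, not the PMD's):

/* Illustrative pattern only: the port-wide LRO limit must not exceed
 * what the most constrained Rx queue can deliver, so each new queue
 * "adjusts" it downward.  Names are assumptions for this sketch.
 */
#include <stdint.h>

struct example_priv {
	uint32_t max_lro_msg_size; /* 0 = not set yet. */
};

static void
example_max_lro_msg_size_adjust(struct example_priv *priv,
				uint32_t max_lro_size)
{
	/* Keep the smallest per-queue capability as the port-wide limit. */
	if (priv->max_lro_msg_size == 0 ||
	    max_lro_size < priv->max_lro_msg_size)
		priv->max_lro_msg_size = max_lro_size;
}
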
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 60d871c39a..5704d0a418 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -105,7 +105,7 @@ struct mlx5_rxq_data {
 	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
 	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
 	unsigned int crc_present:1; /* CRC must be subtracted. */
-	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
 	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
 	unsigned int elts_n:4; /* Log 2 of Mbufs. */
 	unsigned int rss_hash:1; /* RSS hash result is enabled. */
@@ -115,7 +115,7 @@ struct mlx5_rxq_data {
 	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
 	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
 	unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
-	unsigned int :3; /* Remaining bits. */
+	unsigned int :2; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index f6ec8289e6..3815ff616a 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -151,6 +151,8 @@ mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	if (mlx5_mprq_enabled(dev))
 		return -ENOTSUP;
+	if (mlx5_lro_on(dev))
+		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-- 
2.20.1
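
Note: since LRO no longer implies MPRQ, an application simply requests
the offload and the PMD serves it from regular Rx queues, falling back
from the vectorized burst as the mlx5_rxtx_vec.c hunk enforces. A
minimal configuration sketch against the 19.08-era ethdev API follows;
the function name, port/queue parameters, and the devargs example are
illustrative:

/* Minimal sketch: request the LRO offload on a port; with this patch
 * the mlx5 PMD serves it from regular Rx queues.  MPRQ is now used only
 * when forced explicitly via devargs, e.g. "-w 0000:03:00.0,mprq_en=1".
 * Capability checks and error handling are elided.
 */
#include <rte_ethdev.h>

static int
configure_port_with_lro(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.offloads = DEV_RX_OFFLOAD_TCP_LRO, /* LRO only. */
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

In practice the application should first call rte_eth_dev_info_get() and
confirm that DEV_RX_OFFLOAD_TCP_LRO is reported in rx_offload_capa
before requesting it.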