return n == priv->rxqs_n;
}
-/**
- * Check whether LRO is supported and enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 if disabled, 1 if enabled.
- */
-inline int
-mlx5_lro_on(struct rte_eth_dev *dev)
-{
- return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
-}
-
/**
* Allocate RX queue elements for Multi-Packet RQ.
*
DEV_RX_OFFLOAD_TCP_CKSUM);
if (config->hw_vlan_strip)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (MLX5_LRO_SUPPORTED(dev))
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
return offloads;
}
/**
* Returns the per-port supported offloads.
*
- * @param dev
- * Pointer to Ethernet device.
- *
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
+mlx5_get_rx_port_offloads(void)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
- if (MLX5_LRO_SUPPORTED(dev))
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
return offloads;
}
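
With this change LRO is advertised in the queue capability set instead of the
port one, so applications request it per Rx queue. A minimal application-side
sketch (assumed code, not part of the patch; port_id, queue_id, the descriptor
count and the mempool are placeholders):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Sketch: enable LRO on a single Rx queue now that mlx5 reports
 * DEV_RX_OFFLOAD_TCP_LRO in rx_queue_offload_capa. */
static int
setup_lro_queue(uint16_t port_id, uint16_t queue_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_TCP_LRO))
		return -ENOTSUP; /* queue-level LRO not advertised */
	rxconf = dev_info.default_rxconf;
	rxconf.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      rte_socket_id(), &rxconf, mp);
}

Note the design choice: ethdev drivers typically OR the queue capabilities
into rx_offload_capa when reporting device info, so LRO can still appear at
port level while individual queues opt in or out independently.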
cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+ !rxq_data->lro) {
cq_attr.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
"port %u Rx CQE compression is disabled for HW"
" timestamp",
dev->data->port_id);
+ } else if (priv->config.cqe_comp && rxq_data->lro) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for LRO",
+ dev->data->port_id);
}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
if (priv->config.cqe_pad) {
#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
sizeof(struct rte_vlan_hdr) * 2 + \
sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+ sizeof(struct rte_tcp_hdr) + \
+ MAX_TCP_OPTION_SIZE))
+
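
For reference, the new bound works out to a small constant; expanding it with
the standard DPDK header sizes (14 B Ethernet, 4 B VLAN, 40 B IPv6, 20 B TCP,
all assumed here rather than taken from the patch):

/* MLX5_MAX_LRO_HEADER_FIX, expanded:
 *   sizeof(struct rte_ether_hdr)      14
 * + 2 * sizeof(struct rte_vlan_hdr)    8
 * + sizeof(struct rte_ipv6_hdr)       40
 * + sizeof(struct rte_tcp_hdr)        20
 * + MAX_TCP_OPTION_SIZE               40
 *                                    ---
 *                                    122 bytes
 * i.e. LRO needs at least 122 B of free space in the first mbuf segment.
 */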
/**
 * Adjust the maximum LRO message size.
*
desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
+ unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
const int mprq_en = mlx5_check_mprq_support(dev) > 0;
unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
RTE_PKTMBUF_HEADROOM;
unsigned int max_lro_size = 0;
+ unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
if (non_scatter_min_mbuf_size > mb_len && !(offloads &
DEV_RX_OFFLOAD_SCATTER)) {
 * In this case scatter is certainly enabled and an empty mbuf may be
 * added at the start for the headroom.
*/
- if (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&
+ if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
non_scatter_min_mbuf_size > mb_len) {
strd_headroom_en = 0;
mprq_stride_size = RTE_MIN(max_rx_pkt_len,
config->mprq.min_stride_size_n);
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_headroom_en = strd_headroom_en;
- tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
- RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
+ tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+ config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pkt_len,
(1u << tmpl->rxq.strd_num_n) *
(1u << tmpl->rxq.strd_sz_n));
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
- } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ } else if (max_rx_pkt_len <= first_mb_free_size) {
tmpl->rxq.sges_n = 0;
max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
+ if (lro_on_queue && first_mb_free_size <
+ MLX5_MAX_LRO_HEADER_FIX) {
+		DRV_LOG(ERR, "Not enough space in the first segment (%u)"
+			" to include the max header size (%u) for LRO",
+			first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+ rte_errno = ENOTSUP;
+ goto error;
+ }
/*
* Determine the number of SGEs needed for a full packet
* and round it to the next power of two.
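
To put the new first-segment check in perspective, a worked example with
typical pool parameters (the 2048 B data room and 128 B headroom are assumed
values, not from the patch):

/* Example: rte_pktmbuf_data_room_size(mp) = 2048 and
 * RTE_PKTMBUF_HEADROOM = 128 give
 *   first_mb_free_size = 2048 - 128 = 1920,
 * far above MLX5_MAX_LRO_HEADER_FIX (122), so the new ENOTSUP path only
 * triggers for pools with a data room under roughly 250 B. */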
tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
tmpl->rxq.crc_present = 0;
+ tmpl->rxq.lro = lro_on_queue;
if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
* configured to scatter the FCS.
*/
- if (mlx5_lro_on(dev))
+ if (lro_on_queue)
DRV_LOG(WARNING,
"port %u CRC stripping has been "
"disabled but will still be performed "
* Number of queues.
* @param tunnel
* Tunnel type.
- * @param lro
- * Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
*
* @return
* The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,
const uint16_t *queues, uint32_t queues_n,
- int tunnel __rte_unused, int lro)
+ int tunnel __rte_unused)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
}
} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
struct mlx5_devx_tir_attr tir_attr;
-
+ uint32_t i;
+ uint32_t lro = 1;
+
+		/* Enable TIR LRO only if all queues were configured for it. */
+ for (i = 0; i < queues_n; ++i) {
+ if (!(*priv->rxqs)[queues[i]]->lro) {
+ lro = 0;
+ break;
+ }
+ }
memset(&tir_attr, 0, sizeof(tir_attr));
tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
tir_attr.lro_timeout_period_usecs =
priv->config.lro.timeout;
tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
- tir_attr.lro_enable_mask = lro;
+ tir_attr.lro_enable_mask =
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
}
tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
if (!tir) {
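
Because the TIR aggregates only when every queue beneath it was configured
with LRO, mixing LRO and non-LRO queues in one RSS set silently disables
aggregation for the whole set. A minimal application-side check mirroring the
driver loop above (helper name and parameters are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Hypothetical helper: true only if every queue of an RSS set carries
 * DEV_RX_OFFLOAD_TCP_LRO, matching the driver's all-or-nothing policy. */
static bool
rss_set_lro_uniform(const uint64_t *queue_offloads,
		    const uint16_t *queues, uint32_t queues_n)
{
	uint32_t i;

	for (i = 0; i < queues_n; ++i)
		if (!(queue_offloads[queues[i]] & DEV_RX_OFFLOAD_TCP_LRO))
			return false;
	return true;
}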