#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
sizeof(struct rte_vlan_hdr) * 2 + \
sizeof(struct rte_ipv6_hdr)))
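+/*
+ * Worst-case L2/L3/L4 header that LRO may need to rewrite in the first
+ * mbuf segment: Ethernet (14B) + two VLAN tags (2 x 4B) + IPv6 (40B) +
+ * TCP (20B) + up to 40B of TCP options, i.e. 62 + 60 = 122 bytes.
+ */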
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+ sizeof(struct rte_tcp_hdr) + \
+ MAX_TCP_OPTION_SIZE))
+
/**
* Adjust the maximum LRO message size.
*
unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
RTE_PKTMBUF_HEADROOM;
unsigned int max_lro_size = 0;
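+ /* Data room available in the first mbuf segment, net of headroom. */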
+ unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
if (non_scatter_min_mbuf_size > mb_len && !(offloads &
DEV_RX_OFFLOAD_SCATTER)) {
config->mprq.min_stride_size_n);
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_headroom_en = strd_headroom_en;
- tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
- RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
+ tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+ config->mprq.max_memcpy_len);
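+ /*
+  * An LRO session cannot exceed the configured max Rx packet length,
+  * nor the capacity of a single MPRQ WQE: 2^strd_num_n strides of
+  * 2^strd_sz_n bytes each.
+  */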
max_lro_size = RTE_MIN(max_rx_pkt_len,
(1u << tmpl->rxq.strd_num_n) *
(1u << tmpl->rxq.strd_sz_n));
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
- } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ } else if (max_rx_pkt_len <= first_mb_free_size) {
tmpl->rxq.sges_n = 0;
max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
+ if (mlx5_lro_on(dev) && first_mb_free_size <
+ MLX5_MAX_LRO_HEADER_FIX) {
+ DRV_LOG(ERR, "Not enough space in the first segment(%u)"
+ " to include the max header size(%u) for LRO",
+ first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+ rte_errno = ENOTSUP;
+ goto error;
+ }
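+ /*
+  * Note: with the default 2048B mbuf data room this 122B limit is
+  * comfortably met; the check only fires for pools built with
+  * unusually small mbufs.
+  */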
/*
* Determine the number of SGEs needed for a full packet
* and round it to the next power of two.
mlx5_queue_state_modify(struct rte_eth_dev *dev,
struct mlx5_mp_arg_queue_state_modify *sm);
+static inline void
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t phcsum);
+
+static inline void
+mlx5_lro_update_hdr(uint8_t *restrict padd,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t len);
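+/*
+ * Expected division of labor between the two helpers (an assumption
+ * based on the prototypes; the definitions are not shown in this hunk):
+ * mlx5_lro_update_hdr() parses the L2/L3 headers starting at @padd,
+ * skipping any VLAN tags, fixes the IP length and TTL/hop-limit fields
+ * for the coalesced packet of @len bytes and computes the L4
+ * pseudo-header checksum, which it passes as @phcsum to
+ * mlx5_lro_update_tcp_hdr() to patch the TCP header with the session
+ * state carried in the CQE.
+ */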
+
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
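+ /*
+  * A CQE with lro_num_seg > 1 completes a coalesced LRO session:
+  * rewrite the packet headers to match the merged length, flag the
+  * mbuf as LRO and report the average coalesced segment size (an
+  * approximation of the peer's MSS) in tso_segsz.
+  */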
+ if (cqe->lro_num_seg > 1) {
+ mlx5_lro_update_hdr
+ (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+ len);
+ pkt->ol_flags |= PKT_RX_LRO;
+ pkt->tso_segsz = len / cqe->lro_num_seg;
+ }
}
DATA_LEN(rep) = DATA_LEN(seg);
PKT_LEN(rep) = PKT_LEN(seg);