mlx5_queue_state_modify(struct rte_eth_dev *dev,
struct mlx5_mp_arg_queue_state_modify *sm);
+/*
+ * Forward declaration: rewrite the TCP header of an LRO-coalesced
+ * packet from CQE fields; phcsum is a pseudo-header checksum input.
+ * NOTE(review): semantics inferred from names/signature only — the
+ * definition is not visible in this hunk; confirm against it.
+ */
+static inline void
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t phcsum);
+
+/*
+ * Forward declaration: fix up the headers of an LRO-coalesced packet
+ * whose data starts at padd, for total packet length len. Called from
+ * the RX burst path when cqe->lro_num_seg > 1 (see later hunk).
+ * NOTE(review): semantics inferred from names/signature only — confirm
+ * against the (non-visible) definition.
+ */
+static inline void
+mlx5_lro_update_hdr(uint8_t *restrict padd,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t len);
+
+
/*
 * CQE packet-type code -> mbuf RTE_PTYPE_* translation table
 * (256 entries, sized by the designated initializer at index 0xff).
 * Only the error sentinel is initialized statically here; the remaining
 * entries are presumably populated at init time elsewhere — not visible
 * in this chunk, verify against the table's setup code.
 */
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
+ if (cqe->lro_num_seg > 1) {
+ mlx5_lro_update_hdr
+ (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+ len);
+ pkt->ol_flags |= PKT_RX_LRO;
+ pkt->tso_segsz = len / cqe->lro_num_seg;
+ }
}
DATA_LEN(rep) = DATA_LEN(seg);
PKT_LEN(rep) = PKT_LEN(seg);
continue;
}
rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ DATA_LEN(pkt) = len;
} else {
rte_iova_t buf_iova;
struct rte_mbuf_ext_shared_info *shinfo;
++rxq->stats.idropped;
continue;
}
+ DATA_LEN(pkt) = len;
+ /*
+ * LRO packet may consume all the stride memory, in this
+ * case packet head-room space is not guaranteed so must
+ * to add an empty mbuf for the head-room.
+ */
+ if (!rxq->strd_headroom_en) {
+ struct rte_mbuf *headroom_mbuf =
+ rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(headroom_mbuf == NULL)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ PORT(pkt) = rxq->port_id;
+ NEXT(headroom_mbuf) = pkt;
+ pkt = headroom_mbuf;
+ NB_SEGS(pkt) = 2;
+ }
}
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (lro_num_seg > 1) {
pkt->tso_segsz = strd_sz;
}
PKT_LEN(pkt) = len;
- DATA_LEN(pkt) = len;
PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment bytes counter. */