+ MLX5_ASSERT(rxq->sges_n == 0);
+ cq = &(*rxq->cqes)[cq_idx];
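+ /* Prefetch the CQEs that are about to be processed. */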
+ rte_prefetch0(cq);
+ rte_prefetch0(cq + 1);
+ rte_prefetch0(cq + 2);
+ rte_prefetch0(cq + 3);
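+ /* Limit the burst to what the vectorized routine can process at once. */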
+ pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
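+ /* Replenish the queue with a fresh bulk of mbufs. */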
+ mlx5_rx_mprq_replenish_bulk_mbuf(rxq);
+ /* Do not move past the allocated mbufs. */
+ pkts_n = RTE_MIN(pkts_n, rxq->elts_ci - rxq->rq_pi);
+ /* Check if there are unreturned mbufs from a compressed CQE. */
+ rcvd_pkt = rxq->decompressed;
+ if (rcvd_pkt > 0) {
+ rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
+ cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, rcvd_pkt);
+ rxq->decompressed -= rcvd_pkt;
+ pkts += cp_pkt;
+ }
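+ /* Locate the mbuf elements matching the current queue position. */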
+ elts_idx = rxq->rq_pi & elts_mask;
+ elts = &(*rxq->elts)[elts_idx];
+ /* Do not overflow the pkts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n - cp_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+ /* Do not cross the queue end. */
+ pkts_n = RTE_MIN(pkts_n, elts_n - elts_idx);
+ pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
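+ /* Nothing more fits in this burst: return whatever has been copied. */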
+ if (!pkts_n) {
+ *no_cq = !cp_pkt;
+ return cp_pkt;
+ }
+ /* At this point, no packets should remain from a previous decompression. */
+ MLX5_ASSERT(rxq->decompressed == 0);
+ /* Process all the CQEs. */
+ nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
+ /* If no new CQEs were seen, return without updating cq_db. */
+ if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) {
+ *no_cq = true;
+ return cp_pkt;
+ }
+ /* Copy out the non-compressed packets and update the consumer indexes. */
+ MLX5_ASSERT(nocmp_n <= pkts_n);
+ cp_pkt = rxq_copy_mprq_mbuf_v(rxq, pkts, nocmp_n);
+ rcvd_pkt += cp_pkt;
+ /* Decompress the last CQE if compressed. */
+ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
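+ /* A compressed CQE can only immediately follow the non-compressed ones. */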
+ MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+ rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
+ &elts[nocmp_n]);
+ /* Return more packets if needed. */
+ if (nocmp_n < pkts_n) {
+ uint16_t n = rxq->decompressed;