rxq->stats.ibytes += rcvd_byte;
#endif
- rxq->cq_ci += mcqe_n;
return mcqe_n;
}
/*
* A. load first Qword (8bytes) in one loop.
- * B. copy 4 mbuf pointers from elts ring to returing pkts.
+ * B. copy 4 mbuf pointers from elts ring to returning pkts.
* C. load remaining CQE data and extract necessary fields.
* Final 16bytes cqes[] extracted from original 64bytes CQE has the
* following structure:
if (rxq->dynf_meta) {
uint64_t flag = rxq->flow_meta_mask;
int32_t offs = rxq->flow_meta_offset;
+ uint32_t mask = rxq->flow_meta_port_mask;
+ uint32_t shift =
+ __builtin_popcount(rxq->flow_meta_port_mask);
uint32_t metadata;
/* This code is subject to further optimization. */
- metadata = cq[pos].flow_table_metadata;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
metadata;
pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
- metadata = cq[pos + 1].flow_table_metadata;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos + 1].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
metadata;
pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
- metadata = cq[pos + 2].flow_table_metadata;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos + 2].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
metadata;
pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
- metadata = cq[pos + 3].flow_table_metadata;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos + 3].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
metadata;
pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;