-/**
- * Receive a burst of packets. An errored completion also consumes an mbuf,
- * but its packet_type is set to RTE_PTYPE_ALL_MASK. Marked mbufs should be
- * freed before returning to the application.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param[out] pkts
- * Array to store received packets.
- * @param pkts_n
- * Maximum number of packets in array.
- *
- * @return
- * Number of packets received including errors (<= pkts_n).
- */
-static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- const uint16_t q_n = 1 << rxq->cqe_n;
- const uint16_t q_mask = q_n - 1;
- volatile struct mlx5_cqe *cq;
- struct rte_mbuf **elts;
- unsigned int pos;
- uint64_t n;
- uint16_t repl_n;
- uint64_t comp_idx = MLX5_VPMD_DESCS_PER_LOOP;
- uint16_t nocmp_n = 0;
- uint16_t rcvd_pkt = 0;
- unsigned int cq_idx = rxq->cq_ci & q_mask;
- unsigned int elts_idx;
- unsigned int ownership = !!(rxq->cq_ci & (q_mask + 1));
- const __m128i owner_check =
- _mm_set_epi64x(0x0100000001000000LL, 0x0100000001000000LL);
- const __m128i opcode_check =
- _mm_set_epi64x(0xf0000000f0000000LL, 0xf0000000f0000000LL);
- const __m128i format_check =
- _mm_set_epi64x(0x0c0000000c000000LL, 0x0c0000000c000000LL);
- const __m128i resp_err_check =
- _mm_set_epi64x(0xe0000000e0000000LL, 0xe0000000e0000000LL);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- uint32_t rcvd_byte = 0;
- /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
- const __m128i len_shuf_mask =
- _mm_set_epi8(-1, -1, -1, -1,
- -1, -1, -1, -1,
- 12, 13, 8, 9,
- 4, 5, 0, 1);
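- /*
- * _mm_set_epi8() lists bytes from lane 15 down to lane 0, so pairs
- * (0,1), (4,5), (8,9) and (12,13) pull each big-endian 16-bit
- * byte_cnt out of op_own with its bytes swapped, while the -1 lanes
- * zero the upper half of the result.
- */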
-#endif
- /* Mask to shuffle from extracted CQE to mbuf. */
- const __m128i shuf_mask =
- _mm_set_epi8(-1, 3, 2, 1, /* fdir.hi */
- 12, 13, 14, 15, /* rss, bswap32 */
- 10, 11, /* vlan_tci, bswap16 */
- 4, 5, /* data_len, bswap16 */
- -1, -1, /* zero out 2nd half of pkt_len */
- 4, 5 /* pkt_len, bswap16 */);
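- /*
- * The shuffled result is stored straight into the mbuf starting at
- * pkt_len: { pkt_len(32), data_len(16), vlan_tci(16), rss(32),
- * fdir.hi(32) }; the RTE_BUILD_BUG_ON() checks below pin the
- * pkt_len/data_len offsets this relies on.
- */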
- /* Mask to blend from the last Qword to the first DQword. */
- const __m128i blend_mask =
- _mm_set_epi8(-1, -1, -1, -1,
- -1, -1, -1, -1,
- 0, 0, 0, 0,
- 0, 0, 0, -1);
- const __m128i zero = _mm_setzero_si128();
- const __m128i ones = _mm_cmpeq_epi32(zero, zero);
- const __m128i crc_adj =
- _mm_set_epi16(0, 0, 0, 0, 0,
- rxq->crc_present * ETHER_CRC_LEN,
- 0,
- rxq->crc_present * ETHER_CRC_LEN);
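- /*
- * With crc_present set, words 0 (pkt_len) and 2 (data_len) of each
- * shuffled descriptor get ETHER_CRC_LEN (4) subtracted, stripping
- * the FCS from both length fields.
- */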
- const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
-
- /* Compile time sanity check for this function. */
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
- RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
- offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, pkt_info) != 0);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rx_hash_res) !=
- offsetof(struct mlx5_cqe, pkt_info) + 12);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd1) +
- sizeof(((struct mlx5_cqe *)0)->rsvd1) !=
- offsetof(struct mlx5_cqe, hdr_type_etc));
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, vlan_info) !=
- offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, rsvd2) +
- sizeof(((struct mlx5_cqe *)0)->rsvd2) !=
- offsetof(struct mlx5_cqe, byte_cnt));
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, sop_drop_qpn) !=
- RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
- RTE_BUILD_BUG_ON(offsetof(struct mlx5_cqe, op_own) !=
- offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);
- assert(rxq->sges_n == 0);
- assert(rxq->cqe_n == rxq->elts_n);
- cq = &(*rxq->cqes)[cq_idx];
- rte_prefetch0(cq);
- rte_prefetch0(cq + 1);
- rte_prefetch0(cq + 2);
- rte_prefetch0(cq + 3);
- pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
- /*
- * Order of indexes:
- * rq_ci >= cq_ci >= rq_pi
- * Definition of indexes:
- * rq_ci - cq_ci := # of buffers owned by HW (posted).
- * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
- * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
- */
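- /*
- * E.g. with q_n = 256, rq_ci = 100, cq_ci = 90 and rq_pi = 80, 10
- * buffers are still posted to HW, 10 are decompressed but not yet
- * returned, and repl_n = 256 - (100 - 80) = 236 can be replenished.
- */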
- repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
- rxq_replenish_bulk_mbuf(rxq, repl_n);
- /* See if there are unreturned mbufs from a compressed CQE. */
- rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
- if (rcvd_pkt > 0) {
- rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
- rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
- rxq->rq_pi += rcvd_pkt;
- pkts += rcvd_pkt;
- }
- elts_idx = rxq->rq_pi & q_mask;
- elts = &(*rxq->elts)[elts_idx];
- pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
- (uint16_t)(rxq->rq_ci - rxq->cq_ci));
- /* Not to overflow pkts/elts array. */
- pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
- /* Not to cross queue end. */
- pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
- if (!pkts_n)
- return rcvd_pkt;
- /* At this point, there shouldn't be any remaining packets. */
- assert(rxq->rq_pi == rxq->cq_ci);
- /*
- * A. load first Qword (8 bytes) in one loop.
- * B. copy 4 mbuf pointers from elts ring to returning pkts.
- * C. load remaining CQE data and extract necessary fields.
- * The final 16-byte cqes[] extracted from the original 64-byte CQE
- * has the following structure:
- * struct {
- * uint8_t pkt_info;
- * uint8_t flow_tag[3];
- * uint16_t byte_cnt;
- * uint8_t rsvd4;
- * uint8_t op_own;
- * uint16_t hdr_type_etc;
- * uint16_t vlan_info;
- * uint32_t rx_hash_res;
- * } c;
- * D. fill in mbuf.
- * E. get valid CQEs.
- * F. find compressed CQE.
- */
- for (pos = 0;
- pos < pkts_n;
- pos += MLX5_VPMD_DESCS_PER_LOOP) {
- __m128i cqes[MLX5_VPMD_DESCS_PER_LOOP];
- __m128i cqe_tmp1, cqe_tmp2;
- __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
- __m128i op_own, op_own_tmp1, op_own_tmp2;
- __m128i opcode, owner_mask, invalid_mask;
- __m128i comp_mask;
- __m128i mask;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- __m128i byte_cnt;
-#endif
- __m128i mbp1, mbp2;
- __m128i p = _mm_set_epi16(0, 0, 0, 0, 3, 2, 1, 0);
- unsigned int p1, p2, p3;
-
- /* Prefetch next 4 CQEs. */
- if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
- rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP]);
- rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 1]);
- rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 2]);
- rte_prefetch0(&cq[pos + MLX5_VPMD_DESCS_PER_LOOP + 3]);
- }
- /* A.0 do not cross the end of CQ. */
- mask = _mm_set_epi64x(0, (pkts_n - pos) * sizeof(uint16_t) * 8);
- mask = _mm_sll_epi64(ones, mask);
- p = _mm_andnot_si128(mask, p);
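- /*
- * E.g. if only 2 CQEs remain, the shift count is 32 and the andnot
- * clears p[2] and p[3], so the out-of-range lanes simply reload
- * cq[pos] instead of reading past the queue end; a count >= 64
- * (4 or more CQEs left) zeroes the mask and leaves p untouched.
- * The same mask later invalidates those lanes at E.4.
- */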
- /* A.1 load cqes. */
- p3 = _mm_extract_epi16(p, 3);
- cqes[3] = _mm_loadl_epi64((__m128i *)
- &cq[pos + p3].sop_drop_qpn);
- rte_compiler_barrier();
- p2 = _mm_extract_epi16(p, 2);
- cqes[2] = _mm_loadl_epi64((__m128i *)
- &cq[pos + p2].sop_drop_qpn);
- rte_compiler_barrier();
- /* B.1 load mbuf pointers. */
- mbp1 = _mm_loadu_si128((__m128i *)&elts[pos]);
- mbp2 = _mm_loadu_si128((__m128i *)&elts[pos + 2]);
- /* A.1 load a block having op_own. */
- p1 = _mm_extract_epi16(p, 1);
- cqes[1] = _mm_loadl_epi64((__m128i *)
- &cq[pos + p1].sop_drop_qpn);
- rte_compiler_barrier();
- cqes[0] = _mm_loadl_epi64((__m128i *)
- &cq[pos].sop_drop_qpn);
- /* B.2 copy mbuf pointers. */
- _mm_storeu_si128((__m128i *)&pkts[pos], mbp1);
- _mm_storeu_si128((__m128i *)&pkts[pos + 2], mbp2);
- rte_compiler_barrier();
- /* C.1 load remaining CQE data and extract necessary fields. */
- cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p3]);
- cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
- cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
- cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
- cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
- cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
- cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
- cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
- cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
- cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
- cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
- cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
- /* C.2 generate final structure for mbuf with swapping bytes. */
- pkt_mb3 = _mm_shuffle_epi8(cqes[3], shuf_mask);
- pkt_mb2 = _mm_shuffle_epi8(cqes[2], shuf_mask);
- /* C.3 adjust CRC length. */
- pkt_mb3 = _mm_sub_epi16(pkt_mb3, crc_adj);
- pkt_mb2 = _mm_sub_epi16(pkt_mb2, crc_adj);
- /* C.4 adjust flow mark. */
- pkt_mb3 = _mm_add_epi32(pkt_mb3, flow_mark_adj);
- pkt_mb2 = _mm_add_epi32(pkt_mb2, flow_mark_adj);
- /* D.1 fill in mbuf - rx_descriptor_fields1. */
- _mm_storeu_si128((void *)&pkts[pos + 3]->pkt_len, pkt_mb3);
- _mm_storeu_si128((void *)&pkts[pos + 2]->pkt_len, pkt_mb2);
- /* E.1 extract op_own field. */
- op_own_tmp2 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
- /* C.1 load remaining CQE data and extract necessary fields. */
- cqe_tmp2 = _mm_load_si128((__m128i *)&cq[pos + p1]);
- cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
- cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
- cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
- cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
- cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
- cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
- cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
- cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
- cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
- cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
- cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
- /* C.2 generate final structure for mbuf with swapping bytes. */
- pkt_mb1 = _mm_shuffle_epi8(cqes[1], shuf_mask);
- pkt_mb0 = _mm_shuffle_epi8(cqes[0], shuf_mask);
- /* C.3 adjust CRC length. */
- pkt_mb1 = _mm_sub_epi16(pkt_mb1, crc_adj);
- pkt_mb0 = _mm_sub_epi16(pkt_mb0, crc_adj);
- /* C.4 adjust flow mark. */
- pkt_mb1 = _mm_add_epi32(pkt_mb1, flow_mark_adj);
- pkt_mb0 = _mm_add_epi32(pkt_mb0, flow_mark_adj);
- /* E.1 extract op_own field. */
- op_own_tmp1 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
- op_own = _mm_unpackhi_epi64(op_own_tmp1, op_own_tmp2);
- /* D.1 fill in mbuf - rx_descriptor_fields1. */
- _mm_storeu_si128((void *)&pkts[pos + 1]->pkt_len, pkt_mb1);
- _mm_storeu_si128((void *)&pkts[pos]->pkt_len, pkt_mb0);
- /* E.2 flip owner bit to mark CQEs from last round. */
- owner_mask = _mm_and_si128(op_own, owner_check);
- if (ownership)
- owner_mask = _mm_xor_si128(owner_mask, owner_check);
- owner_mask = _mm_cmpeq_epi32(owner_mask, owner_check);
- owner_mask = _mm_packs_epi32(owner_mask, zero);
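- /*
- * An all-ones lane now flags a CQE whose owner bit does not match
- * the expected phase for this cq_ci wrap, i.e. one HW has not
- * written yet; it is merged into invalid_mask in E.5.
- */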
- /* E.3 get mask for invalidated CQEs. */
- opcode = _mm_and_si128(op_own, opcode_check);
- invalid_mask = _mm_cmpeq_epi32(opcode_check, opcode);
- invalid_mask = _mm_packs_epi32(invalid_mask, zero);
- /* E.4 mask out beyond boundary. */
- invalid_mask = _mm_or_si128(invalid_mask, mask);
- /* E.5 merge invalid_mask with invalid owner. */
- invalid_mask = _mm_or_si128(invalid_mask, owner_mask);
- /* F.1 find compressed CQE format. */
- comp_mask = _mm_and_si128(op_own, format_check);
- comp_mask = _mm_cmpeq_epi32(comp_mask, format_check);
- comp_mask = _mm_packs_epi32(comp_mask, zero);
- /* F.2 mask out invalid entries. */
- comp_mask = _mm_andnot_si128(invalid_mask, comp_mask);
- comp_idx = _mm_cvtsi128_si64(comp_mask);
- /* F.3 get the first compressed CQE. */
- comp_idx = comp_idx ?
- __builtin_ctzll(comp_idx) /
- (sizeof(uint16_t) * 8) :
- MLX5_VPMD_DESCS_PER_LOOP;
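- /*
- * comp_mask holds four 16-bit lanes in its low Qword, e.g.
- * 0x0000ffff00000000 if only lane 2 is compressed: ctzll() = 32,
- * and 32 / 16 bits per lane gives comp_idx = 2.
- */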
- /* E.6 mask out entries after the compressed CQE. */
- mask = _mm_set_epi64x(0, comp_idx * sizeof(uint16_t) * 8);
- mask = _mm_sll_epi64(ones, mask);
- invalid_mask = _mm_or_si128(invalid_mask, mask);
- /* E.7 count non-compressed valid CQEs. */
- n = _mm_cvtsi128_si64(invalid_mask);
- n = n ? __builtin_ctzll(n) / (sizeof(uint16_t) * 8) :
- MLX5_VPMD_DESCS_PER_LOOP;
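- /*
- * Likewise, invalid_mask = 0xffffffff00000000 means lanes 2-3 are
- * invalid, so n = 32 / 16 = 2 valid non-compressed CQEs.
- */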
- nocmp_n += n;
- /* D.2 get the final invalid mask. */
- mask = _mm_set_epi64x(0, n * sizeof(uint16_t) * 8);
- mask = _mm_sll_epi64(ones, mask);
- invalid_mask = _mm_or_si128(invalid_mask, mask);
- /* D.3 check error in opcode. */
- opcode = _mm_cmpeq_epi32(resp_err_check, opcode);
- opcode = _mm_packs_epi32(opcode, zero);
- opcode = _mm_andnot_si128(invalid_mask, opcode);
- /* D.4 mark if any error is set. */
- rxq->pending_err |= !!_mm_cvtsi128_si64(opcode);
- /* D.5 fill in mbuf - rearm_data and packet_type. */
- rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- /* Add up the received byte count. */
- byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
- byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
- byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
- rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
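- /*
- * The two horizontal adds fold the four 16-bit counts into lane 0,
- * e.g. lanes (64, 64, 64, 64) reduce to (128, 128, 0, 0) and then
- * to 256.
- */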
-#endif
- /*
- * Break the loop unless more valid CQEs are expected, or a
- * compressed CQE has been found.
- */
- if (n != MLX5_VPMD_DESCS_PER_LOOP)
- break;
- }
- /* If no new CQE was seen, return without updating cq_db. */
- if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
- return rcvd_pkt;
- /* Update the consumer indexes for non-compressed CQEs. */
- assert(nocmp_n <= pkts_n);
- rxq->cq_ci += nocmp_n;
- rxq->rq_pi += nocmp_n;
- rcvd_pkt += nocmp_n;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- rxq->stats.ipackets += nocmp_n;
- rxq->stats.ibytes += rcvd_byte;
-#endif
- /* Decompress the last CQE if compressed. */
- if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
- assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
- rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
- /* Return more packets if needed. */
- if (nocmp_n < pkts_n) {
- uint16_t n = rxq->cq_ci - rxq->rq_pi;
-
- n = RTE_MIN(n, pkts_n - nocmp_n);
- rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
- rxq->rq_pi += n;
- rcvd_pkt += n;
- }
- }
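- /*
- * Update the CQ doorbell record with the new consumer index so HW
- * can reuse the consumed CQEs; the barrier orders the doorbell
- * store after the CQE reads above.
- */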
- rte_compiler_barrier();
- *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
- return rcvd_pkt;
-}
-
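- /*
- * Illustrative usage sketch (an assumption, not part of this code): a
- * caller that sees rxq->pending_err set is expected to scan the burst
- * and free completions marked with RTE_PTYPE_ALL_MASK, e.g.:
- *
- *     uint16_t i, n = rxq_burst_v(rxq, pkts, pkts_n);
- *
- *     for (i = 0; i < n; ++i)
- *             if (pkts[i]->packet_type == RTE_PTYPE_ALL_MASK)
- *                     rte_pktmbuf_free_seg(pkts[i]);
- */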