- mlx5_tx_dbrec(txq, wqe);
- return n;
-}
-
-/**
- * Send a burst of packets with Enhanced MPW. All packets in the pkts list
- * must be single-segment packets sharing the same offload flags; the callers
- * guarantee this via txq_check_multiseg() and txq_calc_offload(), and hand
- * multi-segment packets to txq_scatter_v() instead.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param pkts_n
- * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
- * @param cs_flags
- * Checksum offload flags to be written in the descriptor.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static inline uint16_t
-txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
-{
- struct rte_mbuf **elts;
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- const unsigned int nb_dword_per_wqebb =
- MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
- const unsigned int nb_dword_in_hdr =
- sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
- unsigned int n = 0;
- unsigned int pos;
- uint16_t max_elts;
- uint16_t max_wqe;
- uint32_t comp_req = 0;
- const uint16_t wq_n = 1 << txq->wqe_n;
- const uint16_t wq_mask = wq_n - 1;
- uint16_t wq_idx = txq->wqe_ci & wq_mask;
- volatile struct mlx5_wqe64 *wq =
- &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
- volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
- const __m128i shuf_mask_ctrl =
- _mm_set_epi8(15, 14, 13, 12,
- 8, 9, 10, 11, /* bswap32 */
- 4, 5, 6, 7, /* bswap32 */
- 0, 1, 2, 3 /* bswap32 */);
- __m128i *t_wqe, *dseg;
- __m128i ctrl;
-
- /* Make sure all packets can fit into a single WQE. */
- assert(elts_n > pkts_n);
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
- assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
- if (unlikely(!pkts_n))
- return 0;
- elts = &(*txq->elts)[elts_head & elts_m];
-	/* Copy up to the end of the ring first (before wraparound). */
- n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
- for (pos = 0; pos < (n & -2); pos += 2)
- _mm_storeu_si128((__m128i *)&elts[pos],
- _mm_loadu_si128((__m128i *)&pkts[pos]));
- if (n & 1)
- elts[pos] = pkts[pos];
- /* Check if it crosses the end of the queue. */
- if (unlikely(n < pkts_n)) {
- elts = &(*txq->elts)[0];
- for (pos = 0; pos < pkts_n - n; ++pos)
- elts[pos] = pkts[n + pos];
- }
- txq->elts_head += pkts_n;
- /* Save title WQEBB pointer. */
- t_wqe = (__m128i *)wqe;
- dseg = (__m128i *)(wqe + 1);
-	/* Calculate the number of DSEGs that fit before the end of the WQ. */
- n = RTE_MIN(
- (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
- pkts_n);
- /* Fill DSEGs. */
- txq_wr_dseg_v(txq, dseg, pkts, n);
- /* Check if it crosses the end of the queue. */
- if (n < pkts_n) {
- dseg = (__m128i *)txq->wqes;
- txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
- }
- if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
- txq->elts_comp += pkts_n;
- } else {
- /* Request a completion. */
- txq->elts_comp = 0;
- ++txq->cq_pi;
- comp_req = 8;
- }
- /* Fill CTRL in the header. */
- ctrl = _mm_set_epi32(txq->elts_head, comp_req,
- txq->qp_num_8s | (pkts_n + 2),
- MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
- txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW);
- ctrl = _mm_shuffle_epi8(ctrl, shuf_mask_ctrl);
- _mm_store_si128(t_wqe, ctrl);
- /* Fill ESEG in the header. */
- _mm_store_si128(t_wqe + 1,
- _mm_set_epi8(0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, cs_flags,
- 0, 0, 0, 0));
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.opackets += pkts_n;
-#endif
- txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
- nb_dword_per_wqebb;
- /* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, wqe);
- return pkts_n;
-}
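A quick sanity check on the final wqe_ci advance: assuming MLX5_WQE_SIZE == 64 and MLX5_WQE_DWORD_SIZE == 16 (four dwords per WQEBB), and the two header dwords (CTRL and ESEG) stored at t_wqe above, the expression just rounds the header plus one DSEG per packet up to whole WQEBBs. A minimal standalone sketch of that arithmetic:

```c
#include <stdio.h>

/* Assumed values mirroring mlx5_prm.h: 64-byte WQEBB, 16-byte dword. */
#define WQEBB_SIZE	64
#define DWORD_SIZE	16

int
main(void)
{
	const unsigned int nb_dword_per_wqebb = WQEBB_SIZE / DWORD_SIZE;
	const unsigned int nb_dword_in_hdr = 2;	/* CTRL + ESEG stores above */
	unsigned int pkts_n;

	/* One DSEG per single-segment packet, rounded up to whole WQEBBs. */
	for (pkts_n = 1; pkts_n <= 8; ++pkts_n)
		printf("pkts_n=%u -> wqe_ci += %u\n", pkts_n,
		       (nb_dword_in_hdr + pkts_n + nb_dword_per_wqebb - 1) /
		       nb_dword_per_wqebb);
	return 0;
}
```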
-
-/**
- * DPDK callback for vectorized TX.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t nb_tx = 0;
-
- while (pkts_n > nb_tx) {
- uint16_t n;
- uint16_t ret;
-
- n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, 0);
- nb_tx += ret;
- if (!ret)
- break;
- }
- return nb_tx;
-}
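The loop above stops as soon as txq_burst_v() returns 0 (no free elts or WQEs left). A hypothetical caller-side wrapper, shown only to illustrate that return convention; send_all() is not part of the driver, and the driver already loops internally:

```c
#include <rte_mbuf.h>

/* Hypothetical helper: drain a packet array through the raw vector Tx
 * path, stopping when the driver reports no progress (queue full). */
static uint16_t
send_all(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t sent = 0;

	while (sent < pkts_n) {
		uint16_t n = mlx5_tx_burst_raw_vec(dpdk_txq, &pkts[sent],
						   (uint16_t)(pkts_n - sent));

		if (n == 0)
			break;	/* no descriptors left; retry later */
		sent += n;
	}
	return sent;
}
```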
-
-/**
- * DPDK callback for vectorized TX with multi-seg packets and offload.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
-{
- struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
- uint16_t nb_tx = 0;
-
- while (pkts_n > nb_tx) {
- uint8_t cs_flags = 0;
- uint16_t n;
- uint16_t ret;
-
- /* Transmit multi-seg packets in the head of pkts list. */
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
- NB_SEGS(pkts[nb_tx]) > 1)
- nb_tx += txq_scatter_v(txq,
- &pkts[nb_tx],
- pkts_n - nb_tx);
- n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
- n = txq_check_multiseg(&pkts[nb_tx], n);
- if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
- n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
- ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
- nb_tx += ret;
- if (!ret)
- break;
- }
- return nb_tx;
-}
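The two trimming calls above are assumed to return the length of the leading run that the vector routine can take as one homogeneous burst. A hypothetical helper mirroring what txq_check_multiseg() is expected to do for the multi-segment case, as the NB_SEGS() test in the scatter branch suggests (NB_SEGS() is the driver's own macro):

```c
/* Hypothetical sketch of the prefix contract: count the leading
 * single-segment packets so txq_burst_v() never sees a multi-seg mbuf. */
static uint16_t
single_seg_prefix(struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; ++i)
		if (NB_SEGS(pkts[i]) > 1)
			break;
	return i;
}
```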
-
-/**
- * Copy mbuf pointers from the RX SW ring to the caller's packet array.
- *
- * @param rxq
- *   Pointer to RX queue structure.
- * @param pkts
- *   Pointer to the array of packets to be filled.
- * @param n
- *   Number of mbuf pointers to be copied.
- */
-static inline void
-rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
-{
- const uint16_t q_mask = (1 << rxq->elts_n) - 1;
- struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
- unsigned int pos;
- uint16_t p = n & -2;
-
- for (pos = 0; pos < p; pos += 2) {
- __m128i mbp;
-
- mbp = _mm_loadu_si128((__m128i *)&elts[pos]);
- _mm_storeu_si128((__m128i *)&pkts[pos], mbp);
- }
- if (n & 1)
- pkts[pos] = elts[pos];
-}
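The vector loads/stores above move two 64-bit mbuf pointers (16 bytes) per iteration, with the odd tail copied scalar. For reference, a plain-C equivalent of the whole copy:

```c
/* Scalar equivalent of rxq_copy_mbuf_v()'s copy loop: the vector code
 * only batches these pointer assignments two at a time. */
static inline void
copy_mbuf_ptrs(struct rte_mbuf **dst, struct rte_mbuf **src, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; ++i)
		dst[i] = src[i];
}
```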
-
-/**
- * Replenish buffers for RX in bulk.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param n
- * Number of buffers to be replenished.
- */
-static inline void
-rxq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
-{
- const uint16_t q_n = 1 << rxq->elts_n;
- const uint16_t q_mask = q_n - 1;
- const uint16_t elts_idx = rxq->rq_ci & q_mask;
- struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
- volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
- unsigned int i;
-
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
- assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
-	/* Do not cross the queue end. */
- n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
- if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
- rxq->stats.rx_nombuf += n;
- return;
- }
- for (i = 0; i < n; ++i)
- wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
- RTE_PKTMBUF_HEADROOM);
- rxq->rq_ci += n;
- rte_io_wmb();
- *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
-}
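The rte_io_wmb() is what guarantees the WQE address writes above are visible to the device before the doorbell record advertises the new producer index. A minimal sketch of that generic publish pattern, using the real rte_io_wmb() and rte_cpu_to_be_32() APIs:

```c
#include <stdint.h>
#include <rte_atomic.h>
#include <rte_byteorder.h>

/* Generic publish pattern followed above: fill descriptors, fence,
 * then write the big-endian producer index to the doorbell record. */
static inline void
publish_rq(volatile uint32_t *db, uint32_t new_ci)
{
	/* ...descriptor/WQE writes happen before this point... */
	rte_io_wmb();			/* order writes before the doorbell */
	*db = rte_cpu_to_be_32(new_ci);	/* HW may now fetch the new WQEs */
}
```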
-
-/**
- * Decompress a compressed completion and fill in mbufs in RX SW ring with data
- * extracted from the title completion descriptor.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param cq
- * Pointer to completion array having a compressed completion at first.
- * @param elts
- *   Pointer to the SW ring to be filled. The first mbuf must be pre-built
- *   from the title completion descriptor; its fields are copied to the rest
- *   of the mbufs.
- */
-static inline void
-rxq_cq_decompress_v(struct mlx5_rxq_data *rxq,
- volatile struct mlx5_cqe *cq,
- struct rte_mbuf **elts)
-{
- volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + 1);
- struct rte_mbuf *t_pkt = elts[0]; /* Title packet is pre-built. */
- unsigned int pos;
- unsigned int i;
- unsigned int inv = 0;
- /* Mask to shuffle from extracted mini CQE to mbuf. */
- const __m128i shuf_mask1 =
- _mm_set_epi8(0, 1, 2, 3, /* rss, bswap32 */
- -1, -1, /* skip vlan_tci */
- 6, 7, /* data_len, bswap16 */
- -1, -1, 6, 7, /* pkt_len, bswap16 */
- -1, -1, -1, -1 /* skip packet_type */);
- const __m128i shuf_mask2 =
- _mm_set_epi8(8, 9, 10, 11, /* rss, bswap32 */
- -1, -1, /* skip vlan_tci */
- 14, 15, /* data_len, bswap16 */
- -1, -1, 14, 15, /* pkt_len, bswap16 */
- -1, -1, -1, -1 /* skip packet_type */);
-	/* Restore the compressed count; it must fit in 16 bits. */
- const uint16_t mcqe_n = t_pkt->data_len +
- (rxq->crc_present * ETHER_CRC_LEN);
- const __m128i rearm =
- _mm_loadu_si128((__m128i *)&t_pkt->rearm_data);
- const __m128i rxdf =
- _mm_loadu_si128((__m128i *)&t_pkt->rx_descriptor_fields1);
- const __m128i crc_adj =
- _mm_set_epi16(0, 0, 0,
- rxq->crc_present * ETHER_CRC_LEN,
- 0,
- rxq->crc_present * ETHER_CRC_LEN,
- 0, 0);
- const uint32_t flow_tag = t_pkt->hash.fdir.hi;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- const __m128i zero = _mm_setzero_si128();
- const __m128i ones = _mm_cmpeq_epi32(zero, zero);
- uint32_t rcvd_byte = 0;
- /* Mask to shuffle byte_cnt to add up stats. Do bswap16 for all. */
- const __m128i len_shuf_mask =
- _mm_set_epi8(-1, -1, -1, -1,
- -1, -1, -1, -1,
- 14, 15, 6, 7,
- 10, 11, 2, 3);
-#endif
-
-	/*
-	 * Do not overflow the elts array. If there is not enough room,
-	 * decompress on the next call, after mbuf replenishment.
-	 */
- if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
- (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
- return;
- /*
- * A. load mCQEs into a 128bit register.
- * B. store rearm data to mbuf.
- * C. combine data from mCQEs with rx_descriptor_fields1.
- * D. store rx_descriptor_fields1.
- * E. store flow tag (rte_flow mark).
- */
- for (pos = 0; pos < mcqe_n; ) {
- __m128i mcqe1, mcqe2;
- __m128i rxdf1, rxdf2;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- __m128i byte_cnt, invalid_mask;
-#endif
-
- if (!(pos & 0x7) && pos + 8 < mcqe_n)
- rte_prefetch0((void *)(cq + pos + 8));
- /* A.1 load mCQEs into a 128bit register. */
- mcqe1 = _mm_loadu_si128((__m128i *)&mcq[pos % 8]);
- mcqe2 = _mm_loadu_si128((__m128i *)&mcq[pos % 8 + 2]);
- /* B.1 store rearm data to mbuf. */
- _mm_storeu_si128((__m128i *)&elts[pos]->rearm_data, rearm);
- _mm_storeu_si128((__m128i *)&elts[pos + 1]->rearm_data, rearm);
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
- rxdf1 = _mm_shuffle_epi8(mcqe1, shuf_mask1);
- rxdf2 = _mm_shuffle_epi8(mcqe1, shuf_mask2);
- rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
- rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
- rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
- rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
- /* D.1 store rx_descriptor_fields1. */
- _mm_storeu_si128((__m128i *)
- &elts[pos]->rx_descriptor_fields1,
- rxdf1);
- _mm_storeu_si128((__m128i *)
- &elts[pos + 1]->rx_descriptor_fields1,
- rxdf2);
- /* B.1 store rearm data to mbuf. */
- _mm_storeu_si128((__m128i *)&elts[pos + 2]->rearm_data, rearm);
- _mm_storeu_si128((__m128i *)&elts[pos + 3]->rearm_data, rearm);
- /* C.1 combine data from mCQEs with rx_descriptor_fields1. */
- rxdf1 = _mm_shuffle_epi8(mcqe2, shuf_mask1);
- rxdf2 = _mm_shuffle_epi8(mcqe2, shuf_mask2);
- rxdf1 = _mm_sub_epi16(rxdf1, crc_adj);
- rxdf2 = _mm_sub_epi16(rxdf2, crc_adj);
- rxdf1 = _mm_blend_epi16(rxdf1, rxdf, 0x23);
- rxdf2 = _mm_blend_epi16(rxdf2, rxdf, 0x23);
- /* D.1 store rx_descriptor_fields1. */
- _mm_storeu_si128((__m128i *)
- &elts[pos + 2]->rx_descriptor_fields1,
- rxdf1);
- _mm_storeu_si128((__m128i *)
- &elts[pos + 3]->rx_descriptor_fields1,
- rxdf2);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- invalid_mask = _mm_set_epi64x(0,
- (mcqe_n - pos) *
- sizeof(uint16_t) * 8);
- invalid_mask = _mm_sll_epi64(ones, invalid_mask);
- mcqe1 = _mm_srli_si128(mcqe1, 4);
- byte_cnt = _mm_blend_epi16(mcqe1, mcqe2, 0xcc);
- byte_cnt = _mm_shuffle_epi8(byte_cnt, len_shuf_mask);
- byte_cnt = _mm_andnot_si128(invalid_mask, byte_cnt);
- byte_cnt = _mm_hadd_epi16(byte_cnt, zero);
- rcvd_byte += _mm_cvtsi128_si64(_mm_hadd_epi16(byte_cnt, zero));
-#endif
- if (rxq->mark) {
- /* E.1 store flow tag (rte_flow mark). */
- elts[pos]->hash.fdir.hi = flow_tag;
- elts[pos + 1]->hash.fdir.hi = flow_tag;
- elts[pos + 2]->hash.fdir.hi = flow_tag;
- elts[pos + 3]->hash.fdir.hi = flow_tag;
- }
- pos += MLX5_VPMD_DESCS_PER_LOOP;
- /* Move to next CQE and invalidate consumed CQEs. */
- if (!(pos & 0x7) && pos < mcqe_n) {
- mcq = (void *)(cq + pos);
- for (i = 0; i < 8; ++i)
- cq[inv++].op_own = MLX5_CQE_INVALIDATE;
- }
- }
- /* Invalidate the rest of CQEs. */
- for (; inv < mcqe_n; ++inv)
- cq[inv].op_own = MLX5_CQE_INVALIDATE;
-#ifdef MLX5_PMD_SOFT_COUNTERS
- rxq->stats.ipackets += mcqe_n;
- rxq->stats.ibytes += rcvd_byte;
-#endif
- rxq->cq_ci += mcqe_n;
-}
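One subtlety in the soft-counter block: invalid_mask is an all-ones value shifted left by 16 bits per remaining valid entry, relying on SSE shifts producing zero for counts >= 64 to handle the case where all four lanes are valid. A scalar model of that masking step; the explicit guard replaces what the hardware shift does implicitly:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Scalar model of the invalid_mask computation: ones are shifted into
 * the lanes beyond the last valid 16-bit byte counter; andnot keeps
 * the rest. SSE shifts yield 0 for counts >= 64, unlike C, so the
 * shift must be guarded here. */
static uint64_t
invalid_lane_mask(unsigned int valid)	/* valid = mcqe_n - pos */
{
	unsigned int shift = valid * 16;

	return shift >= 64 ? 0 : ~0ULL << shift;
}

int
main(void)
{
	uint64_t byte_cnt = 0x0040004000400040ULL; /* four counters of 64 */
	uint64_t kept = byte_cnt & ~invalid_lane_mask(2);

	/* Only the two low counters survive: kept == 0x0000000000400040. */
	printf("kept=%016" PRIx64 "\n", kept);
	return 0;
}
```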
-
-/**
- * Calculate packet type and offload flag for mbuf and store it.
- *
- * @param rxq
- * Pointer to RX queue structure.
- * @param cqes[4]
- *   Array of four 16-byte completions extracted from the original completion
- *   descriptor.
- * @param op_err
- * Opcode vector having responder error status. Each field is 4B.
- * @param pkts
- * Pointer to array of packets to be filled.
- */
-static inline void
-rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
- __m128i op_err, struct rte_mbuf **pkts)
-{
- __m128i pinfo0, pinfo1;
- __m128i pinfo, ptype;
- __m128i ol_flags = _mm_set1_epi32(rxq->rss_hash * PKT_RX_RSS_HASH);
- __m128i cv_flags;
- const __m128i zero = _mm_setzero_si128();
- const __m128i ptype_mask =
- _mm_set_epi32(0xfd06, 0xfd06, 0xfd06, 0xfd06);
- const __m128i ptype_ol_mask =
- _mm_set_epi32(0x106, 0x106, 0x106, 0x106);
- const __m128i pinfo_mask =
- _mm_set_epi32(0x3, 0x3, 0x3, 0x3);
- const __m128i cv_flag_sel =
- _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0,
- (uint8_t)((PKT_RX_IP_CKSUM_GOOD |
- PKT_RX_L4_CKSUM_GOOD) >> 1),
- 0,
- (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
- 0,
- (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
- (uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
- 0);
- const __m128i cv_mask =
- _mm_set_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
- const __m128i mbuf_init =
- _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
- __m128i rearm0, rearm1, rearm2, rearm3;
-
- /* Extract pkt_info field. */
- pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
- pinfo1 = _mm_unpacklo_epi32(cqes[2], cqes[3]);
- pinfo = _mm_unpacklo_epi64(pinfo0, pinfo1);
- /* Extract hdr_type_etc field. */
- pinfo0 = _mm_unpackhi_epi32(cqes[0], cqes[1]);
- pinfo1 = _mm_unpackhi_epi32(cqes[2], cqes[3]);
- ptype = _mm_unpacklo_epi64(pinfo0, pinfo1);
- if (rxq->mark) {
- const __m128i pinfo_ft_mask =
- _mm_set_epi32(0xffffff00, 0xffffff00,
- 0xffffff00, 0xffffff00);
- const __m128i fdir_flags = _mm_set1_epi32(PKT_RX_FDIR);
- const __m128i fdir_id_flags = _mm_set1_epi32(PKT_RX_FDIR_ID);
- __m128i flow_tag, invalid_mask;
-
- flow_tag = _mm_and_si128(pinfo, pinfo_ft_mask);
-		/* If the flow tag is non-zero, set PKT_RX_FDIR. */
- invalid_mask = _mm_cmpeq_epi32(flow_tag, zero);
- ol_flags = _mm_or_si128(ol_flags,
- _mm_andnot_si128(invalid_mask,
- fdir_flags));
- /* Mask out invalid entries. */
- flow_tag = _mm_andnot_si128(invalid_mask, flow_tag);
-		/* Set PKT_RX_FDIR_ID if flow tag is not MLX5_FLOW_MARK_DEFAULT. */
- ol_flags = _mm_or_si128(ol_flags,
- _mm_andnot_si128(
- _mm_cmpeq_epi32(flow_tag,
- pinfo_ft_mask),
- fdir_id_flags));
- }
- /*
- * Merge the two fields to generate the following:
- * bit[1] = l3_ok
- * bit[2] = l4_ok
- * bit[8] = cv
- * bit[11:10] = l3_hdr_type
- * bit[14:12] = l4_hdr_type
- * bit[15] = ip_frag
- * bit[16] = tunneled
- * bit[17] = outer_l3_type
- */
- ptype = _mm_and_si128(ptype, ptype_mask);
- pinfo = _mm_and_si128(pinfo, pinfo_mask);
- pinfo = _mm_slli_epi32(pinfo, 16);
-	/* Merge the two fields into pinfo for the ol_flags calculation. */
- pinfo = _mm_or_si128(ptype, pinfo);
- ptype = _mm_srli_epi32(pinfo, 10);
- ptype = _mm_packs_epi32(ptype, zero);
- /* Errored packets will have RTE_PTYPE_ALL_MASK. */
- op_err = _mm_srli_epi16(op_err, 8);
- ptype = _mm_or_si128(ptype, op_err);
- pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
- pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
- pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
- pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
- /* Fill flags for checksum and VLAN. */
- pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
- pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
- /* Locate checksum flags at byte[2:1] and merge with VLAN flags. */
- cv_flags = _mm_slli_epi32(pinfo, 9);
- cv_flags = _mm_or_si128(pinfo, cv_flags);
- /* Move back flags to start from byte[0]. */
- cv_flags = _mm_srli_epi32(cv_flags, 8);
- /* Mask out garbage bits. */
- cv_flags = _mm_and_si128(cv_flags, cv_mask);
- /* Merge to ol_flags. */
- ol_flags = _mm_or_si128(ol_flags, cv_flags);
- /* Merge mbuf_init and ol_flags. */
- rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 8), 0x30);
- rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(ol_flags, 4), 0x30);
- rearm2 = _mm_blend_epi16(mbuf_init, ol_flags, 0x30);
- rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(ol_flags, 4), 0x30);
- /* Write 8B rearm_data and 8B ol_flags. */
- _mm_store_si128((__m128i *)&pkts[0]->rearm_data, rearm0);
- _mm_store_si128((__m128i *)&pkts[1]->rearm_data, rearm1);
- _mm_store_si128((__m128i *)&pkts[2]->rearm_data, rearm2);
- _mm_store_si128((__m128i *)&pkts[3]->rearm_data, rearm3);
-}
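The blends above splice each packet's 32-bit ol_flags into bytes 8..11 of the mbuf_init template (mask 0x30 selects 16-bit lanes 4 and 5); bytes 12..15 stay zero because mbuf_init was loaded with _mm_loadl_epi64(). Each 16-byte store therefore writes the 8-byte rearm template followed by a zero-extended 64-bit ol_flags. A scalar view of one such store, assuming ol_flags immediately follows rearm_data in struct rte_mbuf, as the stores above require:

```c
#include <stdint.h>
#include <string.h>

/* Scalar equivalent of one rearm store above: 8 bytes of template
 * followed by the packet's ol_flags zero-extended to 64 bits, laid
 * out exactly as rearm_data/ol_flags sit in struct rte_mbuf. */
static inline void
write_rearm(uint8_t *dst16, uint64_t rearm_template, uint32_t ol_flags)
{
	uint64_t flags64 = ol_flags;	/* upper half zero, as in the blend */

	memcpy(dst16, &rearm_template, sizeof(rearm_template));
	memcpy(dst16 + 8, &flags64, sizeof(flags64));
}
```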