X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx_vec_neon.h;h=6c3149523e453cdd3090abb30e66426d80382ab4;hb=6f52bd338374e4bd54ff1f872928e53ebdf1108d;hp=55f0983ec6621361b11e8bd7aa28a3fcfbfaa1b7;hpb=fb870be5a879c6617fecabf47873ae2b576e6e69;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h index 55f0983ec6..6c3149523e 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h @@ -1,40 +1,11 @@ -/*- - * BSD LICENSE - * - * Copyright 2017 6WIND S.A. - * Copyright 2017 Mellanox. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of 6WIND S.A. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 6WIND S.A. + * Copyright 2017 Mellanox Technologies, Ltd */ #ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_ #define RTE_PMD_MLX5_RXTX_VEC_NEON_H_ -#include #include #include #include @@ -44,311 +15,17 @@ #include #include +#include + +#include "mlx5_defs.h" #include "mlx5.h" #include "mlx5_utils.h" #include "mlx5_rxtx.h" #include "mlx5_rxtx_vec.h" #include "mlx5_autoconf.h" -#include "mlx5_defs.h" -#include "mlx5_prm.h" #pragma GCC diagnostic ignored "-Wcast-qual" -/** - * Fill in buffer descriptors in a multi-packet send descriptor. - * - * @param txq - * Pointer to TX queue structure. - * @param dseg - * Pointer to buffer descriptor to be writen. - * @param pkts - * Pointer to array of packets to be sent. - * @param n - * Number of packets to be filled. 
- */ -static inline void -txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg, - struct rte_mbuf **pkts, unsigned int n) -{ - unsigned int pos; - uintptr_t addr; - const uint8x16_t dseg_shuf_m = { - 3, 2, 1, 0, /* length, bswap32 */ - 4, 5, 6, 7, /* lkey */ - 15, 14, 13, 12, /* addr, bswap64 */ - 11, 10, 9, 8 - }; -#ifdef MLX5_PMD_SOFT_COUNTERS - uint32_t tx_byte = 0; -#endif - - for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) { - uint8x16_t desc; - struct rte_mbuf *pkt = pkts[pos]; - - addr = rte_pktmbuf_mtod(pkt, uintptr_t); - desc = vreinterpretq_u8_u32((uint32x4_t) { - DATA_LEN(pkt), - mlx5_tx_mb2mr(txq, pkt), - addr, - addr >> 32 }); - desc = vqtbl1q_u8(desc, dseg_shuf_m); - vst1q_u8(dseg, desc); -#ifdef MLX5_PMD_SOFT_COUNTERS - tx_byte += DATA_LEN(pkt); -#endif - } -#ifdef MLX5_PMD_SOFT_COUNTERS - txq->stats.obytes += tx_byte; -#endif -} - -/** - * Send multi-segmented packets until it encounters a single segment packet in - * the pkts list. - * - * @param txq - * Pointer to TX queue structure. - * @param pkts - * Pointer to array of packets to be sent. - * @param pkts_n - * Number of packets to be sent. - * - * @return - * Number of packets successfully transmitted (<= pkts_n). - */ -static uint16_t -txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, - uint16_t pkts_n) -{ - uint16_t elts_head = txq->elts_head; - const uint16_t elts_n = 1 << txq->elts_n; - const uint16_t elts_m = elts_n - 1; - const uint16_t wq_n = 1 << txq->wqe_n; - const uint16_t wq_mask = wq_n - 1; - const unsigned int nb_dword_per_wqebb = - MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE; - const unsigned int nb_dword_in_hdr = - sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE; - unsigned int n; - volatile struct mlx5_wqe *wqe = NULL; - - assert(elts_n > pkts_n); - mlx5_tx_complete(txq); - if (unlikely(!pkts_n)) - return 0; - for (n = 0; n < pkts_n; ++n) { - struct rte_mbuf *buf = pkts[n]; - unsigned int segs_n = buf->nb_segs; - unsigned int ds = nb_dword_in_hdr; - unsigned int len = PKT_LEN(buf); - uint16_t wqe_ci = txq->wqe_ci; - const uint8x16_t ctrl_shuf_m = { - 3, 2, 1, 0, /* bswap32 */ - 7, 6, 5, 4, /* bswap32 */ - 11, 10, 9, 8, /* bswap32 */ - 12, 13, 14, 15 - }; - uint8_t cs_flags = 0; - uint16_t max_elts; - uint16_t max_wqe; - uint8x16_t *t_wqe; - uint8_t *dseg; - uint8x16_t ctrl; - - assert(segs_n); - max_elts = elts_n - (elts_head - txq->elts_tail); - max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi); - /* - * A MPW session consumes 2 WQEs at most to - * include MLX5_MPW_DSEG_MAX pointers. - */ - if (segs_n == 1 || - max_elts < segs_n || max_wqe < 2) - break; - wqe = &((volatile struct mlx5_wqe64 *) - txq->wqes)[wqe_ci & wq_mask].hdr; - if (buf->ol_flags & - (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { - const uint64_t is_tunneled = - buf->ol_flags & (PKT_TX_TUNNEL_GRE | - PKT_TX_TUNNEL_VXLAN); - - if (is_tunneled && txq->tunnel_en) { - cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM | - MLX5_ETH_WQE_L4_INNER_CSUM; - if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM) - cs_flags |= MLX5_ETH_WQE_L3_CSUM; - } else { - cs_flags = MLX5_ETH_WQE_L3_CSUM | - MLX5_ETH_WQE_L4_CSUM; - } - } - /* Title WQEBB pointer. */ - t_wqe = (uint8x16_t *)wqe; - dseg = (uint8_t *)(wqe + 1); - do { - if (!(ds++ % nb_dword_per_wqebb)) { - dseg = (uint8_t *) - &((volatile struct mlx5_wqe64 *) - txq->wqes)[++wqe_ci & wq_mask]; - } - txq_wr_dseg_v(txq, dseg, &buf, 1); - dseg += MLX5_WQE_DWORD_SIZE; - (*txq->elts)[elts_head++ & elts_m] = buf; - buf = buf->next; - } while (--segs_n); - ++wqe_ci; - /* Fill CTRL in the header. 
*/ - ctrl = vreinterpretq_u8_u32((uint32x4_t) { - MLX5_OPC_MOD_MPW << 24 | - txq->wqe_ci << 8 | MLX5_OPCODE_TSO, - txq->qp_num_8s | ds, 0, 0}); - ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m); - vst1q_u8((void *)t_wqe, ctrl); - /* Fill ESEG in the header. */ - vst1q_u16((void *)(t_wqe + 1), - (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len), - 0, 0, 0, 0 }); - txq->wqe_ci = wqe_ci; - } - if (!n) - return 0; - txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); - txq->elts_head = elts_head; - if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { - wqe->ctrl[2] = rte_cpu_to_be_32(8); - wqe->ctrl[3] = txq->elts_head; - txq->elts_comp = 0; - ++txq->cq_pi; - } -#ifdef MLX5_PMD_SOFT_COUNTERS - txq->stats.opackets += n; -#endif - mlx5_tx_dbrec(txq, wqe); - return n; -} - -/** - * Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet, - * it returns to make it processed by txq_scatter_v(). All the packets in - * the pkts list should be single segment packets having same offload flags. - * This must be checked by txq_check_multiseg() and txq_calc_offload(). - * - * @param txq - * Pointer to TX queue structure. - * @param pkts - * Pointer to array of packets to be sent. - * @param pkts_n - * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST). - * @param cs_flags - * Checksum offload flags to be written in the descriptor. - * - * @return - * Number of packets successfully transmitted (<= pkts_n). - */ -static inline uint16_t -txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n, - uint8_t cs_flags) -{ - struct rte_mbuf **elts; - uint16_t elts_head = txq->elts_head; - const uint16_t elts_n = 1 << txq->elts_n; - const uint16_t elts_m = elts_n - 1; - const unsigned int nb_dword_per_wqebb = - MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE; - const unsigned int nb_dword_in_hdr = - sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE; - unsigned int n = 0; - unsigned int pos; - uint16_t max_elts; - uint16_t max_wqe; - uint32_t comp_req = 0; - const uint16_t wq_n = 1 << txq->wqe_n; - const uint16_t wq_mask = wq_n - 1; - uint16_t wq_idx = txq->wqe_ci & wq_mask; - volatile struct mlx5_wqe64 *wq = - &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx]; - volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq; - const uint8x16_t ctrl_shuf_m = { - 3, 2, 1, 0, /* bswap32 */ - 7, 6, 5, 4, /* bswap32 */ - 11, 10, 9, 8, /* bswap32 */ - 12, 13, 14, 15 - }; - uint8x16_t *t_wqe; - uint8_t *dseg; - uint8x16_t ctrl; - - /* Make sure all packets can fit into a single WQE. */ - assert(elts_n > pkts_n); - mlx5_tx_complete(txq); - max_elts = (elts_n - (elts_head - txq->elts_tail)); - max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi); - pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts); - if (unlikely(!pkts_n)) - return 0; - elts = &(*txq->elts)[elts_head & elts_m]; - /* Loop for available tailroom first. */ - n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n); - for (pos = 0; pos < (n & -2); pos += 2) - vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos])); - if (n & 1) - elts[pos] = pkts[pos]; - /* Check if it crosses the end of the queue. */ - if (unlikely(n < pkts_n)) { - elts = &(*txq->elts)[0]; - for (pos = 0; pos < pkts_n - n; ++pos) - elts[pos] = pkts[n + pos]; - } - txq->elts_head += pkts_n; - /* Save title WQEBB pointer. */ - t_wqe = (uint8x16_t *)wqe; - dseg = (uint8_t *)(wqe + 1); - /* Calculate the number of entries to the end. */ - n = RTE_MIN( - (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr, - pkts_n); - /* Fill DSEGs. 
*/ - txq_wr_dseg_v(txq, dseg, pkts, n); - /* Check if it crosses the end of the queue. */ - if (n < pkts_n) { - dseg = (uint8_t *)txq->wqes; - txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n); - } - if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) { - txq->elts_comp += pkts_n; - } else { - /* Request a completion. */ - txq->elts_comp = 0; - ++txq->cq_pi; - comp_req = 8; - } - /* Fill CTRL in the header. */ - ctrl = vreinterpretq_u8_u32((uint32x4_t) { - MLX5_OPC_MOD_ENHANCED_MPSW << 24 | - txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW, - txq->qp_num_8s | (pkts_n + 2), - comp_req, - txq->elts_head }); - ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m); - vst1q_u8((void *)t_wqe, ctrl); - /* Fill ESEG in the header. */ - vst1q_u8((void *)(t_wqe + 1), - (uint8x16_t) { 0, 0, 0, 0, - cs_flags, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0 }); -#ifdef MLX5_PMD_SOFT_COUNTERS - txq->stats.opackets += pkts_n; -#endif - txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) / - nb_dword_per_wqebb; - /* Ring QP doorbell. */ - mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST); - return pkts_n; -} - /** * Store free buffers to RX SW ring. * @@ -388,8 +65,11 @@ rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n) * @param elts * Pointer to SW ring to be filled. The first mbuf has to be pre-built from * the title completion descriptor to be copied to the rest of mbufs. + * + * @return + * Number of mini-CQEs successfully decompressed. */ -static inline void +static inline uint16_t rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, struct rte_mbuf **elts) { @@ -415,7 +95,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, }; /* Restore the compressed count. Must be 16 bits. */ const uint16_t mcqe_n = t_pkt->data_len + - (rxq->crc_present * ETHER_CRC_LEN); + (rxq->crc_present * RTE_ETHER_CRC_LEN); const uint64x2_t rearm = vld1q_u64((void *)&t_pkt->rearm_data); const uint32x4_t rxdf_mask = { @@ -429,8 +109,8 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, vreinterpretq_u8_u32(rxdf_mask)); const uint16x8_t crc_adj = { 0, 0, - rxq->crc_present * ETHER_CRC_LEN, 0, - rxq->crc_present * ETHER_CRC_LEN, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, + rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0 }; const uint32_t flow_tag = t_pkt->hash.fdir.hi; @@ -466,8 +146,9 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, sizeof(uint16_t) * 8) : 0); #endif - if (!(pos & 0x7) && pos + 8 < mcqe_n) - rte_prefetch0((void *)(cq + pos + 8)); + for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i) + if (likely(pos + i < mcqe_n)) + rte_prefetch0((void *)(cq + pos + i)); __asm__ volatile ( /* A.1 load mCQEs into a 128bit register. */ "ld1 {v16.16b - v17.16b}, [%[mcq]] \n\t" @@ -525,9 +206,30 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, elts[pos + 2]->hash.fdir.hi = flow_tag; elts[pos + 3]->hash.fdir.hi = flow_tag; } + if (rxq->dynf_meta) { + int32_t offs = rxq->flow_meta_offset; + const uint32_t meta = + *RTE_MBUF_DYNFIELD(t_pkt, offs, uint32_t *); + + /* Check if title packet has valid metadata. 
*/ + if (meta) { + MLX5_ASSERT(t_pkt->ol_flags & + rxq->flow_meta_mask); + *RTE_MBUF_DYNFIELD(elts[pos], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 1], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 2], offs, + uint32_t *) = meta; + *RTE_MBUF_DYNFIELD(elts[pos + 3], offs, + uint32_t *) = meta; + } + } pos += MLX5_VPMD_DESCS_PER_LOOP; /* Move to next CQE and invalidate consumed CQEs. */ if (!(pos & 0x7) && pos < mcqe_n) { + if (pos + 8 < mcqe_n) + rte_prefetch0((void *)(cq + pos + 8)); mcq = (void *)&(cq + pos)->pkt_info; for (i = 0; i < 8; ++i) cq[inv++].op_own = MLX5_CQE_INVALIDATE; @@ -541,6 +243,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq, rxq->stats.ibytes += rcvd_byte; #endif rxq->cq_ci += mcqe_n; + return mcqe_n; } /** @@ -583,18 +286,23 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, const uint32x4_t cv_mask = vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); - const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer); - const uint64x1_t r32_mask = vcreate_u64(0xffffffff); + const uint64x2_t mbuf_init = vld1q_u64 + ((const uint64_t *)&rxq->mbuf_initializer); uint64x2_t rearm0, rearm1, rearm2, rearm3; + uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3; if (rxq->mark) { const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT); const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR); - const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID); + uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID); + uint32x4_t invalid_mask; /* Check if flow tag is non-zero then set PKT_RX_FDIR. */ - ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags, - vceqzq_u32(flow_tag))); + invalid_mask = vceqzq_u32(flow_tag); + ol_flags = vorrq_u32(ol_flags, + vbicq_u32(fdir_flags, invalid_mask)); + /* Mask out invalid entries. */ + fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask); /* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */ ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_id_flags, @@ -614,14 +322,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, ptype = vshrn_n_u32(ptype_info, 10); /* Errored packets will have RTE_PTYPE_ALL_MASK. */ ptype = vorr_u16(ptype, op_err); - pkts[0]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)]; - pkts[1]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)]; - pkts[2]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)]; - pkts[3]->packet_type = - mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)]; + pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6); + pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4); + pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2); + pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0); + pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] | + !!(pt_idx0 & (1 << 6)) * rxq->tunnel; + pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] | + !!(pt_idx1 & (1 << 6)) * rxq->tunnel; + pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] | + !!(pt_idx2 & (1 << 6)) * rxq->tunnel; + pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] | + !!(pt_idx3 & (1 << 6)) * rxq->tunnel; /* Fill flags for checksum and VLAN. */ pinfo = vandq_u32(ptype_info, ptype_ol_mask); pinfo = vreinterpretq_u32_u8( @@ -636,18 +348,19 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, /* Merge to ol_flags. */ ol_flags = vorrq_u32(ol_flags, cv_flags); /* Merge mbuf_init and ol_flags, and store. 
*/ - rearm0 = vcombine_u64(mbuf_init, - vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32( - ol_flags)), 32)); - rearm1 = vcombine_u64(mbuf_init, - vand_u64(vget_high_u64(vreinterpretq_u64_u32( - ol_flags)), r32_mask)); - rearm2 = vcombine_u64(mbuf_init, - vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32( - ol_flags)), 32)); - rearm3 = vcombine_u64(mbuf_init, - vand_u64(vget_low_u64(vreinterpretq_u64_u32( - ol_flags)), r32_mask)); + rearm0 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 3), + vreinterpretq_u32_u64(mbuf_init), 2)); + rearm1 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 2), + vreinterpretq_u32_u64(mbuf_init), 2)); + rearm2 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 1), + vreinterpretq_u32_u64(mbuf_init), 2)); + rearm3 = vreinterpretq_u64_u32(vsetq_lane_u32 + (vgetq_lane_u32(ol_flags, 0), + vreinterpretq_u32_u64(mbuf_init), 2)); + vst1q_u64((void *)&pkts[0]->rearm_data, rearm0); vst1q_u64((void *)&pkts[1]->rearm_data, rearm1); vst1q_u64((void *)&pkts[2]->rearm_data, rearm2); @@ -665,12 +378,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, * Array to store received packets. * @param pkts_n * Maximum number of packets in array. + * @param[out] err + * Pointer to a flag. Set non-zero value if pkts array has at least one error + * packet to handle. + * @param[out] no_cq + * Pointer to a boolean. Set true if no new CQE seen. * * @return * Number of packets received including errors (<= pkts_n). */ static inline uint16_t -rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n, + uint64_t *err, bool *no_cq) { const uint16_t q_n = 1 << rxq->cqe_n; const uint16_t q_mask = q_n - 1; @@ -740,36 +459,29 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) 12, 13, 14, -1 /* 1st CQE */ }; const uint16x8_t crc_adj = { - 0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0 + 0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0 }; const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) }; - assert(rxq->sges_n == 0); - assert(rxq->cqe_n == rxq->elts_n); + MLX5_ASSERT(rxq->sges_n == 0); + MLX5_ASSERT(rxq->cqe_n == rxq->elts_n); cq = &(*rxq->cqes)[cq_idx]; rte_prefetch_non_temporal(cq); rte_prefetch_non_temporal(cq + 1); rte_prefetch_non_temporal(cq + 2); rte_prefetch_non_temporal(cq + 3); pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST); - /* - * Order of indexes: - * rq_ci >= cq_ci >= rq_pi - * Definition of indexes: - * rq_ci - cq_ci := # of buffers owned by HW (posted). - * cq_ci - rq_pi := # of buffers not returned to app (decompressed). - * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished). - */ repl_n = q_n - (rxq->rq_ci - rxq->rq_pi); - if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH) + if (repl_n >= rxq->rq_repl_thresh) mlx5_rx_replenish_bulk_mbuf(rxq, repl_n); /* See if there're unreturned mbufs from compressed CQE. */ - rcvd_pkt = rxq->cq_ci - rxq->rq_pi; + rcvd_pkt = rxq->decompressed; if (rcvd_pkt > 0) { rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n); rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt); rxq->rq_pi += rcvd_pkt; pkts += rcvd_pkt; + rxq->decompressed -= rcvd_pkt; } elts_idx = rxq->rq_pi & q_mask; elts = &(*rxq->elts)[elts_idx]; @@ -777,10 +489,13 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP); /* Not to cross queue end. 
*/ pkts_n = RTE_MIN(pkts_n, q_n - elts_idx); - if (!pkts_n) + pkts_n = RTE_MIN(pkts_n, q_n - cq_idx); + if (!pkts_n) { + *no_cq = !rcvd_pkt; return rcvd_pkt; + } /* At this point, there shouldn't be any remained packets. */ - assert(rxq->rq_pi == rxq->cq_ci); + MLX5_ASSERT(rxq->decompressed == 0); /* * Note that vectors have reverse order - {v3, v2, v1, v0}, because * there's no instruction to count trailing zeros. __builtin_clzl() is @@ -813,6 +528,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) uint16x4_t mask; uint16x4_t byte_cnt; uint32x4_t ptype_info, flow_tag; + register uint64x2_t c0, c1, c2, c3; uint8_t *p0, *p1, *p2, *p3; uint8_t *e0 = (void *)&elts[pos]->pkt_len; uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len; @@ -829,6 +545,16 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe); p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe); p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe); + /* B.0 (CQE 3) load a block having op_own. */ + c3 = vld1q_u64((uint64_t *)(p3 + 48)); + /* B.0 (CQE 2) load a block having op_own. */ + c2 = vld1q_u64((uint64_t *)(p2 + 48)); + /* B.0 (CQE 1) load a block having op_own. */ + c1 = vld1q_u64((uint64_t *)(p1 + 48)); + /* B.0 (CQE 0) load a block having op_own. */ + c0 = vld1q_u64((uint64_t *)(p0 + 48)); + /* Synchronize for loading the rest of blocks. */ + rte_cio_rmb(); /* Prefetch next 4 CQEs. */ if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) { unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP; @@ -838,50 +564,46 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_prefetch_non_temporal(&cq[next + 3]); } __asm__ volatile ( - /* B.1 (CQE 3) load a block having op_own. */ - "ld1 {v19.16b}, [%[p3]] \n\t" - "sub %[p3], %[p3], #48 \n\t" - /* B.2 (CQE 3) load the rest blocks. */ + /* B.1 (CQE 3) load the rest of blocks. */ "ld1 {v16.16b - v18.16b}, [%[p3]] \n\t" + /* B.2 (CQE 3) move the block having op_own. */ + "mov v19.16b, %[c3].16b \n\t" /* B.3 (CQE 3) extract 16B fields. */ "tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.1 (CQE 2) load the rest of blocks. */ + "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t" /* B.4 (CQE 3) adjust CRC length. */ "sub v23.8h, v23.8h, %[crc_adj].8h \n\t" - /* B.1 (CQE 2) load a block having op_own. */ - "ld1 {v19.16b}, [%[p2]] \n\t" - "sub %[p2], %[p2], #48 \n\t" /* C.1 (CQE 3) generate final structure for mbuf. */ "tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t" - /* B.2 (CQE 2) load the rest blocks. */ - "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t" + /* B.2 (CQE 2) move the block having op_own. */ + "mov v19.16b, %[c2].16b \n\t" /* B.3 (CQE 2) extract 16B fields. */ "tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.1 (CQE 1) load the rest of blocks. */ + "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t" /* B.4 (CQE 2) adjust CRC length. */ "sub v22.8h, v22.8h, %[crc_adj].8h \n\t" - /* B.1 (CQE 1) load a block having op_own. */ - "ld1 {v19.16b}, [%[p1]] \n\t" - "sub %[p1], %[p1], #48 \n\t" /* C.1 (CQE 2) generate final structure for mbuf. */ "tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t" - /* B.2 (CQE 1) load the rest blocks. */ - "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t" + /* B.2 (CQE 1) move the block having op_own. */ + "mov v19.16b, %[c1].16b \n\t" /* B.3 (CQE 1) extract 16B fields. */ "tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" + /* B.1 (CQE 0) load the rest of blocks. 
*/ + "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t" /* B.4 (CQE 1) adjust CRC length. */ "sub v21.8h, v21.8h, %[crc_adj].8h \n\t" - /* B.1 (CQE 0) load a block having op_own. */ - "ld1 {v19.16b}, [%[p0]] \n\t" - "sub %[p0], %[p0], #48 \n\t" /* C.1 (CQE 1) generate final structure for mbuf. */ "tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t" - /* B.2 (CQE 0) load the rest blocks. */ - "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t" + /* B.2 (CQE 0) move the block having op_own. */ + "mov v19.16b, %[c0].16b \n\t" + /* A.1 load mbuf pointers. */ + "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t" /* B.3 (CQE 0) extract 16B fields. */ "tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t" /* B.4 (CQE 0) adjust CRC length. */ "sub v20.8h, v20.8h, %[crc_adj].8h \n\t" - /* A.1 load mbuf pointers. */ - "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t" /* D.1 extract op_own byte. */ "tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t" /* C.2 (CQE 3) adjust flow mark. */ @@ -916,9 +638,9 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) [byte_cnt]"=&w"(byte_cnt), [ptype_info]"=&w"(ptype_info), [flow_tag]"=&w"(flow_tag) - :[p3]"r"(p3 + 48), [p2]"r"(p2 + 48), - [p1]"r"(p1 + 48), [p0]"r"(p0 + 48), + :[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0), [e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0), + [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0), [elts_p]"r"(elts_p), [pkts_p]"r"(pkts_p), [cqe_shuf_m]"w"(cqe_shuf_m), @@ -970,28 +692,74 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) opcode = vceq_u16(resp_err_check, opcode); opcode = vbic_u16(opcode, invalid_mask); /* D.4 mark if any error is set */ - rxq->pending_err |= - !!vget_lane_u64(vreinterpret_u64_u16(opcode), 0); + *err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0); /* C.4 fill in mbuf - rearm_data and packet_type. 
*/ rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag, opcode, &elts[pos]); if (rxq->hw_timestamp) { - elts[pos]->timestamp = - rte_be_to_cpu_64( - container_of(p0, struct mlx5_cqe, - pkt_info)->timestamp); - elts[pos + 1]->timestamp = - rte_be_to_cpu_64( - container_of(p1, struct mlx5_cqe, - pkt_info)->timestamp); - elts[pos + 2]->timestamp = - rte_be_to_cpu_64( - container_of(p2, struct mlx5_cqe, - pkt_info)->timestamp); - elts[pos + 3]->timestamp = - rte_be_to_cpu_64( - container_of(p3, struct mlx5_cqe, - pkt_info)->timestamp); + if (rxq->rt_timestamp) { + struct mlx5_dev_ctx_shared *sh = rxq->sh; + uint64_t ts; + + ts = rte_be_to_cpu_64 + (container_of(p0, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos]->timestamp = + mlx5_txpp_convert_rx_ts(sh, ts); + ts = rte_be_to_cpu_64 + (container_of(p1, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 1]->timestamp = + mlx5_txpp_convert_rx_ts(sh, ts); + ts = rte_be_to_cpu_64 + (container_of(p2, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 2]->timestamp = + mlx5_txpp_convert_rx_ts(sh, ts); + ts = rte_be_to_cpu_64 + (container_of(p3, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 3]->timestamp = + mlx5_txpp_convert_rx_ts(sh, ts); + } else { + elts[pos]->timestamp = rte_be_to_cpu_64 + (container_of(p0, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 1]->timestamp = rte_be_to_cpu_64 + (container_of(p1, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 2]->timestamp = rte_be_to_cpu_64 + (container_of(p2, struct mlx5_cqe, + pkt_info)->timestamp); + elts[pos + 3]->timestamp = rte_be_to_cpu_64 + (container_of(p3, struct mlx5_cqe, + pkt_info)->timestamp); + } + } + if (!!rxq->flow_meta_mask) { + /* This code is subject for futher optimization. */ + int32_t offs = rxq->flow_meta_offset; + + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + container_of(p0, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + container_of(p1, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + container_of(p2, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) = + container_of(p3, struct mlx5_cqe, + pkt_info)->flow_table_metadata; + if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *)) + elts[pos]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *)) + elts[pos + 1]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *)) + elts[pos + 2]->ol_flags |= rxq->flow_meta_mask; + if (*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *)) + elts[pos + 3]->ol_flags |= rxq->flow_meta_mask; } #ifdef MLX5_PMD_SOFT_COUNTERS /* Add up received bytes count. */ @@ -1006,10 +774,12 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) break; } /* If no new CQE seen, return without updating cq_db. */ - if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) + if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP)) { + *no_cq = true; return rcvd_pkt; + } /* Update the consumer indexes for non-compressed CQEs. */ - assert(nocmp_n <= pkts_n); + MLX5_ASSERT(nocmp_n <= pkts_n); rxq->cq_ci += nocmp_n; rxq->rq_pi += nocmp_n; rcvd_pkt += nocmp_n; @@ -1019,20 +789,23 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n) #endif /* Decompress the last CQE if compressed. 
*/ if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) { - assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP)); - rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]); + MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP)); + rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n], + &elts[nocmp_n]); /* Return more packets if needed. */ if (nocmp_n < pkts_n) { - uint16_t n = rxq->cq_ci - rxq->rq_pi; + uint16_t n = rxq->decompressed; n = RTE_MIN(n, pkts_n - nocmp_n); rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n); rxq->rq_pi += n; rcvd_pkt += n; + rxq->decompressed -= n; } } - rte_compiler_barrier(); + rte_cio_wmb(); *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); + *no_cq = !rcvd_pkt; return rcvd_pkt; }
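
Note on the rearm_data rewrite in rxq_cq_to_ptype_oflags_v(): the patch drops the 64-bit vcombine_u64()/vshr_n_u64()/vand_u64() sequence and instead loads the 16-byte mbuf initializer once into a 128-bit register, then inserts each packet's 32-bit ol_flags word into 32-bit lane 2 with vsetq_lane_u32() before a single 16-byte store to rearm_data. The standalone sketch below uses illustrative values and is not driver code; it only assumes the little-endian aarch64 mbuf layout in which ol_flags immediately follows the 8-byte rearm fields.

#include <stdint.h>
#include <stdio.h>
#include <arm_neon.h>

int
main(void)
{
	/* 16-byte template: 8 bytes of rearm fields, ol_flags zeroed. */
	uint64_t mbuf_init_mem[2] = { 0x0001000100000080ULL, 0 };
	uint64x2_t mbuf_init = vld1q_u64(mbuf_init_mem);
	/* Per-packet 32-bit offload flags, e.g. decoded from a CQE. */
	uint32_t ol_flags = 0x00000180;
	/* Insert ol_flags into 32-bit lane 2 (byte offset 8). */
	uint64x2_t rearm = vreinterpretq_u64_u32(vsetq_lane_u32
		(ol_flags, vreinterpretq_u32_u64(mbuf_init), 2));
	uint64_t out[2];

	/* One 16-byte store covers the rearm fields and ol_flags together. */
	vst1q_u64(out, rearm);
	printf("rearm=%016llx ol_flags=%016llx\n",
	       (unsigned long long)out[0], (unsigned long long)out[1]);
	return 0;
}

This mirrors the four rearm0..rearm3 stores in the patch: the full 16 bytes (rearm fields plus the low half of ol_flags) are written with one vst1q_u64() per mbuf, with ol_flags handled entirely in 32-bit lanes.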