-/*-
- * BSD LICENSE
- *
- * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
#include <rte_mempool.h>
#include <rte_prefetch.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rxtx_vec.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
#pragma GCC diagnostic ignored "-Wcast-qual"
-/**
- * Fill in buffer descriptors in a multi-packet send descriptor.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param dseg
- * Pointer to buffer descriptor to be written.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param n
- * Number of packets to be filled.
- */
-static inline void
-txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
- struct rte_mbuf **pkts, unsigned int n)
-{
- unsigned int pos;
- uintptr_t addr;
- const uint8x16_t dseg_shuf_m = {
- 3, 2, 1, 0, /* length, bswap32 */
- 4, 5, 6, 7, /* lkey */
- 15, 14, 13, 12, /* addr, bswap64 */
- 11, 10, 9, 8
- };
-#ifdef MLX5_PMD_SOFT_COUNTERS
- uint32_t tx_byte = 0;
-#endif
-
- for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
- uint8x16_t desc;
- struct rte_mbuf *pkt = pkts[pos];
-
- addr = rte_pktmbuf_mtod(pkt, uintptr_t);
- desc = vreinterpretq_u8_u32((uint32x4_t) {
- DATA_LEN(pkt),
- mlx5_tx_mb2mr(txq, pkt),
- addr,
- addr >> 32 });
- desc = vqtbl1q_u8(desc, dseg_shuf_m);
- vst1q_u8(dseg, desc);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tx_byte += DATA_LEN(pkt);
-#endif
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.obytes += tx_byte;
-#endif
-}
-
-/**
- * Send multi-segmented packets until it encounters a single segment packet in
- * the pkts list.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param pkts_n
- * Number of packets to be sent.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static uint16_t
-txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- const uint16_t wq_n = 1 << txq->wqe_n;
- const uint16_t wq_mask = wq_n - 1;
- const unsigned int nb_dword_per_wqebb =
- MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
- const unsigned int nb_dword_in_hdr =
- sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
- unsigned int n;
- volatile struct mlx5_wqe *wqe = NULL;
-
- assert(elts_n > pkts_n);
- mlx5_tx_complete(txq);
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
- if (unlikely(!pkts_n))
- return 0;
- for (n = 0; n < pkts_n; ++n) {
- struct rte_mbuf *buf = pkts[n];
- unsigned int segs_n = buf->nb_segs;
- unsigned int ds = nb_dword_in_hdr;
- unsigned int len = PKT_LEN(buf);
- uint16_t wqe_ci = txq->wqe_ci;
- const uint8x16_t ctrl_shuf_m = {
- 3, 2, 1, 0, /* bswap32 */
- 7, 6, 5, 4, /* bswap32 */
- 11, 10, 9, 8, /* bswap32 */
- 12, 13, 14, 15
- };
- uint8_t cs_flags;
- uint16_t max_elts;
- uint16_t max_wqe;
- uint8x16_t *t_wqe;
- uint8_t *dseg;
- uint8x16_t ctrl;
-
- assert(segs_n);
- max_elts = elts_n - (elts_head - txq->elts_tail);
- max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
- /*
- * A MPW session consumes 2 WQEs at most to
- * include MLX5_MPW_DSEG_MAX pointers.
- */
- if (segs_n == 1 ||
- max_elts < segs_n || max_wqe < 2)
- break;
- wqe = &((volatile struct mlx5_wqe64 *)
- txq->wqes)[wqe_ci & wq_mask].hdr;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
- /* Title WQEBB pointer. */
- t_wqe = (uint8x16_t *)wqe;
- dseg = (uint8_t *)(wqe + 1);
- do {
- if (!(ds++ % nb_dword_per_wqebb)) {
- dseg = (uint8_t *)
- &((volatile struct mlx5_wqe64 *)
- txq->wqes)[++wqe_ci & wq_mask];
- }
- txq_wr_dseg_v(txq, dseg, &buf, 1);
- dseg += MLX5_WQE_DWORD_SIZE;
- (*txq->elts)[elts_head++ & elts_m] = buf;
- buf = buf->next;
- } while (--segs_n);
- ++wqe_ci;
- /* Fill CTRL in the header. */
- ctrl = vreinterpretq_u8_u32((uint32x4_t) {
- MLX5_OPC_MOD_MPW << 24 |
- txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
- txq->qp_num_8s | ds, 0, 0});
- ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
- vst1q_u8((void *)t_wqe, ctrl);
- /* Fill ESEG in the header. */
- vst1q_u16((void *)(t_wqe + 1),
- (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
- 0, 0, 0, 0 });
- txq->wqe_ci = wqe_ci;
- }
- if (!n)
- return 0;
- txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
- txq->elts_head = elts_head;
- if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
- wqe->ctrl[2] = rte_cpu_to_be_32(8);
- wqe->ctrl[3] = txq->elts_head;
- txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.opackets += n;
-#endif
- mlx5_tx_dbrec(txq, wqe);
- return n;
-}
-
-/**
- * Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
- * it returns to make it processed by txq_scatter_v(). All the packets in
- * the pkts list should be single segment packets having same offload flags.
- * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param pkts_n
- * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
- * @param cs_flags
- * Checksum offload flags to be written in the descriptor.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static inline uint16_t
-txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
-{
- struct rte_mbuf **elts;
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- const unsigned int nb_dword_per_wqebb =
- MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
- const unsigned int nb_dword_in_hdr =
- sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
- unsigned int n = 0;
- unsigned int pos;
- uint16_t max_elts;
- uint16_t max_wqe;
- uint32_t comp_req = 0;
- const uint16_t wq_n = 1 << txq->wqe_n;
- const uint16_t wq_mask = wq_n - 1;
- uint16_t wq_idx = txq->wqe_ci & wq_mask;
- volatile struct mlx5_wqe64 *wq =
- &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
- volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
- const uint8x16_t ctrl_shuf_m = {
- 3, 2, 1, 0, /* bswap32 */
- 7, 6, 5, 4, /* bswap32 */
- 11, 10, 9, 8, /* bswap32 */
- 12, 13, 14, 15
- };
- uint8x16_t *t_wqe;
- uint8_t *dseg;
- uint8x16_t ctrl;
-
- /* Make sure all packets can fit into a single WQE. */
- assert(elts_n > pkts_n);
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
- if (unlikely(!pkts_n))
- return 0;
- elts = &(*txq->elts)[elts_head & elts_m];
- /* Loop for available tailroom first. */
- n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
- for (pos = 0; pos < (n & -2); pos += 2)
- vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
- if (n & 1)
- elts[pos] = pkts[pos];
- /* Check if it crosses the end of the queue. */
- if (unlikely(n < pkts_n)) {
- elts = &(*txq->elts)[0];
- for (pos = 0; pos < pkts_n - n; ++pos)
- elts[pos] = pkts[n + pos];
- }
- txq->elts_head += pkts_n;
- /* Save title WQEBB pointer. */
- t_wqe = (uint8x16_t *)wqe;
- dseg = (uint8_t *)(wqe + 1);
- /* Calculate the number of entries to the end. */
- n = RTE_MIN(
- (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
- pkts_n);
- /* Fill DSEGs. */
- txq_wr_dseg_v(txq, dseg, pkts, n);
- /* Check if it crosses the end of the queue. */
- if (n < pkts_n) {
- dseg = (uint8_t *)txq->wqes;
- txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
- }
- if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
- txq->elts_comp += pkts_n;
- } else {
- /* Request a completion. */
- txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
- comp_req = 8;
- }
- /* Fill CTRL in the header. */
- ctrl = vreinterpretq_u8_u32((uint32x4_t) {
- MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
- txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
- txq->qp_num_8s | (pkts_n + 2),
- comp_req,
- txq->elts_head });
- ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
- vst1q_u8((void *)t_wqe, ctrl);
- /* Fill ESEG in the header. */
- vst1q_u8((void *)(t_wqe + 1),
- (uint8x16_t) { 0, 0, 0, 0,
- cs_flags, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 });
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.opackets += pkts_n;
-#endif
- txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
- nb_dword_per_wqebb;
- /* Ring QP doorbell. */
- mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
- return pkts_n;
-}
-
/**
* Store free buffers to RX SW ring.
*
* @param elts
* Pointer to SW ring to be filled. The first mbuf has to be pre-built from
* the title completion descriptor to be copied to the rest of mbufs.
+ *
+ * @return
+ * Number of mini-CQEs successfully decompressed.
*/
-static inline void
+static inline uint16_t
rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
struct rte_mbuf **elts)
{
};
/* Restore the compressed count. Must be 16 bits. */
const uint16_t mcqe_n = t_pkt->data_len +
- (rxq->crc_present * ETHER_CRC_LEN);
+ (rxq->crc_present * RTE_ETHER_CRC_LEN);
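+	/*
+	 * The pre-built title mbuf had the CRC length subtracted from its
+	 * data_len, so the CRC length is added back to recover the raw count.
+	 */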
const uint64x2_t rearm =
vld1q_u64((void *)&t_pkt->rearm_data);
const uint32x4_t rxdf_mask = {
vreinterpretq_u8_u32(rxdf_mask));
const uint16x8_t crc_adj = {
0, 0,
- rxq->crc_present * ETHER_CRC_LEN, 0,
- rxq->crc_present * ETHER_CRC_LEN, 0,
+ rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
+ rxq->crc_present * RTE_ETHER_CRC_LEN, 0,
0, 0
};
const uint32_t flow_tag = t_pkt->hash.fdir.hi;
rxq->stats.ibytes += rcvd_byte;
#endif
rxq->cq_ci += mcqe_n;
+ return mcqe_n;
}
/**
const uint32x4_t cv_mask =
vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
- const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
- const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
+ const uint64x2_t mbuf_init = vld1q_u64
+ ((const uint64_t *)&rxq->mbuf_initializer);
uint64x2_t rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
if (rxq->mark) {
const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
- const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+ uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+ uint32x4_t invalid_mask;
/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
- ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags,
- vceqzq_u32(flow_tag)));
+ invalid_mask = vceqzq_u32(flow_tag);
+ ol_flags = vorrq_u32(ol_flags,
+ vbicq_u32(fdir_flags, invalid_mask));
+ /* Mask out invalid entries. */
+ fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
		/* Check if flow tag is MLX5_FLOW_MARK_DEFAULT. */
ol_flags = vorrq_u32(ol_flags,
vbicq_u32(fdir_id_flags,
ptype = vshrn_n_u32(ptype_info, 10);
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
ptype = vorr_u16(ptype, op_err);
- pkts[0]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
- pkts[1]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
- pkts[2]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
- pkts[3]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
+ pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
+ pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
+ pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
+ pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
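+	/*
+	 * Bit 6 of the CQE ptype index marks a tunneled packet; the boolean
+	 * multiply below merges rxq->tunnel into the packet type without a
+	 * branch.
+	 */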
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = vandq_u32(ptype_info, ptype_ol_mask);
pinfo = vreinterpretq_u32_u8(
/* Merge to ol_flags. */
ol_flags = vorrq_u32(ol_flags, cv_flags);
/* Merge mbuf_init and ol_flags, and store. */
- rearm0 = vcombine_u64(mbuf_init,
- vshr_n_u64(vget_high_u64(vreinterpretq_u64_u32(
- ol_flags)), 32));
- rearm1 = vcombine_u64(mbuf_init,
- vand_u64(vget_high_u64(vreinterpretq_u64_u32(
- ol_flags)), r32_mask));
- rearm2 = vcombine_u64(mbuf_init,
- vshr_n_u64(vget_low_u64(vreinterpretq_u64_u32(
- ol_flags)), 32));
- rearm3 = vcombine_u64(mbuf_init,
- vand_u64(vget_low_u64(vreinterpretq_u64_u32(
- ol_flags)), r32_mask));
+ rearm0 = vreinterpretq_u64_u32(vsetq_lane_u32
+ (vgetq_lane_u32(ol_flags, 3),
+ vreinterpretq_u32_u64(mbuf_init), 2));
+ rearm1 = vreinterpretq_u64_u32(vsetq_lane_u32
+ (vgetq_lane_u32(ol_flags, 2),
+ vreinterpretq_u32_u64(mbuf_init), 2));
+ rearm2 = vreinterpretq_u64_u32(vsetq_lane_u32
+ (vgetq_lane_u32(ol_flags, 1),
+ vreinterpretq_u32_u64(mbuf_init), 2));
+ rearm3 = vreinterpretq_u64_u32(vsetq_lane_u32
+ (vgetq_lane_u32(ol_flags, 0),
+ vreinterpretq_u32_u64(mbuf_init), 2));
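+	/* Lane 2 is the 32-bit ol_flags word within the 16-byte rearm block. */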
+
vst1q_u64((void *)&pkts[0]->rearm_data, rearm0);
vst1q_u64((void *)&pkts[1]->rearm_data, rearm1);
vst1q_u64((void *)&pkts[2]->rearm_data, rearm2);
12, 13, 14, -1 /* 1st CQE */
};
const uint16x8_t crc_adj = {
- 0, 0, rxq->crc_present * ETHER_CRC_LEN, 0, 0, 0, 0, 0
+ 0, 0, rxq->crc_present * RTE_ETHER_CRC_LEN, 0, 0, 0, 0, 0
};
const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
rte_prefetch_non_temporal(cq + 2);
rte_prefetch_non_temporal(cq + 3);
pkts_n = RTE_MIN(pkts_n, MLX5_VPMD_RX_MAX_BURST);
- /*
- * Order of indexes:
- * rq_ci >= cq_ci >= rq_pi
- * Definition of indexes:
- * rq_ci - cq_ci := # of buffers owned by HW (posted).
- * cq_ci - rq_pi := # of buffers not returned to app (decompressed).
- * N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
- */
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= rxq->rq_repl_thresh)
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
- rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
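+	/*
+	 * rxq->decompressed counts mini-CQEs already decompressed but not yet
+	 * returned to the application.
+	 */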
+ rcvd_pkt = rxq->decompressed;
if (rcvd_pkt > 0) {
rcvd_pkt = RTE_MIN(rcvd_pkt, pkts_n);
rxq_copy_mbuf_v(rxq, pkts, rcvd_pkt);
rxq->rq_pi += rcvd_pkt;
pkts += rcvd_pkt;
+ rxq->decompressed -= rcvd_pkt;
}
elts_idx = rxq->rq_pi & q_mask;
elts = &(*rxq->elts)[elts_idx];
pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
+ pkts_n = RTE_MIN(pkts_n, q_n - cq_idx);
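+	/* Not to cross the CQ ring end either. */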
if (!pkts_n)
return rcvd_pkt;
	/* At this point, there shouldn't be any remaining packets. */
- assert(rxq->rq_pi == rxq->cq_ci);
+ assert(rxq->decompressed == 0);
/*
* Note that vectors have reverse order - {v3, v2, v1, v0}, because
* there's no instruction to count trailing zeros. __builtin_clzl() is
uint16x4_t mask;
uint16x4_t byte_cnt;
uint32x4_t ptype_info, flow_tag;
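+		/* op_own blocks of the four CQEs, loaded before the asm section. */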
+ register uint64x2_t c0, c1, c2, c3;
uint8_t *p0, *p1, *p2, *p3;
uint8_t *e0 = (void *)&elts[pos]->pkt_len;
uint8_t *e1 = (void *)&elts[pos + 1]->pkt_len;
p1 = p0 + (pkts_n - pos > 1) * sizeof(struct mlx5_cqe);
p2 = p1 + (pkts_n - pos > 2) * sizeof(struct mlx5_cqe);
p3 = p2 + (pkts_n - pos > 3) * sizeof(struct mlx5_cqe);
+ /* B.0 (CQE 3) load a block having op_own. */
+ c3 = vld1q_u64((uint64_t *)(p3 + 48));
+ /* B.0 (CQE 2) load a block having op_own. */
+ c2 = vld1q_u64((uint64_t *)(p2 + 48));
+ /* B.0 (CQE 1) load a block having op_own. */
+ c1 = vld1q_u64((uint64_t *)(p1 + 48));
+ /* B.0 (CQE 0) load a block having op_own. */
+ c0 = vld1q_u64((uint64_t *)(p0 + 48));
+		/* Synchronize before loading the rest of the blocks. */
+ rte_cio_rmb();
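+		/*
+		 * op_own carries the CQE ownership/validity indication and is
+		 * read first; the barrier keeps the remaining CQE loads from
+		 * being reordered before it.
+		 */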
/* Prefetch next 4 CQEs. */
if (pkts_n - pos >= 2 * MLX5_VPMD_DESCS_PER_LOOP) {
unsigned int next = pos + MLX5_VPMD_DESCS_PER_LOOP;
rte_prefetch_non_temporal(&cq[next + 3]);
}
__asm__ volatile (
- /* B.1 (CQE 3) load a block having op_own. */
- "ld1 {v19.16b}, [%[p3]] \n\t"
- "sub %[p3], %[p3], #48 \n\t"
- /* B.2 (CQE 3) load the rest blocks. */
+			/* B.1 (CQE 3) load the rest of the blocks. */
"ld1 {v16.16b - v18.16b}, [%[p3]] \n\t"
+ /* B.2 (CQE 3) move the block having op_own. */
+ "mov v19.16b, %[c3].16b \n\t"
/* B.3 (CQE 3) extract 16B fields. */
"tbl v23.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+			/* B.1 (CQE 2) load the rest of the blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
/* B.4 (CQE 3) adjust CRC length. */
"sub v23.8h, v23.8h, %[crc_adj].8h \n\t"
- /* B.1 (CQE 2) load a block having op_own. */
- "ld1 {v19.16b}, [%[p2]] \n\t"
- "sub %[p2], %[p2], #48 \n\t"
/* C.1 (CQE 3) generate final structure for mbuf. */
"tbl v15.16b, {v23.16b}, %[mb_shuf_m].16b \n\t"
- /* B.2 (CQE 2) load the rest blocks. */
- "ld1 {v16.16b - v18.16b}, [%[p2]] \n\t"
+ /* B.2 (CQE 2) move the block having op_own. */
+ "mov v19.16b, %[c2].16b \n\t"
/* B.3 (CQE 2) extract 16B fields. */
"tbl v22.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+			/* B.1 (CQE 1) load the rest of the blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
/* B.4 (CQE 2) adjust CRC length. */
"sub v22.8h, v22.8h, %[crc_adj].8h \n\t"
- /* B.1 (CQE 1) load a block having op_own. */
- "ld1 {v19.16b}, [%[p1]] \n\t"
- "sub %[p1], %[p1], #48 \n\t"
/* C.1 (CQE 2) generate final structure for mbuf. */
"tbl v14.16b, {v22.16b}, %[mb_shuf_m].16b \n\t"
- /* B.2 (CQE 1) load the rest blocks. */
- "ld1 {v16.16b - v18.16b}, [%[p1]] \n\t"
+ /* B.2 (CQE 1) move the block having op_own. */
+ "mov v19.16b, %[c1].16b \n\t"
/* B.3 (CQE 1) extract 16B fields. */
"tbl v21.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
+			/* B.1 (CQE 0) load the rest of the blocks. */
+ "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
/* B.4 (CQE 1) adjust CRC length. */
"sub v21.8h, v21.8h, %[crc_adj].8h \n\t"
- /* B.1 (CQE 0) load a block having op_own. */
- "ld1 {v19.16b}, [%[p0]] \n\t"
- "sub %[p0], %[p0], #48 \n\t"
/* C.1 (CQE 1) generate final structure for mbuf. */
"tbl v13.16b, {v21.16b}, %[mb_shuf_m].16b \n\t"
- /* B.2 (CQE 0) load the rest blocks. */
- "ld1 {v16.16b - v18.16b}, [%[p0]] \n\t"
+ /* B.2 (CQE 0) move the block having op_own. */
+ "mov v19.16b, %[c0].16b \n\t"
+ /* A.1 load mbuf pointers. */
+ "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
/* B.3 (CQE 0) extract 16B fields. */
"tbl v20.16b, {v16.16b - v19.16b}, %[cqe_shuf_m].16b \n\t"
/* B.4 (CQE 0) adjust CRC length. */
"sub v20.8h, v20.8h, %[crc_adj].8h \n\t"
- /* A.1 load mbuf pointers. */
- "ld1 {v24.2d - v25.2d}, [%[elts_p]] \n\t"
/* D.1 extract op_own byte. */
"tbl %[op_own].8b, {v20.16b - v23.16b}, %[owner_shuf_m].8b \n\t"
/* C.2 (CQE 3) adjust flow mark. */
[byte_cnt]"=&w"(byte_cnt),
[ptype_info]"=&w"(ptype_info),
[flow_tag]"=&w"(flow_tag)
- :[p3]"r"(p3 + 48), [p2]"r"(p2 + 48),
- [p1]"r"(p1 + 48), [p0]"r"(p0 + 48),
+ :[p3]"r"(p3), [p2]"r"(p2), [p1]"r"(p1), [p0]"r"(p0),
[e3]"r"(e3), [e2]"r"(e2), [e1]"r"(e1), [e0]"r"(e0),
+ [c3]"w"(c3), [c2]"w"(c2), [c1]"w"(c1), [c0]"w"(c0),
[elts_p]"r"(elts_p),
[pkts_p]"r"(pkts_p),
[cqe_shuf_m]"w"(cqe_shuf_m),
container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
}
+ if (rte_flow_dynf_metadata_avail()) {
+			/* This code is subject to further optimization. */
+ *RTE_FLOW_DYNF_METADATA(elts[pos]) =
+ container_of(p0, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 1]) =
+ container_of(p1, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 2]) =
+ container_of(p2, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 3]) =
+ container_of(p3, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos]))
+ elts[pos]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 1]))
+ elts[pos + 1]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 2]))
+ elts[pos + 2]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 3]))
+ elts[pos + 3]->ol_flags |= PKT_RX_DYNF_METADATA;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = vbic_u16(byte_cnt, invalid_mask);
/* Decompress the last CQE if compressed. */
if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
- rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
+ rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
+ &elts[nocmp_n]);
/* Return more packets if needed. */
if (nocmp_n < pkts_n) {
- uint16_t n = rxq->cq_ci - rxq->rq_pi;
+ uint16_t n = rxq->decompressed;
n = RTE_MIN(n, pkts_n - nocmp_n);
rxq_copy_mbuf_v(rxq, &pkts[nocmp_n], n);
rxq->rq_pi += n;
rcvd_pkt += n;
+ rxq->decompressed -= n;
}
}
- rte_compiler_barrier();
+ rte_cio_wmb();
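+	/*
+	 * Update the doorbell record only after all CQE processing is
+	 * complete (ordered by the barrier above).
+	 */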
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
return rcvd_pkt;
}