#pragma GCC diagnostic error "-Wpedantic"
#endif
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
#include "mlx5.h"
#include "mlx5_utils.h"
}
/**
- * Count the number of continuous single segment packets. The first packet must
- * be a single segment packet.
+ * Count the number of continuous single segment packets.
*
* @param pkts
* Pointer to array of packets.
if (!pkts_n)
return 0;
- assert(NB_SEGS(pkts[0]) == 1);
/* Count the number of continuous single segment packets. */
- for (pos = 1; pos < pkts_n; ++pos)
+ for (pos = 0; pos < pkts_n; ++pos)
if (NB_SEGS(pkts[pos]) > 1)
break;
return pos;
if (segs_n == 1 ||
max_elts < segs_n || max_wqe < 2)
break;
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
if (buf->ol_flags &
/* Fill ESEG in the header. */
_mm_store_si128(t_wqe + 1,
_mm_set_epi16(0, 0, 0, 0,
- htons(len), cs_flags,
+ rte_cpu_to_be_16(len), cs_flags,
0, 0));
txq->wqe_ci = wqe_ci;
}
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
++txq->cq_pi;
max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
if (unlikely(!pkts_n))
return 0;
elts = &(*txq->elts)[elts_head & elts_m];
return;
}
for (i = 0; i < n; ++i)
- wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
- RTE_PKTMBUF_HEADROOM);
+ wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
rxq->rq_ci += n;
rte_wmb();
- *rxq->rq_db = htonl(rxq->rq_ci);
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
/**
ptype = _mm_and_si128(ptype, ptype_mask);
pinfo = _mm_and_si128(pinfo, pinfo_mask);
pinfo = _mm_slli_epi32(pinfo, 16);
- ptype = _mm_or_si128(ptype, pinfo);
- ptype = _mm_srli_epi32(ptype, 10);
+ /* Make pinfo has merged fields for ol_flags calculation. */
+ pinfo = _mm_or_si128(ptype, pinfo);
+ ptype = _mm_srli_epi32(pinfo, 10);
ptype = _mm_packs_epi32(ptype, zero);
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
op_err = _mm_srli_epi16(op_err, 8);
{
uint16_t n = 0;
unsigned int i;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t err_bytes = 0;
+#endif
for (i = 0; i < pkts_n; ++i) {
struct rte_mbuf *pkt = pkts[i];
- if (pkt->packet_type == RTE_PTYPE_ALL_MASK)
+ if (pkt->packet_type == RTE_PTYPE_ALL_MASK) {
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ err_bytes += PKT_LEN(pkt);
+#endif
rte_pktmbuf_free_seg(pkt);
- else
+ } else {
pkts[n++] = pkt;
+ }
}
rxq->stats.idropped += (pkts_n - n);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Correct counters of errored completions. */
+ rxq->stats.ipackets -= (pkts_n - n);
+ rxq->stats.ibytes -= err_bytes;
+#endif
rxq->pending_err = 0;
return n;
}
}
}
rte_wmb();
- *rxq->cq_db = htonl(rxq->cq_ci);
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
return rcvd_pkt;
}
int __attribute__((cold))
priv_check_vec_tx_support(struct priv *priv)
{
- if (priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ if (!priv->tx_vec_en ||
+ priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
priv->mps != MLX5_MPW_ENHANCED ||
priv->tso)
return -ENOTSUP;
int __attribute__((cold))
rxq_check_vec_support(struct rxq *rxq)
{
-	if (rxq->sges_n != 0)
+	struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+
+	/* Vectorized Rx requires the per-device rx_vec_en flag and a
+	 * non-scattered queue: sges_n != 0 means multi-segment Rx, which
+	 * the vector burst routine does not handle.
+	 */
+	if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
		return -ENOTSUP;
	return 1;
}
{
uint16_t i;
+ if (!priv->rx_vec_en)
+ return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
return -ENOTSUP;
return 1;
}
-
-/**
- * Prepare for vectorized RX.
- *
- * @param priv
- * Pointer to private structure.
- */
-void
-priv_prep_vec_rx_function(struct priv *priv)
-{
- uint16_t i;
-
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
- struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
- const uint16_t desc = 1 << rxq->elts_n;
- int j;
-
- assert(rxq->elts_n == rxq->cqe_n);
- /* Initialize default rearm_data for vPMD. */
- mbuf_init->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(mbuf_init, 1);
- mbuf_init->nb_segs = 1;
- mbuf_init->port = rxq->port_id;
- /*
- * prevent compiler reordering:
- * rearm_data covers previous fields.
- */
- rte_compiler_barrier();
- rxq->mbuf_initializer =
- *(uint64_t *)&mbuf_init->rearm_data;
- /* Padding with a fake mbuf for vectorized Rx. */
- for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
- (*rxq->elts)[desc + j] = &rxq->fake_mbuf;
- /* Mark that it need to be cleaned up for rxq_alloc_elts(). */
- rxq->trim_elts = 1;
- }
-}