#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
-#include <infiniband/mlx5_hw.h>
-#include <infiniband/arch.h>
+#include <infiniband/mlx5dv.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
* Number of packets to be filled.
*/
static inline void
-txq_wr_dseg_v(struct txq *txq, __m128i *dseg,
+txq_wr_dseg_v(struct mlx5_txq_data *txq, __m128i *dseg,
struct rte_mbuf **pkts, unsigned int n)
{
unsigned int pos;
* Number of packets having same ol_flags.
*/
static inline unsigned int
-txq_calc_offload(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t *cs_flags)
+txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n, uint8_t *cs_flags)
{
unsigned int pos;
const uint64_t ol_mask =
* Number of packets successfully transmitted (<= pkts_n).
*/
static uint16_t
-txq_scatter_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n)
{
uint16_t elts_head = txq->elts_head;
const uint16_t elts_n = 1 << txq->elts_n;
if (segs_n == 1 ||
max_elts < segs_n || max_wqe < 2)
break;
+ if (segs_n > MLX5_MPW_DSEG_MAX) {
+ txq->stats.oerrors++;
+ break;
+ }
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
if (buf->ol_flags &
/* Fill ESEG in the header. */
_mm_store_si128(t_wqe + 1,
_mm_set_epi16(0, 0, 0, 0,
- htons(len), cs_flags,
+ rte_cpu_to_be_16(len), cs_flags,
0, 0));
txq->wqe_ci = wqe_ci;
}
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
- wqe->ctrl[2] = htonl(8);
+ wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
++txq->cq_pi;
* Number of packets successfully transmitted (<= pkts_n).
*/
static inline uint16_t
-txq_burst_v(struct txq *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
+txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
uint8_t cs_flags)
{
struct rte_mbuf **elts;
max_elts = (elts_n - (elts_head - txq->elts_tail));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
+ assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
if (unlikely(!pkts_n))
return 0;
elts = &(*txq->elts)[elts_head & elts_m];
mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t nb_tx = 0;
while (pkts_n > nb_tx) {
uint16_t
mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct txq *txq = (struct txq *)dpdk_txq;
+ struct mlx5_txq_data *txq = (struct mlx5_txq_data *)dpdk_txq;
uint16_t nb_tx = 0;
while (pkts_n > nb_tx) {
* Number of packets to be stored.
*/
static inline void
-rxq_copy_mbuf_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t n)
+rxq_copy_mbuf_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t n)
{
const uint16_t q_mask = (1 << rxq->elts_n) - 1;
struct rte_mbuf **elts = &(*rxq->elts)[rxq->rq_pi & q_mask];
* Number of buffers to be replenished.
*/
static inline void
-rxq_replenish_bulk_mbuf(struct rxq *rxq, uint16_t n)
+rxq_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
const uint16_t q_n = 1 << rxq->elts_n;
const uint16_t q_mask = q_n - 1;
return;
}
for (i = 0; i < n; ++i)
- wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
- RTE_PKTMBUF_HEADROOM);
+ wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
rxq->rq_ci += n;
rte_wmb();
- *rxq->rq_db = htonl(rxq->rq_ci);
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
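The hunks above and below swap the libibverbs byte-order macros (htons/htonl/htonll) for DPDK's own helpers from rte_byteorder.h. A minimal sketch of the replacement pattern, assuming only that the doorbell is a big-endian 32-bit MMIO word as in the code above (the write_doorbell helper is hypothetical, not part of the patch):

#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical helper mirroring *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci):
 * convert the CPU-endian ring index to big-endian before the MMIO store. */
static inline void
write_doorbell(volatile uint32_t *db, uint32_t ci)
{
	*db = rte_cpu_to_be_32(ci);
}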
/**
* the title completion descriptor to be copied to the rest of mbufs.
*/
static inline void
-rxq_cq_decompress_v(struct rxq *rxq,
+rxq_cq_decompress_v(struct mlx5_rxq_data *rxq,
volatile struct mlx5_cqe *cq,
struct rte_mbuf **elts)
{
offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
+ /*
+ * Not to overflow elts array. Decompress next time after mbuf
+ * replenishment.
+ */
+ if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
+ (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
+ return;
/*
* A. load mCQEs into a 128bit register.
* B. store rearm data to mbuf.
* Pointer to array of packets to be filled.
*/
static inline void
-rxq_cq_to_ptype_oflags_v(struct rxq *rxq, __m128i cqes[4], __m128i op_err,
- struct rte_mbuf **pkts)
+rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
+ __m128i op_err, struct rte_mbuf **pkts)
{
__m128i pinfo0, pinfo1;
__m128i pinfo, ptype;
* Number of packets successfully received (<= pkts_n).
*/
static uint16_t
-rxq_handle_pending_error(struct rxq *rxq, struct rte_mbuf **pkts,
+rxq_handle_pending_error(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
uint16_t pkts_n)
{
uint16_t n = 0;
* Number of packets received including errors (<= pkts_n).
*/
static inline uint16_t
-rxq_burst_v(struct rxq *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
const uint16_t q_n = 1 << rxq->cqe_n;
const uint16_t q_mask = q_n - 1;
}
elts_idx = rxq->rq_pi & q_mask;
elts = &(*rxq->elts)[elts_idx];
- /* Not to overflow pkts array. */
- pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
+ pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
+ (uint16_t)(rxq->rq_ci - rxq->cq_ci));
+ /* Not to overflow pkts/elts array. */
+ pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
if (!pkts_n)
}
}
rte_wmb();
- *rxq->cq_db = htonl(rxq->cq_ci);
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
return rcvd_pkt;
}
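Both new overflow guards above lean on free-running uint16_t ring indices: (uint16_t)(rxq->rq_ci - rxq->cq_ci) stays correct across counter wrap because unsigned subtraction is modulo 2^16. A standalone sketch of that wrap-safe distance (the ring_distance helper and sample values are illustrative only):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: distance between two free-running uint16_t indices. */
static inline uint16_t
ring_distance(uint16_t head, uint16_t tail)
{
	/* Modulo-65536 subtraction survives counter wraparound. */
	return (uint16_t)(head - tail);
}

int
main(void)
{
	assert(ring_distance(10, 65530) == 16); /* head wrapped past 65535 */
	assert(ring_distance(100, 40) == 60);   /* no wrap */
	return 0;
}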
uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- struct rxq *rxq = dpdk_rxq;
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
uint16_t nb_rx;
nb_rx = rxq_burst_v(rxq, pkts, pkts_n);
/* All the configured queues should support. */
for (i = 0; i < priv->txqs_n; ++i) {
- struct txq *txq = (*priv->txqs)[i];
+ struct mlx5_txq_data *txq = (*priv->txqs)[i];
if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-rxq_check_vec_support(struct rxq *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
- struct rxq_ctrl *ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct mlx5_rxq_ctrl *ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
if (!ctrl->priv->rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
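The ctrl lookup above uses the container_of() pattern (available from rte_common.h in DPDK) to step from the embedded mlx5_rxq_data back to its enclosing control structure. A minimal, self-contained sketch with hypothetical struct names:

#include <rte_common.h>

struct sample_ctrl {
	int rx_vec_en;
	struct sample_data { unsigned int sges_n; } data;
};

/* Recover the enclosing object by subtracting the member's offset. */
static struct sample_ctrl *
sample_data_to_ctrl(struct sample_data *d)
{
	return container_of(d, struct sample_ctrl, data);
}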
return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+ if (!rxq)
+ continue;
if (rxq_check_vec_support(rxq) < 0)
break;
}