#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
+#include <rte_memory.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/**
{
unsigned int elts_comp = txq->elts_comp;
unsigned int elts_tail = txq->elts_tail;
+ unsigned int elts_free = txq->elts_tail;
const unsigned int elts_n = txq->elts_n;
int wcs_n;
DEBUG("%p: processing %u work requests completions",
(void *)txq, elts_comp);
#endif
- wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
+ wcs_n = txq->poll_cnt(txq->cq, elts_comp);
if (unlikely(wcs_n == 0))
return 0;
if (unlikely(wcs_n < 0)) {
elts_tail += wcs_n * txq->elts_comp_cd_init;
if (elts_tail >= elts_n)
elts_tail -= elts_n;
+
+ while (elts_free != elts_tail) {
+ struct txq_elt *elt = &(*txq->elts)[elts_free];
+ unsigned int elts_free_next =
+ (((elts_free + 1) == elts_n) ? 0 : elts_free + 1);
+ struct rte_mbuf *tmp = elt->buf;
+ struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];
+
+ RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+ /* Faster than rte_pktmbuf_free(). */
+ do {
+ struct rte_mbuf *next = NEXT(tmp);
+
+ rte_pktmbuf_free_seg(tmp);
+ tmp = next;
+ } while (tmp != NULL);
+ elts_free = elts_free_next;
+ }
+
txq->elts_tail = elts_tail;
txq->elts_comp = elts_comp;
return 0;
}
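+/*
+ * Editor's note (illustration, not part of the original patch): completions
+ * are requested once every txq->elts_comp_cd_init packets, so one work
+ * completion accounts for that many TX elements. With elts_comp_cd_init == 4
+ * and wcs_n == 2, elts_tail advances by 8 entries (modulo elts_n) and the
+ * loop above frees the mbufs of exactly those entries.
+ */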
+/* For best performance, this function should not be inlined. */
+struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *)
+ __attribute__((noinline));
+
+/**
+ * Register mempool as a memory region.
+ *
+ * @param pd
+ * Pointer to protection domain.
+ * @param mp
+ * Pointer to memory pool.
+ *
+ * @return
+ * Memory region pointer, NULL in case of error.
+ */
+struct ibv_mr *
+mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ uintptr_t start = mp->elt_va_start;
+ uintptr_t end = mp->elt_va_end;
+ unsigned int i;
+
+ DEBUG("mempool %p area start=%p end=%p size=%zu",
+ (const void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ /* Round start and end to page boundary if found in memory segments. */
+ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+ uintptr_t addr = (uintptr_t)ms[i].addr;
+ size_t len = ms[i].len;
+ unsigned int align = ms[i].hugepage_sz;
+
+ if ((start > addr) && (start < addr + len))
+ start = RTE_ALIGN_FLOOR(start, align);
+ if ((end > addr) && (end < addr + len))
+ end = RTE_ALIGN_CEIL(end, align);
+ }
+ DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
+ (const void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
+ return ibv_reg_mr(pd,
+ (void *)start,
+ end - start,
+ IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
+}
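+/*
+ * Usage sketch (editor's illustration, not part of the original patch;
+ * "priv" stands for the per-device private structure holding the
+ * protection domain, as elsewhere in this PMD):
+ *
+ *	struct ibv_mr *mr = mlx5_mp2mr(priv->pd, mp);
+ *
+ *	if (mr == NULL)
+ *		ERROR("%p: MR registration failed", (const void *)mp);
+ *	else
+ *		claim_zero(ibv_dereg_mr(mr));
+ */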
+
+/**
+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool of the
+ * direct mbuf holding its data is returned instead.
+ *
+ * @param buf
+ * Pointer to mbuf.
+ *
+ * @return
+ * Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+txq_mb2mp(struct rte_mbuf *buf)
+{
+ if (unlikely(RTE_MBUF_INDIRECT(buf)))
+ return rte_mbuf_from_indirect(buf)->pool;
+ return buf->pool;
+}
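+/*
+ * Editor's illustration (hypothetical pools, not part of this patch):
+ *
+ *	struct rte_mbuf *md = rte_pktmbuf_alloc(mp_data);
+ *	struct rte_mbuf *mc = rte_pktmbuf_clone(md, mp_clone);
+ *
+ *	assert(txq_mb2mp(mc) == mp_data); (not mp_clone)
+ *
+ * The MR lookup must match the pool backing the data the SGE will point to,
+ * which for an indirect mbuf is the direct mbuf's pool.
+ */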
+
/**
* Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
* Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
{
unsigned int i;
struct ibv_mr *mr;
}
}
/* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool %p", (void *)txq, (void *)mp);
- mr = ibv_reg_mr(txq->priv->pd,
- (void *)mp->elt_va_start,
- (mp->elt_va_end - mp->elt_va_start),
- (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
+ DEBUG("%p: discovered new memory pool \"%s\" (%p)",
+ (void *)txq, mp->name, (const void *)mp);
+ mr = mlx5_mp2mr(txq->priv->pd, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
(void *)txq);
DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
(void *)txq);
--i;
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+ claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
txq->mp2mr[i].mp = mp;
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
- DEBUG("%p: new MR lkey for MP %p: 0x%08" PRIu32,
- (void *)txq, (void *)mp, txq->mp2mr[i].lkey);
+ DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
+ (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
+struct txq_mp2mr_mbuf_check_data {
+ const struct rte_mempool *mp;
+ int ret;
+};
+
+/**
+ * Callback function for rte_mempool_obj_iter() to check whether a given
+ * mempool object looks like a mbuf.
+ *
+ * @param[in, out] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data). Contains the mempool
+ * pointer; data->ret is set to zero when the object looks like a mbuf,
+ * nonzero otherwise.
+ * @param[in] start
+ * Object start address.
+ * @param[in] end
+ * Object end address.
+ * @param index
+ * Unused.
+ */
+static void
+txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
+ uint32_t index __rte_unused)
+{
+ struct txq_mp2mr_mbuf_check_data *data = arg;
+ struct rte_mbuf *buf =
+ (void *)((uintptr_t)start + data->mp->header_size);
+
+ /* Check whether mbuf structure fits element size and whether mempool
+ * pointer is valid. */
+ if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
+ (buf->pool == data->mp))
+ data->ret = 0;
+ else
+ data->ret = -1;
+}
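+/*
+ * Editor's note (layout sketch, not part of the original patch): a mempool
+ * element is laid out as
+ *
+ *	| header_size | struct rte_mbuf + priv + data room | trailer_size |
+ *	^ start                                             ^ end
+ *
+ * so the check above expects a complete mbuf header_size bytes past start,
+ * ending before end, whose pool field points back at the mempool examined.
+ */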
+
+/**
+ * Iterator function for rte_mempool_walk() to register existing mempools and
+ * fill the MP to MR cache of a TX queue.
+ *
+ * @param[in] mp
+ * Memory Pool to register.
+ * @param arg
+ * Pointer to TX queue structure.
+ */
+void
+txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
+{
+ struct txq *txq = arg;
+ struct txq_mp2mr_mbuf_check_data data = {
+ .mp = mp,
+ .ret = -1,
+ };
+
+ /* Discard empty mempools. */
+ if (mp->size == 0)
+ return;
+ /* Register mempool only if the first element looks like a mbuf. */
+ rte_mempool_obj_iter((void *)mp->elt_va_start,
+ 1,
+ mp->header_size + mp->elt_size + mp->trailer_size,
+ 1,
+ mp->elt_pa,
+ mp->pg_num,
+ mp->pg_shift,
+ txq_mp2mr_mbuf_check,
+ &data);
+ if (data.ret)
+ return;
+ txq_mp2mr(txq, mp);
+}
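+/*
+ * Usage sketch (editor's illustration, not part of the original patch):
+ * during TX queue setup, existing mempools can be registered up front with:
+ *
+ *	rte_mempool_walk(txq_mp2mr_iter, txq);
+ *
+ * rte_mempool_walk() calls the iterator once per mempool; pools that do not
+ * look like mbuf pools are filtered out by the check above.
+ */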
+
#if MLX5_PMD_SGE_WR_N > 1
/**
uint32_t lkey;
/* Retrieve Memory Region key for this memory pool. */
- lkey = txq_mp2mr(txq, buf->pool);
+ lkey = txq_mp2mr(txq, txq_mb2mp(buf));
if (unlikely(lkey == (uint32_t)-1)) {
/* MR does not exist. */
DEBUG("%p: unable to get MP <-> MR association",
sge->length = size;
sge->lkey = txq->mr_linear->lkey;
sent_size += size;
+ /* Include last segment. */
+ segs++;
}
return (struct tx_burst_sg_ret){
.length = sent_size,
{
struct txq *txq = (struct txq *)dpdk_txq;
unsigned int elts_head = txq->elts_head;
- const unsigned int elts_tail = txq->elts_tail;
const unsigned int elts_n = txq->elts_n;
unsigned int elts_comp_cd = txq->elts_comp_cd;
unsigned int elts_comp = 0;
unsigned int i;
unsigned int max;
int err;
+ struct rte_mbuf *buf = pkts[0];
assert(elts_comp_cd != 0);
+ /* Prefetch first packet cacheline. */
+ rte_prefetch0(buf);
txq_complete(txq);
- max = (elts_n - (elts_head - elts_tail));
+ max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
assert(max >= 1);
if (max > pkts_n)
max = pkts_n;
for (i = 0; (i != max); ++i) {
- struct rte_mbuf *buf = pkts[i];
+ struct rte_mbuf *buf_next = pkts[i + 1];
unsigned int elts_head_next =
(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
- struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
struct txq_elt *elt = &(*txq->elts)[elts_head];
unsigned int segs = NB_SEGS(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ unsigned int sent_size = 0;
+#endif
uint32_t send_flags = 0;
- /* Clean up old buffer. */
- if (likely(elt->buf != NULL)) {
- struct rte_mbuf *tmp = elt->buf;
-
- /* Faster than rte_pktmbuf_free(). */
- do {
- struct rte_mbuf *next = NEXT(tmp);
-
- rte_pktmbuf_free_seg(tmp);
- tmp = next;
- } while (tmp != NULL);
- }
+ if (i + 1 < max)
+ rte_prefetch0(buf_next);
/* Request TX completion. */
if (unlikely(--elts_comp_cd == 0)) {
elts_comp_cd = txq->elts_comp_cd_init;
++elts_comp;
send_flags |= IBV_EXP_QP_BURST_SIGNALED;
}
+ /* Should we enable HW CKSUM offload? */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
+ /* HW does not support checksum offloads at arbitrary
+ * offsets but automatically recognizes the packet
+ * type. For inner L3/L4 checksums, only VXLAN (UDP)
+ * tunnels are currently supported. */
+ if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
+ send_flags |= IBV_EXP_QP_BURST_TUNNEL;
+ }
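+ /* Editor's note (illustration): applications request these offloads
+ * per packet before calling the burst function, e.g.
+ * buf->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM; */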
if (likely(segs == 1)) {
uintptr_t addr;
uint32_t length;
uint32_t lkey;
+ uintptr_t buf_next_addr;
/* Retrieve buffer information. */
addr = rte_pktmbuf_mtod(buf, uintptr_t);
length = DATA_LEN(buf);
- /* Retrieve Memory Region key for this memory pool. */
- lkey = txq_mp2mr(txq, buf->pool);
- if (unlikely(lkey == (uint32_t)-1)) {
- /* MR does not exist. */
- DEBUG("%p: unable to get MP <-> MR"
- " association", (void *)txq);
- /* Clean up TX element. */
- elt->buf = NULL;
- goto stop;
- }
/* Update element. */
elt->buf = buf;
if (txq->priv->vf)
rte_prefetch0((volatile void *)
(uintptr_t)addr);
- RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+ /* Prefetch next buffer data. */
+ if (i + 1 < max) {
+ buf_next_addr =
+ rte_pktmbuf_mtod(buf_next, uintptr_t);
+ rte_prefetch0((volatile void *)
+ (uintptr_t)buf_next_addr);
+ }
/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
if (length <= txq->max_inline)
- err = txq->if_qp->send_pending_inline
+ err = txq->send_pending_inline
(txq->qp,
(void *)addr,
length,
send_flags);
else
#endif
- err = txq->if_qp->send_pending
+ {
+ /* Retrieve Memory Region key for this
+ * memory pool. */
+ lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+ if (unlikely(lkey == (uint32_t)-1)) {
+ /* MR does not exist. */
+ DEBUG("%p: unable to get MP <-> MR"
+ " association", (void *)txq);
+ /* Clean up TX element. */
+ elt->buf = NULL;
+ goto stop;
+ }
+ err = txq->send_pending
(txq->qp,
addr,
length,
lkey,
send_flags);
+ }
if (unlikely(err))
goto stop;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ sent_size += length;
+#endif
} else {
#if MLX5_PMD_SGE_WR_N > 1
struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
&sges);
if (ret.length == (unsigned int)-1)
goto stop;
- RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
/* Put SG list into send queue. */
- err = txq->if_qp->send_pending_sg_list
+ err = txq->send_pending_sg_list
(txq->qp,
sges,
ret.num,
send_flags);
if (unlikely(err))
goto stop;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ sent_size += ret.length;
+#endif
#else /* MLX5_PMD_SGE_WR_N > 1 */
DEBUG("%p: TX scattered buffers support not"
" compiled in", (void *)txq);
#endif /* MLX5_PMD_SGE_WR_N > 1 */
}
elts_head = elts_head_next;
+ buf = buf_next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += sent_size;
+#endif
}
stop:
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
return 0;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
/* Ring QP doorbell. */
- err = txq->if_qp->send_flush(txq->qp);
+ err = txq->send_flush(txq->qp);
if (unlikely(err)) {
/* A nonzero value is not supposed to be returned.
* Nothing can be done about it. */
return i;
}
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ * RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ * Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(uint32_t flags)
+{
+ uint32_t pkt_type;
+
+ if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+ pkt_type =
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
+ RTE_PTYPE_L3_IPV4) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
+ RTE_PTYPE_L3_IPV6) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV4_PACKET,
+ RTE_PTYPE_INNER_L3_IPV4) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV6_PACKET,
+ RTE_PTYPE_INNER_L3_IPV6);
+ else
+ pkt_type =
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV4_PACKET,
+ RTE_PTYPE_L3_IPV4) |
+ TRANSPOSE(flags,
+ IBV_EXP_CQ_RX_IPV6_PACKET,
+ RTE_PTYPE_L3_IPV6);
+ return pkt_type;
+}
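+/*
+ * Editor's note (not part of the original patch): TRANSPOSE(val, from, to)
+ * scales the bits of "val" masked by "from" so they land on "to"; e.g. when
+ * flags has IBV_EXP_CQ_RX_IPV4_PACKET set,
+ *
+ *	TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4)
+ *
+ * evaluates to RTE_PTYPE_L3_IPV4 and to 0 otherwise, which is what allows
+ * the flag sets above to be OR'ed together branchlessly.
+ */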
+
+/**
+ * Translate RX completion flags to offload flags.
+ *
+ * @param[in] rxq
+ * Pointer to RX queue structure.
+ * @param flags
+ * RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ * Offload flags (ol_flags) for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
+{
+ uint32_t ol_flags = 0;
+
+ if (rxq->csum) {
+ /* Set IP checksum flag only for IPv4/IPv6 packets. */
+ if (flags &
+ (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET))
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_IP_CSUM_OK,
+ PKT_RX_IP_CKSUM_BAD);
+#ifdef HAVE_EXP_CQ_RX_TCP_PACKET
+ /* Set L4 checksum flag only for TCP/UDP packets. */
+ if (flags &
+ (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET))
+#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_BAD);
+ }
+ /*
+ * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
+ * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
+ * (its value is 0).
+ */
+ if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+ PKT_RX_IP_CKSUM_BAD) |
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_BAD);
+ return ol_flags;
+}
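+/*
+ * Editor's note (not part of the original patch): the completion reports
+ * *_CSUM_OK bits while mbufs carry *_CKSUM_BAD bits, hence the inversion:
+ *
+ *	TRANSPOSE(~flags, IBV_EXP_CQ_RX_IP_CSUM_OK, PKT_RX_IP_CKSUM_BAD)
+ *
+ * yields 0 when the checksum was verified OK and PKT_RX_IP_CKSUM_BAD when
+ * it was not.
+ */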
+
/**
* DPDK callback for RX with scattered packets support.
*
struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
const unsigned int elts_n = rxq->elts_n;
unsigned int elts_head = rxq->elts_head;
- struct ibv_recv_wr head;
- struct ibv_recv_wr **next = &head.next;
- struct ibv_recv_wr *bad_wr;
unsigned int i;
unsigned int pkts_ret = 0;
int ret;
return 0;
for (i = 0; (i != pkts_n); ++i) {
struct rxq_elt_sp *elt = &(*elts)[elts_head];
- struct ibv_recv_wr *wr = &elt->wr;
- uint64_t wr_id = wr->wr_id;
unsigned int len;
unsigned int pkt_buf_len;
struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
unsigned int j = 0;
uint32_t flags;
+ uint16_t vlan_tci;
/* Sanity checks. */
-#ifdef NDEBUG
- (void)wr_id;
-#endif
- assert(wr_id < rxq->elts_n);
- assert(wr->sg_list == elt->sges);
- assert(wr->num_sge == RTE_DIM(elt->sges));
assert(elts_head < rxq->elts_n);
assert(rxq->elts_head < rxq->elts_n);
- ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
- &flags);
+ ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
" completion status (%d): %s",
(void *)rxq, wc.wr_id, wc.status,
ibv_wc_status_str(wc.status));
- /* Link completed WRs together for repost. */
- *next = wr;
- next = &wr->next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment dropped packets counter. */
+ ++rxq->stats.idropped;
+#endif
goto repost;
}
ret = wc.byte_len;
break;
len = ret;
pkt_buf_len = len;
- /* Link completed WRs together for repost. */
- *next = wr;
- next = &wr->next;
/*
* Replace spent segments with new ones, concatenate and
* return them as pkt_buf.
struct rte_mbuf *rep;
unsigned int seg_tailroom;
+ assert(seg != NULL);
/*
* Fetch initial bytes of packet descriptor into a
* cacheline while allocating rep.
* Unable to allocate a replacement mbuf,
* repost WR.
*/
- DEBUG("rxq=%p, wr_id=%" PRIu64 ":"
- " can't allocate a new mbuf",
- (void *)rxq, wr_id);
+ DEBUG("rxq=%p: can't allocate a new mbuf",
+ (void *)rxq);
if (pkt_buf != NULL) {
*pkt_buf_next = NULL;
rte_pktmbuf_free(pkt_buf);
}
/* Increment out of memory counters. */
+ ++rxq->stats.rx_nombuf;
++rxq->priv->dev->data->rx_mbuf_alloc_failed;
goto repost;
}
NB_SEGS(pkt_buf) = j;
PORT(pkt_buf) = rxq->port_id;
PKT_LEN(pkt_buf) = pkt_buf_len;
+ if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
+ pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
+ pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
+ pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt_buf->vlan_tci = vlan_tci;
+ }
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ }
/* Return packet. */
*(pkts++) = pkt_buf;
++pkts_ret;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += pkt_buf_len;
+#endif
repost:
+ ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
+ if (unlikely(ret)) {
+ /* Inability to repost WRs is fatal. */
+ DEBUG("%p: recv_sg_list(): failed (ret=%d)",
+ (void *)rxq->priv,
+ ret);
+ abort();
+ }
if (++elts_head >= elts_n)
elts_head = 0;
continue;
}
if (unlikely(i == 0))
return 0;
- *next = NULL;
- /* Repost WRs. */
-#ifdef DEBUG_RECV
- DEBUG("%p: reposting %d WRs", (void *)rxq, i);
-#endif
- ret = ibv_post_recv(rxq->qp, head.next, &bad_wr);
- if (unlikely(ret)) {
- /* Inability to repost WRs is fatal. */
- DEBUG("%p: ibv_post_recv(): failed for WR %p: %s",
- (void *)rxq->priv,
- (void *)bad_wr,
- strerror(ret));
- abort();
- }
rxq->elts_head = elts_head;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += pkts_ret;
+#endif
return pkts_ret;
}
return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
for (i = 0; (i != pkts_n); ++i) {
struct rxq_elt *elt = &(*elts)[elts_head];
- struct ibv_recv_wr *wr = &elt->wr;
- uint64_t wr_id = wr->wr_id;
unsigned int len;
- struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr -
- WR_ID(wr_id).offset);
+ struct rte_mbuf *seg = elt->buf;
struct rte_mbuf *rep;
uint32_t flags;
+ uint16_t vlan_tci;
/* Sanity checks. */
- assert(WR_ID(wr_id).id < rxq->elts_n);
- assert(wr->sg_list == &elt->sge);
- assert(wr->num_sge == 1);
+ assert(seg != NULL);
assert(elts_head < rxq->elts_n);
assert(rxq->elts_head < rxq->elts_n);
/*
*/
rte_prefetch0(seg);
rte_prefetch0(&seg->cacheline1);
- ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
- &flags);
+ ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
" completion status (%d): %s",
(void *)rxq, wc.wr_id, wc.status,
ibv_wc_status_str(wc.status));
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment dropped packets counter. */
+ ++rxq->stats.idropped;
+#endif
/* Add SGE to array for repost. */
sges[i] = elt->sge;
goto repost;
* Unable to allocate a replacement mbuf,
* repost WR.
*/
- DEBUG("rxq=%p, wr_id=%" PRIu32 ":"
- " can't allocate a new mbuf",
- (void *)rxq, WR_ID(wr_id).id);
+ DEBUG("rxq=%p: can't allocate a new mbuf",
+ (void *)rxq);
/* Increment out of memory counters. */
+ ++rxq->stats.rx_nombuf;
++rxq->priv->dev->data->rx_mbuf_alloc_failed;
goto repost;
}
/* Reconfigure sge to use rep instead of seg. */
elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
assert(elt->sge.lkey == rxq->mr->lkey);
- WR_ID(wr->wr_id).offset =
- (((uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM) -
- (uintptr_t)rep);
- assert(WR_ID(wr->wr_id).id == WR_ID(wr_id).id);
+ elt->buf = rep;
/* Add SGE to array for repost. */
sges[i] = elt->sge;
NEXT(seg) = NULL;
PKT_LEN(seg) = len;
DATA_LEN(seg) = len;
-
+ if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
+ seg->packet_type = rxq_cq_to_pkt_type(flags);
+ seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
+ seg->ol_flags |= PKT_RX_VLAN_PKT;
+ seg->vlan_tci = vlan_tci;
+ }
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ }
/* Return packet. */
*(pkts++) = seg;
++pkts_ret;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += len;
+#endif
repost:
if (++elts_head >= elts_n)
elts_head = 0;
#ifdef DEBUG_RECV
DEBUG("%p: reposting %u WRs", (void *)rxq, i);
#endif
- ret = rxq->if_qp->recv_burst(rxq->qp, sges, i);
+ ret = rxq->recv(rxq->wq, sges, i);
if (unlikely(ret)) {
/* Inability to repost WRs is fatal. */
DEBUG("%p: recv_burst(): failed (ret=%d)",
abort();
}
rxq->elts_head = elts_head;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += pkts_ret;
+#endif
return pkts_ret;
}