/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
txq_complete(struct txq *txq)
{
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	int wcs_n;

	if (unlikely(elts_comp == 0))
		return 0;
	DEBUG("%p: processing %u work requests completions",
	      (void *)txq, elts_comp);
	wcs_n = txq->poll_cnt(txq->cq, elts_comp);
	if (unlikely(wcs_n == 0))
		return 0;
	if (unlikely(wcs_n < 0)) {
		DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
		      (void *)txq, wcs_n);
		return -1;
	}
	elts_comp -= wcs_n;
	assert(elts_comp <= txq->elts_comp);
	/*
	 * Assume WC status is successful as nothing can be done about it
	 * anyway.
	 */
	elts_tail += wcs_n * txq->elts_comp_cd_init;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;
	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
	return 0;
}
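
/*
 * Example (illustrative; the actual value comes from
 * MLX5_PMD_TX_PER_COMP_REQ): with elts_comp_cd_init == 64, each work
 * completion returned by poll_cnt() accounts for 64 posted WRs, so
 * elts_tail advances by wcs_n * 64 and wraps around elts_n.
 */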

/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
txq_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static uint32_t
txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
{
	unsigned int i;
	struct ibv_mr *mr;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
			return txq->mp2mr[i].lkey;
		}
	}
	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq, mp->name, (const void *)mp);
	mr = ibv_reg_mr(txq->priv->pd,
			(void *)mp->elt_va_start,
			(mp->elt_va_end - mp->elt_va_start),
			(IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq);
		return (uint32_t)-1;
	}
	if (unlikely(i == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq);
		--i;
		claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq->mp2mr[i].mp = mp;
	txq->mp2mr[i].mr = mr;
	txq->mp2mr[i].lkey = mr->lkey;
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
	return txq->mp2mr[i].lkey;
}
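
/*
 * Usage sketch (hypothetical caller code, mirroring what the TX path below
 * does for every mbuf segment):
 *
 *	uint32_t lkey = txq_mp2mr(txq, txq_mb2mp(buf));
 *	if (unlikely(lkey == (uint32_t)-1))
 *		// no MR could be associated, stop the burst
 *
 * The linear scan stays cheap because RTE_DIM(txq->mp2mr) is a small
 * compile-time constant and most applications transmit from few mempools.
 */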

struct txq_mp2mr_mbuf_check_data {
	const struct rte_mempool *mp;
	int ret;
};

/**
 * Callback function for rte_mempool_obj_iter() to check whether a given
 * mempool object looks like a mbuf.
 *
 * @param[in, out] arg
 *   Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
 *   and return value.
 * @param[in] start
 *   Object start address.
 * @param[in] end
 *   Object end address.
 * @param index
 *   Unused.
 *
 * @return
 *   Nonzero value in data->ret when the object is not a mbuf.
 */
static void
txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
		     uint32_t index __rte_unused)
{
	struct txq_mp2mr_mbuf_check_data *data = arg;
	struct rte_mbuf *buf =
		(void *)((uintptr_t)start + data->mp->header_size);

	/* Check whether mbuf structure fits element size and whether mempool
	 * pointer is valid. */
	if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
	    (buf->pool == data->mp))
		data->ret = 0;
	else
		data->ret = -1;
}

/**
 * Iterator function for rte_mempool_walk() to register existing mempools and
 * fill the MP to MR cache of a TX queue.
 *
 * @param[in] mp
 *   Memory Pool to register.
 * @param *arg
 *   Pointer to TX queue structure.
 */
void
txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
{
	struct txq *txq = arg;
	struct txq_mp2mr_mbuf_check_data data = {
		.mp = mp,
		.ret = -1,
	};

	/* Discard empty mempools. */
	if (mp->size == 0)
		return;
	/* Register mempool only if the first element looks like a mbuf. */
	rte_mempool_obj_iter((void *)mp->elt_va_start,
			     1,
			     mp->header_size + mp->elt_size + mp->trailer_size,
			     1,
			     mp->elt_pa,
			     mp->pg_num,
			     mp->pg_shift,
			     txq_mp2mr_mbuf_check,
			     &data);
	if (data.ret)
		return;
	txq_mp2mr(txq, mp);
}
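
/*
 * Walking all existing mempools at queue setup time (through
 * rte_mempool_walk()) pre-populates the MP to MR cache, so the data path
 * normally never has to call ibv_reg_mr() while transmitting; only mempools
 * whose first element looks like a mbuf are registered.
 */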

#if MLX5_PMD_SGE_WR_N > 1

/**
 * Copy scattered mbuf contents to a single linear buffer.
 *
 * @param[out] linear
 *   Linear output buffer.
 * @param[in] buf
 *   Scattered input buffer.
 *
 * @return
 *   Number of bytes copied to the output buffer or 0 if not large enough.
 */
static unsigned int
linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
{
	unsigned int size = 0;
	unsigned int offset;

	do {
		unsigned int len = DATA_LEN(buf);

		offset = size;
		size += len;
		if (unlikely(size > sizeof(*linear)))
			return 0;
		memcpy(&(*linear)[offset],
		       rte_pktmbuf_mtod(buf, uint8_t *),
		       len);
		buf = NEXT(buf);
	} while (buf != NULL);
	return size;
}
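
/*
 * Illustrative example (sizes assumed): with MLX5_PMD_SGE_WR_N == 4, a
 * 6-segment chain is transmitted as 3 regular SGEs plus a final SGE that
 * points at the linear_t bounce buffer filled by linearize_mbuf() with the
 * remaining 3 segments; chains whose tail exceeds sizeof(*linear) are
 * rejected (return value 0) and the packet is not sent.
 */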

/**
 * Handle scattered buffers for mlx5_tx_burst().
 *
 * @param txq
 *   TX queue structure.
 * @param segs
 *   Number of segments in buf.
 * @param elt
 *   TX queue element to fill.
 * @param[in] buf
 *   Buffer to process.
 * @param elts_head
 *   Index of the linear buffer to use if necessary (normally txq->elts_head).
 * @param[out] sges
 *   Array filled with SGEs on success.
 *
 * @return
 *   A structure containing the processed packet size in bytes and the
 *   number of SGEs. Both fields are set to (unsigned int)-1 in case of
 *   failure.
 */
static struct tx_burst_sg_ret {
	unsigned int length;
	unsigned int num;
}
tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
	    struct rte_mbuf *buf, unsigned int elts_head,
	    struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N])
{
	unsigned int sent_size = 0;
	unsigned int j;
	int linearize = 0;

	/* When there are too many segments, extra segments are
	 * linearized in the last SGE. */
	if (unlikely(segs > RTE_DIM(*sges))) {
		segs = (RTE_DIM(*sges) - 1);
		linearize = 1;
	}
	/* Update element. */
	elt->buf = buf;
	/* Register segments as SGEs. */
	for (j = 0; (j != segs); ++j) {
		struct ibv_sge *sge = &(*sges)[j];
		uint32_t lkey;

		/* Retrieve Memory Region key for this memory pool. */
		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
		if (unlikely(lkey == (uint32_t)-1)) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* Update SGE. */
		sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
		if (txq->priv->vf)
			rte_prefetch0((volatile void *)
				      (uintptr_t)sge->addr);
		sge->length = DATA_LEN(buf);
		sge->lkey = lkey;
		sent_size += sge->length;
		buf = NEXT(buf);
	}
	/* If buf is not NULL here and is not going to be linearized,
	 * nb_segs is not valid. */
	assert(j == segs);
	assert((buf == NULL) || (linearize));
	/* Linearize extra segments. */
	if (linearize) {
		struct ibv_sge *sge = &(*sges)[segs];
		linear_t *linear = &(*txq->elts_linear)[elts_head];
		unsigned int size = linearize_mbuf(linear, buf);

		assert(segs == (RTE_DIM(*sges) - 1));
		if (unlikely(size == 0)) {
			/* Invalid packet. */
			DEBUG("%p: packet too large to be linearized.",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */
		if (RTE_DIM(*sges) == 1) {
			do {
				struct rte_mbuf *next = NEXT(buf);

				rte_pktmbuf_free_seg(buf);
				buf = next;
			} while (buf != NULL);
			elt->buf = NULL;
		}
		/* Update SGE. */
		sge->addr = (uintptr_t)&(*linear)[0];
		sge->length = size;
		sge->lkey = txq->mr_linear->lkey;
		sent_size += size;
		/* Include last segment. */
		segs++;
	}
	return (struct tx_burst_sg_ret){
		.length = sent_size,
		.num = segs,
	};
stop:
	return (struct tx_burst_sg_ret){
		.length = -1,
		.num = -1,
	};
}

#endif /* MLX5_PMD_SGE_WR_N > 1 */
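
/*
 * Note: when MLX5_PMD_SGE_WR_N == 1 the whole chain ends up in the linear
 * bounce buffer and the mbufs can be freed immediately, whereas with more
 * SGEs available only the segments beyond RTE_DIM(*sges) - 1 are copied and
 * the chain remains attached to the TX element until its completion is
 * processed.
 */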

/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int elts_comp_cd = txq->elts_comp_cd;
	unsigned int elts_comp = 0;
	unsigned int i;
	unsigned int max;
	int err;
	struct rte_mbuf *buf = pkts[0];

	assert(elts_comp_cd != 0);
	/* Prefetch first packet cacheline. */
	rte_prefetch0(buf);
	txq_complete(txq);
	max = (elts_n - (elts_head - txq->elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max == 0)
		return 0;
	if (max > pkts_n)
		max = pkts_n;
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf_next = pkts[i + 1];
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		unsigned int segs = NB_SEGS(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
		unsigned int sent_size = 0;
#endif
		uint32_t send_flags = 0;

		/* Clean up old buffer. */
		if (likely(elt->buf != NULL)) {
			struct rte_mbuf *tmp = elt->buf;

			/* Faster than rte_pktmbuf_free(). */
			do {
				struct rte_mbuf *next = NEXT(tmp);

				rte_pktmbuf_free_seg(tmp);
				tmp = next;
			} while (tmp != NULL);
		}
		rte_prefetch0(buf_next);
		/* Request TX completion. */
		if (unlikely(--elts_comp_cd == 0)) {
			elts_comp_cd = txq->elts_comp_cd_init;
			++elts_comp;
			send_flags |= IBV_EXP_QP_BURST_SIGNALED;
		}
		/* Should we enable HW CKSUM offload */
		if (buf->ol_flags &
		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
			/* HW does not support checksum offloads at arbitrary
			 * offsets but automatically recognizes the packet
			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
			 * tunnels are currently supported. */
			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
		}
		if (likely(segs == 1)) {
			uintptr_t addr;
			uint32_t length;
			uint32_t lkey;
			uintptr_t buf_next_addr;

			/* Retrieve buffer information. */
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			length = DATA_LEN(buf);
			/* Update element. */
			elt->buf = buf;
			if (txq->priv->vf)
				rte_prefetch0((volatile void *)
					      (uintptr_t)addr);
			RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
			/* Prefetch next buffer data. */
			if (i + 1 < max) {
				buf_next_addr =
					rte_pktmbuf_mtod(buf_next, uintptr_t);
				rte_prefetch0((volatile void *)
					      (uintptr_t)buf_next_addr);
			}
			/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
			if (length <= txq->max_inline)
				err = txq->send_pending_inline
					(txq->qp,
					 (void *)addr,
					 length,
					 send_flags);
			else
#endif
			{
				/* Retrieve Memory Region key for this
				 * memory pool. */
				lkey = txq_mp2mr(txq, txq_mb2mp(buf));
				if (unlikely(lkey == (uint32_t)-1)) {
					/* MR does not exist. */
					DEBUG("%p: unable to get MP <-> MR"
					      " association", (void *)txq);
					/* Clean up TX element. */
					elt->buf = NULL;
					goto stop;
				}
				err = txq->send_pending
					(txq->qp,
					 addr,
					 length,
					 lkey,
					 send_flags);
			}
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += length;
#endif
		} else {
#if MLX5_PMD_SGE_WR_N > 1
			struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
			struct tx_burst_sg_ret ret;

			ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
					  &sges);
			if (ret.length == (unsigned int)-1)
				goto stop;
			RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
			/* Put SG list into send queue. */
			err = txq->send_pending_sg_list
				(txq->qp,
				 sges,
				 ret.num,
				 send_flags);
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += ret.length;
#endif
#else /* MLX5_PMD_SGE_WR_N > 1 */
			DEBUG("%p: TX scattered buffers support not"
			      " compiled in", (void *)txq);
			goto stop;
#endif /* MLX5_PMD_SGE_WR_N > 1 */
		}
		elts_head = elts_head_next;
		buf = buf_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += sent_size;
#endif
	}
stop:
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	err = txq->send_flush(txq->qp);
	if (unlikely(err)) {
		/* A nonzero value is not supposed to be returned.
		 * Nothing can be done about it. */
		DEBUG("%p: send_flush() failed with error %d",
		      (void *)txq, err);
	}
	txq->elts_head = elts_head;
	txq->elts_comp += elts_comp;
	txq->elts_comp_cd = elts_comp_cd;
	return i;
}
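
/*
 * Ring occupancy example (illustrative numbers): with elts_n == 256,
 * elts_head == 10 and elts_tail == 250, 16 elements are still in flight,
 * leaving 240 free; since one entry is always kept unused, at most 239
 * packets are accepted in this burst (further capped by pkts_n).
 */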

/**
 * Translate RX completion flags to packet type.
 *
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
	uint32_t pkt_type;

	if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_INNER_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_INNER_L3_IPV6);
	else
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6);
	return pkt_type;
}
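
/*
 * Example (assuming TRANSPOSE() maps a set completion flag to the listed
 * mbuf bit): a VXLAN packet reported with IBV_EXP_CQ_RX_TUNNEL_PACKET,
 * IBV_EXP_CQ_RX_OUTER_IPV4_PACKET and IBV_EXP_CQ_RX_IPV4_PACKET yields
 * RTE_PTYPE_L3_IPV4 | RTE_PTYPE_INNER_L3_IPV4.
 */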

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
{
	uint32_t ol_flags = 0;

	if (rxq->csum)
		ol_flags |=
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	/*
	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
	 * (its value is 0).
	 */
	if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
		ol_flags |=
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	return ol_flags;
}
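
/*
 * The completion flags report "checksum OK" while DPDK mbufs carry
 * "checksum bad" bits, hence the flags are inverted before being
 * transposed: a packet with IBV_EXP_CQ_RX_IP_CSUM_OK cleared ends up
 * with PKT_RX_IP_CKSUM_BAD set.
 */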

/**
 * DPDK callback for RX with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
	const unsigned int elts_n = rxq->elts_n;
	unsigned int elts_head = rxq->elts_head;
	unsigned int i;
	unsigned int pkts_ret = 0;
	int ret;

	if (unlikely(!rxq->sp))
		return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n);
	if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
		return 0;
	for (i = 0; (i != pkts_n); ++i) {
		struct rxq_elt_sp *elt = &(*elts)[elts_head];
		unsigned int len;
		unsigned int pkt_buf_len;
		struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
		struct rte_mbuf **pkt_buf_next = &pkt_buf;
		unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
		unsigned int j = 0;
		uint32_t flags;
		uint16_t vlan_tci;

		/* Sanity checks. */
		assert(elts_head < rxq->elts_n);
		assert(rxq->elts_head < rxq->elts_n);
		ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
		if (unlikely(ret < 0)) {
			struct ibv_wc wc;
			int wcs_n;

			DEBUG("rxq=%p, poll_length() failed (ret=%d)",
			      (void *)rxq, ret);
			/* ibv_poll_cq() must be used in case of failure. */
			wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
			if (unlikely(wcs_n == 0))
				break;
			if (unlikely(wcs_n < 0)) {
				DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
				      (void *)rxq, wcs_n);
				break;
			}
			assert(wcs_n == 1);
			if (unlikely(wc.status != IBV_WC_SUCCESS)) {
				/* Whatever, just repost the offending WR. */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
				      " completion status (%d): %s",
				      (void *)rxq, wc.wr_id, wc.status,
				      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
				/* Increment dropped packets counter. */
				++rxq->stats.idropped;
#endif
				goto repost;
			}
			ret = wc.byte_len;
		}
		if (ret == 0)
			break;
		len = ret;
		pkt_buf_len = len;
		/*
		 * Replace spent segments with new ones, concatenate and
		 * return them as pkt_buf.
		 */
		while (1) {
			struct ibv_sge *sge = &elt->sges[j];
			struct rte_mbuf *seg = elt->bufs[j];
			struct rte_mbuf *rep;
			unsigned int seg_tailroom;

			assert(seg != NULL);
			/*
			 * Fetch initial bytes of packet descriptor into a
			 * cacheline while allocating rep.
			 */
			rte_prefetch0(seg);
			rep = __rte_mbuf_raw_alloc(rxq->mp);
			if (unlikely(rep == NULL)) {
				/*
				 * Unable to allocate a replacement mbuf,
				 * repost WR.
				 */
				DEBUG("rxq=%p: can't allocate a new mbuf",
				      (void *)rxq);
				if (pkt_buf != NULL) {
					*pkt_buf_next = NULL;
					rte_pktmbuf_free(pkt_buf);
				}
				/* Increment out of memory counters. */
				++rxq->stats.rx_nombuf;
				++rxq->priv->dev->data->rx_mbuf_alloc_failed;
				goto repost;
			}
#ifndef NDEBUG
			/* Poison user-modifiable fields in rep. */
			NEXT(rep) = (void *)((uintptr_t)-1);
			SET_DATA_OFF(rep, 0xdead);
			DATA_LEN(rep) = 0xd00d;
			PKT_LEN(rep) = 0xdeadd00d;
#endif
			assert(rep->buf_len == seg->buf_len);
			assert(rep->buf_len == rxq->mb_len);
			/* Reconfigure sge to use rep instead of seg. */
			assert(sge->lkey == rxq->mr->lkey);
			sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
			elt->bufs[j] = rep;
			++j;
			/* Update pkt_buf if it's the first segment, or link
			 * seg to the previous one and update pkt_buf_next. */
			*pkt_buf_next = seg;
			pkt_buf_next = &NEXT(seg);
			/* Update seg information. */
			seg_tailroom = (seg->buf_len - seg_headroom);
			assert(sge->length == seg_tailroom);
			SET_DATA_OFF(seg, seg_headroom);
			if (likely(len <= seg_tailroom)) {
				/* Last segment. */
				DATA_LEN(seg) = len;
				PKT_LEN(seg) = len;
				/* Sanity checks. */
				assert(rte_pktmbuf_headroom(seg) ==
				       seg_headroom);
				assert(rte_pktmbuf_tailroom(seg) ==
				       (seg_tailroom - len));
				break;
			}
			DATA_LEN(seg) = seg_tailroom;
			PKT_LEN(seg) = seg_tailroom;
			/* Sanity checks. */
			assert(rte_pktmbuf_headroom(seg) == seg_headroom);
			assert(rte_pktmbuf_tailroom(seg) == 0);
			/* Fix len and clear headroom for next segments. */
			len -= seg_tailroom;
			seg_headroom = 0;
		}
		/* Update head and tail segments. */
		*pkt_buf_next = NULL;
		assert(pkt_buf != NULL);
		assert(j != 0);
		NB_SEGS(pkt_buf) = j;
		PORT(pkt_buf) = rxq->port_id;
		PKT_LEN(pkt_buf) = pkt_buf_len;
		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
			pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
			pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
			if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
				pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
				pkt_buf->vlan_tci = vlan_tci;
			}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
		}
		/* Return packet. */
		*(pkts++) = pkt_buf;
		++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt_buf_len;
#endif
repost:
		ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
		if (unlikely(ret)) {
			/* Inability to repost WRs is fatal. */
			DEBUG("%p: recv_sg_list(): failed (ret=%d)",
			      (void *)rxq->priv, ret);
			abort();
		}
		if (++elts_head >= elts_n)
			elts_head = 0;
	}
	if (unlikely(i == 0))
		return 0;
	rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}
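
/*
 * Design note: each received segment is immediately replaced by a freshly
 * allocated mbuf so the SGE can be reposted right away, while the original
 * segments are chained and handed to the application. If allocation fails,
 * the partially built chain is freed and the WR is reposted unchanged,
 * dropping the packet rather than stalling the queue.
 */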

/**
 * DPDK callback for RX.
 *
 * The following function is the same as mlx5_rx_burst_sp(), except it doesn't
 * manage scattered packets. Improves performance when MRU is lower than the
 * size of the first segment.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
	const unsigned int elts_n = rxq->elts_n;
	unsigned int elts_head = rxq->elts_head;
	struct ibv_sge sges[pkts_n];
	unsigned int i;
	unsigned int pkts_ret = 0;
	int ret;

	if (unlikely(rxq->sp))
		return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
	for (i = 0; (i != pkts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[elts_head];
		unsigned int len;
		struct rte_mbuf *seg = elt->buf;
		struct rte_mbuf *rep;
		uint32_t flags;
		uint16_t vlan_tci;

		/* Sanity checks. */
		assert(seg != NULL);
		assert(elts_head < rxq->elts_n);
		assert(rxq->elts_head < rxq->elts_n);
		/*
		 * Fetch initial bytes of packet descriptor into a
		 * cacheline while allocating rep.
		 */
		rte_prefetch0(seg);
		rte_prefetch0(&seg->cacheline1);
		ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
		if (unlikely(ret < 0)) {
			struct ibv_wc wc;
			int wcs_n;

			DEBUG("rxq=%p, poll_length() failed (ret=%d)",
			      (void *)rxq, ret);
			/* ibv_poll_cq() must be used in case of failure. */
			wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
			if (unlikely(wcs_n == 0))
				break;
			if (unlikely(wcs_n < 0)) {
				DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
				      (void *)rxq, wcs_n);
				break;
			}
			assert(wcs_n == 1);
			if (unlikely(wc.status != IBV_WC_SUCCESS)) {
				/* Whatever, just repost the offending WR. */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
				      " completion status (%d): %s",
				      (void *)rxq, wc.wr_id, wc.status,
				      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
				/* Increment dropped packets counter. */
				++rxq->stats.idropped;
#endif
				/* Add SGE to array for repost. */
				sges[i] = elt->sge;
				goto repost;
			}
			ret = wc.byte_len;
		}
		if (ret == 0)
			break;
		len = ret;
		rep = __rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			/*
			 * Unable to allocate a replacement mbuf,
			 * repost WR.
			 */
			DEBUG("rxq=%p: can't allocate a new mbuf",
			      (void *)rxq);
			/* Increment out of memory counters. */
			++rxq->stats.rx_nombuf;
			++rxq->priv->dev->data->rx_mbuf_alloc_failed;
			goto repost;
		}

		/* Reconfigure sge to use rep instead of seg. */
		elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
		assert(elt->sge.lkey == rxq->mr->lkey);
		elt->buf = rep;

		/* Add SGE to array for repost. */
		sges[i] = elt->sge;

		/* Update seg information. */
		SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
		NB_SEGS(seg) = 1;
		PORT(seg) = rxq->port_id;
		NEXT(seg) = NULL;
		PKT_LEN(seg) = len;
		DATA_LEN(seg) = len;
		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
			seg->packet_type = rxq_cq_to_pkt_type(flags);
			seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
			if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
				seg->ol_flags |= PKT_RX_VLAN_PKT;
				seg->vlan_tci = vlan_tci;
			}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
		}
		/* Return packet. */
		*(pkts++) = seg;
		++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += len;
#endif
repost:
		if (++elts_head >= elts_n)
			elts_head = 0;
	}
	if (unlikely(i == 0))
		return 0;
	/* Repost WRs. */
	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
	ret = rxq->recv(rxq->wq, sges, i);
	if (unlikely(ret)) {
		/* Inability to repost WRs is fatal. */
		DEBUG("%p: recv_burst(): failed (ret=%d)",
		      (void *)rxq->priv, ret);
		abort();
	}
	rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}
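
/*
 * Unlike mlx5_rx_burst_sp(), completed SGEs are accumulated in the sges[]
 * array and reposted in a single rxq->recv() call at the end of the burst,
 * which amortizes the posting cost over up to pkts_n descriptors.
 */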

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)