/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
txq_complete(struct txq *txq)
{
    unsigned int elts_comp = txq->elts_comp;
    unsigned int elts_tail = txq->elts_tail;
    unsigned int elts_free = txq->elts_tail;
    const unsigned int elts_n = txq->elts_n;
    int wcs_n;

    if (unlikely(elts_comp == 0))
        return 0;
    DEBUG("%p: processing %u work requests completions",
          (void *)txq, elts_comp);
    wcs_n = txq->poll_cnt(txq->cq, elts_comp);
    if (unlikely(wcs_n == 0))
        return 0;
    if (unlikely(wcs_n < 0)) {
        DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
              (void *)txq, wcs_n);
        return -1;
    }
    elts_comp -= wcs_n;
    assert(elts_comp <= txq->elts_comp);
    /*
     * Assume WC status is successful as nothing can be done about it
     * anyway.
     */
    elts_tail += wcs_n * txq->elts_comp_cd_init;
    if (elts_tail >= elts_n)
        elts_tail -= elts_n;
    while (elts_free != elts_tail) {
        struct txq_elt *elt = &(*txq->elts)[elts_free];
        unsigned int elts_free_next =
            (((elts_free + 1) == elts_n) ? 0 : elts_free + 1);
        struct rte_mbuf *tmp = elt->buf;
        struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];

#ifndef NDEBUG
        /* Poisoning. */
        memset(elt, 0x66, sizeof(*elt));
#endif
        RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
        /* Faster than rte_pktmbuf_free(). */
        do {
            struct rte_mbuf *next = NEXT(tmp);

            rte_pktmbuf_free_seg(tmp);
            tmp = next;
        } while (tmp != NULL);
        elts_free = elts_free_next;
    }
    txq->elts_tail = elts_tail;
    txq->elts_comp = elts_comp;
    return 0;
}
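/*
 * Illustrative note (added for clarity, not part of the original driver):
 * because only one in every elts_comp_cd_init descriptors requests a
 * completion, each work completion returned by poll_cnt() accounts for a
 * whole batch. Assuming elts_comp_cd_init == 4 and wcs_n == 2, the code
 * above advances elts_tail by 2 * 4 = 8 entries and frees the mbufs of
 * eight descriptors, wrapping around elts_n when the ring end is reached.
 */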
/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
txq_mb2mp(struct rte_mbuf *buf)
{
    if (unlikely(RTE_MBUF_INDIRECT(buf)))
        return rte_mbuf_from_indirect(buf)->pool;
    return buf->pool;
}
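/*
 * Note (added for clarity): for an indirect mbuf the data buffer belongs to
 * the direct mbuf it was cloned from, so the Memory Region lookup below must
 * use that original pool rather than the pool the clone was allocated from.
 */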
static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
    __attribute__((always_inline));
/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
    unsigned int i;
    uint32_t lkey = (uint32_t)-1;

    for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
        if (unlikely(txq->mp2mr[i].mp == NULL)) {
            /* Unknown MP, add a new MR for it. */
            break;
        }
        if (txq->mp2mr[i].mp == mp) {
            assert(txq->mp2mr[i].lkey != (uint32_t)-1);
            assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
            lkey = txq->mp2mr[i].lkey;
            break;
        }
    }
    if (unlikely(lkey == (uint32_t)-1))
        lkey = txq_mp2mr_reg(txq, mp, i);
    return lkey;
}
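/*
 * Illustrative usage sketch (assumption, mirroring how the lkey is consumed
 * further down in this file): the value returned by txq_mp2mr() ends up in
 * the lkey field of each scatter/gather entry before posting, e.g.
 *
 *     sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
 *     sge->length = DATA_LEN(buf);
 *     sge->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
 *
 * A result of (uint32_t)-1 means no Memory Region could be associated with
 * the mempool and the packet has to be dropped.
 */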
/**
 * Insert VLAN using mbuf headroom space.
 *
 * @param buf
 *   Buffer for VLAN insertion.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static inline int
insert_vlan_sw(struct rte_mbuf *buf)
{
    uintptr_t addr;
    uint32_t vlan;
    uint16_t head_room_len = rte_pktmbuf_headroom(buf);

    if (head_room_len < 4)
        return EINVAL;
    addr = rte_pktmbuf_mtod(buf, uintptr_t);
    vlan = htonl(0x81000000 | buf->vlan_tci);
    memmove((void *)(addr - 4), (void *)addr, 12);
    memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
    SET_DATA_OFF(buf, head_room_len - 4);
    DATA_LEN(buf) += 4;
    return 0;
}
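/*
 * Worked example (added for clarity): with addr pointing at the original
 * Ethernet header, the 12 bytes of destination and source MAC addresses are
 * shifted 4 bytes into the headroom (memmove to addr - 4), and the 32-bit
 * value 0x8100 | TCI is written right after them at addr + 8. The data
 * offset moves back by 4 bytes, so the frame now carries an 802.1Q tag
 * between the source MAC and the original EtherType.
 */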
#if MLX5_PMD_SGE_WR_N > 1

/**
 * Copy scattered mbuf contents to a single linear buffer.
 *
 * @param linear
 *   Linear output buffer.
 * @param buf
 *   Scattered input buffer.
 *
 * @return
 *   Number of bytes copied to the output buffer or 0 if not large enough.
 */
static unsigned int
linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
{
    unsigned int size = 0;
    unsigned int offset;

    do {
        unsigned int len = DATA_LEN(buf);

        offset = size;
        size += len;
        if (unlikely(size > sizeof(*linear)))
            return 0;
        memcpy(&(*linear)[offset],
               rte_pktmbuf_mtod(buf, uint8_t *),
               len);
        buf = NEXT(buf);
    } while (buf != NULL);
    return size;
}
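/*
 * Note (added for clarity): sizeof(*linear) bounds the largest packet that
 * can be linearized; when the concatenated segments exceed it the function
 * returns 0 and the caller treats the packet as invalid.
 */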
/**
 * Handle scattered buffers for mlx5_tx_burst().
 *
 * @param txq
 *   TX queue structure.
 * @param segs
 *   Number of segments in buf.
 * @param elt
 *   TX queue element to fill.
 * @param buf
 *   Buffer to process.
 * @param elts_head
 *   Index of the linear buffer to use if necessary (normally txq->elts_head).
 * @param sges
 *   Array filled with SGEs on success.
 *
 * @return
 *   A structure containing the processed packet size in bytes and the
 *   number of SGEs. Both fields are set to (unsigned int)-1 in case of
 *   failure.
 */
static struct tx_burst_sg_ret {
    unsigned int length;
    unsigned int num;
}
tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
            struct rte_mbuf *buf, unsigned int elts_head,
            struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N])
{
    unsigned int sent_size = 0;
    unsigned int j;
    int linearize = 0;

    /* When there are too many segments, extra segments are
     * linearized in the last SGE. */
    if (unlikely(segs > RTE_DIM(*sges))) {
        segs = (RTE_DIM(*sges) - 1);
        linearize = 1;
    }
    /* Update element. */
    elt->buf = buf;
    /* Register segments as SGEs. */
    for (j = 0; (j != segs); ++j) {
        struct ibv_sge *sge = &(*sges)[j];
        uint32_t lkey;

        /* Retrieve Memory Region key for this memory pool. */
        lkey = txq_mp2mr(txq, txq_mb2mp(buf));
        if (unlikely(lkey == (uint32_t)-1)) {
            /* MR does not exist. */
            DEBUG("%p: unable to get MP <-> MR association",
                  (void *)txq);
            /* Clean up TX element. */
            elt->buf = NULL;
            goto stop;
        }
        /* Update SGE. */
        sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
        if (txq->priv->sriov)
            rte_prefetch0((volatile void *)
                          (uintptr_t)sge->addr);
        sge->length = DATA_LEN(buf);
        sge->lkey = lkey;
        sent_size += sge->length;
        buf = NEXT(buf);
    }
    /* If buf is not NULL here and is not going to be linearized,
     * nb_segs is not valid. */
    assert(j == segs);
    assert((buf == NULL) || (linearize));
    /* Linearize extra segments. */
    if (linearize) {
        struct ibv_sge *sge = &(*sges)[segs];
        linear_t *linear = &(*txq->elts_linear)[elts_head];
        unsigned int size = linearize_mbuf(linear, buf);

        assert(segs == (RTE_DIM(*sges) - 1));
        if (size == 0) {
            /* Invalid packet. */
            DEBUG("%p: packet too large to be linearized.",
                  (void *)txq);
            /* Clean up TX element. */
            elt->buf = NULL;
            goto stop;
        }
        /* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */
        if (RTE_DIM(*sges) == 1) {
            do {
                struct rte_mbuf *next = NEXT(buf);

                rte_pktmbuf_free_seg(buf);
                buf = next;
            } while (buf != NULL);
            elt->buf = NULL;
        }
        /* Update SGE. */
        sge->addr = (uintptr_t)&(*linear)[0];
        sge->length = size;
        sge->lkey = txq->mr_linear->lkey;
        sent_size += size;
        /* Include last segment. */
        segs++;
    }
    return (struct tx_burst_sg_ret){
        .length = sent_size,
        .num = segs,
    };
stop:
    return (struct tx_burst_sg_ret){
        .length = -1,
        .num = -1,
    };
}

#endif /* MLX5_PMD_SGE_WR_N > 1 */
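/*
 * Illustrative note (added for clarity, not from the original source): with
 * MLX5_PMD_SGE_WR_N set to 4, a 6-segment mbuf chain is sent as three
 * regular SGEs plus a fourth SGE pointing at the per-element linear buffer,
 * into which linearize_mbuf() has copied the remaining three segments.
 */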
/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
    struct txq *txq = (struct txq *)dpdk_txq;
    unsigned int elts_head = txq->elts_head;
    const unsigned int elts_n = txq->elts_n;
    unsigned int elts_comp_cd = txq->elts_comp_cd;
    unsigned int elts_comp = 0;
    struct rte_mbuf *buf = pkts[0];

    assert(elts_comp_cd != 0);
    /* Prefetch first packet cacheline. */
    max = (elts_n - (elts_head - txq->elts_tail));
    assert(max <= elts_n);
    /* Always leave one free entry in the ring. */
    for (i = 0; (i != max); ++i) {
        struct rte_mbuf *buf_next = pkts[i + 1];
        unsigned int elts_head_next =
            (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
        struct txq_elt *elt = &(*txq->elts)[elts_head];
        unsigned int segs = NB_SEGS(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
        unsigned int sent_size = 0;
        uint32_t send_flags = 0;
#ifdef HAVE_VERBS_VLAN_INSERTION
#endif /* HAVE_VERBS_VLAN_INSERTION */
            rte_prefetch0(buf_next);
        /* Request TX completion. */
        if (unlikely(--elts_comp_cd == 0)) {
            elts_comp_cd = txq->elts_comp_cd_init;
            send_flags |= IBV_EXP_QP_BURST_SIGNALED;
        /* Should we enable HW CKSUM offload */
            (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
            send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
            /* HW does not support checksum offloads at arbitrary
             * offsets but automatically recognizes the packet
             * type. For inner L3/L4 checksums, only VXLAN (UDP)
             * tunnels are currently supported. */
            if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
                send_flags |= IBV_EXP_QP_BURST_TUNNEL;
        if (buf->ol_flags & PKT_TX_VLAN_PKT) {
#ifdef HAVE_VERBS_VLAN_INSERTION
#endif /* HAVE_VERBS_VLAN_INSERTION */
                err = insert_vlan_sw(buf);
        if (likely(segs == 1)) {
            uintptr_t buf_next_addr;

            /* Retrieve buffer information. */
            addr = rte_pktmbuf_mtod(buf, uintptr_t);
            length = DATA_LEN(buf);
            /* Update element. */
            if (txq->priv->sriov)
                rte_prefetch0((volatile void *)
            /* Prefetch next buffer data. */
                    rte_pktmbuf_mtod(buf_next, uintptr_t);
                rte_prefetch0((volatile void *)
                              (uintptr_t)buf_next_addr);
            /* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
            if (length <= txq->max_inline) {
#ifdef HAVE_VERBS_VLAN_INSERTION
                    err = txq->send_pending_inline_vlan
#endif /* HAVE_VERBS_VLAN_INSERTION */
                    err = txq->send_pending_inline
                /* Retrieve Memory Region key for this
                lkey = txq_mp2mr(txq, txq_mb2mp(buf));
                if (unlikely(lkey == (uint32_t)-1)) {
                    /* MR does not exist. */
                    DEBUG("%p: unable to get MP <-> MR"
                          " association", (void *)txq);
                    /* Clean up TX element. */
#ifdef HAVE_VERBS_VLAN_INSERTION
                    err = txq->send_pending_vlan
#endif /* HAVE_VERBS_VLAN_INSERTION */
                    err = txq->send_pending
#ifdef MLX5_PMD_SOFT_COUNTERS
#if MLX5_PMD_SGE_WR_N > 1
            struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
            struct tx_burst_sg_ret ret;

            ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
            if (ret.length == (unsigned int)-1)
            /* Put SG list into send queue. */
#ifdef HAVE_VERBS_VLAN_INSERTION
                err = txq->send_pending_sg_list_vlan
#endif /* HAVE_VERBS_VLAN_INSERTION */
                err = txq->send_pending_sg_list
#ifdef MLX5_PMD_SOFT_COUNTERS
            sent_size += ret.length;
#else /* MLX5_PMD_SGE_WR_N > 1 */
            DEBUG("%p: TX scattered buffers support not"
                  " compiled in", (void *)txq);
#endif /* MLX5_PMD_SGE_WR_N > 1 */
        elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
        /* Increment sent bytes counter. */
        txq->stats.obytes += sent_size;
    /* Take a shortcut if nothing must be sent. */
    if (unlikely(i == 0))
#ifdef MLX5_PMD_SOFT_COUNTERS
    /* Increment sent packets counter. */
    txq->stats.opackets += i;
    /* Ring QP doorbell. */
    err = txq->send_flush(txq->qp);
        /* A nonzero value is not supposed to be returned.
         * Nothing can be done about it. */
        DEBUG("%p: send_flush() failed with error %d",
    txq->elts_head = elts_head;
    txq->elts_comp += elts_comp;
    txq->elts_comp_cd = elts_comp_cd;
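/*
 * Usage sketch (assumption, standard DPDK API rather than code from this
 * file): mlx5_tx_burst() is installed as the device's tx_pkt_burst handler,
 * so applications reach it through the generic burst API, e.g.
 *
 *     // pkts[] holds nb valid mbufs prepared by the application
 *     uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 *     // entries [sent, nb) were not queued and remain owned by the caller
 */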
/**
 * Translate RX completion flags to packet type.
 *
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
    uint32_t pkt_type;

    if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
        pkt_type =
            TRANSPOSE(flags, IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
                      RTE_PTYPE_L3_IPV4) |
            TRANSPOSE(flags, IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
                      RTE_PTYPE_L3_IPV6) |
            TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET,
                      RTE_PTYPE_INNER_L3_IPV4) |
            TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV6_PACKET,
                      RTE_PTYPE_INNER_L3_IPV6);
    else
        pkt_type =
            TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET,
                      RTE_PTYPE_L3_IPV4) |
            TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV6_PACKET,
                      RTE_PTYPE_L3_IPV6);
    return pkt_type;
}
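/*
 * Example (added for clarity): for a tunnelled packet reported with
 * IBV_EXP_CQ_RX_TUNNEL_PACKET, IBV_EXP_CQ_RX_OUTER_IPV4_PACKET and
 * IBV_EXP_CQ_RX_IPV6_PACKET set, the outer header maps to RTE_PTYPE_L3_IPV4
 * and the inner one to RTE_PTYPE_INNER_L3_IPV6; without the tunnel flag the
 * same IPv4/IPv6 bits map to the plain RTE_PTYPE_L3_* values.
 */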
/**
 * Translate RX completion flags to offload flags.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
{
    uint32_t ol_flags = 0;

    if (rxq->csum) {
        /* Set IP checksum flag only for IPv4/IPv6 packets. */
        if (flags &
            (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET))
            ol_flags |=
                TRANSPOSE(~flags,
                          IBV_EXP_CQ_RX_IP_CSUM_OK,
                          PKT_RX_IP_CKSUM_BAD);
#ifdef HAVE_EXP_CQ_RX_TCP_PACKET
        /* Set L4 checksum flag only for TCP/UDP packets. */
        if (flags &
            (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET))
#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */
            ol_flags |=
                TRANSPOSE(~flags,
                          IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
                          PKT_RX_L4_CKSUM_BAD);
    }
    /*
     * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
     * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional.
     */
    if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
        ol_flags |=
            TRANSPOSE(~flags,
                      IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
                      PKT_RX_IP_CKSUM_BAD) |
            TRANSPOSE(~flags,
                      IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
                      PKT_RX_L4_CKSUM_BAD);
    return ol_flags;
}
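/*
 * Note (added for clarity): the Verbs completion reports *_CSUM_OK bits,
 * while rte_mbuf carries *_CKSUM_BAD bits, so the translation sets the BAD
 * flag exactly when the corresponding OK bit is absent. For tunnelled
 * packets the outer checksum results are folded into the same two mbuf bits
 * because PKT_RX_EIP_CKSUM_BAD is not functional, as noted above.
 */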
/**
 * DPDK callback for RX with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
    struct rxq *rxq = (struct rxq *)dpdk_rxq;
    struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
    const unsigned int elts_n = rxq->elts_n;
    unsigned int elts_head = rxq->elts_head;
    unsigned int pkts_ret = 0;

    if (unlikely(!rxq->sp))
        return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n);
    if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
    for (i = 0; (i != pkts_n); ++i) {
        struct rxq_elt_sp *elt = &(*elts)[elts_head];
        unsigned int pkt_buf_len;
        struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
        struct rte_mbuf **pkt_buf_next = &pkt_buf;
        unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;

        assert(elts_head < rxq->elts_n);
        assert(rxq->elts_head < rxq->elts_n);
        ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
        if (unlikely(ret < 0)) {
            DEBUG("rxq=%p, poll_length() failed (ret=%d)",
            /* ibv_poll_cq() must be used in case of failure. */
            wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
            if (unlikely(wcs_n == 0))
            if (unlikely(wcs_n < 0)) {
                DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
            if (unlikely(wc.status != IBV_WC_SUCCESS)) {
                /* Whatever, just repost the offending WR. */
                DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
                      " completion status (%d): %s",
                      (void *)rxq, wc.wr_id, wc.status,
                      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment dropped packets counter. */
                ++rxq->stats.idropped;
        assert(ret >= (rxq->crc_present << 2));
        len = ret - (rxq->crc_present << 2);
         * Replace spent segments with new ones, concatenate and
         * return them as pkt_buf.
            struct ibv_sge *sge = &elt->sges[j];
            struct rte_mbuf *seg = elt->bufs[j];
            struct rte_mbuf *rep;
            unsigned int seg_tailroom;

             * Fetch initial bytes of packet descriptor into a
             * cacheline while allocating rep.
            rep = rte_mbuf_raw_alloc(rxq->mp);
            if (unlikely(rep == NULL)) {
                 * Unable to allocate a replacement mbuf,
                DEBUG("rxq=%p: can't allocate a new mbuf",
                if (pkt_buf != NULL) {
                    *pkt_buf_next = NULL;
                    rte_pktmbuf_free(pkt_buf);
                /* Increment out of memory counters. */
                ++rxq->stats.rx_nombuf;
                ++rxq->priv->dev->data->rx_mbuf_alloc_failed;
            /* Poison user-modifiable fields in rep. */
            NEXT(rep) = (void *)((uintptr_t)-1);
            SET_DATA_OFF(rep, 0xdead);
            DATA_LEN(rep) = 0xd00d;
            PKT_LEN(rep) = 0xdeadd00d;
            assert(rep->buf_len == seg->buf_len);
            /* Reconfigure sge to use rep instead of seg. */
            assert(sge->lkey == rxq->mr->lkey);
            sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
            /* Update pkt_buf if it's the first segment, or link
             * seg to the previous one and update pkt_buf_next. */
            pkt_buf_next = &NEXT(seg);
            /* Update seg information. */
            seg_tailroom = (seg->buf_len - seg_headroom);
            assert(sge->length == seg_tailroom);
            SET_DATA_OFF(seg, seg_headroom);
            if (likely(len <= seg_tailroom)) {
                assert(rte_pktmbuf_headroom(seg) ==
                assert(rte_pktmbuf_tailroom(seg) ==
                       (seg_tailroom - len));
            DATA_LEN(seg) = seg_tailroom;
            PKT_LEN(seg) = seg_tailroom;
            assert(rte_pktmbuf_headroom(seg) == seg_headroom);
            assert(rte_pktmbuf_tailroom(seg) == 0);
            /* Fix len and clear headroom for next segments. */
        /* Update head and tail segments. */
        *pkt_buf_next = NULL;
        assert(pkt_buf != NULL);
        NB_SEGS(pkt_buf) = j;
        PORT(pkt_buf) = rxq->port_id;
        PKT_LEN(pkt_buf) = pkt_buf_len;
        if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
            pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
            pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
            if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
                pkt_buf->ol_flags |= PKT_RX_VLAN_PKT |
                    PKT_RX_VLAN_STRIPPED;
                pkt_buf->vlan_tci = vlan_tci;
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
#ifdef MLX5_PMD_SOFT_COUNTERS
        /* Increment bytes counter. */
        rxq->stats.ibytes += pkt_buf_len;
        ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
            /* Inability to repost WRs is fatal. */
            DEBUG("%p: recv_sg_list(): failed (ret=%d)",
        if (++elts_head >= elts_n)
    if (unlikely(i == 0))
    rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
    /* Increment packets counter. */
    rxq->stats.ipackets += pkts_ret;
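/*
 * Note (added for clarity): each received segment is handed to the
 * application and immediately replaced in the element's SGE array by a
 * fresh mbuf from rxq->mp, so the same WR can be reposted right away with
 * rxq->recv(). If a replacement cannot be allocated, the partially built
 * chain is freed and the original WR is reposted unchanged, dropping the
 * packet instead of leaking descriptors.
 */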
/**
 * DPDK callback for RX.
 *
 * The following function is the same as mlx5_rx_burst_sp(), except it doesn't
 * manage scattered packets. Improves performance when MRU is lower than the
 * size of the first segment.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
    struct rxq *rxq = (struct rxq *)dpdk_rxq;
    struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
    const unsigned int elts_n = rxq->elts_n;
    unsigned int elts_head = rxq->elts_head;
    struct ibv_sge sges[pkts_n];
    unsigned int pkts_ret = 0;

    if (unlikely(rxq->sp))
        return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
    for (i = 0; (i != pkts_n); ++i) {
        struct rxq_elt *elt = &(*elts)[elts_head];
        struct rte_mbuf *seg = elt->buf;
        struct rte_mbuf *rep;

        assert(elts_head < rxq->elts_n);
        assert(rxq->elts_head < rxq->elts_n);
         * Fetch initial bytes of packet descriptor into a
         * cacheline while allocating rep.
        rte_mbuf_prefetch_part1(seg);
        rte_mbuf_prefetch_part2(seg);
        ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
        if (unlikely(ret < 0)) {
            DEBUG("rxq=%p, poll_length() failed (ret=%d)",
            /* ibv_poll_cq() must be used in case of failure. */
            wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
            if (unlikely(wcs_n == 0))
            if (unlikely(wcs_n < 0)) {
                DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
            if (unlikely(wc.status != IBV_WC_SUCCESS)) {
                /* Whatever, just repost the offending WR. */
                DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
                      " completion status (%d): %s",
                      (void *)rxq, wc.wr_id, wc.status,
                      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
                /* Increment dropped packets counter. */
                ++rxq->stats.idropped;
                /* Add SGE to array for repost. */
        assert(ret >= (rxq->crc_present << 2));
        len = ret - (rxq->crc_present << 2);
        rep = rte_mbuf_raw_alloc(rxq->mp);
        if (unlikely(rep == NULL)) {
             * Unable to allocate a replacement mbuf,
            DEBUG("rxq=%p: can't allocate a new mbuf",
            /* Increment out of memory counters. */
            ++rxq->stats.rx_nombuf;
            ++rxq->priv->dev->data->rx_mbuf_alloc_failed;
        /* Reconfigure sge to use rep instead of seg. */
        elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
        assert(elt->sge.lkey == rxq->mr->lkey);
        /* Add SGE to array for repost. */
        /* Update seg information. */
        SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
        PORT(seg) = rxq->port_id;
        DATA_LEN(seg) = len;
        if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
            seg->packet_type = rxq_cq_to_pkt_type(flags);
            seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
            if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
                seg->ol_flags |= PKT_RX_VLAN_PKT |
                    PKT_RX_VLAN_STRIPPED;
                seg->vlan_tci = vlan_tci;
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
        /* Return packet. */
#ifdef MLX5_PMD_SOFT_COUNTERS
        /* Increment bytes counter. */
        rxq->stats.ibytes += len;
        if (++elts_head >= elts_n)
    if (unlikely(i == 0))
    DEBUG("%p: reposting %u WRs", (void *)rxq, i);
    ret = rxq->recv(rxq->wq, sges, i);
    if (unlikely(ret)) {
        /* Inability to repost WRs is fatal. */
        DEBUG("%p: recv_burst(): failed (ret=%d)",
    rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
    /* Increment packets counter. */
    rxq->stats.ipackets += pkts_ret;
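/*
 * Usage sketch (assumption, standard DPDK API rather than code from this
 * file): the PMD exposes this routine as the device's rx_pkt_burst handler,
 * so a typical polling loop looks like
 *
 *     struct rte_mbuf *burst[32];
 *     uint16_t nb = rte_eth_rx_burst(port_id, queue_id, burst, 32);
 *
 *     for (uint16_t k = 0; k != nb; ++k)
 *         rte_pktmbuf_free(burst[k]); // or process/forward the packet
 *
 * When scattered reception is enabled on the queue, the call is transparently
 * redirected to mlx5_rx_burst_sp() by the check at the top of this function.
 */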
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
    (void)dpdk_txq;
    (void)pkts;
    (void)pkts_n;
    return 0;
}
/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
    (void)dpdk_rxq;
    (void)pkts;
    (void)pkts_n;
    return 0;
}