/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
txq_complete(struct txq *txq)
{
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	unsigned int elts_free = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	int wcs_n;

	if (unlikely(elts_comp == 0))
		return 0;
#ifdef DEBUG_SEND
	DEBUG("%p: processing %u work requests completions",
	      (void *)txq, elts_comp);
#endif
	wcs_n = txq->poll_cnt(txq->cq, elts_comp);
	if (unlikely(wcs_n == 0))
		return 0;
	if (unlikely(wcs_n < 0)) {
		DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
		      (void *)txq, wcs_n);
		return -1;
	}
	elts_comp -= wcs_n;
	assert(elts_comp <= txq->elts_comp);
	/*
	 * Assume WC status is successful as nothing can be done about it
	 * anyway.
	 */
	elts_tail += wcs_n * txq->elts_comp_cd_init;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;
	while (elts_free != elts_tail) {
		struct txq_elt *elt = &(*txq->elts)[elts_free];
		unsigned int elts_free_next =
			(((elts_free + 1) == elts_n) ? 0 : elts_free + 1);
		struct rte_mbuf *tmp = elt->buf;
		struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];

#ifndef NDEBUG
		/* Poisoning. */
		memset(elt, 0x66, sizeof(*elt));
#endif
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
		/* Faster than rte_pktmbuf_free(). */
		do {
			struct rte_mbuf *next = NEXT(tmp);

			rte_pktmbuf_free_seg(tmp);
			tmp = next;
		} while (tmp != NULL);
		elts_free = elts_free_next;
	}
	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
	return 0;
}

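/*
 * Worked example for the accounting above (illustrative only, assuming
 * MLX5_PMD_TX_PER_COMP_REQ == 4 purely for the sake of the example):
 * since only one WR out of every elts_comp_cd_init is signaled, each
 * polled completion stands for 4 posted sends. Polling wcs_n == 2
 * completions therefore advances elts_tail by 8 entries and releases
 * 8 mbufs in the loop above, even though hardware generated only two
 * work completions.
 */
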
/* For best performance, this function should not be inlined. */
struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *)
	__attribute__((noinline));

/**
 * Register mempool as a memory region.
 *
 * @param pd
 *   Pointer to protection domain.
 * @param mp
 *   Pointer to memory pool.
 *
 * @return
 *   Memory region pointer, NULL in case of error.
 */
struct ibv_mr *
mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	uintptr_t start = mp->elt_va_start;
	uintptr_t end = mp->elt_va_end;
	unsigned int i;

	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (const void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		unsigned int align = ms[i].hugepage_sz;

		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
	      (const void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	return ibv_reg_mr(pd,
			  (void *)start,
			  end - start,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
}

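/*
 * Usage sketch (illustrative only; "pd" and "mp" stand for any valid
 * protection domain and mempool, and error handling is up to the
 * caller):
 *
 *	struct ibv_mr *mr = mlx5_mp2mr(pd, mp);
 *	uint32_t lkey = (mr != NULL) ? mr->lkey : (uint32_t)-1;
 *
 * The returned lkey is then reusable for every buffer drawn from mp,
 * which is what the txq_mp2mr() cache below relies on.
 */
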
/**
 * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
txq_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static uint32_t
txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
{
	unsigned int i;
	struct ibv_mr *mr;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
			return txq->mp2mr[i].lkey;
		}
	}
	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq, mp->name, (const void *)mp);
	mr = mlx5_mp2mr(txq->priv->pd, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq);
		return (uint32_t)-1;
	}
	if (unlikely(i == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq);
		--i;
		claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq->mp2mr[i].mp = mp;
	txq->mp2mr[i].mr = mr;
	txq->mp2mr[i].lkey = mr->lkey;
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
	return txq->mp2mr[i].lkey;
}

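/*
 * Note on the cache above: a lookup is a linear scan of at most
 * RTE_DIM(txq->mp2mr) pointer comparisons, which stays cheap because a
 * burst normally draws all of its mbufs from one or two mempools, so
 * the hit lands within the first entries. Eviction is FIFO: the entry
 * at index 0 is always the oldest registration and is the one dropped
 * (and deregistered) when the table is full.
 */
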
struct txq_mp2mr_mbuf_check_data {
	const struct rte_mempool *mp;
	int ret;
};

/**
 * Callback function for rte_mempool_obj_iter() to check whether a given
 * mempool object looks like a mbuf.
 *
 * @param[in, out] arg
 *   Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
 *   and return value.
 * @param[in] start
 *   Object start address.
 * @param[in] end
 *   Object end address.
 * @param index
 *   Unused.
 *
 * @return
 *   Nonzero value when object is not a mbuf.
 */
static int
txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
		     uint32_t index __rte_unused)
{
	struct txq_mp2mr_mbuf_check_data *data = arg;
	struct rte_mbuf *buf =
		(void *)((uintptr_t)start + data->mp->header_size);

	/* Check whether mbuf structure fits element size and whether mempool
	 * pointer is valid. */
	if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
	    (buf->pool == data->mp))
		data->ret = 0;
	else
		data->ret = -1;
	return data->ret;
}

/**
 * Iterator function for rte_mempool_walk() to register existing mempools and
 * fill the MP to MR cache of a TX queue.
 *
 * @param mp
 *   Memory Pool to register.
 * @param arg
 *   Pointer to TX queue structure.
 */
static void
txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
	struct txq *txq = arg;
	struct txq_mp2mr_mbuf_check_data data = { .mp = mp, .ret = -1 };

	/* Discard empty mempools. */
	if (mp->size == 0)
		return;
	/* Register mempool only if the first element looks like a mbuf. */
	rte_mempool_obj_iter((void *)mp->elt_va_start, 1,
			     mp->header_size + mp->elt_size + mp->trailer_size,
			     1, mp->elt_pa, mp->pg_num, mp->pg_shift,
			     txq_mp2mr_mbuf_check, &data);
	if (data.ret == 0)
		txq_mp2mr(txq, mp);
}

/**
 * Insert VLAN using mbuf headroom space.
 *
 * @param buf
 *   Buffer for VLAN insertion.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static inline int
insert_vlan_sw(struct rte_mbuf *buf)
{
	uintptr_t addr;
	uint32_t vlan;
	uint16_t head_room_len = rte_pktmbuf_headroom(buf);

	if (head_room_len < 4)
		return EINVAL;
	addr = rte_pktmbuf_mtod(buf, uintptr_t);
	vlan = htonl(0x81000000 | buf->vlan_tci);
	memmove((void *)(addr - 4), (void *)addr, 12);
	memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
	SET_DATA_OFF(buf, head_room_len - 4);
	DATA_LEN(buf) += 4;
	return 0;
}

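/*
 * Frame layout sketch for the function above (IEEE 802.1Q insertion):
 *
 *	before: | DMAC (6) | SMAC (6) | EtherType | payload |
 *	after:  | DMAC (6) | SMAC (6) | 0x8100 | TCI | EtherType | payload |
 *
 * The 12 MAC address bytes are shifted 4 bytes into the headroom, then
 * the 0x8100 TPID and vlan_tci (in network order thanks to htonl())
 * land at the old offset 8, i.e. offset 12 of the new frame start,
 * leaving the original EtherType in place right after them.
 */
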
#if MLX5_PMD_SGE_WR_N > 1

/**
 * Copy scattered mbuf contents to a single linear buffer.
 *
 * @param[out] linear
 *   Linear output buffer.
 * @param[in] buf
 *   Scattered input buffer.
 *
 * @return
 *   Number of bytes copied to the output buffer or 0 if not large enough.
 */
static unsigned int
linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
{
	unsigned int size = 0;
	unsigned int offset;

	do {
		unsigned int len = DATA_LEN(buf);

		offset = size;
		size += len;
		if (unlikely(size > sizeof(*linear)))
			return 0;
		memcpy(&(*linear)[offset],
		       rte_pktmbuf_mtod(buf, uint8_t *),
		       len);
		buf = NEXT(buf);
	} while (buf != NULL);
	return size;
}

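/*
 * Example (illustrative): a 3-segment chain of 500 + 500 + 100 bytes is
 * copied back-to-back into *linear and 1100 is returned. If the running
 * total ever exceeds sizeof(*linear), 0 is returned instead and the
 * caller must treat the packet as too large to linearize.
 */
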
/**
 * Handle scattered buffers for mlx5_tx_burst().
 *
 * @param txq
 *   TX queue structure.
 * @param segs
 *   Number of segments in buf.
 * @param elt
 *   TX queue element to fill.
 * @param[in] buf
 *   Buffer to process.
 * @param elts_head
 *   Index of the linear buffer to use if necessary (normally txq->elts_head).
 * @param[out] sges
 *   Array filled with SGEs on success.
 *
 * @return
 *   A structure containing the processed packet size in bytes and the
 *   number of SGEs. Both fields are set to (unsigned int)-1 in case of
 *   failure.
 */
static struct tx_burst_sg_ret {
	unsigned int length; /* Processed packet size. */
	unsigned int num; /* Number of SGEs. */
}
tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
	    struct rte_mbuf *buf, unsigned int elts_head,
	    struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N])
{
	unsigned int sent_size = 0;
	unsigned int j;
	int linearize = 0;

	/* When there are too many segments, extra segments are
	 * linearized in the last SGE. */
	if (unlikely(segs > RTE_DIM(*sges))) {
		segs = (RTE_DIM(*sges) - 1);
		linearize = 1;
	}
	/* Update element. */
	elt->buf = buf;
	/* Register segments as SGEs. */
	for (j = 0; (j != segs); ++j) {
		struct ibv_sge *sge = &(*sges)[j];
		uint32_t lkey;

		/* Retrieve Memory Region key for this memory pool. */
		lkey = txq_mp2mr(txq, txq_mb2mp(buf));
		if (unlikely(lkey == (uint32_t)-1)) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* Update SGE. */
		sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
		if (txq->priv->vf)
			rte_prefetch0((volatile void *)
				      (uintptr_t)sge->addr);
		sge->length = DATA_LEN(buf);
		sge->lkey = lkey;
		sent_size += sge->length;
		buf = NEXT(buf);
	}
	/* If buf is not NULL here and is not going to be linearized,
	 * nb_segs is not valid. */
	assert(j == segs);
	assert((buf == NULL) || (linearize));
	/* Linearize extra segments. */
	if (linearize) {
		struct ibv_sge *sge = &(*sges)[segs];
		linear_t *linear = &(*txq->elts_linear)[elts_head];
		unsigned int size = linearize_mbuf(linear, buf);

		assert(segs == (RTE_DIM(*sges) - 1));
		if (size == 0) {
			/* Invalid packet. */
			DEBUG("%p: packet too large to be linearized.",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */
		if (RTE_DIM(*sges) == 1) {
			do {
				struct rte_mbuf *next = NEXT(buf);

				rte_pktmbuf_free_seg(buf);
				buf = next;
			} while (buf != NULL);
			elt->buf = NULL;
		}
		/* Update SGE. */
		sge->addr = (uintptr_t)&(*linear)[0];
		sge->length = size;
		sge->lkey = txq->mr_linear->lkey;
		sent_size += size;
		/* Include last segment. */
		segs++;
	}
	return (struct tx_burst_sg_ret){
		.length = sent_size,
		.num = segs,
	};
stop:
	return (struct tx_burst_sg_ret){
		.length = -1,
		.num = -1,
	};
}

#endif /* MLX5_PMD_SGE_WR_N > 1 */

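/*
 * Worked example for tx_burst_sg() (illustrative only, assuming
 * MLX5_PMD_SGE_WR_N == 4): a 6-segment mbuf chain exceeds the SGE
 * limit, so segs is clamped to 3 and linearize is set. The first three
 * segments are posted as direct SGEs, while segments 4-6 are copied
 * into the per-slot linear buffer referenced by a fourth SGE, yielding
 * ret.num == 4 and a WR that never exceeds the queue's SGE limit.
 */
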
/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int elts_comp_cd = txq->elts_comp_cd;
	unsigned int elts_comp = 0;
	unsigned int i;
	unsigned int max;
	int err;
	struct rte_mbuf *buf = pkts[0];

	assert(elts_comp_cd != 0);
	/* Prefetch first packet cacheline. */
	rte_prefetch0(buf);
	txq_complete(txq);
	max = (elts_n - (elts_head - txq->elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max == 0)
		return 0;
	if (max > pkts_n)
		max = pkts_n;
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf_next = pkts[i + 1];
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		unsigned int segs = NB_SEGS(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
		unsigned int sent_size = 0;
#endif
		uint32_t send_flags = 0;
#ifdef HAVE_VERBS_VLAN_INSERTION
		int insert_vlan = 0;
#endif /* HAVE_VERBS_VLAN_INSERTION */

		if (i + 1 < max)
			rte_prefetch0(buf_next);
		/* Request TX completion. */
		if (unlikely(--elts_comp_cd == 0)) {
			elts_comp_cd = txq->elts_comp_cd_init;
			++elts_comp;
			send_flags |= IBV_EXP_QP_BURST_SIGNALED;
		}
		/* Should we enable HW CKSUM offload */
		if (buf->ol_flags &
		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
			/* HW does not support checksum offloads at arbitrary
			 * offsets but automatically recognizes the packet
			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
			 * tunnels are currently supported. */
			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
		}
		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
#ifdef HAVE_VERBS_VLAN_INSERTION
			if (!txq->priv->mps)
				insert_vlan = 1;
			else
#endif /* HAVE_VERBS_VLAN_INSERTION */
			{
				err = insert_vlan_sw(buf);
				if (unlikely(err))
					goto stop;
			}
		}
		if (likely(segs == 1)) {
			uintptr_t addr;
			uint32_t length;
			uint32_t lkey;
			uintptr_t buf_next_addr;

			/* Retrieve buffer information. */
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			length = DATA_LEN(buf);
			/* Update element. */
			elt->buf = buf;
			if (txq->priv->vf)
				rte_prefetch0((volatile void *)
					      (uintptr_t)addr);
			/* Prefetch next buffer data. */
			if (i + 1 < max) {
				buf_next_addr =
					rte_pktmbuf_mtod(buf_next, uintptr_t);
				rte_prefetch0((volatile void *)
					      (uintptr_t)buf_next_addr);
			}
			/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
			if (length <= txq->max_inline) {
#ifdef HAVE_VERBS_VLAN_INSERTION
				if (insert_vlan)
					err = txq->send_pending_inline_vlan
						(txq->qp,
						 (void *)addr,
						 length,
						 send_flags,
						 &buf->vlan_tci);
				else
#endif /* HAVE_VERBS_VLAN_INSERTION */
					err = txq->send_pending_inline
						(txq->qp,
						 (void *)addr,
						 length,
						 send_flags);
			} else
#endif
			{
				/* Retrieve Memory Region key for this
				 * memory pool. */
				lkey = txq_mp2mr(txq, txq_mb2mp(buf));
				if (unlikely(lkey == (uint32_t)-1)) {
					/* MR does not exist. */
					DEBUG("%p: unable to get MP <-> MR"
					      " association", (void *)txq);
					/* Clean up TX element. */
					elt->buf = NULL;
					goto stop;
				}
#ifdef HAVE_VERBS_VLAN_INSERTION
				if (insert_vlan)
					err = txq->send_pending_vlan
						(txq->qp,
						 addr,
						 length,
						 lkey,
						 send_flags,
						 &buf->vlan_tci);
				else
#endif /* HAVE_VERBS_VLAN_INSERTION */
					err = txq->send_pending
						(txq->qp,
						 addr,
						 length,
						 lkey,
						 send_flags);
			}
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += length;
#endif
		} else {
#if MLX5_PMD_SGE_WR_N > 1
			struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
			struct tx_burst_sg_ret ret;

			ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
					  &sges);
			if (ret.length == (unsigned int)-1)
				goto stop;
			/* Put SG list into send queue. */
#ifdef HAVE_VERBS_VLAN_INSERTION
			if (insert_vlan)
				err = txq->send_pending_sg_list_vlan
					(txq->qp,
					 sges,
					 ret.num,
					 send_flags,
					 &buf->vlan_tci);
			else
#endif /* HAVE_VERBS_VLAN_INSERTION */
				err = txq->send_pending_sg_list
					(txq->qp,
					 sges,
					 ret.num,
					 send_flags);
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += ret.length;
#endif
#else /* MLX5_PMD_SGE_WR_N > 1 */
			DEBUG("%p: TX scattered buffers support not"
			      " compiled in", (void *)txq);
			goto stop;
#endif /* MLX5_PMD_SGE_WR_N > 1 */
		}
		elts_head = elts_head_next;
		buf = buf_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += sent_size;
#endif
	}
stop:
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	err = txq->send_flush(txq->qp);
	if (unlikely(err)) {
		/* A nonzero value is not supposed to be returned.
		 * Nothing can be done about it. */
		DEBUG("%p: send_flush() failed with error %d",
		      (void *)txq, err);
	}
	txq->elts_head = elts_head;
	txq->elts_comp += elts_comp;
	txq->elts_comp_cd = elts_comp_cd;
	return i;
}

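/*
 * Caller-side sketch (illustrative; the retry policy is entirely up to
 * the application): the return value may be lower than pkts_n when the
 * ring is nearly full, in which case leftover packets must be retried
 * later or freed by the caller.
 *
 *	uint16_t sent = mlx5_tx_burst(txq, pkts, n);
 *
 *	if (sent < n)
 *		handle_leftovers(pkts + sent, n - sent); // hypothetical
 */
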
/**
 * Translate RX completion flags to packet type.
 *
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
	uint32_t pkt_type;

	if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_INNER_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_INNER_L3_IPV6);
	else
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6);
	return pkt_type;
}

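/*
 * TRANSPOSE() (see mlx5_utils.h) shifts a flag from its completion
 * queue bit position to the corresponding mbuf bit position using only
 * a mask and a multiplication or division, e.g. a set
 * IBV_EXP_CQ_RX_IPV4_PACKET bit in flags becomes RTE_PTYPE_L3_IPV4 in
 * the result without any conditional branch on the hot path.
 */
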
/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
{
	uint32_t ol_flags = 0;

	if (rxq->csum) {
		/* Set IP checksum flag only for IPv4/IPv6 packets. */
		if (flags &
		    (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET))
			ol_flags |=
				TRANSPOSE(~flags,
					  IBV_EXP_CQ_RX_IP_CSUM_OK,
					  PKT_RX_IP_CKSUM_BAD);
#ifdef HAVE_EXP_CQ_RX_TCP_PACKET
		/* Set L4 checksum flag only for TCP/UDP packets. */
		if (flags &
		    (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET))
#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */
			ol_flags |=
				TRANSPOSE(~flags,
					  IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
					  PKT_RX_L4_CKSUM_BAD);
	}
	/*
	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
	 * (its value is 0).
	 */
	if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
		ol_flags |=
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	return ol_flags;
}

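/*
 * Note on the ~flags trick above: the CQ reports "checksum OK" bits
 * while mbufs carry "checksum bad" flags, hence the TRANSPOSE(~flags,
 * ...) calls: a cleared IBV_EXP_CQ_RX_IP_CSUM_OK bit in flags turns
 * into a set PKT_RX_IP_CKSUM_BAD bit in ol_flags, and vice versa.
 */
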
/**
 * DPDK callback for RX with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
	const unsigned int elts_n = rxq->elts_n;
	unsigned int elts_head = rxq->elts_head;
	unsigned int i;
	unsigned int pkts_ret = 0;
	int ret;

	if (unlikely(!rxq->sp))
		return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n);
	if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
		return 0;
	for (i = 0; (i != pkts_n); ++i) {
		struct rxq_elt_sp *elt = &(*elts)[elts_head];
		unsigned int len;
		unsigned int pkt_buf_len;
		struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
		struct rte_mbuf **pkt_buf_next = &pkt_buf;
		unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
		unsigned int j = 0;
		uint32_t flags;
		uint16_t vlan_tci;

		/* Sanity checks. */
		assert(elts_head < rxq->elts_n);
		assert(rxq->elts_head < rxq->elts_n);
		ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
		if (unlikely(ret < 0)) {
			struct ibv_wc wc;
			int wcs_n;

			DEBUG("rxq=%p, poll_length() failed (ret=%d)",
			      (void *)rxq, ret);
			/* ibv_poll_cq() must be used in case of failure. */
			wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
			if (unlikely(wcs_n == 0))
				break;
			if (unlikely(wcs_n < 0)) {
				DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
				      (void *)rxq, wcs_n);
				break;
			}
			assert(wcs_n == 1);
			if (unlikely(wc.status != IBV_WC_SUCCESS)) {
				/* Whatever, just repost the offending WR. */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
				      " completion status (%d): %s",
				      (void *)rxq, wc.wr_id, wc.status,
				      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
				/* Increment dropped packets counter. */
				++rxq->stats.idropped;
#endif
				goto repost;
			}
			ret = wc.byte_len;
		}
		if (ret == 0)
			break;
		assert(ret >= (rxq->crc_present << 2));
		len = ret - (rxq->crc_present << 2);
		pkt_buf_len = len;
		/*
		 * Replace spent segments with new ones, concatenate and
		 * return them as pkt_buf.
		 */
		while (1) {
			struct ibv_sge *sge = &elt->sges[j];
			struct rte_mbuf *seg = elt->bufs[j];
			struct rte_mbuf *rep;
			unsigned int seg_tailroom;

			assert(seg != NULL);
			/*
			 * Fetch initial bytes of packet descriptor into a
			 * cacheline while allocating rep.
			 */
			rte_prefetch0(seg);
			rep = rte_mbuf_raw_alloc(rxq->mp);
			if (unlikely(rep == NULL)) {
				/*
				 * Unable to allocate a replacement mbuf,
				 * repost WR.
				 */
				DEBUG("rxq=%p: can't allocate a new mbuf",
				      (void *)rxq);
				if (pkt_buf != NULL) {
					*pkt_buf_next = NULL;
					rte_pktmbuf_free(pkt_buf);
				}
				/* Increment out of memory counters. */
				++rxq->stats.rx_nombuf;
				++rxq->priv->dev->data->rx_mbuf_alloc_failed;
				goto repost;
			}
#ifndef NDEBUG
			/* Poison user-modifiable fields in rep. */
			NEXT(rep) = (void *)((uintptr_t)-1);
			SET_DATA_OFF(rep, 0xdead);
			DATA_LEN(rep) = 0xd00d;
			PKT_LEN(rep) = 0xdeadd00d;
			NB_SEGS(rep) = 0x2a;
			PORT(rep) = 0x2a;
			rep->ol_flags = -1;
#endif
			assert(rep->buf_len == seg->buf_len);
			assert(rep->buf_len == rxq->mb_len);
			/* Reconfigure sge to use rep instead of seg. */
			assert(sge->lkey == rxq->mr->lkey);
			sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
			elt->bufs[j] = rep;
			++j;
			/* Update pkt_buf if it's the first segment, or link
			 * seg to the previous one and update pkt_buf_next. */
			*pkt_buf_next = seg;
			pkt_buf_next = &NEXT(seg);
			/* Update seg information. */
			seg_tailroom = (seg->buf_len - seg_headroom);
			assert(sge->length == seg_tailroom);
			SET_DATA_OFF(seg, seg_headroom);
			if (likely(len <= seg_tailroom)) {
				/* Last segment. */
				DATA_LEN(seg) = len;
				PKT_LEN(seg) = len;
				/* Sanity checks. */
				assert(rte_pktmbuf_headroom(seg) ==
				       seg_headroom);
				assert(rte_pktmbuf_tailroom(seg) ==
				       (seg_tailroom - len));
				break;
			}
			DATA_LEN(seg) = seg_tailroom;
			PKT_LEN(seg) = seg_tailroom;
			/* Sanity checks. */
			assert(rte_pktmbuf_headroom(seg) == seg_headroom);
			assert(rte_pktmbuf_tailroom(seg) == 0);
			/* Fix len and clear headroom for next segments. */
			len -= seg_tailroom;
			seg_headroom = 0;
		}
		/* Update head and tail segments. */
		*pkt_buf_next = NULL;
		assert(pkt_buf != NULL);
		assert(j != 0);
		NB_SEGS(pkt_buf) = j;
		PORT(pkt_buf) = rxq->port_id;
		PKT_LEN(pkt_buf) = pkt_buf_len;
		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
			pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
			pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
			if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
				pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
				pkt_buf->vlan_tci = vlan_tci;
			}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
		}
		/* Return packet. */
		*(pkts++) = pkt_buf;
		++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt_buf_len;
#endif
repost:
		ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
		if (unlikely(ret)) {
			/* Inability to repost WRs is fatal. */
			DEBUG("%p: recv_sg_list(): failed (ret=%d)",
			      (void *)rxq->priv, ret);
			abort();
		}
		if (++elts_head >= elts_n)
			elts_head = 0;
		continue;
	}
	if (unlikely(i == 0))
		return 0;
	rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}

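/*
 * Example for the length adjustment in the function above: when the
 * device does not strip the Ethernet FCS, rxq->crc_present == 1 and
 * (rxq->crc_present << 2) == 4, so a 64-byte completion yields a
 * 60-byte packet; with CRC stripping enabled the subtraction is a
 * no-op.
 */
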
/**
 * DPDK callback for RX.
 *
 * The following function is the same as mlx5_rx_burst_sp(), except it doesn't
 * manage scattered packets. It improves performance when the MRU is lower
 * than the size of the first segment.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
	const unsigned int elts_n = rxq->elts_n;
	unsigned int elts_head = rxq->elts_head;
	struct ibv_sge sges[pkts_n];
	unsigned int i;
	unsigned int pkts_ret = 0;
	int ret;

	if (unlikely(rxq->sp))
		return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
	for (i = 0; (i != pkts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[elts_head];
		unsigned int len;
		struct rte_mbuf *seg = elt->buf;
		struct rte_mbuf *rep;
		uint32_t flags;
		uint16_t vlan_tci;

		/* Sanity checks. */
		assert(seg != NULL);
		assert(elts_head < rxq->elts_n);
		assert(rxq->elts_head < rxq->elts_n);
		/*
		 * Fetch initial bytes of packet descriptor into a
		 * cacheline while allocating rep.
		 */
		rte_prefetch0(seg);
		rte_prefetch0(&seg->cacheline1);
		ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
		if (unlikely(ret < 0)) {
			struct ibv_wc wc;
			int wcs_n;

			DEBUG("rxq=%p, poll_length() failed (ret=%d)",
			      (void *)rxq, ret);
			/* ibv_poll_cq() must be used in case of failure. */
			wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
			if (unlikely(wcs_n == 0))
				break;
			if (unlikely(wcs_n < 0)) {
				DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
				      (void *)rxq, wcs_n);
				break;
			}
			assert(wcs_n == 1);
			if (unlikely(wc.status != IBV_WC_SUCCESS)) {
				/* Whatever, just repost the offending WR. */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
				      " completion status (%d): %s",
				      (void *)rxq, wc.wr_id, wc.status,
				      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
				/* Increment dropped packets counter. */
				++rxq->stats.idropped;
#endif
				/* Add SGE to array for repost. */
				sges[i] = elt->sge;
				goto repost;
			}
			ret = wc.byte_len;
		}
		if (ret == 0)
			break;
		assert(ret >= (rxq->crc_present << 2));
		len = ret - (rxq->crc_present << 2);
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			/*
			 * Unable to allocate a replacement mbuf,
			 * repost WR.
			 */
			DEBUG("rxq=%p: can't allocate a new mbuf",
			      (void *)rxq);
			/* Increment out of memory counters. */
			++rxq->stats.rx_nombuf;
			++rxq->priv->dev->data->rx_mbuf_alloc_failed;
			goto repost;
		}
		/* Reconfigure sge to use rep instead of seg. */
		elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
		assert(elt->sge.lkey == rxq->mr->lkey);
		elt->buf = rep;
		/* Add SGE to array for repost. */
		sges[i] = elt->sge;
		/* Update seg information. */
		SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
		NB_SEGS(seg) = 1;
		PORT(seg) = rxq->port_id;
		NEXT(seg) = NULL;
		PKT_LEN(seg) = len;
		DATA_LEN(seg) = len;
		if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) {
			seg->packet_type = rxq_cq_to_pkt_type(flags);
			seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
			if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
				seg->ol_flags |= PKT_RX_VLAN_PKT;
				seg->vlan_tci = vlan_tci;
			}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
		}
		/* Return packet. */
		*(pkts++) = seg;
		++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += len;
#endif
repost:
		if (++elts_head >= elts_n)
			elts_head = 0;
		continue;
	}
	if (unlikely(i == 0))
		return 0;
	/* Repost WRs. */
#ifdef DEBUG_RECV
	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
#endif
	ret = rxq->recv(rxq->wq, sges, i);
	if (unlikely(ret)) {
		/* Inability to repost WRs is fatal. */
		DEBUG("%p: recv_burst(): failed (ret=%d)",
		      (void *)rxq->priv, ret);
		abort();
	}
	rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}

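/*
 * Caller-side sketch (illustrative): returned mbufs are owned by the
 * application afterwards and must eventually be freed with
 * rte_pktmbuf_free().
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t n = mlx5_rx_burst(rxq, burst, RTE_DIM(burst));
 *	uint16_t k;
 *
 *	for (k = 0; k != n; ++k)
 *		rte_pktmbuf_free(burst[k]); // or hand off for processing
 */
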
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_rxq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}