/*-
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Standard headers. */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif

#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"

/**
 * Manage TX completions.
 *
 * When sending a burst, mlx5_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to TX queue structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
txq_complete(struct txq *txq)
{
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	int wcs_n;

	if (unlikely(elts_comp == 0))
		return 0;
#ifdef DEBUG_SEND
	DEBUG("%p: processing %u work requests completions",
	      (void *)txq, elts_comp);
#endif
	wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
	if (unlikely(wcs_n == 0))
		return 0;
	if (unlikely(wcs_n < 0)) {
		DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
		      (void *)txq, wcs_n);
		return -1;
	}
	elts_comp -= wcs_n;
	assert(elts_comp <= txq->elts_comp);
	/*
	 * Assume WC status is successful as nothing can be done about it
	 * anyway.
	 */
	elts_tail += wcs_n * txq->elts_comp_cd_init;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;
	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
	return 0;
}
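
/*
 * Illustrative note (values assumed): with elts_comp_cd_init == 4, each
 * signaled completion acknowledges 4 posted sends, so wcs_n == 2 advances
 * elts_tail by 8 entries, wrapping around elts_n as needed.
 */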

/**
 * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
static uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
	unsigned int i;
	struct ibv_mr *mr;

	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
			return txq->mp2mr[i].lkey;
		}
	}
	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool %p", (void *)txq, (void *)mp);
	mr = ibv_reg_mr(txq->priv->pd,
			(void *)mp->elt_va_start,
			(mp->elt_va_end - mp->elt_va_start),
			(IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE));
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq);
		return (uint32_t)-1;
	}
	if (unlikely(i == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq);
		--i;
		/* Deregister the oldest entry (index 0) before shifting. */
		claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq->mp2mr[i].mp = mp;
	txq->mp2mr[i].mr = mr;
	txq->mp2mr[i].lkey = mr->lkey;
	DEBUG("%p: new MR lkey for MP %p: 0x%08" PRIx32,
	      (void *)txq, (void *)mp, txq->mp2mr[i].lkey);
	return txq->mp2mr[i].lkey;
}
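
/*
 * Note: ibv_reg_mr() registers the mempool's virtual address range with the
 * device so the HCA can DMA mbuf data directly; the lkey it returns must
 * accompany every buffer posted to the send queue, which is why it is
 * cached here per mempool instead of being looked up for each packet.
 */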

#if MLX5_PMD_SGE_WR_N > 1

/**
 * Copy scattered mbuf contents to a single linear buffer.
 *
 * @param[out] linear
 *   Linear output buffer.
 * @param[in] buf
 *   Scattered input buffer.
 *
 * @return
 *   Number of bytes copied to the output buffer or 0 if not large enough.
 */
static unsigned int
linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
{
	unsigned int size = 0;
	unsigned int offset;

	do {
		unsigned int len = DATA_LEN(buf);

		offset = size;
		size += len;
		if (unlikely(size > sizeof(*linear)))
			return 0;
		memcpy(&(*linear)[offset],
		       rte_pktmbuf_mtod(buf, uint8_t *),
		       len);
		buf = NEXT(buf);
	} while (buf != NULL);
	return size;
}
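
/*
 * Illustrative note (values assumed): with MLX5_PMD_SGE_WR_N == 4, a
 * 6-segment mbuf is sent as 3 regular SGEs plus a final SGE pointing at
 * the linear buffer, which holds a copy of the remaining 3 segments.
 */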

/**
 * Handle scattered buffers for mlx5_tx_burst().
 *
 * @param txq
 *   TX queue structure.
 * @param segs
 *   Number of segments in buf.
 * @param elt
 *   TX queue element to fill.
 * @param[in] buf
 *   Buffer to process.
 * @param elts_head
 *   Index of the linear buffer to use if necessary (normally txq->elts_head).
 * @param[out] sges
 *   Array filled with SGEs on success.
 *
 * @return
 *   A structure containing the processed packet size in bytes and the
 *   number of SGEs. Both fields are set to (unsigned int)-1 in case of
 *   failure.
 */
static struct tx_burst_sg_ret {
	unsigned int length;
	unsigned int num;
}
tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
	    struct rte_mbuf *buf, unsigned int elts_head,
	    struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N])
{
	unsigned int sent_size = 0;
	unsigned int j;
	int linearize = 0;
	/* When there are too many segments, extra segments are
	 * linearized in the last SGE. */
	if (unlikely(segs > RTE_DIM(*sges))) {
		segs = (RTE_DIM(*sges) - 1);
		linearize = 1;
	}
	/* Update element. */
	elt->buf = buf;
	/* Register segments as SGEs. */
	for (j = 0; (j != segs); ++j) {
		struct ibv_sge *sge = &(*sges)[j];
		uint32_t lkey;

		/* Retrieve Memory Region key for this memory pool. */
		lkey = txq_mp2mr(txq, buf->pool);
		if (unlikely(lkey == (uint32_t)-1)) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* Update SGE. */
		sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
		if (txq->priv->vf)
			rte_prefetch0((volatile void *)
				      (uintptr_t)sge->addr);
		sge->length = DATA_LEN(buf);
		sge->lkey = lkey;
		sent_size += sge->length;
		buf = NEXT(buf);
	}
	/* If buf is not NULL here and is not going to be linearized,
	 * nb_segs is not valid. */
	assert(j == segs);
	assert((buf == NULL) || (linearize));
	/* Linearize extra segments. */
	if (linearize) {
		struct ibv_sge *sge = &(*sges)[segs];
		linear_t *linear = &(*txq->elts_linear)[elts_head];
		unsigned int size = linearize_mbuf(linear, buf);

		assert(segs == (RTE_DIM(*sges) - 1));
		if (size == 0) {
			/* Invalid packet. */
			DEBUG("%p: packet too large to be linearized.",
			      (void *)txq);
			/* Clean up TX element. */
			elt->buf = NULL;
			goto stop;
		}
		/* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */
		if (RTE_DIM(*sges) == 1) {
			do {
				struct rte_mbuf *next = NEXT(buf);

				rte_pktmbuf_free_seg(buf);
				buf = next;
			} while (buf != NULL);
			elt->buf = NULL;
		}
		/* Update SGE. */
		sge->addr = (uintptr_t)&(*linear)[0];
		sge->length = size;
		sge->lkey = txq->mr_linear->lkey;
		sent_size += size;
		/* Include the SGE holding the linearized segments. */
		++segs;
	}
	return (struct tx_burst_sg_ret){
		.length = sent_size,
		.num = segs,
	};
stop:
	return (struct tx_burst_sg_ret){
		.length = (unsigned int)-1,
		.num = (unsigned int)-1,
	};
}

#endif /* MLX5_PMD_SGE_WR_N > 1 */

/**
 * DPDK callback for TX.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_tail = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	unsigned int elts_comp_cd = txq->elts_comp_cd;
	unsigned int elts_comp = 0;
	unsigned int i;
	unsigned int max;
	int err;

	assert(elts_comp_cd != 0);
	txq_complete(txq);
	max = (elts_n - (elts_head - elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max == 0)
		return 0;
	if (max > pkts_n)
		max = pkts_n;
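	/*
	 * Keeping one entry unused means elts_head can never catch up with
	 * elts_tail, so a full ring stays distinguishable from an empty one.
	 */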
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf = pkts[i];
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		unsigned int segs = NB_SEGS(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
		unsigned int sent_size = 0;
#endif
		uint32_t send_flags = 0;

		/* Clean up old buffer. */
		if (likely(elt->buf != NULL)) {
			struct rte_mbuf *tmp = elt->buf;

			/* Faster than rte_pktmbuf_free(). */
			do {
				struct rte_mbuf *next = NEXT(tmp);

				rte_pktmbuf_free_seg(tmp);
				tmp = next;
			} while (tmp != NULL);
		}
		/* Request TX completion. */
		if (unlikely(--elts_comp_cd == 0)) {
			elts_comp_cd = txq->elts_comp_cd_init;
			++elts_comp;
			send_flags |= IBV_EXP_QP_BURST_SIGNALED;
		}
		/* Should we enable HW CKSUM offload? */
		if (buf->ol_flags &
		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
			/* HW does not support checksum offloads at arbitrary
			 * offsets but automatically recognizes the packet
			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
			 * tunnels are currently supported. */
			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
		}
		if (likely(segs == 1)) {
			uintptr_t addr;
			uint32_t length;
			uint32_t lkey;

			/* Retrieve buffer information. */
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			length = DATA_LEN(buf);
			/* Retrieve Memory Region key for this memory pool. */
			lkey = txq_mp2mr(txq, buf->pool);
			if (unlikely(lkey == (uint32_t)-1)) {
				/* MR does not exist. */
				DEBUG("%p: unable to get MP <-> MR"
				      " association", (void *)txq);
				/* Clean up TX element. */
				elt->buf = NULL;
				goto stop;
			}
			/* Update element. */
			elt->buf = buf;
			if (txq->priv->vf)
				rte_prefetch0((volatile void *)
					      (uintptr_t)addr);
			RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
			/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
			if (length <= txq->max_inline)
				err = txq->if_qp->send_pending_inline
					(txq->qp,
					 (void *)addr,
					 length,
					 send_flags);
			else
#endif
				err = txq->if_qp->send_pending
					(txq->qp,
					 addr,
					 length,
					 lkey,
					 send_flags);
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += length;
#endif
		} else {
#if MLX5_PMD_SGE_WR_N > 1
			struct ibv_sge sges[MLX5_PMD_SGE_WR_N];
			struct tx_burst_sg_ret ret;

			ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
					  &sges);
			if (ret.length == (unsigned int)-1)
				goto stop;
			RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
			/* Put SG list into send queue. */
			err = txq->if_qp->send_pending_sg_list
				(txq->qp,
				 sges,
				 ret.num,
				 send_flags);
			if (unlikely(err))
				goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
			sent_size += ret.length;
#endif
#else /* MLX5_PMD_SGE_WR_N > 1 */
			DEBUG("%p: TX scattered buffers support not"
			      " compiled in", (void *)txq);
			goto stop;
#endif /* MLX5_PMD_SGE_WR_N > 1 */
		}
		elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment sent bytes counter. */
		txq->stats.obytes += sent_size;
#endif
	}
stop:
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment sent packets counter. */
	txq->stats.opackets += i;
#endif
	/* Ring QP doorbell. */
	err = txq->if_qp->send_flush(txq->qp);
	if (unlikely(err)) {
		/* A nonzero value is not supposed to be returned.
		 * Nothing can be done about it. */
		DEBUG("%p: send_flush() failed with error %d",
		      (void *)txq, err);
	}
	txq->elts_head = elts_head;
	txq->elts_comp += elts_comp;
	txq->elts_comp_cd = elts_comp_cd;
	return i;
}
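
/*
 * Note: the send_pending*() calls above only queue work requests; the
 * doorbell in send_flush() is what hands the whole burst to the HCA, so
 * it is rung once per burst rather than once per packet.
 */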

/**
 * Translate RX completion flags to packet type.
 *
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
	uint32_t pkt_type;

	if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_INNER_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_INNER_L3_IPV6);
	else
		pkt_type =
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV4_PACKET,
				  RTE_PTYPE_L3_IPV4) |
			TRANSPOSE(flags,
				  IBV_EXP_CQ_RX_IPV6_PACKET,
				  RTE_PTYPE_L3_IPV6);
	return pkt_type;
}
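
/*
 * Note: the TRANSPOSE() helper used above shifts the "from" bit(s) of its
 * argument into the "to" position, so these mappings translate completion
 * flags into mbuf packet-type bits without branching on each flag.
 */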

/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param flags
 *   RX completion flags returned by poll_length_flags().
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
{
	uint32_t ol_flags = 0;

	if (rxq->csum)
		ol_flags |=
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	/*
	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
	 * (its value is 0).
	 */
	if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
		ol_flags |=
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
				  PKT_RX_IP_CKSUM_BAD) |
			TRANSPOSE(~flags,
				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
				  PKT_RX_L4_CKSUM_BAD);
	return ol_flags;
}
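
/*
 * Note: TRANSPOSE() takes ~flags here, so a *_CKSUM_BAD bit is set exactly
 * when the corresponding *_CSUM_OK completion bit is absent.
 */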

/**
 * DPDK callback for RX with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp;
	const unsigned int elts_n = rxq->elts_n;
	unsigned int elts_head = rxq->elts_head;
	struct ibv_recv_wr head;
	struct ibv_recv_wr **next = &head.next;
	struct ibv_recv_wr *bad_wr;
	unsigned int i;
	unsigned int pkts_ret = 0;
	int ret;

	if (unlikely(!rxq->sp))
		return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n);
	if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */
		return 0;
	for (i = 0; (i != pkts_n); ++i) {
		struct rxq_elt_sp *elt = &(*elts)[elts_head];
		struct ibv_recv_wr *wr = &elt->wr;
		uint64_t wr_id = wr->wr_id;
		unsigned int len;
		unsigned int pkt_buf_len;
		struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */
		struct rte_mbuf **pkt_buf_next = &pkt_buf;
		unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
		unsigned int j = 0;
		uint32_t flags;

		/* Sanity checks. */
#ifdef NDEBUG
		(void)wr_id;
#endif
		assert(wr_id < rxq->elts_n);
		assert(wr->sg_list == elt->sges);
		assert(wr->num_sge == RTE_DIM(elt->sges));
		assert(elts_head < rxq->elts_n);
		assert(rxq->elts_head < rxq->elts_n);
		ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
						    &flags);
		if (unlikely(ret < 0)) {
			struct ibv_wc wc;
			int wcs_n;

			DEBUG("rxq=%p, poll_length() failed (ret=%d)",
			      (void *)rxq, ret);
			/* ibv_poll_cq() must be used in case of failure. */
			wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
			if (unlikely(wcs_n == 0))
				break;
			if (unlikely(wcs_n < 0)) {
				DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
				      (void *)rxq, wcs_n);
				break;
			}
			assert(wcs_n == 1);
			if (unlikely(wc.status != IBV_WC_SUCCESS)) {
				/* Whatever, just repost the offending WR. */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
				      " completion status (%d): %s",
				      (void *)rxq, wc.wr_id, wc.status,
				      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
				/* Increment dropped packets counter. */
				++rxq->stats.idropped;
#endif
				/* Link completed WRs together for repost. */
				*next = wr;
				next = &wr->next;
				goto repost;
			}
			ret = wc.byte_len;
		}
		if (ret == 0)
			break;
		len = ret;
		pkt_buf_len = len;
		/* Link completed WRs together for repost. */
		*next = wr;
		next = &wr->next;
		/*
		 * Replace spent segments with new ones, concatenate and
		 * return them as pkt_buf.
		 */
		while (1) {
			struct ibv_sge *sge = &elt->sges[j];
			struct rte_mbuf *seg = elt->bufs[j];
			struct rte_mbuf *rep;
			unsigned int seg_tailroom;

			/*
			 * Fetch initial bytes of packet descriptor into a
			 * cacheline while allocating rep.
			 */
			rte_prefetch0(seg);
			rep = __rte_mbuf_raw_alloc(rxq->mp);
			if (unlikely(rep == NULL)) {
				/*
				 * Unable to allocate a replacement mbuf,
				 * repost WR.
				 */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ":"
				      " can't allocate a new mbuf",
				      (void *)rxq, wr_id);
				if (pkt_buf != NULL) {
					*pkt_buf_next = NULL;
					rte_pktmbuf_free(pkt_buf);
				}
				/* Increment out of memory counters. */
				++rxq->stats.rx_nombuf;
				++rxq->priv->dev->data->rx_mbuf_alloc_failed;
				goto repost;
			}
#ifndef NDEBUG
			/* Poison user-modifiable fields in rep. */
			NEXT(rep) = (void *)((uintptr_t)-1);
			SET_DATA_OFF(rep, 0xdead);
			DATA_LEN(rep) = 0xd00d;
			PKT_LEN(rep) = 0xdeadd00d;
#endif
			assert(rep->buf_len == seg->buf_len);
			assert(rep->buf_len == rxq->mb_len);
			/* Reconfigure sge to use rep instead of seg. */
			assert(sge->lkey == rxq->mr->lkey);
			sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
			elt->bufs[j] = rep;
			++j;
			/* Update pkt_buf if it's the first segment, or link
			 * seg to the previous one and update pkt_buf_next. */
			*pkt_buf_next = seg;
			pkt_buf_next = &NEXT(seg);
			/* Update seg information. */
			seg_tailroom = (seg->buf_len - seg_headroom);
			assert(sge->length == seg_tailroom);
			SET_DATA_OFF(seg, seg_headroom);
			if (likely(len <= seg_tailroom)) {
				/* Last segment. */
				DATA_LEN(seg) = len;
				PKT_LEN(seg) = len;
				/* Sanity checks. */
				assert(rte_pktmbuf_headroom(seg) ==
				       seg_headroom);
				assert(rte_pktmbuf_tailroom(seg) ==
				       (seg_tailroom - len));
				break;
			}
			DATA_LEN(seg) = seg_tailroom;
			PKT_LEN(seg) = seg_tailroom;
			/* Sanity checks. */
			assert(rte_pktmbuf_headroom(seg) == seg_headroom);
			assert(rte_pktmbuf_tailroom(seg) == 0);
			/* Fix len and clear headroom for next segments. */
			len -= seg_tailroom;
			seg_headroom = 0;
		}
		/* Update head and tail segments. */
		*pkt_buf_next = NULL;
		assert(pkt_buf != NULL);
		assert(j != 0);
		NB_SEGS(pkt_buf) = j;
		PORT(pkt_buf) = rxq->port_id;
		PKT_LEN(pkt_buf) = pkt_buf_len;
		pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
		pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);

		/* Return packet. */
		*(pkts++) = pkt_buf;
		++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt_buf_len;
#endif
repost:
		if (++elts_head >= elts_n)
			elts_head = 0;
		continue;
	}
	if (unlikely(i == 0))
		return 0;
	*next = NULL;
	/* Repost WRs. */
#ifdef DEBUG_RECV
	DEBUG("%p: reposting %d WRs", (void *)rxq, i);
#endif
	ret = ibv_post_recv(rxq->qp, head.next, &bad_wr);
	if (unlikely(ret)) {
		/* Inability to repost WRs is fatal. */
		DEBUG("%p: ibv_post_recv(): failed for WR %p: %s",
		      (void *)rxq,
		      (void *)bad_wr,
		      strerror(ret));
		abort();
	}
	rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}
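
/*
 * Note: completed WRs are chained through their next pointers into a list
 * headed at head.next, so a single ibv_post_recv() call reposts every
 * consumed element of the burst.
 */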

/**
 * DPDK callback for RX.
 *
 * The following function is the same as mlx5_rx_burst_sp(), except it doesn't
 * manage scattered packets. Improves performance when MRU is lower than the
 * size of the first segment.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = (struct rxq *)dpdk_rxq;
	struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp;
	const unsigned int elts_n = rxq->elts_n;
	unsigned int elts_head = rxq->elts_head;
	struct ibv_sge sges[pkts_n];
	unsigned int i;
	unsigned int pkts_ret = 0;
	int ret;

	if (unlikely(rxq->sp))
		return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n);
	for (i = 0; (i != pkts_n); ++i) {
		struct rxq_elt *elt = &(*elts)[elts_head];
		struct ibv_recv_wr *wr = &elt->wr;
		uint64_t wr_id = wr->wr_id;
		unsigned int len;
		struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr -
			WR_ID(wr_id).offset);
		struct rte_mbuf *rep;
		uint32_t flags;

		/* Sanity checks. */
		assert(WR_ID(wr_id).id < rxq->elts_n);
		assert(wr->sg_list == &elt->sge);
		assert(wr->num_sge == 1);
		assert(elts_head < rxq->elts_n);
		assert(rxq->elts_head < rxq->elts_n);
		/*
		 * Fetch initial bytes of packet descriptor into a
		 * cacheline while allocating rep.
		 */
		rte_prefetch0(seg);
		rte_prefetch0(&seg->cacheline1);
		ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
						    &flags);
		if (unlikely(ret < 0)) {
			struct ibv_wc wc;
			int wcs_n;

			DEBUG("rxq=%p, poll_length() failed (ret=%d)",
			      (void *)rxq, ret);
			/* ibv_poll_cq() must be used in case of failure. */
			wcs_n = ibv_poll_cq(rxq->cq, 1, &wc);
			if (unlikely(wcs_n == 0))
				break;
			if (unlikely(wcs_n < 0)) {
				DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)",
				      (void *)rxq, wcs_n);
				break;
			}
			assert(wcs_n == 1);
			if (unlikely(wc.status != IBV_WC_SUCCESS)) {
				/* Whatever, just repost the offending WR. */
				DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work"
				      " completion status (%d): %s",
				      (void *)rxq, wc.wr_id, wc.status,
				      ibv_wc_status_str(wc.status));
#ifdef MLX5_PMD_SOFT_COUNTERS
				/* Increment dropped packets counter. */
				++rxq->stats.idropped;
#endif
				/* Add SGE to array for repost. */
				sges[i] = elt->sge;
				goto repost;
			}
			ret = wc.byte_len;
		}
		if (ret == 0)
			break;
		len = ret;
		rep = __rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			/*
			 * Unable to allocate a replacement mbuf,
			 * repost WR.
			 */
			DEBUG("rxq=%p, wr_id=%" PRIu32 ":"
			      " can't allocate a new mbuf",
			      (void *)rxq, WR_ID(wr_id).id);
			/* Increment out of memory counters. */
			++rxq->stats.rx_nombuf;
			++rxq->priv->dev->data->rx_mbuf_alloc_failed;
			goto repost;
		}

		/* Reconfigure sge to use rep instead of seg. */
		elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM;
		assert(elt->sge.lkey == rxq->mr->lkey);
		WR_ID(wr->wr_id).offset =
			(((uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM) -
			 (uintptr_t)rep);
		assert(WR_ID(wr->wr_id).id == WR_ID(wr_id).id);

		/* Add SGE to array for repost. */
		sges[i] = elt->sge;

		/* Update seg information. */
		SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM);
		NB_SEGS(seg) = 1;
		PORT(seg) = rxq->port_id;
		NEXT(seg) = NULL;
		PKT_LEN(seg) = len;
		DATA_LEN(seg) = len;
		seg->packet_type = rxq_cq_to_pkt_type(flags);
		seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);

		/* Return packet. */
		*(pkts++) = seg;
		++pkts_ret;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += len;
#endif
repost:
		if (++elts_head >= elts_n)
			elts_head = 0;
		continue;
	}
	if (unlikely(i == 0))
		return 0;
	/* Repost WRs. */
#ifdef DEBUG_RECV
	DEBUG("%p: reposting %u WRs", (void *)rxq, i);
#endif
	ret = rxq->if_qp->recv_burst(rxq->qp, sges, i);
	if (unlikely(ret)) {
		/* Inability to repost WRs is fatal. */
		DEBUG("%p: recv_burst(): failed (ret=%d)",
		      (void *)rxq, ret);
		abort();
	}
	rxq->elts_head = elts_head;
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += pkts_ret;
#endif
	return pkts_ret;
}
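
/*
 * Note: unlike mlx5_rx_burst_sp(), this path reposts by handing the
 * refreshed SGE array to if_qp->recv_burst(), which avoids building a
 * linked WR list in the data path.
 */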

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_rxq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}