/*
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of 6WIND S.A. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Data plane functions for mlx4 driver.
 */
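/*
 * The Rx/Tx burst functions below are plugged into the device's
 * rte_eth_dev structure by the control path; a minimal sketch (the exact
 * assignments live in the PMD's control path code, e.g. mlx4.c):
 *
 *	dev->rx_pkt_burst = mlx4_rx_burst;
 *	dev->tx_pkt_burst = mlx4_tx_burst;
 */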
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx4.h"
#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
#define WQE_ONE_DATA_SEG_SIZE \
	(sizeof(struct mlx4_wqe_ctrl_seg) + sizeof(struct mlx4_wqe_data_seg))
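/*
 * With the mlx4 PRM segment layouts, the control and data segments are
 * 16 bytes each, so a WQE carrying a single data segment occupies
 * 32 bytes, i.e. half of one 64-byte Tx basic block (TXBB).
 */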
/**
 * Pointer-value pair structure used in tx_post_send for saving the first
 * DWORD (32 bits) of a TXBB.
 */
struct pv {
	volatile struct mlx4_wqe_data_seg *dseg;
	uint32_t val;
};
/**
 * Stamp a WQE so it won't be reused by the HW.
 *
 * This routine is used when a WQE used by the chip is being freed, or when
 * building a WQ entry has failed, leaving partial information on the queue.
 * The number of basic blocks to stamp is taken from the size written in
 * the WQ entry itself.
 *
 * @param sq
 *   Pointer to the SQ structure.
 * @param index
 *   Index of the freed WQE.
 * @param owner
 *   The value of the WQE owner bit to use in the stamp.
 *
 * @return
 *   The number of Tx basic blocks (TXBB) the WQE contained.
 */
static uint32_t
mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
{
	uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
					  (!!owner << MLX4_SQ_STAMP_SHIFT));
	volatile uint8_t *wqe = mlx4_get_send_wqe(sq,
						  (index & sq->txbb_cnt_mask));
	volatile uint32_t *ptr = (volatile uint32_t *)wqe;
	uint32_t i;
	int num_txbbs;
	uint32_t txbbs_size;

	/* Extract the size from the control segment of the WQE. */
	num_txbbs = MLX4_SIZE_TO_TXBBS((((volatile struct mlx4_wqe_ctrl_seg *)
					 wqe)->fence_size & 0x3f) << 4);
	txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
	/* Optimize the common case when there is no wrap-around. */
	if (wqe + txbbs_size <= sq->eob) {
		/* Stamp the freed descriptor. */
		for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
			*ptr = stamp;
			ptr += MLX4_SQ_STAMP_DWORDS;
		}
	} else {
		/* Stamp the freed descriptor. */
		for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
			*ptr = stamp;
			ptr += MLX4_SQ_STAMP_DWORDS;
			if ((volatile uint8_t *)ptr >= sq->eob) {
				ptr = (volatile uint32_t *)sq->buf;
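				/*
				 * Past the wrap-around point the hardware
				 * ownership bit of each TXBB is inverted,
				 * so the owner bit encoded in the stamp
				 * (the MSB of the DWORD) must be flipped
				 * as well.
				 */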
				stamp ^= RTE_BE32(0x80000000);
			}
		}
	}
	return num_txbbs;
}
/**
 * Manage Tx completions.
 *
 * When sending a burst, mlx4_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param elts_n
 *   Number of elements in the Tx queue ring.
 * @param sq
 *   Pointer to the SQ structure.
 *
 * @return
 *   0 on success, -1 on failure.
 */
static int
mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
		  struct mlx4_sq *sq)
{
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	struct mlx4_cq *cq = &txq->mcq;
	volatile struct mlx4_cqe *cqe;
	uint32_t cons_index = cq->cons_index;
	uint16_t new_index;
	uint16_t nr_txbbs = 0;
	int pkts = 0;

	/*
	 * Traverse over all CQ entries reported and handle each WQ entry
	 * reported by them.
	 */
	do {
		cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
		if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		    !!(cons_index & cq->cqe_cnt)))
			break;
		/*
		 * Make sure we read the CQE after we read the ownership bit.
		 */
		rte_io_rmb();
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			volatile struct mlx4_err_cqe *cqe_err =
				(volatile struct mlx4_err_cqe *)cqe;
			ERROR("%p CQE error - vendor syndrome: 0x%x"
			      " syndrome: 0x%x\n",
			      (void *)txq, cqe_err->vendor_err,
			      cqe_err->syndrome);
		}
		/* Get WQE index reported in the CQE. */
		new_index =
			rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
		do {
			/* Free next descriptor. */
			nr_txbbs +=
				mlx4_txq_stamp_freed_wqe(sq,
				     (sq->tail + nr_txbbs) & sq->txbb_cnt_mask,
				     !!((sq->tail + nr_txbbs) & sq->txbb_cnt));
			pkts++;
		} while (((sq->tail + nr_txbbs) & sq->txbb_cnt_mask) !=
			 new_index);
		cons_index++;
	} while (1);
	if (unlikely(pkts == 0))
		return 0;
	/* Update CQ. */
	cq->cons_index = cons_index;
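	/*
	 * Publish the new CQ consumer index to the device through the
	 * doorbell record; only the low MLX4_CQ_DB_CI_MASK bits are
	 * meaningful to the hardware.
	 */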
	*cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & MLX4_CQ_DB_CI_MASK);
	sq->tail = sq->tail + nr_txbbs;
	/* Update the list of packets posted for transmission. */
	elts_comp -= pkts;
	assert(elts_comp <= txq->elts_comp);
	/*
	 * Assume completion status is successful as nothing can be done about
	 * it anyway.
	 */
	elts_tail += pkts;
	if (elts_tail >= elts_n)
		elts_tail -= elts_n;
	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
	return 0;
}
/**
 * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx4_txq_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}
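/**
 * Build a Tx WQE for a multi-segment packet.
 *
 * @param buf
 *   Pointer to the first mbuf of the packet.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param[out] pctrl
 *   Pointer used to return the address of the WQE control segment.
 *
 * @return
 *   The number of TXBBs used by the WQE on success, -1 on error.
 */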
static int
mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
		   volatile struct mlx4_wqe_ctrl_seg **pctrl)
{
	int wqe_real_size;
	int nr_txbbs;
	struct pv *pv = (struct pv *)txq->bounce_buf;
	struct mlx4_sq *sq = &txq->msq;
	uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
	volatile struct mlx4_wqe_ctrl_seg *ctrl;
	volatile struct mlx4_wqe_data_seg *dseg;
	struct rte_mbuf *sbuf;
	uint32_t lkey;
	uintptr_t addr;
	uint32_t byte_count;
	int pv_counter = 0;

	/* Calculate the needed work queue entry size for this packet. */
	wqe_real_size = sizeof(volatile struct mlx4_wqe_ctrl_seg) +
		buf->nb_segs * sizeof(volatile struct mlx4_wqe_data_seg);
	nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
	/*
	 * Check that there is room for this WQE in the send queue and that
	 * the WQE size is legal.
	 */
	if (((sq->head - sq->tail) + nr_txbbs +
	     sq->headroom_txbbs) >= sq->txbb_cnt ||
	    nr_txbbs > MLX4_MAX_WQE_TXBBS) {
		return -1;
	}
	/* Get the control and data entries of the WQE. */
	ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
		mlx4_get_send_wqe(sq, head_idx);
	dseg = (volatile struct mlx4_wqe_data_seg *)
		((uintptr_t)ctrl + sizeof(struct mlx4_wqe_ctrl_seg));
	*pctrl = ctrl;
	/* Fill the data segments with buffer information. */
	for (sbuf = buf; sbuf != NULL; sbuf = sbuf->next, dseg++) {
		addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
		rte_prefetch0((volatile void *)addr);
		/* Handle WQE wraparound. */
		if (dseg >= (volatile struct mlx4_wqe_data_seg *)sq->eob)
			dseg = (volatile struct mlx4_wqe_data_seg *)sq->buf;
		dseg->addr = rte_cpu_to_be_64(addr);
		/* Memory region key (big endian) for this memory pool. */
		lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
		dseg->lkey = rte_cpu_to_be_32(lkey);
#ifndef NDEBUG
		/* Verify that the MP to MR association exists. */
		if (unlikely(dseg->lkey == rte_cpu_to_be_32((uint32_t)-1))) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			/*
			 * Restamp entry in case of failure.
			 * Make sure that size is written correctly.
			 * Note that we give ownership to the SW, not the HW.
			 */
			wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
				buf->nb_segs * sizeof(struct mlx4_wqe_data_seg);
			ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
			mlx4_txq_stamp_freed_wqe(sq, head_idx,
					(sq->head & sq->txbb_cnt) ? 0 : 1);
			return -1;
		}
#endif /* NDEBUG */
		if (likely(sbuf->data_len)) {
			byte_count = rte_cpu_to_be_32(sbuf->data_len);
		} else {
			/*
			 * Zero length segment is treated as an inline segment
			 * with zero data.
			 */
			byte_count = RTE_BE32(0x80000000);
		}
		/*
		 * If the data segment is not at the beginning of a
		 * Tx basic block (TXBB) then write the byte count,
		 * else postpone the writing to just before updating the
		 * control segment.
		 */
		if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
#if RTE_CACHE_LINE_SIZE < 64
			/*
			 * Need a barrier here before writing the byte_count
			 * fields to make sure that all the data is visible
			 * before the byte_count field is set.
			 * Otherwise, if the segment begins a new cacheline,
			 * the HCA prefetcher could grab the 64-byte chunk and
			 * get a valid (!= 0xffffffff) byte count but stale
			 * data, and end up sending the wrong data.
			 */
			rte_io_wmb();
#endif /* RTE_CACHE_LINE_SIZE */
			dseg->byte_count = byte_count;
		} else {
			/*
			 * This data segment starts at the beginning of a new
			 * TXBB, so we need to postpone its byte_count writing
			 * for later.
			 */
			pv[pv_counter].dseg = dseg;
			pv[pv_counter++].val = byte_count;
		}
	}
	/* Write the first DWORD of each TXBB saved earlier. */
	if (pv_counter) {
		/* Need a barrier here before writing the byte_count. */
		rte_io_wmb();
		for (--pv_counter; pv_counter >= 0; pv_counter--)
			pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
	}
	/* Fill the control parameters for this packet. */
	ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
	return nr_txbbs;
}
/**
 * DPDK callback for Tx.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int bytes_sent = 0;
	unsigned int i;
	unsigned int max;
	struct mlx4_sq *sq = &txq->msq;
	int nr_txbbs;

	assert(txq->elts_comp_cd != 0);
	if (likely(txq->elts_comp != 0))
		mlx4_txq_complete(txq, elts_n, sq);
	max = (elts_n - (elts_head - txq->elts_tail));
	if (max > elts_n)
		max -= elts_n;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max > pkts_n)
		max = pkts_n;
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf = pkts[i];
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		uint32_t owner_opcode = MLX4_OPCODE_SEND;
		volatile struct mlx4_wqe_ctrl_seg *ctrl;
		volatile struct mlx4_wqe_data_seg *dseg;
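		/*
		 * srcrb aliases the WQE srcrb_flags word with 16-bit access
		 * so that its first half can be overwritten with the start
		 * of the destination MAC address when eSwitch loopback is
		 * enabled below.
		 */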
		union {
			uint32_t flags;
			uint16_t flags16[2];
		} srcrb;
		uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
		uint32_t lkey;
		uintptr_t addr;

		/* Clean up old buffer. */
		if (likely(elt->buf != NULL)) {
			struct rte_mbuf *tmp = elt->buf;

#ifndef NDEBUG
			/* Poisoning. */
			memset(elt, 0x66, sizeof(*elt));
#endif
			/* Faster than rte_pktmbuf_free(). */
			do {
				struct rte_mbuf *next = tmp->next;

				rte_pktmbuf_free_seg(tmp);
				tmp = next;
			} while (tmp != NULL);
		}
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
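		/*
		 * Single-segment packets are built inline below; scattered
		 * ones are delegated to mlx4_tx_burst_segs().
		 */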
		if (buf->nb_segs == 1) {
			/*
			 * Check that there is room for this WQE in the send
			 * queue and that the WQE size is legal.
			 */
			if (((sq->head - sq->tail) + 1 + sq->headroom_txbbs) >=
			    sq->txbb_cnt || 1 > MLX4_MAX_WQE_TXBBS) {
				elt->buf = NULL;
				break;
			}
			/* Get the control and data entries of the WQE. */
			ctrl = (volatile struct mlx4_wqe_ctrl_seg *)
				mlx4_get_send_wqe(sq, head_idx);
			dseg = (volatile struct mlx4_wqe_data_seg *)
				((uintptr_t)ctrl +
				 sizeof(struct mlx4_wqe_ctrl_seg));
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			rte_prefetch0((volatile void *)addr);
			/* Handle WQE wraparound. */
			if (dseg >=
			    (volatile struct mlx4_wqe_data_seg *)sq->eob)
				dseg = (volatile struct mlx4_wqe_data_seg *)
					sq->buf;
			dseg->addr = rte_cpu_to_be_64(addr);
			/* Memory region key (big endian). */
			lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
			dseg->lkey = rte_cpu_to_be_32(lkey);
#ifndef NDEBUG
			if (unlikely(dseg->lkey ==
				     rte_cpu_to_be_32((uint32_t)-1))) {
				/* MR does not exist. */
				DEBUG("%p: unable to get MP <-> MR association",
				      (void *)txq);
				/*
				 * Restamp entry in case of failure.
				 * Make sure that size is written correctly.
				 * Note that we give ownership to the SW,
				 * not the HW.
				 */
				ctrl->fence_size =
					(WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
				mlx4_txq_stamp_freed_wqe(sq, head_idx,
					(sq->head & sq->txbb_cnt) ? 0 : 1);
				elt->buf = NULL;
				break;
			}
#endif /* NDEBUG */
			/*
			 * The data segment is never TXBB-aligned here, so
			 * writing byte_count needs no barrier.
			 */
			dseg->byte_count = rte_cpu_to_be_32(buf->data_len);
			/* Fill the control parameters for this packet. */
			ctrl->fence_size = (WQE_ONE_DATA_SEG_SIZE >> 4) & 0x3f;
			nr_txbbs = 1;
		} else {
			nr_txbbs = mlx4_tx_burst_segs(buf, txq, &ctrl);
			if (nr_txbbs < 0) {
				elt->buf = NULL;
				break;
			}
		}
		/*
		 * For raw Ethernet, the SOLICIT flag is used to indicate
		 * that no ICRC should be calculated.
		 */
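		/*
		 * elts_comp_cd counts down in TXBB units; when it drops to
		 * zero, a completion is requested through
		 * MLX4_WQE_CTRL_CQ_UPDATE so that a single CQE covers a
		 * whole batch of sends.
		 */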
		txq->elts_comp_cd -= nr_txbbs;
		if (unlikely(txq->elts_comp_cd <= 0)) {
			txq->elts_comp_cd = txq->elts_comp_cd_init;
			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
					       MLX4_WQE_CTRL_CQ_UPDATE);
		} else {
			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
		}
		/* Enable HW checksum offload if requested. */
		if (txq->csum &&
		    (buf->ol_flags &
		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
			const uint64_t is_tunneled = (buf->ol_flags &
						      (PKT_TX_TUNNEL_GRE |
						       PKT_TX_TUNNEL_VXLAN));

			if (is_tunneled && txq->csum_l2tun) {
				owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
						MLX4_WQE_CTRL_IL4_HDR_CSUM;
				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
					srcrb.flags |=
					    RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
			} else {
				srcrb.flags |=
					RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
						 MLX4_WQE_CTRL_TCP_UDP_CSUM);
			}
		}
		if (txq->lb) {
			/*
			 * Copy destination MAC address to the WQE, this allows
			 * loopback in eSwitch, so that VFs and PF can
			 * communicate with each other.
			 */
			srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
			ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
					      sizeof(uint16_t)));
		} else {
			ctrl->imm = 0;
		}
		ctrl->srcrb_flags = srcrb.flags;
		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		rte_wmb();
		ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
					      ((sq->head & sq->txbb_cnt) ?
					       MLX4_BIT_WQE_OWN : 0));
		sq->head += nr_txbbs;
		elt->buf = buf;
		bytes_sent += buf->pkt_len;
		elts_head = elts_head_next;
	}
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
	/* Increment send statistics counters. */
	txq->stats.opackets += i;
	txq->stats.obytes += bytes_sent;
	/* Make sure that descriptors are written before doorbell record. */
	rte_wmb();
	/* Ring QP doorbell. */
	rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
	txq->elts_head = elts_head;
	txq->elts_comp += i;
	return i;
}
/**
 * Translate Rx completion flags to packet type.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 *
 * @return
 *   Packet type in mbuf format.
 */
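/*
 * This function and rxq_cq_to_ol_flags() below rely on mlx4_transpose()
 * (mlx4_utils.h) to move a masked bit field from one bit position to
 * another, which lets CQE status bits map directly onto mbuf flag bits.
 */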
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
{
	uint32_t pkt_type;

	if (flags & MLX4_CQE_L2_TUNNEL)
		pkt_type =
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_IPV4,
				       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_IPV4_PKT,
				       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN);
	else
		pkt_type = mlx4_transpose(flags,
					  MLX4_CQE_STATUS_IPV4_PKT,
					  RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
	return pkt_type;
}
/**
 * Translate Rx completion flags to offload flags.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   Offload flags (ol_flags) in mbuf format.
 */
static inline uint32_t
rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
{
	uint32_t ol_flags = 0;

	if (csum)
		ol_flags |=
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
				       PKT_RX_L4_CKSUM_GOOD);
	if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
		ol_flags |=
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_IPOK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_L4_CSUM,
				       PKT_RX_L4_CKSUM_GOOD);
	return ol_flags;
}
/**
 * Extract checksum information from CQE flags.
 *
 * @param cqe
 *   Pointer to CQE structure.
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   CQE checksum information.
 */
static inline uint32_t
mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
{
	uint32_t flags = 0;

	/*
	 * The relevant bits occupy different positions in their respective
	 * CQE fields, so they can be combined into a single 32-bit variable
	 * without colliding.
	 */
	if (csum)
		flags = (rte_be_to_cpu_32(cqe->status) &
			 MLX4_CQE_STATUS_IPV4_CSUM_OK);
	if (csum_l2tun)
		flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
			  (MLX4_CQE_L2_TUNNEL |
			   MLX4_CQE_L2_TUNNEL_IPOK |
			   MLX4_CQE_L2_TUNNEL_L4_CSUM |
			   MLX4_CQE_L2_TUNNEL_IPV4));
	return flags;
}
/**
 * Poll one CQE from CQ.
 *
 * @param rxq
 *   Pointer to the receive queue structure.
 * @param[out] out
 *   Just polled CQE.
 *
 * @return
 *   Number of bytes of the packet reported by the CQE, 0 when no
 *   completion is available.
 */
static int
mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
{
	int ret = 0;
	volatile struct mlx4_cqe *cqe = NULL;
	struct mlx4_cq *cq = &rxq->mcq;

	cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
	if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
	    !!(cq->cons_index & cq->cqe_cnt))
		goto out;
	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rte_rmb();
	assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
	assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
	       MLX4_CQE_OPCODE_ERROR);
	ret = rte_be_to_cpu_32(cqe->byte_cnt);
	++cq->cons_index;
out:
	*out = cqe;
	return ret;
}
/**
 * DPDK callback for Rx with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = dpdk_rxq;
	const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
	const uint16_t sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	unsigned int i = 0;
	uint32_t rq_ci = rxq->rq_ci << sges_n;
	int len = 0;

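	/*
	 * Each WR spans 2^sges_n contiguous SGEs, so the low sges_n bits of
	 * rq_ci select a segment within the current stride while the
	 * remaining bits select the WR itself.
	 */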
	while (pkts_n) {
		volatile struct mlx4_cqe *cqe;
		uint32_t idx = rq_ci & wr_cnt;
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];

		/* Update the 'next' pointer of the previous segment. */
		if (pkt)
			seg->next = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(scat);
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * No buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			while (pkt != seg) {
				assert(pkt != (*rxq->elts)[idx]);
				rep = pkt->next;
				pkt->next = NULL;
				pkt->nb_segs = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
			break;
		}
		if (!pkt) {
			/* Looking for the new packet. */
			len = mlx4_cq_poll_one(rxq, &cqe);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			if (unlikely(len < 0)) {
				/* Rx error, packet is likely too large. */
				rte_mbuf_raw_free(rep);
				++rxq->stats.idropped;
				goto skip;
			}
			pkt = seg;
			if (rxq->csum | rxq->csum_l2tun) {
				uint32_t flags =
					mlx4_cqe_flags(cqe,
						       rxq->csum,
						       rxq->csum_l2tun);

				pkt->ol_flags =
					rxq_cq_to_ol_flags(flags,
							   rxq->csum,
							   rxq->csum_l2tun);
				pkt->packet_type = rxq_cq_to_pkt_type(flags);
			} else {
				pkt->packet_type = 0;
				pkt->ol_flags = 0;
			}
			pkt->pkt_len = len;
		}
		rep->nb_segs = 1;
		rep->port = rxq->port_id;
		rep->data_len = seg->data_len;
		rep->data_off = seg->data_off;
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		if (len > seg->data_len) {
			len -= seg->data_len;
			++pkt->nb_segs;
			++rq_ci;
			continue;
		}
		/* The last segment. */
		seg->data_len = len;
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt->pkt_len;
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
skip:
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	rte_wmb();
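	/*
	 * Expose the new indexes to the device: the RQ doorbell record for
	 * posted buffers and the CQ doorbell record for consumed
	 * completions.
	 */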
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	*rxq->mcq.set_ci_db =
		rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
	return i;
}
/**
 * Dummy DPDK callback for Tx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}
/**
 * Dummy DPDK callback for Rx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_rxq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}