 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Data plane functions for mlx4 driver.
/* Verbs headers do not support -pedantic. */
#pragma GCC diagnostic ignored "-Wpedantic"
#include <infiniband/verbs.h>
#pragma GCC diagnostic error "-Wpedantic"
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
 * Pointer-value pair structure used in mlx4_post_send() for saving the first
 * DWORD (32 bits) of a TXBB.
	struct mlx4_wqe_data_seg *dseg;
 * Stamp a WQE so it won't be reused by the HW.
 * This routine is used either when freeing a WQE that the hardware has
 * finished with, or when building a WQ entry has failed, leaving partial
 * information on the queue.
 *   Pointer to the SQ structure.
 *   Index of the freed WQE.
 *   The number of blocks to stamp is taken from the size written in the
 *   WQ entry's control segment.
 *   The value of the WQE owner bit to use in the stamp.
 *   The number of Tx basic blocks (TXBB) the WQE contained.
mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
	uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
					  (!!owner << MLX4_SQ_STAMP_SHIFT));
	uint8_t *wqe = mlx4_get_send_wqe(sq, (index & sq->txbb_cnt_mask));
	uint32_t *ptr = (uint32_t *)wqe;
	/* Extract the size from the control segment of the WQE. */
	num_txbbs = MLX4_SIZE_TO_TXBBS((((struct mlx4_wqe_ctrl_seg *)
					 wqe)->fence_size & 0x3f) << 4);
	txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
	/* Optimize the common case when there is no wrap-around. */
	if (wqe + txbbs_size <= sq->eob) {
		/* Stamp the freed descriptor. */
		for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
			ptr += MLX4_SQ_STAMP_DWORDS;
		/* Stamp the freed descriptor. */
		for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
			ptr += MLX4_SQ_STAMP_DWORDS;
			if ((uint8_t *)ptr >= sq->eob) {
				ptr = (uint32_t *)sq->buf;
				stamp ^= RTE_BE32(0x80000000);
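	/*
	 * Note on the stamping loops above: the stamp value is written once
	 * every MLX4_SQ_STAMP_STRIDE bytes so the whole WQE reads back as
	 * invalid to the device. When the walk wraps past sq->eob, the top
	 * bit of the stamp is flipped, presumably to match the ownership
	 * polarity the device expects on the next pass over the send queue
	 * buffer (consistent with the owner bit handling elsewhere in this
	 * file).
	 */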
 * Manage Tx completions.
 * When sending a burst, mlx4_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *   Pointer to Tx queue structure.
 *   0 on success, -1 on failure.
mlx4_txq_complete(struct txq *txq)
	unsigned int elts_comp = txq->elts_comp;
	unsigned int elts_tail = txq->elts_tail;
	const unsigned int elts_n = txq->elts_n;
	struct mlx4_cq *cq = &txq->mcq;
	struct mlx4_sq *sq = &txq->msq;
	struct mlx4_cqe *cqe;
	uint32_t cons_index = cq->cons_index;
	uint16_t nr_txbbs = 0;
	if (unlikely(elts_comp == 0))
	 * Traverse over all CQ entries reported and handle each WQ entry
		cqe = (struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
		if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		    !!(cons_index & cq->cqe_cnt)))
		 * Make sure we read the CQE after we read the ownership bit.
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			struct mlx4_err_cqe *cqe_err =
				(struct mlx4_err_cqe *)cqe;
			ERROR("%p CQE error - vendor syndrome: 0x%x"
			      (void *)txq, cqe_err->vendor_err,
		/* Get WQE index reported in the CQE. */
			rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
			/* Free next descriptor. */
			mlx4_txq_stamp_freed_wqe(sq,
				(sq->tail + nr_txbbs) & sq->txbb_cnt_mask,
				!!((sq->tail + nr_txbbs) & sq->txbb_cnt));
		} while (((sq->tail + nr_txbbs) & sq->txbb_cnt_mask) !=
	if (unlikely(pkts == 0))
	 * To prevent CQ overflow we first update CQ consumer and only then
	cq->cons_index = cons_index;
	*cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & 0xffffff);
	sq->tail = sq->tail + nr_txbbs;
	/* Update the list of packets posted for transmission. */
	assert(elts_comp <= txq->elts_comp);
	 * Assume completion status is successful as nothing can be done about
	if (elts_tail >= elts_n)
	txq->elts_tail = elts_tail;
	txq->elts_comp = elts_comp;
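	/*
	 * Note: since a completion is only requested once every
	 * MLX4_PMD_TX_PER_COMP_REQ sends (see the function comment above), a
	 * single CQE typically acknowledges a whole batch of WQEs; elts_tail
	 * is therefore advanced by that batch and wrapped modulo elts_n.
	 */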
 * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *   Memory pool where data is located for given mbuf.
static struct rte_mempool *
mlx4_txq_mb2mp(struct rte_mbuf *buf)
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
 * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
 * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
 * remove an entry first.
 *   Pointer to Tx queue structure.
 *   Memory pool for which a memory region lkey must be returned.
 *   mr->lkey on success, (uint32_t)-1 on failure.
mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
		if (unlikely(txq->mp2mr[i].mp == NULL)) {
			/* Unknown MP, add a new MR for it. */
		if (txq->mp2mr[i].mp == mp) {
			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
			assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey);
			return txq->mp2mr[i].lkey;
	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq, mp->name, (void *)mp);
	mr = mlx4_mp2mr(txq->priv->pd, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
	if (unlikely(i == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	/* Store the new entry. */
	txq->mp2mr[i].mp = mp;
	txq->mp2mr[i].mr = mr;
	txq->mp2mr[i].lkey = mr->lkey;
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
	return txq->mp2mr[i].lkey;
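/*
 * Usage note (illustrative): mlx4_post_send() below calls
 * mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf)) for every segment it posts, so
 * only the first packet coming from a new mempool pays the cost of
 * registering memory through mlx4_mp2mr()/ibv_reg_mr(); subsequent lookups
 * are a linear scan of the small mp2mr[] cache.
 */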
 * Post a single work request to a send queue.
 *   Packet to transmit.
 *   0 on success, negative errno value otherwise and rte_errno is set.
mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt)
	struct mlx4_wqe_ctrl_seg *ctrl;
	struct mlx4_wqe_data_seg *dseg;
	struct mlx4_sq *sq = &txq->msq;
	struct rte_mbuf *buf;
	uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
	uint32_t owner_opcode = MLX4_OPCODE_SEND;
	struct pv *pv = (struct pv *)txq->bounce_buf;
	/* Calculate the needed work queue entry size for this packet. */
	wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
			pkt->nb_segs * sizeof(struct mlx4_wqe_data_seg);
	nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
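	/*
	 * Rough illustration (assuming 16-byte control and data segments and
	 * 64-byte TXBBs): a 2-segment packet needs 16 + 2 * 16 = 48 bytes,
	 * i.e. a single TXBB, while a 4-segment packet needs 80 bytes and
	 * therefore spans two TXBBs.
	 */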
	 * Check that there is room for this WQE in the send queue and that
	 * the WQE size is legal.
	if (((sq->head - sq->tail) + nr_txbbs +
	     sq->headroom_txbbs) >= sq->txbb_cnt ||
	    nr_txbbs > MLX4_MAX_WQE_TXBBS) {
	/* Get the control and data entries of the WQE. */
	ctrl = (struct mlx4_wqe_ctrl_seg *)mlx4_get_send_wqe(sq, head_idx);
	dseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +
					    sizeof(struct mlx4_wqe_ctrl_seg));
	/* Fill the data segments with buffer information. */
	for (buf = pkt; buf != NULL; buf = buf->next, dseg++) {
		addr = rte_pktmbuf_mtod(buf, uintptr_t);
		rte_prefetch0((volatile void *)addr);
		/* Handle WQE wraparound. */
		if (unlikely(dseg >= (struct mlx4_wqe_data_seg *)sq->eob))
			dseg = (struct mlx4_wqe_data_seg *)sq->buf;
		dseg->addr = rte_cpu_to_be_64(addr);
		/* Memory region key for this memory pool. */
		lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
		if (unlikely(lkey == (uint32_t)-1)) {
			/* MR does not exist. */
			DEBUG("%p: unable to get MP <-> MR association",
			 * Restamp entry in case of failure.
			 * Make sure that the size is written correctly.
			 * Note that we give ownership to the SW, not the HW.
			ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
			mlx4_txq_stamp_freed_wqe(sq, head_idx,
				(sq->head & sq->txbb_cnt) ? 0 : 1);
		dseg->lkey = rte_cpu_to_be_32(lkey);
		if (likely(buf->data_len)) {
			byte_count = rte_cpu_to_be_32(buf->data_len);
			 * A zero length segment is treated as an inline segment
			byte_count = RTE_BE32(0x80000000);
		 * If the data segment is not at the beginning of a
		 * Tx basic block (TXBB) then write the byte count,
		 * else postpone the writing to just before updating the
		if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
			 * Need a barrier here before writing the byte_count
			 * fields to make sure that all the data is visible
			 * before the byte_count field is set.
			 * Otherwise, if the segment begins a new cacheline,
			 * the HCA prefetcher could grab the 64-byte chunk and
			 * get a valid (!= 0xffffffff) byte count but stale
			 * data, and end up sending the wrong data.
			dseg->byte_count = byte_count;
			 * This data segment starts at the beginning of a new
			 * TXBB, so we need to postpone its byte_count writing
			pv[pv_counter].dseg = dseg;
			pv[pv_counter++].val = byte_count;
	/* Write the first DWORD of each TXBB saved earlier. */
		/* Need a barrier here before writing the byte_count. */
		for (--pv_counter; pv_counter >= 0; pv_counter--)
			pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
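	/*
	 * Rationale: every byte count saved in pv[] belongs to a data segment
	 * that starts a TXBB, where a value other than 0xffffffff makes the
	 * block look valid to the device, so these fields are only written
	 * here, once all other WQE contents are in place and a barrier has
	 * been issued.
	 */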
	/* Fill the control parameters for this packet. */
	ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
	 * For raw Ethernet, the SOLICIT flag is used to indicate that no ICRC
	 * should be calculated.
	txq->elts_comp_cd -= nr_txbbs;
	if (unlikely(txq->elts_comp_cd <= 0)) {
		txq->elts_comp_cd = txq->elts_comp_cd_init;
		srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
				       MLX4_WQE_CTRL_CQ_UPDATE);
		srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
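	/*
	 * Completion cadence: the countdown above is decremented per TXBB
	 * and, once it expires, a CQ update (MLX4_WQE_CTRL_CQ_UPDATE) is
	 * requested and the counter is re-armed from elts_comp_cd_init,
	 * presumably set up from MLX4_PMD_TX_PER_COMP_REQ (see the comment
	 * above mlx4_txq_complete()).
	 */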
	/* Enable HW checksum offload if requested. */
	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
		const uint64_t is_tunneled = (pkt->ol_flags &
					      PKT_TX_TUNNEL_VXLAN));
		if (is_tunneled && txq->csum_l2tun) {
			owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
					MLX4_WQE_CTRL_IL4_HDR_CSUM;
			if (pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM)
					RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
			srcrb.flags |= RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
						MLX4_WQE_CTRL_TCP_UDP_CSUM);
	 * Copy the destination MAC address to the WQE; this allows
	 * loopback in eSwitch, so that VFs and PF can communicate
		srcrb.flags16[0] = *(rte_pktmbuf_mtod(pkt, uint16_t *));
		ctrl->imm = *(rte_pktmbuf_mtod_offset(pkt, uint32_t *,
	ctrl->srcrb_flags = srcrb.flags;
	 * Make sure the descriptor is fully written before
	 * setting the ownership bit (because HW can start
	 * executing as soon as we do).
	ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
					      ((sq->head & sq->txbb_cnt) ?
					       MLX4_BIT_WQE_OWN : 0));
	sq->head += nr_txbbs;
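	/*
	 * Ownership note: (sq->head & sq->txbb_cnt) flips every time the send
	 * queue wraps, so MLX4_BIT_WQE_OWN written above alternates between
	 * passes over the ring; the same expression is used when stamping
	 * freed WQEs in mlx4_txq_stamp_freed_wqe() and mlx4_txq_complete().
	 */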
 * DPDK callback for Tx.
 *   Generic pointer to Tx queue structure.
 *   Packets to transmit.
 *   Number of packets in array.
 *   Number of packets successfully transmitted (<= pkts_n).
mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	unsigned int elts_comp = 0;
	unsigned int bytes_sent = 0;
	assert(txq->elts_comp_cd != 0);
	mlx4_txq_complete(txq);
	max = (elts_n - (elts_head - txq->elts_tail));
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf = pkts[i];
		unsigned int elts_head_next =
			(((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
		struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
		struct txq_elt *elt = &(*txq->elts)[elts_head];
		/* Clean up old buffer. */
		if (likely(elt->buf != NULL)) {
			struct rte_mbuf *tmp = elt->buf;
			memset(elt, 0x66, sizeof(*elt));
			/* Faster than rte_pktmbuf_free(). */
				struct rte_mbuf *next = tmp->next;
				rte_pktmbuf_free_seg(tmp);
			} while (tmp != NULL);
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
		/* Post the packet for sending. */
		err = mlx4_post_send(txq, buf);
		bytes_sent += buf->pkt_len;
		elts_head = elts_head_next;
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
	/* Increment send statistics counters. */
	txq->stats.opackets += i;
	txq->stats.obytes += bytes_sent;
	/* Make sure that descriptors are written before doorbell record. */
	/* Ring QP doorbell. */
	rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
	txq->elts_head = elts_head;
	txq->elts_comp += elts_comp;
 * Translate Rx completion flags to packet type.
 *   Rx completion flags returned by mlx4_cqe_flags().
 *   Packet type in mbuf format.
static inline uint32_t
rxq_cq_to_pkt_type(uint32_t flags)
	if (flags & MLX4_CQE_L2_TUNNEL)
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_IPV4,
				       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_IPV4_PKT,
				       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN);
		pkt_type = mlx4_transpose(flags,
					  MLX4_CQE_STATUS_IPV4_PKT,
					  RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
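/*
 * Example (illustrative): for a plain, non-tunneled IPv4 packet only
 * MLX4_CQE_STATUS_IPV4_PKT is set, so the second branch above yields
 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; mlx4_transpose() is simply expected to move
 * a flag from the CQE bit layout to the matching mbuf bit.
 */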
 * Translate Rx completion flags to offload flags.
 *   Rx completion flags returned by mlx4_cqe_flags().
 *   Whether Rx checksums are enabled.
 *   Whether Rx L2 tunnel checksums are enabled.
 *   Offload flags (ol_flags) in mbuf format.
static inline uint32_t
rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
	uint32_t ol_flags = 0;
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
				       PKT_RX_L4_CKSUM_GOOD);
	if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_IPOK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_L4_CSUM,
				       PKT_RX_L4_CKSUM_GOOD);
 * Extract checksum information from CQE flags.
 *   Pointer to CQE structure.
 *   Whether Rx checksums are enabled.
 *   Whether Rx L2 tunnel checksums are enabled.
 *   CQE checksum information.
static inline uint32_t
mlx4_cqe_flags(struct mlx4_cqe *cqe, int csum, int csum_l2tun)
	 * The relevant bits are in different locations within their
	 * respective CQE fields, therefore we can join them in one 32-bit
		flags = (rte_be_to_cpu_32(cqe->status) &
			 MLX4_CQE_STATUS_IPV4_CSUM_OK);
		flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
			  (MLX4_CQE_L2_TUNNEL |
			   MLX4_CQE_L2_TUNNEL_IPOK |
			   MLX4_CQE_L2_TUNNEL_L4_CSUM |
			   MLX4_CQE_L2_TUNNEL_IPV4));
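	/*
	 * The returned value mixes bits taken verbatim from cqe->status and
	 * cqe->vlan_my_qpn; it is only meant to be passed back to
	 * rxq_cq_to_pkt_type() and rxq_cq_to_ol_flags() above, which test the
	 * same MLX4_CQE_* masks.
	 */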
 * Poll one CQE from CQ.
 *   Pointer to the receive queue structure.
 *   Number of bytes of the CQE, 0 in case there is no completion.
mlx4_cq_poll_one(struct rxq *rxq, struct mlx4_cqe **out)
	struct mlx4_cqe *cqe = NULL;
	struct mlx4_cq *cq = &rxq->mcq;
	cqe = (struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
	if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
	    !!(cq->cons_index & cq->cqe_cnt))
	 * Make sure we read CQ entry contents after we've checked the
	assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
	assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
	       MLX4_CQE_OPCODE_ERROR);
	ret = rte_be_to_cpu_32(cqe->byte_cnt);
 * DPDK callback for Rx with scattered packets support.
 *   Generic pointer to Rx queue structure.
 *   Array to store received packets.
 *   Maximum number of packets in array.
 *   Number of packets successfully received (<= pkts_n).
mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
	struct rxq *rxq = dpdk_rxq;
	const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
	const uint16_t sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	uint32_t rq_ci = rxq->rq_ci << sges_n;
		struct mlx4_cqe *cqe;
		uint32_t idx = rq_ci & wr_cnt;
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];
		/* Update the 'next' pointer of the previous segment. */
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
				 * No buffers before we even started,
			assert(pkt != (*rxq->elts)[idx]);
			rte_mbuf_raw_free(pkt);
		/* Look for a new packet. */
			len = mlx4_cq_poll_one(rxq, &cqe);
				rte_mbuf_raw_free(rep);
			if (unlikely(len < 0)) {
				/* Rx error, packet is likely too large. */
				rte_mbuf_raw_free(rep);
				++rxq->stats.idropped;
			if (rxq->csum | rxq->csum_l2tun) {
					rxq_cq_to_ol_flags(flags,
				pkt->packet_type = rxq_cq_to_pkt_type(flags);
				pkt->packet_type = 0;
		rep->port = rxq->port_id;
		rep->data_len = seg->data_len;
		rep->data_off = seg->data_off;
		(*rxq->elts)[idx] = rep;
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		if (len > seg->data_len) {
			len -= seg->data_len;
		/* The last segment. */
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt->pkt_len;
		/* Align consumer index to the next stride. */
	if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	*rxq->mcq.set_ci_db = rte_cpu_to_be_32(rxq->mcq.cons_index & 0xffffff);
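	/*
	 * Two doorbell records are updated here: *rxq->rq_db publishes the RQ
	 * index in units of whole WQEs (hence the >> sges_n above, each WQE
	 * covering 1 << sges_n segments), while *rxq->mcq.set_ci_db publishes
	 * the CQ consumer index, mirroring what mlx4_txq_complete() does on
	 * the Tx side.
	 */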
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
 * Dummy DPDK callback for Tx.
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *   Generic pointer to Tx queue structure.
 *   Packets to transmit.
 *   Number of packets in array.
 *   Number of packets successfully transmitted (<= pkts_n).
mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 * Dummy DPDK callback for Rx.
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *   Generic pointer to Rx queue structure.
 *   Array to store received packets.
 *   Maximum number of packets in array.
 *   Number of packets successfully received (<= pkts_n).
mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)