1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
9 #include <rte_ethdev.h>
10 #include <ethdev_driver.h>
11 #include <rte_malloc.h>
13 #include "ngbe_logs.h"
14 #include "base/ngbe.h"
15 #include "ngbe_ethdev.h"
16 #include "ngbe_rxtx.h"
19 * Prefetch a cache line into all cache levels.
21 #define rte_ngbe_prefetch(p) rte_prefetch0(p)
23 /*********************************************************************
27 **********************************************************************/
30 * Check for descriptors with their DD bit set and free mbufs.
31 * Return the total number of buffers freed.
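 * Buffers are freed in bursts of tx_free_thresh: nothing is released until
 * the descriptor at tx_next_dd reports completion, and mbufs coming from the
 * same mempool are returned with a single bulk put.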
33 static __rte_always_inline int
34 ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
36 struct ngbe_tx_entry *txep;
39 struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];
41 /* check DD bit on threshold descriptor */
42 status = txq->tx_ring[txq->tx_next_dd].dw3;
43 if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
44 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
45 ngbe_set32_masked(txq->tdc_reg_addr,
46 NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
51 * first buffer to free from S/W ring is at index
52 * tx_next_dd - (tx_free_thresh-1)
54 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
55 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
56 /* free buffers one at a time */
57 m = rte_pktmbuf_prefree_seg(txep->mbuf);
60 if (unlikely(m == NULL))
63 if (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||
64 (nb_free > 0 && m->pool != free[0]->pool)) {
65 rte_mempool_put_bulk(free[0]->pool,
66 (void **)free, nb_free);
74 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
76 /* buffers were freed, update counters */
77 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
78 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
79 if (txq->tx_next_dd >= txq->nb_tx_desc)
80 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
82 return txq->tx_free_thresh;
85 /* Populate 4 descriptors with data from 4 mbufs */
87 tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
89 uint64_t buf_dma_addr;
93 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
94 buf_dma_addr = rte_mbuf_data_iova(*pkts);
95 pkt_len = (*pkts)->data_len;
97 /* write data to descriptor */
98 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
99 txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
100 NGBE_TXD_DATLEN(pkt_len));
101 txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
103 rte_prefetch0(&(*pkts)->pool);
107 /* Populate 1 descriptor with data from 1 mbuf */
109 tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
111 uint64_t buf_dma_addr;
114 buf_dma_addr = rte_mbuf_data_iova(*pkts);
115 pkt_len = (*pkts)->data_len;
117 /* write data to descriptor */
118 txdp->qw0 = cpu_to_le64(buf_dma_addr);
119 txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
120 NGBE_TXD_DATLEN(pkt_len));
121 txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
123 rte_prefetch0(&(*pkts)->pool);
127 * Fill H/W descriptor ring with mbuf data.
128 * Copy mbuf pointers to the S/W ring.
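 * The bulk of the burst is written four descriptors at a time (tx4); any
 * remainder is written one descriptor at a time (tx1).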
131 ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,
134 volatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
135 struct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
136 const int N_PER_LOOP = 4;
137 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
138 int mainpart, leftover;
142 * Process most of the packets in chunks of N pkts. Any
143 * leftover packets will get processed one at a time.
145 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
146 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
147 for (i = 0; i < mainpart; i += N_PER_LOOP) {
148 /* Copy N mbuf pointers to the S/W ring */
149 for (j = 0; j < N_PER_LOOP; ++j)
150 (txep + i + j)->mbuf = *(pkts + i + j);
151 tx4(txdp + i, pkts + i);
154 if (unlikely(leftover > 0)) {
155 for (i = 0; i < leftover; ++i) {
156 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
157 tx1(txdp + mainpart + i, pkts + mainpart + i);
162 static inline uint16_t
163 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
166 struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
170 * Begin scanning the H/W ring for done descriptors when the
171 * number of available descriptors drops below tx_free_thresh.
172 * For each done descriptor, free the associated buffer.
174 if (txq->nb_tx_free < txq->tx_free_thresh)
175 ngbe_tx_free_bufs(txq);
177 /* Only use descriptors that are available */
178 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
179 if (unlikely(nb_pkts == 0))
182 /* Use exactly nb_pkts descriptors */
183 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
186 * At this point, we know there are enough descriptors in the
187 * ring to transmit all the packets. This assumes that each
188 * mbuf contains a single segment, and that no new offloads
189 * are expected, which would require a new context descriptor.
193 * See if we're going to wrap-around. If so, handle the top
194 * of the descriptor ring first, then do the bottom. If not,
195 * the processing looks just like the "bottom" part anyway...
197 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
198 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
199 ngbe_tx_fill_hw_ring(txq, tx_pkts, n);
203 /* Fill H/W descriptor ring with mbuf data */
204 ngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
205 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
208 * Check for wrap-around. This would only happen if we used
209 * up to the last descriptor in the ring, no more, no less.
211 if (txq->tx_tail >= txq->nb_tx_desc)
214 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
215 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
216 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
218 /* update tail pointer */
220 ngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
226 ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
231 /* Transmit the request directly when it fits within TX_MAX_BURST pkts */
232 if (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))
233 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
235 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
237 while (nb_pkts != 0) {
240 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);
241 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
242 nb_tx = (uint16_t)(nb_tx + ret);
243 nb_pkts = (uint16_t)(nb_pkts - ret);
251 /*********************************************************************
255 **********************************************************************/
256 static inline uint32_t
257 ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
259 uint16_t ptid = NGBE_RXD_PTID(pkt_info);
263 return ngbe_decode_ptype(ptid);
267 * LOOK_AHEAD defines how many desc statuses to check beyond the
268 * current descriptor.
269 * It must be a #define (a compile-time constant) for optimal performance.
270 * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
271 * function only works with LOOK_AHEAD=8.
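 * ngbe_rx_scan_hw_ring() walks up to RTE_PMD_NGBE_RX_MAX_BURST descriptors
 * in groups of LOOK_AHEAD and stops at the first group that is not fully
 * completed.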
274 #if (LOOK_AHEAD != 8)
275 #error "PMD NGBE: LOOK_AHEAD must be 8\n"
278 ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
280 volatile struct ngbe_rx_desc *rxdp;
281 struct ngbe_rx_entry *rxep;
285 uint32_t s[LOOK_AHEAD];
286 uint32_t pkt_info[LOOK_AHEAD];
290 /* get references to current descriptor and S/W ring entry */
291 rxdp = &rxq->rx_ring[rxq->rx_tail];
292 rxep = &rxq->sw_ring[rxq->rx_tail];
294 status = rxdp->qw1.lo.status;
295 /* check to make sure there is at least 1 packet to receive */
296 if (!(status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
300 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
301 * reference packets that are ready to be received.
303 for (i = 0; i < RTE_PMD_NGBE_RX_MAX_BURST;
304 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
305 /* Read desc statuses; the acquire fence below keeps later reads ordered */
306 for (j = 0; j < LOOK_AHEAD; j++)
307 s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
309 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
311 /* Compute how many status bits were set */
312 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
313 (s[nb_dd] & NGBE_RXD_STAT_DD); nb_dd++)
316 for (j = 0; j < nb_dd; j++)
317 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
321 /* Translate descriptor info to mbuf format */
322 for (j = 0; j < nb_dd; ++j) {
324 pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len);
325 mb->data_len = pkt_len;
326 mb->pkt_len = pkt_len;
329 ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
333 /* Move mbuf pointers from the S/W ring to the stage */
334 for (j = 0; j < LOOK_AHEAD; ++j)
335 rxq->rx_stage[i + j] = rxep[j].mbuf;
337 /* stop scanning when a group of LOOK_AHEAD descriptors is not all done */
338 if (nb_dd != LOOK_AHEAD)
342 /* clear software ring entries so we can cleanup correctly */
343 for (i = 0; i < nb_rx; ++i)
344 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
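/*
 * ngbe_rx_alloc_bufs: bulk-allocate rx_free_thresh mbufs from the mempool,
 * initialize them, program their DMA addresses into the descriptors that
 * follow the previous free trigger, and advance the trigger. Returns non-zero
 * when the mempool cannot supply enough mbufs.
 */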
350 ngbe_rx_alloc_bufs(struct ngbe_rx_queue *rxq, bool reset_mbuf)
352 volatile struct ngbe_rx_desc *rxdp;
353 struct ngbe_rx_entry *rxep;
359 /* allocate buffers in bulk directly into the S/W ring */
360 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
361 rxep = &rxq->sw_ring[alloc_idx];
362 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
363 rxq->rx_free_thresh);
364 if (unlikely(diag != 0))
367 rxdp = &rxq->rx_ring[alloc_idx];
368 for (i = 0; i < rxq->rx_free_thresh; ++i) {
369 /* populate the static rte mbuf fields */
372 mb->port = rxq->port_id;
374 rte_mbuf_refcnt_set(mb, 1);
375 mb->data_off = RTE_PKTMBUF_HEADROOM;
377 /* populate the descriptors */
378 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
379 NGBE_RXD_HDRADDR(&rxdp[i], 0);
380 NGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
383 /* update state of internal queue structure */
384 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
385 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
386 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
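/*
 * ngbe_rx_fill_from_stage: copy up to nb_pkts staged mbuf pointers into the
 * caller's array and advance the internal stage counters.
 */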
392 static inline uint16_t
393 ngbe_rx_fill_from_stage(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
396 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
399 /* how many packets are ready to return? */
400 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
402 /* copy mbuf pointers to the application's packet list */
403 for (i = 0; i < nb_pkts; ++i)
404 rx_pkts[i] = stage[i];
406 /* update internal queue state */
407 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
408 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
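/*
 * ngbe_rx_recv_pkts: drain previously staged mbufs first; otherwise scan the
 * H/W ring, stage the completed packets, replenish descriptors once the free
 * trigger has been passed (rewinding the scan if allocation fails), and hand
 * out up to nb_pkts mbufs.
 */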
413 static inline uint16_t
414 ngbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
417 struct ngbe_rx_queue *rxq = (struct ngbe_rx_queue *)rx_queue;
418 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
421 /* Any previously recv'd pkts will be returned from the Rx stage */
422 if (rxq->rx_nb_avail)
423 return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
425 /* Scan the H/W ring for packets to receive */
426 nb_rx = (uint16_t)ngbe_rx_scan_hw_ring(rxq);
428 /* update internal queue state */
429 rxq->rx_next_avail = 0;
430 rxq->rx_nb_avail = nb_rx;
431 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
433 /* if required, allocate new buffers to replenish descriptors */
434 if (rxq->rx_tail > rxq->rx_free_trigger) {
435 uint16_t cur_free_trigger = rxq->rx_free_trigger;
437 if (ngbe_rx_alloc_bufs(rxq, true) != 0) {
440 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
441 "queue_id=%u", (uint16_t)rxq->port_id,
442 (uint16_t)rxq->queue_id);
444 dev->data->rx_mbuf_alloc_failed +=
448 * Need to rewind any previous receives if we cannot
449 * allocate new buffers to replenish the old ones.
451 rxq->rx_nb_avail = 0;
452 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
453 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
454 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
459 /* update tail pointer */
461 ngbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
464 if (rxq->rx_tail >= rxq->nb_rx_desc)
467 /* received any packets this loop? */
468 if (rxq->rx_nb_avail)
469 return ngbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
474 /* split requests into chunks of size RTE_PMD_NGBE_RX_MAX_BURST */
476 ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
481 if (unlikely(nb_pkts == 0))
484 if (likely(nb_pkts <= RTE_PMD_NGBE_RX_MAX_BURST))
485 return ngbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
487 /* request is relatively large, chunk it up */
492 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_RX_MAX_BURST);
493 ret = ngbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
494 nb_rx = (uint16_t)(nb_rx + ret);
495 nb_pkts = (uint16_t)(nb_pkts - ret);
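/*
 * ngbe_recv_pkts: default Rx burst handler. Descriptors are processed one at
 * a time and every received buffer is immediately replaced by a freshly
 * allocated mbuf before the packet is returned to the application.
 */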
504 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
507 struct ngbe_rx_queue *rxq;
508 volatile struct ngbe_rx_desc *rx_ring;
509 volatile struct ngbe_rx_desc *rxdp;
510 struct ngbe_rx_entry *sw_ring;
511 struct ngbe_rx_entry *rxe;
512 struct rte_mbuf *rxm;
513 struct rte_mbuf *nmb;
514 struct ngbe_rx_desc rxd;
526 rx_id = rxq->rx_tail;
527 rx_ring = rxq->rx_ring;
528 sw_ring = rxq->sw_ring;
529 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
530 while (nb_rx < nb_pkts) {
532 * The order of operations here is important as the DD status
533 * bit must not be read after any other descriptor fields.
534 * rx_ring and rxdp are pointing to volatile data so the order
535 * of accesses cannot be reordered by the compiler. If they were
536 * not volatile, they could be reordered which could lead to
537 * using invalid descriptor fields when read from rxd.
539 rxdp = &rx_ring[rx_id];
540 staterr = rxdp->qw1.lo.status;
541 if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
548 * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet
549 * is likely to be invalid and to be dropped by the various
550 * validation checks performed by the network stack.
552 * Allocate a new mbuf to replenish the RX ring descriptor.
553 * If the allocation fails:
554 * - arrange for that Rx descriptor to be the first one
555 * to be parsed the next time the receive function is
556 * invoked [on the same queue].
558 * - Stop parsing the Rx ring and return immediately.
560 * This policy does not drop the packet received in the Rx
561 * descriptor for which the allocation of a new mbuf failed.
562 * Thus, it allows that packet to be retrieved later once
563 * mbufs have been freed in the meantime.
564 * As a side effect, holding Rx descriptors instead of
565 * systematically giving them back to the NIC may lead to
566 * Rx ring exhaustion situations.
567 * However, the NIC can gracefully prevent such situations
568 * from happening by sending specific "back-pressure" flow control
569 * frames to its peer(s).
572 "port_id=%u queue_id=%u rx_id=%u ext_err_stat=0x%08x pkt_len=%u",
573 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
574 (uint16_t)rx_id, (uint32_t)staterr,
575 (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
577 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
580 "Rx mbuf alloc failed port_id=%u queue_id=%u",
581 (uint16_t)rxq->port_id,
582 (uint16_t)rxq->queue_id);
583 dev->data->rx_mbuf_alloc_failed++;
588 rxe = &sw_ring[rx_id];
590 if (rx_id == rxq->nb_rx_desc)
593 /* Prefetch next mbuf while processing current one. */
594 rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
597 * When next Rx descriptor is on a cache-line boundary,
598 * prefetch the next 4 Rx descriptors and the next 8 pointers
601 if ((rx_id & 0x3) == 0) {
602 rte_ngbe_prefetch(&rx_ring[rx_id]);
603 rte_ngbe_prefetch(&sw_ring[rx_id]);
608 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
609 NGBE_RXD_HDRADDR(rxdp, 0);
610 NGBE_RXD_PKTADDR(rxdp, dma_addr);
613 * Initialize the returned mbuf.
614 * setup generic mbuf fields:
615 * - number of segments,
618 * - Rx port identifier.
620 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len));
621 rxm->data_off = RTE_PKTMBUF_HEADROOM;
622 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
625 rxm->pkt_len = pkt_len;
626 rxm->data_len = pkt_len;
627 rxm->port = rxq->port_id;
629 pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
630 rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
634 * Store the mbuf address into the next entry of the array
635 * of returned packets.
637 rx_pkts[nb_rx++] = rxm;
639 rxq->rx_tail = rx_id;
642 * If the number of free Rx descriptors is greater than the Rx free
643 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
645 * Update the RDT with the value of the last processed Rx descriptor
646 * minus 1, to guarantee that the RDT register is never equal to the
647 * RDH register, which creates a "full" ring situation from the
648 * hardware point of view...
650 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
651 if (nb_hold > rxq->rx_free_thresh) {
653 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
654 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
655 (uint16_t)rx_id, (uint16_t)nb_hold,
657 rx_id = (uint16_t)((rx_id == 0) ?
658 (rxq->nb_rx_desc - 1) : (rx_id - 1));
659 ngbe_set32(rxq->rdt_reg_addr, rx_id);
662 rxq->nb_rx_hold = nb_hold;
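/*
 * ngbe_fill_cluster_head_buf: fill the head mbuf of a scattered cluster with
 * per-packet metadata (port identifier and packet type) taken from the
 * descriptor that completed the packet.
 */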
667 ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
668 struct ngbe_rx_queue *rxq, uint32_t staterr)
672 RTE_SET_USED(staterr);
673 head->port = rxq->port_id;
675 pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
676 head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
681 * ngbe_recv_pkts_sc - receive handler for scatter case.
683 * @rx_queue Rx queue handle
684 * @rx_pkts table of received packets
685 * @nb_pkts size of rx_pkts table
686 * @bulk_alloc if TRUE, bulk allocation is used for H/W ring refilling
688 * Returns the number of received packets/clusters (according to the "bulk
689 * receive" interface).
691 static inline uint16_t
692 ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
695 struct ngbe_rx_queue *rxq = rx_queue;
696 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
697 volatile struct ngbe_rx_desc *rx_ring = rxq->rx_ring;
698 struct ngbe_rx_entry *sw_ring = rxq->sw_ring;
699 struct ngbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
700 uint16_t rx_id = rxq->rx_tail;
702 uint16_t nb_hold = rxq->nb_rx_hold;
703 uint16_t prev_id = rxq->rx_tail;
705 while (nb_rx < nb_pkts) {
707 struct ngbe_rx_entry *rxe;
708 struct ngbe_scattered_rx_entry *sc_entry;
709 struct ngbe_scattered_rx_entry *next_sc_entry = NULL;
710 struct ngbe_rx_entry *next_rxe = NULL;
711 struct rte_mbuf *first_seg;
712 struct rte_mbuf *rxm;
713 struct rte_mbuf *nmb = NULL;
714 struct ngbe_rx_desc rxd;
717 volatile struct ngbe_rx_desc *rxdp;
721 rxdp = &rx_ring[rx_id];
722 staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
724 if (!(staterr & NGBE_RXD_STAT_DD))
729 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
730 "staterr=0x%x data_len=%u",
731 rxq->port_id, rxq->queue_id, rx_id, staterr,
732 rte_le_to_cpu_16(rxd.qw1.hi.len));
735 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
737 PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed "
738 "port_id=%u queue_id=%u",
739 rxq->port_id, rxq->queue_id);
741 dev->data->rx_mbuf_alloc_failed++;
744 } else if (nb_hold > rxq->rx_free_thresh) {
745 uint16_t next_rdt = rxq->rx_free_trigger;
747 if (!ngbe_rx_alloc_bufs(rxq, false)) {
749 ngbe_set32_relaxed(rxq->rdt_reg_addr,
751 nb_hold -= rxq->rx_free_thresh;
753 PMD_RX_LOG(DEBUG, "Rx bulk alloc failed "
754 "port_id=%u queue_id=%u",
755 rxq->port_id, rxq->queue_id);
757 dev->data->rx_mbuf_alloc_failed++;
763 rxe = &sw_ring[rx_id];
764 eop = staterr & NGBE_RXD_STAT_EOP;
767 if (next_id == rxq->nb_rx_desc)
770 /* Prefetch next mbuf while processing current one. */
771 rte_ngbe_prefetch(sw_ring[next_id].mbuf);
774 * When next Rx descriptor is on a cache-line boundary,
775 * prefetch the next 4 RX descriptors and the next 4 pointers
778 if ((next_id & 0x3) == 0) {
779 rte_ngbe_prefetch(&rx_ring[next_id]);
780 rte_ngbe_prefetch(&sw_ring[next_id]);
787 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
789 * Update Rx descriptor with the physical address of the
790 * new data buffer of the newly allocated mbuf.
794 rxm->data_off = RTE_PKTMBUF_HEADROOM;
795 NGBE_RXD_HDRADDR(rxdp, 0);
796 NGBE_RXD_PKTADDR(rxdp, dma);
802 * Set data length & data buffer address of mbuf.
804 data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
805 rxm->data_len = data_len;
811 next_sc_entry = &sw_sc_ring[nextp_id];
812 next_rxe = &sw_ring[nextp_id];
813 rte_ngbe_prefetch(next_rxe);
816 sc_entry = &sw_sc_ring[rx_id];
817 first_seg = sc_entry->fbuf;
818 sc_entry->fbuf = NULL;
821 * If this is the first buffer of the received packet,
822 * set the pointer to the first mbuf of the packet and
823 * initialize its context.
824 * Otherwise, update the total length and the number of segments
825 * of the current scattered packet, and update the pointer to
826 * the last mbuf of the current packet.
828 if (first_seg == NULL) {
830 first_seg->pkt_len = data_len;
831 first_seg->nb_segs = 1;
833 first_seg->pkt_len += data_len;
834 first_seg->nb_segs++;
841 * If this is not the last buffer of the received packet, update
842 * the pointer to the first mbuf at the NEXTP entry in the
843 * sw_sc_ring and continue to parse the Rx ring.
845 if (!eop && next_rxe) {
846 rxm->next = next_rxe->mbuf;
847 next_sc_entry->fbuf = first_seg;
851 /* Initialize the first mbuf of the returned packet */
852 ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
854 /* Prefetch data of first segment, if configured to do so. */
855 rte_packet_prefetch((char *)first_seg->buf_addr +
856 first_seg->data_off);
859 * Store the mbuf address into the next entry of the array
860 * of returned packets.
862 rx_pkts[nb_rx++] = first_seg;
866 * Record index of the next Rx descriptor to probe.
868 rxq->rx_tail = rx_id;
871 * If the number of free Rx descriptors is greater than the Rx free
872 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
874 * Update the RDT with the value of the last processed Rx descriptor
875 * minus 1, to guarantee that the RDT register is never equal to the
876 * RDH register, which creates a "full" ring situation from the
877 * hardware point of view...
879 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
880 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
881 "nb_hold=%u nb_rx=%u",
882 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
885 ngbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
889 rxq->nb_rx_hold = nb_hold;
894 ngbe_recv_pkts_sc_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
897 return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, false);
901 ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
904 return ngbe_recv_pkts_sc(rx_queue, rx_pkts, nb_pkts, true);
907 /*********************************************************************
909 * Queue management functions
911 **********************************************************************/
914 ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
918 if (txq->sw_ring != NULL) {
919 for (i = 0; i < txq->nb_tx_desc; i++) {
920 if (txq->sw_ring[i].mbuf != NULL) {
921 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
922 txq->sw_ring[i].mbuf = NULL;
929 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
932 rte_free(txq->sw_ring);
936 ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
939 if (txq->ops != NULL) {
940 txq->ops->release_mbufs(txq);
941 txq->ops->free_swring(txq);
948 ngbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
950 ngbe_tx_queue_release(dev->data->tx_queues[qid]);
953 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
955 ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
957 static const struct ngbe_tx_desc zeroed_desc = {0};
958 struct ngbe_tx_entry *txe = txq->sw_ring;
961 /* Zero out HW ring memory */
962 for (i = 0; i < txq->nb_tx_desc; i++)
963 txq->tx_ring[i] = zeroed_desc;
965 /* Initialize SW ring entries */
966 prev = (uint16_t)(txq->nb_tx_desc - 1);
967 for (i = 0; i < txq->nb_tx_desc; i++) {
968 /* the ring can also be modified by hardware */
969 volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
971 txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
974 txe[prev].next_id = i;
978 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
982 * Always allow 1 descriptor to be un-allocated to avoid
983 * a H/W race condition
985 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
986 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
988 memset((void *)&txq->ctx_cache, 0,
989 NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
992 static const struct ngbe_txq_ops def_txq_ops = {
993 .release_mbufs = ngbe_tx_queue_release_mbufs,
994 .free_swring = ngbe_tx_free_swring,
995 .reset = ngbe_reset_tx_queue,
999 ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1002 unsigned int socket_id,
1003 const struct rte_eth_txconf *tx_conf)
1005 const struct rte_memzone *tz;
1006 struct ngbe_tx_queue *txq;
1008 uint16_t tx_free_thresh;
1010 PMD_INIT_FUNC_TRACE();
1011 hw = ngbe_dev_hw(dev);
1014 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
1015 * descriptors are used or if the number of descriptors required
1016 * to transmit a packet is greater than the number of free Tx descriptors.
1018 * One descriptor in the Tx ring is used as a sentinel to avoid a
1019 * H/W race condition, hence the maximum threshold constraints.
1020 * When set to zero use default values.
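 * For example, with a 512-entry ring, tx_free_thresh must divide 512 and
 * stay below 509 (nb_desc - 3), so 256 is the largest value accepted here.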
1022 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1023 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
1024 if (tx_free_thresh >= (nb_desc - 3)) {
1026 "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
1027 (unsigned int)tx_free_thresh,
1028 (int)dev->data->port_id, (int)queue_idx);
1032 if (nb_desc % tx_free_thresh != 0) {
1034 "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
1035 (unsigned int)tx_free_thresh,
1036 (int)dev->data->port_id, (int)queue_idx);
1040 /* Free memory prior to re-allocation if needed... */
1041 if (dev->data->tx_queues[queue_idx] != NULL) {
1042 ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
1043 dev->data->tx_queues[queue_idx] = NULL;
1046 /* First allocate the Tx queue data structure */
1047 txq = rte_zmalloc_socket("ethdev Tx queue",
1048 sizeof(struct ngbe_tx_queue),
1049 RTE_CACHE_LINE_SIZE, socket_id);
1054 * Allocate Tx ring hardware descriptors. A memzone large enough to
1055 * handle the maximum ring size is allocated in order to allow for
1056 * resizing in later calls to the queue setup function.
1058 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1059 sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
1060 NGBE_ALIGN, socket_id);
1062 ngbe_tx_queue_release(txq);
1066 txq->nb_tx_desc = nb_desc;
1067 txq->tx_free_thresh = tx_free_thresh;
1068 txq->pthresh = tx_conf->tx_thresh.pthresh;
1069 txq->hthresh = tx_conf->tx_thresh.hthresh;
1070 txq->wthresh = tx_conf->tx_thresh.wthresh;
1071 txq->queue_id = queue_idx;
1072 txq->reg_idx = queue_idx;
1073 txq->port_id = dev->data->port_id;
1074 txq->ops = &def_txq_ops;
1075 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1077 txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
1078 txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
1080 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
1081 txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
1083 /* Allocate software ring */
1084 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
1085 sizeof(struct ngbe_tx_entry) * nb_desc,
1086 RTE_CACHE_LINE_SIZE, socket_id);
1087 if (txq->sw_ring == NULL) {
1088 ngbe_tx_queue_release(txq);
1092 "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1093 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1095 txq->ops->reset(txq);
1097 dev->data->tx_queues[queue_idx] = txq;
1103 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
1105 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
1106 * in the sw_sc_ring is not set to NULL but rather points to the next
1107 * mbuf of this RSC aggregation (that has not been completed yet and still
1108 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
1109 * just free the first "nb_segs" segments of the cluster explicitly by calling
1110 * rte_pktmbuf_free_seg().
1112 * @m scattered cluster head
1115 ngbe_free_sc_cluster(struct rte_mbuf *m)
1117 uint16_t i, nb_segs = m->nb_segs;
1118 struct rte_mbuf *next_seg;
1120 for (i = 0; i < nb_segs; i++) {
1122 rte_pktmbuf_free_seg(m);
1128 ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
1132 if (rxq->sw_ring != NULL) {
1133 for (i = 0; i < rxq->nb_rx_desc; i++) {
1134 if (rxq->sw_ring[i].mbuf != NULL) {
1135 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1136 rxq->sw_ring[i].mbuf = NULL;
1139 for (i = 0; i < rxq->rx_nb_avail; ++i) {
1140 struct rte_mbuf *mb;
1142 mb = rxq->rx_stage[rxq->rx_next_avail + i];
1143 rte_pktmbuf_free_seg(mb);
1145 rxq->rx_nb_avail = 0;
1148 if (rxq->sw_sc_ring != NULL)
1149 for (i = 0; i < rxq->nb_rx_desc; i++)
1150 if (rxq->sw_sc_ring[i].fbuf != NULL) {
1151 ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
1152 rxq->sw_sc_ring[i].fbuf = NULL;
1157 ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
1160 ngbe_rx_queue_release_mbufs(rxq);
1161 rte_free(rxq->sw_ring);
1162 rte_free(rxq->sw_sc_ring);
1168 ngbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1170 ngbe_rx_queue_release(dev->data->rx_queues[qid]);
1174 * Check if Rx Burst Bulk Alloc function can be used.
1176 * 0: the preconditions are satisfied and the bulk allocation function can be used.
1178 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
1179 * function must be used.
1182 check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
1187 * Make sure the following pre-conditions are satisfied:
1188 * rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
1189 * rxq->rx_free_thresh < rxq->nb_rx_desc
1190 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
1191 * Scattered packets are not supported. This should be checked
1192 * outside of this function.
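 * For example, with nb_rx_desc = 512, rx_free_thresh = 64 satisfies the last
 * two conditions; the first one depends on the value of
 * RTE_PMD_NGBE_RX_MAX_BURST.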
1194 if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
1196 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
1197 rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
1199 } else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
1201 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
1202 rxq->rx_free_thresh, rxq->nb_rx_desc);
1204 } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
1206 "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
1207 rxq->nb_rx_desc, rxq->rx_free_thresh);
1214 /* Reset dynamic ngbe_rx_queue fields back to defaults */
1216 ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
1218 static const struct ngbe_rx_desc zeroed_desc = {
1219 {{0}, {0} }, {{0}, {0} } };
1221 uint16_t len = rxq->nb_rx_desc;
1224 * By default, the Rx queue setup function allocates enough memory for
1225 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
1226 * extra memory at the end of the descriptor ring to be zero'd out.
1228 if (adapter->rx_bulk_alloc_allowed)
1229 /* zero out extra memory */
1230 len += RTE_PMD_NGBE_RX_MAX_BURST;
1233 * Zero out HW ring memory. Zero out extra memory at the end of
1234 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
1235 * reads extra memory as zeros.
1237 for (i = 0; i < len; i++)
1238 rxq->rx_ring[i] = zeroed_desc;
1241 * Initialize extra software ring entries. Space for these extra
1242 * entries is always allocated
1244 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
1245 for (i = rxq->nb_rx_desc; i < len; ++i)
1246 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
1248 rxq->rx_nb_avail = 0;
1249 rxq->rx_next_avail = 0;
1250 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1252 rxq->nb_rx_hold = 0;
1253 rxq->pkt_first_seg = NULL;
1254 rxq->pkt_last_seg = NULL;
1258 ngbe_get_rx_port_offloads(struct rte_eth_dev *dev __rte_unused)
1260 return RTE_ETH_RX_OFFLOAD_SCATTER;
1264 ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
1267 unsigned int socket_id,
1268 const struct rte_eth_rxconf *rx_conf,
1269 struct rte_mempool *mp)
1271 const struct rte_memzone *rz;
1272 struct ngbe_rx_queue *rxq;
1275 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1277 PMD_INIT_FUNC_TRACE();
1278 hw = ngbe_dev_hw(dev);
1280 /* Free memory prior to re-allocation if needed... */
1281 if (dev->data->rx_queues[queue_idx] != NULL) {
1282 ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
1283 dev->data->rx_queues[queue_idx] = NULL;
1286 /* First allocate the Rx queue data structure */
1287 rxq = rte_zmalloc_socket("ethdev RX queue",
1288 sizeof(struct ngbe_rx_queue),
1289 RTE_CACHE_LINE_SIZE, socket_id);
1293 rxq->nb_rx_desc = nb_desc;
1294 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1295 rxq->queue_id = queue_idx;
1296 rxq->reg_idx = queue_idx;
1297 rxq->port_id = dev->data->port_id;
1298 rxq->drop_en = rx_conf->rx_drop_en;
1299 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1302 * Allocate Rx ring hardware descriptors. A memzone large enough to
1303 * handle the maximum ring size is allocated in order to allow for
1304 * resizing in later calls to the queue setup function.
1306 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1307 RX_RING_SZ, NGBE_ALIGN, socket_id);
1309 ngbe_rx_queue_release(rxq);
1314 * Zero init all the descriptors in the ring.
1316 memset(rz->addr, 0, RX_RING_SZ);
1318 rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
1319 rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
1321 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
1322 rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
1325 * Certain constraints must be met in order to use the bulk buffer
1326 * allocation Rx burst function. If any of the Rx queues does not meet them,
1327 * the feature should be disabled for the whole port.
1329 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
1331 "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
1332 rxq->queue_id, rxq->port_id);
1333 adapter->rx_bulk_alloc_allowed = false;
1337 * Allocate software ring. Allow for space at the end of the
1338 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
1339 * function does not access an invalid memory region.
1342 if (adapter->rx_bulk_alloc_allowed)
1343 len += RTE_PMD_NGBE_RX_MAX_BURST;
1345 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
1346 sizeof(struct ngbe_rx_entry) * len,
1347 RTE_CACHE_LINE_SIZE, socket_id);
1348 if (rxq->sw_ring == NULL) {
1349 ngbe_rx_queue_release(rxq);
1354 * Always allocate even if it's not going to be needed in order to
1355 * simplify the code.
1357 * This ring is used in Scattered Rx cases and Scattered Rx may
1358 * be requested in ngbe_dev_rx_init(), which is called later from the dev_start() flow.
1362 rte_zmalloc_socket("rxq->sw_sc_ring",
1363 sizeof(struct ngbe_scattered_rx_entry) * len,
1364 RTE_CACHE_LINE_SIZE, socket_id);
1365 if (rxq->sw_sc_ring == NULL) {
1366 ngbe_rx_queue_release(rxq);
1371 "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1372 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
1373 rxq->rx_ring_phys_addr);
1375 dev->data->rx_queues[queue_idx] = rxq;
1377 ngbe_reset_rx_queue(adapter, rxq);
1383 ngbe_dev_clear_queues(struct rte_eth_dev *dev)
1386 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1388 PMD_INIT_FUNC_TRACE();
1390 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1391 struct ngbe_tx_queue *txq = dev->data->tx_queues[i];
1394 txq->ops->release_mbufs(txq);
1395 txq->ops->reset(txq);
1399 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1400 struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
1403 ngbe_rx_queue_release_mbufs(rxq);
1404 ngbe_reset_rx_queue(adapter, rxq);
1410 ngbe_dev_free_queues(struct rte_eth_dev *dev)
1414 PMD_INIT_FUNC_TRACE();
1416 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1417 ngbe_dev_rx_queue_release(dev, i);
1418 dev->data->rx_queues[i] = NULL;
1420 dev->data->nb_rx_queues = 0;
1422 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1423 ngbe_dev_tx_queue_release(dev, i);
1424 dev->data->tx_queues[i] = NULL;
1426 dev->data->nb_tx_queues = 0;
1430 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
1432 struct ngbe_rx_entry *rxe = rxq->sw_ring;
1436 /* Initialize software ring entries */
1437 for (i = 0; i < rxq->nb_rx_desc; i++) {
1438 /* the ring can also be modified by hardware */
1439 volatile struct ngbe_rx_desc *rxd;
1440 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1443 PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
1444 (unsigned int)rxq->queue_id,
1445 (unsigned int)rxq->port_id);
1449 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1450 mbuf->port = rxq->port_id;
1453 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
1454 rxd = &rxq->rx_ring[i];
1455 NGBE_RXD_HDRADDR(rxd, 0);
1456 NGBE_RXD_PKTADDR(rxd, dma_addr);
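/*
 * ngbe_set_rx_function: select the Rx burst callback. Scattered Rx uses the
 * bulk-allocation or single-allocation scattered handler depending on
 * adapter->rx_bulk_alloc_allowed; otherwise either the bulk-allocation or the
 * default single-descriptor handler is chosen.
 */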
1464 ngbe_set_rx_function(struct rte_eth_dev *dev)
1466 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1468 if (dev->data->scattered_rx) {
1470 * Set the scattered callback: there are bulk and
1471 * single allocation versions.
1473 if (adapter->rx_bulk_alloc_allowed) {
1474 PMD_INIT_LOG(DEBUG, "Using a Scattered Rx callback with bulk "
1475 "allocation (port=%d).",
1476 dev->data->port_id);
1477 dev->rx_pkt_burst = ngbe_recv_pkts_sc_bulk_alloc;
1479 PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
1480 "single allocation) "
1481 "Scattered Rx callback "
1483 dev->data->port_id);
1485 dev->rx_pkt_burst = ngbe_recv_pkts_sc_single_alloc;
1488 * Below we set "simple" callbacks according to port/queues parameters.
1489 * If parameters allow we are going to choose between the following
1492 * - Single buffer allocation (the simplest one)
1494 } else if (adapter->rx_bulk_alloc_allowed) {
1495 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
1496 "satisfied. Rx Burst Bulk Alloc function "
1497 "will be used on port=%d.",
1498 dev->data->port_id);
1500 dev->rx_pkt_burst = ngbe_recv_pkts_bulk_alloc;
1502 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
1503 "satisfied, or Scattered Rx is requested "
1505 dev->data->port_id);
1507 dev->rx_pkt_burst = ngbe_recv_pkts;
1512 * Initializes Receive Unit.
1515 ngbe_dev_rx_init(struct rte_eth_dev *dev)
1518 struct ngbe_rx_queue *rxq;
1525 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1527 PMD_INIT_FUNC_TRACE();
1528 hw = ngbe_dev_hw(dev);
1531 * Make sure receives are disabled while setting
1532 * up the Rx context (registers, descriptor rings, etc.).
1534 wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
1535 wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
1537 /* Enable receipt of broadcast frames */
1538 fctrl = rd32(hw, NGBE_PSRCTL);
1539 fctrl |= NGBE_PSRCTL_BCA;
1540 wr32(hw, NGBE_PSRCTL, fctrl);
1542 hlreg0 = rd32(hw, NGBE_SECRXCTL);
1543 hlreg0 &= ~NGBE_SECRXCTL_XDSA;
1544 wr32(hw, NGBE_SECRXCTL, hlreg0);
1546 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
1547 NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
1549 /* Setup Rx queues */
1550 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1551 rxq = dev->data->rx_queues[i];
1553 /* Setup the Base and Length of the Rx Descriptor Rings */
1554 bus_addr = rxq->rx_ring_phys_addr;
1555 wr32(hw, NGBE_RXBAL(rxq->reg_idx),
1556 (uint32_t)(bus_addr & BIT_MASK32));
1557 wr32(hw, NGBE_RXBAH(rxq->reg_idx),
1558 (uint32_t)(bus_addr >> 32));
1559 wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
1560 wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);
1562 srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
1564 /* Set whether packets are dropped when no descriptors are available */
1566 srrctl |= NGBE_RXCFG_DROP;
1569 * Configure the Rx buffer size in the PKTLEN field of
1570 * the RXCFG register of the queue.
1571 * The value is in 1 KB resolution. Valid values can be from
1574 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1575 RTE_PKTMBUF_HEADROOM);
1576 buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
1577 srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
1579 wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
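/*
 * For example, with the common 2176-byte mbuf data room and 128-byte
 * headroom (typical mempool defaults, assumed here for illustration),
 * buf_size rounds down to 2048 and PKTLEN selects 2 KB Rx buffers.
 */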
1582 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
1583 dev->data->scattered_rx = 1;
1585 ngbe_set_rx_function(dev);
1591 * Initializes Transmit Unit.
1594 ngbe_dev_tx_init(struct rte_eth_dev *dev)
1597 struct ngbe_tx_queue *txq;
1601 PMD_INIT_FUNC_TRACE();
1602 hw = ngbe_dev_hw(dev);
1604 wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
1605 wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
1607 /* Setup the Base and Length of the Tx Descriptor Rings */
1608 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1609 txq = dev->data->tx_queues[i];
1611 bus_addr = txq->tx_ring_phys_addr;
1612 wr32(hw, NGBE_TXBAL(txq->reg_idx),
1613 (uint32_t)(bus_addr & BIT_MASK32));
1614 wr32(hw, NGBE_TXBAH(txq->reg_idx),
1615 (uint32_t)(bus_addr >> 32));
1616 wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
1617 NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
1618 /* Set up the H/W Tx Head and Tx Tail descriptor pointers */
1619 wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
1620 wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
1625 * Start Transmit and Receive Units.
1628 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
1631 struct ngbe_tx_queue *txq;
1632 struct ngbe_rx_queue *rxq;
1638 PMD_INIT_FUNC_TRACE();
1639 hw = ngbe_dev_hw(dev);
1641 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1642 txq = dev->data->tx_queues[i];
1643 /* Setup Transmit Threshold Registers */
1644 wr32m(hw, NGBE_TXCFG(txq->reg_idx),
1645 NGBE_TXCFG_HTHRESH_MASK |
1646 NGBE_TXCFG_WTHRESH_MASK,
1647 NGBE_TXCFG_HTHRESH(txq->hthresh) |
1648 NGBE_TXCFG_WTHRESH(txq->wthresh));
1651 dmatxctl = rd32(hw, NGBE_DMATXCTRL);
1652 dmatxctl |= NGBE_DMATXCTRL_ENA;
1653 wr32(hw, NGBE_DMATXCTRL, dmatxctl);
1655 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1656 txq = dev->data->tx_queues[i];
1657 if (txq->tx_deferred_start == 0) {
1658 ret = ngbe_dev_tx_queue_start(dev, i);
1664 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1665 rxq = dev->data->rx_queues[i];
1666 if (rxq->rx_deferred_start == 0) {
1667 ret = ngbe_dev_rx_queue_start(dev, i);
1673 /* Enable Receive engine */
1674 rxctrl = rd32(hw, NGBE_PBRXCTL);
1675 rxctrl |= NGBE_PBRXCTL_ENA;
1676 hw->mac.enable_rx_dma(hw, rxctrl);
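/*
 * The helpers below snapshot a queue's BAL/BAH/CFG registers just before the
 * queue is disabled and write them back (with the enable bit cleared) once
 * the disable has taken effect, so the queue keeps its configuration across
 * a stop/start cycle.
 */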
1682 ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
1684 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
1685 *(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
1686 *(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
1687 *(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
1691 ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
1693 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
1694 wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
1695 wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
1696 wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
1700 ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
1702 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
1703 *(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
1704 *(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
1705 *(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
1709 ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
1711 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
1712 wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
1713 wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
1714 wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
1718 * Start Receive Units for specified queue.
1721 ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1723 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1724 struct ngbe_rx_queue *rxq;
1728 PMD_INIT_FUNC_TRACE();
1730 rxq = dev->data->rx_queues[rx_queue_id];
1732 /* Allocate buffers for descriptor rings */
1733 if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
1734 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
1738 rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
1739 rxdctl |= NGBE_RXCFG_ENA;
1740 wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
1742 /* Wait until Rx Enable ready */
1743 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1746 rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
1747 } while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
1749 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
1751 wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
1752 wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
1753 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1759 * Stop Receive Units for specified queue.
1762 ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1764 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1765 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
1766 struct ngbe_rx_queue *rxq;
1770 PMD_INIT_FUNC_TRACE();
1772 rxq = dev->data->rx_queues[rx_queue_id];
1774 ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
1775 wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
1777 /* Wait until Rx Enable bit clear */
1778 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1781 rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
1782 } while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
1784 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
1786 rte_delay_us(RTE_NGBE_WAIT_100_US);
1787 ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
1789 ngbe_rx_queue_release_mbufs(rxq);
1790 ngbe_reset_rx_queue(adapter, rxq);
1791 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1797 * Start Transmit Units for specified queue.
1800 ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1802 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1803 struct ngbe_tx_queue *txq;
1807 PMD_INIT_FUNC_TRACE();
1809 txq = dev->data->tx_queues[tx_queue_id];
1810 wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
1812 /* Wait until Tx Enable ready */
1813 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1816 txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
1817 } while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
1819 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
1823 wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
1824 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1830 * Stop Transmit Units for specified queue.
1833 ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1835 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1836 struct ngbe_tx_queue *txq;
1838 uint32_t txtdh, txtdt;
1841 PMD_INIT_FUNC_TRACE();
1843 txq = dev->data->tx_queues[tx_queue_id];
1845 /* Wait until Tx queue is empty */
1846 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1848 rte_delay_us(RTE_NGBE_WAIT_100_US);
1849 txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
1850 txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
1851 } while (--poll_ms && (txtdh != txtdt));
1853 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
1856 ngbe_dev_save_tx_queue(hw, txq->reg_idx);
1857 wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
1859 /* Wait until Tx Enable bit clear */
1860 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1863 txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
1864 } while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
1866 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
1869 rte_delay_us(RTE_NGBE_WAIT_100_US);
1870 ngbe_dev_store_tx_queue(hw, txq->reg_idx);
1872 if (txq->ops != NULL) {
1873 txq->ops->release_mbufs(txq);
1874 txq->ops->reset(txq);
1876 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;