1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
9 #include <rte_ethdev.h>
10 #include <ethdev_driver.h>
11 #include <rte_malloc.h>
13 #include "ngbe_logs.h"
14 #include "base/ngbe.h"
15 #include "ngbe_ethdev.h"
16 #include "ngbe_rxtx.h"
19 * Prefetch a cache line into all cache levels.
21 #define rte_ngbe_prefetch(p) rte_prefetch0(p)
23 /*********************************************************************
27 **********************************************************************/
29 ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
32 struct ngbe_rx_queue *rxq;
33 volatile struct ngbe_rx_desc *rx_ring;
34 volatile struct ngbe_rx_desc *rxdp;
35 struct ngbe_rx_entry *sw_ring;
36 struct ngbe_rx_entry *rxe;
39 struct ngbe_rx_desc rxd;
51 rx_ring = rxq->rx_ring;
52 sw_ring = rxq->sw_ring;
53 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
54 while (nb_rx < nb_pkts) {
56	 * The order of operations here is important: the DD status
57	 * bit must be read before any other descriptor field.
58	 * rx_ring and rxdp point to volatile data, so the compiler
59	 * cannot reorder the accesses. If they were not volatile,
60	 * the reads could be reordered, which could lead to using
61	 * invalid descriptor fields when they are later read from rxd.
63 rxdp = &rx_ring[rx_id];
64 staterr = rxdp->qw1.lo.status;
65 if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
72 * If the NGBE_RXD_STAT_EOP flag is not set, the Rx packet
73 * is likely to be invalid and to be dropped by the various
74 * validation checks performed by the network stack.
76	 * Allocate a new mbuf to replenish the Rx ring descriptor.
77	 * If the allocation fails:
78	 *    - arrange for that Rx descriptor to be the first one
79	 *      parsed the next time the receive function is
80	 *      invoked [on the same queue].
82 * - Stop parsing the Rx ring and return immediately.
84	 * This policy does not drop the packet received in the Rx
85	 * descriptor for which the allocation of a new mbuf failed.
86	 * Thus, it allows that packet to be later retrieved if
87	 * mbufs have been freed in the meantime.
88 * As a side effect, holding Rx descriptors instead of
89 * systematically giving them back to the NIC may lead to
90 * Rx ring exhaustion situations.
91	 * However, the NIC can gracefully prevent such situations
92	 * from happening by sending specific "back-pressure" flow control
93	 * frames to its peer(s).
96 "port_id=%u queue_id=%u rx_id=%u ext_err_stat=0x%08x pkt_len=%u",
97 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
98 (uint16_t)rx_id, (uint32_t)staterr,
99 (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
101 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
104 "Rx mbuf alloc failed port_id=%u queue_id=%u",
105 (uint16_t)rxq->port_id,
106 (uint16_t)rxq->queue_id);
107 dev->data->rx_mbuf_alloc_failed++;
112 rxe = &sw_ring[rx_id];
114 if (rx_id == rxq->nb_rx_desc)
117 /* Prefetch next mbuf while processing current one. */
118 rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
121	 * When the next Rx descriptor is on a cache-line boundary,
122 * prefetch the next 4 Rx descriptors and the next 8 pointers
125 if ((rx_id & 0x3) == 0) {
126 rte_ngbe_prefetch(&rx_ring[rx_id]);
127 rte_ngbe_prefetch(&sw_ring[rx_id]);
132 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
133 NGBE_RXD_HDRADDR(rxdp, 0);
134 NGBE_RXD_PKTADDR(rxdp, dma_addr);
137 * Initialize the returned mbuf.
138	 * Set up generic mbuf fields:
139 * - number of segments,
142 * - Rx port identifier.
144 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len));
145 rxm->data_off = RTE_PKTMBUF_HEADROOM;
146 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
149 rxm->pkt_len = pkt_len;
150 rxm->data_len = pkt_len;
151 rxm->port = rxq->port_id;
154 * Store the mbuf address into the next entry of the array
155 * of returned packets.
157 rx_pkts[nb_rx++] = rxm;
159 rxq->rx_tail = rx_id;
162 * If the number of free Rx descriptors is greater than the Rx free
163 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
165 * Update the RDT with the value of the last processed Rx descriptor
166 * minus 1, to guarantee that the RDT register is never equal to the
167 * RDH register, which creates a "full" ring situation from the
168 * hardware point of view...
170 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
171 if (nb_hold > rxq->rx_free_thresh) {
173 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
174 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
175 (uint16_t)rx_id, (uint16_t)nb_hold,
177 rx_id = (uint16_t)((rx_id == 0) ?
178 (rxq->nb_rx_desc - 1) : (rx_id - 1));
179 ngbe_set32(rxq->rdt_reg_addr, rx_id);
182 rxq->nb_rx_hold = nb_hold;
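/*
 * Illustrative usage sketch, not part of the driver: applications reach
 * ngbe_recv_pkts() through the generic rte_eth_rx_burst() API. The port,
 * queue and burst size below are arbitrary example values, and the block
 * is kept under "#if 0" so it has no effect on the build.
 */
#if 0
	struct rte_mbuf *bufs[32];
	uint16_t nb, i;

	/* Poll port 0, queue 0; the PMD refills descriptors and advances
	 * the tail register as described above. */
	nb = rte_eth_rx_burst(0, 0, bufs, RTE_DIM(bufs));
	for (i = 0; i < nb; i++) {
		/* ... process the packet ... */
		rte_pktmbuf_free(bufs[i]);
	}
#endif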
187 /*********************************************************************
189 * Queue management functions
191 **********************************************************************/
194 ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
198 if (txq->sw_ring != NULL) {
199 for (i = 0; i < txq->nb_tx_desc; i++) {
200 if (txq->sw_ring[i].mbuf != NULL) {
201 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
202 txq->sw_ring[i].mbuf = NULL;
209 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
212 rte_free(txq->sw_ring);
216 ngbe_tx_queue_release(struct ngbe_tx_queue *txq)
219 if (txq->ops != NULL) {
220 txq->ops->release_mbufs(txq);
221 txq->ops->free_swring(txq);
228 ngbe_dev_tx_queue_release(void *txq)
230 ngbe_tx_queue_release(txq);
233 /* (Re)set dynamic ngbe_tx_queue fields to defaults */
235 ngbe_reset_tx_queue(struct ngbe_tx_queue *txq)
237 static const struct ngbe_tx_desc zeroed_desc = {0};
238 struct ngbe_tx_entry *txe = txq->sw_ring;
241 /* Zero out HW ring memory */
242 for (i = 0; i < txq->nb_tx_desc; i++)
243 txq->tx_ring[i] = zeroed_desc;
245 /* Initialize SW ring entries */
246 prev = (uint16_t)(txq->nb_tx_desc - 1);
247 for (i = 0; i < txq->nb_tx_desc; i++) {
248 /* the ring can also be modified by hardware */
249 volatile struct ngbe_tx_desc *txd = &txq->tx_ring[i];
251 txd->dw3 = rte_cpu_to_le_32(NGBE_TXD_DD);
254 txe[prev].next_id = i;
258 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
262 * Always allow 1 descriptor to be un-allocated to avoid
263 * a H/W race condition
265 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
266 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
268 memset((void *)&txq->ctx_cache, 0,
269 NGBE_CTX_NUM * sizeof(struct ngbe_ctx_info));
272 static const struct ngbe_txq_ops def_txq_ops = {
273 .release_mbufs = ngbe_tx_queue_release_mbufs,
274 .free_swring = ngbe_tx_free_swring,
275 .reset = ngbe_reset_tx_queue,
279 ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
282 unsigned int socket_id,
283 const struct rte_eth_txconf *tx_conf)
285 const struct rte_memzone *tz;
286 struct ngbe_tx_queue *txq;
288 uint16_t tx_free_thresh;
290 PMD_INIT_FUNC_TRACE();
291 hw = ngbe_dev_hw(dev);
294 * The Tx descriptor ring will be cleaned after txq->tx_free_thresh
295 * descriptors are used or if the number of descriptors required
296 * to transmit a packet is greater than the number of free Tx
298 * One descriptor in the Tx ring is used as a sentinel to avoid a
299 * H/W race condition, hence the maximum threshold constraints.
300	 * When set to zero, the default value is used (see the usage sketch after this function).
302 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
303 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
304 if (tx_free_thresh >= (nb_desc - 3)) {
306 "tx_free_thresh must be less than the number of TX descriptors minus 3. (tx_free_thresh=%u port=%d queue=%d)",
307 (unsigned int)tx_free_thresh,
308 (int)dev->data->port_id, (int)queue_idx);
312 if (nb_desc % tx_free_thresh != 0) {
314 "tx_free_thresh must be a divisor of the number of Tx descriptors. (tx_free_thresh=%u port=%d queue=%d)",
315 (unsigned int)tx_free_thresh,
316 (int)dev->data->port_id, (int)queue_idx);
320 /* Free memory prior to re-allocation if needed... */
321 if (dev->data->tx_queues[queue_idx] != NULL) {
322 ngbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
323 dev->data->tx_queues[queue_idx] = NULL;
326 /* First allocate the Tx queue data structure */
327 txq = rte_zmalloc_socket("ethdev Tx queue",
328 sizeof(struct ngbe_tx_queue),
329 RTE_CACHE_LINE_SIZE, socket_id);
334 * Allocate Tx ring hardware descriptors. A memzone large enough to
335 * handle the maximum ring size is allocated in order to allow for
336 * resizing in later calls to the queue setup function.
338 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
339 sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
340 NGBE_ALIGN, socket_id);
342 ngbe_tx_queue_release(txq);
346 txq->nb_tx_desc = nb_desc;
347 txq->tx_free_thresh = tx_free_thresh;
348 txq->pthresh = tx_conf->tx_thresh.pthresh;
349 txq->hthresh = tx_conf->tx_thresh.hthresh;
350 txq->wthresh = tx_conf->tx_thresh.wthresh;
351 txq->queue_id = queue_idx;
352 txq->reg_idx = queue_idx;
353 txq->port_id = dev->data->port_id;
354 txq->ops = &def_txq_ops;
355 txq->tx_deferred_start = tx_conf->tx_deferred_start;
357 txq->tdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXWP(txq->reg_idx));
358 txq->tdc_reg_addr = NGBE_REG_ADDR(hw, NGBE_TXCFG(txq->reg_idx));
360 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
361 txq->tx_ring = (struct ngbe_tx_desc *)TMZ_VADDR(tz);
363 /* Allocate software ring */
364 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
365 sizeof(struct ngbe_tx_entry) * nb_desc,
366 RTE_CACHE_LINE_SIZE, socket_id);
367 if (txq->sw_ring == NULL) {
368 ngbe_tx_queue_release(txq);
372 "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
373 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
375 txq->ops->reset(txq);
377 dev->data->tx_queues[queue_idx] = txq;
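/*
 * Illustrative sketch, not part of the driver (kept under "#if 0"): an
 * application-side rte_eth_tx_queue_setup() call that satisfies the
 * tx_free_thresh constraints checked above, i.e. the value is smaller
 * than nb_desc - 3 and divides nb_desc evenly. Port 0, queue 0, 512
 * descriptors and a threshold of 32 are arbitrary example values.
 */
#if 0
	struct rte_eth_txconf txconf = {
		.tx_free_thresh = 32,	/* 32 < 512 - 3 and 512 % 32 == 0 */
	};
	int rc;

	rc = rte_eth_tx_queue_setup(0, 0, 512,
				    rte_eth_dev_socket_id(0), &txconf);
#endif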
383 * ngbe_free_sc_cluster - free the not-yet-completed scattered cluster
385 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
386 * in the sw_sc_ring is not set to NULL but rather points to the next
387 * mbuf of this RSC aggregation (that has not been completed yet and still
388 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
389 * just free the first "nb_segs" segments of the cluster explicitly by calling
390 * rte_pktmbuf_free_seg() on each of them.
392 * @m scattered cluster head
395 ngbe_free_sc_cluster(struct rte_mbuf *m)
397 uint16_t i, nb_segs = m->nb_segs;
398 struct rte_mbuf *next_seg;
400 for (i = 0; i < nb_segs; i++) {
402 rte_pktmbuf_free_seg(m);
408 ngbe_rx_queue_release_mbufs(struct ngbe_rx_queue *rxq)
412 if (rxq->sw_ring != NULL) {
413 for (i = 0; i < rxq->nb_rx_desc; i++) {
414 if (rxq->sw_ring[i].mbuf != NULL) {
415 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
416 rxq->sw_ring[i].mbuf = NULL;
419 for (i = 0; i < rxq->rx_nb_avail; ++i) {
422 mb = rxq->rx_stage[rxq->rx_next_avail + i];
423 rte_pktmbuf_free_seg(mb);
425 rxq->rx_nb_avail = 0;
428 if (rxq->sw_sc_ring != NULL)
429 for (i = 0; i < rxq->nb_rx_desc; i++)
430 if (rxq->sw_sc_ring[i].fbuf != NULL) {
431 ngbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
432 rxq->sw_sc_ring[i].fbuf = NULL;
437 ngbe_rx_queue_release(struct ngbe_rx_queue *rxq)
440 ngbe_rx_queue_release_mbufs(rxq);
441 rte_free(rxq->sw_ring);
442 rte_free(rxq->sw_sc_ring);
448 ngbe_dev_rx_queue_release(void *rxq)
450 ngbe_rx_queue_release(rxq);
454 * Check if Rx Burst Bulk Alloc function can be used.
456 * 0: the preconditions are satisfied and the bulk allocation function
458 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
459 * function must be used.
462 check_rx_burst_bulk_alloc_preconditions(struct ngbe_rx_queue *rxq)
467 * Make sure the following pre-conditions are satisfied:
468 * rxq->rx_free_thresh >= RTE_PMD_NGBE_RX_MAX_BURST
469 * rxq->rx_free_thresh < rxq->nb_rx_desc
470 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
471 * Scattered packets are not supported. This should be checked
472 * outside of this function.
474 if (rxq->rx_free_thresh < RTE_PMD_NGBE_RX_MAX_BURST) {
476 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, RTE_PMD_NGBE_RX_MAX_BURST=%d",
477 rxq->rx_free_thresh, RTE_PMD_NGBE_RX_MAX_BURST);
479 } else if (rxq->rx_free_thresh >= rxq->nb_rx_desc) {
481 "Rx Burst Bulk Alloc Preconditions: rxq->rx_free_thresh=%d, rxq->nb_rx_desc=%d",
482 rxq->rx_free_thresh, rxq->nb_rx_desc);
484 } else if ((rxq->nb_rx_desc % rxq->rx_free_thresh) != 0) {
486 "Rx Burst Bulk Alloc Preconditions: rxq->nb_rx_desc=%d, rxq->rx_free_thresh=%d",
487 rxq->nb_rx_desc, rxq->rx_free_thresh);
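/*
 * Worked example of the checks above, not part of the driver (kept under
 * "#if 0"). Assuming RTE_PMD_NGBE_RX_MAX_BURST is 32 (its usual value),
 * a 512-descriptor ring with rx_free_thresh = 64 satisfies all three
 * preconditions, so the bulk allocation path stays enabled for the queue.
 */
#if 0
	RTE_ASSERT(64 >= RTE_PMD_NGBE_RX_MAX_BURST);	/* >= max burst   */
	RTE_ASSERT(64 < 512);				/* < ring size    */
	RTE_ASSERT(512 % 64 == 0);			/* divides evenly */
#endif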
494 /* Reset dynamic ngbe_rx_queue fields back to defaults */
496 ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
498 static const struct ngbe_rx_desc zeroed_desc = {
499 {{0}, {0} }, {{0}, {0} } };
501 uint16_t len = rxq->nb_rx_desc;
504 * By default, the Rx queue setup function allocates enough memory for
505 * NGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
506	 * extra memory at the end of the descriptor ring to be zeroed out.
508 if (adapter->rx_bulk_alloc_allowed)
509 /* zero out extra memory */
510 len += RTE_PMD_NGBE_RX_MAX_BURST;
513 * Zero out HW ring memory. Zero out extra memory at the end of
514	 * the H/W ring so that the look-ahead logic in the Rx Burst bulk
515	 * alloc function reads the extra memory as zeros.
517 for (i = 0; i < len; i++)
518 rxq->rx_ring[i] = zeroed_desc;
521	 * Initialize the extra software ring entries. Space for these extra
522 * entries is always allocated
524 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
525 for (i = rxq->nb_rx_desc; i < len; ++i)
526 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
528 rxq->rx_nb_avail = 0;
529 rxq->rx_next_avail = 0;
530 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
533 rxq->pkt_first_seg = NULL;
534 rxq->pkt_last_seg = NULL;
538 ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
541 unsigned int socket_id,
542 const struct rte_eth_rxconf *rx_conf,
543 struct rte_mempool *mp)
545 const struct rte_memzone *rz;
546 struct ngbe_rx_queue *rxq;
549 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
551 PMD_INIT_FUNC_TRACE();
552 hw = ngbe_dev_hw(dev);
554 /* Free memory prior to re-allocation if needed... */
555 if (dev->data->rx_queues[queue_idx] != NULL) {
556 ngbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
557 dev->data->rx_queues[queue_idx] = NULL;
560 /* First allocate the Rx queue data structure */
561 rxq = rte_zmalloc_socket("ethdev RX queue",
562 sizeof(struct ngbe_rx_queue),
563 RTE_CACHE_LINE_SIZE, socket_id);
567 rxq->nb_rx_desc = nb_desc;
568 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
569 rxq->queue_id = queue_idx;
570 rxq->reg_idx = queue_idx;
571 rxq->port_id = dev->data->port_id;
572 rxq->drop_en = rx_conf->rx_drop_en;
573 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
576 * Allocate Rx ring hardware descriptors. A memzone large enough to
577 * handle the maximum ring size is allocated in order to allow for
578 * resizing in later calls to the queue setup function.
580 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
581 RX_RING_SZ, NGBE_ALIGN, socket_id);
583 ngbe_rx_queue_release(rxq);
588 * Zero init all the descriptors in the ring.
590 memset(rz->addr, 0, RX_RING_SZ);
592 rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
593 rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
595 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
596 rxq->rx_ring = (struct ngbe_rx_desc *)TMZ_VADDR(rz);
599 * Certain constraints must be met in order to use the bulk buffer
600	 * allocation Rx burst function. If any of the Rx queues does not meet
601	 * them, the feature should be disabled for the whole port.
603 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
605 "queue[%d] doesn't meet Rx Bulk Alloc preconditions - canceling the feature for the whole port[%d]",
606 rxq->queue_id, rxq->port_id);
607 adapter->rx_bulk_alloc_allowed = false;
611 * Allocate software ring. Allow for space at the end of the
612 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
613 * function does not access an invalid memory region.
616 if (adapter->rx_bulk_alloc_allowed)
617 len += RTE_PMD_NGBE_RX_MAX_BURST;
619 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
620 sizeof(struct ngbe_rx_entry) * len,
621 RTE_CACHE_LINE_SIZE, socket_id);
622 if (rxq->sw_ring == NULL) {
623 ngbe_rx_queue_release(rxq);
628 * Always allocate even if it's not going to be needed in order to
631 * This ring is used in Scattered Rx cases and Scattered Rx may
632 * be requested in ngbe_dev_rx_init(), which is called later from
636 rte_zmalloc_socket("rxq->sw_sc_ring",
637 sizeof(struct ngbe_scattered_rx_entry) * len,
638 RTE_CACHE_LINE_SIZE, socket_id);
639 if (rxq->sw_sc_ring == NULL) {
640 ngbe_rx_queue_release(rxq);
645 "sw_ring=%p sw_sc_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
646 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
647 rxq->rx_ring_phys_addr);
649 dev->data->rx_queues[queue_idx] = rxq;
651 ngbe_reset_rx_queue(adapter, rxq);
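/*
 * Illustrative sketch, not part of the driver (kept under "#if 0"): the
 * application-side call path that ends up in this setup function. The
 * pool sizing, port/queue numbers and ring size are arbitrary example
 * values; passing a NULL rx_conf selects the defaults reported by the PMD.
 */
#if 0
	struct rte_mempool *pool;
	int rc;

	pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
				       RTE_MBUF_DEFAULT_BUF_SIZE,
				       rte_socket_id());
	if (pool == NULL)
		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");

	rc = rte_eth_rx_queue_setup(0, 0, 512,
				    rte_eth_dev_socket_id(0), NULL, pool);
#endif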
657 ngbe_dev_clear_queues(struct rte_eth_dev *dev)
660 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
662 PMD_INIT_FUNC_TRACE();
664 for (i = 0; i < dev->data->nb_tx_queues; i++) {
665 struct ngbe_tx_queue *txq = dev->data->tx_queues[i];
668 txq->ops->release_mbufs(txq);
669 txq->ops->reset(txq);
673 for (i = 0; i < dev->data->nb_rx_queues; i++) {
674 struct ngbe_rx_queue *rxq = dev->data->rx_queues[i];
677 ngbe_rx_queue_release_mbufs(rxq);
678 ngbe_reset_rx_queue(adapter, rxq);
684 ngbe_alloc_rx_queue_mbufs(struct ngbe_rx_queue *rxq)
686 struct ngbe_rx_entry *rxe = rxq->sw_ring;
690 /* Initialize software ring entries */
691 for (i = 0; i < rxq->nb_rx_desc; i++) {
692 /* the ring can also be modified by hardware */
693 volatile struct ngbe_rx_desc *rxd;
694 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
697 PMD_INIT_LOG(ERR, "Rx mbuf alloc failed queue_id=%u port_id=%u",
698 (unsigned int)rxq->queue_id,
699 (unsigned int)rxq->port_id);
703 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
704 mbuf->port = rxq->port_id;
707 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
708 rxd = &rxq->rx_ring[i];
709 NGBE_RXD_HDRADDR(rxd, 0);
710 NGBE_RXD_PKTADDR(rxd, dma_addr);
718 * Initializes Receive Unit.
721 ngbe_dev_rx_init(struct rte_eth_dev *dev)
724 struct ngbe_rx_queue *rxq;
732 PMD_INIT_FUNC_TRACE();
733 hw = ngbe_dev_hw(dev);
736 * Make sure receives are disabled while setting
737 * up the Rx context (registers, descriptor rings, etc.).
739 wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_ENA, 0);
740 wr32m(hw, NGBE_PBRXCTL, NGBE_PBRXCTL_ENA, 0);
742	/* Enable receipt of broadcast frames */
743 fctrl = rd32(hw, NGBE_PSRCTL);
744 fctrl |= NGBE_PSRCTL_BCA;
745 wr32(hw, NGBE_PSRCTL, fctrl);
747 hlreg0 = rd32(hw, NGBE_SECRXCTL);
748 hlreg0 &= ~NGBE_SECRXCTL_XDSA;
749 wr32(hw, NGBE_SECRXCTL, hlreg0);
751 wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
752 NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
754 /* Setup Rx queues */
755 for (i = 0; i < dev->data->nb_rx_queues; i++) {
756 rxq = dev->data->rx_queues[i];
758 /* Setup the Base and Length of the Rx Descriptor Rings */
759 bus_addr = rxq->rx_ring_phys_addr;
760 wr32(hw, NGBE_RXBAL(rxq->reg_idx),
761 (uint32_t)(bus_addr & BIT_MASK32));
762 wr32(hw, NGBE_RXBAH(rxq->reg_idx),
763 (uint32_t)(bus_addr >> 32));
764 wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
765 wr32(hw, NGBE_RXWP(rxq->reg_idx), 0);
767 srrctl = NGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
769		/* Set whether packets are dropped when no descriptors are available */
771 srrctl |= NGBE_RXCFG_DROP;
774 * Configure the Rx buffer size in the PKTLEN field of
775 * the RXCFG register of the queue.
776 * The value is in 1 KB resolution. Valid values can be from
779 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
780 RTE_PKTMBUF_HEADROOM);
781 buf_size = ROUND_DOWN(buf_size, 0x1 << 10);
782 srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
784 wr32(hw, NGBE_RXCFG(rxq->reg_idx), srrctl);
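/*
 * Worked example of the 1 KB rounding above, not part of the driver (kept
 * under "#if 0"). Assuming the default 128-byte RTE_PKTMBUF_HEADROOM and a
 * mempool created with a 1600-byte data room:
 *   1600 - 128 = 1472, ROUND_DOWN(1472, 1 << 10) = 1024,
 * so the queue would be programmed for 1 KB receive buffers.
 */
#if 0
	uint16_t example_size = 1600 - RTE_PKTMBUF_HEADROOM;

	example_size = ROUND_DOWN(example_size, 0x1 << 10);
	RTE_ASSERT(example_size == 1024);
#endif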
791 * Initializes Transmit Unit.
794 ngbe_dev_tx_init(struct rte_eth_dev *dev)
797 struct ngbe_tx_queue *txq;
801 PMD_INIT_FUNC_TRACE();
802 hw = ngbe_dev_hw(dev);
804 wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_ODSA, NGBE_SECTXCTL_ODSA);
805 wr32m(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_XDSA, 0);
807 /* Setup the Base and Length of the Tx Descriptor Rings */
808 for (i = 0; i < dev->data->nb_tx_queues; i++) {
809 txq = dev->data->tx_queues[i];
811 bus_addr = txq->tx_ring_phys_addr;
812 wr32(hw, NGBE_TXBAL(txq->reg_idx),
813 (uint32_t)(bus_addr & BIT_MASK32));
814 wr32(hw, NGBE_TXBAH(txq->reg_idx),
815 (uint32_t)(bus_addr >> 32));
816 wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_BUFLEN_MASK,
817 NGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
818		/* Setup the HW Tx Head and Tx Tail descriptor pointers */
819 wr32(hw, NGBE_TXRP(txq->reg_idx), 0);
820 wr32(hw, NGBE_TXWP(txq->reg_idx), 0);
825 * Start Transmit and Receive Units.
828 ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
831 struct ngbe_tx_queue *txq;
832 struct ngbe_rx_queue *rxq;
838 PMD_INIT_FUNC_TRACE();
839 hw = ngbe_dev_hw(dev);
841 for (i = 0; i < dev->data->nb_tx_queues; i++) {
842 txq = dev->data->tx_queues[i];
843 /* Setup Transmit Threshold Registers */
844 wr32m(hw, NGBE_TXCFG(txq->reg_idx),
845 NGBE_TXCFG_HTHRESH_MASK |
846 NGBE_TXCFG_WTHRESH_MASK,
847 NGBE_TXCFG_HTHRESH(txq->hthresh) |
848 NGBE_TXCFG_WTHRESH(txq->wthresh));
851 dmatxctl = rd32(hw, NGBE_DMATXCTRL);
852 dmatxctl |= NGBE_DMATXCTRL_ENA;
853 wr32(hw, NGBE_DMATXCTRL, dmatxctl);
855 for (i = 0; i < dev->data->nb_tx_queues; i++) {
856 txq = dev->data->tx_queues[i];
857 if (txq->tx_deferred_start == 0) {
858 ret = ngbe_dev_tx_queue_start(dev, i);
864 for (i = 0; i < dev->data->nb_rx_queues; i++) {
865 rxq = dev->data->rx_queues[i];
866 if (rxq->rx_deferred_start == 0) {
867 ret = ngbe_dev_rx_queue_start(dev, i);
873 /* Enable Receive engine */
874 rxctrl = rd32(hw, NGBE_PBRXCTL);
875 rxctrl |= NGBE_PBRXCTL_ENA;
876 hw->mac.enable_rx_dma(hw, rxctrl);
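/*
 * Illustrative sketch, not part of the driver (kept under "#if 0"): how an
 * application opts a Tx queue out of this automatic start by setting the
 * standard ethdev tx_deferred_start flag checked above, then brings the
 * queue up explicitly later. Port/queue 0 and 512 descriptors are
 * arbitrary example values.
 */
#if 0
	struct rte_eth_txconf txconf = {
		.tx_deferred_start = 1,	/* skip this queue in dev_start() */
	};

	rte_eth_tx_queue_setup(0, 0, 512, rte_eth_dev_socket_id(0), &txconf);
	rte_eth_dev_start(0);
	/* ... later ... */
	rte_eth_dev_tx_queue_start(0, 0);
#endif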
882 ngbe_dev_save_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
884 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
885 *(reg++) = rd32(hw, NGBE_RXBAL(rx_queue_id));
886 *(reg++) = rd32(hw, NGBE_RXBAH(rx_queue_id));
887 *(reg++) = rd32(hw, NGBE_RXCFG(rx_queue_id));
891 ngbe_dev_store_rx_queue(struct ngbe_hw *hw, uint16_t rx_queue_id)
893 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
894 wr32(hw, NGBE_RXBAL(rx_queue_id), *(reg++));
895 wr32(hw, NGBE_RXBAH(rx_queue_id), *(reg++));
896 wr32(hw, NGBE_RXCFG(rx_queue_id), *(reg++) & ~NGBE_RXCFG_ENA);
900 ngbe_dev_save_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
902 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
903 *(reg++) = rd32(hw, NGBE_TXBAL(tx_queue_id));
904 *(reg++) = rd32(hw, NGBE_TXBAH(tx_queue_id));
905 *(reg++) = rd32(hw, NGBE_TXCFG(tx_queue_id));
909 ngbe_dev_store_tx_queue(struct ngbe_hw *hw, uint16_t tx_queue_id)
911 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
912 wr32(hw, NGBE_TXBAL(tx_queue_id), *(reg++));
913 wr32(hw, NGBE_TXBAH(tx_queue_id), *(reg++));
914 wr32(hw, NGBE_TXCFG(tx_queue_id), *(reg++) & ~NGBE_TXCFG_ENA);
918 * Start Receive Units for specified queue.
921 ngbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
923 struct ngbe_hw *hw = ngbe_dev_hw(dev);
924 struct ngbe_rx_queue *rxq;
928 PMD_INIT_FUNC_TRACE();
930 rxq = dev->data->rx_queues[rx_queue_id];
932 /* Allocate buffers for descriptor rings */
933 if (ngbe_alloc_rx_queue_mbufs(rxq) != 0) {
934 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
938 rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
939 rxdctl |= NGBE_RXCFG_ENA;
940 wr32(hw, NGBE_RXCFG(rxq->reg_idx), rxdctl);
942 /* Wait until Rx Enable ready */
943 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
946 rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
947 } while (--poll_ms && !(rxdctl & NGBE_RXCFG_ENA));
949 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
951 wr32(hw, NGBE_RXRP(rxq->reg_idx), 0);
952 wr32(hw, NGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
953 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
959 * Stop Receive Units for specified queue.
962 ngbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
964 struct ngbe_hw *hw = ngbe_dev_hw(dev);
965 struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
966 struct ngbe_rx_queue *rxq;
970 PMD_INIT_FUNC_TRACE();
972 rxq = dev->data->rx_queues[rx_queue_id];
974 ngbe_dev_save_rx_queue(hw, rxq->reg_idx);
975 wr32m(hw, NGBE_RXCFG(rxq->reg_idx), NGBE_RXCFG_ENA, 0);
977 /* Wait until Rx Enable bit clear */
978 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
981 rxdctl = rd32(hw, NGBE_RXCFG(rxq->reg_idx));
982 } while (--poll_ms && (rxdctl & NGBE_RXCFG_ENA));
984 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
986 rte_delay_us(RTE_NGBE_WAIT_100_US);
987 ngbe_dev_store_rx_queue(hw, rxq->reg_idx);
989 ngbe_rx_queue_release_mbufs(rxq);
990 ngbe_reset_rx_queue(adapter, rxq);
991 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
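/*
 * Illustrative sketch, not part of the driver (kept under "#if 0"): the
 * application-facing counterpart of this per-queue stop path. A single Rx
 * queue can be quiesced and restarted at runtime without stopping the
 * whole port; port/queue 0 are arbitrary example values.
 */
#if 0
	rte_eth_dev_rx_queue_stop(0, 0);
	/* ... e.g. rework application-side state for the queue ... */
	rte_eth_dev_rx_queue_start(0, 0);
#endif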
997 * Start Transmit Units for specified queue.
1000 ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1002 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1003 struct ngbe_tx_queue *txq;
1007 PMD_INIT_FUNC_TRACE();
1009 txq = dev->data->tx_queues[tx_queue_id];
1010 wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, NGBE_TXCFG_ENA);
1012 /* Wait until Tx Enable ready */
1013 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1016 txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
1017 } while (--poll_ms && !(txdctl & NGBE_TXCFG_ENA));
1019 PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
1023 wr32(hw, NGBE_TXWP(txq->reg_idx), txq->tx_tail);
1024 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1030 * Stop Transmit Units for specified queue.
1033 ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1035 struct ngbe_hw *hw = ngbe_dev_hw(dev);
1036 struct ngbe_tx_queue *txq;
1038 uint32_t txtdh, txtdt;
1041 PMD_INIT_FUNC_TRACE();
1043 txq = dev->data->tx_queues[tx_queue_id];
1045 /* Wait until Tx queue is empty */
1046 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1048 rte_delay_us(RTE_NGBE_WAIT_100_US);
1049 txtdh = rd32(hw, NGBE_TXRP(txq->reg_idx));
1050 txtdt = rd32(hw, NGBE_TXWP(txq->reg_idx));
1051 } while (--poll_ms && (txtdh != txtdt));
1053 PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.",
1056 ngbe_dev_save_tx_queue(hw, txq->reg_idx);
1057 wr32m(hw, NGBE_TXCFG(txq->reg_idx), NGBE_TXCFG_ENA, 0);
1059 /* Wait until Tx Enable bit clear */
1060 poll_ms = RTE_NGBE_REGISTER_POLL_WAIT_10_MS;
1063 txdctl = rd32(hw, NGBE_TXCFG(txq->reg_idx));
1064 } while (--poll_ms && (txdctl & NGBE_TXCFG_ENA));
1066 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
1069 rte_delay_us(RTE_NGBE_WAIT_100_US);
1070 ngbe_dev_store_tx_queue(hw, txq->reg_idx);
1072 if (txq->ops != NULL) {
1073 txq->ops->release_mbufs(txq);
1074 txq->ops->reset(txq);
1076 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;