1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
12 #include <rte_common.h>
13 #include <rte_cycles.h>
15 #include <rte_debug.h>
16 #include <rte_ethdev.h>
17 #include <rte_ethdev_driver.h>
18 #include <rte_memzone.h>
19 #include <rte_mempool.h>
20 #include <rte_malloc.h>
23 #include "txgbe_logs.h"
24 #include "base/txgbe.h"
25 #include "txgbe_ethdev.h"
26 #include "txgbe_rxtx.h"
29 txgbe_is_vf(struct rte_eth_dev *dev)
31 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
33 switch (hw->mac.type) {
34 case txgbe_mac_raptor_vf:
41 /*********************************************************************
45 **********************************************************************/
48 * Check for descriptors with their DD bit set and free mbufs.
49 * Return the total number of buffers freed.
51 static __rte_always_inline int
52 txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
54 struct txgbe_tx_entry *txep;
57 struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
59 /* check DD bit on threshold descriptor */
60 status = txq->tx_ring[txq->tx_next_dd].dw3;
61 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
62 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
63 txgbe_set32_masked(txq->tdc_reg_addr,
64 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
69 * first buffer to free from S/W ring is at index
70 * tx_next_dd - (tx_free_thresh-1)
72 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
73 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
74 /* free buffers one at a time */
75 m = rte_pktmbuf_prefree_seg(txep->mbuf);
78 if (unlikely(m == NULL))
81 if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
82 (nb_free > 0 && m->pool != free[0]->pool)) {
83 rte_mempool_put_bulk(free[0]->pool,
84 (void **)free, nb_free);
92 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
94 /* buffers were freed, update counters */
95 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
96 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
97 if (txq->tx_next_dd >= txq->nb_tx_desc)
98 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
100 return txq->tx_free_thresh;
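/*
 * Worked example of the bookkeeping above (illustrative values, not
 * hardware requirements): with nb_tx_desc = 512 and tx_free_thresh = 32,
 * tx_next_dd starts at tx_free_thresh - 1 = 31.  Once descriptor 31
 * reports DD, the 32 mbufs in sw_ring[0..31] are released, nb_tx_free
 * grows by 32 and tx_next_dd advances to 63.  After the batch ending at
 * descriptor 511 completes, tx_next_dd wraps back to 31.
 */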
103 /* Populate 4 descriptors with data from 4 mbufs */
105 tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
107 uint64_t buf_dma_addr;
111 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
112 buf_dma_addr = rte_mbuf_data_iova(*pkts);
113 pkt_len = (*pkts)->data_len;
115 /* write data to descriptor */
116 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
117 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
118 TXGBE_TXD_DATLEN(pkt_len));
119 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
121 rte_prefetch0(&(*pkts)->pool);
125 /* Populate 1 descriptor with data from 1 mbuf */
127 tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
129 uint64_t buf_dma_addr;
132 buf_dma_addr = rte_mbuf_data_iova(*pkts);
133 pkt_len = (*pkts)->data_len;
135 /* write data to descriptor */
136 txdp->qw0 = cpu_to_le64(buf_dma_addr);
137 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
138 TXGBE_TXD_DATLEN(pkt_len));
139 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
141 rte_prefetch0(&(*pkts)->pool);
145 * Fill H/W descriptor ring with mbuf data.
146 * Copy mbuf pointers to the S/W ring.
149 txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
152 volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
153 struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
154 const int N_PER_LOOP = 4;
155 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
156 int mainpart, leftover;
160 * Process most of the packets in chunks of N pkts. Any
161 * leftover packets will get processed one at a time.
163 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
164 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
165 for (i = 0; i < mainpart; i += N_PER_LOOP) {
166 /* Copy N mbuf pointers to the S/W ring */
167 for (j = 0; j < N_PER_LOOP; ++j)
168 (txep + i + j)->mbuf = *(pkts + i + j);
169 tx4(txdp + i, pkts + i);
172 if (unlikely(leftover > 0)) {
173 for (i = 0; i < leftover; ++i) {
174 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
175 tx1(txdp + mainpart + i, pkts + mainpart + i);
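/*
 * Illustrative arithmetic for the mainpart/leftover split above: with
 * nb_pkts = 23 and N_PER_LOOP = 4, mainpart = 23 & ~3 = 20 and
 * leftover = 23 & 3 = 3, so five tx4() calls cover packets 0..19 and
 * three tx1() calls cover packets 20..22.
 */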
180 static inline uint16_t
181 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
184 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
188 * Begin scanning the H/W ring for done descriptors when the
189 * number of available descriptors drops below tx_free_thresh. For
190 * each done descriptor, free the associated buffer.
192 if (txq->nb_tx_free < txq->tx_free_thresh)
193 txgbe_tx_free_bufs(txq);
195 /* Only use descriptors that are available */
196 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
197 if (unlikely(nb_pkts == 0))
200 /* Use exactly nb_pkts descriptors */
201 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
204 * At this point, we know there are enough descriptors in the
205 * ring to transmit all the packets. This assumes that each
206 * mbuf contains a single segment, and that no new offloads
207 * are expected, which would require a new context descriptor.
211 * See if we're going to wrap-around. If so, handle the top
212 * of the descriptor ring first, then do the bottom. If not,
213 * the processing looks just like the "bottom" part anyway...
215 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
216 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
217 txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
221 /* Fill H/W descriptor ring with mbuf data */
222 txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
223 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
226 * Check for wrap-around. This would only happen if we used
227 * up to the last descriptor in the ring, no more, no less.
229 if (txq->tx_tail >= txq->nb_tx_desc)
232 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
233 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
234 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
236 /* update tail pointer */
238 txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
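/*
 * Wrap-around example for the tail handling above (illustrative values):
 * with nb_tx_desc = 512, tx_tail = 500 and nb_pkts = 20, the first fill
 * writes n = 12 descriptors at the top of the ring, the second fill
 * writes the remaining 8 starting from descriptor 0, and tx_tail ends
 * up at 8.
 */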
244 txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
249 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
250 if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
251 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
253 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
258 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
259 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
260 nb_tx = (uint16_t)(nb_tx + ret);
261 nb_pkts = (uint16_t)(nb_pkts - ret);
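/*
 * Application-side sketch (not part of the driver) of how packets reach
 * txgbe_xmit_pkts_simple() through the generic burst API.  port_id,
 * queue_id, pkts and nb_pkts are assumed to be set up by the caller;
 * if the ring fills up the loop stops and the caller may retry or drop:
 *
 *	uint16_t nb_tx = 0;
 *
 *	while (nb_tx < nb_pkts) {
 *		uint16_t sent = rte_eth_tx_burst(port_id, queue_id,
 *						 &pkts[nb_tx],
 *						 nb_pkts - nb_tx);
 *		if (sent == 0)
 *			break;
 *		nb_tx += sent;
 *	}
 */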
269 #ifndef DEFAULT_TX_FREE_THRESH
270 #define DEFAULT_TX_FREE_THRESH 32
274 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
276 return DEV_RX_OFFLOAD_VLAN_STRIP;
280 txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
283 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
284 struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
286 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
287 DEV_RX_OFFLOAD_UDP_CKSUM |
288 DEV_RX_OFFLOAD_TCP_CKSUM |
289 DEV_RX_OFFLOAD_KEEP_CRC |
290 DEV_RX_OFFLOAD_JUMBO_FRAME |
291 DEV_RX_OFFLOAD_VLAN_FILTER |
292 DEV_RX_OFFLOAD_RSS_HASH |
293 DEV_RX_OFFLOAD_SCATTER;
295 if (!txgbe_is_vf(dev))
296 offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
297 DEV_RX_OFFLOAD_QINQ_STRIP |
298 DEV_RX_OFFLOAD_VLAN_EXTEND);
 * RSC is only supported by PF devices in a non-SR-IOV mode.
304 if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
305 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
307 if (hw->mac.type == txgbe_mac_raptor)
308 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
310 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
315 static void __rte_cold
316 txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
320 if (txq->sw_ring != NULL) {
321 for (i = 0; i < txq->nb_tx_desc; i++) {
322 if (txq->sw_ring[i].mbuf != NULL) {
323 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
324 txq->sw_ring[i].mbuf = NULL;
330 static void __rte_cold
331 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
	if (txq != NULL && txq->sw_ring != NULL)
335 rte_free(txq->sw_ring);
338 static void __rte_cold
339 txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
341 if (txq != NULL && txq->ops != NULL) {
342 txq->ops->release_mbufs(txq);
343 txq->ops->free_swring(txq);
349 txgbe_dev_tx_queue_release(void *txq)
351 txgbe_tx_queue_release(txq);
354 static const struct txgbe_txq_ops def_txq_ops = {
355 .release_mbufs = txgbe_tx_queue_release_mbufs,
356 .free_swring = txgbe_tx_free_swring,
360 txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
362 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
363 if (txq->offloads == 0 &&
364 txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
365 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
366 dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
367 dev->tx_pkt_prepare = NULL;
372 txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
380 txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
382 uint64_t tx_offload_capa;
385 DEV_TX_OFFLOAD_VLAN_INSERT |
386 DEV_TX_OFFLOAD_IPV4_CKSUM |
387 DEV_TX_OFFLOAD_UDP_CKSUM |
388 DEV_TX_OFFLOAD_TCP_CKSUM |
389 DEV_TX_OFFLOAD_SCTP_CKSUM |
390 DEV_TX_OFFLOAD_TCP_TSO |
391 DEV_TX_OFFLOAD_UDP_TSO |
392 DEV_TX_OFFLOAD_UDP_TNL_TSO |
393 DEV_TX_OFFLOAD_IP_TNL_TSO |
394 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
395 DEV_TX_OFFLOAD_GRE_TNL_TSO |
396 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
397 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
398 DEV_TX_OFFLOAD_MULTI_SEGS;
400 if (!txgbe_is_vf(dev))
401 tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
403 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
405 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
407 return tx_offload_capa;
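/*
 * Sketch (not part of the driver) of how an application typically reads
 * these capability flags and opts in to one of them; port_conf and
 * port_id are assumed to exist in the caller and error handling is
 * omitted:
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO)
 *		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
 */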
411 txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
414 unsigned int socket_id,
415 const struct rte_eth_txconf *tx_conf)
417 const struct rte_memzone *tz;
418 struct txgbe_tx_queue *txq;
420 uint16_t tx_free_thresh;
423 PMD_INIT_FUNC_TRACE();
424 hw = TXGBE_DEV_HW(dev);
426 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
429 * Validate number of transmit descriptors.
 * It must not exceed the hardware maximum, and must be a multiple
 * of TXGBE_TXD_ALIGN.
433 if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
434 nb_desc > TXGBE_RING_DESC_MAX ||
435 nb_desc < TXGBE_RING_DESC_MIN) {
440 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
441 * descriptors are used or if the number of descriptors required
 * to transmit a packet is greater than the number of free TX
 * descriptors.
444 * One descriptor in the TX ring is used as a sentinel to avoid a
445 * H/W race condition, hence the maximum threshold constraints.
446 * When set to zero use default values.
448 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
449 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
450 if (tx_free_thresh >= (nb_desc - 3)) {
451 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
452 "TX descriptors minus 3. (tx_free_thresh=%u "
454 (unsigned int)tx_free_thresh,
455 (int)dev->data->port_id, (int)queue_idx);
459 if ((nb_desc % tx_free_thresh) != 0) {
460 PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
461 "number of TX descriptors. (tx_free_thresh=%u "
462 "port=%d queue=%d)", (unsigned int)tx_free_thresh,
463 (int)dev->data->port_id, (int)queue_idx);
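	/*
	 * Illustrative values that satisfy both checks above (assumptions,
	 * not recommendations): nb_desc = 512 with tx_free_thresh = 32
	 * passes, since 32 < 512 - 3 and 512 % 32 == 0, whereas
	 * tx_free_thresh = 48 would fail the divisor check (512 % 48 != 0).
	 */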
467 /* Free memory prior to re-allocation if needed... */
468 if (dev->data->tx_queues[queue_idx] != NULL) {
469 txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
470 dev->data->tx_queues[queue_idx] = NULL;
473 /* First allocate the tx queue data structure */
474 txq = rte_zmalloc_socket("ethdev TX queue",
475 sizeof(struct txgbe_tx_queue),
476 RTE_CACHE_LINE_SIZE, socket_id);
481 * Allocate TX ring hardware descriptors. A memzone large enough to
482 * handle the maximum ring size is allocated in order to allow for
483 * resizing in later calls to the queue setup function.
485 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
486 sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
487 TXGBE_ALIGN, socket_id);
489 txgbe_tx_queue_release(txq);
493 txq->nb_tx_desc = nb_desc;
494 txq->tx_free_thresh = tx_free_thresh;
495 txq->pthresh = tx_conf->tx_thresh.pthresh;
496 txq->hthresh = tx_conf->tx_thresh.hthresh;
497 txq->wthresh = tx_conf->tx_thresh.wthresh;
498 txq->queue_id = queue_idx;
499 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
500 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
501 txq->port_id = dev->data->port_id;
502 txq->offloads = offloads;
503 txq->ops = &def_txq_ops;
504 txq->tx_deferred_start = tx_conf->tx_deferred_start;
506 /* Modification to set tail pointer for virtual function
509 if (hw->mac.type == txgbe_mac_raptor_vf) {
510 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
511 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
513 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
514 TXGBE_TXWP(txq->reg_idx));
515 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
516 TXGBE_TXCFG(txq->reg_idx));
519 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
520 txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);
522 /* Allocate software ring */
523 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
524 sizeof(struct txgbe_tx_entry) * nb_desc,
525 RTE_CACHE_LINE_SIZE, socket_id);
526 if (txq->sw_ring == NULL) {
527 txgbe_tx_queue_release(txq);
530 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
531 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
533 /* set up scalar TX function as appropriate */
534 txgbe_set_tx_function(dev, txq);
536 txq->ops->reset(txq);
538 dev->data->tx_queues[queue_idx] = txq;
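/*
 * Caller-side sketch (not part of the driver) of a setup call that lands
 * in txgbe_dev_tx_queue_setup(); port id, descriptor count and threshold
 * are illustrative assumptions that satisfy the checks above and, with
 * no offloads requested, would typically select the simple Tx path:
 *
 *	struct rte_eth_txconf txconf = {
 *		.tx_free_thresh = 32,
 *		.offloads = 0,
 *	};
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *				     rte_eth_dev_socket_id(port_id),
 *				     &txconf);
 */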
544 * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
546 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
547 * in the sw_rsc_ring is not set to NULL but rather points to the next
548 * mbuf of this RSC aggregation (that has not been completed yet and still
549 * resides on the HW ring). So, instead of calling for rte_pktmbuf_free() we
550 * will just free first "nb_segs" segments of the cluster explicitly by calling
551 * an rte_pktmbuf_free_seg().
553 * @m scattered cluster head
555 static void __rte_cold
556 txgbe_free_sc_cluster(struct rte_mbuf *m)
558 uint16_t i, nb_segs = m->nb_segs;
559 struct rte_mbuf *next_seg;
	for (i = 0; i < nb_segs; i++) {
		next_seg = m->next;
		rte_pktmbuf_free_seg(m);
		m = next_seg;
	}
568 static void __rte_cold
569 txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
573 if (rxq->sw_ring != NULL) {
574 for (i = 0; i < rxq->nb_rx_desc; i++) {
575 if (rxq->sw_ring[i].mbuf != NULL) {
576 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
577 rxq->sw_ring[i].mbuf = NULL;
580 if (rxq->rx_nb_avail) {
581 for (i = 0; i < rxq->rx_nb_avail; ++i) {
584 mb = rxq->rx_stage[rxq->rx_next_avail + i];
585 rte_pktmbuf_free_seg(mb);
587 rxq->rx_nb_avail = 0;
592 for (i = 0; i < rxq->nb_rx_desc; i++)
593 if (rxq->sw_sc_ring[i].fbuf) {
594 txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
595 rxq->sw_sc_ring[i].fbuf = NULL;
599 static void __rte_cold
600 txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
603 txgbe_rx_queue_release_mbufs(rxq);
604 rte_free(rxq->sw_ring);
605 rte_free(rxq->sw_sc_ring);
611 txgbe_dev_rx_queue_release(void *rxq)
613 txgbe_rx_queue_release(rxq);
617 * Check if Rx Burst Bulk Alloc function can be used.
 * 0: the preconditions are satisfied and the bulk allocation function
 *    can be used.
621 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
622 * function must be used.
624 static inline int __rte_cold
625 check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
630 * Make sure the following pre-conditions are satisfied:
631 * rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
632 * rxq->rx_free_thresh < rxq->nb_rx_desc
633 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
634 * Scattered packets are not supported. This should be checked
635 * outside of this function.
637 if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
638 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
639 "rxq->rx_free_thresh=%d, "
640 "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
641 rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
643 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
644 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
645 "rxq->rx_free_thresh=%d, "
646 "rxq->nb_rx_desc=%d",
647 rxq->rx_free_thresh, rxq->nb_rx_desc);
649 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
650 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
651 "rxq->nb_rx_desc=%d, "
652 "rxq->rx_free_thresh=%d",
653 rxq->nb_rx_desc, rxq->rx_free_thresh);
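/*
 * Illustrative check of the three preconditions (the burst macro value
 * is an assumption): with RTE_PMD_TXGBE_RX_MAX_BURST = 32, a queue with
 * nb_rx_desc = 512 and rx_free_thresh = 64 qualifies for bulk allocation
 * (64 >= 32, 64 < 512, 512 % 64 == 0), whereas rx_free_thresh = 96 would
 * fail the divisor check because 512 % 96 != 0.
 */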
660 /* Reset dynamic txgbe_rx_queue fields back to defaults */
661 static void __rte_cold
662 txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
664 static const struct txgbe_rx_desc zeroed_desc = {
665 {{0}, {0} }, {{0}, {0} } };
667 uint16_t len = rxq->nb_rx_desc;
670 * By default, the Rx queue setup function allocates enough memory for
671 * TXGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
672 * extra memory at the end of the descriptor ring to be zero'd out.
674 if (adapter->rx_bulk_alloc_allowed)
675 /* zero out extra memory */
676 len += RTE_PMD_TXGBE_RX_MAX_BURST;
679 * Zero out HW ring memory. Zero out extra memory at the end of
680 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
681 * reads extra memory as zeros.
683 for (i = 0; i < len; i++)
684 rxq->rx_ring[i] = zeroed_desc;
687 * initialize extra software ring entries. Space for these extra
688 * entries is always allocated
690 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
691 for (i = rxq->nb_rx_desc; i < len; ++i)
692 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
694 rxq->rx_nb_avail = 0;
695 rxq->rx_next_avail = 0;
696 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
699 rxq->pkt_first_seg = NULL;
700 rxq->pkt_last_seg = NULL;
704 txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
707 unsigned int socket_id,
708 const struct rte_eth_rxconf *rx_conf,
709 struct rte_mempool *mp)
711 const struct rte_memzone *rz;
712 struct txgbe_rx_queue *rxq;
715 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
718 PMD_INIT_FUNC_TRACE();
719 hw = TXGBE_DEV_HW(dev);
721 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
724 * Validate number of receive descriptors.
 * It must not exceed the hardware maximum, and must be a multiple
 * of TXGBE_RXD_ALIGN.
728 if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
729 nb_desc > TXGBE_RING_DESC_MAX ||
730 nb_desc < TXGBE_RING_DESC_MIN) {
734 /* Free memory prior to re-allocation if needed... */
735 if (dev->data->rx_queues[queue_idx] != NULL) {
736 txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
737 dev->data->rx_queues[queue_idx] = NULL;
740 /* First allocate the rx queue data structure */
741 rxq = rte_zmalloc_socket("ethdev RX queue",
742 sizeof(struct txgbe_rx_queue),
743 RTE_CACHE_LINE_SIZE, socket_id);
747 rxq->nb_rx_desc = nb_desc;
748 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
749 rxq->queue_id = queue_idx;
750 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
751 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
752 rxq->port_id = dev->data->port_id;
753 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
754 rxq->crc_len = RTE_ETHER_CRC_LEN;
757 rxq->drop_en = rx_conf->rx_drop_en;
758 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
759 rxq->offloads = offloads;
762 * The packet type in RX descriptor is different for different NICs.
763 * So set different masks for different NICs.
765 rxq->pkt_type_mask = TXGBE_PTID_MASK;
768 * Allocate RX ring hardware descriptors. A memzone large enough to
769 * handle the maximum ring size is allocated in order to allow for
770 * resizing in later calls to the queue setup function.
772 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
773 RX_RING_SZ, TXGBE_ALIGN, socket_id);
775 txgbe_rx_queue_release(rxq);
780 * Zero init all the descriptors in the ring.
782 memset(rz->addr, 0, RX_RING_SZ);
785 * Modified to setup VFRDT for Virtual Function
	if (hw->mac.type == txgbe_mac_raptor_vf) {
		rxq->rdt_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
		rxq->rdh_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
	} else {
		rxq->rdt_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
		rxq->rdh_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
	}
799 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
800 rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
803 * Certain constraints must be met in order to use the bulk buffer
 * allocation Rx burst function. If any Rx queue doesn't meet them,
805 * the feature should be disabled for the whole port.
807 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
808 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
809 "preconditions - canceling the feature for "
810 "the whole port[%d]",
811 rxq->queue_id, rxq->port_id);
812 adapter->rx_bulk_alloc_allowed = false;
816 * Allocate software ring. Allow for space at the end of the
817 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
818 * function does not access an invalid memory region.
821 if (adapter->rx_bulk_alloc_allowed)
822 len += RTE_PMD_TXGBE_RX_MAX_BURST;
824 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
825 sizeof(struct txgbe_rx_entry) * len,
826 RTE_CACHE_LINE_SIZE, socket_id);
828 txgbe_rx_queue_release(rxq);
	/*
	 * Always allocate even if it's not going to be needed in order to
	 * simplify the code.
	 *
	 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
	 * be requested in txgbe_dev_rx_init(), which is called later from
	 * dev_start().
	 */
	rxq->sw_sc_ring =
		rte_zmalloc_socket("rxq->sw_sc_ring",
842 sizeof(struct txgbe_scattered_rx_entry) * len,
843 RTE_CACHE_LINE_SIZE, socket_id);
844 if (!rxq->sw_sc_ring) {
845 txgbe_rx_queue_release(rxq);
849 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
850 "dma_addr=0x%" PRIx64,
851 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
852 rxq->rx_ring_phys_addr);
854 dev->data->rx_queues[queue_idx] = rxq;
856 txgbe_reset_rx_queue(adapter, rxq);
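/*
 * Caller-side sketch (not part of the driver) of a setup call that lands
 * in txgbe_dev_rx_queue_setup(); pool sizing, descriptor count and port
 * id are illustrative assumptions and error handling is omitted:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *			256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *				     rte_eth_dev_socket_id(port_id),
 *				     NULL, mp);
 */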
862 txgbe_dev_free_queues(struct rte_eth_dev *dev)
866 PMD_INIT_FUNC_TRACE();
868 for (i = 0; i < dev->data->nb_rx_queues; i++) {
869 txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
870 dev->data->rx_queues[i] = NULL;
872 dev->data->nb_rx_queues = 0;
874 for (i = 0; i < dev->data->nb_tx_queues; i++) {
875 txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
876 dev->data->tx_queues[i] = NULL;
878 dev->data->nb_tx_queues = 0;
882 txgbe_set_rx_function(struct rte_eth_dev *dev)
887 static int __rte_cold
888 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
890 struct txgbe_rx_entry *rxe = rxq->sw_ring;
894 /* Initialize software ring entries */
895 for (i = 0; i < rxq->nb_rx_desc; i++) {
896 volatile struct txgbe_rx_desc *rxd;
897 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
				     (unsigned int)rxq->queue_id);
			return -ENOMEM;
		}
905 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
906 mbuf->port = rxq->port_id;
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
910 rxd = &rxq->rx_ring[i];
911 TXGBE_RXD_HDRADDR(rxd, 0);
912 TXGBE_RXD_PKTADDR(rxd, dma_addr);
920 * txgbe_get_rscctl_maxdesc
922 * @pool Memory pool of the Rx queue
924 static inline uint32_t
925 txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
927 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
	uint16_t maxdesc =
		RTE_IPV4_MAX_PKT_LEN /
		(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);

	if (maxdesc >= 16)
		return TXGBE_RXCFG_RSCMAX_16;
935 else if (maxdesc >= 8)
936 return TXGBE_RXCFG_RSCMAX_8;
937 else if (maxdesc >= 4)
938 return TXGBE_RXCFG_RSCMAX_4;
940 return TXGBE_RXCFG_RSCMAX_1;
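/*
 * Worked example for the computation above, assuming a mempool created
 * with RTE_MBUF_DEFAULT_BUF_SIZE (2176 B data room, of which the default
 * RTE_PKTMBUF_HEADROOM of 128 B is headroom): maxdesc = 65535 / 2048 = 31,
 * so the function returns TXGBE_RXCFG_RSCMAX_16.
 */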
944 * txgbe_set_rsc - configure RSC related port HW registers
946 * Configures the port's RSC related registers.
950 * Returns 0 in case of success or a non-zero error code
953 txgbe_set_rsc(struct rte_eth_dev *dev)
955 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
956 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
957 struct rte_eth_dev_info dev_info = { 0 };
958 bool rsc_capable = false;
964 dev->dev_ops->dev_infos_get(dev, &dev_info);
965 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
968 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
969 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
974 /* RSC global configuration */
976 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
977 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
978 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
983 rfctl = rd32(hw, TXGBE_PSRCTL);
984 if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
985 rfctl &= ~TXGBE_PSRCTL_RSCDIA;
987 rfctl |= TXGBE_PSRCTL_RSCDIA;
988 wr32(hw, TXGBE_PSRCTL, rfctl);
990 /* If LRO hasn't been requested - we are done here. */
991 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
994 /* Set PSRCTL.RSCACK bit */
995 rdrxctl = rd32(hw, TXGBE_PSRCTL);
996 rdrxctl |= TXGBE_PSRCTL_RSCACK;
997 wr32(hw, TXGBE_PSRCTL, rdrxctl);
999 /* Per-queue RSC configuration */
1000 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1001 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
1003 rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1005 rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
1007 rd32(hw, TXGBE_ITR(rxq->reg_idx));
1010 * txgbe PMD doesn't support header-split at the moment.
1012 srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
1013 srrctl |= TXGBE_RXCFG_HDRLEN(128);
1016 * TODO: Consider setting the Receive Descriptor Minimum
1017 * Threshold Size for an RSC case. This is not an obviously
 * beneficial option, but it may be worth considering.
1021 srrctl |= TXGBE_RXCFG_RSCENA;
1022 srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
1023 srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
1024 psrtype |= TXGBE_POOLRSS_L4HDR;
1027 * RSC: Set ITR interval corresponding to 2K ints/s.
1029 * Full-sized RSC aggregations for a 10Gb/s link will
1030 * arrive at about 20K aggregation/s rate.
 * A 2K ints/s rate will cause only 10% of the
1033 * aggregations to be closed due to the interrupt timer
1034 * expiration for a streaming at wire-speed case.
1036 * For a sparse streaming case this setting will yield
1037 * at most 500us latency for a single RSC aggregation.
1039 eitr &= ~TXGBE_ITR_IVAL_MASK;
1040 eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
1041 eitr |= TXGBE_ITR_WRDSA;
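		/*
		 * The 500 us figure quoted above follows directly from the
		 * rate: at 2000 interrupts/s the ITR timer fires every
		 * 1 s / 2000 = 500 us, which bounds how long a lone RSC
		 * aggregation can wait before being closed.
		 */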
1043 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
1044 wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
1045 wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
 * RSC requires the mapping of the queue to the interrupt vector.
1051 txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
1056 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
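/*
 * Configuration sketch (not part of the driver): an application opts in
 * to the RSC/LRO path configured above by requesting the offload before
 * configuring the device; port_id, nb_rxq and nb_txq are assumptions and
 * error handling is omitted:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */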
1062 * Initializes Receive Unit.
1065 txgbe_dev_rx_init(struct rte_eth_dev *dev)
1067 struct txgbe_hw *hw;
1068 struct txgbe_rx_queue *rxq;
1077 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1080 PMD_INIT_FUNC_TRACE();
1081 hw = TXGBE_DEV_HW(dev);
1084 * Make sure receives are disabled while setting
1085 * up the RX context (registers, descriptor rings, etc.).
1087 wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
1088 wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
1090 /* Enable receipt of broadcasted frames */
1091 fctrl = rd32(hw, TXGBE_PSRCTL);
1092 fctrl |= TXGBE_PSRCTL_BCA;
1093 wr32(hw, TXGBE_PSRCTL, fctrl);
1096 * Configure CRC stripping, if any.
1098 hlreg0 = rd32(hw, TXGBE_SECRXCTL);
1099 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1100 hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
1102 hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
1103 wr32(hw, TXGBE_SECRXCTL, hlreg0);
1106 * Configure jumbo frame support, if any.
1108 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1109 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
1110 TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
1112 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
1113 TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
1117 * If loopback mode is configured, set LPBK bit.
1119 hlreg0 = rd32(hw, TXGBE_PSRCTL);
1120 if (hw->mac.type == txgbe_mac_raptor &&
1121 dev->data->dev_conf.lpbk_mode)
1122 hlreg0 |= TXGBE_PSRCTL_LBENA;
1124 hlreg0 &= ~TXGBE_PSRCTL_LBENA;
1126 wr32(hw, TXGBE_PSRCTL, hlreg0);
1129 * Assume no header split and no VLAN strip support
 * on any Rx queue first.
1132 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1134 /* Setup RX queues */
1135 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1136 rxq = dev->data->rx_queues[i];
1139 * Reset crc_len in case it was changed after queue setup by a
1140 * call to configure.
1142 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1143 rxq->crc_len = RTE_ETHER_CRC_LEN;
1147 /* Setup the Base and Length of the Rx Descriptor Rings */
1148 bus_addr = rxq->rx_ring_phys_addr;
1149 wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
1150 (uint32_t)(bus_addr & BIT_MASK32));
1151 wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
1152 (uint32_t)(bus_addr >> 32));
1153 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
1154 wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
1156 srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
1158 /* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= TXGBE_RXCFG_DROP;
1163 * Configure the RX buffer size in the PKTLEN field of
1164 * the RXCFG register of the queue.
1165 * The value is in 1 KB resolution. Valid values can be from
1168 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1169 RTE_PKTMBUF_HEADROOM);
1170 buf_size = ROUND_UP(buf_size, 0x1 << 10);
1171 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
1173 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
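		/*
		 * Example of the PKTLEN math above for a default-sized
		 * mempool (an assumption): 2176 B of data room minus 128 B
		 * of headroom leaves 2048 B, which ROUND_UP() keeps at
		 * 2048, so a 2 KB receive buffer is advertised to the
		 * hardware.
		 */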
	/* Add room for two VLAN tags (QinQ) when checking whether a
	 * maximum-sized frame still fits into a single Rx buffer.
	 */
1176 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1177 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
1178 dev->data->scattered_rx = 1;
1179 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1180 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1183 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
1184 dev->data->scattered_rx = 1;
1187 * Setup the Checksum Register.
1188 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
1189 * Enable IP/L4 checksum computation by hardware if requested to do so.
1191 rxcsum = rd32(hw, TXGBE_PSRCTL);
1192 rxcsum |= TXGBE_PSRCTL_PCSD;
1193 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
1194 rxcsum |= TXGBE_PSRCTL_L4CSUM;
1196 rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
1198 wr32(hw, TXGBE_PSRCTL, rxcsum);
1200 if (hw->mac.type == txgbe_mac_raptor) {
1201 rdrxctl = rd32(hw, TXGBE_SECRXCTL);
1202 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1203 rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
1205 rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
1206 wr32(hw, TXGBE_SECRXCTL, rdrxctl);
1209 rc = txgbe_set_rsc(dev);
1213 txgbe_set_rx_function(dev);
1219 * Initializes Transmit Unit.
1222 txgbe_dev_tx_init(struct rte_eth_dev *dev)
1224 struct txgbe_hw *hw;
1225 struct txgbe_tx_queue *txq;
1229 PMD_INIT_FUNC_TRACE();
1230 hw = TXGBE_DEV_HW(dev);
1232 /* Setup the Base and Length of the Tx Descriptor Rings */
1233 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1234 txq = dev->data->tx_queues[i];
1236 bus_addr = txq->tx_ring_phys_addr;
1237 wr32(hw, TXGBE_TXBAL(txq->reg_idx),
1238 (uint32_t)(bus_addr & BIT_MASK32));
1239 wr32(hw, TXGBE_TXBAH(txq->reg_idx),
1240 (uint32_t)(bus_addr >> 32));
1241 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
1242 TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
1243 /* Setup the HW Tx Head and TX Tail descriptor pointers */
1244 wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
1245 wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
1250 txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
1252 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
1253 *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
1254 *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
1255 *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
1259 txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
1261 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
1262 wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
1263 wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
1264 wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
1268 txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
1270 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
1271 *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
1272 *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
1273 *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
1277 txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
1279 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
1280 wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
1281 wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
1282 wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
1286 * Start Receive Units for specified queue.
1289 txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1291 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1292 struct txgbe_rx_queue *rxq;
1296 PMD_INIT_FUNC_TRACE();
1298 rxq = dev->data->rx_queues[rx_queue_id];
1300 /* Allocate buffers for descriptor rings */
1301 if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
			     rx_queue_id);
1306 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1307 rxdctl |= TXGBE_RXCFG_ENA;
1308 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
1310 /* Wait until RX Enable ready */
1311 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
1314 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1315 } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
1319 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
1320 wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
1321 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1327 * Stop Receive Units for specified queue.
1330 txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1332 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1333 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1334 struct txgbe_rx_queue *rxq;
1338 PMD_INIT_FUNC_TRACE();
1340 rxq = dev->data->rx_queues[rx_queue_id];
1342 txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
1343 wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
1345 /* Wait until RX Enable bit clear */
1346 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
1349 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1350 } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
1354 rte_delay_us(RTE_TXGBE_WAIT_100_US);
1355 txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
1357 txgbe_rx_queue_release_mbufs(rxq);
1358 txgbe_reset_rx_queue(adapter, rxq);
1359 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1365 * Start Transmit Units for specified queue.
1368 txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1370 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1371 struct txgbe_tx_queue *txq;
1375 PMD_INIT_FUNC_TRACE();
1377 txq = dev->data->tx_queues[tx_queue_id];
1378 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
1380 /* Wait until TX Enable ready */
1381 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
1384 txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
1385 } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
			     tx_queue_id);
1391 wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
1392 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1398 * Stop Transmit Units for specified queue.
1401 txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1403 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1404 struct txgbe_tx_queue *txq;
1406 uint32_t txtdh, txtdt;
1409 PMD_INIT_FUNC_TRACE();
1411 txq = dev->data->tx_queues[tx_queue_id];
1413 /* Wait until TX queue is empty */
1414 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
1416 rte_delay_us(RTE_TXGBE_WAIT_100_US);
1417 txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
1418 txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
1419 } while (--poll_ms && (txtdh != txtdt));
1422 "Tx Queue %d is not empty when stopping.",
1425 txgbe_dev_save_tx_queue(hw, txq->reg_idx);
1426 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
1428 /* Wait until TX Enable bit clear */
1429 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
1432 txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
1433 } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
			     tx_queue_id);
1438 rte_delay_us(RTE_TXGBE_WAIT_100_US);
1439 txgbe_dev_store_tx_queue(hw, txq->reg_idx);
1441 if (txq->ops != NULL) {
1442 txq->ops->release_mbufs(txq);
1443 txq->ops->reset(txq);
1445 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;