/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_malloc.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"
int
txgbe_is_vf(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	switch (hw->mac.type) {
	case txgbe_mac_raptor_vf:
		return 1;
	default:
		return 0;
	}
}
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
uint64_t
txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
{
	return DEV_RX_OFFLOAD_VLAN_STRIP;
}
uint64_t
txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
{
	uint64_t offloads;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);

	offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
		   DEV_RX_OFFLOAD_UDP_CKSUM |
		   DEV_RX_OFFLOAD_TCP_CKSUM |
		   DEV_RX_OFFLOAD_KEEP_CRC |
		   DEV_RX_OFFLOAD_JUMBO_FRAME |
		   DEV_RX_OFFLOAD_VLAN_FILTER |
		   DEV_RX_OFFLOAD_RSS_HASH |
		   DEV_RX_OFFLOAD_SCATTER;

	if (!txgbe_is_vf(dev))
		offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
			     DEV_RX_OFFLOAD_QINQ_STRIP |
			     DEV_RX_OFFLOAD_VLAN_EXTEND);

	/*
	 * RSC is only supported by PF devices in a non-SR-IOV
	 * mode.
	 */
	if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
		offloads |= DEV_RX_OFFLOAD_TCP_LRO;

	if (hw->mac.type == txgbe_mac_raptor)
		offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;

	offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return offloads;
}
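
/*
 * Illustrative application-side sketch (not part of this driver), compiled
 * out by default: one way an application might request only the Rx offloads
 * the port actually reports. The port_id argument and the chosen offload set
 * are assumptions for the example; the ethdev calls are the generic API.
 */
#ifdef TXGBE_RXTX_USAGE_EXAMPLE
static int
example_pick_rx_offloads(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	uint64_t wanted = DEV_RX_OFFLOAD_IPV4_CKSUM |
			  DEV_RX_OFFLOAD_UDP_CKSUM |
			  DEV_RX_OFFLOAD_TCP_CKSUM |
			  DEV_RX_OFFLOAD_SCATTER;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Keep only what txgbe_get_rx_port_offloads() advertised. */
	conf->rxmode.offloads = wanted & dev_info.rx_offload_capa;
	return 0;
}
#endif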
static void __rte_cold
txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
{
	unsigned int i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}
static void __rte_cold
txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
{
	if (txq != NULL &&
	    txq->sw_ring != NULL)
		rte_free(txq->sw_ring);
}
static void __rte_cold
txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
{
	if (txq != NULL && txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->free_swring(txq);
		rte_free(txq);
	}
}

void
txgbe_dev_tx_queue_release(void *txq)
{
	txgbe_tx_queue_release(txq);
}
static const struct txgbe_txq_ops def_txq_ops = {
	.release_mbufs = txgbe_tx_queue_release_mbufs,
	.free_swring = txgbe_tx_free_swring,
};
txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)

txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
uint64_t
txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
{
	uint64_t tx_offload_capa;

	tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_SCTP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO |
		DEV_TX_OFFLOAD_UDP_TSO |
		DEV_TX_OFFLOAD_UDP_TNL_TSO |
		DEV_TX_OFFLOAD_IP_TNL_TSO |
		DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO |
		DEV_TX_OFFLOAD_IPIP_TNL_TSO |
		DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
		DEV_TX_OFFLOAD_MULTI_SEGS;

	if (!txgbe_is_vf(dev))
		tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;

	tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;

	tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	return tx_offload_capa;
}
int __rte_cold
txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct txgbe_tx_queue *txq;
	struct txgbe_hw *hw;
	uint16_t tx_free_thresh;
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be multiple
	 * of TXGBE_TXD_ALIGN.
	 */
	if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
	    nb_desc > TXGBE_RING_DESC_MAX ||
	    nb_desc < TXGBE_RING_DESC_MIN) {
		return -EINVAL;
	}

	/*
	 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
	 * descriptors are used or if the number of descriptors required
	 * to transmit a packet is greater than the number of free TX
	 * descriptors.
	 * One descriptor in the TX ring is used as a sentinel to avoid a
	 * H/W race condition, hence the maximum threshold constraints.
	 * When set to zero, default values are used.
	 */
	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
			tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
	if (tx_free_thresh >= (nb_desc - 3)) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
			     "TX descriptors minus 3. (tx_free_thresh=%u "
			     "port=%d queue=%d)",
			     (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	if ((nb_desc % tx_free_thresh) != 0) {
		PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
			     "number of TX descriptors. (tx_free_thresh=%u "
			     "port=%d queue=%d)", (unsigned int)tx_free_thresh,
			     (int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue",
				 sizeof(struct txgbe_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		return -ENOMEM;

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
			sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
			TXGBE_ALIGN, socket_id);
	if (tz == NULL) {
		txgbe_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	txq->port_id = dev->data->port_id;
	txq->offloads = offloads;
	txq->ops = &def_txq_ops;
	txq->tx_deferred_start = tx_conf->tx_deferred_start;

	/* Modification to set tail pointer for virtual function
	 * if vf is detected.
	 */
	if (hw->mac.type == txgbe_mac_raptor_vf) {
		txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
		txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
	} else {
		txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
						TXGBE_TXWP(txq->reg_idx));
		txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
						TXGBE_TXCFG(txq->reg_idx));
	}

	txq->tx_ring_phys_addr = TMZ_PADDR(tz);
	txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);

	/* Allocate software ring */
	txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
				sizeof(struct txgbe_tx_entry) * nb_desc,
				RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		txgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	/* set up scalar TX function as appropriate */
	txgbe_set_tx_function(dev, txq);

	txq->ops->reset(txq);

	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}
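
/*
 * Illustrative application-side sketch (not part of this driver), compiled
 * out by default: a descriptor count and tx_free_thresh that satisfy the
 * checks in txgbe_dev_tx_queue_setup() above, i.e. tx_free_thresh is below
 * nb_desc - 3 and divides nb_desc evenly. The port/queue ids and the 512/64
 * values are assumptions for the example.
 */
#ifdef TXGBE_RXTX_USAGE_EXAMPLE
static int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf = {
		.tx_free_thresh = 64,	/* 64 < 512 - 3 and 512 % 64 == 0 */
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
				      rte_eth_dev_socket_id(port_id), &txconf);
}
#endif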
/**
 * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
 *
 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
 * in the sw_rsc_ring is not set to NULL but rather points to the next
 * mbuf of this RSC aggregation (that has not been completed yet and still
 * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
 * just free the first "nb_segs" segments of the cluster explicitly by calling
 * rte_pktmbuf_free_seg().
 *
 * @m scattered cluster head
 */
static void __rte_cold
txgbe_free_sc_cluster(struct rte_mbuf *m)
{
	uint16_t i, nb_segs = m->nb_segs;
	struct rte_mbuf *next_seg;

	for (i = 0; i < nb_segs; i++) {
		next_seg = m->next;
		rte_pktmbuf_free_seg(m);
		m = next_seg;
	}
}
static void __rte_cold
txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
{
	unsigned int i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
		if (rxq->rx_nb_avail) {
			for (i = 0; i < rxq->rx_nb_avail; ++i) {
				struct rte_mbuf *mb;

				mb = rxq->rx_stage[rxq->rx_next_avail + i];
				rte_pktmbuf_free_seg(mb);
			}
			rxq->rx_nb_avail = 0;
		}
	}

	if (rxq->sw_sc_ring)
		for (i = 0; i < rxq->nb_rx_desc; i++)
			if (rxq->sw_sc_ring[i].fbuf) {
				txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
				rxq->sw_sc_ring[i].fbuf = NULL;
			}
}
static void __rte_cold
txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
{
	if (rxq != NULL) {
		txgbe_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq->sw_sc_ring);
		rte_free(rxq);
	}
}

void
txgbe_dev_rx_queue_release(void *rxq)
{
	txgbe_rx_queue_release(rxq);
}
/*
 * Check if Rx Burst Bulk Alloc function can be used.
 * Return
 *        0: the preconditions are satisfied and the bulk allocation function
 *           can be used.
 *  -EINVAL: the preconditions are NOT satisfied and the default Rx burst
 *           function must be used.
 */
static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
{
	int ret = 0;

	/*
	 * Make sure the following pre-conditions are satisfied:
	 *   rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
	 *   rxq->rx_free_thresh < rxq->nb_rx_desc
	 *   (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
	 * Scattered packets are not supported. This should be checked
	 * outside of this function.
	 */
	if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
			     rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
		ret = -EINVAL;
	} else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->rx_free_thresh=%d, "
			     "rxq->nb_rx_desc=%d",
			     rxq->rx_free_thresh, rxq->nb_rx_desc);
		ret = -EINVAL;
	} else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
			     "rxq->nb_rx_desc=%d, "
			     "rxq->rx_free_thresh=%d",
			     rxq->nb_rx_desc, rxq->rx_free_thresh);
		ret = -EINVAL;
	}

	return ret;
}
/* Reset dynamic txgbe_rx_queue fields back to defaults */
static void __rte_cold
txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
{
	static const struct txgbe_rx_desc zeroed_desc = {
					{{0}, {0} }, {{0}, {0} } };
	unsigned int i;
	uint16_t len = rxq->nb_rx_desc;

	/*
	 * By default, the Rx queue setup function allocates enough memory for
	 * TXGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
	 * extra memory at the end of the descriptor ring to be zero'd out.
	 */
	if (adapter->rx_bulk_alloc_allowed)
		/* zero out extra memory */
		len += RTE_PMD_TXGBE_RX_MAX_BURST;

	/*
	 * Zero out HW ring memory. Zero out extra memory at the end of
	 * the H/W ring so the look-ahead logic in the Rx Burst bulk alloc
	 * function reads the extra memory as zeros.
	 */
	for (i = 0; i < len; i++)
		rxq->rx_ring[i] = zeroed_desc;

	/*
	 * Initialize the extra software ring entries. Space for these extra
	 * entries is always allocated.
	 */
	memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
	for (i = rxq->nb_rx_desc; i < len; ++i)
		rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;

	rxq->rx_nb_avail = 0;
	rxq->rx_next_avail = 0;
	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
int __rte_cold
txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct txgbe_rx_queue *rxq;
	struct txgbe_hw *hw;
	uint16_t len;
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	uint64_t offloads;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be multiple
	 * of TXGBE_RXD_ALIGN.
	 */
	if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
	    nb_desc > TXGBE_RING_DESC_MAX ||
	    nb_desc < TXGBE_RING_DESC_MIN) {
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct txgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
	rxq->port_id = dev->data->port_id;
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;
	rxq->drop_en = rx_conf->rx_drop_en;
	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
	rxq->offloads = offloads;

	/*
	 * The packet type in RX descriptor is different for different NICs.
	 * So set different masks for different NICs.
	 */
	rxq->pkt_type_mask = TXGBE_PTID_MASK;

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      RX_RING_SZ, TXGBE_ALIGN, socket_id);
	if (rz == NULL) {
		txgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Zero init all the descriptors in the ring.
	 */
	memset(rz->addr, 0, RX_RING_SZ);

	/*
	 * Modified to setup VFRDT for Virtual Function
	 */
	if (hw->mac.type == txgbe_mac_raptor_vf) {
		rxq->rdt_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
		rxq->rdh_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
	} else {
		rxq->rdt_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
		rxq->rdh_reg_addr =
			TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
	}

	rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
	rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);

	/*
	 * Certain constraints must be met in order to use the bulk buffer
	 * allocation Rx burst function. If any of the Rx queues doesn't meet
	 * them, the feature is disabled for the whole port.
	 */
	if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
		PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
				    "preconditions - canceling the feature for "
				    "the whole port[%d]",
			     rxq->queue_id, rxq->port_id);
		adapter->rx_bulk_alloc_allowed = false;
	}

	/*
	 * Allocate software ring. Allow for space at the end of the
	 * S/W ring to make sure the look-ahead logic in the bulk alloc Rx
	 * burst function does not access an invalid memory region.
	 */
	len = nb_desc;
	if (adapter->rx_bulk_alloc_allowed)
		len += RTE_PMD_TXGBE_RX_MAX_BURST;

	rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
					  sizeof(struct txgbe_rx_entry) * len,
					  RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_ring) {
		txgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/*
	 * Always allocate even if it's not going to be needed in order to
	 * simplify the code.
	 *
	 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
	 * be requested in txgbe_dev_rx_init(), which is called later from
	 * dev_start().
	 */
	rxq->sw_sc_ring =
		rte_zmalloc_socket("rxq->sw_sc_ring",
				sizeof(struct txgbe_scattered_rx_entry) * len,
				RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_sc_ring) {
		txgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
			    "dma_addr=0x%" PRIx64,
		     rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
		     rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;

	txgbe_reset_rx_queue(adapter, rxq);

	return 0;
}
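
/*
 * Illustrative application-side sketch (not part of this driver), compiled
 * out by default: an Rx queue configuration that keeps the bulk-allocation
 * preconditions checked above, i.e. rx_free_thresh is at least
 * RTE_PMD_TXGBE_RX_MAX_BURST, smaller than nb_desc and a divisor of it.
 * The mempool, port/queue ids and the 512/32 values are assumptions for the
 * example.
 */
#ifdef TXGBE_RXTX_USAGE_EXAMPLE
static int
example_setup_rx_queue(uint16_t port_id, uint16_t queue_id,
		       struct rte_mempool *mb_pool)
{
	struct rte_eth_rxconf rxconf = {
		.rx_free_thresh = 32,	/* 32 < 512 and 512 % 32 == 0 */
	};

	return rte_eth_rx_queue_setup(port_id, queue_id, 512,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mb_pool);
}
#endif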
void __rte_cold
txgbe_dev_free_queues(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
txgbe_set_rx_function(struct rte_eth_dev *dev)
static int __rte_cold
txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
{
	struct txgbe_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned int i;

	/* Initialize software ring entries */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile struct txgbe_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
				     (unsigned int)rxq->queue_id);
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->port = rxq->port_id;

		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxd = &rxq->rx_ring[i];
		TXGBE_RXD_HDRADDR(rxd, 0);
		TXGBE_RXD_PKTADDR(rxd, dma_addr);
		rxe[i].mbuf = mbuf;
	}

	return 0;
}
/**
 * txgbe_get_rscctl_maxdesc
 *
 * @pool Memory pool of the Rx queue
 */
static inline uint32_t
txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
{
	struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);

	uint16_t maxdesc =
		RTE_IPV4_MAX_PKT_LEN /
			(mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);

	if (maxdesc >= 16)
		return TXGBE_RXCFG_RSCMAX_16;
	else if (maxdesc >= 8)
		return TXGBE_RXCFG_RSCMAX_8;
	else if (maxdesc >= 4)
		return TXGBE_RXCFG_RSCMAX_4;
	else
		return TXGBE_RXCFG_RSCMAX_1;
}
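
/*
 * Worked example (illustrative, values assumed): with the default mbuf data
 * room of RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes) and a 128-byte headroom,
 * each buffer holds 2048 bytes of packet data, so maxdesc = 65535 / 2048 = 31
 * and the function above selects TXGBE_RXCFG_RSCMAX_16.
 */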
/**
 * txgbe_set_rsc - configure RSC related port HW registers
 *
 * Configures the port's RSC related registers.
 *
 * Returns 0 in case of success or a non-zero error code
 */
static int
txgbe_set_rsc(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct rte_eth_dev_info dev_info = { 0 };
	bool rsc_capable = false;
	uint16_t i;
	uint32_t rdrxctl;
	uint32_t rfctl;

	dev->dev_ops->dev_infos_get(dev, &dev_info);
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
		rsc_capable = true;

	if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
		PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
				   "support it");
		return -EINVAL;
	}

	/* RSC global configuration */

	if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
	    (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
		PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
				   "is disabled");
		return -EINVAL;
	}

	rfctl = rd32(hw, TXGBE_PSRCTL);
	if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
		rfctl &= ~TXGBE_PSRCTL_RSCDIA;
	else
		rfctl |= TXGBE_PSRCTL_RSCDIA;
	wr32(hw, TXGBE_PSRCTL, rfctl);

	/* If LRO hasn't been requested, we are done here. */
	if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
		return 0;

	/* Set PSRCTL.RSCACK bit */
	rdrxctl = rd32(hw, TXGBE_PSRCTL);
	rdrxctl |= TXGBE_PSRCTL_RSCACK;
	wr32(hw, TXGBE_PSRCTL, rdrxctl);

	/* Per-queue RSC configuration */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
		uint32_t srrctl =
			rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
		uint32_t psrtype =
			rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
		uint32_t eitr =
			rd32(hw, TXGBE_ITR(rxq->reg_idx));

		/*
		 * txgbe PMD doesn't support header-split at the moment.
		 */
		srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
		srrctl |= TXGBE_RXCFG_HDRLEN(128);

		/*
		 * TODO: Consider setting the Receive Descriptor Minimum
		 * Threshold Size for an RSC case. This is not an obviously
		 * beneficial option, but one worth considering...
		 */
		srrctl |= TXGBE_RXCFG_RSCENA;
		srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
		srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
		psrtype |= TXGBE_POOLRSS_L4HDR;

		/*
		 * RSC: Set ITR interval corresponding to 2K ints/s.
		 *
		 * Full-sized RSC aggregations for a 10Gb/s link will
		 * arrive at a rate of about 20K aggregations/s.
		 *
		 * A 2K ints/s rate means only about 10% of the
		 * aggregations will be closed due to the interrupt timer
		 * expiration when streaming at wire speed.
		 *
		 * For a sparse streaming case this setting will yield
		 * at most 500us latency for a single RSC aggregation.
		 */
		eitr &= ~TXGBE_ITR_IVAL_MASK;
		eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
		eitr |= TXGBE_ITR_WRDSA;

		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
		wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
		wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);

		/*
		 * RSC requires the mapping of the queue to the
		 * interrupt vector.
		 */
		txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
	}

	PMD_INIT_LOG(DEBUG, "enabling LRO mode");

	return 0;
}
/*
 * Initializes Receive Unit.
 */
int __rte_cold
txgbe_dev_rx_init(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_rx_queue *rxq;
	uint64_t bus_addr;
	uint32_t fctrl;
	uint32_t hlreg0;
	uint32_t srrctl;
	uint32_t rdrxctl;
	uint32_t rxcsum;
	uint16_t buf_size;
	uint16_t i;
	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
	int rc;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	/*
	 * Make sure receives are disabled while setting
	 * up the RX context (registers, descriptor rings, etc.).
	 */
	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
	wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);

	/* Enable receipt of broadcast frames */
	fctrl = rd32(hw, TXGBE_PSRCTL);
	fctrl |= TXGBE_PSRCTL_BCA;
	wr32(hw, TXGBE_PSRCTL, fctrl);

	/*
	 * Configure CRC stripping, if any.
	 */
	hlreg0 = rd32(hw, TXGBE_SECRXCTL);
	if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
		hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
	else
		hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
	wr32(hw, TXGBE_SECRXCTL, hlreg0);

	/*
	 * Configure jumbo frame support, if any.
	 */
	if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
			TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
	} else {
		wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
			TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
	}

	/*
	 * If loopback mode is configured, set LPBK bit.
	 */
	hlreg0 = rd32(hw, TXGBE_PSRCTL);
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		hlreg0 |= TXGBE_PSRCTL_LBENA;
	else
		hlreg0 &= ~TXGBE_PSRCTL_LBENA;

	wr32(hw, TXGBE_PSRCTL, hlreg0);

	/*
	 * Assume no header split and no VLAN strip support
	 * on any Rx queue first.
	 */
	rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
			rxq->crc_len = RTE_ETHER_CRC_LEN;
		else
			rxq->crc_len = 0;

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
				(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
		wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);

		srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);

		/* Set whether packets are dropped when no descriptors are available */
		if (rxq->drop_en)
			srrctl |= TXGBE_RXCFG_DROP;

		/*
		 * Configure the RX buffer size in the PKTLEN field of
		 * the RXCFG register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		buf_size = ROUND_UP(buf_size, 0x1 << 10);
		srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);

		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);

		/* Add the dual VLAN header length when checking whether
		 * scattered Rx is needed, to support dual VLAN frames.
		 */
		if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * TXGBE_VLAN_TAG_SIZE > buf_size)
			dev->data->scattered_rx = 1;
		if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		dev->data->scattered_rx = 1;

	/*
	 * Setup the Checksum Register.
	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
	 * Enable IP/L4 checksum computation by hardware if requested to do so.
	 */
	rxcsum = rd32(hw, TXGBE_PSRCTL);
	rxcsum |= TXGBE_PSRCTL_PCSD;
	if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
		rxcsum |= TXGBE_PSRCTL_L4CSUM;
	else
		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;

	wr32(hw, TXGBE_PSRCTL, rxcsum);

	if (hw->mac.type == txgbe_mac_raptor) {
		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
		if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
		else
			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
		wr32(hw, TXGBE_SECRXCTL, rdrxctl);
	}

	rc = txgbe_set_rsc(dev);
	if (rc)
		return rc;

	txgbe_set_rx_function(dev);

	return 0;
}
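
/*
 * Worked example (illustrative, values assumed): with the default 2176-byte
 * mbuf data room and 128 bytes of headroom, buf_size above is 2048 and is
 * already 1 KB aligned, so a jumbo max_rx_pkt_len of 9000 plus two VLAN tags
 * no longer fits in one buffer and scattered Rx is enabled for the port.
 */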
/*
 * Initializes Transmit Unit.
 */
void __rte_cold
txgbe_dev_tx_init(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_tx_queue *txq;
	uint64_t bus_addr;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		bus_addr = txq->tx_ring_phys_addr;
		wr32(hw, TXGBE_TXBAL(txq->reg_idx),
				(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, TXGBE_TXBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
			TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
		wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
	}
}
static void
txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	*(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
	*(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
	*(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
}

static void
txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
	wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
	wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
}

static void
txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	*(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
	*(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
	*(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
}

static void
txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
	wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
	wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
}
/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	/* Allocate buffers for descriptor rings */
	if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
			     rx_queue_id);
		return -1;
	}
	rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	rxdctl |= TXGBE_RXCFG_ENA;
	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);

	/* Wait until RX Enable ready */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);

	wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
	wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
	wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);

	/* Wait until RX Enable bit clear */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);

	rte_delay_us(RTE_TXGBE_WAIT_100_US);
	txgbe_dev_store_rx_queue(hw, rxq->reg_idx);

	txgbe_rx_queue_release_mbufs(rxq);
	txgbe_reset_rx_queue(adapter, rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}
/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_tx_queue *txq;
	uint32_t txdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];
	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);

	/* Wait until TX Enable ready */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not enable "
			     "Tx Queue %d", tx_queue_id);

	wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
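
/*
 * Illustrative application-side sketch (not part of this driver), compiled
 * out by default: deferring queue start at setup time and starting the queue
 * explicitly later, which reaches txgbe_dev_tx_queue_start() above through
 * the generic rte_eth_dev_tx_queue_start() call. Port/queue ids and the
 * descriptor count are assumptions for the example.
 */
#ifdef TXGBE_RXTX_USAGE_EXAMPLE
static int
example_deferred_tx_start(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf = {
		.tx_deferred_start = 1, /* do not start in rte_eth_dev_start() */
	};
	int ret;

	ret = rte_eth_tx_queue_setup(port_id, queue_id, 512,
				     rte_eth_dev_socket_id(port_id), &txconf);
	if (ret != 0)
		return ret;

	/* ... after rte_eth_dev_start(port_id) ... */
	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}
#endif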
/*
 * Stop Transmit Units for specified queue.
 */
int __rte_cold
txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_tx_queue *txq;
	uint32_t txdctl;
	uint32_t txtdh, txtdt;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	/* Wait until TX queue is empty */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_us(RTE_TXGBE_WAIT_100_US);
		txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
		txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
	} while (--poll_ms && (txtdh != txtdt));
	if (!poll_ms)
		PMD_INIT_LOG(ERR,
			     "Tx Queue %d is not empty when stopping.",
			     tx_queue_id);

	txgbe_dev_save_tx_queue(hw, txq->reg_idx);
	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);

	/* Wait until TX Enable bit clear */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
			     tx_queue_id);

	rte_delay_us(RTE_TXGBE_WAIT_100_US);
	txgbe_dev_store_tx_queue(hw, txq->reg_idx);

	if (txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->reset(txq);
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}