1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_malloc.h>
6 #include <rte_ethdev_driver.h>
9 #include "atl_ethdev.h"
10 #include "atl_hw_regs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
17 #define ATL_TX_CKSUM_OFFLOAD_MASK ( \
22 #define ATL_TX_OFFLOAD_MASK ( \
28 #define ATL_TX_OFFLOAD_NOTSUP_MASK \
29 (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
32 * Structure associated with each descriptor of the RX ring of a RX queue.
35 struct rte_mbuf *mbuf;
39 * Structure associated with each descriptor of the TX ring of a TX queue.
42 struct rte_mbuf *mbuf;
48 * Structure associated with each RX queue.
51 struct rte_mempool *mb_pool;
52 struct hw_atl_rxd_s *hw_ring;
53 uint64_t hw_ring_phys_addr;
54 struct atl_rx_entry *sw_ring;
58 uint16_t rx_free_thresh;
67 * Structure associated with each TX queue.
70 struct hw_atl_txd_s *hw_ring;
71 uint64_t hw_ring_phys_addr;
72 struct atl_tx_entry *sw_ring;
78 uint16_t tx_free_thresh;
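/* Return an Rx queue's hardware descriptor ring to its initial state
 * before the ring is (re)populated with buffers.
 */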
83 atl_reset_rx_queue(struct atl_rx_queue *rxq)
85 struct hw_atl_rxd_s *rxd = NULL;
88 PMD_INIT_FUNC_TRACE();
90 for (i = 0; i < rxq->nb_rx_desc; i++) {
91 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
100 atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
101 uint16_t nb_rx_desc, unsigned int socket_id,
102 const struct rte_eth_rxconf *rx_conf,
103 struct rte_mempool *mb_pool)
105 struct atl_rx_queue *rxq;
106 const struct rte_memzone *mz;
108 PMD_INIT_FUNC_TRACE();
111 /* make sure a valid number of descriptors has been requested */
111 if (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||
112 nb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {
113 PMD_INIT_LOG(ERR, "Number of Rx descriptors must be "
114 "less than or equal to %d, "
115 "greater than or equal to %d", AQ_HW_MAX_RX_RING_SIZE,
116 AQ_HW_MIN_RX_RING_SIZE);
121 * If this queue already exists, free the associated memory. The
122 * queue cannot be reused if we need to allocate memory on a
123 * different socket than was previously used.
125 if (dev->data->rx_queues[rx_queue_id] != NULL) {
126 atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
127 dev->data->rx_queues[rx_queue_id] = NULL;
130 /* allocate memory for the queue structure */
131 rxq = rte_zmalloc_socket("atlantic Rx queue", sizeof(*rxq),
132 RTE_CACHE_LINE_SIZE, socket_id);
134 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
139 rxq->mb_pool = mb_pool;
140 rxq->nb_rx_desc = nb_rx_desc;
141 rxq->port_id = dev->data->port_id;
142 rxq->queue_id = rx_queue_id;
143 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
145 rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
146 DEV_RX_OFFLOAD_IPV4_CKSUM;
147 rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
148 (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
149 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
150 PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
152 /* allocate memory for the software ring */
153 rxq->sw_ring = rte_zmalloc_socket("atlantic sw rx ring",
154 nb_rx_desc * sizeof(struct atl_rx_entry),
155 RTE_CACHE_LINE_SIZE, socket_id);
156 if (rxq->sw_ring == NULL) {
158 "Port %d: Cannot allocate software ring for queue %d",
159 rxq->port_id, rxq->queue_id);
165 * allocate memory for the hardware descriptor ring. A memzone large
166 * enough to hold the maximum ring size is requested to allow for
167 * resizing in later calls to the queue setup function.
169 mz = rte_eth_dma_zone_reserve(dev, "rx hw_ring", rx_queue_id,
171 sizeof(struct hw_atl_rxd_s),
175 "Port %d: Cannot allocate hardware ring for queue %d",
176 rxq->port_id, rxq->queue_id);
177 rte_free(rxq->sw_ring);
181 rxq->hw_ring = mz->addr;
182 rxq->hw_ring_phys_addr = mz->iova;
184 atl_reset_rx_queue(rxq);
186 dev->data->rx_queues[rx_queue_id] = rxq;
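/* Return a Tx queue to its initial state: mark every descriptor as
 * completed (dd = 1), detach any mbufs still referenced by the software
 * ring and reset the free-descriptor count.
 */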
191 atl_reset_tx_queue(struct atl_tx_queue *txq)
193 struct atl_tx_entry *tx_entry;
194 union hw_atl_txc_s *txc;
197 PMD_INIT_FUNC_TRACE();
200 PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
204 tx_entry = txq->sw_ring;
206 for (i = 0; i < txq->nb_tx_desc; i++) {
207 txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
212 for (i = 0; i < txq->nb_tx_desc; i++) {
213 txq->hw_ring[i].dd = 1;
214 tx_entry[i].mbuf = NULL;
219 txq->tx_free = txq->nb_tx_desc - 1;
223 atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
224 uint16_t nb_tx_desc, unsigned int socket_id,
225 const struct rte_eth_txconf *tx_conf)
227 struct atl_tx_queue *txq;
228 const struct rte_memzone *mz;
230 PMD_INIT_FUNC_TRACE();
232 /* make sure a valid number of descriptors has been requested */
233 if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE ||
234 nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
235 PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
236 "less than or equal to %d, "
237 "greater than or equal to %d", AQ_HW_MAX_TX_RING_SIZE,
238 AQ_HW_MIN_TX_RING_SIZE);
243 * If this queue already exists, free the associated memory. The
244 * queue cannot be reused if we need to allocate memory on a
245 * different socket than was previously used.
247 if (dev->data->tx_queues[tx_queue_id] != NULL) {
248 atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
249 dev->data->tx_queues[tx_queue_id] = NULL;
252 /* allocate memory for the queue structure */
253 txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq),
254 RTE_CACHE_LINE_SIZE, socket_id);
256 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
261 txq->nb_tx_desc = nb_tx_desc;
262 txq->port_id = dev->data->port_id;
263 txq->queue_id = tx_queue_id;
264 txq->tx_free_thresh = tx_conf->tx_free_thresh;
267 /* allocate memory for the software ring */
268 txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
269 nb_tx_desc * sizeof(struct atl_tx_entry),
270 RTE_CACHE_LINE_SIZE, socket_id);
271 if (txq->sw_ring == NULL) {
273 "Port %d: Cannot allocate software ring for queue %d",
274 txq->port_id, txq->queue_id);
280 * allocate memory for the hardware descriptor ring. A memzone large
281 * enough to hold the maximum ring size is requested to allow for
282 * resizing in later calls to the queue setup function.
284 mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
285 HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
289 "Port %d: Cannot allocate hardware ring for queue %d",
290 txq->port_id, txq->queue_id);
291 rte_free(txq->sw_ring);
295 txq->hw_ring = mz->addr;
296 txq->hw_ring_phys_addr = mz->iova;
298 atl_reset_tx_queue(txq);
300 dev->data->tx_queues[tx_queue_id] = txq;
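/* Program every configured Tx ring into the hardware at device start:
 * hand each ring's DMA base address and parameters to
 * hw_atl_b0_hw_ring_tx_init().
 */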
305 atl_tx_init(struct rte_eth_dev *eth_dev)
307 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
308 struct atl_tx_queue *txq;
309 uint64_t base_addr = 0;
313 PMD_INIT_FUNC_TRACE();
315 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
316 txq = eth_dev->data->tx_queues[i];
317 base_addr = txq->hw_ring_phys_addr;
319 err = hw_atl_b0_hw_ring_tx_init(hw, base_addr,
326 "Port %d: Cannot init TX queue %d",
327 txq->port_id, txq->queue_id);
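/* Program every configured Rx ring into the hardware, choosing a receive
 * buffer size that fits the queue's mbuf pool, and initialize the RSS
 * redirection table.
 */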
336 atl_rx_init(struct rte_eth_dev *eth_dev)
338 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
339 struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
340 struct atl_rx_queue *rxq;
341 uint64_t base_addr = 0;
345 PMD_INIT_FUNC_TRACE();
347 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
348 rxq = eth_dev->data->rx_queues[i];
349 base_addr = rxq->hw_ring_phys_addr;
351 /* Take the mbuf buffer size provided by the pool and adapt the
352 * descriptor buffer size to best fit it
354 int buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
355 RTE_PKTMBUF_HEADROOM;
357 buff_size = RTE_ALIGN_FLOOR(buff_size, 1024);
358 if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {
359 PMD_INIT_LOG(WARNING,
360 "Port %d queue %d: mem pool buff size is too big\n",
361 rxq->port_id, rxq->queue_id);
362 buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;
364 if (buff_size < 1024) {
366 "Port %d queue %d: mem pool buff size is too small\n",
367 rxq->port_id, rxq->queue_id);
370 rxq->buff_size = buff_size;
372 err = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,
373 rxq->nb_rx_desc, buff_size, 0,
377 PMD_INIT_LOG(ERR, "Port %d: Cannot init RX queue %d",
378 rxq->port_id, rxq->queue_id);
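/* Fill the RSS indirection table round-robin over the configured Rx
 * queues, e.g. with 4 queues it becomes 0,1,2,3,0,1,2,3,... Note that the
 * mask below only acts as a modulo when nb_rx_queues is a power of two.
 */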
383 for (i = rss_params->indirection_table_size; i--;)
384 rss_params->indirection_table[i] = i &
385 (eth_dev->data->nb_rx_queues - 1);
386 hw_atl_b0_hw_rss_set(hw, rss_params);
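/* Allocate one mbuf per Rx descriptor and store its DMA address in the
 * hardware ring so the NIC has buffers to receive into.
 */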
391 atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)
393 struct atl_rx_entry *rx_entry = rxq->sw_ring;
394 struct hw_atl_rxd_s *rxd;
395 uint64_t dma_addr = 0;
398 PMD_INIT_FUNC_TRACE();
401 for (i = 0; i < rxq->nb_rx_desc; i++) {
402 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
406 "Port %d: mbuf alloc failed for rx queue %d",
407 rxq->port_id, rxq->queue_id);
411 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
412 mbuf->port = rxq->port_id;
414 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
415 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
416 rxd->buf_addr = dma_addr;
418 rx_entry[i].mbuf = mbuf;
425 atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)
429 PMD_INIT_FUNC_TRACE();
431 if (rxq->sw_ring != NULL) {
432 for (i = 0; i < rxq->nb_rx_desc; i++) {
433 if (rxq->sw_ring[i].mbuf != NULL) {
434 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
435 rxq->sw_ring[i].mbuf = NULL;
442 atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
444 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
445 struct atl_rx_queue *rxq = NULL;
447 PMD_INIT_FUNC_TRACE();
449 if (rx_queue_id < dev->data->nb_rx_queues) {
450 rxq = dev->data->rx_queues[rx_queue_id];
452 if (atl_alloc_rx_queue_mbufs(rxq) != 0) {
454 "Port %d: Allocate mbufs for queue %d failed",
455 rxq->port_id, rxq->queue_id);
459 hw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);
462 hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,
464 dev->data->rx_queue_state[rx_queue_id] =
465 RTE_ETH_QUEUE_STATE_STARTED;
474 atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
476 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
477 struct atl_rx_queue *rxq = NULL;
479 PMD_INIT_FUNC_TRACE();
481 if (rx_queue_id < dev->data->nb_rx_queues) {
482 rxq = dev->data->rx_queues[rx_queue_id];
484 hw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);
486 atl_rx_queue_release_mbufs(rxq);
487 atl_reset_rx_queue(rxq);
489 dev->data->rx_queue_state[rx_queue_id] =
490 RTE_ETH_QUEUE_STATE_STOPPED;
499 atl_rx_queue_release(void *rx_queue)
501 PMD_INIT_FUNC_TRACE();
503 if (rx_queue != NULL) {
504 struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
506 atl_rx_queue_release_mbufs(rxq);
507 rte_free(rxq->sw_ring);
513 atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
517 PMD_INIT_FUNC_TRACE();
519 if (txq->sw_ring != NULL) {
520 for (i = 0; i < txq->nb_tx_desc; i++) {
521 if (txq->sw_ring[i].mbuf != NULL) {
522 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
523 txq->sw_ring[i].mbuf = NULL;
530 atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
532 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
534 PMD_INIT_FUNC_TRACE();
536 if (tx_queue_id < dev->data->nb_tx_queues) {
537 hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);
540 hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
541 dev->data->tx_queue_state[tx_queue_id] =
542 RTE_ETH_QUEUE_STATE_STARTED;
551 atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
553 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
554 struct atl_tx_queue *txq;
556 PMD_INIT_FUNC_TRACE();
558 txq = dev->data->tx_queues[tx_queue_id];
560 hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);
562 atl_tx_queue_release_mbufs(txq);
563 atl_reset_tx_queue(txq);
564 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
570 atl_tx_queue_release(void *tx_queue)
572 PMD_INIT_FUNC_TRACE();
574 if (tx_queue != NULL) {
575 struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
577 atl_tx_queue_release_mbufs(txq);
578 rte_free(txq->sw_ring);
584 atl_free_queues(struct rte_eth_dev *dev)
588 PMD_INIT_FUNC_TRACE();
590 for (i = 0; i < dev->data->nb_rx_queues; i++) {
591 atl_rx_queue_release(dev->data->rx_queues[i]);
592 dev->data->rx_queues[i] = 0;
594 dev->data->nb_rx_queues = 0;
596 for (i = 0; i < dev->data->nb_tx_queues; i++) {
597 atl_tx_queue_release(dev->data->tx_queues[i]);
598 dev->data->tx_queues[i] = 0;
600 dev->data->nb_tx_queues = 0;
604 atl_start_queues(struct rte_eth_dev *dev)
608 PMD_INIT_FUNC_TRACE();
610 for (i = 0; i < dev->data->nb_tx_queues; i++) {
611 if (atl_tx_queue_start(dev, i) != 0) {
613 "Port %d: Start Tx queue %d failed",
614 dev->data->port_id, i);
619 for (i = 0; i < dev->data->nb_rx_queues; i++) {
620 if (atl_rx_queue_start(dev, i) != 0) {
622 "Port %d: Start Rx queue %d failed",
623 dev->data->port_id, i);
632 atl_stop_queues(struct rte_eth_dev *dev)
636 PMD_INIT_FUNC_TRACE();
638 for (i = 0; i < dev->data->nb_tx_queues; i++) {
639 if (atl_tx_queue_stop(dev, i) != 0) {
641 "Port %d: Stop Tx queue %d failed",
642 dev->data->port_id, i);
647 for (i = 0; i < dev->data->nb_rx_queues; i++) {
648 if (atl_rx_queue_stop(dev, i) != 0) {
650 "Port %d: Stop Rx queue %d failed",
651 dev->data->port_id, i);
660 atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
661 struct rte_eth_rxq_info *qinfo)
663 struct atl_rx_queue *rxq;
665 PMD_INIT_FUNC_TRACE();
667 rxq = dev->data->rx_queues[queue_id];
669 qinfo->mp = rxq->mb_pool;
670 qinfo->scattered_rx = dev->data->scattered_rx;
671 qinfo->nb_desc = rxq->nb_rx_desc;
675 atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
676 struct rte_eth_txq_info *qinfo)
678 struct atl_tx_queue *txq;
680 PMD_INIT_FUNC_TRACE();
682 txq = dev->data->tx_queues[queue_id];
684 qinfo->nb_desc = txq->nb_tx_desc;
687 /* Return Rx queue avail count */
690 atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
692 struct atl_rx_queue *rxq;
694 PMD_INIT_FUNC_TRACE();
696 if (rx_queue_id >= dev->data->nb_rx_queues) {
697 PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
701 rxq = dev->data->rx_queues[rx_queue_id];
706 return rxq->nb_rx_desc - rxq->nb_rx_hold;
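/* Report the state of the Rx descriptor 'offset' entries past the current
 * tail: DONE once the hardware has written it back, AVAIL while it is
 * still owned by hardware, UNAVAIL if it lies beyond the posted ring.
 */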
710 atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
712 struct atl_rx_queue *rxq = rx_queue;
713 struct hw_atl_rxd_wb_s *rxd;
716 PMD_INIT_FUNC_TRACE();
718 if (unlikely(offset >= rxq->nb_rx_desc))
721 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
722 return RTE_ETH_RX_DESC_UNAVAIL;
724 idx = rxq->rx_tail + offset;
726 if (idx >= rxq->nb_rx_desc)
727 idx -= rxq->nb_rx_desc;
729 rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];
732 return RTE_ETH_RX_DESC_DONE;
734 return RTE_ETH_RX_DESC_AVAIL;
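/* Report the state of the Tx descriptor 'offset' entries past tx_tail:
 * DONE once the hardware has completed it, FULL otherwise.
 */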
738 atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
740 struct atl_tx_queue *txq = tx_queue;
741 struct hw_atl_txd_s *txd;
744 PMD_INIT_FUNC_TRACE();
746 if (unlikely(offset >= txq->nb_tx_desc))
749 idx = txq->tx_tail + offset;
751 if (idx >= txq->nb_tx_desc)
752 idx -= txq->nb_tx_desc;
754 txd = &txq->hw_ring[idx];
757 return RTE_ETH_TX_DESC_DONE;
759 return RTE_ETH_TX_DESC_FULL;
763 atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
765 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
766 struct atl_rx_queue *rxq;
768 PMD_INIT_FUNC_TRACE();
770 if (queue_id >= dev->data->nb_rx_queues) {
771 PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id);
775 rxq = dev->data->rx_queues[queue_id];
780 /* Mapping interrupt vector */
781 hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);
787 atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
789 return atl_rx_enable_intr(eth_dev, queue_id, true);
793 atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
795 return atl_rx_enable_intr(eth_dev, queue_id, false);
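/* Tx prepare callback: reject packets with too many segments or with
 * offload flags the PMD does not support, then fix up the checksum
 * fields the hardware expects before transmission.
 */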
799 atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
806 PMD_INIT_FUNC_TRACE();
808 for (i = 0; i < nb_pkts; i++) {
810 ol_flags = m->ol_flags;
812 if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
817 if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
818 rte_errno = ENOTSUP;
822 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
823 ret = rte_validate_tx_offload(m);
829 ret = rte_net_intel_cksum_prepare(m);
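/* Translate the Rx write-back descriptor status bits into mbuf ol_flags
 * (IP and L4 checksum good/bad/unknown), honouring the per-queue
 * checksum offload enables.
 */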
840 atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
841 struct hw_atl_rxd_wb_s *rxd_wb)
843 uint64_t mbuf_flags = 0;
845 PMD_INIT_FUNC_TRACE();
848 if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
849 /* IPv4 csum error ? */
850 if (rxd_wb->rx_stat & BIT(1))
851 mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
853 mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
855 mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
858 /* CSUM calculated ? */
859 if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
860 if (rxd_wb->rx_stat & BIT(2))
861 mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
863 mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
865 mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
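/* Decode the descriptor's pkt_type field into RTE_PTYPE_* flags:
 * bits [1:0] give the L2/L3 type, bits [4:2] the L4 type and bit 5
 * indicates a VLAN tag.
 */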
872 atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)
874 uint32_t type = RTE_PTYPE_UNKNOWN;
875 uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;
876 uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;
878 switch (l2_l3_type) {
880 type = RTE_PTYPE_L3_IPV4;
883 type = RTE_PTYPE_L3_IPV6;
886 type = RTE_PTYPE_L2_ETHER;
889 type = RTE_PTYPE_L2_ETHER_ARP;
895 type |= RTE_PTYPE_L4_TCP;
898 type |= RTE_PTYPE_L4_UDP;
901 type |= RTE_PTYPE_L4_SCTP;
904 type |= RTE_PTYPE_L4_ICMP;
908 if (rxd_wb->pkt_type & BIT(5))
909 type |= RTE_PTYPE_L2_ETHER_VLAN;
915 atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
917 struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
918 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
919 struct atl_adapter *adapter =
920 ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
921 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
922 struct aq_hw_cfg_s *cfg =
923 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
924 struct atl_rx_entry *sw_ring = rxq->sw_ring;
926 struct rte_mbuf *new_mbuf;
927 struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;
928 struct atl_rx_entry *rx_entry;
930 uint16_t nb_hold = 0;
931 struct hw_atl_rxd_wb_s rxd_wb;
932 struct hw_atl_rxd_s *rxd = NULL;
933 uint16_t tail = rxq->rx_tail;
935 uint16_t pkt_len = 0;
937 while (nb_rx < nb_pkts) {
938 uint16_t eop_tail = tail;
940 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
941 rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
943 if (!rxd_wb.dd) { /* RxD is not done */
947 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
948 "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
949 (unsigned int)rxq->port_id,
950 (unsigned int)rxq->queue_id,
951 (unsigned int)tail, (unsigned int)rxd_wb.eop,
952 (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),
953 rxd_wb.rss_hash, rxd_wb.rss_type);
955 /* Packet spans multiple descriptors: wait until its EOP descriptor is done */
958 struct hw_atl_rxd_wb_s *eop_rxwbd;
960 eop_tail = (eop_tail + 1) % rxq->nb_rx_desc;
961 eop_rxwbd = (struct hw_atl_rxd_wb_s *)
962 &rxq->hw_ring[eop_tail];
963 if (!eop_rxwbd->dd) {
964 /* no EOP received yet */
968 if (eop_rxwbd->dd && eop_rxwbd->eop)
972 if (eop_tail == tail)
976 rx_mbuf_first = NULL;
978 /* Run through packet segments */
980 new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
981 if (new_mbuf == NULL) {
983 "RX mbuf alloc failed port_id=%u "
984 "queue_id=%u", (unsigned int)rxq->port_id,
985 (unsigned int)rxq->queue_id);
986 dev->data->rx_mbuf_alloc_failed++;
987 adapter->sw_stats.rx_nombuf++;
992 rx_entry = &sw_ring[tail];
994 rx_mbuf = rx_entry->mbuf;
995 rx_entry->mbuf = new_mbuf;
996 dma_addr = rte_cpu_to_le_64(
997 rte_mbuf_data_iova_default(new_mbuf));
999 /* setup RX descriptor */
1001 rxd->buf_addr = dma_addr;
1004 * Initialize the returned mbuf.
1005 * 1) setup generic mbuf fields:
1006 * - number of segments,
1009 * - RX port identifier.
1010 * 2) integrate hardware offload data, if any:
1011 * - RSS flag & hash,
1012 * - IP checksum flag,
1013 * - VLAN TCI, if any,
1016 pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);
1017 rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1018 rte_prefetch1((char *)rx_mbuf->buf_addr +
1020 rx_mbuf->nb_segs = 0;
1021 rx_mbuf->next = NULL;
1022 rx_mbuf->pkt_len = pkt_len;
1023 rx_mbuf->data_len = pkt_len;
1025 u16 remainder_len = pkt_len % rxq->buff_size;
1027 remainder_len = rxq->buff_size;
1028 rx_mbuf->data_len = remainder_len;
1030 rx_mbuf->data_len = pkt_len > rxq->buff_size ?
1031 rxq->buff_size : pkt_len;
1033 rx_mbuf->port = rxq->port_id;
1035 rx_mbuf->hash.rss = rxd_wb.rss_hash;
1037 rx_mbuf->vlan_tci = rxd_wb.vlan;
1040 atl_desc_to_offload_flags(rxq, &rxd_wb);
1042 rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
1044 if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
1045 rx_mbuf->ol_flags |= PKT_RX_VLAN;
1046 rx_mbuf->vlan_tci = rxd_wb.vlan;
1048 if (cfg->vlan_strip)
1049 rx_mbuf->ol_flags |=
1050 PKT_RX_VLAN_STRIPPED;
1054 rx_mbuf_first = rx_mbuf;
1055 rx_mbuf_first->nb_segs++;
1058 rx_mbuf_prev->next = rx_mbuf;
1059 rx_mbuf_prev = rx_mbuf;
1061 tail = (tail + 1) % rxq->nb_rx_desc;
1062 /* Prefetch next mbufs */
1063 rte_prefetch0(sw_ring[tail].mbuf);
1064 if ((tail & 0x3) == 0) {
1065 rte_prefetch0(&rxq->hw_ring[tail]);
1066 rte_prefetch0(&sw_ring[tail]);
1069 /* Stop once the EOP descriptor has been consumed: rx_mbuf_first is complete */
1072 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
1073 rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
1077 * Store the mbuf address into the next entry of the array
1078 * of returned packets.
1080 rx_pkts[nb_rx++] = rx_mbuf_first;
1081 adapter->sw_stats.q_ipackets[rxq->queue_id]++;
1082 adapter->sw_stats.q_ibytes[rxq->queue_id] +=
1083 rx_mbuf_first->pkt_len;
1085 PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
1086 rx_mbuf_first->nb_segs,
1087 rx_mbuf_first->pkt_len);
1092 rxq->rx_tail = tail;
1095 * If the number of free RX descriptors is greater than the RX free
1096 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1098 * Update the RDT with the value of the last processed RX descriptor
1099 * minus 1, to guarantee that the RDT register is never equal to the
1100 * RDH register, which creates a "full" ring situation from the
1101 * hardware point of view...
1103 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1104 if (nb_hold > rxq->rx_free_thresh) {
1105 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1106 "nb_hold=%u nb_rx=%u",
1107 (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
1108 (unsigned int)tail, (unsigned int)nb_hold,
1109 (unsigned int)nb_rx);
1110 tail = (uint16_t)((tail == 0) ?
1111 (rxq->nb_rx_desc - 1) : (tail - 1));
1113 hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);
1118 rxq->nb_rx_hold = nb_hold;
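/* Reclaim completed Tx descriptors: scan the ring from tx_head, free the
 * mbufs attached to descriptors the hardware has finished with and
 * advance tx_head.
 */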
1124 atl_xmit_cleanup(struct atl_tx_queue *txq)
1126 struct atl_tx_entry *sw_ring;
1127 struct hw_atl_txd_s *txd;
1130 PMD_INIT_FUNC_TRACE();
1133 sw_ring = txq->sw_ring;
1134 int head = txq->tx_head;
1138 for (i = 0, cnt = head; ; i++) {
1139 txd = &txq->hw_ring[cnt];
1144 cnt = (cnt + 1) % txq->nb_tx_desc;
1145 if (cnt == txq->tx_tail)
1153 txd = &txq->hw_ring[head];
1155 struct atl_tx_entry *tx_entry = &sw_ring[head];
1157 if (tx_entry->mbuf) {
1158 rte_pktmbuf_free_seg(tx_entry->mbuf);
1159 tx_entry->mbuf = NULL;
1168 head = (head + 1) % txq->nb_tx_desc;
1172 txq->tx_head = head;
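/* Build a Tx context descriptor from the mbuf offload metadata (TSO
 * segment size, header lengths, VLAN tag) and return the tx_desc_cmd_*
 * bits that the following data descriptors must carry.
 */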
1177 atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
1179 uint32_t tx_cmd = 0;
1180 uint64_t ol_flags = tx_pkt->ol_flags;
1182 PMD_INIT_FUNC_TRACE();
1184 if (ol_flags & PKT_TX_TCP_SEG) {
1185 PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
1187 tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
1191 if (ol_flags & PKT_TX_IPV6)
1194 txc->l2_len = tx_pkt->l2_len;
1195 txc->l3_len = tx_pkt->l3_len;
1196 txc->l4_len = tx_pkt->l4_len;
1198 txc->mss_len = tx_pkt->tso_segsz;
1201 if (ol_flags & PKT_TX_VLAN) {
1202 tx_cmd |= tx_desc_cmd_vlan;
1203 txc->vlan_tag = tx_pkt->vlan_tci;
1207 txc->type = tx_desc_type_ctx;
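/* Set the checksum-related command bits on a data descriptor: always
 * request FCS insertion, plus IPv4 header and L4 checksum offload when
 * the corresponding ol_flags are present.
 */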
1215 atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
1218 txd->cmd |= tx_desc_cmd_fcs;
1219 txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
1220 /* L4 csum requested */
1221 txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
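/* Post a single (possibly multi-segment) packet to the Tx ring: write a
 * context descriptor when TSO/VLAN is requested, then one data descriptor
 * per mbuf segment, and finally update the hardware tail pointer so
 * transmission starts.
 */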
1226 atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
1227 struct rte_mbuf *tx_pkt)
1229 struct atl_adapter *adapter =
1230 ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
1231 uint32_t pay_len = 0;
1233 struct atl_tx_entry *tx_entry;
1234 uint64_t buf_dma_addr;
1235 struct rte_mbuf *m_seg;
1236 union hw_atl_txc_s *txc = NULL;
1237 struct hw_atl_txd_s *txd = NULL;
1241 PMD_INIT_FUNC_TRACE();
1243 tail = txq->tx_tail;
1245 txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
1250 tx_cmd = atl_tso_setup(tx_pkt, txc);
1253 /* We've consumed the first desc, adjust counters */
1254 tail = (tail + 1) % txq->nb_tx_desc;
1255 txq->tx_tail = tail;
1258 txd = &txq->hw_ring[tail];
1261 txd = (struct hw_atl_txd_s *)txc;
1264 txd->ct_en = !!tx_cmd;
1266 txd->type = tx_desc_type_desc;
1268 atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
1273 pay_len = tx_pkt->pkt_len;
1275 txd->pay_len = pay_len;
1277 for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
1278 if (desc_count > 0) {
1279 txd = &txq->hw_ring[tail];
1283 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1284 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
1286 txd->type = tx_desc_type_desc;
1287 txd->len = m_seg->data_len;
1288 txd->pay_len = pay_len;
1290 /* Store mbuf for freeing later */
1291 tx_entry = &txq->sw_ring[tail];
1294 rte_pktmbuf_free_seg(tx_entry->mbuf);
1295 tx_entry->mbuf = m_seg;
1297 tail = (tail + 1) % txq->nb_tx_desc;
1302 /* Last descriptor requires EOP and WB */
1304 txd->cmd |= tx_desc_cmd_wb;
1306 hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
1308 txq->tx_tail = tail;
1310 txq->tx_free -= desc_count;
1312 adapter->sw_stats.q_opackets[txq->queue_id]++;
1313 adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
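/* Burst transmit entry point: reclaim descriptors once tx_free drops
 * below tx_free_thresh, stop early if a packet does not fit or is
 * malformed, and hand each remaining packet to atl_xmit_pkt().
 */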
1317 atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1319 struct rte_eth_dev *dev = NULL;
1320 struct aq_hw_s *hw = NULL;
1321 struct atl_tx_queue *txq = tx_queue;
1322 struct rte_mbuf *tx_pkt;
1325 dev = &rte_eth_devices[txq->port_id];
1326 hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1329 "port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
1330 txq->port_id, txq->queue_id, nb_pkts, txq->tx_free,
1331 txq->tx_tail, txq->tx_head);
1333 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1334 tx_pkt = *tx_pkts++;
1336 /* Clean Tx queue if needed */
1337 if (txq->tx_free < txq->tx_free_thresh)
1338 atl_xmit_cleanup(txq);
1340 /* Check if we have enough free descriptors */
1341 if (txq->tx_free < tx_pkt->nb_segs)
1344 /* check mbuf is valid */
1345 if ((tx_pkt->nb_segs == 0) ||
1346 ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
1349 /* Send the packet */
1350 atl_xmit_pkt(hw, txq, tx_pkt);
1353 PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);