1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_malloc.h>
6 #include <rte_ethdev_driver.h>
9 #include "atl_ethdev.h"
10 #include "atl_hw_regs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
17 #define ATL_TX_CKSUM_OFFLOAD_MASK ( \
22 #define ATL_TX_OFFLOAD_MASK ( \
28 #define ATL_TX_OFFLOAD_NOTSUP_MASK \
29 (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
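/*
 * ATL_TX_OFFLOAD_NOTSUP_MASK is the set of mbuf ol_flags bits the PMD
 * does not advertise: PKT_TX_OFFLOAD_MASK with the supported
 * ATL_TX_OFFLOAD_MASK bits removed.  atl_prep_pkts() below uses it to
 * reject packets that request unsupported Tx offloads.
 */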
32 * Structure associated with each descriptor of the RX ring of a RX queue.
35 struct rte_mbuf *mbuf;
39 * Structure associated with each descriptor of the TX ring of a TX queue.
42 struct rte_mbuf *mbuf;
48 * Structure associated with each RX queue.
51 struct rte_mempool *mb_pool;
52 struct hw_atl_rxd_s *hw_ring;
53 uint64_t hw_ring_phys_addr;
54 struct atl_rx_entry *sw_ring;
58 uint16_t rx_free_thresh;
67 * Structure associated with each TX queue.
70 struct hw_atl_txd_s *hw_ring;
71 uint64_t hw_ring_phys_addr;
72 struct atl_tx_entry *sw_ring;
78 uint16_t tx_free_thresh;
83 atl_reset_rx_queue(struct atl_rx_queue *rxq)
85 struct hw_atl_rxd_s *rxd = NULL;
88 PMD_INIT_FUNC_TRACE();
90 for (i = 0; i < rxq->nb_rx_desc; i++) {
91 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
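/*
 * Set up an Rx queue: validate the requested descriptor count against
 * the hardware limits, release any queue previously configured at this
 * index (it may live on a different NUMA socket), then allocate the
 * queue structure and software ring on the requested socket, reserve a
 * DMA memzone sized for the maximum ring, record its virtual and IOVA
 * addresses and reset the queue before registering it in dev->data.
 */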
100 atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
101 uint16_t nb_rx_desc, unsigned int socket_id,
102 const struct rte_eth_rxconf *rx_conf,
103 struct rte_mempool *mb_pool)
105 struct atl_rx_queue *rxq;
106 const struct rte_memzone *mz;
108 PMD_INIT_FUNC_TRACE();
110 /* make sure a valid number of descriptors have been requested */
111 if (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||
112 nb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {
113 PMD_INIT_LOG(ERR, "Number of Rx descriptors must be "
114 "between %d and %d (inclusive)",
115 AQ_HW_MIN_RX_RING_SIZE,
116 AQ_HW_MAX_RX_RING_SIZE);
121 * if this queue existed already, free the associated memory. The
122 * queue cannot be reused in case we need to allocate memory on
123 * different socket than was previously used.
125 if (dev->data->rx_queues[rx_queue_id] != NULL) {
126 atl_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
127 dev->data->rx_queues[rx_queue_id] = NULL;
130 /* allocate memory for the queue structure */
131 rxq = rte_zmalloc_socket("atlantic Rx queue", sizeof(*rxq),
132 RTE_CACHE_LINE_SIZE, socket_id);
134 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
139 rxq->mb_pool = mb_pool;
140 rxq->nb_rx_desc = nb_rx_desc;
141 rxq->port_id = dev->data->port_id;
142 rxq->queue_id = rx_queue_id;
143 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
145 rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
146 DEV_RX_OFFLOAD_IPV4_CKSUM;
147 rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
148 (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM);
149 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
150 PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");
152 /* allocate memory for the software ring */
153 rxq->sw_ring = rte_zmalloc_socket("atlantic sw rx ring",
154 nb_rx_desc * sizeof(struct atl_rx_entry),
155 RTE_CACHE_LINE_SIZE, socket_id);
156 if (rxq->sw_ring == NULL) {
158 "Port %d: Cannot allocate software ring for queue %d",
159 rxq->port_id, rxq->queue_id);
165 * allocate memory for the hardware descriptor ring. A memzone large
166 * enough to hold the maximum ring size is requested to allow for
167 * resizing in later calls to the queue setup function.
169 mz = rte_eth_dma_zone_reserve(dev, "rx hw_ring", rx_queue_id,
171 sizeof(struct hw_atl_rxd_s),
175 "Port %d: Cannot allocate hardware ring for queue %d",
176 rxq->port_id, rxq->queue_id);
177 rte_free(rxq->sw_ring);
181 rxq->hw_ring = mz->addr;
182 rxq->hw_ring_phys_addr = mz->iova;
184 atl_reset_rx_queue(rxq);
186 dev->data->rx_queues[rx_queue_id] = rxq;
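/*
 * Return a Tx queue to its initial software state: the per-descriptor
 * context entries are reset, every data descriptor is marked as done
 * (dd = 1) so it is considered free, the software ring is detached
 * from its mbufs and the free-descriptor count is restored to
 * nb_tx_desc - 1.
 */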
191 atl_reset_tx_queue(struct atl_tx_queue *txq)
193 struct atl_tx_entry *tx_entry;
194 union hw_atl_txc_s *txc;
197 PMD_INIT_FUNC_TRACE();
200 PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
204 tx_entry = txq->sw_ring;
206 for (i = 0; i < txq->nb_tx_desc; i++) {
207 txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
212 for (i = 0; i < txq->nb_tx_desc; i++) {
213 txq->hw_ring[i].dd = 1;
214 tx_entry[i].mbuf = NULL;
219 txq->tx_free = txq->nb_tx_desc - 1;
223 atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
224 uint16_t nb_tx_desc, unsigned int socket_id,
225 const struct rte_eth_txconf *tx_conf)
227 struct atl_tx_queue *txq;
228 const struct rte_memzone *mz;
230 PMD_INIT_FUNC_TRACE();
232 /* make sure a valid number of descriptors have been requested */
233 if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE ||
234 nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
235 PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
236 "between %d and %d (inclusive)",
237 AQ_HW_MIN_TX_RING_SIZE,
238 AQ_HW_MAX_TX_RING_SIZE);
243 * if this queue existed already, free the associated memory. The
244 * queue cannot be reused in case we need to allocate memory on
245 * different socket than was previously used.
247 if (dev->data->tx_queues[tx_queue_id] != NULL) {
248 atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
249 dev->data->tx_queues[tx_queue_id] = NULL;
252 /* allocate memory for the queue structure */
253 txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq),
254 RTE_CACHE_LINE_SIZE, socket_id);
256 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
261 txq->nb_tx_desc = nb_tx_desc;
262 txq->port_id = dev->data->port_id;
263 txq->queue_id = tx_queue_id;
264 txq->tx_free_thresh = tx_conf->tx_free_thresh;
267 /* allocate memory for the software ring */
268 txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
269 nb_tx_desc * sizeof(struct atl_tx_entry),
270 RTE_CACHE_LINE_SIZE, socket_id);
271 if (txq->sw_ring == NULL) {
273 "Port %d: Cannot allocate software ring for queue %d",
274 txq->port_id, txq->queue_id);
280 * allocate memory for the hardware descriptor ring. A memzone large
281 * enough to hold the maximum ring size is requested to allow for
282 * resizing in later calls to the queue setup function.
284 mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
285 HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
289 "Port %d: Cannot allocate hardware ring for queue %d",
290 txq->port_id, txq->queue_id);
291 rte_free(txq->sw_ring);
295 txq->hw_ring = mz->addr;
296 txq->hw_ring_phys_addr = mz->iova;
298 atl_reset_tx_queue(txq);
300 dev->data->tx_queues[tx_queue_id] = txq;
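/*
 * Program every configured Tx ring into the hardware: each queue's DMA
 * base address is handed to hw_atl_b0_hw_ring_tx_init() together with
 * the remaining ring parameters; a failure is logged and aborts the
 * initialization for this port.
 */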
305 atl_tx_init(struct rte_eth_dev *eth_dev)
307 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
308 struct atl_tx_queue *txq;
309 uint64_t base_addr = 0;
313 PMD_INIT_FUNC_TRACE();
315 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
316 txq = eth_dev->data->tx_queues[i];
317 base_addr = txq->hw_ring_phys_addr;
319 err = hw_atl_b0_hw_ring_tx_init(hw, base_addr,
326 "Port %d: Cannot init TX queue %d",
327 txq->port_id, txq->queue_id);
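/*
 * Program every configured Rx ring into the hardware.  The
 * per-descriptor buffer size is derived from the mempool element:
 * data room minus RTE_PKTMBUF_HEADROOM, rounded down to a 1 KB
 * multiple and capped at HW_ATL_B0_RXD_BUF_SIZE_MAX.  E.g., for a
 * pool created with the common RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes,
 * i.e. 2048 plus a default 128-byte headroom) this gives
 * 2176 - 128 = 2048, which is already 1 KB aligned.
 */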
336 atl_rx_init(struct rte_eth_dev *eth_dev)
338 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
339 struct atl_rx_queue *rxq;
340 uint64_t base_addr = 0;
344 PMD_INIT_FUNC_TRACE();
346 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
347 rxq = eth_dev->data->rx_queues[i];
348 base_addr = rxq->hw_ring_phys_addr;
350 /* Take the requested pool mbuf size and adapt the
351 * descriptor buffer size to the best fit
353 int buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
354 RTE_PKTMBUF_HEADROOM;
356 buff_size = RTE_ALIGN_FLOOR(buff_size, 1024);
357 if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {
358 PMD_INIT_LOG(WARNING,
359 "Port %d queue %d: mem pool buff size is too big\n",
360 rxq->port_id, rxq->queue_id);
361 buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;
363 if (buff_size < 1024) {
365 "Port %d queue %d: mem pool buff size is too small\n",
366 rxq->port_id, rxq->queue_id);
369 rxq->buff_size = buff_size;
371 err = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,
372 rxq->nb_rx_desc, buff_size, 0,
376 PMD_INIT_LOG(ERR, "Port %d: Cannot init RX queue %d",
377 rxq->port_id, rxq->queue_id);
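/*
 * Populate the Rx ring: allocate one mbuf per descriptor from the
 * queue's mempool, write its IOVA into the descriptor's buffer-address
 * field and remember the mbuf in the software ring so it can later be
 * handed to the application or freed.
 */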
386 atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)
388 struct atl_rx_entry *rx_entry = rxq->sw_ring;
389 struct hw_atl_rxd_s *rxd;
390 uint64_t dma_addr = 0;
393 PMD_INIT_FUNC_TRACE();
396 for (i = 0; i < rxq->nb_rx_desc; i++) {
397 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
401 "Port %d: mbuf alloc failed for rx queue %d",
402 rxq->port_id, rxq->queue_id);
406 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
407 mbuf->port = rxq->port_id;
409 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
410 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
411 rxd->buf_addr = dma_addr;
413 rx_entry[i].mbuf = mbuf;
420 atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)
424 PMD_INIT_FUNC_TRACE();
426 if (rxq->sw_ring != NULL) {
427 for (i = 0; i < rxq->nb_rx_desc; i++) {
428 if (rxq->sw_ring[i].mbuf != NULL) {
429 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
430 rxq->sw_ring[i].mbuf = NULL;
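/*
 * Start an Rx queue: fill the ring with mbufs, enable the ring in
 * hardware and push the tail pointer to nb_rx_desc - 1 so the whole
 * ring is owned by the device, then mark the queue as started.
 */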
437 atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
439 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
440 struct atl_rx_queue *rxq = NULL;
442 PMD_INIT_FUNC_TRACE();
444 if (rx_queue_id < dev->data->nb_rx_queues) {
445 rxq = dev->data->rx_queues[rx_queue_id];
447 if (atl_alloc_rx_queue_mbufs(rxq) != 0) {
449 "Port %d: Allocate mbufs for queue %d failed",
450 rxq->port_id, rxq->queue_id);
454 hw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);
457 hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,
459 dev->data->rx_queue_state[rx_queue_id] =
460 RTE_ETH_QUEUE_STATE_STARTED;
469 atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
471 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
472 struct atl_rx_queue *rxq = NULL;
474 PMD_INIT_FUNC_TRACE();
476 if (rx_queue_id < dev->data->nb_rx_queues) {
477 rxq = dev->data->rx_queues[rx_queue_id];
479 hw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);
481 atl_rx_queue_release_mbufs(rxq);
482 atl_reset_rx_queue(rxq);
484 dev->data->rx_queue_state[rx_queue_id] =
485 RTE_ETH_QUEUE_STATE_STOPPED;
494 atl_rx_queue_release(void *rx_queue)
496 PMD_INIT_FUNC_TRACE();
498 if (rx_queue != NULL) {
499 struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
501 atl_rx_queue_release_mbufs(rxq);
502 rte_free(rxq->sw_ring);
508 atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
512 PMD_INIT_FUNC_TRACE();
514 if (txq->sw_ring != NULL) {
515 for (i = 0; i < txq->nb_tx_desc; i++) {
516 if (txq->sw_ring[i].mbuf != NULL) {
517 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
518 txq->sw_ring[i].mbuf = NULL;
525 atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
527 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
529 PMD_INIT_FUNC_TRACE();
531 if (tx_queue_id < dev->data->nb_tx_queues) {
532 hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);
535 hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
536 dev->data->tx_queue_state[tx_queue_id] =
537 RTE_ETH_QUEUE_STATE_STARTED;
546 atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
548 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
549 struct atl_tx_queue *txq;
551 PMD_INIT_FUNC_TRACE();
553 txq = dev->data->tx_queues[tx_queue_id];
555 hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);
557 atl_tx_queue_release_mbufs(txq);
558 atl_reset_tx_queue(txq);
559 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
565 atl_tx_queue_release(void *tx_queue)
567 PMD_INIT_FUNC_TRACE();
569 if (tx_queue != NULL) {
570 struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
572 atl_tx_queue_release_mbufs(txq);
573 rte_free(txq->sw_ring);
579 atl_free_queues(struct rte_eth_dev *dev)
583 PMD_INIT_FUNC_TRACE();
585 for (i = 0; i < dev->data->nb_rx_queues; i++) {
586 atl_rx_queue_release(dev->data->rx_queues[i]);
587 dev->data->rx_queues[i] = 0;
589 dev->data->nb_rx_queues = 0;
591 for (i = 0; i < dev->data->nb_tx_queues; i++) {
592 atl_tx_queue_release(dev->data->tx_queues[i]);
593 dev->data->tx_queues[i] = 0;
595 dev->data->nb_tx_queues = 0;
599 atl_start_queues(struct rte_eth_dev *dev)
603 PMD_INIT_FUNC_TRACE();
605 for (i = 0; i < dev->data->nb_tx_queues; i++) {
606 if (atl_tx_queue_start(dev, i) != 0) {
608 "Port %d: Start Tx queue %d failed",
609 dev->data->port_id, i);
614 for (i = 0; i < dev->data->nb_rx_queues; i++) {
615 if (atl_rx_queue_start(dev, i) != 0) {
617 "Port %d: Start Rx queue %d failed",
618 dev->data->port_id, i);
627 atl_stop_queues(struct rte_eth_dev *dev)
631 PMD_INIT_FUNC_TRACE();
633 for (i = 0; i < dev->data->nb_tx_queues; i++) {
634 if (atl_tx_queue_stop(dev, i) != 0) {
636 "Port %d: Stop Tx queue %d failed",
637 dev->data->port_id, i);
642 for (i = 0; i < dev->data->nb_rx_queues; i++) {
643 if (atl_rx_queue_stop(dev, i) != 0) {
645 "Port %d: Stop Rx queue %d failed",
646 dev->data->port_id, i);
655 atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
656 struct rte_eth_rxq_info *qinfo)
658 struct atl_rx_queue *rxq;
660 PMD_INIT_FUNC_TRACE();
662 rxq = dev->data->rx_queues[queue_id];
664 qinfo->mp = rxq->mb_pool;
665 qinfo->scattered_rx = dev->data->scattered_rx;
666 qinfo->nb_desc = rxq->nb_rx_desc;
670 atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
671 struct rte_eth_txq_info *qinfo)
673 struct atl_tx_queue *txq;
675 PMD_INIT_FUNC_TRACE();
677 txq = dev->data->tx_queues[queue_id];
679 qinfo->nb_desc = txq->nb_tx_desc;
682 /* Return Rx queue avail count */
685 atl_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
687 struct atl_rx_queue *rxq;
689 PMD_INIT_FUNC_TRACE();
691 if (rx_queue_id >= dev->data->nb_rx_queues) {
692 PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
696 rxq = dev->data->rx_queues[rx_queue_id];
701 return rxq->nb_rx_desc - rxq->nb_rx_hold;
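/*
 * Report the state of the Rx descriptor 'offset' entries past the
 * current software tail: offsets beyond the ring are rejected, offsets
 * inside the held (not yet refilled) region are UNAVAIL, and otherwise
 * the descriptor at (rx_tail + offset) % nb_rx_desc is DONE when its
 * dd bit is set and AVAIL while the hardware still owns it.
 */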
705 atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
707 struct atl_rx_queue *rxq = rx_queue;
708 struct hw_atl_rxd_wb_s *rxd;
711 PMD_INIT_FUNC_TRACE();
713 if (unlikely(offset >= rxq->nb_rx_desc))
716 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
717 return RTE_ETH_RX_DESC_UNAVAIL;
719 idx = rxq->rx_tail + offset;
721 if (idx >= rxq->nb_rx_desc)
722 idx -= rxq->nb_rx_desc;
724 rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];
727 return RTE_ETH_RX_DESC_DONE;
729 return RTE_ETH_RX_DESC_AVAIL;
733 atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
735 struct atl_tx_queue *txq = tx_queue;
736 struct hw_atl_txd_s *txd;
739 PMD_INIT_FUNC_TRACE();
741 if (unlikely(offset >= txq->nb_tx_desc))
744 idx = txq->tx_tail + offset;
746 if (idx >= txq->nb_tx_desc)
747 idx -= txq->nb_tx_desc;
749 txd = &txq->hw_ring[idx];
752 return RTE_ETH_TX_DESC_DONE;
754 return RTE_ETH_TX_DESC_FULL;
758 atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
760 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
761 struct atl_rx_queue *rxq;
763 PMD_INIT_FUNC_TRACE();
765 if (queue_id >= dev->data->nb_rx_queues) {
766 PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id);
770 rxq = dev->data->rx_queues[queue_id];
775 /* Enable or disable the Rx interrupt mapping for this queue */
776 hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);
782 atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
784 return atl_rx_enable_intr(eth_dev, queue_id, true);
788 atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
790 return atl_rx_enable_intr(eth_dev, queue_id, false);
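/*
 * Tx prepare callback: walk the burst and reject packets with more
 * segments than the hardware supports or with offload flags outside
 * ATL_TX_OFFLOAD_MASK (rte_errno is set to ENOTSUP).  With
 * RTE_LIBRTE_ETHDEV_DEBUG the offload fields are additionally
 * validated and the checksum pseudo-header is prepared via
 * rte_net_intel_cksum_prepare().
 */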
794 atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
801 PMD_INIT_FUNC_TRACE();
803 for (i = 0; i < nb_pkts; i++) {
805 ol_flags = m->ol_flags;
807 if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
812 if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
813 rte_errno = ENOTSUP;
817 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
818 ret = rte_validate_tx_offload(m);
824 ret = rte_net_intel_cksum_prepare(m);
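/*
 * Translate the write-back descriptor status into mbuf ol_flags.
 * Based on the checks below: rx_stat bit 1 flags an L3 checksum error,
 * bit 3 indicates an L4 checksum was computed and bit 2 that it
 * failed; the low two bits of pkt_type identify the L3 protocol
 * (0 == IPv4).
 */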
835 atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
836 struct hw_atl_rxd_wb_s *rxd_wb)
838 uint64_t mbuf_flags = 0;
840 PMD_INIT_FUNC_TRACE();
843 if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
844 /* IPv4 csum error ? */
845 if (rxd_wb->rx_stat & BIT(1))
846 mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
848 mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
850 mbuf_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
853 /* CSUM calculated ? */
854 if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
855 if (rxd_wb->rx_stat & BIT(2))
856 mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
858 mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
860 mbuf_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
867 atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)
869 uint32_t type = RTE_PTYPE_UNKNOWN;
870 uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;
871 uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;
873 switch (l2_l3_type) {
875 type = RTE_PTYPE_L3_IPV4;
878 type = RTE_PTYPE_L3_IPV6;
881 type = RTE_PTYPE_L2_ETHER;
884 type = RTE_PTYPE_L2_ETHER_ARP;
890 type |= RTE_PTYPE_L4_TCP;
893 type |= RTE_PTYPE_L4_UDP;
896 type |= RTE_PTYPE_L4_SCTP;
899 type |= RTE_PTYPE_L4_ICMP;
903 if (rxd_wb->pkt_type & BIT(5))
904 type |= RTE_PTYPE_L2_ETHER_VLAN;
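/*
 * Burst receive.  Starting at rx_tail, each completed write-back
 * descriptor is consumed; for multi-descriptor packets the ring is
 * first scanned ahead to make sure the EOP descriptor has completed.
 * Every consumed slot gets a freshly allocated replacement mbuf while
 * the original one is chained into the packet being returned; RSS
 * hash, VLAN TCI, checksum flags and packet type are filled in from
 * the descriptor.  Once enough descriptors are held, the hardware
 * tail pointer is advanced so the device can reuse them.
 */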
910 atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
912 struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
913 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
914 struct atl_adapter *adapter =
915 ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
916 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
917 struct atl_rx_entry *sw_ring = rxq->sw_ring;
919 struct rte_mbuf *new_mbuf;
920 struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;
921 struct atl_rx_entry *rx_entry;
923 uint16_t nb_hold = 0;
924 struct hw_atl_rxd_wb_s rxd_wb;
925 struct hw_atl_rxd_s *rxd = NULL;
926 uint16_t tail = rxq->rx_tail;
928 uint16_t pkt_len = 0;
930 while (nb_rx < nb_pkts) {
931 uint16_t eop_tail = tail;
933 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
934 rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
936 if (!rxd_wb.dd) { /* RxD is not done */
940 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
941 "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
942 (unsigned int)rxq->port_id,
943 (unsigned int)rxq->queue_id,
944 (unsigned int)tail, (unsigned int)rxd_wb.eop,
945 (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),
946 rxd_wb.rss_hash, rxd_wb.rss_type);
948 /* multi-descriptor packet: make sure its EOP descriptor has also completed */
951 struct hw_atl_rxd_wb_s *eop_rxwbd;
953 eop_tail = (eop_tail + 1) % rxq->nb_rx_desc;
954 eop_rxwbd = (struct hw_atl_rxd_wb_s *)
955 &rxq->hw_ring[eop_tail];
956 if (!eop_rxwbd->dd) {
957 /* no EOP received yet */
961 if (eop_rxwbd->dd && eop_rxwbd->eop)
965 if (eop_tail == tail)
969 rx_mbuf_first = NULL;
971 /* Run through packet segments */
973 new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
974 if (new_mbuf == NULL) {
976 "RX mbuf alloc failed port_id=%u "
977 "queue_id=%u", (unsigned int)rxq->port_id,
978 (unsigned int)rxq->queue_id);
979 dev->data->rx_mbuf_alloc_failed++;
980 adapter->sw_stats.rx_nombuf++;
985 rx_entry = &sw_ring[tail];
987 rx_mbuf = rx_entry->mbuf;
988 rx_entry->mbuf = new_mbuf;
989 dma_addr = rte_cpu_to_le_64(
990 rte_mbuf_data_iova_default(new_mbuf));
992 /* setup RX descriptor */
994 rxd->buf_addr = dma_addr;
997 * Initialize the returned mbuf.
998 * 1) setup generic mbuf fields:
999 * - number of segments,
1002 * - RX port identifier.
1003 * 2) integrate hardware offload data, if any:
1004 * - RSS flag & hash,
1005 * - IP checksum flag,
1006 * - VLAN TCI, if any,
1009 pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);
1010 rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1011 rte_prefetch1((char *)rx_mbuf->buf_addr +
1013 rx_mbuf->nb_segs = 0;
1014 rx_mbuf->next = NULL;
1015 rx_mbuf->pkt_len = pkt_len;
1016 rx_mbuf->data_len = pkt_len;
1018 u16 remainder_len = pkt_len % rxq->buff_size;
1020 remainder_len = rxq->buff_size;
1021 rx_mbuf->data_len = remainder_len;
1023 rx_mbuf->data_len = pkt_len > rxq->buff_size ?
1024 rxq->buff_size : pkt_len;
1026 rx_mbuf->port = rxq->port_id;
1028 rx_mbuf->hash.rss = rxd_wb.rss_hash;
1030 rx_mbuf->vlan_tci = rxd_wb.vlan;
1033 atl_desc_to_offload_flags(rxq, &rxd_wb);
1034 rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);
1037 rx_mbuf_first = rx_mbuf;
1038 rx_mbuf_first->nb_segs++;
1041 rx_mbuf_prev->next = rx_mbuf;
1042 rx_mbuf_prev = rx_mbuf;
1044 tail = (tail + 1) % rxq->nb_rx_desc;
1045 /* Prefetch next mbufs */
1046 rte_prefetch0(sw_ring[tail].mbuf);
1047 if ((tail & 0x3) == 0) {
1048 rte_prefetch0(&sw_ring[tail]);
1049 rte_prefetch0(&sw_ring[tail]);
1052 /* rx_mbuf_first is complete once the EOP descriptor has been consumed */
1055 rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
1056 rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
1060 * Store the mbuf address into the next entry of the array
1061 * of returned packets.
1063 rx_pkts[nb_rx++] = rx_mbuf_first;
1064 adapter->sw_stats.q_ipackets[rxq->queue_id]++;
1065 adapter->sw_stats.q_ibytes[rxq->queue_id] +=
1066 rx_mbuf_first->pkt_len;
1068 PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
1069 rx_mbuf_first->nb_segs,
1070 rx_mbuf_first->pkt_len);
1075 rxq->rx_tail = tail;
1078 * If the number of free RX descriptors is greater than the RX free
1079 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1081 * Update the RDT with the value of the last processed RX descriptor
1082 * minus 1, to guarantee that the RDT register is never equal to the
1083 * RDH register, which creates a "full" ring situation from the
1084 * hardware point of view...
1086 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1087 if (nb_hold > rxq->rx_free_thresh) {
1088 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1089 "nb_hold=%u nb_rx=%u",
1090 (unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
1091 (unsigned int)tail, (unsigned int)nb_hold,
1092 (unsigned int)nb_rx);
1093 tail = (uint16_t)((tail == 0) ?
1094 (rxq->nb_rx_desc - 1) : (tail - 1));
1096 hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);
1101 rxq->nb_rx_hold = nb_hold;
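/*
 * Reclaim completed Tx descriptors: walk from tx_head towards tx_tail
 * while the descriptors' dd bits are set, free the mbufs recorded in
 * the software ring for that range and advance tx_head so those slots
 * can be reused by atl_xmit_pkts().
 */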
1107 atl_xmit_cleanup(struct atl_tx_queue *txq)
1109 struct atl_tx_entry *sw_ring;
1110 struct hw_atl_txd_s *txd;
1113 PMD_INIT_FUNC_TRACE();
1116 sw_ring = txq->sw_ring;
1117 int head = txq->tx_head;
1121 for (i = 0, cnt = head; ; i++) {
1122 txd = &txq->hw_ring[cnt];
1127 cnt = (cnt + 1) % txq->nb_tx_desc;
1128 if (cnt == txq->tx_tail)
1136 txd = &txq->hw_ring[head];
1138 struct atl_tx_entry *tx_entry = &sw_ring[head];
1140 if (tx_entry->mbuf) {
1141 rte_pktmbuf_free_seg(tx_entry->mbuf);
1142 tx_entry->mbuf = NULL;
1151 head = (head + 1) % txq->nb_tx_desc;
1155 txq->tx_head = head;
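/*
 * Fill the Tx context descriptor for TSO and/or VLAN insertion.  For
 * PKT_TX_TCP_SEG the L2/L3/L4 header lengths and MSS are copied from
 * the mbuf and the LSO + L4 checksum command bits are set (plus the
 * IPv6 bit when applicable); for PKT_TX_VLAN the VLAN tag is placed in
 * the context descriptor.  The returned command bits tell
 * atl_xmit_pkt() whether a context descriptor was actually used.
 */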
1160 atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
1162 uint32_t tx_cmd = 0;
1163 uint64_t ol_flags = tx_pkt->ol_flags;
1165 PMD_INIT_FUNC_TRACE();
1167 if (ol_flags & PKT_TX_TCP_SEG) {
1168 PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
1170 tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
1174 if (ol_flags & PKT_TX_IPV6)
1177 txc->l2_len = tx_pkt->l2_len;
1178 txc->l3_len = tx_pkt->l3_len;
1179 txc->l4_len = tx_pkt->l4_len;
1181 txc->mss_len = tx_pkt->tso_segsz;
1184 if (ol_flags & PKT_TX_VLAN) {
1185 tx_cmd |= tx_desc_cmd_vlan;
1186 txc->vlan_tag = tx_pkt->vlan_tci;
1190 txc->type = tx_desc_type_ctx;
1198 atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
1201 txd->cmd |= tx_desc_cmd_fcs;
1202 txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
1203 /* L4 csum requested */
1204 txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
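/*
 * Queue a single packet: optionally emit a context descriptor (TSO /
 * VLAN), then one data descriptor per mbuf segment carrying the
 * segment's IOVA and length, with checksum command bits from
 * atl_setup_csum_offload().  The last descriptor gets the EOP and
 * write-back bits, the hardware tail pointer is bumped and the
 * software per-queue packet/byte counters are updated.
 */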
1209 atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
1210 struct rte_mbuf *tx_pkt)
1212 struct atl_adapter *adapter =
1213 ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
1214 uint32_t pay_len = 0;
1216 struct atl_tx_entry *tx_entry;
1217 uint64_t buf_dma_addr;
1218 struct rte_mbuf *m_seg;
1219 union hw_atl_txc_s *txc = NULL;
1220 struct hw_atl_txd_s *txd = NULL;
1224 PMD_INIT_FUNC_TRACE();
1226 tail = txq->tx_tail;
1228 txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
1233 tx_cmd = atl_tso_setup(tx_pkt, txc);
1236 /* We've consumed the first desc, adjust counters */
1237 tail = (tail + 1) % txq->nb_tx_desc;
1238 txq->tx_tail = tail;
1241 txd = &txq->hw_ring[tail];
1244 txd = (struct hw_atl_txd_s *)txc;
1247 txd->ct_en = !!tx_cmd;
1249 txd->type = tx_desc_type_desc;
1251 atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
1256 pay_len = tx_pkt->pkt_len;
1258 txd->pay_len = pay_len;
1260 for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
1261 if (desc_count > 0) {
1262 txd = &txq->hw_ring[tail];
1266 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1267 txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
1269 txd->type = tx_desc_type_desc;
1270 txd->len = m_seg->data_len;
1271 txd->pay_len = pay_len;
1273 /* Store mbuf for freeing later */
1274 tx_entry = &txq->sw_ring[tail];
1277 rte_pktmbuf_free_seg(tx_entry->mbuf);
1278 tx_entry->mbuf = m_seg;
1280 tail = (tail + 1) % txq->nb_tx_desc;
1285 /* Last descriptor requires EOP and WB */
1287 txd->cmd |= tx_desc_cmd_wb;
1289 hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
1291 txq->tx_tail = tail;
1293 txq->tx_free -= desc_count;
1295 adapter->sw_stats.q_opackets[txq->queue_id]++;
1296 adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
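/*
 * Burst transmit entry point.  The ring is cleaned whenever tx_free
 * drops below tx_free_thresh; transmission stops early when a packet
 * needs more descriptors than are free, and mbufs with an inconsistent
 * segment count are not transmitted.  Each remaining packet is handed
 * to atl_xmit_pkt().
 */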
1300 atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1302 struct rte_eth_dev *dev = NULL;
1303 struct aq_hw_s *hw = NULL;
1304 struct atl_tx_queue *txq = tx_queue;
1305 struct rte_mbuf *tx_pkt;
1308 dev = &rte_eth_devices[txq->port_id];
1309 hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1312 "port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
1313 txq->port_id, txq->queue_id, nb_pkts, txq->tx_free,
1314 txq->tx_tail, txq->tx_head);
1316 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1317 tx_pkt = *tx_pkts++;
1319 /* Clean Tx queue if needed */
1320 if (txq->tx_free < txq->tx_free_thresh)
1321 atl_xmit_cleanup(txq);
1323 /* Check if we have enough free descriptors */
1324 if (txq->tx_free < tx_pkt->nb_segs)
1327 /* check mbuf is valid */
1328 if ((tx_pkt->nb_segs == 0) ||
1329 ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
1332 /* Send the packet */
1333 atl_xmit_pkt(hw, txq, tx_pkt);
1336 PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);