1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
/* Suppress compiler warnings caused by casting away qualifiers when
 * accessing the volatile descriptor rings.
 * NOTE(review): the matching #endif lines for both #ifdef blocks are not
 * visible in this view -- verify they exist in the full header, otherwise
 * the preprocessor conditionals are unbalanced.
 */
#ifdef RTE_TOOLCHAIN_GCC
#pragma GCC diagnostic ignored "-Wcast-qual"
/* Clang also understands the GCC diagnostic pragma */
#ifdef RTE_TOOLCHAIN_CLANG
#pragma GCC diagnostic ignored "-Wcast-qual"
/* Descriptor related defines */
#define AXGBE_MAX_RING_DESC 4096 /* must be a power of 2: ring indices are masked with (nb_desc - 1) */
#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3) /* 1/8 of the ring */
#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1) /* 1/2 of the ring */
#define AXGBE_MIN_RING_DESC 32
#define RTE_AXGBE_DESCS_PER_LOOP 4
#define RTE_AXGBE_MAX_RX_BURST 32
/* Default free thresholds for Rx/Tx descriptor recycling */
#define AXGBE_RX_FREE_THRESH 32
#define AXGBE_TX_FREE_THRESH 32
#define AXGBE_DESC_ALIGN 128 /* descriptor ring alignment in bytes */
/* Descriptor status-word bits/masks */
#define AXGBE_DESC_OWN 0x80000000 /* descriptor owned by hardware */
#define AXGBE_ERR_STATUS 0x000f0000 /* error-status field mask */
#define AXGBE_L3_CSUM_ERR 0x00050000 /* L3 checksum error code within the field */
#define AXGBE_L4_CSUM_ERR 0x00060000 /* L4 checksum error code within the field */
35 #include "axgbe_common.h"
/* Return a pointer to the descriptor at ring slot (_idx & (nb_desc - 1)).
 * NOTE(review): the base-pointer/cast line of this macro appears to be
 * elided in this view (parentheses are unbalanced) -- verify against the
 * full header before relying on this expansion.
 */
#define AXGBE_GET_DESC_PT(_queue, _idx) \
((_idx) & ((_queue)->nb_desc - 1)))
/* Wrap a running index into a valid ring slot; requires nb_desc to be a
 * power of 2 (see AXGBE_MAX_RING_DESC).
 * NOTE(review): the trailing '\' continues this macro onto the next
 * source line, which is not visible here -- confirm the continuation.
 */
#define AXGBE_GET_DESC_IDX(_queue, _idx) \
((_idx) & ((_queue)->nb_desc - 1)) \
/* Per-queue Rx state for one DMA channel.
 * NOTE(review): several field declarations appear to be elided in this
 * view (some comments below have no field following them) -- compare with
 * the full header before editing this structure.
 */
struct axgbe_rx_queue {
/* Mempool from which Rx buffers are allocated */
struct rte_mempool *mb_pool;
/* H/w Rx buffer size configured in DMA */
unsigned int buf_size;
/* Software ring holding the mbufs mapped to Rx descriptors */
struct rte_mbuf **sw_ring;
/* Back-reference to the owning port's private data */
struct axgbe_port *pdata;
/* Number of Rx descriptors in the queue */
/* Max free Rx descriptors to hold before refilling */
/* Index of the descriptor to check for packet availability */
/* Index of the descriptor to check for buffer reallocation */
/* Virtual address of the Rx descriptor ring */
volatile union axgbe_rx_desc *desc;
/* Physical address of the descriptor ring */
uint64_t ring_phys_addr;
/* DMA channel register address */
/* DMA channel tail register address */
volatile uint32_t *dma_tail_reg;
/* DPDK queue index */
/* Number of mbufs currently allocated from the pool */
} __rte_cache_aligned;
99 /*Tx descriptor format */
100 struct axgbe_tx_desc {
/* Per-queue Tx state for one DMA channel.
 * NOTE(review): several field declarations appear to be elided in this
 * view (some comments below have no field following them) -- compare with
 * the full header before editing this structure.
 */
struct axgbe_tx_queue {
/* Back-reference to the owning port's private data */
struct axgbe_port *pdata;
/* Number of Tx descriptors in the queue */
/* Start freeing Tx buffers once free descriptors drop below this count */
uint16_t free_thresh;
/* Descriptors currently available for Tx processing */
uint16_t nb_desc_free;
/* Number of mbufs/descriptors released per cleanup batch */
uint16_t free_batch_cnt;
/* Non-zero disables the vectorized Tx path */
uint16_t vector_disable;
/* Index of the descriptor to use for the current transfer */
/* Index of the descriptor to check for transfer completion */
/* Virtual address of the descriptor ring */
volatile struct axgbe_tx_desc *desc;
/* Physical address of the descriptor ring */
uint64_t ring_phys_addr;
/* DMA channel register space */
/* DMA tail register address of the ring */
volatile uint32_t *dma_tail_reg;
/* Tx queue index/id */
/* Mbufs mapped to Tx descriptors; freed after transmission
 * confirmation.
 */
struct rte_mbuf **sw_ring;
} __rte_cache_aligned;
/* Queue related APIs */
/* RX/TX function prototypes */
/* Free all resources held by the given Tx queue. */
void axgbe_dev_tx_queue_release(void *txq);
/* Set up a Tx queue on the given socket with nb_tx_desc descriptors. */
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
/* Device-level Tx enable/disable. */
void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
/* Per-queue Tx start/stop; return 0 on success, negative on error. */
int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
/* Tx burst functions (scalar and vector variants).
 * NOTE(review): the trailing parameter line(s) of these two prototypes
 * appear to be elided in this view -- verify against the full header.
 */
uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Free all resources held by the given Rx queue. */
void axgbe_dev_rx_queue_release(void *rxq);
/* Set up an Rx queue on the given socket, drawing buffers from mb_pool. */
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
/* Device-level Rx enable/disable. */
void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
/* Per-queue Rx start/stop; return 0 on success, negative on error. */
int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* Rx burst functions (plain and threshold-refresh variants).
 * NOTE(review): the trailing parameter line(s) of these two prototypes
 * appear to be elided in this view -- verify against the full header.
 */
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
struct rte_mbuf **rx_pkts,
/* Release descriptor rings and mbufs for all configured queues. */
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
186 #endif /* _AXGBE_RXTX_H_ */