/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#ifndef _AXGBE_RXTX_H_
#define _AXGBE_RXTX_H_
/* To suppress compiler warnings related to descriptor casting */
#ifdef RTE_TOOLCHAIN_GCC
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif

#ifdef RTE_TOOLCHAIN_CLANG
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
/* Descriptor related defines */
#define AXGBE_MAX_RING_DESC		4096 /* must be a power of 2 */
#define AXGBE_TX_DESC_MIN_FREE		(AXGBE_MAX_RING_DESC >> 3)
#define AXGBE_TX_DESC_MAX_PROC		(AXGBE_MAX_RING_DESC >> 1)
#define AXGBE_MIN_RING_DESC		32
#define RTE_AXGBE_DESCS_PER_LOOP	4
#define RTE_AXGBE_MAX_RX_BURST		32

#define AXGBE_RX_FREE_THRESH	32
#define AXGBE_TX_FREE_THRESH	32

#define AXGBE_DESC_ALIGN	128
#define AXGBE_DESC_OWN		0x80000000
#define AXGBE_ERR_STATUS	0x000f0000
#define AXGBE_L3_CSUM_ERR	0x00050000
#define AXGBE_L4_CSUM_ERR	0x00060000

#include "axgbe_common.h"
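/*
 * Illustrative sketch, not part of the driver API: one plausible way the
 * mask values above decode a Rx write-back descriptor status word. The
 * helper name is hypothetical; the real decode lives in axgbe_rxtx.c.
 */
static inline int
axgbe_rx_csum_err_sketch(uint32_t desc3)
{
	/* Isolate the error/status field, then compare it against the
	 * L3/L4 checksum error codes defined above.
	 */
	uint32_t err = desc3 & AXGBE_ERR_STATUS;

	return err == AXGBE_L3_CSUM_ERR || err == AXGBE_L4_CSUM_ERR;
}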
#define AXGBE_GET_DESC_PT(_queue, _idx)			\
	(((_queue)->desc) +				\
	 ((_idx) & ((_queue)->nb_desc - 1)))

#define AXGBE_GET_DESC_IDX(_queue, _idx)		\
	((_idx) & ((_queue)->nb_desc - 1))
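/*
 * Example (illustrative): the ring counters run free and are masked on
 * use, which is why the ring size must be a power of two. With a
 * 4096-entry ring, a running counter of 4100 resolves to slot 4:
 *
 *	idx  = AXGBE_GET_DESC_IDX(rxq, rxq->cur);	(rxq->cur & 4095)
 *	desc = AXGBE_GET_DESC_PT(rxq, rxq->cur);	(&rxq->desc[idx])
 */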
/* Rx descriptor format */
union axgbe_rx_desc {
	struct {
		uint64_t baddr;
		uint32_t desc2;
		uint32_t desc3;
	} read;
	struct {
		uint32_t desc0;
		uint32_t desc1;
		uint32_t desc2;
		uint32_t desc3;
	} write;
};

struct axgbe_rx_queue {
	/* mbuf pool for Rx buffers */
	struct rte_mempool *mb_pool;
	/* H/w Rx buffer size configured in DMA */
	unsigned int buf_size;
	/* Address of s/w Rx buffers */
	struct rte_mbuf **sw_ring;
	/* Port private data */
	struct axgbe_port *pdata;
	/* Number of Rx descriptors in queue */
	uint16_t nb_desc;
	/* Max free Rx descriptors to hold */
	uint16_t free_thresh;
	/* Index of descriptor to check for packet availability */
	uint64_t cur;
	/* Index of descriptor to check for buffer reallocation */
	uint64_t dirty;
	/* Software Rx descriptor ring */
	volatile union axgbe_rx_desc *desc;
	/* Ring physical address */
	uint64_t ring_phys_addr;
	/* DMA channel register address */
	void *dma_regs;
	/* DMA channel tail register address */
	volatile uint32_t *dma_tail_reg;
	/* DPDK queue index */
	uint16_t queue_id;
	/* DPDK port id */
	uint16_t port_id;
	/* Queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
	uint64_t rx_mbuf_alloc_failed;
	/* Number of mbufs allocated from pool */
	uint64_t mbuf_alloc;
} __rte_cache_aligned;
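/*
 * Illustrative helper, not driver code: cur and dirty are free-running
 * 64-bit counters, so the count of descriptors consumed but not yet
 * refilled needs no wrap handling. The helper name is hypothetical.
 */
static inline uint64_t
axgbe_rx_dirty_count_sketch(const struct axgbe_rx_queue *rxq)
{
	/* Descriptors handed to the app (cur) minus those refilled (dirty) */
	return rxq->cur - rxq->dirty;
}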
/* Tx descriptor format */
struct axgbe_tx_desc {
	phys_addr_t baddr;
	uint32_t desc2;
	uint32_t desc3;
};
struct axgbe_tx_queue {
	/* Port private data reference */
	struct axgbe_port *pdata;
	/* Number of Tx descriptors in queue */
	uint16_t nb_desc;
	/* Start freeing Tx buffers if there are fewer free descriptors
	 * than this value
	 */
	uint16_t free_thresh;
	/* Available descriptors for Tx processing */
	uint16_t nb_desc_free;
	/* Batch of mbufs/descriptors to release */
	uint16_t free_batch_cnt;
	/* Flag for vector support */
	uint16_t vector_disable;
	/* Index of descriptor to be used for current transfer */
	uint64_t cur;
	/* Index of descriptor to check for transfer complete */
	uint64_t dirty;
	/* Virtual address of ring */
	volatile struct axgbe_tx_desc *desc;
	/* Physical address of ring */
	uint64_t ring_phys_addr;
	/* DMA channel register space */
	void *dma_regs;
	/* DMA tail register address of ring */
	volatile uint32_t *dma_tail_reg;
	/* Tx queue index/id */
	uint16_t queue_id;
	/* Reference to hold Tx mbufs mapped to Tx descriptors, freed
	 * after transmission confirmation
	 */
	struct rte_mbuf **sw_ring;
	/* DPDK port id */
	uint16_t port_id;
	/* Queue stats */
	uint64_t pkts;
	uint64_t bytes;
	uint64_t errors;
} __rte_cache_aligned;
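/*
 * Illustrative helper, not driver code: a typical use of the threshold
 * fields above. Cleanup of completed descriptors is deferred until the
 * free count drops below free_thresh, after which mbufs are released in
 * batches of free_batch_cnt. The helper name is hypothetical.
 */
static inline int
axgbe_tx_cleanup_due_sketch(const struct axgbe_tx_queue *txq)
{
	return txq->nb_desc_free < txq->free_thresh;
}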
/* Queue related APIs */

/*
 * RX/TX function prototypes
 */
void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			     uint16_t nb_tx_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf);
void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
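/*
 * Usage sketch (illustrative, application side): the setup hook above is
 * reached through the generic ethdev API, e.g.:
 *
 *	struct rte_eth_txconf txconf = {
 *		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
 *	};
 *	rte_eth_tx_queue_setup(port_id, 0, AXGBE_MAX_RING_DESC,
 *			       rte_socket_id(), &txconf);
 */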
int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size);

uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts);
uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts);
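/*
 * Wiring sketch (assumption based on common PMD practice, not confirmed
 * by this header): burst functions like these are published through the
 * ethdev burst pointers at probe time, with the scalar Tx path chosen
 * when vector support is disabled, e.g.:
 *
 *	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
 *	eth_dev->tx_pkt_burst = &axgbe_xmit_pkts;
 */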
void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			     uint16_t nb_rx_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mb_pool);
void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts);
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
				       struct rte_mbuf **rx_pkts,
				       uint16_t nb_pkts);
uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
					   struct rte_mbuf **rx_pkts,
					   uint16_t nb_pkts);
void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
int axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
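/*
 * Usage sketch (illustrative): the descriptor status hooks back the
 * generic rte_eth_rx_descriptor_status()/rte_eth_tx_descriptor_status()
 * calls, letting an application probe ring progress:
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
 *	    RTE_ETH_RX_DESC_DONE)
 *		...a packet is ready offset slots past the next read point
 */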
#endif /* _AXGBE_RXTX_H_ */