/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#ifndef _QEDE_RXTX_H_
#define _QEDE_RXTX_H_

#include "qede_ethdev.h"

/* Ring Descriptors */
#define RX_RING_SIZE_POW        16 /* 64K */
#define RX_RING_SIZE            (1ULL << RX_RING_SIZE_POW)
#define NUM_RX_BDS_MAX          (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN          128
#define NUM_RX_BDS_DEF          NUM_RX_BDS_MAX
#define NUM_RX_BDS(q)           (q->nb_rx_desc - 1)

#define TX_RING_SIZE_POW        16 /* 64K */
#define TX_RING_SIZE            (1ULL << TX_RING_SIZE_POW)
#define NUM_TX_BDS_MAX          (TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN          128
#define NUM_TX_BDS_DEF          NUM_TX_BDS_MAX
#define NUM_TX_BDS(q)           (q->nb_tx_desc - 1)

#define TX_CONS(txq)            (txq->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq)            (txq->sw_tx_prod & NUM_TX_BDS(txq))
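
/* Example (illustrative only): the software producer/consumer counters are
 * masked with NUM_TX_BDS(txq), which acts as a wrap-around mask when
 * nb_tx_desc is a power of two; e.g. with nb_tx_desc == 512 the mask is 511,
 * so sw_tx_prod == 515 yields TX_PROD(txq) == 3.
 */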

#define QEDE_DEFAULT_TX_FREE_THRESH     32

#define QEDE_CSUM_ERROR                 (1 << 0)
#define QEDE_CSUM_UNNECESSARY           (1 << 1)
#define QEDE_TUNN_CSUM_UNNECESSARY      (1 << 2)

#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \
	do { \
		(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
		(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
		(bd)->nbytes = rte_cpu_to_le_16(len); \
	} while (0)
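
/* Illustrative usage sketch (the mbuf accessors are standard DPDK, not
 * defined here): a buffer descriptor is typically filled from an mbuf as
 *   QEDE_BD_SET_ADDR_LEN(bd, rte_mbuf_data_iova(mbuf), mbuf->data_len);
 * i.e. the DMA address is stored as little-endian hi/lo halves plus a length.
 */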

#define CQE_HAS_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))

#define CQE_HAS_OUTER_VLAN(flags) \
	((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
		<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))

#define QEDE_MIN_RX_BUFF_SIZE           (1024)
#define QEDE_VLAN_TAG_SIZE              (4)
#define QEDE_LLC_SNAP_HDR_LEN           (8)

/* Max supported alignment is 256 (8 shift)
 * minimal alignment shift 6 is optimal for 57xxx HW performance
 */
#define QEDE_L1_CACHE_SHIFT     6
#define QEDE_RX_ALIGN_SHIFT     (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END    (1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
					 ~(QEDE_FW_RX_ALIGN_END - 1))
#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
						 QEDE_FW_RX_ALIGN_END)
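
/* Worked example (illustrative only): with QEDE_L1_CACHE_SHIFT == 6 the clamp
 * above gives QEDE_RX_ALIGN_SHIFT == 6 and QEDE_FW_RX_ALIGN_END == 64, so
 * QEDE_CEIL_TO_CACHE_LINE_SIZE(100) == 128 and
 * QEDE_FLOOR_TO_CACHE_LINE_SIZE(100) == 64.
 */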

/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
 * +2 is for padding in front of L2 header
 */
#define QEDE_ETH_OVERHEAD       (((2 * QEDE_VLAN_TAG_SIZE)) \
				 + (QEDE_LLC_SNAP_HDR_LEN) + 2)

#define QEDE_MAX_ETHER_HDR_LEN  (RTE_ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_ETH_MAX_LEN        (RTE_ETHER_MTU + QEDE_MAX_ETHER_HDR_LEN)
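
/* Worked example (assuming the standard DPDK values RTE_ETHER_HDR_LEN == 14
 * and RTE_ETHER_MTU == 1500): QEDE_ETH_OVERHEAD = 2 * 4 + 8 + 2 = 18,
 * QEDE_MAX_ETHER_HDR_LEN = 14 + 18 = 32 and
 * QEDE_ETH_MAX_LEN = 1500 + 32 = 1532 bytes.
 */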

#define QEDE_RSS_OFFLOAD_ALL    (RTE_ETH_RSS_IPV4 |\
				 RTE_ETH_RSS_NONFRAG_IPV4_TCP |\
				 RTE_ETH_RSS_NONFRAG_IPV4_UDP |\
				 RTE_ETH_RSS_IPV6 |\
				 RTE_ETH_RSS_NONFRAG_IPV6_TCP |\
				 RTE_ETH_RSS_NONFRAG_IPV6_UDP |\
				 RTE_ETH_RSS_VXLAN |\
				 RTE_ETH_RSS_GENEVE)

#define QEDE_RXTX_MAX(qdev) \
	(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))

/* Macros for non-tunnel packet types lkup table */
#define QEDE_PKT_TYPE_UNKNOWN                           0x0
#define QEDE_PKT_TYPE_MAX                               0x3f

#define QEDE_PKT_TYPE_IPV4                              0x1
#define QEDE_PKT_TYPE_IPV6                              0x2
#define QEDE_PKT_TYPE_IPV4_TCP                          0x5
#define QEDE_PKT_TYPE_IPV6_TCP                          0x6
#define QEDE_PKT_TYPE_IPV4_UDP                          0x9
#define QEDE_PKT_TYPE_IPV6_UDP                          0xa

/* For frag pkts, the corresponding IP bit is set */
#define QEDE_PKT_TYPE_IPV4_FRAG                         0x11
#define QEDE_PKT_TYPE_IPV6_FRAG                         0x12

#define QEDE_PKT_TYPE_IPV4_VLAN                         0x21
#define QEDE_PKT_TYPE_IPV6_VLAN                         0x22
#define QEDE_PKT_TYPE_IPV4_TCP_VLAN                     0x25
#define QEDE_PKT_TYPE_IPV6_TCP_VLAN                     0x26
#define QEDE_PKT_TYPE_IPV4_UDP_VLAN                     0x29
#define QEDE_PKT_TYPE_IPV6_UDP_VLAN                     0x2a

#define QEDE_PKT_TYPE_IPV4_VLAN_FRAG                    0x31
#define QEDE_PKT_TYPE_IPV6_VLAN_FRAG                    0x32
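
/* Reading the values above as bit flags (illustrative): bit 0 = IPv4,
 * bit 1 = IPv6, bit 2 = TCP, bit 3 = UDP, bit 4 = fragment, bit 5 = VLAN,
 * e.g. QEDE_PKT_TYPE_IPV4_UDP_VLAN (0x29) == 0x20 | 0x8 | 0x1.
 */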

/* Macros for tunneled packets with next protocol lkup table */
#define QEDE_PKT_TYPE_TUNN_GENEVE                       0x1
#define QEDE_PKT_TYPE_TUNN_GRE                          0x2
#define QEDE_PKT_TYPE_TUNN_VXLAN                        0x3

/* Bit 2 is don't care bit */
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE      0x9
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE         0xa
#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN       0xb

#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE        0xd
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE           0xe
#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN         0xf

#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE    0x11
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE       0x12
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN     0x13

#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE      0x15
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE         0x16
#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN       0x17

#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE    0x19
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE       0x1a
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN     0x1b

#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE      0x1d
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE         0x1e
#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN       0x1f

#define QEDE_PKT_TYPE_TUNN_MAX_TYPE                     0x20 /* 2^5 */
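
/* Reading the tunnel values above as a bit field (illustrative): bits 1:0
 * select the tunnel type (GENEVE/GRE/VXLAN), bit 2 marks a present tenant ID
 * and bits 4:3 encode the inner header (01 = L2, 10 = IPv4, 11 = IPv6), e.g.
 * QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN (0x17) == 0x10 | 0x4 | 0x3.
 */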

#define QEDE_TX_CSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM | \
				   RTE_MBUF_F_TX_TCP_CKSUM | \
				   RTE_MBUF_F_TX_UDP_CKSUM | \
				   RTE_MBUF_F_TX_OUTER_IP_CKSUM | \
				   RTE_MBUF_F_TX_TCP_SEG | \
				   RTE_MBUF_F_TX_IPV4 | \
				   RTE_MBUF_F_TX_IPV6)

#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
			      RTE_MBUF_F_TX_VLAN | \
			      RTE_MBUF_F_TX_TUNNEL_MASK)

#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
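
/* Illustrative only: a Tx prepare path would typically use the NOTSUP mask to
 * reject mbufs requesting offloads this PMD does not handle, along the lines
 * of "if (mbuf->ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) return an error".
 */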

/* TPA related structures */
struct qede_agg_info {
	struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
	struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
};

/*
 * Structure associated with each RX queue.
 */
struct qede_rx_queue {
	/* Always keep qdev as first member */
	struct qede_dev *qdev;
	struct rte_mempool *mb_pool;
	struct ecore_chain rx_bd_ring;
	struct ecore_chain rx_comp_ring;
	uint16_t *hw_cons_ptr;
	void OSAL_IOMEM *hw_rxq_prod_addr;
	struct rte_mbuf **sw_rx_ring;
	struct ecore_sb_info *sb_info;
	uint16_t sw_rx_cons;
	uint16_t sw_rx_prod;
	uint16_t nb_rx_desc;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t rx_buf_size;
	uint16_t rx_alloc_count;
	uint64_t rx_hw_errors;
	uint64_t rx_alloc_errors;
	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
};

union db_prod {
	struct eth_db_data data;
	uint32_t raw;
};

struct qede_tx_queue {
	/* Always keep qdev as first member */
	struct qede_dev *qdev;
	struct ecore_chain tx_pbl;
	struct rte_mbuf **sw_tx_ring;
	uint16_t nb_tx_desc;
	uint16_t nb_tx_avail;
	uint16_t tx_free_thresh;
	uint16_t queue_id;
	uint16_t *hw_cons_ptr;
	uint16_t sw_tx_cons;
	uint16_t sw_tx_prod;
	void OSAL_IOMEM *doorbell_addr;
	volatile union db_prod tx_db;
};

struct qede_fastpath {
	struct ecore_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
};

/* This structure holds the information of fast path queues
 * belonging to individual engines in CMT mode.
 */
struct qede_fastpath_cmt {
	/* Always keep this as the first element */
	struct qede_dev *qdev;
	/* fastpath info of engine 0 */
	struct qede_fastpath *fp0;
	/* fastpath info of engine 1 */
	struct qede_fastpath *fp1;
};

/*
 * RX/TX function prototypes
 */
int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp);

int qede_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf);

void qede_rx_queue_release(void *rx_queue);

void qede_tx_queue_release(void *tx_queue);

uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,
			    uint16_t nb_pkts);
uint16_t qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts);

uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
			     uint16_t nb_pkts);

uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts);
uint16_t
qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts,
		       uint16_t nb_pkts);

uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
			      struct rte_mbuf **pkts,
			      uint16_t nb_pkts);

int qede_start_queues(struct rte_eth_dev *eth_dev);

void qede_stop_queues(struct rte_eth_dev *eth_dev);
int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
			  uint16_t max_frame_size);
int
qede_rx_descriptor_status(void *rxq, uint16_t offset);

/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);

#endif /* _QEDE_RXTX_H_ */