/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */
5 #ifndef __OTX2_ETHDEV_H__
6 #define __OTX2_ETHDEV_H__
10 #include <rte_common.h>
11 #include <rte_ethdev.h>
12 #include <rte_kvargs.h>
14 #include <rte_mempool.h>
15 #include <rte_string_fns.h>
17 #include "otx2_common.h"
20 #include "otx2_mempool.h"
/* Version string the PMD reports */
#define OTX2_ETH_DEV_PMD_VERSION "1.0"

/* Ethdev HWCAP and Fixup flags. Use from MSB bits to avoid conflict with dev */

/* Minimum CQ size should be 4K */
#define OTX2_FIXUP_F_MIN_4K_Q BIT_ULL(63)
#define otx2_ethdev_fixup_is_min_4k_q(dev) \
	((dev)->hwcap & OTX2_FIXUP_F_MIN_4K_Q)
/* Limit CQ being full */
#define OTX2_FIXUP_F_LIMIT_CQ_FULL BIT_ULL(62)
#define otx2_ethdev_fixup_is_limit_cq_full(dev) \
	((dev)->hwcap & OTX2_FIXUP_F_LIMIT_CQ_FULL)

/* Used for struct otx2_eth_dev::flags */
#define OTX2_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx space is always reserved for this in FRS.
 */
#define NIX_MAX_VTAG_INS 2
#define NIX_MAX_VTAG_ACT_SIZE (4 * NIX_MAX_VTAG_INS)

/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
#define NIX_L2_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8)

/* HW config of frame size doesn't include FCS */
#define NIX_MAX_HW_FRS 9212
#define NIX_MIN_HW_FRS 60
/* Since HW FRS includes NPC VTAG insertion space, user has reduced FRS */
#define NIX_MAX_FRS	\
	(NIX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - NIX_MAX_VTAG_ACT_SIZE)

/* Smallest acceptable frame; HW FRS excludes FCS, so add it back */
#define NIX_MIN_FRS	\
	(NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN)

/* Largest L3 MTU derivable from the max frame size */
#define NIX_MAX_HW_MTU	\
	(NIX_MAX_FRS - NIX_L2_OVERHEAD)
/* SQB (send queue buffer) limits per Tx queue */
#define NIX_MAX_SQB 512
#define NIX_MIN_SQB 32
#define NIX_SQB_LIST_SPACE 2
#define NIX_RSS_RETA_SIZE_MAX 256
/* Group 0 will be used for RSS, 1 -7 will be used for rte_flow RSS action*/
#define NIX_RSS_GRPS 8
#define NIX_HASH_KEY_SIZE 48 /* 48 bytes = 384 bits */
#define NIX_RSS_RETA_SIZE 64
#define NIX_RX_MIN_DESC 16
#define NIX_RX_MIN_DESC_ALIGN 16
#define NIX_RX_NB_SEG_MAX 6
#define NIX_CQ_ENTRY_SZ 128
#define NIX_CQ_ALIGN 512
/* presumably a percentage threshold for SQB usage — confirm against Tx code */
#define NIX_SQB_LOWER_THRESH 90
#define LMT_SLOT_MASK 0x7f
/* If PTP is enabled additional SEND MEM DESC is required which
 * takes 2 words, hence max 7 iova address are possible
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define NIX_TX_NB_SEG_MAX 7
#else
#define NIX_TX_NB_SEG_MAX 9
#endif
/* Bit positions of the error flags in the CQ status word */
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46

#define OP_ERR BIT_ULL(CQ_OP_STAT_OP_ERR)
#define CQ_ERR BIT_ULL(CQ_OP_STAT_CQ_ERR)

/* RSS hash types this PMD supports */
#define NIX_RSS_OFFLOAD (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
	ETH_RSS_TCP | ETH_RSS_SCTP | \
	ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD)

/* Supported Tx offloads (see otx2_eth_dev::tx_offload_capa) */
#define NIX_TX_OFFLOAD_CAPA ( \
	DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
	DEV_TX_OFFLOAD_MT_LOCKFREE | \
	DEV_TX_OFFLOAD_VLAN_INSERT | \
	DEV_TX_OFFLOAD_QINQ_INSERT | \
	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
	DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
	DEV_TX_OFFLOAD_TCP_CKSUM | \
	DEV_TX_OFFLOAD_UDP_CKSUM | \
	DEV_TX_OFFLOAD_SCTP_CKSUM | \
	DEV_TX_OFFLOAD_MULTI_SEGS | \
	DEV_TX_OFFLOAD_IPV4_CKSUM)

/* Supported Rx offloads (see otx2_eth_dev::rx_offload_capa) */
#define NIX_RX_OFFLOAD_CAPA ( \
	DEV_RX_OFFLOAD_CHECKSUM | \
	DEV_RX_OFFLOAD_SCTP_CKSUM | \
	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
	DEV_RX_OFFLOAD_SCATTER | \
	DEV_RX_OFFLOAD_JUMBO_FRAME | \
	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
	DEV_RX_OFFLOAD_VLAN_STRIP | \
	DEV_RX_OFFLOAD_VLAN_FILTER | \
	DEV_RX_OFFLOAD_QINQ_STRIP | \
	DEV_RX_OFFLOAD_TIMESTAMP)
/* Defaults for the RSS context group and MCAM rule index.
 * -1 is parenthesized so the macro expands safely inside any expression.
 */
#define NIX_DEFAULT_RSS_CTX_GROUP  0
#define NIX_DEFAULT_RSS_MCAM_IDX  (-1)
/* Queue depth selector (see otx2_eth_rxq::qsize). */
enum nix_q_size_e {
	nix_q_size_16,	/* 16 entries */
	nix_q_size_64,	/* 64 entries */
	nix_q_size_1M,	/* Million entries */
	nix_q_size_max
};
/* Per-queue interrupt context (see otx2_eth_dev::qints_mem). */
struct otx2_qint {
	struct rte_eth_dev *eth_dev;
	uint8_t qintx; /* NIX_LF queue-interrupt index — TODO confirm vs IRQ code */
};
146 struct otx2_rss_info {
148 uint32_t flowkey_cfg;
151 uint8_t alg_idx; /* Selected algo index */
152 uint16_t ind_tbl[NIX_RSS_RETA_SIZE_MAX];
153 uint8_t key[NIX_HASH_KEY_SIZE];
156 struct otx2_eth_qconf {
158 struct rte_eth_txconf tx;
159 struct rte_eth_rxconf rx;
/* NPC (flow classifier) attributes of this port. */
struct otx2_npc_flow_info {
	uint16_t channel; /* Rx channel */
	uint16_t flow_prealloc_size;
	uint16_t flow_max_priority;
};
/* Per-port private data, stored in rte_eth_dev->data->dev_private
 * and retrieved with otx2_eth_pmd_priv().
 */
struct otx2_eth_dev {
	OTX2_DEV; /* Base class */
	MARKER otx2_eth_dev_data_start;
	uint16_t rx_chan_base; /* Base HW channel for Rx */
	uint16_t tx_chan_base; /* Base HW channel for Tx */
	uint8_t lso_tsov4_idx; /* LSO format index, TCPv4 */
	uint8_t lso_tsov6_idx; /* LSO format index, TCPv6 */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN]; /* Primary MAC address */
	uint8_t max_mac_entries; /* MAC filter table capacity */
	uint8_t configured_qints;
	uint8_t configured_nb_rx_qs; /* Rx queue count at last configure */
	uint8_t configured_nb_tx_qs; /* Tx queue count at last configure */
	uint16_t nix_msixoff; /* MSI-X vector offset of the NIX LF */
	uint16_t max_sqb_count; /* Upper bound on SQBs per Tx queue */
	uint16_t rx_offload_flags; /* Selected Rx offload flags(NIX_RX_*_F) */
	uint64_t rx_offloads; /* DEV_RX_OFFLOAD_* currently enabled */
	uint16_t tx_offload_flags; /* Selected Tx offload flags(NIX_TX_*_F) */
	uint64_t tx_offloads; /* DEV_TX_OFFLOAD_* currently enabled */
	uint64_t rx_offload_capa; /* Supported Rx offloads */
	uint64_t tx_offload_capa; /* Supported Tx offloads */
	struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT]; /* Q irq contexts */
	/* Tx scheduler (TM) resource bookkeeping, one slot per NIX level */
	uint16_t txschq[NIX_TXSCH_LVL_CNT];
	uint16_t txschq_contig[NIX_TXSCH_LVL_CNT];
	uint16_t txschq_index[NIX_TXSCH_LVL_CNT];
	uint16_t txschq_contig_index[NIX_TXSCH_LVL_CNT];
	/* Dis-contiguous queues */
	uint16_t txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	/* Contiguous queues */
	uint16_t txschq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
	uint16_t otx2_tm_root_lvl; /* Root level of the TM hierarchy */
	uint16_t tm_leaf_cnt;
	struct otx2_nix_tm_node_list node_list;
	struct otx2_nix_tm_shaper_profile_list shaper_profile_list;
	struct otx2_rss_info rss_info; /* RSS key/RETA/flowkey state */
	/* Per-queue stat-counter mappings (see otx2_nix_queue_stats_mapping) */
	uint32_t txmap[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint32_t rxmap[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	struct otx2_npc_flow_info npc_flow; /* Flow classifier state */
	struct otx2_eth_qconf *tx_qconf; /* Saved Tx queue configs */
	struct otx2_eth_qconf *rx_qconf; /* Saved Rx queue configs */
	struct rte_eth_dev *eth_dev; /* Back-pointer to the generic ethdev */
} __rte_cache_aligned;
/* Tx queue state. Fields before the slow_path_start marker are the ones
 * touched on the hot Tx path; keep them at the front of the cache line.
 */
struct otx2_eth_txq {
	int64_t fc_cache_pkts; /* Cached flow-control credit, in packets */
	uint16_t sqes_per_sqb_log2; /* log2 of SQEs per SQB */
	int16_t nb_sqb_bufs_adj; /* SQB count adjusted by NIX_SQB_LOWER_THRESH */
	MARKER slow_path_start; /* Fields below are slow-path only */
	uint16_t nb_sqb_bufs; /* Total SQBs backing this SQ */
	struct otx2_eth_dev *dev; /* Owning port */
	struct rte_mempool *sqb_pool; /* Pool the SQBs are drawn from */
	struct otx2_eth_qconf qconf; /* User config saved at queue setup */
} __rte_cache_aligned;
/* Rx queue state; fields before slow_path_start are fast-path fields. */
struct otx2_eth_rxq {
	uint64_t mbuf_initializer; /* Precomputed mbuf init word — confirm layout */
	struct otx2_timesync_info *tstamp; /* PTP timestamp state, if enabled */
	MARKER slow_path_start; /* Fields below are slow-path only */
	struct rte_mempool *pool; /* mbuf pool backing this queue */
	enum nix_q_size_e qsize; /* HW queue depth selector */
	struct rte_eth_dev *eth_dev; /* Back-pointer to the generic ethdev */
	struct otx2_eth_qconf qconf; /* User config saved at queue setup */
} __rte_cache_aligned;
268 static inline struct otx2_eth_dev *
269 otx2_eth_pmd_priv(struct rte_eth_dev *eth_dev)
271 return eth_dev->data->dev_private;
/* Ethdev ops */
void otx2_nix_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *dev_info);
int otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
void otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);
void otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);
uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
int otx2_nix_rx_descriptor_done(void *rxq, uint16_t offset);
int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset);

/* Promiscuous / multicast mode */
void otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en);
void otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev);
void otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev);
void otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
void otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
int otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx);
int otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx);
uint64_t otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id);

/* Link */
void otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set);
int otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
void otx2_eth_dev_link_status_update(struct otx2_dev *dev,
	struct cgx_link_user_info *link);

/* IRQ.
 * NOTE(review): the "oxt2_" spellings below are misspellings of "otx2_",
 * but they are kept as-is because renaming would break the definitions
 * and call sites in the .c files.
 */
int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev);
int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev);
void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev);
void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev);

/* Debug */
int otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data);
int otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
	struct rte_dev_reg_info *regs);
int otx2_nix_queues_ctx_dump(struct rte_eth_dev *eth_dev);
void otx2_nix_cqe_dump(const struct nix_cqe_hdr_s *cq);

/* Stats */
int otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats);
void otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev);
320 int otx2_nix_queue_stats_mapping(struct rte_eth_dev *dev,
321 uint16_t queue_id, uint8_t stat_idx,
/* Extended stats */
int otx2_nix_xstats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_xstat *xstats, unsigned int n);
/* Fill up to limit xstat names (rte_ethdev xstats_get_names op shape). */
int otx2_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
			      struct rte_eth_xstat_name *xstats_names,
			      unsigned int limit);
void otx2_nix_xstats_reset(struct rte_eth_dev *eth_dev);
330 int otx2_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev,
332 uint64_t *values, unsigned int n);
int otx2_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
	struct rte_eth_xstat_name *xstats_names,
	const uint64_t *ids, unsigned int limit);

/* RSS */
void otx2_nix_rss_set_key(struct otx2_eth_dev *dev,
	uint8_t *key, uint32_t key_len);
uint32_t otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev,
	uint64_t ethdev_rss, uint8_t rss_level);
int otx2_rss_set_hf(struct otx2_eth_dev *dev,
	uint32_t flowkey_cfg, uint8_t *alg_idx,
	uint8_t group, int mcam_index);
345 int otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev, uint8_t group,
347 int otx2_nix_rss_config(struct rte_eth_dev *eth_dev);
349 int otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
350 struct rte_eth_rss_reta_entry64 *reta_conf,
352 int otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
353 struct rte_eth_rss_reta_entry64 *reta_conf,
int otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
	struct rte_eth_rss_conf *rss_conf);
int otx2_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_rss_conf *rss_conf);

/* CGX (MAC block) control */
int otx2_cgx_rxtx_start(struct otx2_eth_dev *dev);
int otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev);
int otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *addr);

/* Lookup configuration */
void *otx2_nix_fastpath_lookup_mem_get(void);

/* Packet type support */
const uint32_t *otx2_nix_supported_ptypes_get(struct rte_eth_dev *dev);

/* Mac address handling */
int otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *addr);
int otx2_nix_mac_addr_get(struct rte_eth_dev *eth_dev, uint8_t *addr);
int otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *addr,
	uint32_t index, uint32_t pool);
void otx2_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
int otx2_cgx_mac_max_entries_get(struct otx2_eth_dev *dev);

/* Devargs parsing */
int otx2_ethdev_parse_devargs(struct rte_devargs *devargs,
	struct otx2_eth_dev *dev);

/* Rx and Tx routines */
void otx2_nix_form_default_desc(struct otx2_eth_txq *txq);
390 #endif /* __OTX2_ETHDEV_H__ */