/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CNXK_ETHDEV_H__
#define __CNXK_ETHDEV_H__

#include <stdbool.h>
#include <stdint.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "roc_api.h"

#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx, space is always reserved for this in FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS	   2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
			      RTE_ETHER_CRC_LEN + \
			      CNXK_NIX_MAX_VTAG_ACT_SIZE)
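
/* Worked example (illustrative): with RTE_ETHER_HDR_LEN = 14,
 * RTE_ETHER_CRC_LEN = 4 and CNXK_NIX_MAX_VTAG_ACT_SIZE = 8, the L2
 * overhead reserved on top of the MTU is 14 + 4 + 8 = 26 bytes.
 */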

#define CNXK_NIX_RX_MIN_DESC	    16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX	    6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB	    512

/* If PTP is enabled an additional SEND MEM DESC is required, which
 * takes 2 words; hence a maximum of 7 iova addresses are possible.
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

#define CNXK_NIX_TX_MSEG_SG_DWORDS \
	((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) + \
	 CNXK_NIX_TX_NB_SEG_MAX)
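
/* Worked example (illustrative): one SG dword describes up to three
 * segments and each segment consumes one iova dword, so for the
 * non-PTP maximum of 9 segments this evaluates to ceil(9 / 3) + 9 = 12.
 */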

#define CNXK_NIX_RSS_L3_L4_SRC_DST \
	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY | \
	 ETH_RSS_L4_DST_ONLY)

#define CNXK_NIX_RSS_OFFLOAD \
	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP | \
	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)

#define CNXK_NIX_TX_OFFLOAD_CAPA \
	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE | \
	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | \
	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | \
	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS | \
	 DEV_TX_OFFLOAD_IPV4_CKSUM)

#define CNXK_NIX_RX_OFFLOAD_CAPA \
	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
	 DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
	 DEV_RX_OFFLOAD_RSS_HASH)

#define RSS_IPV4_ENABLE \
	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE \
	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP | \
	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE \
	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)

#define RSS_MAX_LEVELS 3

#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff

#define CNXK_NIX_TIMESYNC_RX_OFFSET 8

#define PTYPE_NON_TUNNEL_WIDTH	  16
#define PTYPE_TUNNEL_WIDTH	  12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ	  BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ \
	((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
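
/* Worked example (illustrative): the lookup table holds one uint16_t
 * entry per possible ptype, i.e. (2^16 + 2^12) * 2 bytes = 139264 bytes.
 */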

/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

#define CNXK_NIX_UDP_TUN_BITMASK \
	((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
	 (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
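
/* Illustrative sketch, not part of the driver API: the PKT_TX_TUNNEL_*
 * type field occupies bits [45:48] of mbuf ol_flags, so shifting right
 * by 45 yields a small bit index into the mask above. A hypothetical
 * helper testing for UDP-based tunnels (VXLAN/GENEVE) could look like:
 *
 *	static inline bool tun_type_is_udp(uint64_t ol_flags)
 *	{
 *		uint64_t idx = (ol_flags & PKT_TX_TUNNEL_MASK) >> 45;
 *
 *		return (CNXK_NIX_UDP_TUN_BITMASK >> idx) & 0x1;
 *	}
 */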

struct cnxk_fc_cfg {
	enum rte_eth_fc_mode mode;
	uint8_t rx_pause;
	uint8_t tx_pause;
};

struct cnxk_eth_qconf {
	union {
		struct rte_eth_txconf tx;
		struct rte_eth_rxconf rx;
	} conf;
	struct rte_mempool *mp;
	uint16_t nb_desc;
	uint8_t valid;
};

struct cnxk_eth_dev {
	/* ROC NIX */
	struct roc_nix nix;

	/* ROC RQs, SQs and CQs */
	struct roc_nix_rq *rqs;
	struct roc_nix_sq *sqs;
	struct roc_nix_cq *cqs;

	/* Configured queue count */
	uint16_t nb_rxq;
	uint16_t nb_txq;

	/* Max macfilter entries */
	uint8_t max_mac_entries;
	bool dmac_filter_enable;

	uint16_t flags;
	uint8_t ptype_disable;

	/* Pointer back to rte */
	struct rte_eth_dev *eth_dev;

	/* HW capabilities / Limitations */
	uint64_t cq_min_4k : 1;

	/* Rx and Tx offload capabilities */
	uint64_t rx_offload_capa;
	uint64_t tx_offload_capa;

	/* Configured Rx and Tx offloads */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	/* Platform specific offload flags */
	uint16_t rx_offload_flags;
	uint16_t tx_offload_flags;

	/* ETHDEV RSS HF bitmask */
	uint64_t ethdev_rss_hf;

	/* Saved qconf before lf realloc */
	struct cnxk_eth_qconf *tx_qconf;
	struct cnxk_eth_qconf *rx_qconf;

	/* Flow control configuration */
	struct cnxk_fc_cfg fc_cfg;

	/* Rx burst for cleanup (primary process only) */
	eth_rx_burst_t rx_pkt_burst_no_offload;

	/* Default MAC address */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* LSO tunnel format indices */
	uint64_t lso_tun_fmt;

	/* Per queue statistics counters */
	uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
	uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};

struct cnxk_eth_rxq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

struct cnxk_eth_txq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

static inline struct cnxk_eth_dev *
cnxk_eth_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}
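
/* Typical usage (illustrative): ethdev callbacks receive the generic
 * device and recover the PMD private area, e.g.
 *
 *	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 */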

static inline struct cnxk_eth_rxq_sp *
cnxk_eth_rxq_to_sp(void *__rxq)
{
	return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
}

static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
	return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}
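
/* Illustrative layout sketch (the exact allocation code at queue setup
 * is an assumption here): the slow-path struct is expected to sit
 * immediately before the fast-path queue memory, e.g.
 *
 *	sp = plt_zmalloc(sizeof(*sp) + fp_q_sz, PLT_CACHE_LINE_SIZE);
 *	fp_q = (void *)(sp + 1);
 *
 * which is why the helpers above step one element back from the
 * fast-path pointer to recover the slow-path struct.
 */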

/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Common flow ops */
extern struct rte_flow_ops cnxk_flow_ops;

int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
		   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr, uint32_t index,
			  uint32_t pool);
void cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr);
int cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
		      struct rte_eth_dev_info *dev_info);
int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			   struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
			     struct rte_eth_dev_module_info *modinfo);
int cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
			       struct rte_dev_eeprom_info *info);
int cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
				  uint16_t rx_queue_id);
int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
				   uint16_t rx_queue_id);
int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
int cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
			  const struct rte_flow_ops **ops);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
			    const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);

uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);

uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
				uint8_t rss_level);

void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
				 struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
int cnxk_nix_queue_stats_mapping(struct rte_eth_dev *dev, uint16_t queue_id,
				 uint8_t stat_idx, uint8_t is_rx);
int cnxk_nix_stats_reset(struct rte_eth_dev *dev);
int cnxk_nix_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int cnxk_nix_xstats_get(struct rte_eth_dev *eth_dev,
			struct rte_eth_xstat *xstats, unsigned int n);
int cnxk_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
			      struct rte_eth_xstat_name *xstats_names,
			      unsigned int limit);
int cnxk_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
				    struct rte_eth_xstat_name *xstats_names,
				    const uint64_t *ids, unsigned int limit);
int cnxk_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
			      uint64_t *values, unsigned int n);
int cnxk_nix_xstats_reset(struct rte_eth_dev *eth_dev);
int cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
			    size_t fw_size);
void cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
			   struct rte_eth_rxq_info *qinfo);
void cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
			   struct rte_eth_txq_info *qinfo);

/* Lookup configuration */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
void *cnxk_nix_fastpath_lookup_mem_get(void);

int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
			      struct cnxk_eth_dev *dev);

int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
			 struct rte_dev_reg_info *regs);

/* Other private functions */
int nix_recalc_mtu(struct rte_eth_dev *eth_dev);

/* Detach an indirect mbuf from its direct parent and restore it to a
 * plain buffer backed by its own mempool area.
 */
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	/* Re-point the buffer at this mbuf's own mempool area */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		/* Last reference: reset the direct mbuf for reuse */
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	}
	return 1;
}

static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf has a refcount greater than 1, so it need not be freed */
	return 1;
}
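
/* Illustrative usage note (an assumption about the fast-path callers,
 * not confirmed by this header): Tx burst routines would call
 * cnxk_nix_prefree_seg() per segment when DEV_TX_OFFLOAD_MBUF_FAST_FREE
 * is not in use; a non-zero return means the buffer is still referenced
 * and the send descriptor must be marked so hardware does not free it.
 */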

#endif /* __CNXK_ETHDEV_H__ */