net/cnxk: add NPC configuration
[dpdk.git] drivers/net/cnxk/cnxk_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CNXK_ETHDEV_H__
#define __CNXK_ETHDEV_H__

#include <math.h>
#include <stdint.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "roc_api.h"

#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx, space for this is always reserved in the FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS      2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
                              RTE_ETHER_CRC_LEN + \
                              CNXK_NIX_MAX_VTAG_ACT_SIZE)

#define CNXK_NIX_RX_MIN_DESC        16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX      6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB 512

/* If PTP is enabled, an additional SEND MEM DESC is required, which
 * takes 2 words; hence a maximum of 7 IOVA addresses is possible.
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

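/* Note: worst-case DWORDs for a multi-seg Tx SG list; one SG sub-descriptor
 * word is assumed to cover up to 3 segments, plus one IOVA word per segment.
 */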
#define CNXK_NIX_TX_MSEG_SG_DWORDS                                             \
        ((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) +                 \
         CNXK_NIX_TX_NB_SEG_MAX)

#define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
        (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
         ETH_RSS_L4_DST_ONLY)

#define CNXK_NIX_RSS_OFFLOAD                                                   \
        (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
         ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
         CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)

#define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
        (DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
         DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
         DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
         DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
         DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
         DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
         DEV_TX_OFFLOAD_IPV4_CKSUM)

#define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
        (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
         DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
         DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
         DEV_RX_OFFLOAD_RSS_HASH)

#define RSS_IPV4_ENABLE                                                        \
        (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
         ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE                                                        \
        (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
         ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE                                                     \
        (ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)

#define RSS_MAX_LEVELS 3

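/* Note: indices into the per-protocol RSS flow-key configuration used by
 * cnxk_rss_ethdev_to_nix() (assumption based on usage).
 */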
#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff

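/* Packet type lookup memory: a 16-bit indexed non-tunnel table and a
 * 12-bit indexed tunnel table, each holding uint16_t ptype entries.
 */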
#define PTYPE_NON_TUNNEL_WIDTH    16
#define PTYPE_TUNNEL_WIDTH        12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ     BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ                                                         \
        ((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

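/* Bitmask, indexed by (PKT_TX_TUNNEL_* >> 45), of the mbuf tunnel types that
 * are UDP based (VXLAN and GENEVE).
 */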
#define CNXK_NIX_UDP_TUN_BITMASK                                               \
        ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
         (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))

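/* Cached flow control configuration (see cnxk_nix_flow_ctrl_set()/get()). */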
struct cnxk_fc_cfg {
        enum rte_eth_fc_mode mode;
        uint8_t rx_pause;
        uint8_t tx_pause;
};

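/* Saved per-queue configuration, used to restore queues across device
 * reconfiguration (see the tx_qconf/rx_qconf fields below).
 */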
struct cnxk_eth_qconf {
        union {
                struct rte_eth_txconf tx;
                struct rte_eth_rxconf rx;
        } conf;
        struct rte_mempool *mp;
        uint16_t nb_desc;
        uint8_t valid;
};

struct cnxk_eth_dev {
        /* ROC NIX */
        struct roc_nix nix;

        /* ROC NPC */
        struct roc_npc npc;

        /* ROC RQs, SQs and CQs */
        struct roc_nix_rq *rqs;
        struct roc_nix_sq *sqs;
        struct roc_nix_cq *cqs;

        /* Configured queue count */
        uint16_t nb_rxq;
        uint16_t nb_txq;
        uint8_t configured;

        /* Max macfilter entries */
        uint8_t max_mac_entries;
        bool dmac_filter_enable;

        uint16_t flags;
        uint8_t ptype_disable;
        bool scalar_ena;

        /* Pointer back to the rte_eth_dev */
        struct rte_eth_dev *eth_dev;

        /* HW capabilities / Limitations */
        union {
                struct {
                        uint64_t cq_min_4k : 1;
                };
                uint64_t hwcap;
        };

        /* Rx and Tx offload capabilities */
        uint64_t rx_offload_capa;
        uint64_t tx_offload_capa;
        uint32_t speed_capa;
        /* Configured Rx and Tx offloads */
        uint64_t rx_offloads;
        uint64_t tx_offloads;
        /* Platform specific offload flags */
        uint16_t rx_offload_flags;
        uint16_t tx_offload_flags;

        /* ETHDEV RSS HF bitmask */
        uint64_t ethdev_rss_hf;

        /* Saved qconf before lf realloc */
        struct cnxk_eth_qconf *tx_qconf;
        struct cnxk_eth_qconf *rx_qconf;

        /* Flow control configuration */
        struct cnxk_fc_cfg fc_cfg;

        /* Rx burst used for cleanup (primary process only) */
        eth_rx_burst_t rx_pkt_burst_no_offload;

        /* Default mac address */
        uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

        /* LSO Tunnel format indices */
        uint64_t lso_tun_fmt;

        /* Per queue statistics counters */
        uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
        uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};

struct cnxk_eth_rxq_sp {
        struct cnxk_eth_dev *dev;
        struct cnxk_eth_qconf qconf;
        uint16_t qid;
} __plt_cache_aligned;

struct cnxk_eth_txq_sp {
        struct cnxk_eth_dev *dev;
        struct cnxk_eth_qconf qconf;
        uint16_t qid;
} __plt_cache_aligned;

static inline struct cnxk_eth_dev *
cnxk_eth_pmd_priv(struct rte_eth_dev *eth_dev)
{
        return eth_dev->data->dev_private;
}

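/* The slow-path queue context is assumed to be laid out immediately before
 * the fast-path queue structure, so stepping back one element from the
 * fast-path pointer recovers it.
 */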
static inline struct cnxk_eth_rxq_sp *
cnxk_eth_rxq_to_sp(void *__rxq)
{
        return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
}

static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
        return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}

/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
                   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
                          struct rte_ether_addr *addr, uint32_t index,
                          uint32_t pool);
void cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
                          struct rte_ether_addr *addr);
int cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
                      struct rte_eth_dev_info *dev_info);
int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                               struct rte_eth_burst_mode *mode);
int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                               struct rte_eth_burst_mode *mode);
int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                           struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                           struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
                             struct rte_eth_dev_module_info *modinfo);
int cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
                               struct rte_dev_eeprom_info *info);
int cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
                                  uint16_t rx_queue_id);
int cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
                                   uint16_t rx_queue_id);
int cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
int cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);

int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_tx_q_sz,
                            const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_rx_q_sz,
                            const struct rte_eth_rxconf *rx_conf,
                            struct rte_mempool *mp);
int cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);

uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);

/* RSS */
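/* Translate ethdev ETH_RSS_* hash flags into the NIX RSS flow-key
 * configuration for the given tunnel level (see the implementation for the
 * exact mapping).
 */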
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
                                uint8_t rss_level);

/* Link */
void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
                                 struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
int cnxk_nix_queue_stats_mapping(struct rte_eth_dev *dev, uint16_t queue_id,
                                 uint8_t stat_idx, uint8_t is_rx);
int cnxk_nix_stats_reset(struct rte_eth_dev *dev);
int cnxk_nix_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int cnxk_nix_xstats_get(struct rte_eth_dev *eth_dev,
                        struct rte_eth_xstat *xstats, unsigned int n);
int cnxk_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
                              struct rte_eth_xstat_name *xstats_names,
                              unsigned int limit);
int cnxk_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
                                    struct rte_eth_xstat_name *xstats_names,
                                    const uint64_t *ids, unsigned int limit);
int cnxk_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
                              uint64_t *values, unsigned int n);
int cnxk_nix_xstats_reset(struct rte_eth_dev *eth_dev);
void cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                           struct rte_eth_rxq_info *qinfo);
void cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                           struct rte_eth_txq_info *qinfo);

/* Lookup configuration */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
void *cnxk_nix_fastpath_lookup_mem_get(void);

/* Devargs */
int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
                              struct cnxk_eth_dev *dev);

/* Inlines */
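/* Detach an indirect mbuf from its direct (attached-to) mbuf, restore it to a
 * plain buffer backed by its own pool, then free the indirect mbuf.
 * Returns 0 if the direct mbuf's refcount dropped to zero (it was reset and
 * may be recycled), 1 if it is still referenced elsewhere.
 */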
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        struct rte_mbuf *md;
        uint16_t priv_size;
        uint16_t refcount;

        /* Update refcount of direct mbuf */
        md = rte_mbuf_from_indirect(m);
        refcount = rte_mbuf_refcnt_update(md, -1);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
        m->next = NULL;
        m->nb_segs = 1;

        /* Now indirect mbuf is safe to free */
        rte_pktmbuf_free(m);

        if (refcount == 0) {
                rte_mbuf_refcnt_set(md, 1);
                md->data_len = 0;
                md->ol_flags = 0;
                md->next = NULL;
                md->nb_segs = 1;
                return 0;
        } else {
                return 1;
        }
}

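/* Decide whether a Tx segment's buffer can be released back to its mempool.
 * Returns 0 when the mbuf is (or has become) exclusively owned and its buffer
 * may be freed, 1 when it is still referenced and must not be freed.
 */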
static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m)
{
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                if (!RTE_MBUF_DIRECT(m))
                        return cnxk_pktmbuf_detach(m);

                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
                if (!RTE_MBUF_DIRECT(m))
                        return cnxk_pktmbuf_detach(m);

                rte_mbuf_refcnt_set(m, 1);
                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        }

        /* Mbuf has a refcount greater than 1, so it need not be freed here */
        return 1;
}

#endif /* __CNXK_ETHDEV_H__ */