50c75e1ae7481bb77f92b7b9c7f3c70c4e9cb989
[dpdk.git] / drivers / net / cnxk / cnxk_ethdev.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #ifndef __CNXK_ETHDEV_H__
5 #define __CNXK_ETHDEV_H__
6
7 #include <math.h>
8 #include <stdint.h>
9
10 #include <ethdev_driver.h>
11 #include <ethdev_pci.h>
12 #include <rte_kvargs.h>
13 #include <rte_mbuf.h>
14 #include <rte_mbuf_pool_ops.h>
15 #include <rte_mempool.h>
16
17 #include "roc_api.h"
18
#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx space is always reserved for this in FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS      2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8)

/* Rx descriptor ring limits and default size */
#define CNXK_NIX_RX_MIN_DESC        16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX      6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB 512

/* If PTP is enabled additional SEND MEM DESC is required which
 * takes 2 words, hence max 7 iova address are possible
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

/* SG descriptor words needed for a max-segment Tx packet:
 * one SG header word per group of up to 3 segments plus one
 * iova word per segment.
 */
#define CNXK_NIX_TX_MSEG_SG_DWORDS                                             \
	((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) +                 \
	 CNXK_NIX_TX_NB_SEG_MAX)

#define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
	 ETH_RSS_L4_DST_ONLY)

/* All RSS hash types advertised by this PMD */
#define CNXK_NIX_RSS_OFFLOAD                                                   \
	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)

#define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
	 DEV_TX_OFFLOAD_IPV4_CKSUM)

#define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
	 DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
	 DEV_RX_OFFLOAD_RSS_HASH)

#define RSS_IPV4_ENABLE                                                        \
	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE                                                        \
	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE                                                     \
	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)

#define RSS_MAX_LEVELS 3

/* Per-protocol indices used when building the NIX RSS flowkey
 * configuration (see cnxk_rss_ethdev_to_nix()).
 */
#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff

/* Packet-type lookup table sizing: 16-bit key for non-tunnel ptypes,
 * 12-bit key for tunnel ptypes, each entry a uint16_t.
 */
#define PTYPE_NON_TUNNEL_WIDTH    16
#define PTYPE_TUNNEL_WIDTH        12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ     BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ                                                         \
	((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

/* Bitmask of UDP-based tunnel types, indexed by the PKT_TX_TUNNEL_*
 * ol_flags value shifted down to a small integer (the tunnel-type
 * field starts at bit 45 of ol_flags).
 */
#define CNXK_NIX_UDP_TUN_BITMASK                                               \
	((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
	 (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
113
/* Saved per-queue configuration so queues can be re-created with the
 * same parameters after an LF reallocation (see struct
 * cnxk_eth_dev::tx_qconf / rx_qconf).
 */
struct cnxk_eth_qconf {
	union {
		struct rte_eth_txconf tx; /* Used when this entry is a Tx queue */
		struct rte_eth_rxconf rx; /* Used when this entry is an Rx queue */
	} conf;
	struct rte_mempool *mp; /* Rx only: mbuf pool supplied at queue setup */
	uint16_t nb_desc;	/* Descriptor count requested at queue setup */
	uint8_t valid;		/* Non-zero when this entry holds saved state */
};
123
/* Per-port private data for the cnxk ethdev PMD, reachable via
 * eth_dev->data->dev_private (see cnxk_eth_pmd_priv()).
 */
struct cnxk_eth_dev {
	/* ROC NIX */
	struct roc_nix nix;

	/* ROC RQs, SQs and CQs */
	struct roc_nix_rq *rqs;
	struct roc_nix_sq *sqs;
	struct roc_nix_cq *cqs;

	/* Configured queue count */
	uint16_t nb_rxq;
	uint16_t nb_txq;
	uint8_t configured;

	/* Max macfilter entries */
	uint8_t max_mac_entries;

	/* CNXK_*_F flag bits, e.g. CNXK_LINK_CFG_IN_PROGRESS_F */
	uint16_t flags;
	/* Non-zero when ptype parsing is disabled for the fast path */
	uint8_t ptype_disable;
	/* Force scalar (non-vector) Rx/Tx burst routines */
	bool scalar_ena;

	/* Pointer back to rte */
	struct rte_eth_dev *eth_dev;

	/* HW capabilities / Limitations */
	union {
		struct {
			uint64_t cq_min_4k : 1;
		};
		uint64_t hwcap;
	};

	/* Rx and Tx offload capabilities */
	uint64_t rx_offload_capa;
	uint64_t tx_offload_capa;
	uint32_t speed_capa;
	/* Configured Rx and Tx offloads */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	/* Platform specific offload flags */
	uint16_t rx_offload_flags;
	uint16_t tx_offload_flags;

	/* ETHDEV RSS HF bitmask */
	uint64_t ethdev_rss_hf;

	/* Saved qconf before lf realloc */
	struct cnxk_eth_qconf *tx_qconf;
	struct cnxk_eth_qconf *rx_qconf;

	/* Rx burst for cleanup(Only Primary) */
	eth_rx_burst_t rx_pkt_burst_no_offload;

	/* Default mac address */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* LSO Tunnel format indices */
	uint64_t lso_tun_fmt;
};
183
/* Slow-path Rx queue context. Placed immediately before the fast-path
 * Rx queue structure in memory; recovered via cnxk_eth_rxq_to_sp().
 */
struct cnxk_eth_rxq_sp {
	struct cnxk_eth_dev *dev;    /* Owning port */
	struct cnxk_eth_qconf qconf; /* Saved setup-time configuration */
	uint16_t qid;		     /* Rx queue index */
} __plt_cache_aligned;
189
/* Slow-path Tx queue context. Placed immediately before the fast-path
 * Tx queue structure in memory; recovered via cnxk_eth_txq_to_sp().
 */
struct cnxk_eth_txq_sp {
	struct cnxk_eth_dev *dev;    /* Owning port */
	struct cnxk_eth_qconf qconf; /* Saved setup-time configuration */
	uint16_t qid;		     /* Tx queue index */
} __plt_cache_aligned;
195
196 static inline struct cnxk_eth_dev *
197 cnxk_eth_pmd_priv(struct rte_eth_dev *eth_dev)
198 {
199         return eth_dev->data->dev_private;
200 }
201
202 static inline struct cnxk_eth_rxq_sp *
203 cnxk_eth_rxq_to_sp(void *__rxq)
204 {
205         return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
206 }
207
208 static inline struct cnxk_eth_txq_sp *
209 cnxk_eth_txq_to_sp(void *__txq)
210 {
211         return ((struct cnxk_eth_txq_sp *)__txq) - 1;
212 }
213
/* Common ethdev ops table shared by the SoC-specific drivers */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Ops: PCI probe/remove and core ethdev callbacks */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
		   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
		      struct rte_eth_dev_info *dev_info);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
			    const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);

/* Build the Rx queue mbuf initializer value for this port */
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);

/* RSS: translate ethdev RSS hash flags to NIX flowkey config */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
				uint8_t rss_level);

/* Link: status callback plumbing and update */
void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
				 struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);

/* Lookup configuration (ptype tables / fastpath lookup memory) */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
void *cnxk_nix_fastpath_lookup_mem_get(void);

/* Devargs */
int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
			      struct cnxk_eth_dev *dev);
253
254 /* Inlines */
/* Detach an indirect mbuf 'm' from the direct mbuf backing it, turning
 * 'm' back into an empty direct mbuf with its own data room, then free
 * 'm' to its pool.
 *
 * Returns 0 when this call dropped the last reference on the direct
 * mbuf (which is then reset for reuse), 1 when other references remain.
 * NOTE(review): similar to rte_pktmbuf_detach() but additionally
 * reports whether the direct buffer became free — presumably so the Tx
 * free path can decide whether the buffer may be recycled; confirm
 * against callers (cnxk_nix_prefree_seg()).
 */
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	/* Re-point 'm' at its own embedded data room (it was borrowing
	 * md's buffer while indirect).
	 */
	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		/* Last reference gone: re-arm refcnt and clear metadata so
		 * the direct mbuf is valid for reuse.
		 */
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}
296
/* Pre-free check for a Tx segment: decide whether the mbuf may be
 * released/recycled by the Tx free path.
 *
 * Returns 0 when this call owned (or released) the last reference and
 * the mbuf was reset for reuse; returns 1 when other references remain
 * and the buffer must not be freed.
 */
static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		/* Sole owner fast path. Indirect mbufs must first be
		 * detached from the direct buffer they reference.
		 */
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		/* We atomically dropped the last reference: restore refcnt
		 * to 1 so the mbuf is valid when returned to the pool.
		 */
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf is having refcount more than 1 so need not to be freed */
	return 1;
}
320
321 #endif /* __CNXK_ETHDEV_H__ */