net/cnxk: add Rx burst for CN10K
drivers/net/cnxk/cnxk_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CNXK_ETHDEV_H__
#define __CNXK_ETHDEV_H__

#include <math.h>
#include <stdint.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "roc_api.h"

#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx, space is always reserved for this in the FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS      2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN + ETH_FCS + 2 * VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8)

#define CNXK_NIX_RX_MIN_DESC        16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX      6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB 512

/* If PTP is enabled, an additional SEND MEM DESC is required, which
 * takes two words; hence a maximum of 7 iova addresses is possible.
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

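/* One NIX SEND SG subdescriptor header covers up to three segments,
 * so the SG area needs ceil(nb_segs / 3) header dwords plus one iova
 * dword per segment.
 */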
#define CNXK_NIX_TX_MSEG_SG_DWORDS                                             \
        ((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) +                 \
         CNXK_NIX_TX_NB_SEG_MAX)

#define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
        (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
         ETH_RSS_L4_DST_ONLY)

#define CNXK_NIX_RSS_OFFLOAD                                                   \
        (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
         ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
         CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)

#define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
        (DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
         DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
         DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
         DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
         DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
         DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
         DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
         DEV_TX_OFFLOAD_IPV4_CKSUM)

#define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
        (DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
         DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
         DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
         DEV_RX_OFFLOAD_RSS_HASH)

#define RSS_IPV4_ENABLE                                                        \
        (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
         ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE                                                        \
        (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
         ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE                                                     \
        (ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)

#define RSS_MAX_LEVELS 3

#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff

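/* Ptype lookup tables: a 16-bit key for non-tunnel packets and a
 * 12-bit key for tunnel packets; PTYPE_ARRAY_SZ is the combined size
 * of both tables in bytes.
 */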
#define PTYPE_NON_TUNNEL_WIDTH    16
#define PTYPE_TUNNEL_WIDTH        12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ     BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ                                                         \
        ((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

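/* PKT_TX_TUNNEL_* types occupy bits 45+ of ol_flags; shifting right
 * by 45 yields a small index, used here to build a bitmask of the
 * UDP-based tunnel types (VXLAN and GENEVE).
 */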
#define CNXK_NIX_UDP_TUN_BITMASK                                               \
        ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
         (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))

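/* Per-queue configuration saved at setup time so that queues can be
 * re-created after a NIX LF reallocation (see tx_qconf/rx_qconf).
 */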
struct cnxk_eth_qconf {
        union {
                struct rte_eth_txconf tx;
                struct rte_eth_rxconf rx;
        } conf;
        struct rte_mempool *mp;
        uint16_t nb_desc;
        uint8_t valid;
};

struct cnxk_eth_dev {
        /* ROC NIX */
        struct roc_nix nix;

        /* ROC RQs, SQs and CQs */
        struct roc_nix_rq *rqs;
        struct roc_nix_sq *sqs;
        struct roc_nix_cq *cqs;

        /* Configured queue count */
        uint16_t nb_rxq;
        uint16_t nb_txq;
        uint8_t configured;

        /* Max MAC filter entries */
        uint8_t max_mac_entries;

        uint16_t flags;
        uint8_t ptype_disable;
        bool scalar_ena;

        /* Pointer back to the rte ethdev */
        struct rte_eth_dev *eth_dev;

        /* HW capabilities / limitations */
        union {
                struct {
                        uint64_t cq_min_4k : 1;
                };
                uint64_t hwcap;
        };

        /* Rx and Tx offload capabilities */
        uint64_t rx_offload_capa;
        uint64_t tx_offload_capa;
        uint32_t speed_capa;
        /* Configured Rx and Tx offloads */
        uint64_t rx_offloads;
        uint64_t tx_offloads;
        /* Platform-specific offload flags */
        uint16_t rx_offload_flags;
        uint16_t tx_offload_flags;

        /* ETHDEV RSS HF bitmask */
        uint64_t ethdev_rss_hf;

        /* Saved qconf before LF realloc */
        struct cnxk_eth_qconf *tx_qconf;
        struct cnxk_eth_qconf *rx_qconf;

        /* Rx burst for cleanup (primary process only) */
        eth_rx_burst_t rx_pkt_burst_no_offload;

        /* Default MAC address */
        uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

        /* LSO tunnel format indices */
        uint64_t lso_tun_fmt;
};

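/* Slow-path queue state kept out of the fast-path queue structures */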
struct cnxk_eth_rxq_sp {
        struct cnxk_eth_dev *dev;
        struct cnxk_eth_qconf qconf;
        uint16_t qid;
} __plt_cache_aligned;

struct cnxk_eth_txq_sp {
        struct cnxk_eth_dev *dev;
        struct cnxk_eth_qconf qconf;
        uint16_t qid;
} __plt_cache_aligned;

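/* Fetch the cnxk private device data from an ethdev */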
static inline struct cnxk_eth_dev *
cnxk_eth_pmd_priv(struct rte_eth_dev *eth_dev)
{
        return eth_dev->data->dev_private;
}

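/* Each slow-path struct is allocated immediately in front of its
 * fast-path queue structure, so stepping back one element from the
 * fast-path pointer recovers it.
 */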
static inline struct cnxk_eth_rxq_sp *
cnxk_eth_rxq_to_sp(void *__rxq)
{
        return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
}

static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
        return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}

/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
                   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
                      struct rte_eth_dev_info *dev_info);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_tx_q_sz,
                            const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                            uint16_t nb_desc, uint16_t fp_rx_q_sz,
                            const struct rte_eth_rxconf *rx_conf,
                            struct rte_mempool *mp);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);

uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);

/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
                                uint8_t rss_level);

/* Link */
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
                                 struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);

/* Lookup configuration */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
void *cnxk_nix_fastpath_lookup_mem_get(void);

/* Devargs */
int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
                              struct cnxk_eth_dev *dev);

/* Inlines */
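/* Detach an indirect mbuf: free the indirect mbuf back to its pool
 * and drop the direct mbuf's refcount. Returns 0 if the direct mbuf's
 * refcount reached zero (it has been reset and its buffer may be
 * recycled), 1 if it is still referenced.
 */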
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
        struct rte_mempool *mp = m->pool;
        uint32_t mbuf_size, buf_len;
        struct rte_mbuf *md;
        uint16_t priv_size;
        uint16_t refcount;

        /* Update refcount of direct mbuf */
        md = rte_mbuf_from_indirect(m);
        refcount = rte_mbuf_refcnt_update(md, -1);

        priv_size = rte_pktmbuf_priv_size(mp);
        mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
        buf_len = rte_pktmbuf_data_room_size(mp);

        m->priv_size = priv_size;
        m->buf_addr = (char *)m + mbuf_size;
        m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
        m->buf_len = (uint16_t)buf_len;
        rte_pktmbuf_reset_headroom(m);
        m->data_len = 0;
        m->ol_flags = 0;
        m->next = NULL;
        m->nb_segs = 1;

        /* The indirect mbuf is now safe to free */
        rte_pktmbuf_free(m);

        if (refcount == 0) {
                rte_mbuf_refcnt_set(md, 1);
                md->data_len = 0;
                md->ol_flags = 0;
                md->next = NULL;
                md->nb_segs = 1;
                return 0;
        } else {
                return 1;
        }
}

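/* Tx path pre-free check for a segment: returns 0 when the underlying
 * buffer may be released to the mempool, 1 when the mbuf is still
 * referenced elsewhere and must be kept.
 */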
static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m)
{
        if (likely(rte_mbuf_refcnt_read(m) == 1)) {
                if (!RTE_MBUF_DIRECT(m))
                        return cnxk_pktmbuf_detach(m);

                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
                if (!RTE_MBUF_DIRECT(m))
                        return cnxk_pktmbuf_detach(m);

                rte_mbuf_refcnt_set(m, 1);
                m->next = NULL;
                m->nb_segs = 1;
                return 0;
        }

        /* Mbuf refcount is greater than 1, so it must not be freed here */
        return 1;
}

#endif /* __CNXK_ETHDEV_H__ */