net/cnxk: support Rx/Tx burst mode query
dpdk.git: drivers/net/cnxk/cnxk_ethdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CNXK_ETHDEV_H__
#define __CNXK_ETHDEV_H__

#include <math.h>
#include <stdint.h>

#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "roc_api.h"

#define CNXK_ETH_DEV_PMD_VERSION "1.0"

/* Used for struct cnxk_eth_dev::flags */
#define CNXK_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)

/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
 * In Tx, space is always reserved for this in FRS.
 */
#define CNXK_NIX_MAX_VTAG_INS	   2
#define CNXK_NIX_MAX_VTAG_ACT_SIZE (4 * CNXK_NIX_MAX_VTAG_INS)

/* ETH_HLEN + ETH_FCS + 2 * VLAN_HLEN */
#define CNXK_NIX_L2_OVERHEAD (RTE_ETHER_HDR_LEN + \
			      RTE_ETHER_CRC_LEN + \
			      CNXK_NIX_MAX_VTAG_ACT_SIZE)
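/* With both VTAG insertions reserved, this works out to
 * 14 (RTE_ETHER_HDR_LEN) + 4 (RTE_ETHER_CRC_LEN) + 8 = 26 bytes.
 */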

#define CNXK_NIX_RX_MIN_DESC	    16
#define CNXK_NIX_RX_MIN_DESC_ALIGN  16
#define CNXK_NIX_RX_NB_SEG_MAX	    6
#define CNXK_NIX_RX_DEFAULT_RING_SZ 4096
/* Max supported SQB count */
#define CNXK_NIX_TX_MAX_SQB 512

/* If PTP is enabled, an additional SEND MEM DESC is required which
 * takes 2 words, hence at most 7 IOVA addresses are possible.
 */
#if defined(RTE_LIBRTE_IEEE1588)
#define CNXK_NIX_TX_NB_SEG_MAX 7
#else
#define CNXK_NIX_TX_NB_SEG_MAX 9
#endif

#define CNXK_NIX_TX_MSEG_SG_DWORDS                                             \
	((RTE_ALIGN_MUL_CEIL(CNXK_NIX_TX_NB_SEG_MAX, 3) / 3) +                 \
	 CNXK_NIX_TX_NB_SEG_MAX)
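/* One SG descriptor word covers up to three segment pointers, so the dword
 * count is ceil(nb_seg / 3) SG words plus one pointer per segment:
 * for 9 segments, 9 / 3 + 9 = 12 dwords; for 7 (PTP case), 3 + 7 = 10.
 */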

#define CNXK_NIX_RSS_L3_L4_SRC_DST                                             \
	(ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY | ETH_RSS_L4_SRC_ONLY |     \
	 ETH_RSS_L4_DST_ONLY)

#define CNXK_NIX_RSS_OFFLOAD                                                   \
	(ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP |               \
	 ETH_RSS_SCTP | ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD |                  \
	 CNXK_NIX_RSS_L3_L4_SRC_DST | ETH_RSS_LEVEL_MASK | ETH_RSS_C_VLAN)

#define CNXK_NIX_TX_OFFLOAD_CAPA                                               \
	(DEV_TX_OFFLOAD_MBUF_FAST_FREE | DEV_TX_OFFLOAD_MT_LOCKFREE |          \
	 DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT |             \
	 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |    \
	 DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |                 \
	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
	 DEV_TX_OFFLOAD_IPV4_CKSUM)

#define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
	 DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
	 DEV_RX_OFFLOAD_RSS_HASH)

#define RSS_IPV4_ENABLE                                                        \
	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)

#define RSS_IPV6_ENABLE                                                        \
	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
	 ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)

#define RSS_IPV6_EX_ENABLE                                                     \
	(ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)

#define RSS_MAX_LEVELS 3

#define RSS_IPV4_INDEX 0
#define RSS_IPV6_INDEX 1
#define RSS_TCP_INDEX  2
#define RSS_UDP_INDEX  3
#define RSS_SCTP_INDEX 4
#define RSS_DMAC_INDEX 5

/* Default mark value used when none is provided. */
#define CNXK_FLOW_ACTION_FLAG_DEFAULT 0xffff

#define PTYPE_NON_TUNNEL_WIDTH	  16
#define PTYPE_TUNNEL_WIDTH	  12
#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
#define PTYPE_TUNNEL_ARRAY_SZ	  BIT(PTYPE_TUNNEL_WIDTH)
#define PTYPE_ARRAY_SZ                                                         \
	((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
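/* The lookup memory therefore holds a 2^16-entry non-tunnel table plus a
 * 2^12-entry tunnel table, two bytes per entry.
 */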
/* Fastpath lookup */
#define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"

#define CNXK_NIX_UDP_TUN_BITMASK                                               \
	((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
	 (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
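/* PKT_TX_TUNNEL_* values occupy bits 45..48 of mbuf ol_flags, so shifting
 * right by 45 yields a small tunnel-type index (VXLAN = 1, GENEVE = 4);
 * the bitmask above thus marks the UDP-based tunnel types (value 0x12).
 */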

struct cnxk_eth_qconf {
	union {
		struct rte_eth_txconf tx;
		struct rte_eth_rxconf rx;
	} conf;
	struct rte_mempool *mp;
	uint16_t nb_desc;
	uint8_t valid;
};

struct cnxk_eth_dev {
	/* ROC NIX */
	struct roc_nix nix;

	/* ROC RQs, SQs and CQs */
	struct roc_nix_rq *rqs;
	struct roc_nix_sq *sqs;
	struct roc_nix_cq *cqs;

	/* Configured queue count */
	uint16_t nb_rxq;
	uint16_t nb_txq;
	uint8_t configured;

	/* Max macfilter entries */
	uint8_t max_mac_entries;
	bool dmac_filter_enable;

	uint16_t flags;
	uint8_t ptype_disable;
	bool scalar_ena;

	/* Pointer back to the rte_eth_dev */
	struct rte_eth_dev *eth_dev;

	/* HW capabilities / Limitations */
	union {
		struct {
			uint64_t cq_min_4k : 1;
		};
		uint64_t hwcap;
	};

	/* Rx and Tx offload capabilities */
	uint64_t rx_offload_capa;
	uint64_t tx_offload_capa;
	uint32_t speed_capa;
	/* Configured Rx and Tx offloads */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	/* Platform specific offload flags */
	uint16_t rx_offload_flags;
	uint16_t tx_offload_flags;

	/* ETHDEV RSS HF bitmask */
	uint64_t ethdev_rss_hf;

	/* Saved qconf before lf realloc */
	struct cnxk_eth_qconf *tx_qconf;
	struct cnxk_eth_qconf *rx_qconf;

	/* Rx burst for cleanup (primary process only) */
	eth_rx_burst_t rx_pkt_burst_no_offload;

	/* Default mac address */
	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];

	/* LSO Tunnel format indices */
	uint64_t lso_tun_fmt;
};

struct cnxk_eth_rxq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

struct cnxk_eth_txq_sp {
	struct cnxk_eth_dev *dev;
	struct cnxk_eth_qconf qconf;
	uint16_t qid;
} __plt_cache_aligned;

static inline struct cnxk_eth_dev *
cnxk_eth_pmd_priv(struct rte_eth_dev *eth_dev)
{
	return eth_dev->data->dev_private;
}

static inline struct cnxk_eth_rxq_sp *
cnxk_eth_rxq_to_sp(void *__rxq)
{
	return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
}

static inline struct cnxk_eth_txq_sp *
cnxk_eth_txq_to_sp(void *__txq)
{
	return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}
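
/* As the "- 1" arithmetic above implies, the slow-path (sp) struct is
 * expected to sit in the same allocation immediately before the platform
 * fast-path queue struct, so stepping one element back from the fast-path
 * pointer recovers it. A minimal usage sketch, assuming rxq is the
 * fast-path pointer handed to the datapath:
 *
 *	struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 *	struct cnxk_eth_dev *dev = rxq_sp->dev;
 *	uint16_t qid = rxq_sp->qid;
 */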

/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;

/* Ops */
int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
		   struct rte_pci_device *pci_dev);
int cnxk_nix_remove(struct rte_pci_device *pci_dev);
int cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
int cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr, uint32_t index,
			  uint32_t pool);
void cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
int cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
			  struct rte_ether_addr *addr);
int cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
int cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
		      struct rte_eth_dev_info *dev_info);
int cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
			       struct rte_eth_burst_mode *mode);
int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
int cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_tx_q_sz,
			    const struct rte_eth_txconf *tx_conf);
int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			    uint16_t nb_desc, uint16_t fp_rx_q_sz,
			    const struct rte_eth_rxconf *rx_conf,
			    struct rte_mempool *mp);
int cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid);
int cnxk_nix_dev_start(struct rte_eth_dev *eth_dev);

uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);

/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
				uint8_t rss_level);

/* Link */
void cnxk_nix_toggle_flag_link_cfg(struct cnxk_eth_dev *dev, bool set);
void cnxk_eth_dev_link_status_cb(struct roc_nix *nix,
				 struct roc_nix_link_info *link);
int cnxk_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);

/* Lookup configuration */
const uint32_t *cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev);
void *cnxk_nix_fastpath_lookup_mem_get(void);

/* Devargs */
int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
			      struct cnxk_eth_dev *dev);

/* Inlines */
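/* Detach an indirect mbuf: drop one reference on the direct mbuf backing
 * it, re-point the indirect mbuf at its own embedded buffer, and free it.
 * Returns 0 when the direct mbuf's refcount dropped to zero (its buffer
 * may now be released or recycled), 1 when other references remain.
 */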
static __rte_always_inline uint64_t
cnxk_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	struct rte_mbuf *md;
	uint16_t priv_size;
	uint16_t refcount;

	/* Update refcount of direct mbuf */
	md = rte_mbuf_from_indirect(m);
	refcount = rte_mbuf_refcnt_update(md, -1);

	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
	m->next = NULL;
	m->nb_segs = 1;

	/* Now indirect mbuf is safe to free */
	rte_pktmbuf_free(m);

	if (refcount == 0) {
		rte_mbuf_refcnt_set(md, 1);
		md->data_len = 0;
		md->ol_flags = 0;
		md->next = NULL;
		md->nb_segs = 1;
		return 0;
	} else {
		return 1;
	}
}

static __rte_always_inline uint64_t
cnxk_nix_prefree_seg(struct rte_mbuf *m)
{
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
		if (!RTE_MBUF_DIRECT(m))
			return cnxk_pktmbuf_detach(m);

		rte_mbuf_refcnt_set(m, 1);
		m->next = NULL;
		m->nb_segs = 1;
		return 0;
	}

	/* Mbuf refcount is still greater than 1, so it must not be freed */
	return 1;
}
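
/* A minimal usage sketch (hypothetical, not the driver's actual Tx path):
 * when DEV_TX_OFFLOAD_MBUF_FAST_FREE is not enabled, a Tx routine can use
 * the return value to decide whether hardware may release the buffer,
 * assuming a per-segment "don't free" bit in the SG entry (bit position
 * below is illustrative only):
 *
 *	uint64_t sg_u = 0;
 *	if (cnxk_nix_prefree_seg(m))
 *		sg_u |= BIT_ULL(55);	// retain buffer; still referenced
 */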

#endif /* __CNXK_ETHDEV_H__ */