net/cnxk: support Rx/Tx burst mode query
dpdk.git: drivers/net/cnxk/cnxk_ethdev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

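/* dev_infos_get() callback: report device limits, offload capabilities and
 * default configuration. The maximum Rx packet length is derived from the
 * hardware limit, adjusted for the CRC and the worst-case VTAG action size.
 */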
int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int max_rx_pktlen;

        max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
                         CNXK_NIX_MAX_VTAG_ACT_SIZE);

        devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
        devinfo->max_rx_pktlen = max_rx_pktlen;
        devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_mac_addrs = dev->max_mac_entries;
        devinfo->max_vfs = pci_dev->max_vfs;
        devinfo->max_mtu = devinfo->max_rx_pktlen -
                                (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
        devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

        devinfo->rx_offload_capa = dev->rx_offload_capa;
        devinfo->tx_offload_capa = dev->tx_offload_capa;
        devinfo->rx_queue_offload_capa = 0;
        devinfo->tx_queue_offload_capa = 0;

        devinfo->reta_size = dev->nix.reta_sz;
        devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
        devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

        devinfo->default_rxconf = (struct rte_eth_rxconf){
                .rx_drop_en = 0,
                .offloads = 0,
        };

        devinfo->default_txconf = (struct rte_eth_txconf){
                .offloads = 0,
        };

        devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
                .ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
        };

        devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = CNXK_NIX_RX_MIN_DESC,
                .nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
                .nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
        };
        devinfo->rx_desc_lim.nb_max =
                RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
                                    CNXK_NIX_RX_MIN_DESC_ALIGN);

        devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = 1,
                .nb_align = 1,
                .nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
        };

        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
        return 0;
}

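/* rx_burst_mode_get() callback: compose a human-readable description of the
 * active Rx burst function, i.e. the burst flavor (vector Neon vs. scalar)
 * followed by the enabled Rx offloads. Truncation is tolerated; whatever
 * fits in mode->info is returned.
 *
 * Illustrative application-side usage (a sketch, assuming an already
 * configured port_id/queue_id):
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */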
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
                {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
                {DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
                {DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
                {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
                {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
                {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
                {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
                {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
                {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
                {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
                {DEV_RX_OFFLOAD_SECURITY, " Security,"},
                {DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
                {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
                {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (dev->rx_offloads & rx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         rx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

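/* tx_burst_mode_get() callback: Tx counterpart of the above, reachable from
 * applications via rte_eth_tx_burst_mode_get().
 */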
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
                {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
                {DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
                {DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
                {DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
                {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
                {DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
                {DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
                {DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
                {DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
                {DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
                {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
                {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
                {DEV_TX_OFFLOAD_SECURITY, " Security,"},
                {DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (dev->tx_offloads & tx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         tx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

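/* mac_addr_set() callback: program the default MAC address. The address is
 * installed in the NPC first; for PFs it is also pushed to the MAC (CGX),
 * with the NPC entry rolled back if that fails.
 */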
int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Update MAC address at NPC */
        rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
        if (rc)
                goto exit;

        /* Update MAC address at CGX for PFs only */
        if (!roc_nix_is_vf_or_sdp(nix)) {
                rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
                if (rc) {
                        /* Roll back to the previous MAC address */
                        roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
                        goto exit;
                }
        }

        /* Update the MAC address in the cnxk ethernet device */
        rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
        return rc;
}

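/* mac_addr_add() callback: install an additional DMAC filter entry. As a
 * side effect, NPC-level promiscuous mode is enabled so the filter takes
 * effect, dev->dmac_filter_enable records that a filter is active, and the
 * ethdev-level promiscuous flag is cleared.
 */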
int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
                      uint32_t index, uint32_t pool)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        PLT_SET_USED(index);
        PLT_SET_USED(pool);

        rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
        if (rc < 0) {
                plt_err("Failed to add mac address, rc=%d", rc);
                return rc;
        }

        /* Enable promiscuous mode at NIX level */
        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;

        return 0;
}

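/* mac_addr_del() callback: remove the DMAC filter entry at the given index. */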
void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_mac_addr_del(nix, index);
        if (rc)
                plt_err("Failed to delete mac address, rc=%d", rc);
}

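/* mtu_set() callback: validate the requested MTU against hardware and
 * mbuf-segmentation limits, program it on Tx, then sync the same frame
 * size on Rx, rolling the Tx setting back if the Rx update fails.
 */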
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;
        uint32_t buffsz;

        /* Check if MTU is within the allowed range */
        if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
                plt_err("MTU is less than minimum");
                goto exit;
        }

        if ((frame_size - RTE_ETHER_CRC_LEN) >
            ((uint32_t)roc_nix_max_pkt_len(nix))) {
                plt_err("MTU is greater than maximum");
                goto exit;
        }

        buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
        old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

        /* Refuse an MTU that requires scattered-packet support when that
         * feature has not been enabled beforehand.
         */
        if (data->dev_started && frame_size > buffsz &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
                plt_err("Scatter offload is not enabled for MTU");
                goto exit;
        }

        /* Check <seg size> * <max_seg> >= max_frame */
        if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
            frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
                plt_err("Greater than maximum supported packet length");
                goto exit;
        }

        frame_size -= RTE_ETHER_CRC_LEN;

        /* Update MTU on Tx */
        rc = roc_nix_mac_mtu_set(nix, frame_size);
        if (rc) {
                plt_err("Failed to set MTU, rc=%d", rc);
                goto exit;
        }

        /* Sync the same frame size on Rx */
        rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
        if (rc) {
                /* Roll back to the old MTU */
                roc_nix_mac_mtu_set(nix,
                                    old_frame_size - RTE_ETHER_CRC_LEN);
                plt_err("Failed to set max Rx frame length, rc=%d", rc);
                goto exit;
        }

        frame_size += RTE_ETHER_CRC_LEN;

        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        /* Update max_rx_pkt_len */
        data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

exit:
        return rc;
}

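/* promiscuous_enable() callback: a no-op for VF/SDP; otherwise enable
 * promiscuous mode at the NPC and then at the MAC, undoing the NPC change
 * if the MAC update fails.
 */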
int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, false);
                return rc;
        }

        return 0;
}

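/* promiscuous_disable() callback: restore the NPC promiscuous setting
 * implied by the DMAC filter state, then disable promiscuous mode at the
 * MAC, rolling the NPC change back on failure.
 */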
int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, false);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
                return rc;
        }

        dev->dmac_filter_enable = false;
        return 0;
}

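/* allmulticast_enable() callback: accept all multicast traffic at the NPC. */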
int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, true, false);
}

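/* allmulticast_disable() callback: stop accepting all multicast while
 * passing the current ethdev promiscuous state through to the NPC config.
 */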
int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, false,
                                        eth_dev->data->promiscuous);
}