ethdev: remove jumbo offload flag
[dpdk.git] drivers/net/cnxk/cnxk_ethdev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

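/* Fill in device info: MTU bounds, queue and descriptor limits, and the
 * offload capabilities advertised for this port.
 */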
int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int max_rx_pktlen;

        max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
                         CNXK_NIX_MAX_VTAG_ACT_SIZE);

        devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
        devinfo->max_rx_pktlen = max_rx_pktlen;
        devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_mac_addrs = dev->max_mac_entries;
        devinfo->max_vfs = pci_dev->max_vfs;
        devinfo->max_mtu = devinfo->max_rx_pktlen -
                                (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
        devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

        devinfo->rx_offload_capa = dev->rx_offload_capa;
        devinfo->tx_offload_capa = dev->tx_offload_capa;
        devinfo->rx_queue_offload_capa = 0;
        devinfo->tx_queue_offload_capa = 0;

        devinfo->reta_size = dev->nix.reta_sz;
        devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
        devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

        devinfo->default_rxconf = (struct rte_eth_rxconf){
                .rx_drop_en = 0,
                .offloads = 0,
        };

        devinfo->default_txconf = (struct rte_eth_txconf){
                .offloads = 0,
        };

        devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
                .ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
        };

        devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = CNXK_NIX_RX_MIN_DESC,
                .nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
                .nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
        };
        devinfo->rx_desc_lim.nb_max =
                RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
                                    CNXK_NIX_RX_MIN_DESC_ALIGN);

        devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = 1,
                .nb_align = 1,
                .nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
        };

        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
        return 0;
}

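/* Report the Rx burst mode (scalar or vector) along with the list of Rx
 * offloads currently enabled on this port.
 */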
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
                {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
                {DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
                {DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
                {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
                {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
                {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
                {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
                {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
                {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
                {DEV_RX_OFFLOAD_SECURITY, " Security,"},
                {DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
                {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
                {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (dev->rx_offloads & rx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         rx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

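/* Report the Tx burst mode (scalar or vector) along with the list of Tx
 * offloads currently enabled on this port.
 */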
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
                {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
                {DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
                {DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
                {DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
                {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
                {DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
                {DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
                {DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
                {DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
                {DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
                {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
                {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
                {DEV_TX_OFFLOAD_SECURITY, " Security,"},
                {DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (dev->tx_offloads & tx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         tx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

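/* Map the flow control mode reported by ROC onto the ethdev FC mode */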
int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum rte_eth_fc_mode mode_map[] = {
                                           RTE_FC_NONE, RTE_FC_RX_PAUSE,
                                           RTE_FC_TX_PAUSE, RTE_FC_FULL
                                          };
        struct roc_nix *nix = &dev->nix;
        int mode;

        mode = roc_nix_fc_mode_get(nix);
        if (mode < 0)
                return mode;

        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
        fc_conf->mode = mode_map[mode];
        return 0;
}

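/* Enable or disable flow control (backpressure) on the CQ backing the
 * given Rx queue.
 */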
static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_fc_cfg fc_cfg;
        struct roc_nix_cq *cq;

        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        cq = &dev->cqs[qid];
        fc_cfg.cq_cfg_valid = true;
        fc_cfg.cq_cfg.enable = enable;
        fc_cfg.cq_cfg.rq = qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;

        return roc_nix_fc_config_set(nix, &fc_cfg);
}

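/* Only the flow control mode can be changed; high/low water marks, pause
 * time, MAC control frame forwarding and autoneg must be left at zero.
 */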
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum roc_nix_fc_mode mode_map[] = {
                                           ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
                                           ROC_NIX_FC_TX, ROC_NIX_FC_FULL
                                          };
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
        struct roc_nix *nix = &dev->nix;
        uint8_t rx_pause, tx_pause;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix)) {
                plt_err("Flow control configuration is not allowed on VFs");
                return -ENOTSUP;
        }

        if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
            fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
                plt_info("Only MODE configuration is supported");
                return -EINVAL;
        }

        if (fc_conf->mode == fc->mode)
                return 0;

        rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
                    (fc_conf->mode == RTE_FC_RX_PAUSE);
        tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
                    (fc_conf->mode == RTE_FC_TX_PAUSE);

        /* Check whether Tx pause frame is already enabled */
        if (fc->tx_pause ^ tx_pause) {
                if (roc_model_is_cn96_ax() && data->dev_started) {
                        /* On Ax, CQ should be in disabled state
                         * while setting flow control configuration.
                         */
                        plt_info("Stop port=%d to set flow control",
                                 data->port_id);
                        return 0;
                }

                for (i = 0; i < data->nb_rx_queues; i++) {
                        rc = nix_fc_cq_config_set(dev, i, tx_pause);
                        if (rc)
                                return rc;
                }
        }

        rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
        if (rc)
                return rc;

        fc->rx_pause = rx_pause;
        fc->tx_pause = tx_pause;
        fc->mode = fc_conf->mode;

        return rc;
}

int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
                      const struct rte_flow_ops **ops)
{
        RTE_SET_USED(eth_dev);

        *ops = &cnxk_flow_ops;
        return 0;
}

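/* Set the default MAC address at NPC and, for PFs, at CGX; the NPC entry
 * is rolled back if the CGX update fails.
 */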
int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Update MAC address at NPC */
        rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
        if (rc)
                goto exit;

        /* Update MAC address at CGX for PFs only */
        if (!roc_nix_is_vf_or_sdp(nix)) {
                rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
                if (rc) {
                        /* Roll back to the previous MAC address */
                        roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
                        goto exit;
                }
        }

        /* Update MAC address in the cnxk ethernet device */
        rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
        return rc;
}

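/* Add a DMAC filter entry for the given address; the index and pool
 * arguments are unused on this hardware.
 */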
int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
                      uint32_t index, uint32_t pool)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        PLT_SET_USED(index);
        PLT_SET_USED(pool);

        rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
        if (rc < 0) {
                plt_err("Failed to add MAC address, rc=%d", rc);
                return rc;
        }

        /* Enable promiscuous mode at NIX level */
        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;
        dev->dmac_filter_count++;

        return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_mac_addr_del(nix, index);
        if (rc)
                plt_err("Failed to delete MAC address, rc=%d", rc);

        dev->dmac_filter_count--;
}

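/* Validate the requested MTU against buffer and scatter constraints, then
 * apply it on the Tx (MAC) side and sync the same frame size on Rx.
 */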
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;
        uint32_t buffsz;

        frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;

        /* Check if MTU is within the allowed range */
        if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
                plt_err("MTU is less than minimum");
                goto exit;
        }

        if ((frame_size - RTE_ETHER_CRC_LEN) >
            ((uint32_t)roc_nix_max_pkt_len(nix))) {
                plt_err("MTU is greater than maximum");
                goto exit;
        }

        buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
        old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

        /* Refuse an MTU that requires scattered packet support
         * when that feature has not been enabled beforehand.
         */
        if (data->dev_started && frame_size > buffsz &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
                plt_err("Scatter offload is not enabled for this MTU");
                goto exit;
        }

        /* Check <seg size> * <max_seg> >= max_frame */
        if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
            frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
                plt_err("Greater than maximum supported packet length");
                goto exit;
        }

        frame_size -= RTE_ETHER_CRC_LEN;

        /* Update MTU on Tx */
        rc = roc_nix_mac_mtu_set(nix, frame_size);
        if (rc) {
                plt_err("Failed to set MTU, rc=%d", rc);
                goto exit;
        }

        /* Sync the same frame size on Rx */
        rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
        if (rc) {
                /* Roll back to the older MTU */
                roc_nix_mac_mtu_set(nix,
                                    old_frame_size - RTE_ETHER_CRC_LEN);
                plt_err("Failed to set max Rx frame length, rc=%d", rc);
                goto exit;
        }
exit:
        return rc;
}

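/* Promiscuous mode is configured at both NPC and MAC level; it is not
 * applicable on VF/SDP devices.
 */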
int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, true);
        if (rc) {
                plt_err("Failed to set up promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, true);
        if (rc) {
                plt_err("Failed to set up promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, false);
                return rc;
        }

        return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
        if (rc) {
                plt_err("Failed to set up promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, false);
        if (rc) {
                plt_err("Failed to set up promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
                return rc;
        }

        dev->dmac_filter_enable = false;
        return 0;
}

int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, true, false);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, false,
                                        eth_dev->data->promiscuous);
}

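/* Bring the link up and restart all Tx queues; not supported on VF/SDP */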
int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        rc = roc_nix_mac_link_state_set(nix, true);
        if (rc)
                goto exit;

        /* Start Tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_start(eth_dev, i);
                if (rc)
                        goto exit;
        }

exit:
        return rc;
}

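/* Stop all Tx queues before taking the link down; not supported on VF/SDP */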
int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        /* Stop Tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_stop(eth_dev, i);
                if (rc)
                        goto exit;
        }

        rc = roc_nix_mac_link_state_set(nix, false);
exit:
        return rc;
}

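/* Report the plugged module's SFF identifier and EEPROM size */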
int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
                         struct rte_eth_dev_module_info *modinfo)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        modinfo->type = eeprom_info.sff_id;
        modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
        return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
                           struct rte_dev_eeprom_info *info)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;

        if (!info->data || !info->length ||
            (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
                return rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
        return 0;
}

int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
                               uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
        RTE_SET_USED(eth_dev);

        if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
                return 0;

        return -ENOTSUP;
}

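/* There is no firmware version as such; report the NPC profile name in
 * its place, returning the required size if the buffer is too small.
 */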
int
cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
                        size_t fw_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const char *str = roc_npc_profile_name_get(&dev->npc);
        uint32_t size = strlen(str) + 1;

        if (fw_size > size)
                fw_size = size;

        rte_strlcpy(fw_version, str, fw_size);

        if (fw_size < size)
                return size;

        return 0;
}

void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_rxq_info *qinfo)
{
        void *rxq = eth_dev->data->rx_queues[qid];
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->mp = rxq_sp->qconf.mp;
        qinfo->scattered_rx = eth_dev->data->scattered_rx;
        qinfo->nb_desc = rxq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_txq_info *qinfo)
{
        void *txq = eth_dev->data->tx_queues[qid];
        struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->nb_desc = txq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}

/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
        RTE_SET_USED(txq);
        RTE_SET_USED(free_cnt);

        return 0;
}

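/* Dump NIX LF registers; when data is NULL, only the register count and
 * width are returned.
 */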
int
cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint64_t *data = regs->data;
        int rc = -ENOTSUP;

        if (data == NULL) {
                rc = roc_nix_lf_get_reg_count(nix);
                if (rc > 0) {
                        regs->length = rc;
                        regs->width = 8;
                        rc = 0;
                }
                return rc;
        }

        if (!regs->length ||
            regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
                return roc_nix_lf_reg_dump(nix, data);

        return rc;
}

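/* Update the RSS redirection table; the requested size must match the
 * table size supported by the hardware.
 */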
int
cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
                     struct rte_eth_rss_reta_entry64 *reta_conf,
                     uint16_t reta_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint16_t reta[ROC_NIX_RSS_RETA_MAX];
        struct roc_nix *nix = &dev->nix;
        int i, j, rc = -EINVAL, idx = 0;

        if (reta_size != dev->nix.reta_sz) {
                plt_err("Size of hash lookup table configured (%d) does not "
                        "match the number supported by hardware (%d)",
                        reta_size, dev->nix.reta_sz);
                goto fail;
        }

        /* Copy RETA table */
        for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta[idx] = reta_conf[i].reta[j];
                        idx++;
                }
        }

        return roc_nix_rss_reta_set(nix, 0, reta);

fail:
        return rc;
}

int
cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint16_t reta[ROC_NIX_RSS_RETA_MAX];
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL, i, j, idx = 0;

        if (reta_size != dev->nix.reta_sz) {
                plt_err("Size of hash lookup table configured (%d) does not "
                        "match the number supported by hardware (%d)",
                        reta_size, dev->nix.reta_sz);
                goto fail;
        }

        rc = roc_nix_rss_reta_get(nix, 0, reta);
        if (rc)
                goto fail;

        /* Copy RETA table */
        for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = reta[idx];
                        idx++;
                }
        }

        return 0;

fail:
        return rc;
}

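/* Update the RSS key and/or the hash (flow key) configuration */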
int
cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_conf *rss_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint8_t rss_hash_level;
        uint32_t flowkey_cfg;
        int rc = -EINVAL;
        uint8_t alg_idx;

        if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
                plt_err("Hash key size mismatch %d vs %d",
                        rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
                goto fail;
        }

        if (rss_conf->rss_key)
                roc_nix_rss_key_set(nix, rss_conf->rss_key);

        rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
        if (rss_hash_level)
                rss_hash_level -= 1;
        flowkey_cfg =
                cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);

        rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
                                     ROC_NIX_RSS_GROUP_DEFAULT,
                                     ROC_NIX_RSS_MCAM_IDX_DEFAULT);
        if (rc) {
                plt_err("Failed to set RSS hash function rc=%d", rc);
                return rc;
        }

fail:
        return rc;
}

int
cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (rss_conf->rss_key)
                roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);

        rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
        rss_conf->rss_hf = dev->ethdev_rss_hf;

        return 0;
}

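/* Replace the multicast filter list: flush all existing multicast DMAC
 * entries, then install the new set.
 */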
int
cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
                                struct rte_ether_addr *mc_addr_set,
                                uint32_t nb_mc_addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct rte_ether_addr null_mac_addr;
        struct roc_nix *nix = &dev->nix;
        int rc, index;
        uint32_t i;

        memset(&null_mac_addr, 0, sizeof(null_mac_addr));

        /* All configured multicast filters should be flushed first */
        for (i = 0; i < dev->max_mac_entries; i++) {
                if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
                        rc = roc_nix_mac_addr_del(nix, i);
                        if (rc) {
                                plt_err("Failed to flush mcast address, rc=%d",
                                        rc);
                                return rc;
                        }

                        dev->dmac_filter_count--;
                        /* Update address in NIC data structure */
                        rte_ether_addr_copy(&null_mac_addr,
                                            &data->mac_addrs[i]);
                }
        }

        if (!mc_addr_set || !nb_mc_addr)
                return 0;

        /* Check for available space */
        if (nb_mc_addr >
            ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
                plt_err("No space is available to add multicast filters");
                return -ENOSPC;
        }

        /* Install the new multicast addresses */
        for (i = 0; i < nb_mc_addr; i++) {
                index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
                if (index < 0) {
                        plt_err("Failed to add mcast MAC address, rc=%d",
                                index);
                        return index;
                }

                dev->dmac_filter_count++;
                /* Update address in NIC data structure */
                rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
        }

        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;

        return 0;
}