net/cnxk: get PTP status
drivers/net/cnxk/cnxk_ethdev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

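/* Report device capabilities: MTU bounds, queue and MAC address limits,
 * offload capabilities, RSS parameters and descriptor limits.
 */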
int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int max_rx_pktlen;

        max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
                         CNXK_NIX_MAX_VTAG_ACT_SIZE);

        devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
        devinfo->max_rx_pktlen = max_rx_pktlen;
        devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_mac_addrs = dev->max_mac_entries;
        devinfo->max_vfs = pci_dev->max_vfs;
        devinfo->max_mtu = devinfo->max_rx_pktlen -
                                (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
        devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

        devinfo->rx_offload_capa = dev->rx_offload_capa;
        devinfo->tx_offload_capa = dev->tx_offload_capa;
        devinfo->rx_queue_offload_capa = 0;
        devinfo->tx_queue_offload_capa = 0;

        devinfo->reta_size = dev->nix.reta_sz;
        devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
        devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

        devinfo->default_rxconf = (struct rte_eth_rxconf){
                .rx_drop_en = 0,
                .offloads = 0,
        };

        devinfo->default_txconf = (struct rte_eth_txconf){
                .offloads = 0,
        };

        devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
                .ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
        };

        devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = CNXK_NIX_RX_MIN_DESC,
                .nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
                .nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
        };
        devinfo->rx_desc_lim.nb_max =
                RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
                                    CNXK_NIX_RX_MIN_DESC_ALIGN);

        devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = 1,
                .nb_align = 1,
                .nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
        };

        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
        return 0;
}

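/* Describe the active Rx burst mode: vector (Neon) vs scalar path plus
 * the enabled Rx offloads, formatted into mode->info.
 */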
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
                {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
                {DEV_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {DEV_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
                {DEV_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
                {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {DEV_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
                {DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
                {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
                {DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
                {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
                {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
                {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
                {DEV_RX_OFFLOAD_SECURITY, " Security,"},
                {DEV_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
                {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
                {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (dev->rx_offloads & rx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         rx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

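/* Describe the active Tx burst mode: vector (Neon) vs scalar path plus
 * the enabled Tx offloads, formatted into mode->info.
 */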
int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
                {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
                {DEV_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
                {DEV_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
                {DEV_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
                {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {DEV_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
                {DEV_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
                {DEV_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
                {DEV_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
                {DEV_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
                {DEV_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
                {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
                {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
                {DEV_TX_OFFLOAD_SECURITY, " Security,"},
                {DEV_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
                {DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (dev->tx_offloads & tx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         tx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

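/* Map the flow control mode reported by ROC to the ethdev RTE_FC_*
 * representation.
 */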
int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum rte_eth_fc_mode mode_map[] = {
                                           RTE_FC_NONE, RTE_FC_RX_PAUSE,
                                           RTE_FC_TX_PAUSE, RTE_FC_FULL
                                          };
        struct roc_nix *nix = &dev->nix;
        int mode;

        mode = roc_nix_fc_mode_get(nix);
        if (mode < 0)
                return mode;

        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
        fc_conf->mode = mode_map[mode];
        return 0;
}

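/* Enable or disable flow control on one completion queue, using the
 * CQ drop threshold as the backpressure trigger level.
 */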
static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_fc_cfg fc_cfg;
        struct roc_nix_cq *cq;

        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        cq = &dev->cqs[qid];
        fc_cfg.cq_cfg_valid = true;
        fc_cfg.cq_cfg.enable = enable;
        fc_cfg.cq_cfg.rq = qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;

        return roc_nix_fc_config_set(nix, &fc_cfg);
}

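/* Apply a new flow control mode. Only the mode itself is configurable;
 * water marks, pause time, autoneg and MAC control frame forwarding are
 * not supported.
 */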
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum roc_nix_fc_mode mode_map[] = {
                                           ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
                                           ROC_NIX_FC_TX, ROC_NIX_FC_FULL
                                          };
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
        struct roc_nix *nix = &dev->nix;
        uint8_t rx_pause, tx_pause;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix)) {
                plt_err("Flow control configuration is not allowed on VFs");
                return -ENOTSUP;
        }

        if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
            fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
                plt_info("Only MODE configuration is supported");
                return -EINVAL;
        }

        if (fc_conf->mode == fc->mode)
                return 0;

        rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
                    (fc_conf->mode == RTE_FC_RX_PAUSE);
        tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
                    (fc_conf->mode == RTE_FC_TX_PAUSE);

        /* Check if TX pause frame is already enabled or not */
        if (fc->tx_pause ^ tx_pause) {
                if (roc_model_is_cn96_ax() && data->dev_started) {
                        /* On Ax, CQ should be in disabled state
                         * while setting flow control configuration.
                         */
                        plt_info("Stop the port=%d for setting flow control",
                                 data->port_id);
                        return 0;
                }

                for (i = 0; i < data->nb_rx_queues; i++) {
                        rc = nix_fc_cq_config_set(dev, i, tx_pause);
                        if (rc)
                                return rc;
                }
        }

        rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
        if (rc)
                return rc;

        fc->rx_pause = rx_pause;
        fc->tx_pause = tx_pause;
        fc->mode = fc_conf->mode;

        return rc;
}

int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
                      const struct rte_flow_ops **ops)
{
        RTE_SET_USED(eth_dev);

        *ops = &cnxk_flow_ops;
        return 0;
}

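/* Set the primary MAC address at NPC and, for PFs, also at CGX; the NPC
 * entry is rolled back if the CGX update fails.
 */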
int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Update mac address at NPC */
        rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
        if (rc)
                goto exit;

        /* Update mac address at CGX for PFs only */
        if (!roc_nix_is_vf_or_sdp(nix)) {
                rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
                if (rc) {
                        /* Rollback to previous mac address */
                        roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
                        goto exit;
                }
        }

        /* Update mac address to cnxk ethernet device */
        rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
        return rc;
}

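/* Add a secondary MAC address as a DMAC filter entry; promiscuous mode
 * is then enabled at the NIX level for the filter to take effect.
 */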
int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
                      uint32_t index, uint32_t pool)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        PLT_SET_USED(index);
        PLT_SET_USED(pool);

        rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
        if (rc < 0) {
                plt_err("Failed to add mac address, rc=%d", rc);
                return rc;
        }

        /* Enable promiscuous mode at NIX level */
        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;

        return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_mac_addr_del(nix, index);
        if (rc)
                plt_err("Failed to delete mac address, rc=%d", rc);
}

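/* Update the MTU. Validates the resulting frame size against HW and
 * scatter limits, programs the Tx MTU first and then syncs the Rx
 * maximum frame length, rolling back on failure.
 */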
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;
        uint32_t buffsz;

        frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;

        /* Check if MTU is within the allowed range */
        if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
                plt_err("MTU is less than minimum");
                goto exit;
        }

        if ((frame_size - RTE_ETHER_CRC_LEN) >
            ((uint32_t)roc_nix_max_pkt_len(nix))) {
                plt_err("MTU is greater than maximum");
                goto exit;
        }

        buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
        old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

        /* Refuse MTU that requires the support of scattered packets
         * when this feature has not been enabled before.
         */
        if (data->dev_started && frame_size > buffsz &&
            !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
                plt_err("Scatter offload is not enabled for this MTU");
                goto exit;
        }

        /* Check <seg size> * <max_seg> >= max_frame */
        if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
            frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
                plt_err("Greater than maximum supported packet length");
                goto exit;
        }

        frame_size -= RTE_ETHER_CRC_LEN;

        /* Update mtu on Tx */
        rc = roc_nix_mac_mtu_set(nix, frame_size);
        if (rc) {
                plt_err("Failed to set MTU, rc=%d", rc);
                goto exit;
        }

        /* Sync same frame size on Rx */
        rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
        if (rc) {
                /* Rollback to older mtu */
                roc_nix_mac_mtu_set(nix,
                                    old_frame_size - RTE_ETHER_CRC_LEN);
                plt_err("Failed to set max Rx frame length, rc=%d", rc);
                goto exit;
        }

        frame_size += RTE_ETHER_CRC_LEN;

        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        /* Update max_rx_pkt_len */
        data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

exit:
        return rc;
}

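/* Enable promiscuous mode at both the NPC and MAC levels; a no-op on
 * VF and SDP interfaces.
 */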
int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, false);
                return rc;
        }

        return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, false);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
                return rc;
        }

        dev->dmac_filter_enable = false;
        return 0;
}

int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, true, false);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, false,
                                        eth_dev->data->promiscuous);
}

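/* Bring the link up and start all Tx queues. Link state control is not
 * supported on VF and SDP interfaces.
 */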
int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        rc = roc_nix_mac_link_state_set(nix, true);
        if (rc)
                goto exit;

        /* Start Tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_start(eth_dev, i);
                if (rc)
                        goto exit;
        }

exit:
        return rc;
}

int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        /* Stop Tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_stop(eth_dev, i);
                if (rc)
                        goto exit;
        }

        rc = roc_nix_mac_link_state_set(nix, false);
exit:
        return rc;
}

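/* Report the plugged module's SFF identifier and EEPROM length. */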
int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
                         struct rte_eth_dev_module_info *modinfo)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        modinfo->type = eeprom_info.sff_id;
        modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
        return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
                           struct rte_dev_eeprom_info *info)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;

        if (!info->data || !info->length ||
            (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
                return rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
        return 0;
}

int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
                               uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
        RTE_SET_USED(eth_dev);

        if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
                return 0;

        return -ENOTSUP;
}

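/* Report the NPC MKEX profile name in place of a firmware version
 * string; returns the required buffer size if fw_size is too small.
 */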
int
cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
                        size_t fw_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const char *str = roc_npc_profile_name_get(&dev->npc);
        uint32_t size = strlen(str) + 1;

        if (fw_size > size)
                fw_size = size;

        rte_strlcpy(fw_version, str, fw_size);

        if (fw_size < size)
                return size;

        return 0;
}

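/* Fill Rx (and, below, Tx) queue info from the queue's software
 * private area.
 */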
void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_rxq_info *qinfo)
{
        void *rxq = eth_dev->data->rx_queues[qid];
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->mp = rxq_sp->qconf.mp;
        qinfo->scattered_rx = eth_dev->data->scattered_rx;
        qinfo->nb_desc = rxq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_txq_info *qinfo)
{
        void *txq = eth_dev->data->tx_queues[qid];
        struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->nb_desc = txq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}

/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
        RTE_SET_USED(txq);
        RTE_SET_USED(free_cnt);

        return 0;
}

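/* Dump NIX LF registers. With a NULL data pointer, only the register
 * count and width are reported.
 */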
int
cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint64_t *data = regs->data;
        int rc = -ENOTSUP;

        if (data == NULL) {
                rc = roc_nix_lf_get_reg_count(nix);
                if (rc > 0) {
                        regs->length = rc;
                        regs->width = 8;
                        rc = 0;
                }
                return rc;
        }

        if (!regs->length ||
            regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
                return roc_nix_lf_reg_dump(nix, data);

        return rc;
}