net/cnxk: support priority flow control
drivers/net/cnxk/cnxk_ethdev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

int
cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        int max_rx_pktlen;

        max_rx_pktlen = (roc_nix_max_pkt_len(&dev->nix) + RTE_ETHER_CRC_LEN -
                         CNXK_NIX_MAX_VTAG_ACT_SIZE);

        devinfo->min_rx_bufsize = NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN;
        devinfo->max_rx_pktlen = max_rx_pktlen;
        devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
        devinfo->max_mac_addrs = dev->max_mac_entries;
        devinfo->max_vfs = pci_dev->max_vfs;
        devinfo->max_mtu = devinfo->max_rx_pktlen -
                                (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
        devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;

        devinfo->rx_offload_capa = dev->rx_offload_capa;
        devinfo->tx_offload_capa = dev->tx_offload_capa;
        devinfo->rx_queue_offload_capa = 0;
        devinfo->tx_queue_offload_capa = 0;

        devinfo->reta_size = dev->nix.reta_sz;
        devinfo->hash_key_size = ROC_NIX_RSS_KEY_LEN;
        devinfo->flow_type_rss_offloads = CNXK_NIX_RSS_OFFLOAD;

        devinfo->default_rxconf = (struct rte_eth_rxconf){
                .rx_drop_en = 0,
                .offloads = 0,
        };

        devinfo->default_txconf = (struct rte_eth_txconf){
                .offloads = 0,
        };

        devinfo->default_rxportconf = (struct rte_eth_dev_portconf){
                .ring_size = CNXK_NIX_RX_DEFAULT_RING_SZ,
        };

        devinfo->rx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = CNXK_NIX_RX_MIN_DESC,
                .nb_align = CNXK_NIX_RX_MIN_DESC_ALIGN,
                .nb_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_RX_NB_SEG_MAX,
        };
        devinfo->rx_desc_lim.nb_max =
                RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
                                    CNXK_NIX_RX_MIN_DESC_ALIGN);

        devinfo->tx_desc_lim = (struct rte_eth_desc_lim){
                .nb_max = UINT16_MAX,
                .nb_min = 1,
                .nb_align = 1,
                .nb_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
                .nb_mtu_seg_max = CNXK_NIX_TX_NB_SEG_MAX,
        };

        devinfo->speed_capa = dev->speed_capa;
        devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP |
                            RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        return 0;
}

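/* Rx/Tx burst mode reporting: the tables below are rendered into mode->info
 * for rte_eth_rx_burst_mode_get()/rte_eth_tx_burst_mode_get().
 * dev->scalar_ena selects between the vector (Neon) and scalar datapath
 * labels, then one string per enabled offload is appended for as long as it
 * fits in the info buffer.
 */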
int
cnxk_nix_rx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
                {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN Strip,"},
                {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {RTE_ETH_RX_OFFLOAD_TCP_LRO, " TCP LRO,"},
                {RTE_ETH_RX_OFFLOAD_QINQ_STRIP, " QinQ VLAN Strip,"},
                {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {RTE_ETH_RX_OFFLOAD_MACSEC_STRIP, " MACsec Strip,"},
                {RTE_ETH_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
                {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
                {RTE_ETH_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
                {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
                {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
                {RTE_ETH_RX_OFFLOAD_SECURITY, " Security,"},
                {RTE_ETH_RX_OFFLOAD_KEEP_CRC, " Keep CRC,"},
                {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP,"},
                {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
                                                 "Scalar, Rx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Rx offload info */
        for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
                if (dev->rx_offloads & rx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         rx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

int
cnxk_nix_tx_burst_mode_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
                           struct rte_eth_burst_mode *mode)
{
        ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct burst_info {
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
                {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
                {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " Inner IPv4 Checksum,"},
                {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_TCP_TSO, " TCP TSO,"},
                {RTE_ETH_TX_OFFLOAD_UDP_TSO, " UDP TSO,"},
                {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPv4 Checksum,"},
                {RTE_ETH_TX_OFFLOAD_QINQ_INSERT, " QinQ VLAN Insert,"},
                {RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, " VXLAN Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO, " GRE Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, " IP-in-IP Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO, " Geneve Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_MACSEC_INSERT, " MACsec Insert,"},
                {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " Multi Thread Lockless Tx,"},
                {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"},
                {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " H/W MBUF Free,"},
                {RTE_ETH_TX_OFFLOAD_SECURITY, " Security,"},
                {RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO, " UDP Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_IP_TNL_TSO, " IP Tunnel TSO,"},
                {RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP Checksum,"},
                {RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP, " Timestamp,"}
        };
        static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
                                                 "Scalar, Tx Offloads:"
        };
        uint32_t i;

        PLT_SET_USED(queue_id);

        /* Update burst mode info */
        rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
                         str_size - bytes);
        if (rc < 0)
                goto done;

        bytes += rc;

        /* Update Tx offload info */
        for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
                if (dev->tx_offloads & tx_offload_map[i].flags) {
                        rc = rte_strscpy(mode->info + bytes,
                                         tx_offload_map[i].output,
                                         str_size - bytes);
                        if (rc < 0)
                                goto done;

                        bytes += rc;
                }
        }

done:
        return 0;
}

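/* 802.3x link flow control. roc_nix_fc_mode_get() reports the current mode
 * as an index (none/rx/tx/full) which maps one-to-one onto rte_eth_fc_mode
 * via mode_map below.
 */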
int
cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum rte_eth_fc_mode mode_map[] = {
                                           RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
                                           RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
                                          };
        struct roc_nix *nix = &dev->nix;
        int mode;

        mode = roc_nix_fc_mode_get(nix);
        if (mode < 0)
                return mode;

        memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
        fc_conf->mode = mode_map[mode];
        return 0;
}

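/* Enable/disable backpressure on one completion queue. Once the CQ fills
 * past its drop threshold the HW asserts backpressure on the mapped
 * channel, which is what ultimately triggers pause frame generation.
 */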
static int
nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
{
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_fc_cfg fc_cfg;
        struct roc_nix_cq *cq;

        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        cq = &dev->cqs[qid];
        fc_cfg.type = ROC_NIX_FC_CQ_CFG;
        fc_cfg.cq_cfg.enable = enable;
        /* Map all CQs to last channel */
        fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
        fc_cfg.cq_cfg.rq = qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;

        return roc_nix_fc_config_set(nix, &fc_cfg);
}

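/* Note the asymmetry below: generating pause frames (tx_pause) requires CQ
 * backpressure on the Rx queues, while honoring received pause frames
 * (rx_pause) is a traffic manager setting on the Tx queues. Hence tx_pause
 * drives the Rx-queue loop and rx_pause drives the Tx-queue loop.
 */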
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                       struct rte_eth_fc_conf *fc_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        enum roc_nix_fc_mode mode_map[] = {
                                           ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
                                           ROC_NIX_FC_TX, ROC_NIX_FC_FULL
                                          };
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_fc_cfg *fc = &dev->fc_cfg;
        struct roc_nix *nix = &dev->nix;
        struct cnxk_eth_rxq_sp *rxq;
        struct cnxk_eth_txq_sp *txq;
        uint8_t rx_pause, tx_pause;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix) && !roc_nix_is_lbk(nix)) {
                plt_err("Flow control configuration is not allowed on VFs");
                return -ENOTSUP;
        }

        if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
            fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
                plt_info("Only MODE configuration is supported");
                return -EINVAL;
        }

        if (fc_conf->mode == fc->mode)
                return 0;

        rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
                    (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
        tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
                    (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);

        /* Check if TX pause frame is already enabled or not */
        if (fc->tx_pause ^ tx_pause) {
                if (roc_model_is_cn96_ax() && data->dev_started) {
                        /* On Ax, CQ should be in disabled state
                         * while setting flow control configuration.
                         */
                        plt_info("Stop the port=%d for setting flow control",
                                 data->port_id);
                        return 0;
                }

                for (i = 0; i < data->nb_rx_queues; i++) {
                        rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
                              1;
                        rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
                        if (rc)
                                return rc;
                }
        }

        /* Check if RX pause frame is enabled or not */
        if (fc->rx_pause ^ rx_pause) {
                for (i = 0; i < data->nb_tx_queues; i++) {
                        struct roc_nix_fc_cfg fc_cfg;

                        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
                        txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
                              1;
                        fc_cfg.type = ROC_NIX_FC_TM_CFG;
                        fc_cfg.tm_cfg.sq = txq->qid;
                        fc_cfg.tm_cfg.enable = !!rx_pause;
                        rc = roc_nix_fc_config_set(nix, &fc_cfg);
                        if (rc)
                                return rc;
                }
        }

        rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
        if (rc)
                return rc;

        fc->rx_pause = rx_pause;
        fc->tx_pause = tx_pause;
        fc->mode = fc_conf->mode;

        return rc;
}

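/* PFC (IEEE 802.1Qbb) queue ops. tc_max mirrors the number of backpressure
 * channels the NIX exposes; RTE_ETH_FC_FULL advertises that both Rx and Tx
 * pause are supported per traffic class.
 */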
int
cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
                                         struct rte_eth_pfc_queue_info *pfc_info)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
        pfc_info->mode_capa = RTE_ETH_FC_FULL;
        return 0;
}

int
cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
                                         struct rte_eth_pfc_queue_conf *pfc_conf)
{
        struct cnxk_pfc_cfg conf;

        memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));

        conf.fc_cfg.mode = pfc_conf->mode;

        conf.pause_time = pfc_conf->tx_pause.pause_time;
        conf.rx_tc = pfc_conf->tx_pause.tc;
        conf.rx_qid = pfc_conf->tx_pause.rx_qid;

        conf.tx_tc = pfc_conf->rx_pause.tc;
        conf.tx_qid = pfc_conf->rx_pause.tx_qid;

        return nix_priority_flow_ctrl_configure(eth_dev, &conf);
}
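
/* Illustrative usage (not part of this file): an application reaches the op
 * above through the ethdev API, roughly as follows. port_id and the TC/queue
 * values are placeholders; field names follow rte_ethdev.h.
 *
 *      struct rte_eth_pfc_queue_conf conf = {
 *              .mode = RTE_ETH_FC_FULL,
 *              .rx_pause = { .tc = 0, .tx_qid = 0 },
 *              .tx_pause = { .tc = 0, .rx_qid = 0, .pause_time = 100 },
 *      };
 *
 *      rte_eth_dev_priority_flow_ctrl_queue_configure(port_id, &conf);
 */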

int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
                      const struct rte_flow_ops **ops)
{
        RTE_SET_USED(eth_dev);

        *ops = &cnxk_flow_ops;
        return 0;
}

int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        /* Update mac address at NPC */
        rc = roc_nix_npc_mac_addr_set(nix, addr->addr_bytes);
        if (rc)
                goto exit;

        /* Update mac address at CGX for PFs only */
        if (!roc_nix_is_vf_or_sdp(nix)) {
                rc = roc_nix_mac_addr_set(nix, addr->addr_bytes);
                if (rc) {
                        /* Rollback to previous mac address */
                        roc_nix_npc_mac_addr_set(nix, dev->mac_addr);
                        goto exit;
                }
        }

        /* Update mac address to cnxk ethernet device */
        rte_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);

exit:
        return rc;
}

int
cnxk_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
                      uint32_t index, uint32_t pool)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        PLT_SET_USED(index);
        PLT_SET_USED(pool);

        rc = roc_nix_mac_addr_add(nix, addr->addr_bytes);
        if (rc < 0) {
                plt_err("Failed to add mac address, rc=%d", rc);
                return rc;
        }

        /* Enable promiscuous mode at NIX level */
        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;
        dev->dmac_filter_count++;

        return 0;
}

void
cnxk_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_mac_addr_del(nix, index);
        if (rc) {
                plt_err("Failed to delete mac address, rc=%d", rc);
                return;
        }

        /* Only account the filter when the HW delete succeeded */
        dev->dmac_filter_count--;
}

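/* MTU excludes L2 overhead while the HW is programmed with frame sizes, so
 * the checks below convert back and forth via CNXK_NIX_L2_OVERHEAD and the
 * CRC length, plus the timesync Rx offset when PTP is enabled.
 */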
int
cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        uint32_t old_frame_size, frame_size = mtu + CNXK_NIX_L2_OVERHEAD;
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;
        uint32_t buffsz;

        frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;

        /* Check if MTU is within the allowed range */
        if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
                plt_err("MTU is less than minimum");
                goto exit;
        }

        if ((frame_size - RTE_ETHER_CRC_LEN) >
            ((uint32_t)roc_nix_max_pkt_len(nix))) {
                plt_err("MTU is greater than maximum");
                goto exit;
        }

        buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
        old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;

        /* Refuse MTU that requires the support of scattered packets
         * when this feature has not been enabled before.
         */
        if (data->dev_started && frame_size > buffsz &&
            !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
                plt_err("Scatter offload is not enabled for mtu");
                goto exit;
        }

        /* Check <seg size> * <max_seg> >= max_frame */
        if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) &&
            frame_size > (buffsz * CNXK_NIX_RX_NB_SEG_MAX)) {
                plt_err("Greater than maximum supported packet length");
                goto exit;
        }

        frame_size -= RTE_ETHER_CRC_LEN;

        /* Update mtu on Tx */
        rc = roc_nix_mac_mtu_set(nix, frame_size);
        if (rc) {
                plt_err("Failed to set MTU, rc=%d", rc);
                goto exit;
        }

        /* Sync same frame size on Rx */
        rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
        if (rc) {
                /* Rollback to older mtu */
                roc_nix_mac_mtu_set(nix,
                                    old_frame_size - RTE_ETHER_CRC_LEN);
                plt_err("Failed to set max Rx frame length, rc=%d", rc);
                goto exit;
        }
exit:
        return rc;
}

int
cnxk_nix_promisc_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, true);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, false);
                return rc;
        }

        return 0;
}

int
cnxk_nix_promisc_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc = 0;

        if (roc_nix_is_vf_or_sdp(nix))
                return rc;

        rc = roc_nix_npc_promisc_ena_dis(nix, dev->dmac_filter_enable);
        if (rc) {
                plt_err("Failed to setup promisc mode in npc, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                return rc;
        }

        rc = roc_nix_mac_promisc_mode_enable(nix, false);
        if (rc) {
                plt_err("Failed to setup promisc mode in mac, rc=%d(%s)", rc,
                        roc_error_msg_get(rc));
                roc_nix_npc_promisc_ena_dis(nix, !dev->dmac_filter_enable);
                return rc;
        }

        dev->dmac_filter_enable = false;
        return 0;
}

int
cnxk_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, true,
                                        eth_dev->data->promiscuous);
}

int
cnxk_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        return roc_nix_npc_mcast_config(&dev->nix, false,
                                        eth_dev->data->promiscuous);
}

int
cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        rc = roc_nix_mac_link_state_set(nix, true);
        if (rc)
                goto exit;

        /* Start tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_start(eth_dev, i);
                if (rc)
                        goto exit;
        }

exit:
        return rc;
}

int
cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        if (roc_nix_is_vf_or_sdp(nix))
                return -ENOTSUP;

        /* Stop tx queues */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                rc = cnxk_nix_tx_queue_stop(eth_dev, i);
                if (rc)
                        goto exit;
        }

        rc = roc_nix_mac_link_state_set(nix, false);
exit:
        return rc;
}

int
cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
                         struct rte_eth_dev_module_info *modinfo)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        modinfo->type = eeprom_info.sff_id;
        modinfo->eeprom_len = ROC_NIX_EEPROM_SIZE;
        return 0;
}

int
cnxk_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
                           struct rte_dev_eeprom_info *info)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix_eeprom_info eeprom_info = {0};
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL;

        if (!info->data || !info->length ||
            (info->offset + info->length > ROC_NIX_EEPROM_SIZE))
                return rc;

        rc = roc_nix_eeprom_info_get(nix, &eeprom_info);
        if (rc)
                return rc;

        rte_memcpy(info->data, eeprom_info.buf + info->offset, info->length);
        return 0;
}

int
cnxk_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_enable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
                               uint16_t rx_queue_id)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        roc_nix_rx_queue_intr_disable(&dev->nix, rx_queue_id);
        return 0;
}

int
cnxk_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
{
        RTE_SET_USED(eth_dev);

        if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
                return 0;

        return -ENOTSUP;
}

int
cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
                        size_t fw_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const char *str = roc_npc_profile_name_get(&dev->npc);
        uint32_t size = strlen(str) + 1;

        if (fw_size > size)
                fw_size = size;

        rte_strlcpy(fw_version, str, fw_size);

        if (fw_size < size)
                return size;

        return 0;
}

void
cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_rxq_info *qinfo)
{
        void *rxq = eth_dev->data->rx_queues[qid];
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->mp = rxq_sp->qconf.mp;
        qinfo->scattered_rx = eth_dev->data->scattered_rx;
        qinfo->nb_desc = rxq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
}

void
cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
                      struct rte_eth_txq_info *qinfo)
{
        void *txq = eth_dev->data->tx_queues[qid];
        struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);

        memset(qinfo, 0, sizeof(*qinfo));

        qinfo->nb_desc = txq_sp->qconf.nb_desc;

        memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
}

uint32_t
cnxk_nix_rx_queue_count(void *rxq)
{
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        struct roc_nix *nix = &rxq_sp->dev->nix;
        uint32_t head, tail;

        roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);
        return (tail - head) % (rxq_sp->qconf.nb_desc);
}

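/* Descriptor status helpers: an offset that falls between head and tail
 * (with wrap-around handled) has been filled by HW on Rx (descriptor DONE)
 * but is still awaiting transmission on Tx (descriptor FULL); outside that
 * window the meaning inverts, which is what the is_rx flip below expresses.
 */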
static inline int
nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset, bool is_rx)
{
        /* Check whether the given offset (queue index) has a packet
         * filled (Rx) or transmitted (Tx) by HW, accounting for the
         * ring wrap-around case.
         */
        return ((tail > head && offset <= tail && offset >= head) ||
                (head > tail && (offset >= head || offset <= tail))) ?
                       is_rx :
                       !is_rx;
}

int
cnxk_nix_rx_descriptor_status(void *rxq, uint16_t offset)
{
        struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
        struct roc_nix *nix = &rxq_sp->dev->nix;
        uint32_t head, tail;

        if (rxq_sp->qconf.nb_desc <= offset)
                return -EINVAL;

        roc_nix_cq_head_tail_get(nix, rxq_sp->qid, &head, &tail);

        if (nix_offset_has_packet(head, tail, offset, 1))
                return RTE_ETH_RX_DESC_DONE;
        else
                return RTE_ETH_RX_DESC_AVAIL;
}

int
cnxk_nix_tx_descriptor_status(void *txq, uint16_t offset)
{
        struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
        struct roc_nix *nix = &txq_sp->dev->nix;
        uint32_t head = 0, tail = 0;

        if (txq_sp->qconf.nb_desc <= offset)
                return -EINVAL;

        roc_nix_sq_head_tail_get(nix, txq_sp->qid, &head, &tail);

        if (nix_offset_has_packet(head, tail, offset, 0))
                return RTE_ETH_TX_DESC_DONE;
        else
                return RTE_ETH_TX_DESC_FULL;
}

/* It is a NOP for cnxk as HW frees the buffer on xmit */
int
cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
{
        RTE_SET_USED(txq);
        RTE_SET_USED(free_cnt);

        return 0;
}

int
cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint64_t *data = regs->data;
        int rc = -ENOTSUP;

        if (data == NULL) {
                rc = roc_nix_lf_get_reg_count(nix);
                if (rc > 0) {
                        regs->length = rc;
                        regs->width = 8;
                        rc = 0;
                }
                return rc;
        }

        if (!regs->length ||
            regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
                return roc_nix_lf_reg_dump(nix, data);

        return rc;
}

int
cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
                     struct rte_eth_rss_reta_entry64 *reta_conf,
                     uint16_t reta_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint16_t reta[ROC_NIX_RSS_RETA_MAX];
        struct roc_nix *nix = &dev->nix;
        int i, j, rc = -EINVAL, idx = 0;

        if (reta_size != dev->nix.reta_sz) {
                plt_err("Size of hash lookup table configured (%d) does not "
                        "match the number supported by hardware (%d)",
                        reta_size, dev->nix.reta_sz);
                goto fail;
        }

        /* Read back the current RETA first so entries not selected by the
         * mask keep their existing value instead of stack garbage.
         */
        rc = roc_nix_rss_reta_get(nix, 0, reta);
        if (rc)
                goto fail;

        /* Copy RETA table */
        for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta[idx] = reta_conf[i].reta[j];
                        idx++;
                }
        }

        return roc_nix_rss_reta_set(nix, 0, reta);

fail:
        return rc;
}

int
cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
                    struct rte_eth_rss_reta_entry64 *reta_conf,
                    uint16_t reta_size)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        uint16_t reta[ROC_NIX_RSS_RETA_MAX];
        struct roc_nix *nix = &dev->nix;
        int rc = -EINVAL, i, j, idx = 0;

        if (reta_size != dev->nix.reta_sz) {
                plt_err("Size of hash lookup table configured (%d) does not "
                        "match the number supported by hardware (%d)",
                        reta_size, dev->nix.reta_sz);
                goto fail;
        }

        rc = roc_nix_rss_reta_get(nix, 0, reta);
        if (rc)
                goto fail;

        /* Copy RETA table */
        for (i = 0; i < (int)(dev->nix.reta_sz / RTE_ETH_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++) {
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = reta[idx];
                        idx++;
                }
        }

        return 0;

fail:
        return rc;
}

int
cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_conf *rss_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        uint8_t rss_hash_level;
        uint32_t flowkey_cfg;
        int rc = -EINVAL;
        uint8_t alg_idx;

        if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
                plt_err("Hash key size mismatch %d vs %d",
                        rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
                goto fail;
        }

        if (rss_conf->rss_key)
                roc_nix_rss_key_set(nix, rss_conf->rss_key);

        rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
        if (rss_hash_level)
                rss_hash_level -= 1;
        flowkey_cfg =
                cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);

        rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
                                     ROC_NIX_RSS_GROUP_DEFAULT,
                                     ROC_NIX_RSS_MCAM_IDX_DEFAULT);
        if (rc) {
                plt_err("Failed to set RSS hash function rc=%d", rc);
                return rc;
        }

fail:
        return rc;
}

int
cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
                           struct rte_eth_rss_conf *rss_conf)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);

        if (rss_conf->rss_key)
                roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);

        rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
        rss_conf->rss_hf = dev->ethdev_rss_hf;

        return 0;
}

int
cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
                                struct rte_ether_addr *mc_addr_set,
                                uint32_t nb_mc_addr)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct rte_ether_addr null_mac_addr;
        struct roc_nix *nix = &dev->nix;
        int rc, index;
        uint32_t i;

        memset(&null_mac_addr, 0, sizeof(null_mac_addr));

        /* All configured multicast filters should be flushed first */
        for (i = 0; i < dev->max_mac_entries; i++) {
                if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
                        rc = roc_nix_mac_addr_del(nix, i);
                        if (rc) {
                                plt_err("Failed to flush mcast address, rc=%d",
                                        rc);
                                return rc;
                        }

                        dev->dmac_filter_count--;
                        /* Update address in NIC data structure */
                        rte_ether_addr_copy(&null_mac_addr,
                                            &data->mac_addrs[i]);
                }
        }

        if (!mc_addr_set || !nb_mc_addr)
                return 0;

        /* Check for available space */
        if (nb_mc_addr >
            ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
                plt_err("No space is available to add multicast filters");
                return -ENOSPC;
        }

        /* Multicast addresses are to be installed */
        for (i = 0; i < nb_mc_addr; i++) {
                index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
                if (index < 0) {
                        plt_err("Failed to add mcast mac address, rc=%d",
                                index);
                        return index;
                }

                dev->dmac_filter_count++;
                /* Update address in NIC data structure */
                rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
        }

        roc_nix_npc_promisc_ena_dis(nix, true);
        dev->dmac_filter_enable = true;
        eth_dev->data->promiscuous = false;

        return 0;
}

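/* Per-queue PFC plumbing: the Rx side maps a CQ to the requested traffic
 * class and arms its drop threshold so the MAC can emit per-TC pause frames;
 * the Tx side binds the SQ to the TC (rebuilding the TM hierarchy as a PFC
 * tree on the first such binding) so received pause frames stall only that
 * class. class_en accumulates the TC bitmap handed to the MAC block.
 */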
int
nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
                                 struct cnxk_pfc_cfg *conf)
{
        enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
                                           ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
        struct roc_nix *nix = &dev->nix;
        struct roc_nix_pfc_cfg pfc_cfg;
        struct roc_nix_fc_cfg fc_cfg;
        struct cnxk_eth_rxq_sp *rxq;
        struct cnxk_eth_txq_sp *txq;
        uint8_t rx_pause, tx_pause;
        enum rte_eth_fc_mode mode;
        struct roc_nix_cq *cq;
        struct roc_nix_sq *sq;
        int rc;

        if (roc_nix_is_vf_or_sdp(nix)) {
                plt_err("Prio flow ctrl config is not allowed on VF and SDP");
                return -ENOTSUP;
        }

        if (roc_model_is_cn96_ax() && data->dev_started) {
                /* On Ax, CQ should be in disabled state
                 * while setting flow control configuration.
                 */
                plt_info("Stop the port=%d for setting flow control",
                         data->port_id);
                return 0;
        }

        if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
            dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
                plt_err("Same TC cannot be configured on multiple SQs");
                return -ENOTSUP;
        }

        mode = conf->fc_cfg.mode;
        rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
        tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);

        /* Configure CQs */
        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
        cq = &dev->cqs[rxq->qid];
        fc_cfg.type = ROC_NIX_FC_CQ_CFG;
        fc_cfg.cq_cfg.tc = conf->rx_tc;
        fc_cfg.cq_cfg.enable = !!tx_pause;
        fc_cfg.cq_cfg.rq = cq->qid;
        fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
        rc = roc_nix_fc_config_set(nix, &fc_cfg);
        if (rc)
                goto exit;

        /* Check if RX pause frame is enabled or not */
        if (pfc->fc_cfg.rx_pause ^ rx_pause) {
                if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
                        goto exit;

                if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
                    eth_dev->data->nb_tx_queues > 1) {
                        /*
                         * Disabled xmit will be enabled when
                         * new topology is available.
                         */
                        rc = roc_nix_tm_hierarchy_disable(nix);
                        if (rc)
                                goto exit;

                        rc = roc_nix_tm_pfc_prepare_tree(nix);
                        if (rc)
                                goto exit;

                        rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
                                                         true);
                        if (rc)
                                goto exit;
                }
        }

        txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
        sq = &dev->sqs[txq->qid];
        memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
        fc_cfg.type = ROC_NIX_FC_TM_CFG;
        fc_cfg.tm_cfg.sq = sq->qid;
        fc_cfg.tm_cfg.tc = conf->tx_tc;
        fc_cfg.tm_cfg.enable = !!rx_pause;
        rc = roc_nix_fc_config_set(nix, &fc_cfg);
        if (rc)
                return rc;

        dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;

        /* Configure MAC block */
        if (tx_pause)
                pfc->class_en |= BIT(conf->rx_tc);
        else
                pfc->class_en &= ~BIT(conf->rx_tc);

        if (pfc->class_en)
                mode = RTE_ETH_FC_FULL;

        memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
        pfc_cfg.mode = mode_map[mode];
        pfc_cfg.tc = pfc->class_en;
        rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
        if (rc)
                return rc;

        pfc->fc_cfg.rx_pause = rx_pause;
        pfc->fc_cfg.tx_pause = tx_pause;
        pfc->fc_cfg.mode = mode;

exit:
        return rc;
}