db54468dbca1f875bfe1c4b7519384e84dc05654
[dpdk.git] / drivers / net / cnxk / cnxk_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
6 static inline uint64_t
7 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
8 {
9         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
10
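        /* Rx timestamp offload is not supported on VF/SDP interfaces or when
         * the HIGIG switch header type is in use.
         */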
11         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
12             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
13                 capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
14
15         return capa;
16 }
17
18 static inline uint64_t
19 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
20 {
21         RTE_SET_USED(dev);
22         return CNXK_NIX_TX_OFFLOAD_CAPA;
23 }
24
25 static inline uint32_t
26 nix_get_speed_capa(struct cnxk_eth_dev *dev)
27 {
28         uint32_t speed_capa;
29
30         /* Auto negotiation disabled */
31         speed_capa = RTE_ETH_LINK_SPEED_FIXED;
32         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
33                 speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
34                               RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
35                               RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
36         }
37
38         return speed_capa;
39 }
40
41 int
42 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
43 {
44         struct roc_nix *nix = &dev->nix;
45
46         if (dev->inb.inl_dev == use_inl_dev)
47                 return 0;
48
49         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
50                     dev->inb.nb_sess, !!dev->inb.inl_dev);
51
52         /* Change the mode */
53         dev->inb.inl_dev = use_inl_dev;
54
55         /* Update RoC for NPC rule insertion */
56         roc_nix_inb_mode_set(nix, use_inl_dev);
57
58         /* Setup lookup mem */
59         return cnxk_nix_lookup_mem_sa_base_set(dev);
60 }
61
62 static int
63 nix_security_setup(struct cnxk_eth_dev *dev)
64 {
65         struct roc_nix *nix = &dev->nix;
66         int i, rc = 0;
67
68         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
69                 /* Setup Inline Inbound */
70                 rc = roc_nix_inl_inb_init(nix);
71                 if (rc) {
72                         plt_err("Failed to initialize nix inline inb, rc=%d",
73                                 rc);
74                         return rc;
75                 }
76
77                 /* By default, use the inline device for poll mode.
78                  * This is overridden when event mode RQs are set up.
79                  */
80                 cnxk_nix_inb_mode_set(dev, true);
81         }
82
83         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
84             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
85                 struct plt_bitmap *bmap;
86                 size_t bmap_sz;
87                 void *mem;
88
89                 /* Set up enough descriptors for all Tx queues */
90                 nix->outb_nb_desc = dev->outb.nb_desc;
91                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
92
93                 /* Setup Inline Outbound */
94                 rc = roc_nix_inl_outb_init(nix);
95                 if (rc) {
96                         plt_err("Failed to initialize nix inline outb, rc=%d",
97                                 rc);
98                         goto cleanup;
99                 }
100
101                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
102
103                 /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
104                 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
105                         goto done;
106
107                 rc = -ENOMEM;
108                 /* Allocate a bitmap used to allocate and free SA indexes */
109                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
110                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
111                 if (mem == NULL) {
112                         plt_err("Outbound SA bmap alloc failed");
113
114                         rc |= roc_nix_inl_outb_fini(nix);
115                         goto cleanup;
116                 }
117
118                 rc = -EIO;
119                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
120                 if (!bmap) {
121                         plt_err("Outbound SA bmap init failed");
122
123                         rc |= roc_nix_inl_outb_fini(nix);
124                         plt_free(mem);
125                         goto cleanup;
126                 }
127
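                /* Initially mark all SA indexes as free */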
128                 for (i = 0; i < dev->outb.max_sa; i++)
129                         plt_bitmap_set(bmap, i);
130
131                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
132                 dev->outb.sa_bmap_mem = mem;
133                 dev->outb.sa_bmap = bmap;
134         }
135
136 done:
137         return 0;
138 cleanup:
139         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
140                 rc |= roc_nix_inl_inb_fini(nix);
141         return rc;
142 }
143
144 static int
145 nix_meter_fini(struct cnxk_eth_dev *dev)
146 {
147         struct cnxk_meter_node *next_mtr = NULL;
148         struct roc_nix_bpf_objs profs = {0};
149         struct cnxk_meter_node *mtr = NULL;
150         struct cnxk_mtr *fms = &dev->mtr;
151         struct roc_nix *nix = &dev->nix;
152         struct roc_nix_rq *rq;
153         uint32_t i;
154         int rc = 0;
155
156         RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
157                 for (i = 0; i < mtr->rq_num; i++) {
158                         rq = &dev->rqs[mtr->rq_id[i]];
159                         rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
160                 }
161
162                 profs.level = mtr->level;
163                 profs.count = 1;
164                 profs.ids[0] = mtr->bpf_id;
165                 rc = roc_nix_bpf_free(nix, &profs, 1);
166
167                 if (rc)
168                         return rc;
169
170                 TAILQ_REMOVE(fms, mtr, next);
171                 plt_free(mtr);
172         }
173         return 0;
174 }
175
176 static int
177 nix_security_release(struct cnxk_eth_dev *dev)
178 {
179         struct rte_eth_dev *eth_dev = dev->eth_dev;
180         struct cnxk_eth_sec_sess *eth_sec, *tvar;
181         struct roc_nix *nix = &dev->nix;
182         int rc, ret = 0;
183
184         /* Cleanup Inline inbound */
185         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
186                 /* Destroy inbound sessions */
187                 tvar = NULL;
188                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
189                         cnxk_eth_sec_ops.session_destroy(eth_dev,
190                                                          eth_sec->sess);
191
192                 /* Clear lookup mem */
193                 cnxk_nix_lookup_mem_sa_base_clear(dev);
194
195                 rc = roc_nix_inl_inb_fini(nix);
196                 if (rc)
197                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
198                 ret |= rc;
199         }
200
201         /* Cleanup Inline outbound */
202         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
203             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
204                 /* Destroy outbound sessions */
205                 tvar = NULL;
206                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
207                         cnxk_eth_sec_ops.session_destroy(eth_dev,
208                                                          eth_sec->sess);
209
210                 rc = roc_nix_inl_outb_fini(nix);
211                 if (rc)
212                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
213                 ret |= rc;
214
215                 plt_bitmap_free(dev->outb.sa_bmap);
216                 plt_free(dev->outb.sa_bmap_mem);
217                 dev->outb.sa_bmap = NULL;
218                 dev->outb.sa_bmap_mem = NULL;
219         }
220
221         dev->inb.inl_dev = false;
222         roc_nix_inb_mode_set(nix, false);
223         dev->nb_rxq_sso = 0;
224         dev->inb.nb_sess = 0;
225         dev->outb.nb_sess = 0;
226         return ret;
227 }
228
229 static void
230 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
231 {
232         struct rte_pktmbuf_pool_private *mbp_priv;
233         struct rte_eth_dev *eth_dev;
234         struct cnxk_eth_dev *dev;
235         uint32_t buffsz;
236
237         dev = rxq->dev;
238         eth_dev = dev->eth_dev;
239
240         /* Get rx buffer size */
241         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
242         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
243
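        /* If a single Rx buffer cannot hold MTU + L2 overhead, enable Rx
         * scatter and Tx multi-seg offloads.
         */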
244         if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
245                 dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
246                 dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
247         }
248 }
249
250 int
251 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
252 {
253         struct rte_eth_dev_data *data = eth_dev->data;
254         struct cnxk_eth_rxq_sp *rxq;
255         int rc;
256
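        /* The slow path rxq structure sits just before the fast path area
         * stored in rx_queues[]; step back one element to reach it.
         */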
257         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
258         /* Setup scatter mode if needed by jumbo */
259         nix_enable_mseg_on_jumbo(rxq);
260
261         rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
262         if (rc)
263                 plt_err("Failed to set default MTU size, rc=%d", rc);
264
265         return rc;
266 }
267
268 static int
269 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
270 {
271         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
272         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
273         struct rte_eth_fc_conf fc_conf = {0};
274         int rc;
275
276         /* Both Rx and Tx flow control are enabled (RTE_ETH_FC_FULL) in HW
277          * by the AF driver; reflect that in the PMD structure.
278          */
279         rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
280         if (rc)
281                 goto exit;
282
283         fc->mode = fc_conf.mode;
284         fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
285                         (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
286         fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
287                         (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
288
289 exit:
290         return rc;
291 }
292
293 static int
294 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
295 {
296         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
297         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
298         struct rte_eth_fc_conf fc_cfg = {0};
299
300         if (roc_nix_is_vf_or_sdp(&dev->nix))
301                 return 0;
302
303         fc_cfg.mode = fc->mode;
304
305         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
306         if (roc_model_is_cn96_ax() &&
307             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
308             (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
309                 fc_cfg.mode =
310                                 (fc_cfg.mode == RTE_ETH_FC_FULL ||
311                                 fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
312                                 RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
313         }
314
315         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
316 }
317
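/* Build the 64-bit mbuf rearm_data template (data_off, refcnt, nb_segs and
 * port) used by the Rx fast path to initialize received mbufs in one store.
 */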
318 uint64_t
319 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
320 {
321         uint16_t port_id = dev->eth_dev->data->port_id;
322         struct rte_mbuf mb_def;
323         uint64_t *tmp;
324
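        /* The checks below ensure data_off, refcnt, nb_segs and port are laid
         * out contiguously so that rearm_data can be accessed as one uint64_t.
         */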
325         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
326         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
327                                  offsetof(struct rte_mbuf, data_off) !=
328                          2);
329         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
330                                  offsetof(struct rte_mbuf, data_off) !=
331                          4);
332         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
333                                  offsetof(struct rte_mbuf, data_off) !=
334                          6);
335         mb_def.nb_segs = 1;
336         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
337                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
338         mb_def.port = port_id;
339         rte_mbuf_refcnt_set(&mb_def, 1);
340
341         /* Prevent compiler reordering: rearm_data covers previous fields */
342         rte_compiler_barrier();
343         tmp = (uint64_t *)&mb_def.rearm_data;
344
345         return *tmp;
346 }
347
348 static inline uint8_t
349 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
350 {
351         /*
352          * A maximum of three segments can be supported with W8; choose
353          * NIX_MAXSQESZ_W16 for multi-segment offload.
354          */
355         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
356                 return NIX_MAXSQESZ_W16;
357         else
358                 return NIX_MAXSQESZ_W8;
359 }
360
361 int
362 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
363                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
364                         const struct rte_eth_txconf *tx_conf)
365 {
366         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
367         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
368         struct cnxk_eth_txq_sp *txq_sp;
369         struct roc_nix_sq *sq;
370         size_t txq_sz;
371         int rc;
372
373         /* Free memory prior to re-allocation if needed. */
374         if (eth_dev->data->tx_queues[qid] != NULL) {
375                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
376                 dev_ops->tx_queue_release(eth_dev, qid);
377                 eth_dev->data->tx_queues[qid] = NULL;
378         }
379
380         /* When the Tx security offload is enabled, increase the Tx desc
381          * count by the maximum possible outbound desc count.
382          */
383         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
384                 nb_desc += dev->outb.nb_desc;
385
386         /* Setup ROC SQ */
387         sq = &dev->sqs[qid];
388         sq->qid = qid;
389         sq->nb_desc = nb_desc;
390         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
391
392         rc = roc_nix_sq_init(&dev->nix, sq);
393         if (rc) {
394                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
395                 return rc;
396         }
397
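        /* Allocate the slow path txq state followed by the fast path area */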
398         rc = -ENOMEM;
399         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
400         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
401         if (!txq_sp) {
402                 plt_err("Failed to alloc tx queue mem");
403                 rc |= roc_nix_sq_fini(sq);
404                 return rc;
405         }
406
407         txq_sp->dev = dev;
408         txq_sp->qid = qid;
409         txq_sp->qconf.conf.tx = *tx_conf;
410         /* Queue config should reflect global offloads */
411         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
412         txq_sp->qconf.nb_desc = nb_desc;
413
414         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
415                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
416                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
417                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
418
419         /* Store start of fast path area */
420         eth_dev->data->tx_queues[qid] = txq_sp + 1;
421         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
422         return 0;
423 }
424
425 static void
426 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
427 {
428         void *txq = eth_dev->data->tx_queues[qid];
429         struct cnxk_eth_txq_sp *txq_sp;
430         struct cnxk_eth_dev *dev;
431         struct roc_nix_sq *sq;
432         int rc;
433
434         if (!txq)
435                 return;
436
437         txq_sp = cnxk_eth_txq_to_sp(txq);
438
439         dev = txq_sp->dev;
440
441         plt_nix_dbg("Releasing txq %u", qid);
442
443         /* Cleanup ROC SQ */
444         sq = &dev->sqs[qid];
445         rc = roc_nix_sq_fini(sq);
446         if (rc)
447                 plt_err("Failed to cleanup sq, rc=%d", rc);
448
449         /* Finally free */
450         plt_free(txq_sp);
451 }
452
453 int
454 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
455                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
456                         const struct rte_eth_rxconf *rx_conf,
457                         struct rte_mempool *mp)
458 {
459         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
460         struct roc_nix *nix = &dev->nix;
461         struct cnxk_eth_rxq_sp *rxq_sp;
462         struct rte_mempool_ops *ops;
463         const char *platform_ops;
464         struct roc_nix_rq *rq;
465         struct roc_nix_cq *cq;
466         uint16_t first_skip;
467         int rc = -EINVAL;
468         size_t rxq_sz;
469
470         /* Sanity checks */
471         if (rx_conf->rx_deferred_start == 1) {
472                 plt_err("Deferred Rx start is not supported");
473                 goto fail;
474         }
475
476         platform_ops = rte_mbuf_platform_mempool_ops();
477         /* This driver needs cnxk_npa mempool ops to work */
478         ops = rte_mempool_get_ops(mp->ops_index);
479         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
480                 plt_err("mempool ops should be of cnxk_npa type");
481                 goto fail;
482         }
483
484         if (mp->pool_id == 0) {
485                 plt_err("Invalid pool_id");
486                 goto fail;
487         }
488
489         /* Free memory prior to re-allocation if needed */
490         if (eth_dev->data->rx_queues[qid] != NULL) {
491                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
492
493                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
494                 dev_ops->rx_queue_release(eth_dev, qid);
495                 eth_dev->data->rx_queues[qid] = NULL;
496         }
497
498         /* Clamp the CQ limit to the size of the packet pool aura for LBK
499          * to avoid meta packet drops, as LBK does not currently support
500          * backpressure.
501          */
502         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
503                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
504
505                 /* Use current RQ's aura limit if inl rq is not available */
506                 if (!pkt_pool_limit)
507                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
508                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
509         }
510
511         /* Setup ROC CQ */
512         cq = &dev->cqs[qid];
513         cq->qid = qid;
514         cq->nb_desc = nb_desc;
515         rc = roc_nix_cq_init(&dev->nix, cq);
516         if (rc) {
517                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
518                 goto fail;
519         }
520
521         /* Setup ROC RQ */
522         rq = &dev->rqs[qid];
523         rq->qid = qid;
524         rq->aura_handle = mp->pool_id;
525         rq->flow_tag_width = 32;
526         rq->sso_ena = false;
527
528         /* Calculate first mbuf skip */
529         first_skip = (sizeof(struct rte_mbuf));
530         first_skip += RTE_PKTMBUF_HEADROOM;
531         first_skip += rte_pktmbuf_priv_size(mp);
532         rq->first_skip = first_skip;
533         rq->later_skip = sizeof(struct rte_mbuf);
534         rq->lpb_size = mp->elt_size;
535
536         /* Enable inline IPsec on the RQ; it is not used in poll mode */
537         if (roc_nix_inl_inb_is_enabled(nix))
538                 rq->ipsech_ena = true;
539
540         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
541         if (rc) {
542                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
543                 goto cq_fini;
544         }
545
546         /* Allocate and setup fast path rx queue */
547         rc = -ENOMEM;
548         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
549         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
550         if (!rxq_sp) {
551                 plt_err("Failed to alloc rx queue for rq=%d", qid);
552                 goto rq_fini;
553         }
554
555         /* Setup slow path fields */
556         rxq_sp->dev = dev;
557         rxq_sp->qid = qid;
558         rxq_sp->qconf.conf.rx = *rx_conf;
559         /* Queue config should reflect global offloads */
560         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
561         rxq_sp->qconf.nb_desc = nb_desc;
562         rxq_sp->qconf.mp = mp;
563
564         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
565                 /* Setup rq reference for inline dev if present */
566                 rc = roc_nix_inl_dev_rq_get(rq);
567                 if (rc)
568                         goto free_mem;
569         }
570
571         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
572                     cq->nb_desc);
573
574         /* Store start of fast path area */
575         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
576         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
577
578         /* Calculate the delta and frequency multiplier between the PTP HI
579          * clock and TSC. These are needed to derive the raw clock value from
580          * the TSC counter; the read_clock eth op returns the raw clock value.
581          */
582         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
583                 rc = cnxk_nix_tsc_convert(dev);
584                 if (rc) {
585                         plt_err("Failed to calculate delta and freq mult");
586                         goto rq_fini;
587                 }
588         }
589
590         return 0;
591 free_mem:
592         plt_free(rxq_sp);
593 rq_fini:
594         rc |= roc_nix_rq_fini(rq);
595 cq_fini:
596         rc |= roc_nix_cq_fini(cq);
597 fail:
598         return rc;
599 }
600
601 static void
602 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
603 {
604         void *rxq = eth_dev->data->rx_queues[qid];
605         struct cnxk_eth_rxq_sp *rxq_sp;
606         struct cnxk_eth_dev *dev;
607         struct roc_nix_rq *rq;
608         struct roc_nix_cq *cq;
609         int rc;
610
611         if (!rxq)
612                 return;
613
614         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
615         dev = rxq_sp->dev;
616         rq = &dev->rqs[qid];
617
618         plt_nix_dbg("Releasing rxq %u", qid);
619
620         /* Release rq reference for inline dev if present */
621         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
622                 roc_nix_inl_dev_rq_put(rq);
623
624         /* Cleanup ROC RQ */
625         rc = roc_nix_rq_fini(rq);
626         if (rc)
627                 plt_err("Failed to cleanup rq, rc=%d", rc);
628
629         /* Cleanup ROC CQ */
630         cq = &dev->cqs[qid];
631         rc = roc_nix_cq_fini(cq);
632         if (rc)
633                 plt_err("Failed to cleanup cq, rc=%d", rc);
634
635         /* Finally free fast path area */
636         plt_free(rxq_sp);
637 }
638
639 uint32_t
640 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
641                        uint8_t rss_level)
642 {
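        /* Flow key types per RSS level: row 0 = outer headers, row 1 = inner
         * headers, row 2 = outer + inner; columns follow the RSS_*_INDEX
         * ordering (IPv4, IPv6, TCP, UDP, SCTP, DMAC).
         */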
643         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
644                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
645                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
646                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
647                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
648                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
649                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
650                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
651                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
652                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
653                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
654                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
655         };
656         uint32_t flowkey_cfg = 0;
657
658         dev->ethdev_rss_hf = ethdev_rss;
659
660         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
661             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
662                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
663         }
664
665         if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
666                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
667
668         if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
669                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
670
671         if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
672                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
673
674         if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
675                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
676
677         if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
678                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
679
680         if (ethdev_rss & RSS_IPV4_ENABLE)
681                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
682
683         if (ethdev_rss & RSS_IPV6_ENABLE)
684                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
685
686         if (ethdev_rss & RTE_ETH_RSS_TCP)
687                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
688
689         if (ethdev_rss & RTE_ETH_RSS_UDP)
690                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
691
692         if (ethdev_rss & RTE_ETH_RSS_SCTP)
693                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
694
695         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
696                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
697
698         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
699                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
700
701         if (ethdev_rss & RTE_ETH_RSS_PORT)
702                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
703
704         if (ethdev_rss & RTE_ETH_RSS_NVGRE)
705                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
706
707         if (ethdev_rss & RTE_ETH_RSS_VXLAN)
708                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
709
710         if (ethdev_rss & RTE_ETH_RSS_GENEVE)
711                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
712
713         if (ethdev_rss & RTE_ETH_RSS_GTPU)
714                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
715
716         return flowkey_cfg;
717 }
718
719 static void
720 nix_free_queue_mem(struct cnxk_eth_dev *dev)
721 {
722         plt_free(dev->rqs);
723         plt_free(dev->cqs);
724         plt_free(dev->sqs);
725         dev->rqs = NULL;
726         dev->cqs = NULL;
727         dev->sqs = NULL;
728 }
729
730 static int
731 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
732 {
733         TAILQ_INIT(&dev->mtr_profiles);
734         TAILQ_INIT(&dev->mtr_policy);
735         TAILQ_INIT(&dev->mtr);
736
737         return 0;
738 }
739
740 static int
741 nix_rss_default_setup(struct cnxk_eth_dev *dev)
742 {
743         struct rte_eth_dev *eth_dev = dev->eth_dev;
744         uint8_t rss_hash_level;
745         uint32_t flowkey_cfg;
746         uint64_t rss_hf;
747
748         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
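        /* RTE_ETH_RSS_LEVEL() is 1-based for explicit levels; convert it to
         * the 0-based row index of the flow key table (0, the PMD default,
         * is left as-is).
         */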
749         rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
750         if (rss_hash_level)
751                 rss_hash_level -= 1;
752
753         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
754         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
755 }
756
757 static int
758 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
759 {
760         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
761         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
762         struct cnxk_eth_qconf *tx_qconf = NULL;
763         struct cnxk_eth_qconf *rx_qconf = NULL;
764         struct cnxk_eth_rxq_sp *rxq_sp;
765         struct cnxk_eth_txq_sp *txq_sp;
766         int i, nb_rxq, nb_txq;
767         void **txq, **rxq;
768
769         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
770         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
771
772         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
773         if (tx_qconf == NULL) {
774                 plt_err("Failed to allocate memory for tx_qconf");
775                 goto fail;
776         }
777
778         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
779         if (rx_qconf == NULL) {
780                 plt_err("Failed to allocate memory for rx_qconf");
781                 goto fail;
782         }
783
784         txq = eth_dev->data->tx_queues;
785         for (i = 0; i < nb_txq; i++) {
786                 if (txq[i] == NULL) {
787                         tx_qconf[i].valid = false;
788                         plt_info("txq[%d] is already released", i);
789                         continue;
790                 }
791                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
792                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
793                 tx_qconf[i].valid = true;
794                 dev_ops->tx_queue_release(eth_dev, i);
795                 eth_dev->data->tx_queues[i] = NULL;
796         }
797
798         rxq = eth_dev->data->rx_queues;
799         for (i = 0; i < nb_rxq; i++) {
800                 if (rxq[i] == NULL) {
801                         rx_qconf[i].valid = false;
802                         plt_info("rxq[%d] is already released", i);
803                         continue;
804                 }
805                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
806                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
807                 rx_qconf[i].valid = true;
808                 dev_ops->rx_queue_release(eth_dev, i);
809                 eth_dev->data->rx_queues[i] = NULL;
810         }
811
812         dev->tx_qconf = tx_qconf;
813         dev->rx_qconf = rx_qconf;
814         return 0;
815
816 fail:
817         free(tx_qconf);
818         free(rx_qconf);
819         return -ENOMEM;
820 }
821
822 static int
823 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
824 {
825         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
826         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
827         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
828         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
829         int rc, i, nb_rxq, nb_txq;
830
831         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
832         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
833
834         rc = -ENOMEM;
835         /* Set up Tx and Rx queues with the previous configuration so
836          * that the queues remain functional in cases where ports are
837          * started without reconfiguring the queues.
838          *
839          * The usual reconfigure sequence looks like:
840          * port_configure() {
841          *      if(reconfigure) {
842          *              queue_release()
843          *              queue_setup()
844          *      }
845          *      queue_configure() {
846          *              queue_release()
847          *              queue_setup()
848          *      }
849          * }
850          * port_start()
851          *
852          * In some applications' control path, queue_configure() is
853          * NOT invoked for TXQs/RXQs in port_configure().
854          * In such cases the queues can still be functional after start,
855          * as they were already set up in port_configure().
856          */
857         for (i = 0; i < nb_txq; i++) {
858                 if (!tx_qconf[i].valid)
859                         continue;
860                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
861                                              &tx_qconf[i].conf.tx);
862                 if (rc) {
863                         plt_err("Failed to setup tx queue rc=%d", rc);
864                         for (i -= 1; i >= 0; i--)
865                                 dev_ops->tx_queue_release(eth_dev, i);
866                         goto fail;
867                 }
868         }
869
870         free(tx_qconf);
871         tx_qconf = NULL;
872
873         for (i = 0; i < nb_rxq; i++) {
874                 if (!rx_qconf[i].valid)
875                         continue;
876                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
877                                              &rx_qconf[i].conf.rx,
878                                              rx_qconf[i].mp);
879                 if (rc) {
880                         plt_err("Failed to setup rx queue rc=%d", rc);
881                         for (i -= 1; i >= 0; i--)
882                                 dev_ops->rx_queue_release(eth_dev, i);
883                         goto tx_queue_release;
884                 }
885         }
886
887         free(rx_qconf);
888         rx_qconf = NULL;
889
890         return 0;
891
892 tx_queue_release:
893         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
894                 dev_ops->tx_queue_release(eth_dev, i);
895 fail:
896         if (tx_qconf)
897                 free(tx_qconf);
898         if (rx_qconf)
899                 free(rx_qconf);
900
901         return rc;
902 }
903
904 static uint16_t
905 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
906 {
907         RTE_SET_USED(queue);
908         RTE_SET_USED(mbufs);
909         RTE_SET_USED(pkts);
910
911         return 0;
912 }
913
914 static void
915 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
916 {
917         /* These dummy functions are required to support applications
918          * that reconfigure queues without stopping the Tx and Rx burst
919          * threads (e.g. the KNI app).
920          * When the queue context is saved, the txqs/rxqs are released,
921          * which would crash the application since Rx/Tx burst may still
922          * be running on other lcores.
923          */
924         eth_dev->tx_pkt_burst = nix_eth_nop_burst;
925         eth_dev->rx_pkt_burst = nix_eth_nop_burst;
926         rte_mb();
927 }
928
929 static int
930 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
931 {
932         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
933         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
934         struct roc_nix *nix = &dev->nix;
935         int rc;
936
937         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
938         if (rc)
939                 return rc;
940
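        /* Pack the 8-bit LSO tunnel format indices into one 64-bit word:
         * bytes 0-3 hold the tunnel formats and bytes 4-7 hold the UDP
         * tunnel formats, each in V4V4/V4V6/V6V4/V6V6 order.
         */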
941         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
942                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
943                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
944                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
945
946         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
947                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
948                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
949                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
950         return 0;
951 }
952
953 static int
954 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
955 {
956         struct roc_nix *nix = &dev->nix;
957         int rc;
958
959         /* Nothing much to do if offload is not enabled */
960         if (!(dev->tx_offloads &
961               (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
962                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
963                 return 0;
964
965         /* Set up LSO formats in the AF. It's a no-op if another ethdev
966          * has already set them up.
967          */
968         rc = roc_nix_lso_fmt_setup(nix);
969         if (rc)
970                 return rc;
971
972         return nix_lso_tun_fmt_update(dev);
973 }
974
975 int
976 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
977 {
978         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
979         struct rte_eth_dev_data *data = eth_dev->data;
980         struct rte_eth_conf *conf = &data->dev_conf;
981         struct rte_eth_rxmode *rxmode = &conf->rxmode;
982         struct rte_eth_txmode *txmode = &conf->txmode;
983         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
984         struct roc_nix_fc_cfg fc_cfg = {0};
985         struct roc_nix *nix = &dev->nix;
986         struct rte_ether_addr *ea;
987         uint8_t nb_rxq, nb_txq;
988         uint64_t rx_cfg;
989         void *qs;
990         int rc;
991
992         rc = -EINVAL;
993
994         /* Sanity checks */
995         if (rte_eal_has_hugepages() == 0) {
996                 plt_err("Huge page is not configured");
997                 goto fail_configure;
998         }
999
1000         if (conf->dcb_capability_en == 1) {
1001                 plt_err("dcb enable is not supported");
1002                 goto fail_configure;
1003         }
1004
1005         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1006                 plt_err("Flow director is not supported");
1007                 goto fail_configure;
1008         }
1009
1010         if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1011             rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1012                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1013                 goto fail_configure;
1014         }
1015
1016         if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1017                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1018                 goto fail_configure;
1019         }
1020
1021         /* Free the resources allocated from the previous configure */
1022         if (dev->configured == 1) {
1023                 /* Unregister queue irq's */
1024                 roc_nix_unregister_queue_irqs(nix);
1025
1026                 /* Unregister CQ irqs if present */
1027                 if (eth_dev->data->dev_conf.intr_conf.rxq)
1028                         roc_nix_unregister_cq_irqs(nix);
1029
1030                 /* Set no-op functions */
1031                 nix_set_nop_rxtx_function(eth_dev);
1032                 /* Store queue config for later */
1033                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1034                 if (rc)
1035                         goto fail_configure;
1036
1037                 /* Disable and free rte_meter entries */
1038                 rc = nix_meter_fini(dev);
1039                 if (rc)
1040                         goto fail_configure;
1041
1042                 /* Cleanup security support */
1043                 rc = nix_security_release(dev);
1044                 if (rc)
1045                         goto fail_configure;
1046
1047                 roc_nix_tm_fini(nix);
1048                 roc_nix_lf_free(nix);
1049         }
1050
1051         dev->rx_offloads = rxmode->offloads;
1052         dev->tx_offloads = txmode->offloads;
1053
1054         /* Prepare rx cfg */
1055         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1056         if (dev->rx_offloads &
1057             (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1058                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1059                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1060         }
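        /* Drop packets with receive errors and enable L2 length error plus
         * outer/inner L3/L4 length checks.
         */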
1061         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1062                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1063                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1064
1065         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1066                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1067                 /* Disable drop_re if Rx security offload is enabled and
1068                  * the platform does not support it.
1069                  */
1070                 if (dev->ipsecd_drop_re_dis)
1071                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1072         }
1073
1074         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1075         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1076
1077         /* Alloc a nix lf */
1078         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1079         if (rc) {
1080                 plt_err("Failed to init nix_lf rc=%d", rc);
1081                 goto fail_configure;
1082         }
1083
1084         dev->npc.channel = roc_nix_get_base_chan(nix);
1085
1086         nb_rxq = data->nb_rx_queues;
1087         nb_txq = data->nb_tx_queues;
1088         rc = -ENOMEM;
1089         if (nb_rxq) {
1090                 /* Allocate memory for roc rq's and cq's */
1091                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1092                 if (!qs) {
1093                         plt_err("Failed to alloc rqs");
1094                         goto free_nix_lf;
1095                 }
1096                 dev->rqs = qs;
1097
1098                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1099                 if (!qs) {
1100                         plt_err("Failed to alloc cqs");
1101                         goto free_nix_lf;
1102                 }
1103                 dev->cqs = qs;
1104         }
1105
1106         if (nb_txq) {
1107                 /* Allocate memory for roc sq's */
1108                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1109                 if (!qs) {
1110                         plt_err("Failed to alloc sqs");
1111                         goto free_nix_lf;
1112                 }
1113                 dev->sqs = qs;
1114         }
1115
1116         /* Re-enable NIX LF error interrupts */
1117         roc_nix_err_intr_ena_dis(nix, true);
1118         roc_nix_ras_intr_ena_dis(nix, true);
1119
1120         if (nix->rx_ptp_ena &&
1121             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1122                 plt_err("Both PTP and switch header enabled");
1123                 goto free_nix_lf;
1124         }
1125
1126         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
1127         if (rc) {
1128                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1129                 goto free_nix_lf;
1130         }
1131
1132         /* Setup LSO if needed */
1133         rc = nix_lso_fmt_setup(dev);
1134         if (rc) {
1135                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1136                 goto free_nix_lf;
1137         }
1138
1139         /* Configure RSS */
1140         rc = nix_rss_default_setup(dev);
1141         if (rc) {
1142                 plt_err("Failed to configure rss rc=%d", rc);
1143                 goto free_nix_lf;
1144         }
1145
1146         /* Init the default TM scheduler hierarchy */
1147         rc = roc_nix_tm_init(nix);
1148         if (rc) {
1149                 plt_err("Failed to init traffic manager, rc=%d", rc);
1150                 goto free_nix_lf;
1151         }
1152
1153         rc = nix_ingress_policer_setup(dev);
1154         if (rc) {
1155                 plt_err("Failed to setup ingress policer rc=%d", rc);
1156                 goto free_nix_lf;
1157         }
1158
1159         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1160         if (rc) {
1161                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1162                 goto tm_fini;
1163         }
1164
1165         /* Register queue IRQs */
1166         rc = roc_nix_register_queue_irqs(nix);
1167         if (rc) {
1168                 plt_err("Failed to register queue interrupts rc=%d", rc);
1169                 goto tm_fini;
1170         }
1171
1172         /* Register cq IRQs */
1173         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1174                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1175                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1176                                 dev->nix.cints);
1177                         goto q_irq_fini;
1178                 }
1179                 /* The Rx interrupt feature cannot work with vector mode
1180                  * because vector mode does not process packets unless at
1181                  * least 4 packets are received, while CQ interrupts are
1182                  * generated even for a single packet in the CQ.
1183                  */
1184                 dev->scalar_ena = true;
1185
1186                 rc = roc_nix_register_cq_irqs(nix);
1187                 if (rc) {
1188                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1189                         goto q_irq_fini;
1190                 }
1191         }
1192
1193         /* Configure loop back mode */
1194         rc = roc_nix_mac_loopback_enable(nix,
1195                                          eth_dev->data->dev_conf.lpbk_mode);
1196         if (rc) {
1197                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1198                 goto cq_fini;
1199         }
1200
1201         /* Init flow control configuration */
1202         fc_cfg.cq_cfg_valid = false;
1203         fc_cfg.rxchan_cfg.enable = true;
1204         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1205         if (rc) {
1206                 plt_err("Failed to initialize flow control rc=%d", rc);
1207                 goto cq_fini;
1208         }
1209
1210         /* Update flow control configuration to PMD */
1211         rc = nix_init_flow_ctrl_config(eth_dev);
1212         if (rc) {
1213                 plt_err("Failed to initialize flow control rc=%d", rc);
1214                 goto cq_fini;
1215         }
1216
1217         /* Setup Inline security support */
1218         rc = nix_security_setup(dev);
1219         if (rc)
1220                 goto cq_fini;
1221
1222         /*
1223          * Restore the queue config when a reconfigure is followed by another
1224          * reconfigure and the application did not invoke queue configure.
1225          */
1226         if (dev->configured == 1) {
1227                 rc = nix_restore_queue_cfg(eth_dev);
1228                 if (rc)
1229                         goto sec_release;
1230         }
1231
1232         /* Update the mac address */
1233         ea = eth_dev->data->mac_addrs;
1234         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1235         if (rte_is_zero_ether_addr(ea))
1236                 rte_eth_random_addr((uint8_t *)ea);
1237
1238         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1239
1240         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1241                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1242                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1243                     dev->rx_offloads, dev->tx_offloads);
1244
1245         /* All good */
1246         dev->configured = 1;
1247         dev->nb_rxq = data->nb_rx_queues;
1248         dev->nb_txq = data->nb_tx_queues;
1249         return 0;
1250
1251 sec_release:
1252         rc |= nix_security_release(dev);
1253 cq_fini:
1254         roc_nix_unregister_cq_irqs(nix);
1255 q_irq_fini:
1256         roc_nix_unregister_queue_irqs(nix);
1257 tm_fini:
1258         roc_nix_tm_fini(nix);
1259 free_nix_lf:
1260         nix_free_queue_mem(dev);
1261         rc |= roc_nix_lf_free(nix);
1262 fail_configure:
1263         dev->configured = 0;
1264         return rc;
1265 }
1266
1267 int
1268 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1269 {
1270         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1271         struct rte_eth_dev_data *data = eth_dev->data;
1272         struct roc_nix_sq *sq = &dev->sqs[qid];
1273         int rc = -EINVAL;
1274
1275         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1276                 return 0;
1277
1278         rc = roc_nix_tm_sq_aura_fc(sq, true);
1279         if (rc) {
1280                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1281                 goto done;
1282         }
1283
1284         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1285 done:
1286         return rc;
1287 }
1288
1289 int
1290 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1291 {
1292         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1293         struct rte_eth_dev_data *data = eth_dev->data;
1294         struct roc_nix_sq *sq = &dev->sqs[qid];
1295         int rc;
1296
1297         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1298                 return 0;
1299
1300         rc = roc_nix_tm_sq_aura_fc(sq, false);
1301         if (rc) {
1302                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1303                         rc);
1304                 goto done;
1305         }
1306
1307         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1308 done:
1309         return rc;
1310 }
1311
1312 static int
1313 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1314 {
1315         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1316         struct rte_eth_dev_data *data = eth_dev->data;
1317         struct roc_nix_rq *rq = &dev->rqs[qid];
1318         int rc;
1319
1320         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1321                 return 0;
1322
1323         rc = roc_nix_rq_ena_dis(rq, true);
1324         if (rc) {
1325                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1326                 goto done;
1327         }
1328
1329         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1330 done:
1331         return rc;
1332 }
1333
1334 static int
1335 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1336 {
1337         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1338         struct rte_eth_dev_data *data = eth_dev->data;
1339         struct roc_nix_rq *rq = &dev->rqs[qid];
1340         int rc;
1341
1342         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1343                 return 0;
1344
1345         rc = roc_nix_rq_ena_dis(rq, false);
1346         if (rc) {
1347                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1348                 goto done;
1349         }
1350
1351         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1352 done:
1353         return rc;
1354 }
1355
1356 static int
1357 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1358 {
1359         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1360         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1361         struct rte_mbuf *rx_pkts[32];
1362         struct rte_eth_link link;
1363         int count, i, j, rc;
1364         void *rxq;
1365
1366         /* Disable switch hdr pkind */
1367         roc_nix_switch_hdr_set(&dev->nix, 0);
1368
1369         /* Stop link change events */
1370         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1371                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1372
1373         /* Disable Rx via NPC */
1374         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1375
1376         /* Stop rx queues and free up pkts pending */
1377         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1378                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1379                 if (rc)
1380                         continue;
1381
1382                 rxq = eth_dev->data->rx_queues[i];
1383                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1384                 while (count) {
1385                         for (j = 0; j < count; j++)
1386                                 rte_pktmbuf_free(rx_pkts[j]);
1387                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1388                 }
1389         }
1390
1391         /* Stop tx queues  */
1392         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1393                 dev_ops->tx_queue_stop(eth_dev, i);
1394
1395         /* Bring down link status internally */
1396         memset(&link, 0, sizeof(link));
1397         rte_eth_linkstatus_set(eth_dev, &link);
1398
1399         return 0;
1400 }
1401
1402 int
1403 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1404 {
1405         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1406         int rc, i;
1407
1408         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1409                 rc = nix_recalc_mtu(eth_dev);
1410                 if (rc)
1411                         return rc;
1412         }
1413
1414         /* Start rx queues */
1415         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1416                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1417                 if (rc)
1418                         return rc;
1419         }
1420
1421         /* Start tx queues  */
1422         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1423                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1424                 if (rc)
1425                         return rc;
1426         }
1427
1428         /* Update Flow control configuration */
1429         rc = nix_update_flow_ctrl_config(eth_dev);
1430         if (rc) {
1431                 plt_err("Failed to enable flow control. error code(%d)", rc);
1432                 return rc;
1433         }
1434
1435         /* Enable Rx in NPC */
1436         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1437         if (rc) {
1438                 plt_err("Failed to enable NPC rx %d", rc);
1439                 return rc;
1440         }
1441
1442         cnxk_nix_toggle_flag_link_cfg(dev, true);
1443
1444         /* Start link change events */
1445         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1446                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1447                 if (rc) {
1448                         plt_err("Failed to start cgx link event %d", rc);
1449                         goto rx_disable;
1450                 }
1451         }
1452
1453         /* Enable PTP if it is requested by the user or is already
1454          * enabled on the PF owning this VF.
1455          */
1456         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1457         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1458                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1459         else
1460                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1461
1462         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1463                 rc = rte_mbuf_dyn_rx_timestamp_register
1464                         (&dev->tstamp.tstamp_dynfield_offset,
1465                          &dev->tstamp.rx_tstamp_dynflag);
1466                 if (rc != 0) {
1467                         plt_err("Failed to register Rx timestamp field/flag");
1468                         goto rx_disable;
1469                 }
1470         }
1471
1472         cnxk_nix_toggle_flag_link_cfg(dev, false);
1473
1474         return 0;
1475
1476 rx_disable:
1477         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1478         cnxk_nix_toggle_flag_link_cfg(dev, false);
1479         return rc;
1480 }
1481
1482 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1483 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1484
1485 /* CNXK platform independent eth dev ops */
1486 struct eth_dev_ops cnxk_eth_dev_ops = {
1487         .mtu_set = cnxk_nix_mtu_set,
1488         .mac_addr_add = cnxk_nix_mac_addr_add,
1489         .mac_addr_remove = cnxk_nix_mac_addr_del,
1490         .mac_addr_set = cnxk_nix_mac_addr_set,
1491         .dev_infos_get = cnxk_nix_info_get,
1492         .link_update = cnxk_nix_link_update,
1493         .tx_queue_release = cnxk_nix_tx_queue_release,
1494         .rx_queue_release = cnxk_nix_rx_queue_release,
1495         .dev_stop = cnxk_nix_dev_stop,
1496         .dev_close = cnxk_nix_dev_close,
1497         .dev_reset = cnxk_nix_dev_reset,
1498         .tx_queue_start = cnxk_nix_tx_queue_start,
1499         .rx_queue_start = cnxk_nix_rx_queue_start,
1500         .rx_queue_stop = cnxk_nix_rx_queue_stop,
1501         .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1502         .promiscuous_enable = cnxk_nix_promisc_enable,
1503         .promiscuous_disable = cnxk_nix_promisc_disable,
1504         .allmulticast_enable = cnxk_nix_allmulticast_enable,
1505         .allmulticast_disable = cnxk_nix_allmulticast_disable,
1506         .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1507         .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1508         .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1509         .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1510         .dev_set_link_up = cnxk_nix_set_link_up,
1511         .dev_set_link_down = cnxk_nix_set_link_down,
1512         .get_module_info = cnxk_nix_get_module_info,
1513         .get_module_eeprom = cnxk_nix_get_module_eeprom,
1514         .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1515         .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1516         .pool_ops_supported = cnxk_nix_pool_ops_supported,
1517         .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1518         .stats_get = cnxk_nix_stats_get,
1519         .stats_reset = cnxk_nix_stats_reset,
1520         .xstats_get = cnxk_nix_xstats_get,
1521         .xstats_get_names = cnxk_nix_xstats_get_names,
1522         .xstats_reset = cnxk_nix_xstats_reset,
1523         .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1524         .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1525         .fw_version_get = cnxk_nix_fw_version_get,
1526         .rxq_info_get = cnxk_nix_rxq_info_get,
1527         .txq_info_get = cnxk_nix_txq_info_get,
1528         .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1529         .flow_ops_get = cnxk_nix_flow_ops_get,
1530         .get_reg = cnxk_nix_dev_get_reg,
1531         .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1532         .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1533         .timesync_read_time = cnxk_nix_timesync_read_time,
1534         .timesync_write_time = cnxk_nix_timesync_write_time,
1535         .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1536         .read_clock = cnxk_nix_read_clock,
1537         .reta_update = cnxk_nix_reta_update,
1538         .reta_query = cnxk_nix_reta_query,
1539         .rss_hash_update = cnxk_nix_rss_hash_update,
1540         .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1541         .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1542         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1543         .tm_ops_get = cnxk_nix_tm_ops_get,
1544         .mtr_ops_get = cnxk_nix_mtr_ops_get,
1545 };
1546
1547 static int
1548 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1549 {
1550         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1551         struct rte_security_ctx *sec_ctx;
1552         struct roc_nix *nix = &dev->nix;
1553         struct rte_pci_device *pci_dev;
1554         int rc, max_entries;
1555
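        /* Start from the common ops; platform drivers may override entries */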
1556         eth_dev->dev_ops = &cnxk_eth_dev_ops;
1557
1558         /* Alloc security context */
1559         sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1560         if (!sec_ctx)
1561                 return -ENOMEM;
1562         sec_ctx->device = eth_dev;
1563         sec_ctx->ops = &cnxk_eth_sec_ops;
1564         sec_ctx->flags =
1565                 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1566         eth_dev->security_ctx = sec_ctx;
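        /* Initialize inline inbound and outbound security session lists */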
1567         TAILQ_INIT(&dev->inb.list);
1568         TAILQ_INIT(&dev->outb.list);
1569
1570         /* For secondary processes, the primary has done all the work */
1571         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1572                 return 0;
1573
1574         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1575         rte_eth_copy_pci_info(eth_dev, pci_dev);
1576
1577         /* Parse devargs string */
1578         rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1579         if (rc) {
1580                 plt_err("Failed to parse devargs rc=%d", rc);
1581                 goto error;
1582         }
1583
1584         /* Initialize base roc nix */
1585         nix->pci_dev = pci_dev;
1586         nix->hw_vlan_ins = true;
1587         rc = roc_nix_dev_init(nix);
1588         if (rc) {
1589                 plt_err("Failed to initialize roc nix rc=%d", rc);
1590                 goto error;
1591         }
1592
1593         /* Register up msg callback for link status updates */
1594         roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1595
1596         /* Register up msg callback for link status get requests */
1597         roc_nix_mac_link_info_get_cb_register(nix,
1598                                               cnxk_eth_dev_link_status_get_cb);
1599
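        /* Save ethdev back-reference and reset device state */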
1600         dev->eth_dev = eth_dev;
1601         dev->configured = 0;
1602         dev->ptype_disable = 0;
1603
1604         /* For VFs, the returned max_entries will be 0. But to keep the
1605          * default MAC address, one entry must be allocated, so set it to 1.
1606          */
1607         if (roc_nix_is_vf_or_sdp(nix))
1608                 max_entries = 1;
1609         else
1610                 max_entries = roc_nix_mac_max_entries_get(nix);
1611
1612         if (max_entries <= 0) {
1613                 plt_err("Failed to get max entries for mac addr");
1614                 rc = -ENOTSUP;
1615                 goto dev_fini;
1616         }
1617
1618         eth_dev->data->mac_addrs =
1619                 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1620         if (eth_dev->data->mac_addrs == NULL) {
1621                 plt_err("Failed to allocate memory for mac addr");
1622                 rc = -ENOMEM;
1623                 goto dev_fini;
1624         }
1625
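        /* Save MAC table size; the default MAC uses the first DMAC filter entry */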
1626         dev->max_mac_entries = max_entries;
1627         dev->dmac_filter_count = 1;
1628
1629         /* Get mac address */
1630         rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1631         if (rc) {
1632                 plt_err("Failed to get mac addr, rc=%d", rc);
1633                 goto free_mac_addrs;
1634         }
1635
1636         /* Update the mac address */
1637         memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1638
1639         if (!roc_nix_is_vf_or_sdp(nix)) {
1640                 /* Sync same MAC address to CGX/RPM table */
1641                 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1642                 if (rc) {
1643                         plt_err("Failed to set mac addr, rc=%d", rc);
1644                         goto free_mac_addrs;
1645                 }
1646         }
1647
1648         /* Union of all capabilities supported by CNXK.
1649          * Platform-specific capabilities will be
1650          * updated later.
1651          */
1652         dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1653         dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1654         dev->speed_capa = nix_get_speed_capa(dev);
1655
1656         /* Initialize roc npc */
1657         dev->npc.roc_nix = nix;
1658         rc = roc_npc_init(&dev->npc);
1659         if (rc)
1660                 goto free_mac_addrs;
1661
1662         plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1663                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1664                     eth_dev->data->port_id, roc_nix_get_pf(nix),
1665                     roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1666                     dev->rx_offload_capa, dev->tx_offload_capa);
1667         return 0;
1668
1669 free_mac_addrs:
1670         rte_free(eth_dev->data->mac_addrs);
1671 dev_fini:
1672         roc_nix_dev_fini(nix);
1673 error:
1674         plt_err("Failed to init nix eth_dev rc=%d", rc);
1675         return rc;
1676 }
1677
1678 static int
1679 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1680 {
1681         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1682         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1683         struct roc_nix *nix = &dev->nix;
1684         int rc, i;
1685
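        /* Free the security context allocated during dev init */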
1686         plt_free(eth_dev->security_ctx);
1687         eth_dev->security_ctx = NULL;
1688
1689         /* Nothing to be done for secondary processes */
1690         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1691                 return 0;
1692
1693         /* Clear the flag since we are closing down */
1694         dev->configured = 0;
1695
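        /* Disable Rx in NPC */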
1696         roc_nix_npc_rx_ena_dis(nix, false);
1697
1698         /* Disable and free rte_meter entries */
1699         nix_meter_fini(dev);
1700
1701         /* Disable and free rte_flow entries */
1702         roc_npc_fini(&dev->npc);
1703
1704         /* Disable link status events */
1705         roc_nix_mac_link_event_start_stop(nix, false);
1706
1707         /* Unregister the link update op; this is required to stop VFs from
1708          * receiving link status updates on the exit path.
1709          */
1710         roc_nix_mac_link_cb_unregister(nix);
1711
1712         /* Free up SQs */
1713         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1714                 dev_ops->tx_queue_release(eth_dev, i);
1715                 eth_dev->data->tx_queues[i] = NULL;
1716         }
1717         eth_dev->data->nb_tx_queues = 0;
1718
1719         /* Free up RQs and CQs */
1720         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1721                 dev_ops->rx_queue_release(eth_dev, i);
1722                 eth_dev->data->rx_queues[i] = NULL;
1723         }
1724         eth_dev->data->nb_rx_queues = 0;
1725
1726         /* Free security resources */
1727         nix_security_release(dev);
1728
1729         /* Free tm resources */
1730         roc_nix_tm_fini(nix);
1731
1732         /* Unregister queue irqs */
1733         roc_nix_unregister_queue_irqs(nix);
1734
1735         /* Unregister cq irqs */
1736         if (eth_dev->data->dev_conf.intr_conf.rxq)
1737                 roc_nix_unregister_cq_irqs(nix);
1738
1739         /* Free ROC RQ, SQ and CQ memory */
1740         nix_free_queue_mem(dev);
1741
1742         /* Free nix lf resources */
1743         rc = roc_nix_lf_free(nix);
1744         if (rc)
1745                 plt_err("Failed to free nix lf, rc=%d", rc);
1746
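        /* Free the MAC address table */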
1747         rte_free(eth_dev->data->mac_addrs);
1748         eth_dev->data->mac_addrs = NULL;
1749
1750         rc = roc_nix_dev_fini(nix);
1751         /* Can be freed later by PMD if NPA LF is in use */
1752         if (rc == -EAGAIN) {
1753                 if (!reset)
1754                         eth_dev->data->dev_private = NULL;
1755                 return 0;
1756         } else if (rc) {
1757                 plt_err("Failed in nix dev fini, rc=%d", rc);
1758         }
1759
1760         return rc;
1761 }
1762
1763 static int
1764 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1765 {
1766         cnxk_eth_dev_uninit(eth_dev, false);
1767         return 0;
1768 }
1769
1770 static int
1771 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1772 {
1773         int rc;
1774
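        /* Uninit with reset=true so dev_private is retained for re-init */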
1775         rc = cnxk_eth_dev_uninit(eth_dev, true);
1776         if (rc)
1777                 return rc;
1778
1779         return cnxk_eth_dev_init(eth_dev);
1780 }
1781
1782 int
1783 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1784 {
1785         struct rte_eth_dev *eth_dev;
1786         struct roc_nix *nix;
1787         int rc = -EINVAL;
1788
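        /* Look up the eth dev created for this PCI device, if any */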
1789         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1790         if (eth_dev) {
1791                 /* Cleanup eth dev */
1792                 rc = cnxk_eth_dev_uninit(eth_dev, false);
1793                 if (rc)
1794                         return rc;
1795
1796                 rte_eth_dev_release_port(eth_dev);
1797         }
1798
1799         /* Nothing to be done for secondary processes */
1800         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1801                 return 0;
1802
1803         /* Check if this device is hosting the common resource */
1804         nix = roc_idev_npa_nix_get();
1805         if (!nix || nix->pci_dev != pci_dev)
1806                 return 0;
1807
1808         /* Try nix fini now */
1809         rc = roc_nix_dev_fini(nix);
1810         if (rc == -EAGAIN) {
1811                 plt_info("%s: common resource in use by other devices",
1812                          pci_dev->name);
1813                 goto exit;
1814         } else if (rc) {
1815                 plt_err("Failed in nix dev fini, rc=%d", rc);
1816                 goto exit;
1817         }
1818
1819         /* Free the device pointer as rte_ethdev no longer holds it */
1820         rte_free(nix);
1821 exit:
1822         return rc;
1823 }
1824
1825 int
1826 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1827 {
1828         int rc;
1829
1830         RTE_SET_USED(pci_drv);
1831
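        /* Allocate the eth dev and run cnxk_eth_dev_init() on it */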
1832         rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1833                                            cnxk_eth_dev_init);
1834
1835         /* On error in a secondary process, recheck whether the port exists
1836          * in the primary process or is in the middle of being detached.
1837          */
1838         if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1839                 if (!rte_eth_dev_allocated(pci_dev->device.name))
1840                         return 0;
1841         return rc;
1842 }