net/cnxk: support to create meter
drivers/net/cnxk/cnxk_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
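/* Rx offload capabilities helper: the timestamp offload is not advertised
 * for VF/SDP devices or when the HIGIG switch header type is in use.
 */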
6 static inline uint64_t
7 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
8 {
9         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
10
11         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
12             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
13                 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
14
15         return capa;
16 }
17
18 static inline uint64_t
19 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
20 {
21         RTE_SET_USED(dev);
22         return CNXK_NIX_TX_OFFLOAD_CAPA;
23 }
24
25 static inline uint32_t
26 nix_get_speed_capa(struct cnxk_eth_dev *dev)
27 {
28         uint32_t speed_capa;
29
30         /* Auto negotiation disabled */
31         speed_capa = ETH_LINK_SPEED_FIXED;
32         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
33                 speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
34                               ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
35                               ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
36         }
37
38         return speed_capa;
39 }
40
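/* Switch inbound inline IPsec handling between the inline device and the
 * ethdev NIX LF, update the RoC mode used for NPC rule insertion and
 * refresh the SA base lookup memory accordingly.
 */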
41 int
42 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
43 {
44         struct roc_nix *nix = &dev->nix;
45
46         if (dev->inb.inl_dev == use_inl_dev)
47                 return 0;
48
49         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
50                     dev->inb.nb_sess, !!dev->inb.inl_dev);
51
52         /* Change the mode */
53         dev->inb.inl_dev = use_inl_dev;
54
55         /* Update RoC for NPC rule insertion */
56         roc_nix_inb_mode_set(nix, use_inl_dev);
57
58         /* Setup lookup mem */
59         return cnxk_nix_lookup_mem_sa_base_set(dev);
60 }
61
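/* Set up inline inbound/outbound IPsec resources based on the enabled
 * Rx/Tx security offloads, including the outbound SA index bitmap.
 */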
62 static int
63 nix_security_setup(struct cnxk_eth_dev *dev)
64 {
65         struct roc_nix *nix = &dev->nix;
66         int i, rc = 0;
67
68         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
69                 /* Setup Inline Inbound */
70                 rc = roc_nix_inl_inb_init(nix);
71                 if (rc) {
72                         plt_err("Failed to initialize nix inline inb, rc=%d",
73                                 rc);
74                         return rc;
75                 }
76
77                 /* By default, use the inline device for poll mode.
78                  * This is overridden when event mode RQs are set up.
79                  */
80                 cnxk_nix_inb_mode_set(dev, true);
81         }
82
83         if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
84             dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
85                 struct plt_bitmap *bmap;
86                 size_t bmap_sz;
87                 void *mem;
88
89                 /* Setup enough descriptors for all tx queues */
90                 nix->outb_nb_desc = dev->outb.nb_desc;
91                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
92
93                 /* Setup Inline Outbound */
94                 rc = roc_nix_inl_outb_init(nix);
95                 if (rc) {
96                         plt_err("Failed to initialize nix inline outb, rc=%d",
97                                 rc);
98                         goto cleanup;
99                 }
100
101                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
102
103                 /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
104                 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
105                         goto done;
106
107                 rc = -ENOMEM;
108                 /* Allocate a bitmap to alloc and free sa indexes */
109                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
110                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
111                 if (mem == NULL) {
112                         plt_err("Outbound SA bmap alloc failed");
113
114                         rc |= roc_nix_inl_outb_fini(nix);
115                         goto cleanup;
116                 }
117
118                 rc = -EIO;
119                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
120                 if (!bmap) {
121                         plt_err("Outbound SA bmap init failed");
122
123                         rc |= roc_nix_inl_outb_fini(nix);
124                         plt_free(mem);
125                         goto cleanup;
126                 }
127
128                 for (i = 0; i < dev->outb.max_sa; i++)
129                         plt_bitmap_set(bmap, i);
130
131                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
132                 dev->outb.sa_bmap_mem = mem;
133                 dev->outb.sa_bmap = bmap;
134         }
135
136 done:
137         return 0;
138 cleanup:
139         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
140                 rc |= roc_nix_inl_inb_fini(nix);
141         return rc;
142 }
143
144 static int
145 nix_security_release(struct cnxk_eth_dev *dev)
146 {
147         struct rte_eth_dev *eth_dev = dev->eth_dev;
148         struct cnxk_eth_sec_sess *eth_sec, *tvar;
149         struct roc_nix *nix = &dev->nix;
150         int rc, ret = 0;
151
152         /* Cleanup Inline inbound */
153         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
154                 /* Destroy inbound sessions */
155                 tvar = NULL;
156                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
157                         cnxk_eth_sec_ops.session_destroy(eth_dev,
158                                                          eth_sec->sess);
159
160                 /* Clear lookup mem */
161                 cnxk_nix_lookup_mem_sa_base_clear(dev);
162
163                 rc = roc_nix_inl_inb_fini(nix);
164                 if (rc)
165                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
166                 ret |= rc;
167         }
168
169         /* Cleanup Inline outbound */
170         if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
171             dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
172                 /* Destroy outbound sessions */
173                 tvar = NULL;
174                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
175                         cnxk_eth_sec_ops.session_destroy(eth_dev,
176                                                          eth_sec->sess);
177
178                 rc = roc_nix_inl_outb_fini(nix);
179                 if (rc)
180                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
181                 ret |= rc;
182
183                 plt_bitmap_free(dev->outb.sa_bmap);
184                 plt_free(dev->outb.sa_bmap_mem);
185                 dev->outb.sa_bmap = NULL;
186                 dev->outb.sa_bmap_mem = NULL;
187         }
188
189         dev->inb.inl_dev = false;
190         roc_nix_inb_mode_set(nix, false);
191         dev->nb_rxq_sso = 0;
192         dev->inb.nb_sess = 0;
193         dev->outb.nb_sess = 0;
194         return ret;
195 }
196
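/* Enable Rx scatter and multi-segment Tx when the configured MTU does not
 * fit into a single mbuf data buffer of the Rx mempool.
 */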
197 static void
198 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
199 {
200         struct rte_pktmbuf_pool_private *mbp_priv;
201         struct rte_eth_dev *eth_dev;
202         struct cnxk_eth_dev *dev;
203         uint32_t buffsz;
204
205         dev = rxq->dev;
206         eth_dev = dev->eth_dev;
207
208         /* Get rx buffer size */
209         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
210         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
211
212         if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
213                 dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
214                 dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
215         }
216 }
217
218 int
219 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
220 {
221         struct rte_eth_dev_data *data = eth_dev->data;
222         struct cnxk_eth_rxq_sp *rxq;
223         int rc;
224
225         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
226         /* Setup scatter mode if needed by jumbo */
227         nix_enable_mseg_on_jumbo(rxq);
228
229         rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
230         if (rc)
231                 plt_err("Failed to set default MTU size, rc=%d", rc);
232
233         return rc;
234 }
235
236 static int
237 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
238 {
239         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
240         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
241         struct rte_eth_fc_conf fc_conf = {0};
242         int rc;
243
244         /* Both Rx & Tx flow control are enabled (RTE_FC_FULL) in HW
245          * by the AF driver; update that info in the PMD structure.
246          */
247         rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
248         if (rc)
249                 goto exit;
250
251         fc->mode = fc_conf.mode;
252         fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
253                         (fc_conf.mode == RTE_FC_RX_PAUSE);
254         fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
255                         (fc_conf.mode == RTE_FC_TX_PAUSE);
256
257 exit:
258         return rc;
259 }
260
261 static int
262 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
263 {
264         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
265         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
266         struct rte_eth_fc_conf fc_cfg = {0};
267
268         if (roc_nix_is_vf_or_sdp(&dev->nix))
269                 return 0;
270
271         fc_cfg.mode = fc->mode;
272
273         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
274         if (roc_model_is_cn96_ax() &&
275             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
276             (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
277                 fc_cfg.mode =
278                                 (fc_cfg.mode == RTE_FC_FULL ||
279                                 fc_cfg.mode == RTE_FC_TX_PAUSE) ?
280                                 RTE_FC_TX_PAUSE : RTE_FC_NONE;
281         }
282
283         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
284 }
285
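/* Build the 64-bit rearm_data template word (data_off, refcnt, nb_segs,
 * port) used to initialize mbufs in the Rx fast path.
 */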
286 uint64_t
287 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
288 {
289         uint16_t port_id = dev->eth_dev->data->port_id;
290         struct rte_mbuf mb_def;
291         uint64_t *tmp;
292
293         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
294         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
295                                  offsetof(struct rte_mbuf, data_off) !=
296                          2);
297         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
298                                  offsetof(struct rte_mbuf, data_off) !=
299                          4);
300         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
301                                  offsetof(struct rte_mbuf, data_off) !=
302                          6);
303         mb_def.nb_segs = 1;
304         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
305                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
306         mb_def.port = port_id;
307         rte_mbuf_refcnt_set(&mb_def, 1);
308
309         /* Prevent compiler reordering: rearm_data covers previous fields */
310         rte_compiler_barrier();
311         tmp = (uint64_t *)&mb_def.rearm_data;
312
313         return *tmp;
314 }
315
316 static inline uint8_t
317 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
318 {
319         /*
320          * At most three segments can be supported with W8; choose
321          * NIX_MAXSQESZ_W16 for multi-segment offload.
322          */
323         if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
324                 return NIX_MAXSQESZ_W16;
325         else
326                 return NIX_MAXSQESZ_W8;
327 }
328
329 int
330 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
331                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
332                         const struct rte_eth_txconf *tx_conf)
333 {
334         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
335         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
336         struct cnxk_eth_txq_sp *txq_sp;
337         struct roc_nix_sq *sq;
338         size_t txq_sz;
339         int rc;
340
341         /* Free memory prior to re-allocation if needed. */
342         if (eth_dev->data->tx_queues[qid] != NULL) {
343                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
344                 dev_ops->tx_queue_release(eth_dev, qid);
345                 eth_dev->data->tx_queues[qid] = NULL;
346         }
347
348         /* When Tx Security offload is enabled, increase tx desc count by
349          * max possible outbound desc count.
350          */
351         if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
352                 nb_desc += dev->outb.nb_desc;
353
354         /* Setup ROC SQ */
355         sq = &dev->sqs[qid];
356         sq->qid = qid;
357         sq->nb_desc = nb_desc;
358         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
359
360         rc = roc_nix_sq_init(&dev->nix, sq);
361         if (rc) {
362                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
363                 return rc;
364         }
365
366         rc = -ENOMEM;
367         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
368         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
369         if (!txq_sp) {
370                 plt_err("Failed to alloc tx queue mem");
371                 rc |= roc_nix_sq_fini(sq);
372                 return rc;
373         }
374
375         txq_sp->dev = dev;
376         txq_sp->qid = qid;
377         txq_sp->qconf.conf.tx = *tx_conf;
378         /* Queue config should reflect global offloads */
379         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
380         txq_sp->qconf.nb_desc = nb_desc;
381
382         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
383                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
384                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
385                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
386
387         /* Store start of fast path area */
388         eth_dev->data->tx_queues[qid] = txq_sp + 1;
389         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
390         return 0;
391 }
392
393 static void
394 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
395 {
396         void *txq = eth_dev->data->tx_queues[qid];
397         struct cnxk_eth_txq_sp *txq_sp;
398         struct cnxk_eth_dev *dev;
399         struct roc_nix_sq *sq;
400         int rc;
401
402         if (!txq)
403                 return;
404
405         txq_sp = cnxk_eth_txq_to_sp(txq);
406
407         dev = txq_sp->dev;
408
409         plt_nix_dbg("Releasing txq %u", qid);
410
411         /* Cleanup ROC SQ */
412         sq = &dev->sqs[qid];
413         rc = roc_nix_sq_fini(sq);
414         if (rc)
415                 plt_err("Failed to cleanup sq, rc=%d", rc);
416
417         /* Finally free */
418         plt_free(txq_sp);
419 }
420
421 int
422 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
423                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
424                         const struct rte_eth_rxconf *rx_conf,
425                         struct rte_mempool *mp)
426 {
427         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
428         struct roc_nix *nix = &dev->nix;
429         struct cnxk_eth_rxq_sp *rxq_sp;
430         struct rte_mempool_ops *ops;
431         const char *platform_ops;
432         struct roc_nix_rq *rq;
433         struct roc_nix_cq *cq;
434         uint16_t first_skip;
435         int rc = -EINVAL;
436         size_t rxq_sz;
437
438         /* Sanity checks */
439         if (rx_conf->rx_deferred_start == 1) {
440                 plt_err("Deferred Rx start is not supported");
441                 goto fail;
442         }
443
444         platform_ops = rte_mbuf_platform_mempool_ops();
445         /* This driver needs cnxk_npa mempool ops to work */
446         ops = rte_mempool_get_ops(mp->ops_index);
447         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
448                 plt_err("mempool ops should be of cnxk_npa type");
449                 goto fail;
450         }
451
452         if (mp->pool_id == 0) {
453                 plt_err("Invalid pool_id");
454                 goto fail;
455         }
456
457         /* Free memory prior to re-allocation if needed */
458         if (eth_dev->data->rx_queues[qid] != NULL) {
459                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
460
461                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
462                 dev_ops->rx_queue_release(eth_dev, qid);
463                 eth_dev->data->rx_queues[qid] = NULL;
464         }
465
466         /* Clamp the CQ limit to the size of the packet pool aura for LBK
467          * to avoid meta packet drops, as LBK does not currently support
468          * backpressure.
469          */
470         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
471                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
472
473                 /* Use current RQ's aura limit if inl rq is not available */
474                 if (!pkt_pool_limit)
475                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
476                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
477         }
478
479         /* Setup ROC CQ */
480         cq = &dev->cqs[qid];
481         cq->qid = qid;
482         cq->nb_desc = nb_desc;
483         rc = roc_nix_cq_init(&dev->nix, cq);
484         if (rc) {
485                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
486                 goto fail;
487         }
488
489         /* Setup ROC RQ */
490         rq = &dev->rqs[qid];
491         rq->qid = qid;
492         rq->aura_handle = mp->pool_id;
493         rq->flow_tag_width = 32;
494         rq->sso_ena = false;
495
496         /* Calculate first mbuf skip */
497         first_skip = (sizeof(struct rte_mbuf));
498         first_skip += RTE_PKTMBUF_HEADROOM;
499         first_skip += rte_pktmbuf_priv_size(mp);
500         rq->first_skip = first_skip;
501         rq->later_skip = sizeof(struct rte_mbuf);
502         rq->lpb_size = mp->elt_size;
503
504         /* Enable Inline IPSec on RQ, will not be used for Poll mode */
505         if (roc_nix_inl_inb_is_enabled(nix))
506                 rq->ipsech_ena = true;
507
508         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
509         if (rc) {
510                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
511                 goto cq_fini;
512         }
513
514         /* Allocate and setup fast path rx queue */
515         rc = -ENOMEM;
516         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
517         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
518         if (!rxq_sp) {
519                 plt_err("Failed to alloc rx queue for rq=%d", qid);
520                 goto rq_fini;
521         }
522
523         /* Setup slow path fields */
524         rxq_sp->dev = dev;
525         rxq_sp->qid = qid;
526         rxq_sp->qconf.conf.rx = *rx_conf;
527         /* Queue config should reflect global offloads */
528         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
529         rxq_sp->qconf.nb_desc = nb_desc;
530         rxq_sp->qconf.mp = mp;
531
532         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
533                 /* Setup rq reference for inline dev if present */
534                 rc = roc_nix_inl_dev_rq_get(rq);
535                 if (rc)
536                         goto free_mem;
537         }
538
539         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
540                     cq->nb_desc);
541
542         /* Store start of fast path area */
543         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
544         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
545
546         /* Calculate the delta and freq mult between the PTP HI clock and TSC.
547          * These are needed to derive the raw clock value from the TSC counter;
548          * the read_clock eth op returns that raw clock value.
549          */
550         if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
551                 rc = cnxk_nix_tsc_convert(dev);
552                 if (rc) {
553                         plt_err("Failed to calculate delta and freq mult");
554                         goto rq_fini;
555                 }
556         }
557
558         return 0;
559 free_mem:
560         plt_free(rxq_sp);
561 rq_fini:
562         rc |= roc_nix_rq_fini(rq);
563 cq_fini:
564         rc |= roc_nix_cq_fini(cq);
565 fail:
566         return rc;
567 }
568
569 static void
570 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
571 {
572         void *rxq = eth_dev->data->rx_queues[qid];
573         struct cnxk_eth_rxq_sp *rxq_sp;
574         struct cnxk_eth_dev *dev;
575         struct roc_nix_rq *rq;
576         struct roc_nix_cq *cq;
577         int rc;
578
579         if (!rxq)
580                 return;
581
582         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
583         dev = rxq_sp->dev;
584         rq = &dev->rqs[qid];
585
586         plt_nix_dbg("Releasing rxq %u", qid);
587
588         /* Release rq reference for inline dev if present */
589         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
590                 roc_nix_inl_dev_rq_put(rq);
591
592         /* Cleanup ROC RQ */
593         rc = roc_nix_rq_fini(rq);
594         if (rc)
595                 plt_err("Failed to cleanup rq, rc=%d", rc);
596
597         /* Cleanup ROC CQ */
598         cq = &dev->cqs[qid];
599         rc = roc_nix_cq_fini(cq);
600         if (rc)
601                 plt_err("Failed to cleanup cq, rc=%d", rc);
602
603         /* Finally free fast path area */
604         plt_free(rxq_sp);
605 }
606
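/* Translate ethdev RSS hash fields (rss_hf) into a NIX RSS flow key
 * configuration, picking outer, inner or outer+inner fields per rss_level.
 */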
607 uint32_t
608 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
609                        uint8_t rss_level)
610 {
611         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
612                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
613                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
614                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
615                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
616                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
617                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
618                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
619                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
620                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
621                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
622                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
623         };
624         uint32_t flowkey_cfg = 0;
625
626         dev->ethdev_rss_hf = ethdev_rss;
627
628         if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
629             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
630                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
631         }
632
633         if (ethdev_rss & ETH_RSS_C_VLAN)
634                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
635
636         if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
637                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
638
639         if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
640                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
641
642         if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
643                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
644
645         if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
646                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
647
648         if (ethdev_rss & RSS_IPV4_ENABLE)
649                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
650
651         if (ethdev_rss & RSS_IPV6_ENABLE)
652                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
653
654         if (ethdev_rss & ETH_RSS_TCP)
655                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
656
657         if (ethdev_rss & ETH_RSS_UDP)
658                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
659
660         if (ethdev_rss & ETH_RSS_SCTP)
661                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
662
663         if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
664                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
665
666         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
667                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
668
669         if (ethdev_rss & ETH_RSS_PORT)
670                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
671
672         if (ethdev_rss & ETH_RSS_NVGRE)
673                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
674
675         if (ethdev_rss & ETH_RSS_VXLAN)
676                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
677
678         if (ethdev_rss & ETH_RSS_GENEVE)
679                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
680
681         if (ethdev_rss & ETH_RSS_GTPU)
682                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
683
684         return flowkey_cfg;
685 }
686
687 static void
688 nix_free_queue_mem(struct cnxk_eth_dev *dev)
689 {
690         plt_free(dev->rqs);
691         plt_free(dev->cqs);
692         plt_free(dev->sqs);
693         dev->rqs = NULL;
694         dev->cqs = NULL;
695         dev->sqs = NULL;
696 }
697
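/* Initialize the lists tracking ingress meter profiles, policies and
 * meter objects used by the MTR (ingress policer) ops.
 */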
698 static int
699 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
700 {
701         TAILQ_INIT(&dev->mtr_profiles);
702         TAILQ_INIT(&dev->mtr_policy);
703         TAILQ_INIT(&dev->mtr);
704
705         return 0;
706 }
707
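/* Derive the default RSS flow key configuration from the configured
 * rss_hf and RSS level and program it into the NIX.
 */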
708 static int
709 nix_rss_default_setup(struct cnxk_eth_dev *dev)
710 {
711         struct rte_eth_dev *eth_dev = dev->eth_dev;
712         uint8_t rss_hash_level;
713         uint32_t flowkey_cfg;
714         uint64_t rss_hf;
715
716         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
717         rss_hash_level = ETH_RSS_LEVEL(rss_hf);
718         if (rss_hash_level)
719                 rss_hash_level -= 1;
720
721         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
722         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
723 }
724
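/* Save each Rx/Tx queue's configuration and release the queues so they
 * can be re-created with the same settings after a reconfigure.
 */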
725 static int
726 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
727 {
728         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
729         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
730         struct cnxk_eth_qconf *tx_qconf = NULL;
731         struct cnxk_eth_qconf *rx_qconf = NULL;
732         struct cnxk_eth_rxq_sp *rxq_sp;
733         struct cnxk_eth_txq_sp *txq_sp;
734         int i, nb_rxq, nb_txq;
735         void **txq, **rxq;
736
737         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
738         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
739
740         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
741         if (tx_qconf == NULL) {
742                 plt_err("Failed to allocate memory for tx_qconf");
743                 goto fail;
744         }
745
746         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
747         if (rx_qconf == NULL) {
748                 plt_err("Failed to allocate memory for rx_qconf");
749                 goto fail;
750         }
751
752         txq = eth_dev->data->tx_queues;
753         for (i = 0; i < nb_txq; i++) {
754                 if (txq[i] == NULL) {
755                         tx_qconf[i].valid = false;
756                         plt_info("txq[%d] is already released", i);
757                         continue;
758                 }
759                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
760                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
761                 tx_qconf[i].valid = true;
762                 dev_ops->tx_queue_release(eth_dev, i);
763                 eth_dev->data->tx_queues[i] = NULL;
764         }
765
766         rxq = eth_dev->data->rx_queues;
767         for (i = 0; i < nb_rxq; i++) {
768                 if (rxq[i] == NULL) {
769                         rx_qconf[i].valid = false;
770                         plt_info("rxq[%d] is already released", i);
771                         continue;
772                 }
773                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
774                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
775                 rx_qconf[i].valid = true;
776                 dev_ops->rx_queue_release(eth_dev, i);
777                 eth_dev->data->rx_queues[i] = NULL;
778         }
779
780         dev->tx_qconf = tx_qconf;
781         dev->rx_qconf = rx_qconf;
782         return 0;
783
784 fail:
785         free(tx_qconf);
786         free(rx_qconf);
787         return -ENOMEM;
788 }
789
790 static int
791 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
792 {
793         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
794         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
795         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
796         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
797         int rc, i, nb_rxq, nb_txq;
798
799         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
800         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
801
802         rc = -ENOMEM;
803         /* Set up tx & rx queues with the previous configuration so
804          * that the queues remain functional in cases where ports
805          * are started without reconfiguring the queues.
806          *
807          * The usual reconfig sequence is as below:
808          * port_configure() {
809          *      if(reconfigure) {
810          *              queue_release()
811          *              queue_setup()
812          *      }
813          *      queue_configure() {
814          *              queue_release()
815          *              queue_setup()
816          *      }
817          * }
818          * port_start()
819          *
820          * In some applications' control path, queue_configure() would
821          * NOT be invoked for TXQs/RXQs in port_configure().
822          * In such cases, the queues remain functional after start as
823          * they are already set up in port_configure().
824          */
825         for (i = 0; i < nb_txq; i++) {
826                 if (!tx_qconf[i].valid)
827                         continue;
828                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
829                                              &tx_qconf[i].conf.tx);
830                 if (rc) {
831                         plt_err("Failed to setup tx queue rc=%d", rc);
832                         for (i -= 1; i >= 0; i--)
833                                 dev_ops->tx_queue_release(eth_dev, i);
834                         goto fail;
835                 }
836         }
837
838         free(tx_qconf);
839         tx_qconf = NULL;
840
841         for (i = 0; i < nb_rxq; i++) {
842                 if (!rx_qconf[i].valid)
843                         continue;
844                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
845                                              &rx_qconf[i].conf.rx,
846                                              rx_qconf[i].mp);
847                 if (rc) {
848                         plt_err("Failed to setup rx queue rc=%d", rc);
849                         for (i -= 1; i >= 0; i--)
850                                 dev_ops->rx_queue_release(eth_dev, i);
851                         goto tx_queue_release;
852                 }
853         }
854
855         free(rx_qconf);
856         rx_qconf = NULL;
857
858         return 0;
859
860 tx_queue_release:
861         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
862                 dev_ops->tx_queue_release(eth_dev, i);
863 fail:
864         if (tx_qconf)
865                 free(tx_qconf);
866         if (rx_qconf)
867                 free(rx_qconf);
868
869         return rc;
870 }
871
872 static uint16_t
873 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
874 {
875         RTE_SET_USED(queue);
876         RTE_SET_USED(mbufs);
877         RTE_SET_USED(pkts);
878
879         return 0;
880 }
881
882 static void
883 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
884 {
885         /* These dummy functions are required to support
886          * applications which reconfigure queues without
887          * stopping the tx burst and rx burst threads (e.g. the kni app).
888          * When the queue context is saved, txqs/rxqs are released,
889          * which caused app crashes since rx/tx burst is still running
890          * on different lcores.
891          */
892         eth_dev->tx_pkt_burst = nix_eth_nop_burst;
893         eth_dev->rx_pkt_burst = nix_eth_nop_burst;
894         rte_mb();
895 }
896
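/* Pack the LSO format indices for the IPv4/IPv6 tunnel combinations and
 * their UDP tunnel variants into the 64-bit lso_tun_fmt word.
 */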
897 static int
898 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
899 {
900         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
901         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
902         struct roc_nix *nix = &dev->nix;
903         int rc;
904
905         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
906         if (rc)
907                 return rc;
908
909         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
910                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
911                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
912                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
913
914         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
915                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
916                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
917                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
918         return 0;
919 }
920
921 static int
922 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
923 {
924         struct roc_nix *nix = &dev->nix;
925         int rc;
926
927         /* Nothing much to do if offload is not enabled */
928         if (!(dev->tx_offloads &
929               (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
930                DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
931                 return 0;
932
933         /* Setup LSO formats in AF. It's a no-op if another ethdev has
934          * already set them up.
935          */
936         rc = roc_nix_lso_fmt_setup(nix);
937         if (rc)
938                 return rc;
939
940         return nix_lso_tun_fmt_update(dev);
941 }
942
943 int
944 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
945 {
946         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
947         struct rte_eth_dev_data *data = eth_dev->data;
948         struct rte_eth_conf *conf = &data->dev_conf;
949         struct rte_eth_rxmode *rxmode = &conf->rxmode;
950         struct rte_eth_txmode *txmode = &conf->txmode;
951         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
952         struct roc_nix_fc_cfg fc_cfg = {0};
953         struct roc_nix *nix = &dev->nix;
954         struct rte_ether_addr *ea;
955         uint8_t nb_rxq, nb_txq;
956         uint64_t rx_cfg;
957         void *qs;
958         int rc;
959
960         rc = -EINVAL;
961
962         /* Sanity checks */
963         if (rte_eal_has_hugepages() == 0) {
964                 plt_err("Huge page is not configured");
965                 goto fail_configure;
966         }
967
968         if (conf->dcb_capability_en == 1) {
969                 plt_err("dcb enable is not supported");
970                 goto fail_configure;
971         }
972
973         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
974                 plt_err("Flow director is not supported");
975                 goto fail_configure;
976         }
977
978         if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
979             rxmode->mq_mode != ETH_MQ_RX_RSS) {
980                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
981                 goto fail_configure;
982         }
983
984         if (txmode->mq_mode != ETH_MQ_TX_NONE) {
985                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
986                 goto fail_configure;
987         }
988
989         /* Free the resources allocated from the previous configure */
990         if (dev->configured == 1) {
991                 /* Unregister queue irq's */
992                 roc_nix_unregister_queue_irqs(nix);
993
994                 /* Unregister CQ irqs if present */
995                 if (eth_dev->data->dev_conf.intr_conf.rxq)
996                         roc_nix_unregister_cq_irqs(nix);
997
998                 /* Set no-op functions */
999                 nix_set_nop_rxtx_function(eth_dev);
1000                 /* Store queue config for later */
1001                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1002                 if (rc)
1003                         goto fail_configure;
1004
1005                 /* Cleanup security support */
1006                 rc = nix_security_release(dev);
1007                 if (rc)
1008                         goto fail_configure;
1009
1010                 roc_nix_tm_fini(nix);
1011                 roc_nix_lf_free(nix);
1012         }
1013
1014         dev->rx_offloads = rxmode->offloads;
1015         dev->tx_offloads = txmode->offloads;
1016
1017         /* Prepare rx cfg */
1018         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1019         if (dev->rx_offloads &
1020             (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
1021                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1022                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1023         }
1024         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1025                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1026                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1027
1028         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
1029                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1030                 /* Disable DROP_RE if rx security offload is enabled and
1031                  * the platform does not support it.
1032                  */
1033                 if (dev->ipsecd_drop_re_dis)
1034                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1035         }
1036
1037         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1038         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1039
1040         /* Alloc a nix lf */
1041         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1042         if (rc) {
1043                 plt_err("Failed to init nix_lf rc=%d", rc);
1044                 goto fail_configure;
1045         }
1046
1047         dev->npc.channel = roc_nix_get_base_chan(nix);
1048
1049         nb_rxq = data->nb_rx_queues;
1050         nb_txq = data->nb_tx_queues;
1051         rc = -ENOMEM;
1052         if (nb_rxq) {
1053                 /* Allocate memory for roc rq's and cq's */
1054                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1055                 if (!qs) {
1056                         plt_err("Failed to alloc rqs");
1057                         goto free_nix_lf;
1058                 }
1059                 dev->rqs = qs;
1060
1061                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1062                 if (!qs) {
1063                         plt_err("Failed to alloc cqs");
1064                         goto free_nix_lf;
1065                 }
1066                 dev->cqs = qs;
1067         }
1068
1069         if (nb_txq) {
1070                 /* Allocate memory for roc sq's */
1071                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1072                 if (!qs) {
1073                         plt_err("Failed to alloc sqs");
1074                         goto free_nix_lf;
1075                 }
1076                 dev->sqs = qs;
1077         }
1078
1079         /* Re-enable NIX LF error interrupts */
1080         roc_nix_err_intr_ena_dis(nix, true);
1081         roc_nix_ras_intr_ena_dis(nix, true);
1082
1083         if (nix->rx_ptp_ena &&
1084             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1085                 plt_err("Both PTP and switch header enabled");
1086                 goto free_nix_lf;
1087         }
1088
1089         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
1090         if (rc) {
1091                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1092                 goto free_nix_lf;
1093         }
1094
1095         /* Setup LSO if needed */
1096         rc = nix_lso_fmt_setup(dev);
1097         if (rc) {
1098                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1099                 goto free_nix_lf;
1100         }
1101
1102         /* Configure RSS */
1103         rc = nix_rss_default_setup(dev);
1104         if (rc) {
1105                 plt_err("Failed to configure rss rc=%d", rc);
1106                 goto free_nix_lf;
1107         }
1108
1109         /* Init the default TM scheduler hierarchy */
1110         rc = roc_nix_tm_init(nix);
1111         if (rc) {
1112                 plt_err("Failed to init traffic manager, rc=%d", rc);
1113                 goto free_nix_lf;
1114         }
1115
1116         rc = nix_ingress_policer_setup(dev);
1117         if (rc) {
1118                 plt_err("Failed to setup ingress policer rc=%d", rc);
1119                 goto free_nix_lf;
1120         }
1121
1122         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1123         if (rc) {
1124                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1125                 goto tm_fini;
1126         }
1127
1128         /* Register queue IRQs */
1129         rc = roc_nix_register_queue_irqs(nix);
1130         if (rc) {
1131                 plt_err("Failed to register queue interrupts rc=%d", rc);
1132                 goto tm_fini;
1133         }
1134
1135         /* Register cq IRQs */
1136         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1137                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1138                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1139                                 dev->nix.cints);
1140                         goto q_irq_fini;
1141                 }
1142                 /* Rx interrupt feature cannot work with vector mode because
1143                  * vector mode does not process packets unless at least 4 pkts
1144                  * are received, while cq interrupts are generated even for
1145                  * 1 pkt in the CQ.
1146                  */
1147                 dev->scalar_ena = true;
1148
1149                 rc = roc_nix_register_cq_irqs(nix);
1150                 if (rc) {
1151                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1152                         goto q_irq_fini;
1153                 }
1154         }
1155
1156         /* Configure loop back mode */
1157         rc = roc_nix_mac_loopback_enable(nix,
1158                                          eth_dev->data->dev_conf.lpbk_mode);
1159         if (rc) {
1160                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1161                 goto cq_fini;
1162         }
1163
1164         /* Init flow control configuration */
1165         fc_cfg.cq_cfg_valid = false;
1166         fc_cfg.rxchan_cfg.enable = true;
1167         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1168         if (rc) {
1169                 plt_err("Failed to initialize flow control rc=%d", rc);
1170                 goto cq_fini;
1171         }
1172
1173         /* Update flow control configuration to PMD */
1174         rc = nix_init_flow_ctrl_config(eth_dev);
1175         if (rc) {
1176                 plt_err("Failed to initialize flow control rc=%d", rc);
1177                 goto cq_fini;
1178         }
1179
1180         /* Setup Inline security support */
1181         rc = nix_security_setup(dev);
1182         if (rc)
1183                 goto cq_fini;
1184
1185         /*
1186          * Restore queue config for the case where a reconfigure is followed
1187          * by another reconfigure without the application invoking queue setup.
1188          */
1189         if (dev->configured == 1) {
1190                 rc = nix_restore_queue_cfg(eth_dev);
1191                 if (rc)
1192                         goto sec_release;
1193         }
1194
1195         /* Update the mac address */
1196         ea = eth_dev->data->mac_addrs;
1197         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1198         if (rte_is_zero_ether_addr(ea))
1199                 rte_eth_random_addr((uint8_t *)ea);
1200
1201         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1202
1203         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1204                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1205                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1206                     dev->rx_offloads, dev->tx_offloads);
1207
1208         /* All good */
1209         dev->configured = 1;
1210         dev->nb_rxq = data->nb_rx_queues;
1211         dev->nb_txq = data->nb_tx_queues;
1212         return 0;
1213
1214 sec_release:
1215         rc |= nix_security_release(dev);
1216 cq_fini:
1217         roc_nix_unregister_cq_irqs(nix);
1218 q_irq_fini:
1219         roc_nix_unregister_queue_irqs(nix);
1220 tm_fini:
1221         roc_nix_tm_fini(nix);
1222 free_nix_lf:
1223         nix_free_queue_mem(dev);
1224         rc |= roc_nix_lf_free(nix);
1225 fail_configure:
1226         dev->configured = 0;
1227         return rc;
1228 }
1229
1230 int
1231 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1232 {
1233         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1234         struct rte_eth_dev_data *data = eth_dev->data;
1235         struct roc_nix_sq *sq = &dev->sqs[qid];
1236         int rc = -EINVAL;
1237
1238         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1239                 return 0;
1240
1241         rc = roc_nix_tm_sq_aura_fc(sq, true);
1242         if (rc) {
1243                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1244                 goto done;
1245         }
1246
1247         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1248 done:
1249         return rc;
1250 }
1251
1252 int
1253 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1254 {
1255         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1256         struct rte_eth_dev_data *data = eth_dev->data;
1257         struct roc_nix_sq *sq = &dev->sqs[qid];
1258         int rc;
1259
1260         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1261                 return 0;
1262
1263         rc = roc_nix_tm_sq_aura_fc(sq, false);
1264         if (rc) {
1265                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1266                         rc);
1267                 goto done;
1268         }
1269
1270         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1271 done:
1272         return rc;
1273 }
1274
1275 static int
1276 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1277 {
1278         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1279         struct rte_eth_dev_data *data = eth_dev->data;
1280         struct roc_nix_rq *rq = &dev->rqs[qid];
1281         int rc;
1282
1283         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1284                 return 0;
1285
1286         rc = roc_nix_rq_ena_dis(rq, true);
1287         if (rc) {
1288                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1289                 goto done;
1290         }
1291
1292         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1293 done:
1294         return rc;
1295 }
1296
1297 static int
1298 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1299 {
1300         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1301         struct rte_eth_dev_data *data = eth_dev->data;
1302         struct roc_nix_rq *rq = &dev->rqs[qid];
1303         int rc;
1304
1305         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1306                 return 0;
1307
1308         rc = roc_nix_rq_ena_dis(rq, false);
1309         if (rc) {
1310                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1311                 goto done;
1312         }
1313
1314         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1315 done:
1316         return rc;
1317 }
1318
1319 static int
1320 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1321 {
1322         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1323         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1324         struct rte_mbuf *rx_pkts[32];
1325         struct rte_eth_link link;
1326         int count, i, j, rc;
1327         void *rxq;
1328
1329         /* Disable switch hdr pkind */
1330         roc_nix_switch_hdr_set(&dev->nix, 0);
1331
1332         /* Stop link change events */
1333         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1334                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1335
1336         /* Disable Rx via NPC */
1337         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1338
1339         /* Stop rx queues and free up pkts pending */
1340         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1341                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1342                 if (rc)
1343                         continue;
1344
1345                 rxq = eth_dev->data->rx_queues[i];
1346                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1347                 while (count) {
1348                         for (j = 0; j < count; j++)
1349                                 rte_pktmbuf_free(rx_pkts[j]);
1350                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1351                 }
1352         }
1353
1354         /* Stop tx queues  */
1355         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1356                 dev_ops->tx_queue_stop(eth_dev, i);
1357
1358         /* Bring down link status internally */
1359         memset(&link, 0, sizeof(link));
1360         rte_eth_linkstatus_set(eth_dev, &link);
1361
1362         return 0;
1363 }
1364
1365 int
1366 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1367 {
1368         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1369         int rc, i;
1370
1371         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1372                 rc = nix_recalc_mtu(eth_dev);
1373                 if (rc)
1374                         return rc;
1375         }
1376
1377         /* Start rx queues */
1378         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1379                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1380                 if (rc)
1381                         return rc;
1382         }
1383
1384         /* Start tx queues  */
1385         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1386                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1387                 if (rc)
1388                         return rc;
1389         }
1390
1391         /* Update Flow control configuration */
1392         rc = nix_update_flow_ctrl_config(eth_dev);
1393         if (rc) {
1394                 plt_err("Failed to enable flow control. error code(%d)", rc);
1395                 return rc;
1396         }
1397
1398         /* Enable Rx in NPC */
1399         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1400         if (rc) {
1401                 plt_err("Failed to enable NPC rx %d", rc);
1402                 return rc;
1403         }
1404
1405         cnxk_nix_toggle_flag_link_cfg(dev, true);
1406
1407         /* Start link change events */
1408         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1409                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1410                 if (rc) {
1411                         plt_err("Failed to start cgx link event %d", rc);
1412                         goto rx_disable;
1413                 }
1414         }
1415
1416         /* Enable PTP if it is requested by the user or is already
1417          * enabled on the PF owning this VF.
1418          */
1419         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1420         if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1421                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1422         else
1423                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1424
1425         if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1426                 rc = rte_mbuf_dyn_rx_timestamp_register
1427                         (&dev->tstamp.tstamp_dynfield_offset,
1428                          &dev->tstamp.rx_tstamp_dynflag);
1429                 if (rc != 0) {
1430                         plt_err("Failed to register Rx timestamp field/flag");
1431                         goto rx_disable;
1432                 }
1433         }
1434
1435         cnxk_nix_toggle_flag_link_cfg(dev, false);
1436
1437         return 0;
1438
1439 rx_disable:
1440         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1441         cnxk_nix_toggle_flag_link_cfg(dev, false);
1442         return rc;
1443 }
1444
1445 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1446 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1447
1448 /* CNXK platform independent eth dev ops */
1449 struct eth_dev_ops cnxk_eth_dev_ops = {
1450         .mtu_set = cnxk_nix_mtu_set,
1451         .mac_addr_add = cnxk_nix_mac_addr_add,
1452         .mac_addr_remove = cnxk_nix_mac_addr_del,
1453         .mac_addr_set = cnxk_nix_mac_addr_set,
1454         .dev_infos_get = cnxk_nix_info_get,
1455         .link_update = cnxk_nix_link_update,
1456         .tx_queue_release = cnxk_nix_tx_queue_release,
1457         .rx_queue_release = cnxk_nix_rx_queue_release,
1458         .dev_stop = cnxk_nix_dev_stop,
1459         .dev_close = cnxk_nix_dev_close,
1460         .dev_reset = cnxk_nix_dev_reset,
1461         .tx_queue_start = cnxk_nix_tx_queue_start,
1462         .rx_queue_start = cnxk_nix_rx_queue_start,
1463         .rx_queue_stop = cnxk_nix_rx_queue_stop,
1464         .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1465         .promiscuous_enable = cnxk_nix_promisc_enable,
1466         .promiscuous_disable = cnxk_nix_promisc_disable,
1467         .allmulticast_enable = cnxk_nix_allmulticast_enable,
1468         .allmulticast_disable = cnxk_nix_allmulticast_disable,
1469         .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1470         .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1471         .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1472         .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1473         .dev_set_link_up = cnxk_nix_set_link_up,
1474         .dev_set_link_down = cnxk_nix_set_link_down,
1475         .get_module_info = cnxk_nix_get_module_info,
1476         .get_module_eeprom = cnxk_nix_get_module_eeprom,
1477         .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1478         .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1479         .pool_ops_supported = cnxk_nix_pool_ops_supported,
1480         .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1481         .stats_get = cnxk_nix_stats_get,
1482         .stats_reset = cnxk_nix_stats_reset,
1483         .xstats_get = cnxk_nix_xstats_get,
1484         .xstats_get_names = cnxk_nix_xstats_get_names,
1485         .xstats_reset = cnxk_nix_xstats_reset,
1486         .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1487         .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1488         .fw_version_get = cnxk_nix_fw_version_get,
1489         .rxq_info_get = cnxk_nix_rxq_info_get,
1490         .txq_info_get = cnxk_nix_txq_info_get,
1491         .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1492         .flow_ops_get = cnxk_nix_flow_ops_get,
1493         .get_reg = cnxk_nix_dev_get_reg,
1494         .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1495         .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1496         .timesync_read_time = cnxk_nix_timesync_read_time,
1497         .timesync_write_time = cnxk_nix_timesync_write_time,
1498         .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1499         .read_clock = cnxk_nix_read_clock,
1500         .reta_update = cnxk_nix_reta_update,
1501         .reta_query = cnxk_nix_reta_query,
1502         .rss_hash_update = cnxk_nix_rss_hash_update,
1503         .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1504         .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1505         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1506         .tm_ops_get = cnxk_nix_tm_ops_get,
1507         .mtr_ops_get = cnxk_nix_mtr_ops_get,
1508 };
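
/*
 * Illustrative sketch, not part of the driver source: the .mtr_ops_get
 * callback above is what lets an application reach this PMD through the
 * generic rte_mtr API. The port/profile/meter IDs and the srTCM rates
 * below are example values; a meter policy (added with
 * rte_mtr_meter_policy_add()) may also be required before the meter can
 * be attached to flows.
 */
#include <rte_mtr.h>

static int
example_srtcm_meter_create(uint16_t port_id)
{
        struct rte_mtr_meter_profile profile = {
                .alg = RTE_MTR_SRTCM_RFC2697,
                .srtcm_rfc2697 = {
                        .cir = 1250000, /* committed rate, bytes/sec */
                        .cbs = 2048,    /* committed burst, bytes */
                        .ebs = 4096,    /* excess burst, bytes */
                },
        };
        struct rte_mtr_params params = {
                .meter_profile_id = 1,
                .meter_enable = 1,
        };
        struct rte_mtr_error error;
        int rc;

        /* Register the profile, then create a meter object that uses it */
        rc = rte_mtr_meter_profile_add(port_id, 1, &profile, &error);
        if (rc)
                return rc;

        return rte_mtr_create(port_id, 1, &params, 0, &error);
}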
1509
1510 static int
1511 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1512 {
1513         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1514         struct rte_security_ctx *sec_ctx;
1515         struct roc_nix *nix = &dev->nix;
1516         struct rte_pci_device *pci_dev;
1517         int rc, max_entries;
1518
1519         eth_dev->dev_ops = &cnxk_eth_dev_ops;
1520
1521         /* Alloc security context */
1522         sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1523         if (!sec_ctx)
1524                 return -ENOMEM;
1525         sec_ctx->device = eth_dev;
1526         sec_ctx->ops = &cnxk_eth_sec_ops;
1527         sec_ctx->flags =
1528                 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1529         eth_dev->security_ctx = sec_ctx;
1530         TAILQ_INIT(&dev->inb.list);
1531         TAILQ_INIT(&dev->outb.list);
1532
1533         /* For secondary processes, the primary has done all the work */
1534         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1535                 return 0;
1536
1537         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1538         rte_eth_copy_pci_info(eth_dev, pci_dev);
1539
1540         /* Parse devargs string */
1541         rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1542         if (rc) {
1543                 plt_err("Failed to parse devargs rc=%d", rc);
1544                 goto error;
1545         }
1546
1547         /* Initialize base roc nix */
1548         nix->pci_dev = pci_dev;
1549         nix->hw_vlan_ins = true;
1550         rc = roc_nix_dev_init(nix);
1551         if (rc) {
1552                 plt_err("Failed to initialize roc nix rc=%d", rc);
1553                 goto error;
1554         }
1555
1556         /* Register callback for link status (up/down) events */
1557         roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1558
1559         /* Register callback for link info get requests */
1560         roc_nix_mac_link_info_get_cb_register(nix,
1561                                               cnxk_eth_dev_link_status_get_cb);
1562
1563         dev->eth_dev = eth_dev;
1564         dev->configured = 0;
1565         dev->ptype_disable = 0;
1566
1567         /* For VFs, the returned max_entries will be 0, but one entry must
1568          * still be allocated to hold the default MAC address, so set it to 1.
1569          */
1570         if (roc_nix_is_vf_or_sdp(nix))
1571                 max_entries = 1;
1572         else
1573                 max_entries = roc_nix_mac_max_entries_get(nix);
1574
1575         if (max_entries <= 0) {
1576                 plt_err("Failed to get max entries for mac addr");
1577                 rc = -ENOTSUP;
1578                 goto dev_fini;
1579         }
1580
1581         eth_dev->data->mac_addrs =
1582                 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1583         if (eth_dev->data->mac_addrs == NULL) {
1584                 plt_err("Failed to allocate memory for mac addr");
1585                 rc = -ENOMEM;
1586                 goto dev_fini;
1587         }
1588
1589         dev->max_mac_entries = max_entries;
1590         dev->dmac_filter_count = 1;
1591
1592         /* Get mac address */
1593         rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1594         if (rc) {
1595                 plt_err("Failed to get mac addr, rc=%d", rc);
1596                 goto free_mac_addrs;
1597         }
1598
1599         /* Update the mac address */
1600         memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1601
1602         if (!roc_nix_is_vf_or_sdp(nix)) {
1603                 /* Sync same MAC address to CGX/RPM table */
1604                 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1605                 if (rc) {
1606                         plt_err("Failed to set mac addr, rc=%d", rc);
1607                         goto free_mac_addrs;
1608                 }
1609         }
1610
1611         /* Union of all capabilities supported by CNXK.
1612          * Platform specific capabilities will be
1613          * updated later.
1614          */
1615         dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1616         dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1617         dev->speed_capa = nix_get_speed_capa(dev);
1618
1619         /* Initialize roc npc */
1620         dev->npc.roc_nix = nix;
1621         rc = roc_npc_init(&dev->npc);
1622         if (rc)
1623                 goto free_mac_addrs;
1624
1625         plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1626                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1627                     eth_dev->data->port_id, roc_nix_get_pf(nix),
1628                     roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1629                     dev->rx_offload_capa, dev->tx_offload_capa);
1630         return 0;
1631
1632 free_mac_addrs:
1633         rte_free(eth_dev->data->mac_addrs);
1634 dev_fini:
1635         roc_nix_dev_fini(nix);
1636 error:
1637         plt_err("Failed to init nix eth_dev rc=%d", rc);
1638         return rc;
1639 }
1640
1641 static int
1642 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1643 {
1644         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1645         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1646         struct roc_nix *nix = &dev->nix;
1647         int rc, i;
1648
1649         plt_free(eth_dev->security_ctx);
1650         eth_dev->security_ctx = NULL;
1651
1652         /* Nothing to be done for secondary processes */
1653         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1654                 return 0;
1655
1656         /* Clear the flag since we are closing down */
1657         dev->configured = 0;
1658
1659         roc_nix_npc_rx_ena_dis(nix, false);
1660
1661         /* Disable and free rte_flow entries */
1662         roc_npc_fini(&dev->npc);
1663
1664         /* Disable link status events */
1665         roc_nix_mac_link_event_start_stop(nix, false);
1666
1667         /* Unregister the link update op; this is required to stop VFs from
1668          * receiving link status updates on the exit path.
1669          */
1670         roc_nix_mac_link_cb_unregister(nix);
1671
1672         /* Free up SQs */
1673         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1674                 dev_ops->tx_queue_release(eth_dev, i);
1675                 eth_dev->data->tx_queues[i] = NULL;
1676         }
1677         eth_dev->data->nb_tx_queues = 0;
1678
1679         /* Free up RQs and CQs */
1680         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1681                 dev_ops->rx_queue_release(eth_dev, i);
1682                 eth_dev->data->rx_queues[i] = NULL;
1683         }
1684         eth_dev->data->nb_rx_queues = 0;
1685
1686         /* Free security resources */
1687         nix_security_release(dev);
1688
1689         /* Free tm resources */
1690         roc_nix_tm_fini(nix);
1691
1692         /* Unregister queue irqs */
1693         roc_nix_unregister_queue_irqs(nix);
1694
1695         /* Unregister cq irqs */
1696         if (eth_dev->data->dev_conf.intr_conf.rxq)
1697                 roc_nix_unregister_cq_irqs(nix);
1698
1699         /* Free ROC RQ, SQ and CQ memory */
1700         nix_free_queue_mem(dev);
1701
1702         /* Free nix lf resources */
1703         rc = roc_nix_lf_free(nix);
1704         if (rc)
1705                 plt_err("Failed to free nix lf, rc=%d", rc);
1706
1707         rte_free(eth_dev->data->mac_addrs);
1708         eth_dev->data->mac_addrs = NULL;
1709
1710         rc = roc_nix_dev_fini(nix);
1711         /* Can be freed later by PMD if NPA LF is in use */
1712         if (rc == -EAGAIN) {
1713                 if (!reset)
1714                         eth_dev->data->dev_private = NULL;
1715                 return 0;
1716         } else if (rc) {
1717                 plt_err("Failed in nix dev fini, rc=%d", rc);
1718         }
1719
1720         return rc;
1721 }
1722
1723 static int
1724 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1725 {
1726         cnxk_eth_dev_uninit(eth_dev, false);
1727         return 0;
1728 }
1729
1730 static int
1731 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1732 {
1733         int rc;
1734
1735         rc = cnxk_eth_dev_uninit(eth_dev, true);
1736         if (rc)
1737                 return rc;
1738
1739         return cnxk_eth_dev_init(eth_dev);
1740 }
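
/*
 * Illustrative sketch, not part of the driver source: the .dev_reset and
 * .dev_close ops wired up above are reached through the generic ethdev
 * API from application teardown/recovery code; port_id is an example
 * value.
 */
#include <stdbool.h>
#include <rte_ethdev.h>

static void
example_recover_or_close(uint16_t port_id, bool recover)
{
        if (recover) {
                /* Runs cnxk_eth_dev_uninit(reset = true) then cnxk_eth_dev_init() */
                if (rte_eth_dev_reset(port_id) != 0)
                        rte_eth_dev_close(port_id);
        } else {
                /* Runs cnxk_eth_dev_uninit(reset = false) via cnxk_nix_dev_close() */
                rte_eth_dev_close(port_id);
        }
}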
1741
1742 int
1743 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1744 {
1745         struct rte_eth_dev *eth_dev;
1746         struct roc_nix *nix;
1747         int rc = -EINVAL;
1748
1749         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1750         if (eth_dev) {
1751                 /* Cleanup eth dev */
1752                 rc = cnxk_eth_dev_uninit(eth_dev, false);
1753                 if (rc)
1754                         return rc;
1755
1756                 rte_eth_dev_release_port(eth_dev);
1757         }
1758
1759         /* Nothing to be done for secondary processes */
1760         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1761                 return 0;
1762
1763         /* Check if this device is hosting the common resource */
1764         nix = roc_idev_npa_nix_get();
1765         if (!nix || nix->pci_dev != pci_dev)
1766                 return 0;
1767
1768         /* Try nix fini now */
1769         rc = roc_nix_dev_fini(nix);
1770         if (rc == -EAGAIN) {
1771                 plt_info("%s: common resource in use by other devices",
1772                          pci_dev->name);
1773                 goto exit;
1774         } else if (rc) {
1775                 plt_err("Failed in nix dev fini, rc=%d", rc);
1776                 goto exit;
1777         }
1778
1779         /* Free the device pointer since rte_ethdev no longer references it */
1780         rte_free(nix);
1781 exit:
1782         return rc;
1783 }
1784
1785 int
1786 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1787 {
1788         int rc;
1789
1790         RTE_SET_USED(pci_drv);
1791
1792         rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1793                                            cnxk_eth_dev_init);
1794
1795         /* On error in a secondary process, recheck whether the port exists
1796          * in the primary process or is in the middle of detaching.
1797          */
1798         if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1799                 if (!rte_eth_dev_allocated(pci_dev->device.name))
1800                         return 0;
1801         return rc;
1802 }
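
/*
 * Illustrative sketch, not part of this file: cnxk_nix_probe() and
 * cnxk_nix_remove() are common helpers, and the real PCI driver
 * registration lives in the platform-specific cn9k/cn10k ethdev files.
 * A hypothetical minimal registration using these hooks could look like
 * the following; the "example_nix" names and the empty ID table are
 * placeholders, not actual device IDs.
 */
#include <rte_bus_pci.h>

static const struct rte_pci_id example_nix_pci_map[] = {
        /* A real driver lists its NIX PF/VF PCI device IDs here */
        { .vendor_id = 0 }, /* sentinel */
};

static struct rte_pci_driver example_nix_pci = {
        .id_table = example_nix_pci_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
                     RTE_PCI_DRV_INTR_LSC,
        .probe = cnxk_nix_probe,
        .remove = cnxk_nix_remove,
};

RTE_PMD_REGISTER_PCI(net_example_nix, example_nix_pci);
RTE_PMD_REGISTER_PCI_TABLE(net_example_nix, example_nix_pci_map);
RTE_PMD_REGISTER_KMOD_DEP(net_example_nix, "vfio-pci");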