drivers/net/cnxk/cnxk_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
6 static inline uint64_t
7 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
8 {
9         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
10
11         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
12             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
13                 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
14
15         return capa;
16 }
17
18 static inline uint64_t
19 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
20 {
21         RTE_SET_USED(dev);
22         return CNXK_NIX_TX_OFFLOAD_CAPA;
23 }
24
25 static inline uint32_t
26 nix_get_speed_capa(struct cnxk_eth_dev *dev)
27 {
28         uint32_t speed_capa;
29
30         /* Auto negotiation disabled */
31         speed_capa = ETH_LINK_SPEED_FIXED;
32         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
33                 speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
34                               ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
35                               ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
36         }
37
38         return speed_capa;
39 }
40
41 int
42 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
43 {
44         struct roc_nix *nix = &dev->nix;
45
46         if (dev->inb.inl_dev == use_inl_dev)
47                 return 0;
48
49         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
50                     dev->inb.nb_sess, !!dev->inb.inl_dev);
51
52         /* Change the mode */
53         dev->inb.inl_dev = use_inl_dev;
54
55         /* Update RoC for NPC rule insertion */
56         roc_nix_inb_mode_set(nix, use_inl_dev);
57
58         /* Setup lookup mem */
59         return cnxk_nix_lookup_mem_sa_base_set(dev);
60 }
61
62 static int
63 nix_security_setup(struct cnxk_eth_dev *dev)
64 {
65         struct roc_nix *nix = &dev->nix;
66         int i, rc = 0;
67
68         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
69                 /* Setup Inline Inbound */
70                 rc = roc_nix_inl_inb_init(nix);
71                 if (rc) {
72                         plt_err("Failed to initialize nix inline inb, rc=%d",
73                                 rc);
74                         return rc;
75                 }
76
77                 /* By default, pick the inline device for poll mode.
78                  * This will be overridden when event mode RQs are set up.
79                  */
80                 cnxk_nix_inb_mode_set(dev, true);
81         }
82
83         if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
84             dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
85                 struct plt_bitmap *bmap;
86                 size_t bmap_sz;
87                 void *mem;
88
89                 /* Setup enough descriptors for all tx queues */
90                 nix->outb_nb_desc = dev->outb.nb_desc;
91                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
92
93                 /* Setup Inline Outbound */
94                 rc = roc_nix_inl_outb_init(nix);
95                 if (rc) {
96                         plt_err("Failed to initialize nix inline outb, rc=%d",
97                                 rc);
98                         goto cleanup;
99                 }
100
101                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
102
103                 /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
104                 if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
105                         goto done;
106
107                 rc = -ENOMEM;
108                 /* Allocate a bitmap to alloc and free sa indexes */
109                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
110                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
111                 if (mem == NULL) {
112                         plt_err("Outbound SA bmap alloc failed");
113
114                         rc |= roc_nix_inl_outb_fini(nix);
115                         goto cleanup;
116                 }
117
118                 rc = -EIO;
119                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
120                 if (!bmap) {
121                         plt_err("Outbound SA bmap init failed");
122
123                         rc |= roc_nix_inl_outb_fini(nix);
124                         plt_free(mem);
125                         goto cleanup;
126                 }
127
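                /* Pre-populate the bitmap with all outbound SA indexes
                 * (0..max_sa - 1) so they can be handed out later.
                 */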
128                 for (i = 0; i < dev->outb.max_sa; i++)
129                         plt_bitmap_set(bmap, i);
130
131                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
132                 dev->outb.sa_bmap_mem = mem;
133                 dev->outb.sa_bmap = bmap;
134         }
135
136 done:
137         return 0;
138 cleanup:
139         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
140                 rc |= roc_nix_inl_inb_fini(nix);
141         return rc;
142 }
143
144 static int
145 nix_security_release(struct cnxk_eth_dev *dev)
146 {
147         struct rte_eth_dev *eth_dev = dev->eth_dev;
148         struct cnxk_eth_sec_sess *eth_sec, *tvar;
149         struct roc_nix *nix = &dev->nix;
150         int rc, ret = 0;
151
152         /* Cleanup Inline inbound */
153         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
154                 /* Destroy inbound sessions */
155                 tvar = NULL;
156                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
157                         cnxk_eth_sec_ops.session_destroy(eth_dev,
158                                                          eth_sec->sess);
159
160                 /* Clear lookup mem */
161                 cnxk_nix_lookup_mem_sa_base_clear(dev);
162
163                 rc = roc_nix_inl_inb_fini(nix);
164                 if (rc)
165                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
166                 ret |= rc;
167         }
168
169         /* Cleanup Inline outbound */
170         if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
171             dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
172                 /* Destroy outbound sessions */
173                 tvar = NULL;
174                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
175                         cnxk_eth_sec_ops.session_destroy(eth_dev,
176                                                          eth_sec->sess);
177
178                 rc = roc_nix_inl_outb_fini(nix);
179                 if (rc)
180                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
181                 ret |= rc;
182
183                 plt_bitmap_free(dev->outb.sa_bmap);
184                 plt_free(dev->outb.sa_bmap_mem);
185                 dev->outb.sa_bmap = NULL;
186                 dev->outb.sa_bmap_mem = NULL;
187         }
188
189         dev->inb.inl_dev = false;
190         roc_nix_inb_mode_set(nix, false);
191         dev->nb_rxq_sso = 0;
192         dev->inb.nb_sess = 0;
193         dev->outb.nb_sess = 0;
194         return ret;
195 }
196
197 static void
198 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
199 {
200         struct rte_pktmbuf_pool_private *mbp_priv;
201         struct rte_eth_dev *eth_dev;
202         struct cnxk_eth_dev *dev;
203         uint32_t buffsz;
204
205         dev = rxq->dev;
206         eth_dev = dev->eth_dev;
207
208         /* Get rx buffer size */
209         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
210         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
211
212         if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
213                 dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
214                 dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
215         }
216 }
217
218 int
219 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
220 {
221         struct rte_eth_dev_data *data = eth_dev->data;
222         struct cnxk_eth_rxq_sp *rxq;
223         uint16_t mtu;
224         int rc;
225
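        /* rx_queues[] holds the start of the fast path area (rxq_sp + 1),
         * so step back one element to reach the slow path queue struct.
         */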
226         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
227         /* Setup scatter mode if needed by jumbo */
228         nix_enable_mseg_on_jumbo(rxq);
229
230         /* Setup MTU based on max_rx_pkt_len */
231         mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
232                                 CNXK_NIX_MAX_VTAG_ACT_SIZE;
233
234         rc = cnxk_nix_mtu_set(eth_dev, mtu);
235         if (rc)
236                 plt_err("Failed to set default MTU size, rc=%d", rc);
237
238         return rc;
239 }
240
241 static int
242 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
243 {
244         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
245         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
246         struct rte_eth_fc_conf fc_conf = {0};
247         int rc;
248
249         /* Both Rx & Tx flow ctrl get enabled (RTE_FC_FULL) in HW
250          * by the AF driver; update that info in the PMD structure.
251          */
252         rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
253         if (rc)
254                 goto exit;
255
256         fc->mode = fc_conf.mode;
257         fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
258                         (fc_conf.mode == RTE_FC_RX_PAUSE);
259         fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
260                         (fc_conf.mode == RTE_FC_TX_PAUSE);
261
262 exit:
263         return rc;
264 }
265
266 static int
267 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
268 {
269         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
270         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
271         struct rte_eth_fc_conf fc_cfg = {0};
272
273         if (roc_nix_is_vf_or_sdp(&dev->nix))
274                 return 0;
275
276         fc_cfg.mode = fc->mode;
277
278         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
279         if (roc_model_is_cn96_ax() &&
280             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
281             (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
282                 fc_cfg.mode =
283                                 (fc_cfg.mode == RTE_FC_FULL ||
284                                 fc_cfg.mode == RTE_FC_TX_PAUSE) ?
285                                 RTE_FC_TX_PAUSE : RTE_FC_NONE;
286         }
287
288         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
289 }
290
291 uint64_t
292 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
293 {
294         uint16_t port_id = dev->eth_dev->data->port_id;
295         struct rte_mbuf mb_def;
296         uint64_t *tmp;
297
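        /* The checks below ensure data_off, refcnt, nb_segs and port stay
         * contiguous, so the template built here can be applied to
         * rearm_data as a single 64-bit store in the Rx path.
         */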
298         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
299         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
300                                  offsetof(struct rte_mbuf, data_off) !=
301                          2);
302         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
303                                  offsetof(struct rte_mbuf, data_off) !=
304                          4);
305         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
306                                  offsetof(struct rte_mbuf, data_off) !=
307                          6);
308         mb_def.nb_segs = 1;
309         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
310                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
311         mb_def.port = port_id;
312         rte_mbuf_refcnt_set(&mb_def, 1);
313
314         /* Prevent compiler reordering: rearm_data covers previous fields */
315         rte_compiler_barrier();
316         tmp = (uint64_t *)&mb_def.rearm_data;
317
318         return *tmp;
319 }
320
321 static inline uint8_t
322 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
323 {
324         /*
325          * A maximum of three segments can be supported with W8; choose
326          * NIX_MAXSQESZ_W16 for multi-segment offload.
327          */
328         if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
329                 return NIX_MAXSQESZ_W16;
330         else
331                 return NIX_MAXSQESZ_W8;
332 }
333
334 int
335 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
336                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
337                         const struct rte_eth_txconf *tx_conf)
338 {
339         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
340         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
341         struct cnxk_eth_txq_sp *txq_sp;
342         struct roc_nix_sq *sq;
343         size_t txq_sz;
344         int rc;
345
346         /* Free memory prior to re-allocation if needed. */
347         if (eth_dev->data->tx_queues[qid] != NULL) {
348                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
349                 dev_ops->tx_queue_release(eth_dev, qid);
350                 eth_dev->data->tx_queues[qid] = NULL;
351         }
352
353         /* When Tx Security offload is enabled, increase tx desc count by
354          * max possible outbound desc count.
355          */
356         if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
357                 nb_desc += dev->outb.nb_desc;
358
359         /* Setup ROC SQ */
360         sq = &dev->sqs[qid];
361         sq->qid = qid;
362         sq->nb_desc = nb_desc;
363         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
364
365         rc = roc_nix_sq_init(&dev->nix, sq);
366         if (rc) {
367                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
368                 return rc;
369         }
370
371         rc = -ENOMEM;
372         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
373         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
374         if (!txq_sp) {
375                 plt_err("Failed to alloc tx queue mem");
376                 rc |= roc_nix_sq_fini(sq);
377                 return rc;
378         }
379
380         txq_sp->dev = dev;
381         txq_sp->qid = qid;
382         txq_sp->qconf.conf.tx = *tx_conf;
383         /* Queue config should reflect global offloads */
384         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
385         txq_sp->qconf.nb_desc = nb_desc;
386
387         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
388                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
389                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
390                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
391
392         /* Store start of fast path area */
393         eth_dev->data->tx_queues[qid] = txq_sp + 1;
394         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
395         return 0;
396 }
397
398 static void
399 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
400 {
401         void *txq = eth_dev->data->tx_queues[qid];
402         struct cnxk_eth_txq_sp *txq_sp;
403         struct cnxk_eth_dev *dev;
404         struct roc_nix_sq *sq;
405         int rc;
406
407         if (!txq)
408                 return;
409
410         txq_sp = cnxk_eth_txq_to_sp(txq);
411
412         dev = txq_sp->dev;
413
414         plt_nix_dbg("Releasing txq %u", qid);
415
416         /* Cleanup ROC SQ */
417         sq = &dev->sqs[qid];
418         rc = roc_nix_sq_fini(sq);
419         if (rc)
420                 plt_err("Failed to cleanup sq, rc=%d", rc);
421
422         /* Finally free */
423         plt_free(txq_sp);
424 }
425
426 int
427 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
428                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
429                         const struct rte_eth_rxconf *rx_conf,
430                         struct rte_mempool *mp)
431 {
432         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
433         struct roc_nix *nix = &dev->nix;
434         struct cnxk_eth_rxq_sp *rxq_sp;
435         struct rte_mempool_ops *ops;
436         const char *platform_ops;
437         struct roc_nix_rq *rq;
438         struct roc_nix_cq *cq;
439         uint16_t first_skip;
440         int rc = -EINVAL;
441         size_t rxq_sz;
442
443         /* Sanity checks */
444         if (rx_conf->rx_deferred_start == 1) {
445                 plt_err("Deferred Rx start is not supported");
446                 goto fail;
447         }
448
449         platform_ops = rte_mbuf_platform_mempool_ops();
450         /* This driver needs cnxk_npa mempool ops to work */
451         ops = rte_mempool_get_ops(mp->ops_index);
452         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
453                 plt_err("mempool ops should be of cnxk_npa type");
454                 goto fail;
455         }
456
457         if (mp->pool_id == 0) {
458                 plt_err("Invalid pool_id");
459                 goto fail;
460         }
461
462         /* Free memory prior to re-allocation if needed */
463         if (eth_dev->data->rx_queues[qid] != NULL) {
464                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
465
466                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
467                 dev_ops->rx_queue_release(eth_dev, qid);
468                 eth_dev->data->rx_queues[qid] = NULL;
469         }
470
471         /* Clamp the cq limit up to the size of the packet pool aura for LBK
472          * to avoid meta packet drops, as LBK does not currently support
473          * backpressure.
474          */
475         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
476                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
477
478                 /* Use current RQ's aura limit if inl rq is not available */
479                 if (!pkt_pool_limit)
480                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
481                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
482         }
483
484         /* Setup ROC CQ */
485         cq = &dev->cqs[qid];
486         cq->qid = qid;
487         cq->nb_desc = nb_desc;
488         rc = roc_nix_cq_init(&dev->nix, cq);
489         if (rc) {
490                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
491                 goto fail;
492         }
493
494         /* Setup ROC RQ */
495         rq = &dev->rqs[qid];
496         rq->qid = qid;
497         rq->aura_handle = mp->pool_id;
498         rq->flow_tag_width = 32;
499         rq->sso_ena = false;
500
501         /* Calculate first mbuf skip */
502         first_skip = (sizeof(struct rte_mbuf));
503         first_skip += RTE_PKTMBUF_HEADROOM;
504         first_skip += rte_pktmbuf_priv_size(mp);
505         rq->first_skip = first_skip;
506         rq->later_skip = sizeof(struct rte_mbuf);
507         rq->lpb_size = mp->elt_size;
508
509         /* Enable Inline IPSec on RQ, will not be used for Poll mode */
510         if (roc_nix_inl_inb_is_enabled(nix))
511                 rq->ipsech_ena = true;
512
513         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
514         if (rc) {
515                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
516                 goto cq_fini;
517         }
518
519         /* Allocate and setup fast path rx queue */
520         rc = -ENOMEM;
521         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
522         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
523         if (!rxq_sp) {
524                 plt_err("Failed to alloc rx queue for rq=%d", qid);
525                 goto rq_fini;
526         }
527
528         /* Setup slow path fields */
529         rxq_sp->dev = dev;
530         rxq_sp->qid = qid;
531         rxq_sp->qconf.conf.rx = *rx_conf;
532         /* Queue config should reflect global offloads */
533         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
534         rxq_sp->qconf.nb_desc = nb_desc;
535         rxq_sp->qconf.mp = mp;
536
537         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
538                 /* Setup rq reference for inline dev if present */
539                 rc = roc_nix_inl_dev_rq_get(rq);
540                 if (rc)
541                         goto free_mem;
542         }
543
544         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
545                     cq->nb_desc);
546
547         /* Store start of fast path area */
548         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
549         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
550
551         /* Calculate the delta and freq mult between the PTP HI clock and tsc.
552          * These are needed to derive the raw clock value from the tsc counter,
553          * which the read_clock eth op returns.
554          */
555         if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
556                 rc = cnxk_nix_tsc_convert(dev);
557                 if (rc) {
558                         plt_err("Failed to calculate delta and freq mult");
559                         goto rq_fini;
560                 }
561         }
562
563         return 0;
564 free_mem:
565         plt_free(rxq_sp);
566 rq_fini:
567         rc |= roc_nix_rq_fini(rq);
568 cq_fini:
569         rc |= roc_nix_cq_fini(cq);
570 fail:
571         return rc;
572 }
573
574 static void
575 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
576 {
577         void *rxq = eth_dev->data->rx_queues[qid];
578         struct cnxk_eth_rxq_sp *rxq_sp;
579         struct cnxk_eth_dev *dev;
580         struct roc_nix_rq *rq;
581         struct roc_nix_cq *cq;
582         int rc;
583
584         if (!rxq)
585                 return;
586
587         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
588         dev = rxq_sp->dev;
589         rq = &dev->rqs[qid];
590
591         plt_nix_dbg("Releasing rxq %u", qid);
592
593         /* Release rq reference for inline dev if present */
594         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
595                 roc_nix_inl_dev_rq_put(rq);
596
597         /* Cleanup ROC RQ */
598         rc = roc_nix_rq_fini(rq);
599         if (rc)
600                 plt_err("Failed to cleanup rq, rc=%d", rc);
601
602         /* Cleanup ROC CQ */
603         cq = &dev->cqs[qid];
604         rc = roc_nix_cq_fini(cq);
605         if (rc)
606                 plt_err("Failed to cleanup cq, rc=%d", rc);
607
608         /* Finally free fast path area */
609         plt_free(rxq_sp);
610 }
611
612 uint32_t
613 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
614                        uint8_t rss_level)
615 {
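        /* Flow key types per RSS hash level: row 0 hashes on outer headers,
         * row 1 on inner headers and row 2 on both, with columns matching
         * the RSS_*_INDEX order (IPv4, IPv6, TCP, UDP, SCTP, ETH DMAC).
         */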
616         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
617                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
618                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
619                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
620                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
621                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
622                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
623                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
624                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
625                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
626                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
627                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
628         };
629         uint32_t flowkey_cfg = 0;
630
631         dev->ethdev_rss_hf = ethdev_rss;
632
633         if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
634             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
635                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
636         }
637
638         if (ethdev_rss & ETH_RSS_C_VLAN)
639                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
640
641         if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
642                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
643
644         if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
645                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
646
647         if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
648                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
649
650         if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
651                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
652
653         if (ethdev_rss & RSS_IPV4_ENABLE)
654                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
655
656         if (ethdev_rss & RSS_IPV6_ENABLE)
657                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
658
659         if (ethdev_rss & ETH_RSS_TCP)
660                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
661
662         if (ethdev_rss & ETH_RSS_UDP)
663                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
664
665         if (ethdev_rss & ETH_RSS_SCTP)
666                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
667
668         if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
669                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
670
671         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
672                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
673
674         if (ethdev_rss & ETH_RSS_PORT)
675                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
676
677         if (ethdev_rss & ETH_RSS_NVGRE)
678                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
679
680         if (ethdev_rss & ETH_RSS_VXLAN)
681                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
682
683         if (ethdev_rss & ETH_RSS_GENEVE)
684                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
685
686         if (ethdev_rss & ETH_RSS_GTPU)
687                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
688
689         return flowkey_cfg;
690 }
691
692 static void
693 nix_free_queue_mem(struct cnxk_eth_dev *dev)
694 {
695         plt_free(dev->rqs);
696         plt_free(dev->cqs);
697         plt_free(dev->sqs);
698         dev->rqs = NULL;
699         dev->cqs = NULL;
700         dev->sqs = NULL;
701 }
702
703 static int
704 nix_rss_default_setup(struct cnxk_eth_dev *dev)
705 {
706         struct rte_eth_dev *eth_dev = dev->eth_dev;
707         uint8_t rss_hash_level;
708         uint32_t flowkey_cfg;
709         uint64_t rss_hf;
710
711         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
712         rss_hash_level = ETH_RSS_LEVEL(rss_hf);
713         if (rss_hash_level)
714                 rss_hash_level -= 1;
715
716         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
717         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
718 }
719
720 static int
721 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
722 {
723         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
724         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
725         struct cnxk_eth_qconf *tx_qconf = NULL;
726         struct cnxk_eth_qconf *rx_qconf = NULL;
727         struct cnxk_eth_rxq_sp *rxq_sp;
728         struct cnxk_eth_txq_sp *txq_sp;
729         int i, nb_rxq, nb_txq;
730         void **txq, **rxq;
731
732         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
733         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
734
735         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
736         if (tx_qconf == NULL) {
737                 plt_err("Failed to allocate memory for tx_qconf");
738                 goto fail;
739         }
740
741         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
742         if (rx_qconf == NULL) {
743                 plt_err("Failed to allocate memory for rx_qconf");
744                 goto fail;
745         }
746
747         txq = eth_dev->data->tx_queues;
748         for (i = 0; i < nb_txq; i++) {
749                 if (txq[i] == NULL) {
750                         tx_qconf[i].valid = false;
751                         plt_info("txq[%d] is already released", i);
752                         continue;
753                 }
754                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
755                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
756                 tx_qconf[i].valid = true;
757                 dev_ops->tx_queue_release(eth_dev, i);
758                 eth_dev->data->tx_queues[i] = NULL;
759         }
760
761         rxq = eth_dev->data->rx_queues;
762         for (i = 0; i < nb_rxq; i++) {
763                 if (rxq[i] == NULL) {
764                         rx_qconf[i].valid = false;
765                         plt_info("rxq[%d] is already released", i);
766                         continue;
767                 }
768                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
769                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
770                 rx_qconf[i].valid = true;
771                 dev_ops->rx_queue_release(eth_dev, i);
772                 eth_dev->data->rx_queues[i] = NULL;
773         }
774
775         dev->tx_qconf = tx_qconf;
776         dev->rx_qconf = rx_qconf;
777         return 0;
778
779 fail:
780         free(tx_qconf);
781         free(rx_qconf);
782         return -ENOMEM;
783 }
784
785 static int
786 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
787 {
788         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
789         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
790         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
791         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
792         int rc, i, nb_rxq, nb_txq;
793
794         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
795         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
796
797         rc = -ENOMEM;
798         /* Setup tx & rx queues with previous configuration so
799          * that the queues can be functional in cases like ports
800          * are started without reconfiguring queues.
801          *
802          * The usual reconfig sequence is like below:
803          * port_configure() {
804          *      if(reconfigure) {
805          *              queue_release()
806          *              queue_setup()
807          *      }
808          *      queue_configure() {
809          *              queue_release()
810          *              queue_setup()
811          *      }
812          * }
813          * port_start()
814          *
815          * In some application's control path, queue_configure() would
816          * NOT be invoked for TXQs/RXQs in port_configure().
817          * In such cases, queues can be functional after start as the
818          * queues are already setup in port_configure().
819          */
820         for (i = 0; i < nb_txq; i++) {
821                 if (!tx_qconf[i].valid)
822                         continue;
823                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
824                                              &tx_qconf[i].conf.tx);
825                 if (rc) {
826                         plt_err("Failed to setup tx queue rc=%d", rc);
827                         for (i -= 1; i >= 0; i--)
828                                 dev_ops->tx_queue_release(eth_dev, i);
829                         goto fail;
830                 }
831         }
832
833         free(tx_qconf);
834         tx_qconf = NULL;
835
836         for (i = 0; i < nb_rxq; i++) {
837                 if (!rx_qconf[i].valid)
838                         continue;
839                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
840                                              &rx_qconf[i].conf.rx,
841                                              rx_qconf[i].mp);
842                 if (rc) {
843                         plt_err("Failed to setup rx queue rc=%d", rc);
844                         for (i -= 1; i >= 0; i--)
845                                 dev_ops->rx_queue_release(eth_dev, i);
846                         goto tx_queue_release;
847                 }
848         }
849
850         free(rx_qconf);
851         rx_qconf = NULL;
852
853         return 0;
854
855 tx_queue_release:
856         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
857                 dev_ops->tx_queue_release(eth_dev, i);
858 fail:
859         if (tx_qconf)
860                 free(tx_qconf);
861         if (rx_qconf)
862                 free(rx_qconf);
863
864         return rc;
865 }
866
867 static uint16_t
868 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
869 {
870         RTE_SET_USED(queue);
871         RTE_SET_USED(mbufs);
872         RTE_SET_USED(pkts);
873
874         return 0;
875 }
876
877 static void
878 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
879 {
880         /* These dummy functions are required to support applications
881          * that reconfigure queues without stopping the tx burst and
882          * rx burst threads (e.g. the kni app). When the queue context
883          * is saved, the txqs/rxqs are released, which would crash the
884          * app since the rx/tx burst functions may still be running on
885          * different lcores.
886          */
887         eth_dev->tx_pkt_burst = nix_eth_nop_burst;
888         eth_dev->rx_pkt_burst = nix_eth_nop_burst;
889         rte_mb();
890 }
891
892 static int
893 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
894 {
895         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
896         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
897         struct roc_nix *nix = &dev->nix;
898         int rc;
899
900         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
901         if (rc)
902                 return rc;
903
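        /* Pack the 8-bit LSO format indices for the four tunnel combinations
         * (v4/v4, v4/v6, v6/v4, v6/v6) into one 64-bit word: regular tunnel
         * formats in the low 32 bits, UDP tunnel formats in the high 32 bits.
         */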
904         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
905                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
906                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
907                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
908
909         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
910                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
911                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
912                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
913         return 0;
914 }
915
916 static int
917 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
918 {
919         struct roc_nix *nix = &dev->nix;
920         int rc;
921
922         /* Nothing much to do if offload is not enabled */
923         if (!(dev->tx_offloads &
924               (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
925                DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
926                 return 0;
927
928         /* Setup LSO formats in AF. It's a no-op if another ethdev has
929          * already set them up.
930          */
931         rc = roc_nix_lso_fmt_setup(nix);
932         if (rc)
933                 return rc;
934
935         return nix_lso_tun_fmt_update(dev);
936 }
937
938 int
939 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
940 {
941         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
942         struct rte_eth_dev_data *data = eth_dev->data;
943         struct rte_eth_conf *conf = &data->dev_conf;
944         struct rte_eth_rxmode *rxmode = &conf->rxmode;
945         struct rte_eth_txmode *txmode = &conf->txmode;
946         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
947         struct roc_nix_fc_cfg fc_cfg = {0};
948         struct roc_nix *nix = &dev->nix;
949         struct rte_ether_addr *ea;
950         uint8_t nb_rxq, nb_txq;
951         uint64_t rx_cfg;
952         void *qs;
953         int rc;
954
955         rc = -EINVAL;
956
957         /* Sanity checks */
958         if (rte_eal_has_hugepages() == 0) {
959                 plt_err("Huge page is not configured");
960                 goto fail_configure;
961         }
962
963         if (conf->dcb_capability_en == 1) {
964                 plt_err("dcb enable is not supported");
965                 goto fail_configure;
966         }
967
968         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
969                 plt_err("Flow director is not supported");
970                 goto fail_configure;
971         }
972
973         if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
974             rxmode->mq_mode != ETH_MQ_RX_RSS) {
975                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
976                 goto fail_configure;
977         }
978
979         if (txmode->mq_mode != ETH_MQ_TX_NONE) {
980                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
981                 goto fail_configure;
982         }
983
984         /* Free the resources allocated from the previous configure */
985         if (dev->configured == 1) {
986                 /* Unregister queue irq's */
987                 roc_nix_unregister_queue_irqs(nix);
988
989                 /* Unregister CQ irqs if present */
990                 if (eth_dev->data->dev_conf.intr_conf.rxq)
991                         roc_nix_unregister_cq_irqs(nix);
992
993                 /* Set no-op functions */
994                 nix_set_nop_rxtx_function(eth_dev);
995                 /* Store queue config for later */
996                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
997                 if (rc)
998                         goto fail_configure;
999
1000                 /* Cleanup security support */
1001                 rc = nix_security_release(dev);
1002                 if (rc)
1003                         goto fail_configure;
1004
1005                 roc_nix_tm_fini(nix);
1006                 roc_nix_lf_free(nix);
1007         }
1008
1009         dev->rx_offloads = rxmode->offloads;
1010         dev->tx_offloads = txmode->offloads;
1011
1012         /* Prepare rx cfg */
1013         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1014         if (dev->rx_offloads &
1015             (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
1016                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1017                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1018         }
1019         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1020                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1021                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1022
1023         if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
1024                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1025                 /* Disable drop re if rx offload security is enabled and
1026                  * platform does not support it.
1027                  */
1028                 if (dev->ipsecd_drop_re_dis)
1029                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1030         }
1031
1032         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1033         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1034
1035         /* Alloc a nix lf */
1036         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1037         if (rc) {
1038                 plt_err("Failed to init nix_lf rc=%d", rc);
1039                 goto fail_configure;
1040         }
1041
1042         dev->npc.channel = roc_nix_get_base_chan(nix);
1043
1044         nb_rxq = data->nb_rx_queues;
1045         nb_txq = data->nb_tx_queues;
1046         rc = -ENOMEM;
1047         if (nb_rxq) {
1048                 /* Allocate memory for roc rq's and cq's */
1049                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1050                 if (!qs) {
1051                         plt_err("Failed to alloc rqs");
1052                         goto free_nix_lf;
1053                 }
1054                 dev->rqs = qs;
1055
1056                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1057                 if (!qs) {
1058                         plt_err("Failed to alloc cqs");
1059                         goto free_nix_lf;
1060                 }
1061                 dev->cqs = qs;
1062         }
1063
1064         if (nb_txq) {
1065                 /* Allocate memory for roc sq's */
1066                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1067                 if (!qs) {
1068                         plt_err("Failed to alloc sqs");
1069                         goto free_nix_lf;
1070                 }
1071                 dev->sqs = qs;
1072         }
1073
1074         /* Re-enable NIX LF error interrupts */
1075         roc_nix_err_intr_ena_dis(nix, true);
1076         roc_nix_ras_intr_ena_dis(nix, true);
1077
1078         if (nix->rx_ptp_ena &&
1079             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1080                 plt_err("Both PTP and switch header enabled");
1081                 goto free_nix_lf;
1082         }
1083
1084         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
1085         if (rc) {
1086                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1087                 goto free_nix_lf;
1088         }
1089
1090         /* Setup LSO if needed */
1091         rc = nix_lso_fmt_setup(dev);
1092         if (rc) {
1093                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1094                 goto free_nix_lf;
1095         }
1096
1097         /* Configure RSS */
1098         rc = nix_rss_default_setup(dev);
1099         if (rc) {
1100                 plt_err("Failed to configure rss rc=%d", rc);
1101                 goto free_nix_lf;
1102         }
1103
1104         /* Init the default TM scheduler hierarchy */
1105         rc = roc_nix_tm_init(nix);
1106         if (rc) {
1107                 plt_err("Failed to init traffic manager, rc=%d", rc);
1108                 goto free_nix_lf;
1109         }
1110
1111         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1112         if (rc) {
1113                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1114                 goto tm_fini;
1115         }
1116
1117         /* Register queue IRQs */
1118         rc = roc_nix_register_queue_irqs(nix);
1119         if (rc) {
1120                 plt_err("Failed to register queue interrupts rc=%d", rc);
1121                 goto tm_fini;
1122         }
1123
1124         /* Register cq IRQs */
1125         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1126                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1127                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1128                                 dev->nix.cints);
1129                         goto q_irq_fini;
1130                 }
1131                 /* Rx interrupt feature cannot work with vector mode because
1132                  * vector mode does not process packets unless a minimum of 4
1133                  * pkts are received, while cq interrupts are generated even
1134                  * for a single pkt in the CQ.
1135                  */
1136                 dev->scalar_ena = true;
1137
1138                 rc = roc_nix_register_cq_irqs(nix);
1139                 if (rc) {
1140                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1141                         goto q_irq_fini;
1142                 }
1143         }
1144
1145         /* Configure loop back mode */
1146         rc = roc_nix_mac_loopback_enable(nix,
1147                                          eth_dev->data->dev_conf.lpbk_mode);
1148         if (rc) {
1149                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1150                 goto cq_fini;
1151         }
1152
1153         /* Init flow control configuration */
1154         fc_cfg.cq_cfg_valid = false;
1155         fc_cfg.rxchan_cfg.enable = true;
1156         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1157         if (rc) {
1158                 plt_err("Failed to initialize flow control rc=%d", rc);
1159                 goto cq_fini;
1160         }
1161
1162         /* Update flow control configuration to PMD */
1163         rc = nix_init_flow_ctrl_config(eth_dev);
1164         if (rc) {
1165                 plt_err("Failed to initialize flow control rc=%d", rc);
1166                 goto cq_fini;
1167         }
1168
1169         /* Setup Inline security support */
1170         rc = nix_security_setup(dev);
1171         if (rc)
1172                 goto cq_fini;
1173
1174         /*
1175          * Restore the queue config when a reconfigure follows an earlier
1176          * configure and the application has not invoked queue configure.
1177          */
1178         if (dev->configured == 1) {
1179                 rc = nix_restore_queue_cfg(eth_dev);
1180                 if (rc)
1181                         goto sec_release;
1182         }
1183
1184         /* Update the mac address */
1185         ea = eth_dev->data->mac_addrs;
1186         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1187         if (rte_is_zero_ether_addr(ea))
1188                 rte_eth_random_addr((uint8_t *)ea);
1189
1190         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1191
1192         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1193                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1194                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1195                     dev->rx_offloads, dev->tx_offloads);
1196
1197         /* All good */
1198         dev->configured = 1;
1199         dev->nb_rxq = data->nb_rx_queues;
1200         dev->nb_txq = data->nb_tx_queues;
1201         return 0;
1202
1203 sec_release:
1204         rc |= nix_security_release(dev);
1205 cq_fini:
1206         roc_nix_unregister_cq_irqs(nix);
1207 q_irq_fini:
1208         roc_nix_unregister_queue_irqs(nix);
1209 tm_fini:
1210         roc_nix_tm_fini(nix);
1211 free_nix_lf:
1212         nix_free_queue_mem(dev);
1213         rc |= roc_nix_lf_free(nix);
1214 fail_configure:
1215         dev->configured = 0;
1216         return rc;
1217 }
1218
1219 int
1220 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1221 {
1222         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1223         struct rte_eth_dev_data *data = eth_dev->data;
1224         struct roc_nix_sq *sq = &dev->sqs[qid];
1225         int rc = -EINVAL;
1226
1227         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1228                 return 0;
1229
1230         rc = roc_nix_tm_sq_aura_fc(sq, true);
1231         if (rc) {
1232                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1233                 goto done;
1234         }
1235
1236         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1237 done:
1238         return rc;
1239 }
1240
1241 int
1242 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1243 {
1244         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1245         struct rte_eth_dev_data *data = eth_dev->data;
1246         struct roc_nix_sq *sq = &dev->sqs[qid];
1247         int rc;
1248
1249         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1250                 return 0;
1251
1252         rc = roc_nix_tm_sq_aura_fc(sq, false);
1253         if (rc) {
1254                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1255                         rc);
1256                 goto done;
1257         }
1258
1259         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1260 done:
1261         return rc;
1262 }
1263
1264 static int
1265 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1266 {
1267         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1268         struct rte_eth_dev_data *data = eth_dev->data;
1269         struct roc_nix_rq *rq = &dev->rqs[qid];
1270         int rc;
1271
1272         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1273                 return 0;
1274
1275         rc = roc_nix_rq_ena_dis(rq, true);
1276         if (rc) {
1277                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1278                 goto done;
1279         }
1280
1281         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1282 done:
1283         return rc;
1284 }
1285
1286 static int
1287 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1288 {
1289         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1290         struct rte_eth_dev_data *data = eth_dev->data;
1291         struct roc_nix_rq *rq = &dev->rqs[qid];
1292         int rc;
1293
1294         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1295                 return 0;
1296
1297         rc = roc_nix_rq_ena_dis(rq, false);
1298         if (rc) {
1299                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1300                 goto done;
1301         }
1302
1303         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1304 done:
1305         return rc;
1306 }
1307
1308 static int
1309 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1310 {
1311         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1312         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1313         struct rte_mbuf *rx_pkts[32];
1314         struct rte_eth_link link;
1315         int count, i, j, rc;
1316         void *rxq;
1317
1318         /* Disable switch hdr pkind */
1319         roc_nix_switch_hdr_set(&dev->nix, 0);
1320
1321         /* Stop link change events */
1322         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1323                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1324
1325         /* Disable Rx via NPC */
1326         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1327
1328         /* Stop rx queues and free up pkts pending */
1329         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1330                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1331                 if (rc)
1332                         continue;
1333
1334                 rxq = eth_dev->data->rx_queues[i];
1335                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1336                 while (count) {
1337                         for (j = 0; j < count; j++)
1338                                 rte_pktmbuf_free(rx_pkts[j]);
1339                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1340                 }
1341         }
1342
1343         /* Stop tx queues  */
1344         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1345                 dev_ops->tx_queue_stop(eth_dev, i);
1346
1347         /* Bring down link status internally */
1348         memset(&link, 0, sizeof(link));
1349         rte_eth_linkstatus_set(eth_dev, &link);
1350
1351         return 0;
1352 }
1353
1354 int
1355 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1356 {
1357         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1358         int rc, i;
1359
1360         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1361                 rc = nix_recalc_mtu(eth_dev);
1362                 if (rc)
1363                         return rc;
1364         }
1365
1366         /* Start rx queues */
1367         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1368                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1369                 if (rc)
1370                         return rc;
1371         }
1372
1373         /* Start tx queues  */
1374         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1375                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1376                 if (rc)
1377                         return rc;
1378         }
1379
1380         /* Update Flow control configuration */
1381         rc = nix_update_flow_ctrl_config(eth_dev);
1382         if (rc) {
1383                 plt_err("Failed to enable flow control. error code(%d)", rc);
1384                 return rc;
1385         }
1386
1387         /* Enable Rx in NPC */
1388         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1389         if (rc) {
1390                 plt_err("Failed to enable NPC rx %d", rc);
1391                 return rc;
1392         }
1393
1394         cnxk_nix_toggle_flag_link_cfg(dev, true);
1395
1396         /* Start link change events */
1397         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1398                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1399                 if (rc) {
1400                         plt_err("Failed to start cgx link event %d", rc);
1401                         goto rx_disable;
1402                 }
1403         }
1404
1405         /* Enable PTP if it is requested by the user or already
1406          * enabled on the PF owning this VF.
1407          */
1408         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1409         if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1410                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1411         else
1412                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1413
1414         if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1415                 rc = rte_mbuf_dyn_rx_timestamp_register
1416                         (&dev->tstamp.tstamp_dynfield_offset,
1417                          &dev->tstamp.rx_tstamp_dynflag);
1418                 if (rc != 0) {
1419                         plt_err("Failed to register Rx timestamp field/flag");
1420                         goto rx_disable;
1421                 }
1422         }
1423
1424         cnxk_nix_toggle_flag_link_cfg(dev, false);
1425
1426         return 0;
1427
1428 rx_disable:
1429         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1430         cnxk_nix_toggle_flag_link_cfg(dev, false);
1431         return rc;
1432 }
1433
1434 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1435 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1436
1437 /* CNXK platform independent eth dev ops */
1438 struct eth_dev_ops cnxk_eth_dev_ops = {
1439         .mtu_set = cnxk_nix_mtu_set,
1440         .mac_addr_add = cnxk_nix_mac_addr_add,
1441         .mac_addr_remove = cnxk_nix_mac_addr_del,
1442         .mac_addr_set = cnxk_nix_mac_addr_set,
1443         .dev_infos_get = cnxk_nix_info_get,
1444         .link_update = cnxk_nix_link_update,
1445         .tx_queue_release = cnxk_nix_tx_queue_release,
1446         .rx_queue_release = cnxk_nix_rx_queue_release,
1447         .dev_stop = cnxk_nix_dev_stop,
1448         .dev_close = cnxk_nix_dev_close,
1449         .dev_reset = cnxk_nix_dev_reset,
1450         .tx_queue_start = cnxk_nix_tx_queue_start,
1451         .rx_queue_start = cnxk_nix_rx_queue_start,
1452         .rx_queue_stop = cnxk_nix_rx_queue_stop,
1453         .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1454         .promiscuous_enable = cnxk_nix_promisc_enable,
1455         .promiscuous_disable = cnxk_nix_promisc_disable,
1456         .allmulticast_enable = cnxk_nix_allmulticast_enable,
1457         .allmulticast_disable = cnxk_nix_allmulticast_disable,
1458         .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1459         .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1460         .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1461         .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1462         .dev_set_link_up = cnxk_nix_set_link_up,
1463         .dev_set_link_down = cnxk_nix_set_link_down,
1464         .get_module_info = cnxk_nix_get_module_info,
1465         .get_module_eeprom = cnxk_nix_get_module_eeprom,
1466         .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1467         .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1468         .pool_ops_supported = cnxk_nix_pool_ops_supported,
1469         .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1470         .stats_get = cnxk_nix_stats_get,
1471         .stats_reset = cnxk_nix_stats_reset,
1472         .xstats_get = cnxk_nix_xstats_get,
1473         .xstats_get_names = cnxk_nix_xstats_get_names,
1474         .xstats_reset = cnxk_nix_xstats_reset,
1475         .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1476         .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1477         .fw_version_get = cnxk_nix_fw_version_get,
1478         .rxq_info_get = cnxk_nix_rxq_info_get,
1479         .txq_info_get = cnxk_nix_txq_info_get,
1480         .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1481         .flow_ops_get = cnxk_nix_flow_ops_get,
1482         .get_reg = cnxk_nix_dev_get_reg,
1483         .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1484         .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1485         .timesync_read_time = cnxk_nix_timesync_read_time,
1486         .timesync_write_time = cnxk_nix_timesync_write_time,
1487         .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1488         .read_clock = cnxk_nix_read_clock,
1489         .reta_update = cnxk_nix_reta_update,
1490         .reta_query = cnxk_nix_reta_query,
1491         .rss_hash_update = cnxk_nix_rss_hash_update,
1492         .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1493         .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1494         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1495         .tm_ops_get = cnxk_nix_tm_ops_get,
1496 };
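
     /* Note: this table is deliberately non-static and non-const. The
      * SoC-specific drivers (cn9k/cn10k) fill in the platform dependent
      * entries (e.g. dev_configure, dev_start and the Rx/Tx queue setup ops)
      * from their own probe paths, roughly as below (illustrative only,
      * placeholder names):
      *
      *     cnxk_eth_dev_ops.dev_configure = soc_nix_configure;
      *     cnxk_eth_dev_ops.dev_start = soc_nix_dev_start;
      */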
1497
1498 static int
1499 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1500 {
1501         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1502         struct rte_security_ctx *sec_ctx;
1503         struct roc_nix *nix = &dev->nix;
1504         struct rte_pci_device *pci_dev;
1505         int rc, max_entries;
1506
1507         eth_dev->dev_ops = &cnxk_eth_dev_ops;
1508
1509         /* Alloc security context */
1510         sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1511         if (!sec_ctx)
1512                 return -ENOMEM;
1513         sec_ctx->device = eth_dev;
1514         sec_ctx->ops = &cnxk_eth_sec_ops;
1515         sec_ctx->flags =
1516                 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1517         eth_dev->security_ctx = sec_ctx;
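             /* Session lists for inline inbound and outbound security */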
1518         TAILQ_INIT(&dev->inb.list);
1519         TAILQ_INIT(&dev->outb.list);
1520
1521         /* For secondary processes, the primary has done all the work */
1522         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1523                 return 0;
1524
1525         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1526         rte_eth_copy_pci_info(eth_dev, pci_dev);
1527
1528         /* Parse devargs string */
1529         rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1530         if (rc) {
1531                 plt_err("Failed to parse devargs rc=%d", rc);
1532                 goto error;
1533         }
1534
1535         /* Initialize base roc nix */
1536         nix->pci_dev = pci_dev;
1537         nix->hw_vlan_ins = true;
1538         rc = roc_nix_dev_init(nix);
1539         if (rc) {
1540                 plt_err("Failed to initialize roc nix rc=%d", rc);
1541                 goto error;
1542         }
1543
1544         /* Register up msg callbacks */
1545         roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1546
1547         /* Register link info get callback */
1548         roc_nix_mac_link_info_get_cb_register(nix,
1549                                               cnxk_eth_dev_link_status_get_cb);
1550
1551         dev->eth_dev = eth_dev;
1552         dev->configured = 0;
1553         dev->ptype_disable = 0;
1554
1555         /* For VFs, returned max_entries will be 0. But to keep the default
1556          * MAC address, one entry must be allocated, so set it to 1.
1557          */
1558         if (roc_nix_is_vf_or_sdp(nix))
1559                 max_entries = 1;
1560         else
1561                 max_entries = roc_nix_mac_max_entries_get(nix);
1562
1563         if (max_entries <= 0) {
1564                 plt_err("Failed to get max entries for mac addr");
1565                 rc = -ENOTSUP;
1566                 goto dev_fini;
1567         }
1568
1569         eth_dev->data->mac_addrs =
1570                 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1571         if (eth_dev->data->mac_addrs == NULL) {
1572                 plt_err("Failed to allocate memory for mac addr");
1573                 rc = -ENOMEM;
1574                 goto dev_fini;
1575         }
1576
1577         dev->max_mac_entries = max_entries;
1578         dev->dmac_filter_count = 1;
1579
1580         /* Get mac address */
1581         rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1582         if (rc) {
1583                 plt_err("Failed to get mac addr, rc=%d", rc);
1584                 goto free_mac_addrs;
1585         }
1586
1587         /* Update the mac address */
1588         memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1589
1590         if (!roc_nix_is_vf_or_sdp(nix)) {
1591                 /* Sync same MAC address to CGX/RPM table */
1592                 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1593                 if (rc) {
1594                         plt_err("Failed to set mac addr, rc=%d", rc);
1595                         goto free_mac_addrs;
1596                 }
1597         }
1598
1599         /* Union of all capabilities supported by CNXK.
1600          * Platform specific capabilities will be
1601          * updated later.
1602          */
1603         dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1604         dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1605         dev->speed_capa = nix_get_speed_capa(dev);
1606
1607         /* Initialize roc npc */
1608         dev->npc.roc_nix = nix;
1609         rc = roc_npc_init(&dev->npc);
1610         if (rc)
1611                 goto free_mac_addrs;
1612
1613         plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1614                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1615                     eth_dev->data->port_id, roc_nix_get_pf(nix),
1616                     roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1617                     dev->rx_offload_capa, dev->tx_offload_capa);
1618         return 0;
1619
1620 free_mac_addrs:
1621         rte_free(eth_dev->data->mac_addrs);
             /* Avoid a double free from rte_eth_dev_release_port() */
             eth_dev->data->mac_addrs = NULL;
1622 dev_fini:
1623         roc_nix_dev_fini(nix);
1624 error:
1625         plt_err("Failed to init nix eth_dev rc=%d", rc);
1626         return rc;
1627 }
1628
1629 static int
1630 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1631 {
1632         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1633         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1634         struct roc_nix *nix = &dev->nix;
1635         int rc, i;
1636
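             /* The security context is allocated in cnxk_eth_dev_init() by
              * both primary and secondary processes, so release it before the
              * secondary-process early return below.
              */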
1637         plt_free(eth_dev->security_ctx);
1638         eth_dev->security_ctx = NULL;
1639
1640         /* Nothing to be done for secondary processes */
1641         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1642                 return 0;
1643
1644         /* Clear the flag since we are closing down */
1645         dev->configured = 0;
1646
1647         roc_nix_npc_rx_ena_dis(nix, false);
1648
1649         /* Disable and free rte_flow entries */
1650         roc_npc_fini(&dev->npc);
1651
1652         /* Disable link status events */
1653         roc_nix_mac_link_event_start_stop(nix, false);
1654
1655         /* Unregister the link update op; this is required to stop VFs from
1656          * receiving link status updates on the exit path.
1657          */
1658         roc_nix_mac_link_cb_unregister(nix);
1659
1660         /* Free up SQs */
1661         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1662                 dev_ops->tx_queue_release(eth_dev, i);
1663                 eth_dev->data->tx_queues[i] = NULL;
1664         }
1665         eth_dev->data->nb_tx_queues = 0;
1666
1667         /* Free up RQs and CQs */
1668         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1669                 dev_ops->rx_queue_release(eth_dev, i);
1670                 eth_dev->data->rx_queues[i] = NULL;
1671         }
1672         eth_dev->data->nb_rx_queues = 0;
1673
1674         /* Free security resources */
1675         nix_security_release(dev);
1676
1677         /* Free tm resources */
1678         roc_nix_tm_fini(nix);
1679
1680         /* Unregister queue irqs */
1681         roc_nix_unregister_queue_irqs(nix);
1682
1683         /* Unregister cq irqs */
1684         if (eth_dev->data->dev_conf.intr_conf.rxq)
1685                 roc_nix_unregister_cq_irqs(nix);
1686
1687         /* Free ROC RQ, SQ and CQ memory */
1688         nix_free_queue_mem(dev);
1689
1690         /* Free nix lf resources */
1691         rc = roc_nix_lf_free(nix);
1692         if (rc)
1693                 plt_err("Failed to free nix lf, rc=%d", rc);
1694
1695         rte_free(eth_dev->data->mac_addrs);
1696         eth_dev->data->mac_addrs = NULL;
1697
1698         rc = roc_nix_dev_fini(nix);
1699         /* Can be freed later by PMD if NPA LF is in use */
1700         if (rc == -EAGAIN) {
1701                 if (!reset)
1702                         eth_dev->data->dev_private = NULL;
1703                 return 0;
1704         } else if (rc) {
1705                 plt_err("Failed in nix dev fini, rc=%d", rc);
1706         }
1707
1708         return rc;
1709 }
1710
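     /* eth_dev close/reset callbacks: close tears the port down, while reset
      * tears it down and re-initializes it via cnxk_eth_dev_init().
      */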
1711 static int
1712 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1713 {
1714         cnxk_eth_dev_uninit(eth_dev, false);
1715         return 0;
1716 }
1717
1718 static int
1719 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1720 {
1721         int rc;
1722
1723         rc = cnxk_eth_dev_uninit(eth_dev, true);
1724         if (rc)
1725                 return rc;
1726
1727         return cnxk_eth_dev_init(eth_dev);
1728 }
1729
1730 int
1731 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1732 {
1733         struct rte_eth_dev *eth_dev;
1734         struct roc_nix *nix;
1735         int rc = -EINVAL;
1736
1737         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1738         if (eth_dev) {
1739                 /* Cleanup eth dev */
1740                 rc = cnxk_eth_dev_uninit(eth_dev, false);
1741                 if (rc)
1742                         return rc;
1743
1744                 rte_eth_dev_release_port(eth_dev);
1745         }
1746
1747         /* Nothing to be done for secondary processes */
1748         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1749                 return 0;
1750
1751         /* Check if this device is hosting the common resource */
1752         nix = roc_idev_npa_nix_get();
1753         if (!nix || nix->pci_dev != pci_dev)
1754                 return 0;
1755
1756         /* Try nix fini now */
1757         rc = roc_nix_dev_fini(nix);
1758         if (rc == -EAGAIN) {
1759                 plt_info("%s: common resource in use by other devices",
1760                          pci_dev->name);
1761                 goto exit;
1762         } else if (rc) {
1763                 plt_err("Failed in nix dev fini, rc=%d", rc);
1764                 goto exit;
1765         }
1766
1767         /* Free device pointer as rte_ethdev does not have it anymore */
1768         rte_free(nix);
1769 exit:
1770         return rc;
1771 }
1772
1773 int
1774 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1775 {
1776         int rc;
1777
1778         RTE_SET_USED(pci_drv);
1779
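             /* Allocate/attach the ethdev with cnxk private data and run
              * cnxk_eth_dev_init() on it (the primary process allocates,
              * a secondary process attaches to the shared data).
              */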
1780         rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1781                                            cnxk_eth_dev_init);
1782
1783         /* On error on a secondary process, recheck if the port exists in
1784          * the primary process or is in the middle of being detached.
1785          */
1786         if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1787                 if (!rte_eth_dev_allocated(pci_dev->device.name))
1788                         return 0;
1789         return rc;
1790 }
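
     /* Illustrative sketch (not part of this file): a SoC-specific driver is
      * expected to wire cnxk_nix_probe()/cnxk_nix_remove() into its own
      * rte_pci_driver; the names below are placeholders, not the exact
      * cn9k/cn10k symbols.
      *
      *     static struct rte_pci_driver soc_nix_pci = {
      *             .id_table = soc_pci_nix_map,
      *             .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
      *                          RTE_PCI_DRV_NEED_IOVA_AS_VA,
      *             .probe = cnxk_nix_probe,
      *             .remove = cnxk_nix_remove,
      *     };
      *     RTE_PMD_REGISTER_PCI(net_soc_nix, soc_nix_pci);
      */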