drivers/net/cnxk/cnxk_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
6 static inline uint64_t
7 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
8 {
9         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
10
11         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
12             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
13                 capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
14
15         return capa;
16 }
17
18 static inline uint64_t
19 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
20 {
21         RTE_SET_USED(dev);
22         return CNXK_NIX_TX_OFFLOAD_CAPA;
23 }
24
25 static inline uint32_t
26 nix_get_speed_capa(struct cnxk_eth_dev *dev)
27 {
28         uint32_t speed_capa;
29
30         /* Auto negotiation disabled */
31         speed_capa = RTE_ETH_LINK_SPEED_FIXED;
32         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
33                 speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
34                               RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
35                               RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
36         }
37
38         return speed_capa;
39 }
40
41 int
42 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
43 {
44         struct roc_nix *nix = &dev->nix;
45
46         if (dev->inb.inl_dev == use_inl_dev)
47                 return 0;
48
49         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
50                     dev->inb.nb_sess, !!dev->inb.inl_dev);
51
52         /* Change the mode */
53         dev->inb.inl_dev = use_inl_dev;
54
55         /* Update RoC for NPC rule insertion */
56         roc_nix_inb_mode_set(nix, use_inl_dev);
57
58         /* Setup lookup mem */
59         return cnxk_nix_lookup_mem_sa_base_set(dev);
60 }
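/* Note: toggling between the inline device and the NIX LF for inbound
 * inline IPsec changes where the inbound SA table lives, which is why the
 * helper above updates both the NPC rule target in RoC and the per-port
 * lookup memory that the Rx fast path uses to resolve the SA base.
 */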
61
62 static int
63 nix_security_setup(struct cnxk_eth_dev *dev)
64 {
65         struct roc_nix *nix = &dev->nix;
66         int i, rc = 0;
67
68         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
69                 /* Setup Inline Inbound */
70                 rc = roc_nix_inl_inb_init(nix);
71                 if (rc) {
72                         plt_err("Failed to initialize nix inline inb, rc=%d",
73                                 rc);
74                         return rc;
75                 }
76
77                 /* By default, use the inline device for poll mode.
78                  * Will be overridden when event mode RQs are set up.
79                  */
80                 cnxk_nix_inb_mode_set(dev, true);
81
82                 /* Allocate memory to be used as dptr for CPT ucode
83                  * WRITE_SA op.
84                  */
85                 dev->inb.sa_dptr =
86                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
87                 if (!dev->inb.sa_dptr) {
88                         plt_err("Couldn't allocate memory for SA dptr");
89                         rc = -ENOMEM;
90                         goto cleanup;
91                 }
92         }
93
94         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
95             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
96                 struct plt_bitmap *bmap;
97                 size_t bmap_sz;
98                 void *mem;
99
100                 /* Setup enough descriptors for all tx queues */
101                 nix->outb_nb_desc = dev->outb.nb_desc;
102                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
103
104                 /* Setup Inline Outbound */
105                 rc = roc_nix_inl_outb_init(nix);
106                 if (rc) {
107                         plt_err("Failed to initialize nix inline outb, rc=%d",
108                                 rc);
109                         goto sa_dptr_free;
110                 }
111
112                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
113
114                 /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
115                 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
116                         return 0;
117
118                 /* Allocate memory to be used as dptr for CPT ucode
119                  * WRITE_SA op.
120                  */
121                 dev->outb.sa_dptr =
122                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
123                 if (!dev->outb.sa_dptr) {
124                         plt_err("Couldn't allocate memory for SA dptr");
125                         rc = -ENOMEM;
126                         goto sa_dptr_free;
127                 }
128
129                 rc = -ENOMEM;
130                 /* Allocate a bitmap to alloc and free sa indexes */
131                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
132                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
133                 if (mem == NULL) {
134                         plt_err("Outbound SA bmap alloc failed");
135
136                         rc |= roc_nix_inl_outb_fini(nix);
137                         goto sa_dptr_free;
138                 }
139
140                 rc = -EIO;
141                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
142                 if (!bmap) {
143                         plt_err("Outbound SA bmap init failed");
144
145                         rc |= roc_nix_inl_outb_fini(nix);
146                         plt_free(mem);
147                         goto sa_dptr_free;
148                 }
149
150                 for (i = 0; i < dev->outb.max_sa; i++)
151                         plt_bitmap_set(bmap, i);
152
153                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
154                 dev->outb.sa_bmap_mem = mem;
155                 dev->outb.sa_bmap = bmap;
156         }
157         return 0;
158
159 sa_dptr_free:
160         if (dev->inb.sa_dptr)
161                 plt_free(dev->inb.sa_dptr);
162         if (dev->outb.sa_dptr)
163                 plt_free(dev->outb.sa_dptr);
164 cleanup:
165         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
166                 rc |= roc_nix_inl_inb_fini(nix);
167         return rc;
168 }
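/* The outbound SA bitmap initialized above tracks which SA indices are
 * free; session create/destroy paths allocate and release indices by
 * clearing and setting bits. A minimal illustrative sketch (not part of
 * this file) of pulling an index out of such a bitmap with the plt_bitmap
 * API could look like:
 *
 *	uint32_t pos = 0, sa_idx;
 *	uint64_t slab = 0;
 *
 *	plt_bitmap_scan_init(dev->outb.sa_bmap);
 *	if (plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab)) {
 *		sa_idx = pos + __builtin_ctzll(slab);
 *		plt_bitmap_clear(dev->outb.sa_bmap, sa_idx);
 *		... sa_idx now owns one outbound SA slot ...
 *	}
 */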
169
170 static int
171 nix_meter_fini(struct cnxk_eth_dev *dev)
172 {
173         struct cnxk_meter_node *next_mtr = NULL;
174         struct roc_nix_bpf_objs profs = {0};
175         struct cnxk_meter_node *mtr = NULL;
176         struct cnxk_mtr *fms = &dev->mtr;
177         struct roc_nix *nix = &dev->nix;
178         struct roc_nix_rq *rq;
179         uint32_t i;
180         int rc = 0;
181
182         RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
183                 for (i = 0; i < mtr->rq_num; i++) {
184                         rq = &dev->rqs[mtr->rq_id[i]];
185                         rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
186                 }
187
188                 profs.level = mtr->level;
189                 profs.count = 1;
190                 profs.ids[0] = mtr->bpf_id;
191                 rc = roc_nix_bpf_free(nix, &profs, 1);
192
193                 if (rc)
194                         return rc;
195
196                 TAILQ_REMOVE(fms, mtr, next);
197                 plt_free(mtr);
198         }
199         return 0;
200 }
201
202 static int
203 nix_security_release(struct cnxk_eth_dev *dev)
204 {
205         struct rte_eth_dev *eth_dev = dev->eth_dev;
206         struct cnxk_eth_sec_sess *eth_sec, *tvar;
207         struct roc_nix *nix = &dev->nix;
208         int rc, ret = 0;
209
210         /* Cleanup Inline inbound */
211         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
212                 /* Destroy inbound sessions */
213                 tvar = NULL;
214                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
215                         cnxk_eth_sec_ops.session_destroy(eth_dev,
216                                                          eth_sec->sess);
217
218                 /* Clear lookup mem */
219                 cnxk_nix_lookup_mem_sa_base_clear(dev);
220
221                 rc = roc_nix_inl_inb_fini(nix);
222                 if (rc)
223                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
224                 ret |= rc;
225
226                 if (dev->inb.sa_dptr) {
227                         plt_free(dev->inb.sa_dptr);
228                         dev->inb.sa_dptr = NULL;
229                 }
230         }
231
232         /* Cleanup Inline outbound */
233         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
234             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
235                 /* Destroy outbound sessions */
236                 tvar = NULL;
237                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
238                         cnxk_eth_sec_ops.session_destroy(eth_dev,
239                                                          eth_sec->sess);
240
241                 rc = roc_nix_inl_outb_fini(nix);
242                 if (rc)
243                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
244                 ret |= rc;
245
246                 plt_bitmap_free(dev->outb.sa_bmap);
247                 plt_free(dev->outb.sa_bmap_mem);
248                 dev->outb.sa_bmap = NULL;
249                 dev->outb.sa_bmap_mem = NULL;
250                 if (dev->outb.sa_dptr) {
251                         plt_free(dev->outb.sa_dptr);
252                         dev->outb.sa_dptr = NULL;
253                 }
254         }
255
256         dev->inb.inl_dev = false;
257         roc_nix_inb_mode_set(nix, false);
258         dev->nb_rxq_sso = 0;
259         dev->inb.nb_sess = 0;
260         dev->outb.nb_sess = 0;
261         return ret;
262 }
263
264 static void
265 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
266 {
267         struct rte_pktmbuf_pool_private *mbp_priv;
268         struct rte_eth_dev *eth_dev;
269         struct cnxk_eth_dev *dev;
270         uint32_t buffsz;
271
272         dev = rxq->dev;
273         eth_dev = dev->eth_dev;
274
275         /* Get rx buffer size */
276         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
277         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
278
279         if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
280                 dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
281                 dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
282         }
283 }
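/* Example, assuming the default mbuf pool geometry: with a data room of
 * RTE_MBUF_DEFAULT_BUF_SIZE (2176 B) and RTE_PKTMBUF_HEADROOM (128 B),
 * buffsz works out to 2048 B, so any MTU larger than
 * 2048 - CNXK_NIX_L2_OVERHEAD enables scatter Rx and multi-segment Tx.
 */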
284
285 int
286 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
287 {
288         struct rte_eth_dev_data *data = eth_dev->data;
289         struct cnxk_eth_rxq_sp *rxq;
290         int rc;
291
292         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
293         /* Setup scatter mode if needed by jumbo */
294         nix_enable_mseg_on_jumbo(rxq);
295
296         rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
297         if (rc)
298                 plt_err("Failed to set default MTU size, rc=%d", rc);
299
300         return rc;
301 }
302
303 static int
304 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
305 {
306         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
307         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
308         struct rte_eth_fc_conf fc_conf = {0};
309         int rc;
310
311         /* Both Rx & Tx flow ctrl are enabled (RTE_ETH_FC_FULL) in HW
312          * by the AF driver; update that info in the PMD structure.
313          */
314         rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
315         if (rc)
316                 goto exit;
317
318         fc->mode = fc_conf.mode;
319         fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
320                         (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
321         fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
322                         (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
323
324 exit:
325         return rc;
326 }
327
328 static int
329 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
330 {
331         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
332         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
333         struct rte_eth_fc_conf fc_cfg = {0};
334
335         if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
336                 return 0;
337
338         fc_cfg.mode = fc->mode;
339
340         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
341         if (roc_model_is_cn96_ax() &&
342             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
343             (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
344                 fc_cfg.mode =
345                                 (fc_cfg.mode == RTE_ETH_FC_FULL ||
346                                 fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
347                                 RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
348         }
349
350         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
351 }
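/* Net effect of the cn96 Ax workaround above: RTE_ETH_FC_FULL is downgraded
 * to RTE_ETH_FC_TX_PAUSE and RTE_ETH_FC_RX_PAUSE to RTE_ETH_FC_NONE, i.e.
 * the Rx-pause component is stripped to avoid the link credit deadlock.
 */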
352
353 uint64_t
354 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
355 {
356         uint16_t port_id = dev->eth_dev->data->port_id;
357         struct rte_mbuf mb_def;
358         uint64_t *tmp;
359
360         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
361         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
362                                  offsetof(struct rte_mbuf, data_off) !=
363                          2);
364         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
365                                  offsetof(struct rte_mbuf, data_off) !=
366                          4);
367         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
368                                  offsetof(struct rte_mbuf, data_off) !=
369                          6);
370         mb_def.nb_segs = 1;
371         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
372                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
373         mb_def.port = port_id;
374         rte_mbuf_refcnt_set(&mb_def, 1);
375
376         /* Prevent compiler reordering: rearm_data covers previous fields */
377         rte_compiler_barrier();
378         tmp = (uint64_t *)&mb_def.rearm_data;
379
380         return *tmp;
381 }
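/* The 64-bit value returned above is an mbuf "rearm" template: per the
 * offset asserts, it packs data_off, refcnt, nb_segs and port into the one
 * word that overlays rte_mbuf::rearm_data. A hypothetical fast-path user
 * would cache it per rxq and re-initialize each received mbuf with a single
 * store, e.g.:
 *
 *	*(uint64_t *)&mbuf->rearm_data = rxq->mbuf_initializer;
 *
 * (rxq->mbuf_initializer is an assumed field name, used only for
 * illustration.)
 */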
382
383 static inline uint8_t
384 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
385 {
386         /*
387          * A maximum of three segments can be supported with W8; choose
388          * NIX_MAXSQESZ_W16 for multi-segment offload.
389          */
390         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
391                 return NIX_MAXSQESZ_W16;
392         else
393                 return NIX_MAXSQESZ_W8;
394 }
395
396 int
397 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
398                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
399                         const struct rte_eth_txconf *tx_conf)
400 {
401         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
402         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
403         struct cnxk_eth_txq_sp *txq_sp;
404         struct roc_nix_sq *sq;
405         size_t txq_sz;
406         int rc;
407
408         /* Free memory prior to re-allocation if needed. */
409         if (eth_dev->data->tx_queues[qid] != NULL) {
410                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
411                 dev_ops->tx_queue_release(eth_dev, qid);
412                 eth_dev->data->tx_queues[qid] = NULL;
413         }
414
415         /* When Tx Security offload is enabled, increase tx desc count by
416          * max possible outbound desc count.
417          */
418         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
419                 nb_desc += dev->outb.nb_desc;
420
421         /* Setup ROC SQ */
422         sq = &dev->sqs[qid];
423         sq->qid = qid;
424         sq->nb_desc = nb_desc;
425         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
426
427         rc = roc_nix_sq_init(&dev->nix, sq);
428         if (rc) {
429                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
430                 return rc;
431         }
432
433         rc = -ENOMEM;
434         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
435         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
436         if (!txq_sp) {
437                 plt_err("Failed to alloc tx queue mem");
438                 rc |= roc_nix_sq_fini(sq);
439                 return rc;
440         }
441
442         txq_sp->dev = dev;
443         txq_sp->qid = qid;
444         txq_sp->qconf.conf.tx = *tx_conf;
445         /* Queue config should reflect global offloads */
446         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
447         txq_sp->qconf.nb_desc = nb_desc;
448
449         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
450                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
451                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
452                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
453
454         /* Store start of fast path area */
455         eth_dev->data->tx_queues[qid] = txq_sp + 1;
456         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
457         return 0;
458 }
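/* Layout note: the slow-path context (struct cnxk_eth_txq_sp) sits
 * immediately in front of the fast-path queue area, and only the fast-path
 * pointer (txq_sp + 1) is published in eth_dev->data->tx_queues[]. The
 * release path recovers the context via cnxk_eth_txq_to_sp(), i.e. by
 * stepping back one struct, the same trick nix_recalc_mtu() uses for the
 * rxq side above.
 */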
459
460 static void
461 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
462 {
463         void *txq = eth_dev->data->tx_queues[qid];
464         struct cnxk_eth_txq_sp *txq_sp;
465         struct cnxk_eth_dev *dev;
466         struct roc_nix_sq *sq;
467         int rc;
468
469         if (!txq)
470                 return;
471
472         txq_sp = cnxk_eth_txq_to_sp(txq);
473
474         dev = txq_sp->dev;
475
476         plt_nix_dbg("Releasing txq %u", qid);
477
478         /* Cleanup ROC SQ */
479         sq = &dev->sqs[qid];
480         rc = roc_nix_sq_fini(sq);
481         if (rc)
482                 plt_err("Failed to cleanup sq, rc=%d", rc);
483
484         /* Finally free */
485         plt_free(txq_sp);
486 }
487
488 int
489 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
490                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
491                         const struct rte_eth_rxconf *rx_conf,
492                         struct rte_mempool *mp)
493 {
494         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
495         struct roc_nix *nix = &dev->nix;
496         struct cnxk_eth_rxq_sp *rxq_sp;
497         struct rte_mempool_ops *ops;
498         const char *platform_ops;
499         struct roc_nix_rq *rq;
500         struct roc_nix_cq *cq;
501         uint16_t first_skip;
502         int rc = -EINVAL;
503         size_t rxq_sz;
504
505         /* Sanity checks */
506         if (rx_conf->rx_deferred_start == 1) {
507                 plt_err("Deferred Rx start is not supported");
508                 goto fail;
509         }
510
511         platform_ops = rte_mbuf_platform_mempool_ops();
512         /* This driver needs cnxk_npa mempool ops to work */
513         ops = rte_mempool_get_ops(mp->ops_index);
514         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
515                 plt_err("mempool ops should be of cnxk_npa type");
516                 goto fail;
517         }
518
519         if (mp->pool_id == 0) {
520                 plt_err("Invalid pool_id");
521                 goto fail;
522         }
523
524         /* Free memory prior to re-allocation if needed */
525         if (eth_dev->data->rx_queues[qid] != NULL) {
526                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
527
528                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
529                 dev_ops->rx_queue_release(eth_dev, qid);
530                 eth_dev->data->rx_queues[qid] = NULL;
531         }
532
533         /* Clamp the cq limit to the size of the packet pool aura for LBK
534          * to avoid meta packet drop as LBK does not currently support
535          * backpressure.
536          */
537         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
538                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
539
540                 /* Use current RQ's aura limit if inl rq is not available */
541                 if (!pkt_pool_limit)
542                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
543                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
544         }
545
546         /* Setup ROC CQ */
547         cq = &dev->cqs[qid];
548         cq->qid = qid;
549         cq->nb_desc = nb_desc;
550         rc = roc_nix_cq_init(&dev->nix, cq);
551         if (rc) {
552                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
553                 goto fail;
554         }
555
556         /* Setup ROC RQ */
557         rq = &dev->rqs[qid];
558         rq->qid = qid;
559         rq->aura_handle = mp->pool_id;
560         rq->flow_tag_width = 32;
561         rq->sso_ena = false;
562
563         /* Calculate first mbuf skip */
564         first_skip = (sizeof(struct rte_mbuf));
565         first_skip += RTE_PKTMBUF_HEADROOM;
566         first_skip += rte_pktmbuf_priv_size(mp);
567         rq->first_skip = first_skip;
568         rq->later_skip = sizeof(struct rte_mbuf);
569         rq->lpb_size = mp->elt_size;
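        /* Resulting per-buffer layout is [struct rte_mbuf][private area]
         * [headroom][packet data]: the HW skips first_skip bytes from the
         * buffer start for the first segment and later_skip bytes for
         * subsequent segments.
         */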
570
571         /* Enable Inline IPSec on RQ, will not be used for Poll mode */
572         if (roc_nix_inl_inb_is_enabled(nix))
573                 rq->ipsech_ena = true;
574
575         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
576         if (rc) {
577                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
578                 goto cq_fini;
579         }
580
581         /* Allocate and setup fast path rx queue */
582         rc = -ENOMEM;
583         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
584         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
585         if (!rxq_sp) {
586                 plt_err("Failed to alloc rx queue for rq=%d", qid);
587                 goto rq_fini;
588         }
589
590         /* Setup slow path fields */
591         rxq_sp->dev = dev;
592         rxq_sp->qid = qid;
593         rxq_sp->qconf.conf.rx = *rx_conf;
594         /* Queue config should reflect global offloads */
595         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
596         rxq_sp->qconf.nb_desc = nb_desc;
597         rxq_sp->qconf.mp = mp;
598
599         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
600                 /* Setup rq reference for inline dev if present */
601                 rc = roc_nix_inl_dev_rq_get(rq);
602                 if (rc)
603                         goto free_mem;
604         }
605
606         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
607                     cq->nb_desc);
608
609         /* Store start of fast path area */
610         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
611         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
612
613         /* Calculate the delta and freq mult between the PTP HI clock and tsc.
614          * These are needed to derive the raw clock value from the tsc counter.
615          * The read_clock eth op returns the raw clock value.
616          */
617         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
618                 rc = cnxk_nix_tsc_convert(dev);
619                 if (rc) {
620                         plt_err("Failed to calculate delta and freq mult");
621                         goto rq_fini;
622                 }
623         }
624
625         return 0;
626 free_mem:
627         plt_free(rxq_sp);
628 rq_fini:
629         rc |= roc_nix_rq_fini(rq);
630 cq_fini:
631         rc |= roc_nix_cq_fini(cq);
632 fail:
633         return rc;
634 }
635
636 static void
637 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
638 {
639         void *rxq = eth_dev->data->rx_queues[qid];
640         struct cnxk_eth_rxq_sp *rxq_sp;
641         struct cnxk_eth_dev *dev;
642         struct roc_nix_rq *rq;
643         struct roc_nix_cq *cq;
644         int rc;
645
646         if (!rxq)
647                 return;
648
649         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
650         dev = rxq_sp->dev;
651         rq = &dev->rqs[qid];
652
653         plt_nix_dbg("Releasing rxq %u", qid);
654
655         /* Release rq reference for inline dev if present */
656         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
657                 roc_nix_inl_dev_rq_put(rq);
658
659         /* Cleanup ROC RQ */
660         rc = roc_nix_rq_fini(rq);
661         if (rc)
662                 plt_err("Failed to cleanup rq, rc=%d", rc);
663
664         /* Cleanup ROC CQ */
665         cq = &dev->cqs[qid];
666         rc = roc_nix_cq_fini(cq);
667         if (rc)
668                 plt_err("Failed to cleanup cq, rc=%d", rc);
669
670         /* Finally free fast path area */
671         plt_free(rxq_sp);
672 }
673
674 uint32_t
675 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
676                        uint8_t rss_level)
677 {
678         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
679                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
680                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
681                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
682                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
683                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
684                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
685                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
686                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
687                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
688                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
689                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
690         };
691         uint32_t flowkey_cfg = 0;
692
693         dev->ethdev_rss_hf = ethdev_rss;
694
695         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
696             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
697                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
698         }
699
700         if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
701                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
702
703         if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
704                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
705
706         if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
707                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
708
709         if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
710                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
711
712         if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
713                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
714
715         if (ethdev_rss & RSS_IPV4_ENABLE)
716                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
717
718         if (ethdev_rss & RSS_IPV6_ENABLE)
719                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
720
721         if (ethdev_rss & RTE_ETH_RSS_TCP)
722                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
723
724         if (ethdev_rss & RTE_ETH_RSS_UDP)
725                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
726
727         if (ethdev_rss & RTE_ETH_RSS_SCTP)
728                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
729
730         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
731                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
732
733         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
734                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
735
736         if (ethdev_rss & RTE_ETH_RSS_PORT)
737                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
738
739         if (ethdev_rss & RTE_ETH_RSS_NVGRE)
740                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
741
742         if (ethdev_rss & RTE_ETH_RSS_VXLAN)
743                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
744
745         if (ethdev_rss & RTE_ETH_RSS_GENEVE)
746                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
747
748         if (ethdev_rss & RTE_ETH_RSS_GTPU)
749                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
750
751         return flowkey_cfg;
752 }
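/* Worked example, with the outer-key row (rss_level 0) and the macro
 * groupings assumed from their names: requesting RTE_ETH_RSS_IPV4 |
 * RTE_ETH_RSS_TCP ORs in FLOW_KEY_TYPE_IPV4 and FLOW_KEY_TYPE_TCP, so the
 * hash covers the outer IPv4 addresses plus the TCP port pair; adding
 * RTE_ETH_RSS_L3_SRC_ONLY additionally sets FLOW_KEY_TYPE_L3_SRC to limit
 * the L3 contribution to the source address.
 */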
753
754 static void
755 nix_free_queue_mem(struct cnxk_eth_dev *dev)
756 {
757         plt_free(dev->rqs);
758         plt_free(dev->cqs);
759         plt_free(dev->sqs);
760         dev->rqs = NULL;
761         dev->cqs = NULL;
762         dev->sqs = NULL;
763 }
764
765 static int
766 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
767 {
768         struct rte_eth_dev *eth_dev = dev->eth_dev;
769         int rc = 0;
770
771         TAILQ_INIT(&dev->mtr_profiles);
772         TAILQ_INIT(&dev->mtr_policy);
773         TAILQ_INIT(&dev->mtr);
774
775         if (eth_dev->dev_ops->mtr_ops_get == NULL)
776                 return rc;
777
778         return nix_mtr_capabilities_init(eth_dev);
779 }
780
781 static int
782 nix_rss_default_setup(struct cnxk_eth_dev *dev)
783 {
784         struct rte_eth_dev *eth_dev = dev->eth_dev;
785         uint8_t rss_hash_level;
786         uint32_t flowkey_cfg;
787         uint64_t rss_hf;
788
789         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
790         rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
791         if (rss_hash_level)
792                 rss_hash_level -= 1;
793
794         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
795         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
796 }
797
798 static int
799 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
800 {
801         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
802         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
803         struct cnxk_eth_qconf *tx_qconf = NULL;
804         struct cnxk_eth_qconf *rx_qconf = NULL;
805         struct cnxk_eth_rxq_sp *rxq_sp;
806         struct cnxk_eth_txq_sp *txq_sp;
807         int i, nb_rxq, nb_txq;
808         void **txq, **rxq;
809
810         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
811         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
812
813         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
814         if (tx_qconf == NULL) {
815                 plt_err("Failed to allocate memory for tx_qconf");
816                 goto fail;
817         }
818
819         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
820         if (rx_qconf == NULL) {
821                 plt_err("Failed to allocate memory for rx_qconf");
822                 goto fail;
823         }
824
825         txq = eth_dev->data->tx_queues;
826         for (i = 0; i < nb_txq; i++) {
827                 if (txq[i] == NULL) {
828                         tx_qconf[i].valid = false;
829                         plt_info("txq[%d] is already released", i);
830                         continue;
831                 }
832                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
833                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
834                 tx_qconf[i].valid = true;
835                 dev_ops->tx_queue_release(eth_dev, i);
836                 eth_dev->data->tx_queues[i] = NULL;
837         }
838
839         rxq = eth_dev->data->rx_queues;
840         for (i = 0; i < nb_rxq; i++) {
841                 if (rxq[i] == NULL) {
842                         rx_qconf[i].valid = false;
843                         plt_info("rxq[%d] is already released", i);
844                         continue;
845                 }
846                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
847                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
848                 rx_qconf[i].valid = true;
849                 dev_ops->rx_queue_release(eth_dev, i);
850                 eth_dev->data->rx_queues[i] = NULL;
851         }
852
853         dev->tx_qconf = tx_qconf;
854         dev->rx_qconf = rx_qconf;
855         return 0;
856
857 fail:
858         free(tx_qconf);
859         free(rx_qconf);
860         return -ENOMEM;
861 }
862
863 static int
864 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
865 {
866         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
867         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
868         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
869         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
870         int rc, i, nb_rxq, nb_txq;
871
872         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
873         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
874
875         rc = -ENOMEM;
876         /* Setup tx & rx queues with previous configuration so
877          * that the queues can be functional in cases like ports
878          * are started without reconfiguring queues.
879          *
880          * Usual re config sequence is like below:
881          * port_configure() {
882          *      if(reconfigure) {
883          *              queue_release()
884          *              queue_setup()
885          *      }
886          *      queue_configure() {
887          *              queue_release()
888          *              queue_setup()
889          *      }
890          * }
891          * port_start()
892          *
893          * In some application's control path, queue_configure() would
894          * NOT be invoked for TXQs/RXQs in port_configure().
895          * In such cases, queues can be functional after start as the
896          * queues are already setup in port_configure().
897          */
898         for (i = 0; i < nb_txq; i++) {
899                 if (!tx_qconf[i].valid)
900                         continue;
901                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
902                                              &tx_qconf[i].conf.tx);
903                 if (rc) {
904                         plt_err("Failed to setup tx queue rc=%d", rc);
905                         for (i -= 1; i >= 0; i--)
906                                 dev_ops->tx_queue_release(eth_dev, i);
907                         goto fail;
908                 }
909         }
910
911         free(tx_qconf);
912         tx_qconf = NULL;
913
914         for (i = 0; i < nb_rxq; i++) {
915                 if (!rx_qconf[i].valid)
916                         continue;
917                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
918                                              &rx_qconf[i].conf.rx,
919                                              rx_qconf[i].mp);
920                 if (rc) {
921                         plt_err("Failed to setup rx queue rc=%d", rc);
922                         for (i -= 1; i >= 0; i--)
923                                 dev_ops->rx_queue_release(eth_dev, i);
924                         goto tx_queue_release;
925                 }
926         }
927
928         free(rx_qconf);
929         rx_qconf = NULL;
930
931         return 0;
932
933 tx_queue_release:
934         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
935                 dev_ops->tx_queue_release(eth_dev, i);
936 fail:
937         if (tx_qconf)
938                 free(tx_qconf);
939         if (rx_qconf)
940                 free(rx_qconf);
941
942         return rc;
943 }
944
945 static uint16_t
946 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
947 {
948         RTE_SET_USED(queue);
949         RTE_SET_USED(mbufs);
950         RTE_SET_USED(pkts);
951
952         return 0;
953 }
954
955 static void
956 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
957 {
958         /* These dummy functions are required for supporting
959          * some applications which reconfigure queues without
960          * stopping tx burst and rx burst threads (e.g. the kni app).
961          * When the queue context is saved, txqs/rxqs are released,
962          * which would crash the app since rx/tx burst is still
963          * running on different lcores.
964          */
965         eth_dev->tx_pkt_burst = nix_eth_nop_burst;
966         eth_dev->rx_pkt_burst = nix_eth_nop_burst;
967         rte_mb();
968 }
969
970 static int
971 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
972 {
973         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
974         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
975         struct roc_nix *nix = &dev->nix;
976         int rc;
977
978         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
979         if (rc)
980                 return rc;
981
982         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
983                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
984                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
985                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
986
987         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
988                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
989                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
990                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
991         return 0;
992 }
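/* dev->lso_tun_fmt ends up holding eight packed 8-bit LSO format indices:
 * bytes 0-3 carry the plain tunnel formats (V4V4, V4V6, V6V4, V6V6) and
 * bytes 4-7 the UDP-tunnel variants, so a Tx fast path can select a format
 * with a single shift and mask, e.g. (lso_tun_fmt >> (idx << 3)) & 0xff.
 */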
993
994 static int
995 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
996 {
997         struct roc_nix *nix = &dev->nix;
998         int rc;
999
1000         /* Nothing much to do if offload is not enabled */
1001         if (!(dev->tx_offloads &
1002               (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1003                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1004                 return 0;
1005
1006         /* Setup LSO formats in AF. It's a no-op if another ethdev has
1007          * already set it up.
1008          */
1009         rc = roc_nix_lso_fmt_setup(nix);
1010         if (rc)
1011                 return rc;
1012
1013         return nix_lso_tun_fmt_update(dev);
1014 }
1015
1016 int
1017 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1018 {
1019         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1020         struct rte_eth_dev_data *data = eth_dev->data;
1021         struct rte_eth_conf *conf = &data->dev_conf;
1022         struct rte_eth_rxmode *rxmode = &conf->rxmode;
1023         struct rte_eth_txmode *txmode = &conf->txmode;
1024         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1025         struct roc_nix_fc_cfg fc_cfg = {0};
1026         struct roc_nix *nix = &dev->nix;
1027         struct rte_ether_addr *ea;
1028         uint8_t nb_rxq, nb_txq;
1029         uint64_t rx_cfg;
1030         void *qs;
1031         int rc;
1032
1033         rc = -EINVAL;
1034
1035         /* Sanity checks */
1036         if (rte_eal_has_hugepages() == 0) {
1037                 plt_err("Huge page is not configured");
1038                 goto fail_configure;
1039         }
1040
1041         if (conf->dcb_capability_en == 1) {
1042                 plt_err("dcb enable is not supported");
1043                 goto fail_configure;
1044         }
1045
1046         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1047                 plt_err("Flow director is not supported");
1048                 goto fail_configure;
1049         }
1050
1051         if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1052             rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1053                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1054                 goto fail_configure;
1055         }
1056
1057         if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1058                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1059                 goto fail_configure;
1060         }
1061
1062         /* Free the resources allocated from the previous configure */
1063         if (dev->configured == 1) {
1064                 /* Unregister queue irq's */
1065                 roc_nix_unregister_queue_irqs(nix);
1066
1067                 /* Unregister CQ irqs if present */
1068                 if (eth_dev->data->dev_conf.intr_conf.rxq)
1069                         roc_nix_unregister_cq_irqs(nix);
1070
1071                 /* Set no-op functions */
1072                 nix_set_nop_rxtx_function(eth_dev);
1073                 /* Store queue config for later */
1074                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1075                 if (rc)
1076                         goto fail_configure;
1077
1078                 /* Disable and free rte_meter entries */
1079                 rc = nix_meter_fini(dev);
1080                 if (rc)
1081                         goto fail_configure;
1082
1083                 /* Cleanup security support */
1084                 rc = nix_security_release(dev);
1085                 if (rc)
1086                         goto fail_configure;
1087
1088                 roc_nix_tm_fini(nix);
1089                 roc_nix_lf_free(nix);
1090         }
1091
1092         dev->rx_offloads = rxmode->offloads;
1093         dev->tx_offloads = txmode->offloads;
1094
1095         /* Prepare rx cfg */
1096         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1097         if (dev->rx_offloads &
1098             (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1099                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1100                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1101         }
1102         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1103                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1104                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1105
1106         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1107                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1108                 /* Disable drop re if rx offload security is enabled and
1109                  * platform does not support it.
1110                  */
1111                 if (dev->ipsecd_drop_re_dis)
1112                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1113         }
1114
1115         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1116         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1117
1118         /* Alloc a nix lf */
1119         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1120         if (rc) {
1121                 plt_err("Failed to init nix_lf rc=%d", rc);
1122                 goto fail_configure;
1123         }
1124
1125         dev->npc.channel = roc_nix_get_base_chan(nix);
1126
1127         nb_rxq = data->nb_rx_queues;
1128         nb_txq = data->nb_tx_queues;
1129         rc = -ENOMEM;
1130         if (nb_rxq) {
1131                 /* Allocate memory for roc rq's and cq's */
1132                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1133                 if (!qs) {
1134                         plt_err("Failed to alloc rqs");
1135                         goto free_nix_lf;
1136                 }
1137                 dev->rqs = qs;
1138
1139                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1140                 if (!qs) {
1141                         plt_err("Failed to alloc cqs");
1142                         goto free_nix_lf;
1143                 }
1144                 dev->cqs = qs;
1145         }
1146
1147         if (nb_txq) {
1148                 /* Allocate memory for roc sq's */
1149                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1150                 if (!qs) {
1151                         plt_err("Failed to alloc sqs");
1152                         goto free_nix_lf;
1153                 }
1154                 dev->sqs = qs;
1155         }
1156
1157         /* Re-enable NIX LF error interrupts */
1158         roc_nix_err_intr_ena_dis(nix, true);
1159         roc_nix_ras_intr_ena_dis(nix, true);
1160
1161         if (nix->rx_ptp_ena &&
1162             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1163                 plt_err("Both PTP and switch header enabled");
1164                 goto free_nix_lf;
1165         }
1166
1167         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1168                                     dev->npc.pre_l2_size_offset,
1169                                     dev->npc.pre_l2_size_offset_mask,
1170                                     dev->npc.pre_l2_size_shift_dir);
1171         if (rc) {
1172                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1173                 goto free_nix_lf;
1174         }
1175
1176         /* Setup LSO if needed */
1177         rc = nix_lso_fmt_setup(dev);
1178         if (rc) {
1179                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1180                 goto free_nix_lf;
1181         }
1182
1183         /* Configure RSS */
1184         rc = nix_rss_default_setup(dev);
1185         if (rc) {
1186                 plt_err("Failed to configure rss rc=%d", rc);
1187                 goto free_nix_lf;
1188         }
1189
1190         /* Init the default TM scheduler hierarchy */
1191         rc = roc_nix_tm_init(nix);
1192         if (rc) {
1193                 plt_err("Failed to init traffic manager, rc=%d", rc);
1194                 goto free_nix_lf;
1195         }
1196
1197         rc = nix_ingress_policer_setup(dev);
1198         if (rc) {
1199                 plt_err("Failed to setup ingress policer rc=%d", rc);
1200                 goto free_nix_lf;
1201         }
1202
1203         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1204         if (rc) {
1205                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1206                 goto tm_fini;
1207         }
1208
1209         /* Register queue IRQs */
1210         rc = roc_nix_register_queue_irqs(nix);
1211         if (rc) {
1212                 plt_err("Failed to register queue interrupts rc=%d", rc);
1213                 goto tm_fini;
1214         }
1215
1216         /* Register cq IRQs */
1217         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1218                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1219                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1220                                 dev->nix.cints);
1221                         goto q_irq_fini;
1222                 }
1223                 /* Rx interrupt feature cannot work with vector mode because
1224                  * vector mode does not process packets unless min 4 pkts are
1225                  * received, while cq interrupts are generated even for 1 pkt
1226                  * in the CQ.
1227                  */
1228                 dev->scalar_ena = true;
1229
1230                 rc = roc_nix_register_cq_irqs(nix);
1231                 if (rc) {
1232                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1233                         goto q_irq_fini;
1234                 }
1235         }
1236
1237         /* Configure loop back mode */
1238         rc = roc_nix_mac_loopback_enable(nix,
1239                                          eth_dev->data->dev_conf.lpbk_mode);
1240         if (rc) {
1241                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1242                 goto cq_fini;
1243         }
1244
1245         /* Setup Inline security support */
1246         rc = nix_security_setup(dev);
1247         if (rc)
1248                 goto cq_fini;
1249
1250         /* Init flow control configuration */
1251         fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1252         fc_cfg.rxchan_cfg.enable = true;
1253         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1254         if (rc) {
1255                 plt_err("Failed to initialize flow control rc=%d", rc);
1256                 goto cq_fini;
1257         }
1258
1259         /* Update flow control configuration to PMD */
1260         rc = nix_init_flow_ctrl_config(eth_dev);
1261         if (rc) {
1262                 plt_err("Failed to initialize flow control rc=%d", rc);
1263                 goto cq_fini;
1264         }
1265
1266         /*
1267          * Restore queue config when a reconfigure is followed by another
1268          * reconfigure without the application invoking queue configure.
1269          */
1270         if (dev->configured == 1) {
1271                 rc = nix_restore_queue_cfg(eth_dev);
1272                 if (rc)
1273                         goto sec_release;
1274         }
1275
1276         /* Update the mac address */
1277         ea = eth_dev->data->mac_addrs;
1278         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1279         if (rte_is_zero_ether_addr(ea))
1280                 rte_eth_random_addr((uint8_t *)ea);
1281
1282         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1283
1284         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1285                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1286                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1287                     dev->rx_offloads, dev->tx_offloads);
1288
1289         /* All good */
1290         dev->configured = 1;
1291         dev->nb_rxq = data->nb_rx_queues;
1292         dev->nb_txq = data->nb_tx_queues;
1293         return 0;
1294
1295 sec_release:
1296         rc |= nix_security_release(dev);
1297 cq_fini:
1298         roc_nix_unregister_cq_irqs(nix);
1299 q_irq_fini:
1300         roc_nix_unregister_queue_irqs(nix);
1301 tm_fini:
1302         roc_nix_tm_fini(nix);
1303 free_nix_lf:
1304         nix_free_queue_mem(dev);
1305         rc |= roc_nix_lf_free(nix);
1306 fail_configure:
1307         dev->configured = 0;
1308         return rc;
1309 }
1310
1311 int
1312 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1313 {
1314         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1315         struct rte_eth_dev_data *data = eth_dev->data;
1316         struct roc_nix_sq *sq = &dev->sqs[qid];
1317         int rc = -EINVAL;
1318
1319         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1320                 return 0;
1321
1322         rc = roc_nix_tm_sq_aura_fc(sq, true);
1323         if (rc) {
1324                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1325                 goto done;
1326         }
1327
1328         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1329 done:
1330         return rc;
1331 }
1332
1333 int
1334 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1335 {
1336         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1337         struct rte_eth_dev_data *data = eth_dev->data;
1338         struct roc_nix_sq *sq = &dev->sqs[qid];
1339         int rc;
1340
1341         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1342                 return 0;
1343
1344         rc = roc_nix_tm_sq_aura_fc(sq, false);
1345         if (rc) {
1346                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1347                         rc);
1348                 goto done;
1349         }
1350
1351         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1352 done:
1353         return rc;
1354 }
1355
1356 static int
1357 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1358 {
1359         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1360         struct rte_eth_dev_data *data = eth_dev->data;
1361         struct roc_nix_rq *rq = &dev->rqs[qid];
1362         int rc;
1363
1364         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1365                 return 0;
1366
1367         rc = roc_nix_rq_ena_dis(rq, true);
1368         if (rc) {
1369                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1370                 goto done;
1371         }
1372
1373         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1374 done:
1375         return rc;
1376 }
1377
1378 static int
1379 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1380 {
1381         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1382         struct rte_eth_dev_data *data = eth_dev->data;
1383         struct roc_nix_rq *rq = &dev->rqs[qid];
1384         int rc;
1385
1386         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1387                 return 0;
1388
1389         rc = roc_nix_rq_ena_dis(rq, false);
1390         if (rc) {
1391                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1392                 goto done;
1393         }
1394
1395         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1396 done:
1397         return rc;
1398 }
1399
1400 static int
1401 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1402 {
1403         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1404         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1405         struct rte_mbuf *rx_pkts[32];
1406         struct rte_eth_link link;
1407         int count, i, j, rc;
1408         void *rxq;
1409
1410         /* Disable switch hdr pkind */
1411         roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1412
1413         /* Stop link change events */
1414         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1415                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1416
1417         /* Disable Rx via NPC */
1418         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1419
1420         /* Stop rx queues and free up pkts pending */
1421         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1422                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1423                 if (rc)
1424                         continue;
1425
1426                 rxq = eth_dev->data->rx_queues[i];
1427                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1428                 while (count) {
1429                         for (j = 0; j < count; j++)
1430                                 rte_pktmbuf_free(rx_pkts[j]);
1431                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1432                 }
1433         }
1434
1435         /* Stop tx queues  */
1436         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1437                 dev_ops->tx_queue_stop(eth_dev, i);
1438
1439         /* Bring down link status internally */
1440         memset(&link, 0, sizeof(link));
1441         rte_eth_linkstatus_set(eth_dev, &link);
1442
1443         return 0;
1444 }
1445
1446 int
1447 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1448 {
1449         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1450         int rc, i;
1451
1452         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1453                 rc = nix_recalc_mtu(eth_dev);
1454                 if (rc)
1455                         return rc;
1456         }
1457
1458         /* Start rx queues */
1459         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1460                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1461                 if (rc)
1462                         return rc;
1463         }
1464
1465         /* Start tx queues  */
1466         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1467                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1468                 if (rc)
1469                         return rc;
1470         }
1471
1472         /* Update Flow control configuration */
1473         rc = nix_update_flow_ctrl_config(eth_dev);
1474         if (rc) {
1475                 plt_err("Failed to enable flow control. error code(%d)", rc);
1476                 return rc;
1477         }
1478
1479         /* Enable Rx in NPC */
1480         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1481         if (rc) {
1482                 plt_err("Failed to enable NPC rx %d", rc);
1483                 return rc;
1484         }
1485
1486         cnxk_nix_toggle_flag_link_cfg(dev, true);
1487
1488         /* Start link change events */
1489         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1490                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1491                 if (rc) {
1492                         plt_err("Failed to start cgx link event %d", rc);
1493                         goto rx_disable;
1494                 }
1495         }
1496
1497         /* Enable PTP if it is requested by the user or already
1498          * enabled on the PF owning this VF.
1499          */
1500         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1501         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1502                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1503         else
1504                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1505
1506         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1507                 rc = rte_mbuf_dyn_rx_timestamp_register
1508                         (&dev->tstamp.tstamp_dynfield_offset,
1509                          &dev->tstamp.rx_tstamp_dynflag);
1510                 if (rc != 0) {
1511                         plt_err("Failed to register Rx timestamp field/flag");
1512                         goto rx_disable;
1513                 }
1514         }
1515
1516         cnxk_nix_toggle_flag_link_cfg(dev, false);
1517
1518         return 0;
1519
1520 rx_disable:
1521         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1522         cnxk_nix_toggle_flag_link_cfg(dev, false);
1523         return rc;
1524 }
1525
1526 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1527 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1528
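/*
 * dev_close()/dev_reset() are only forward-declared above because the ops
 * table below references them before their definitions.
 *
 * Illustrative mapping (not part of this file): applications never call
 * these ops directly, they go through the generic rte_ethdev API, e.g.
 *
 *   rte_eth_dev_start(port_id);   ->  cnxk_nix_dev_start()
 *   rte_eth_dev_stop(port_id);    ->  cnxk_nix_dev_stop()
 *   rte_eth_dev_close(port_id);   ->  cnxk_nix_dev_close()
 */
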
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
        .mtu_set = cnxk_nix_mtu_set,
        .mac_addr_add = cnxk_nix_mac_addr_add,
        .mac_addr_remove = cnxk_nix_mac_addr_del,
        .mac_addr_set = cnxk_nix_mac_addr_set,
        .dev_infos_get = cnxk_nix_info_get,
        .link_update = cnxk_nix_link_update,
        .tx_queue_release = cnxk_nix_tx_queue_release,
        .rx_queue_release = cnxk_nix_rx_queue_release,
        .dev_stop = cnxk_nix_dev_stop,
        .dev_close = cnxk_nix_dev_close,
        .dev_reset = cnxk_nix_dev_reset,
        .tx_queue_start = cnxk_nix_tx_queue_start,
        .rx_queue_start = cnxk_nix_rx_queue_start,
        .rx_queue_stop = cnxk_nix_rx_queue_stop,
        .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
        .promiscuous_enable = cnxk_nix_promisc_enable,
        .promiscuous_disable = cnxk_nix_promisc_disable,
        .allmulticast_enable = cnxk_nix_allmulticast_enable,
        .allmulticast_disable = cnxk_nix_allmulticast_disable,
        .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
        .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
        .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
        .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
        .dev_set_link_up = cnxk_nix_set_link_up,
        .dev_set_link_down = cnxk_nix_set_link_down,
        .get_module_info = cnxk_nix_get_module_info,
        .get_module_eeprom = cnxk_nix_get_module_eeprom,
        .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
        .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
        .pool_ops_supported = cnxk_nix_pool_ops_supported,
        .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
        .stats_get = cnxk_nix_stats_get,
        .stats_reset = cnxk_nix_stats_reset,
        .xstats_get = cnxk_nix_xstats_get,
        .xstats_get_names = cnxk_nix_xstats_get_names,
        .xstats_reset = cnxk_nix_xstats_reset,
        .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
        .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
        .fw_version_get = cnxk_nix_fw_version_get,
        .rxq_info_get = cnxk_nix_rxq_info_get,
        .txq_info_get = cnxk_nix_txq_info_get,
        .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
        .flow_ops_get = cnxk_nix_flow_ops_get,
        .get_reg = cnxk_nix_dev_get_reg,
        .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
        .timesync_read_time = cnxk_nix_timesync_read_time,
        .timesync_write_time = cnxk_nix_timesync_write_time,
        .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
        .read_clock = cnxk_nix_read_clock,
        .reta_update = cnxk_nix_reta_update,
        .reta_query = cnxk_nix_reta_query,
        .rss_hash_update = cnxk_nix_rss_hash_update,
        .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
        .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
        .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
        .tm_ops_get = cnxk_nix_tm_ops_get,
        .mtr_ops_get = cnxk_nix_mtr_ops_get,
};

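/*
 * Per-port init: install the ops table and fast-path callbacks, allocate
 * the security context, then (primary process only) parse devargs, bring
 * up the base ROC NIX, register MAC link callbacks, allocate the MAC
 * address table and advertise the offload/speed capabilities.
 */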
static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_security_ctx *sec_ctx;
        struct roc_nix *nix = &dev->nix;
        struct rte_pci_device *pci_dev;
        int rc, max_entries;

        eth_dev->dev_ops = &cnxk_eth_dev_ops;
        eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
        eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
        eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;

        /* Alloc security context */
        sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
        if (!sec_ctx)
                return -ENOMEM;
        sec_ctx->device = eth_dev;
        sec_ctx->ops = &cnxk_eth_sec_ops;
        sec_ctx->flags =
                (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
        eth_dev->security_ctx = sec_ctx;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Parse devargs string */
        rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
        if (rc) {
                plt_err("Failed to parse devargs rc=%d", rc);
                goto error;
        }

        /* Initialize base roc nix */
        nix->pci_dev = pci_dev;
        nix->hw_vlan_ins = true;
        rc = roc_nix_dev_init(nix);
        if (rc) {
                plt_err("Failed to initialize roc nix rc=%d", rc);
                goto error;
        }

        /* Register link status change callback */
        roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);

        /* Register link info get callback */
        roc_nix_mac_link_info_get_cb_register(nix,
                                              cnxk_eth_dev_link_status_get_cb);

        dev->eth_dev = eth_dev;
        dev->configured = 0;
        dev->ptype_disable = 0;

        TAILQ_INIT(&dev->inb.list);
        TAILQ_INIT(&dev->outb.list);
        rte_spinlock_init(&dev->inb.lock);
        rte_spinlock_init(&dev->outb.lock);

        /* For VFs, the returned max_entries will be 0, but one entry must
         * still be allocated to hold the default MAC address, so set it to 1.
         */
        if (roc_nix_is_vf_or_sdp(nix))
                max_entries = 1;
        else
                max_entries = roc_nix_mac_max_entries_get(nix);

        if (max_entries <= 0) {
                plt_err("Failed to get max entries for mac addr");
                rc = -ENOTSUP;
                goto dev_fini;
        }

        eth_dev->data->mac_addrs =
                rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                plt_err("Failed to allocate memory for mac addr");
                rc = -ENOMEM;
                goto dev_fini;
        }

        dev->max_mac_entries = max_entries;
        dev->dmac_filter_count = 1;

        /* Get mac address */
        rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
        if (rc) {
                plt_err("Failed to get mac addr, rc=%d", rc);
                goto free_mac_addrs;
        }

        /* Update the mac address */
        memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

        if (!roc_nix_is_vf_or_sdp(nix)) {
                /* Sync same MAC address to CGX/RPM table */
                rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
                if (rc) {
                        plt_err("Failed to set mac addr, rc=%d", rc);
                        goto free_mac_addrs;
                }
        }

        /* Union of all capabilities supported by CNXK.
         * Platform specific capabilities will be
         * updated later.
         */
        dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
        dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
        dev->speed_capa = nix_get_speed_capa(dev);

        /* Initialize roc npc */
        dev->npc.roc_nix = nix;
        rc = roc_npc_init(&dev->npc);
        if (rc)
                goto free_mac_addrs;

        plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
                    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
                    eth_dev->data->port_id, roc_nix_get_pf(nix),
                    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
                    dev->rx_offload_capa, dev->tx_offload_capa);
        return 0;

free_mac_addrs:
        rte_free(eth_dev->data->mac_addrs);
dev_fini:
        roc_nix_dev_fini(nix);
error:
        plt_err("Failed to init nix eth_dev rc=%d", rc);
        return rc;
}

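/*
 * Common per-port teardown used by dev_close(), dev_reset() and PCI remove:
 * release queue, security, TM and IRQ resources, then free the NIX LF and
 * the base ROC NIX device. On the reset path dev_private is kept intact so
 * that cnxk_eth_dev_init() can be invoked again on the same port.
 */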
static int
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
{
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
        struct roc_nix *nix = &dev->nix;
        int rc, i;

        plt_free(eth_dev->security_ctx);
        eth_dev->security_ctx = NULL;

        /* Nothing to be done for secondary processes */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* Clear the flag since we are closing down */
        dev->configured = 0;

        roc_nix_npc_rx_ena_dis(nix, false);

        /* Disable and free rte_meter entries */
        nix_meter_fini(dev);

        /* Disable and free rte_flow entries */
        roc_npc_fini(&dev->npc);

        /* Disable link status events */
        roc_nix_mac_link_event_start_stop(nix, false);

        /* Unregister the link update op; this is required to stop VFs from
         * receiving link status updates on the exit path.
         */
        roc_nix_mac_link_cb_unregister(nix);

        /* Free up SQs */
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                dev_ops->tx_queue_release(eth_dev, i);
                eth_dev->data->tx_queues[i] = NULL;
        }
        eth_dev->data->nb_tx_queues = 0;

        /* Free up RQs and CQs */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                dev_ops->rx_queue_release(eth_dev, i);
                eth_dev->data->rx_queues[i] = NULL;
        }
        eth_dev->data->nb_rx_queues = 0;

        /* Free security resources */
        nix_security_release(dev);

        /* Free tm resources */
        roc_nix_tm_fini(nix);

        /* Unregister queue irqs */
        roc_nix_unregister_queue_irqs(nix);

        /* Unregister cq irqs */
        if (eth_dev->data->dev_conf.intr_conf.rxq)
                roc_nix_unregister_cq_irqs(nix);

        /* Free ROC RQ, SQ and CQ memory */
        nix_free_queue_mem(dev);

        /* Free nix lf resources */
        rc = roc_nix_lf_free(nix);
        if (rc)
                plt_err("Failed to free nix lf, rc=%d", rc);

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        rc = roc_nix_dev_fini(nix);
        /* Can be freed later by PMD if NPA LF is in use */
        if (rc == -EAGAIN) {
                if (!reset)
                        eth_dev->data->dev_private = NULL;
                return 0;
        } else if (rc) {
                plt_err("Failed in nix dev fini, rc=%d", rc);
        }

        return rc;
}

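/*
 * dev_close() and dev_reset() both funnel into cnxk_eth_dev_uninit();
 * only dev_reset() re-runs cnxk_eth_dev_init() afterwards to bring the
 * port back to its freshly-probed state.
 */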
static int
cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
{
        cnxk_eth_dev_uninit(eth_dev, false);
        return 0;
}

static int
cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
{
        int rc;

        rc = cnxk_eth_dev_uninit(eth_dev, true);
        if (rc)
                return rc;

        return cnxk_eth_dev_init(eth_dev);
}

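/*
 * PCI remove: release the ethdev port first, then, if this PCI function
 * also hosts the shared NPA/NIX resource, attempt the base NIX teardown;
 * -EAGAIN means another device still uses the common resource and the
 * free is deferred.
 */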
int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
        struct rte_eth_dev *eth_dev;
        struct roc_nix *nix;
        int rc = -EINVAL;

        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        if (eth_dev) {
                /* Cleanup eth dev */
                rc = cnxk_eth_dev_uninit(eth_dev, false);
                if (rc)
                        return rc;

                rte_eth_dev_release_port(eth_dev);
        }

        /* Nothing to be done for secondary processes */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        /* Check if this device is hosting the common resource */
        nix = roc_idev_npa_nix_get();
        if (!nix || nix->pci_dev != pci_dev)
                return 0;

        /* Try nix fini now */
        rc = roc_nix_dev_fini(nix);
        if (rc == -EAGAIN) {
                plt_info("%s: common resource in use by other devices",
                         pci_dev->name);
                goto exit;
        } else if (rc) {
                plt_err("Failed in nix dev fini, rc=%d", rc);
                goto exit;
        }

        /* Free device pointer as rte_ethdev does not have it anymore */
        rte_free(nix);
exit:
        return rc;
}

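/*
 * Probe helper for the platform PMDs. Illustrative sketch (hypothetical
 * names, not part of this file) of how a platform driver is expected to
 * wire these helpers into its rte_pci_driver:
 *
 *   static struct rte_pci_driver example_nix_pci = {
 *           .id_table = example_nix_pci_map,
 *           .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
 *           .probe = example_nix_probe,   // calls cnxk_nix_probe()
 *           .remove = example_nix_remove, // calls cnxk_nix_remove()
 *   };
 *   RTE_PMD_REGISTER_PCI(net_example_nix, example_nix_pci);
 */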
int
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        int rc;

        RTE_SET_USED(pci_drv);

        rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
                                           cnxk_eth_dev_init);

        /* On error in a secondary process, re-check whether the port exists
         * in the primary process or is in the middle of detaching.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
                if (!rte_eth_dev_allocated(pci_dev->device.name))
                        return 0;
        return rc;
}