net/cnxk: fix inline device RQ tag mask
[dpdk.git] drivers/net/cnxk/cnxk_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
6 #include <rte_eventdev.h>
7
8 static inline uint64_t
9 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
10 {
11         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
12
13         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
14             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
15                 capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
16
17         return capa;
18 }
19
20 static inline uint64_t
21 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
22 {
23         RTE_SET_USED(dev);
24         return CNXK_NIX_TX_OFFLOAD_CAPA;
25 }
26
27 static inline uint32_t
28 nix_get_speed_capa(struct cnxk_eth_dev *dev)
29 {
30         uint32_t speed_capa;
31
32         /* Auto negotiation disabled */
33         speed_capa = RTE_ETH_LINK_SPEED_FIXED;
34         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
35                 speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
36                               RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
37                               RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
38         }
39
40         return speed_capa;
41 }
42
43 int
44 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
45 {
46         struct roc_nix *nix = &dev->nix;
47
48         if (dev->inb.inl_dev == use_inl_dev)
49                 return 0;
50
51         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
52                     dev->inb.nb_sess, !!dev->inb.inl_dev);
53
54         /* Change the mode */
55         dev->inb.inl_dev = use_inl_dev;
56
57         /* Update RoC for NPC rule insertion */
58         roc_nix_inb_mode_set(nix, use_inl_dev);
59
60         /* Setup lookup mem */
61         return cnxk_nix_lookup_mem_sa_base_set(dev);
62 }
63
64 static int
65 nix_security_setup(struct cnxk_eth_dev *dev)
66 {
67         struct roc_nix *nix = &dev->nix;
68         int i, rc = 0;
69
70         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
71                 /* Setup Inline Inbound */
72                 rc = roc_nix_inl_inb_init(nix);
73                 if (rc) {
74                         plt_err("Failed to initialize nix inline inb, rc=%d",
75                                 rc);
76                         return rc;
77                 }
78
79                 /* By default, use the inline device for poll mode.
80                  * This will be overridden when event mode RQs are set up.
81                  */
82                 cnxk_nix_inb_mode_set(dev, true);
83
84                 /* Allocate memory to be used as dptr for CPT ucode
85                  * WRITE_SA op.
86                  */
87                 dev->inb.sa_dptr =
88                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
89                 if (!dev->inb.sa_dptr) {
90                         plt_err("Couldn't allocate memory for SA dptr");
91                         rc = -ENOMEM;
92                         goto cleanup;
93                 }
94         }
95
96         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
97             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
98                 struct plt_bitmap *bmap;
99                 size_t bmap_sz;
100                 void *mem;
101
102                 /* Setup enough descriptors for all tx queues */
103                 nix->outb_nb_desc = dev->outb.nb_desc;
104                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
105
106                 /* Setup Inline Outbound */
107                 rc = roc_nix_inl_outb_init(nix);
108                 if (rc) {
109                         plt_err("Failed to initialize nix inline outb, rc=%d",
110                                 rc);
111                         goto sa_dptr_free;
112                 }
113
114                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
115
116                 /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
117                 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
118                         return 0;
119
120                 /* Allocate memory to be used as dptr for CPT ucode
121                  * WRITE_SA op.
122                  */
123                 dev->outb.sa_dptr =
124                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
125                 if (!dev->outb.sa_dptr) {
126                         plt_err("Couldn't allocate memory for SA dptr");
127                         rc = -ENOMEM;
128                         goto sa_dptr_free;
129                 }
130
131                 rc = -ENOMEM;
132                 /* Allocate a bitmap to alloc and free sa indexes */
133                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
134                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
135                 if (mem == NULL) {
136                         plt_err("Outbound SA bmap alloc failed");
137
138                         rc |= roc_nix_inl_outb_fini(nix);
139                         goto sa_dptr_free;
140                 }
141
142                 rc = -EIO;
143                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
144                 if (!bmap) {
145                         plt_err("Outbound SA bmap init failed");
146
147                         rc |= roc_nix_inl_outb_fini(nix);
148                         plt_free(mem);
149                         goto sa_dptr_free;
150                 }
151
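                /* Start with all outbound SA indexes marked available in the bitmap */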
152                 for (i = 0; i < dev->outb.max_sa; i++)
153                         plt_bitmap_set(bmap, i);
154
155                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
156                 dev->outb.sa_bmap_mem = mem;
157                 dev->outb.sa_bmap = bmap;
158         }
159         return 0;
160
161 sa_dptr_free:
162         if (dev->inb.sa_dptr)
163                 plt_free(dev->inb.sa_dptr);
164         if (dev->outb.sa_dptr)
165                 plt_free(dev->outb.sa_dptr);
166 cleanup:
167         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
168                 rc |= roc_nix_inl_inb_fini(nix);
169         return rc;
170 }
171
172 static int
173 nix_meter_fini(struct cnxk_eth_dev *dev)
174 {
175         struct cnxk_meter_node *next_mtr = NULL;
176         struct roc_nix_bpf_objs profs = {0};
177         struct cnxk_meter_node *mtr = NULL;
178         struct cnxk_mtr *fms = &dev->mtr;
179         struct roc_nix *nix = &dev->nix;
180         struct roc_nix_rq *rq;
181         uint32_t i;
182         int rc = 0;
183
184         RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
185                 for (i = 0; i < mtr->rq_num; i++) {
186                         rq = &dev->rqs[mtr->rq_id[i]];
187                         rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
188                 }
189
190                 profs.level = mtr->level;
191                 profs.count = 1;
192                 profs.ids[0] = mtr->bpf_id;
193                 rc = roc_nix_bpf_free(nix, &profs, 1);
194
195                 if (rc)
196                         return rc;
197
198                 TAILQ_REMOVE(fms, mtr, next);
199                 plt_free(mtr);
200         }
201         return 0;
202 }
203
204 static int
205 nix_security_release(struct cnxk_eth_dev *dev)
206 {
207         struct rte_eth_dev *eth_dev = dev->eth_dev;
208         struct cnxk_eth_sec_sess *eth_sec, *tvar;
209         struct roc_nix *nix = &dev->nix;
210         int rc, ret = 0;
211
212         /* Cleanup Inline inbound */
213         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
214                 /* Destroy inbound sessions */
215                 tvar = NULL;
216                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
217                         cnxk_eth_sec_ops.session_destroy(eth_dev,
218                                                          eth_sec->sess);
219
220                 /* Clear lookup mem */
221                 cnxk_nix_lookup_mem_sa_base_clear(dev);
222
223                 rc = roc_nix_inl_inb_fini(nix);
224                 if (rc)
225                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
226                 ret |= rc;
227
228                 if (dev->inb.sa_dptr) {
229                         plt_free(dev->inb.sa_dptr);
230                         dev->inb.sa_dptr = NULL;
231                 }
232         }
233
234         /* Cleanup Inline outbound */
235         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
236             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
237                 /* Destroy outbound sessions */
238                 tvar = NULL;
239                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
240                         cnxk_eth_sec_ops.session_destroy(eth_dev,
241                                                          eth_sec->sess);
242
243                 rc = roc_nix_inl_outb_fini(nix);
244                 if (rc)
245                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
246                 ret |= rc;
247
248                 plt_bitmap_free(dev->outb.sa_bmap);
249                 plt_free(dev->outb.sa_bmap_mem);
250                 dev->outb.sa_bmap = NULL;
251                 dev->outb.sa_bmap_mem = NULL;
252                 if (dev->outb.sa_dptr) {
253                         plt_free(dev->outb.sa_dptr);
254                         dev->outb.sa_dptr = NULL;
255                 }
256         }
257
258         dev->inb.inl_dev = false;
259         roc_nix_inb_mode_set(nix, false);
260         dev->nb_rxq_sso = 0;
261         dev->inb.nb_sess = 0;
262         dev->outb.nb_sess = 0;
263         return ret;
264 }
265
266 static void
267 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
268 {
269         struct rte_pktmbuf_pool_private *mbp_priv;
270         struct rte_eth_dev *eth_dev;
271         struct cnxk_eth_dev *dev;
272         uint32_t buffsz;
273
274         dev = rxq->dev;
275         eth_dev = dev->eth_dev;
276
277         /* Get rx buffer size */
278         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
279         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
280
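        /* Enable scatter on Rx and multi-segment on Tx when an MTU-sized
         * frame (MTU + L2 overhead) does not fit in a single Rx buffer.
         */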
281         if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
282                 dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
283                 dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
284         }
285 }
286
287 int
288 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
289 {
290         struct rte_eth_dev_data *data = eth_dev->data;
291         struct cnxk_eth_rxq_sp *rxq;
292         int rc;
293
294         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
295         /* Setup scatter mode if needed by jumbo */
296         nix_enable_mseg_on_jumbo(rxq);
297
298         rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
299         if (rc)
300                 plt_err("Failed to set default MTU size, rc=%d", rc);
301
302         return rc;
303 }
304
305 static int
306 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
307 {
308         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
309         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
310         struct rte_eth_fc_conf fc_conf = {0};
311         int rc;
312
313         /* Both Rx & Tx flow control are enabled (RTE_ETH_FC_FULL) in HW
314          * by the AF driver; reflect that state in the PMD structure.
315          */
316         rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
317         if (rc)
318                 goto exit;
319
320         fc->mode = fc_conf.mode;
321         fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
322                         (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
323         fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
324                         (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
325
326 exit:
327         return rc;
328 }
329
330 static int
331 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
332 {
333         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
334         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
335         struct rte_eth_fc_conf fc_cfg = {0};
336
337         if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
338                 return 0;
339
340         fc_cfg.mode = fc->mode;
341
342         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
343         if (roc_model_is_cn96_ax() &&
344             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
345             (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
346                 fc_cfg.mode =
347                                 (fc_cfg.mode == RTE_ETH_FC_FULL ||
348                                 fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
349                                 RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
350         }
351
352         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
353 }
354
355 uint64_t
356 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
357 {
358         uint16_t port_id = dev->eth_dev->data->port_id;
359         struct rte_mbuf mb_def;
360         uint64_t *tmp;
361
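        /*
         * Assert the rte_mbuf layout assumed by the Rx fast path: refcnt,
         * nb_segs and port must directly follow data_off so that rearm_data
         * can be read and written as a single 64-bit word, roughly
         * (illustrative only):
         *   *(uint64_t *)&mbuf->rearm_data = cnxk_nix_rxq_mbuf_setup(dev);
         */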
362         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
363         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
364                                  offsetof(struct rte_mbuf, data_off) !=
365                          2);
366         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
367                                  offsetof(struct rte_mbuf, data_off) !=
368                          4);
369         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
370                                  offsetof(struct rte_mbuf, data_off) !=
371                          6);
372         mb_def.nb_segs = 1;
373         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
374                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
375         mb_def.port = port_id;
376         rte_mbuf_refcnt_set(&mb_def, 1);
377
378         /* Prevent compiler reordering: rearm_data covers previous fields */
379         rte_compiler_barrier();
380         tmp = (uint64_t *)&mb_def.rearm_data;
381
382         return *tmp;
383 }
384
385 static inline uint8_t
386 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
387 {
388         /*
389          * A maximum of three segments can be supported with W8; choose
390          * NIX_MAXSQESZ_W16 for multi-segment offload.
391          */
392         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
393                 return NIX_MAXSQESZ_W16;
394         else
395                 return NIX_MAXSQESZ_W8;
396 }
397
398 int
399 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
400                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
401                         const struct rte_eth_txconf *tx_conf)
402 {
403         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
404         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
405         struct cnxk_eth_txq_sp *txq_sp;
406         struct roc_nix_sq *sq;
407         size_t txq_sz;
408         int rc;
409
410         /* Free memory prior to re-allocation if needed. */
411         if (eth_dev->data->tx_queues[qid] != NULL) {
412                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
413                 dev_ops->tx_queue_release(eth_dev, qid);
414                 eth_dev->data->tx_queues[qid] = NULL;
415         }
416
417         /* When Tx Security offload is enabled, increase tx desc count by
418          * max possible outbound desc count.
419          */
420         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
421                 nb_desc += dev->outb.nb_desc;
422
423         /* Setup ROC SQ */
424         sq = &dev->sqs[qid];
425         sq->qid = qid;
426         sq->nb_desc = nb_desc;
427         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
428
429         rc = roc_nix_sq_init(&dev->nix, sq);
430         if (rc) {
431                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
432                 return rc;
433         }
434
435         rc = -ENOMEM;
436         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
437         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
438         if (!txq_sp) {
439                 plt_err("Failed to alloc tx queue mem");
440                 rc |= roc_nix_sq_fini(sq);
441                 return rc;
442         }
443
444         txq_sp->dev = dev;
445         txq_sp->qid = qid;
446         txq_sp->qconf.conf.tx = *tx_conf;
447         /* Queue config should reflect global offloads */
448         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
449         txq_sp->qconf.nb_desc = nb_desc;
450
451         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
452                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
453                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
454                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
455
456         /* Store start of fast path area */
457         eth_dev->data->tx_queues[qid] = txq_sp + 1;
458         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
459         return 0;
460 }
461
462 static void
463 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
464 {
465         void *txq = eth_dev->data->tx_queues[qid];
466         struct cnxk_eth_txq_sp *txq_sp;
467         struct cnxk_eth_dev *dev;
468         struct roc_nix_sq *sq;
469         int rc;
470
471         if (!txq)
472                 return;
473
474         txq_sp = cnxk_eth_txq_to_sp(txq);
475
476         dev = txq_sp->dev;
477
478         plt_nix_dbg("Releasing txq %u", qid);
479
480         /* Cleanup ROC SQ */
481         sq = &dev->sqs[qid];
482         rc = roc_nix_sq_fini(sq);
483         if (rc)
484                 plt_err("Failed to cleanup sq, rc=%d", rc);
485
486         /* Finally free */
487         plt_free(txq_sp);
488 }
489
490 int
491 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
492                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
493                         const struct rte_eth_rxconf *rx_conf,
494                         struct rte_mempool *mp)
495 {
496         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
497         struct roc_nix *nix = &dev->nix;
498         struct cnxk_eth_rxq_sp *rxq_sp;
499         struct rte_mempool_ops *ops;
500         const char *platform_ops;
501         struct roc_nix_rq *rq;
502         struct roc_nix_cq *cq;
503         uint16_t first_skip;
504         int rc = -EINVAL;
505         size_t rxq_sz;
506
507         /* Sanity checks */
508         if (rx_conf->rx_deferred_start == 1) {
509                 plt_err("Deferred Rx start is not supported");
510                 goto fail;
511         }
512
513         platform_ops = rte_mbuf_platform_mempool_ops();
514         /* This driver needs cnxk_npa mempool ops to work */
515         ops = rte_mempool_get_ops(mp->ops_index);
516         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
517                 plt_err("mempool ops should be of cnxk_npa type");
518                 goto fail;
519         }
520
521         if (mp->pool_id == 0) {
522                 plt_err("Invalid pool_id");
523                 goto fail;
524         }
525
526         /* Free memory prior to re-allocation if needed */
527         if (eth_dev->data->rx_queues[qid] != NULL) {
528                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
529
530                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
531                 dev_ops->rx_queue_release(eth_dev, qid);
532                 eth_dev->data->rx_queues[qid] = NULL;
533         }
534
535         /* Clamp the CQ limit to the size of the packet pool aura for LBK
536          * to avoid meta packet drops, as LBK does not currently support
537          * backpressure.
538          */
539         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
540                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
541
542                 /* Use current RQ's aura limit if inl rq is not available */
543                 if (!pkt_pool_limit)
544                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
545                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
546         }
547
548         /* Setup ROC CQ */
549         cq = &dev->cqs[qid];
550         cq->qid = qid;
551         cq->nb_desc = nb_desc;
552         rc = roc_nix_cq_init(&dev->nix, cq);
553         if (rc) {
554                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
555                 goto fail;
556         }
557
558         /* Setup ROC RQ */
559         rq = &dev->rqs[qid];
560         rq->qid = qid;
561         rq->aura_handle = mp->pool_id;
562         rq->flow_tag_width = 32;
563         rq->sso_ena = false;
564
565         /* Calculate first mbuf skip (mbuf header + headroom + private area) */
566         first_skip = (sizeof(struct rte_mbuf));
567         first_skip += RTE_PKTMBUF_HEADROOM;
568         first_skip += rte_pktmbuf_priv_size(mp);
569         rq->first_skip = first_skip;
570         rq->later_skip = sizeof(struct rte_mbuf);
571         rq->lpb_size = mp->elt_size;
572
573         /* Enable Inline IPSec on RQ, will not be used for Poll mode */
574         if (roc_nix_inl_inb_is_enabled(nix))
575                 rq->ipsech_ena = true;
576
577         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
578         if (rc) {
579                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
580                 goto cq_fini;
581         }
582
583         /* Allocate and setup fast path rx queue */
584         rc = -ENOMEM;
585         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
586         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
587         if (!rxq_sp) {
588                 plt_err("Failed to alloc rx queue for rq=%d", qid);
589                 goto rq_fini;
590         }
591
592         /* Setup slow path fields */
593         rxq_sp->dev = dev;
594         rxq_sp->qid = qid;
595         rxq_sp->qconf.conf.rx = *rx_conf;
596         /* Queue config should reflect global offloads */
597         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
598         rxq_sp->qconf.nb_desc = nb_desc;
599         rxq_sp->qconf.mp = mp;
600
601         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
602                 /* Pass a tag mask used to handle error packets in the
603                  * inline device. The ethdev RQ's tag_mask field will be
604                  * overwritten later when SSO is set up.
605                  */
606                 rq->tag_mask =
607                         0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
608
609                 /* Setup rq reference for inline dev if present */
610                 rc = roc_nix_inl_dev_rq_get(rq);
611                 if (rc)
612                         goto free_mem;
613         }
614
615         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
616                     cq->nb_desc);
617
618         /* Store start of fast path area */
619         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
620         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
621
622         /* Calculate the delta and frequency multiplier between the PTP HI
623          * clock and the TSC. These are needed to derive the raw clock value
624          * from the TSC counter; the read_clock eth op returns the raw clock.
625          */
626         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
627                 rc = cnxk_nix_tsc_convert(dev);
628                 if (rc) {
629                         plt_err("Failed to calculate delta and freq mult");
630                         goto rq_fini;
631                 }
632         }
633
634         return 0;
635 free_mem:
636         plt_free(rxq_sp);
637 rq_fini:
638         rc |= roc_nix_rq_fini(rq);
639 cq_fini:
640         rc |= roc_nix_cq_fini(cq);
641 fail:
642         return rc;
643 }
644
645 static void
646 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
647 {
648         void *rxq = eth_dev->data->rx_queues[qid];
649         struct cnxk_eth_rxq_sp *rxq_sp;
650         struct cnxk_eth_dev *dev;
651         struct roc_nix_rq *rq;
652         struct roc_nix_cq *cq;
653         int rc;
654
655         if (!rxq)
656                 return;
657
658         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
659         dev = rxq_sp->dev;
660         rq = &dev->rqs[qid];
661
662         plt_nix_dbg("Releasing rxq %u", qid);
663
664         /* Release rq reference for inline dev if present */
665         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
666                 roc_nix_inl_dev_rq_put(rq);
667
668         /* Cleanup ROC RQ */
669         rc = roc_nix_rq_fini(rq);
670         if (rc)
671                 plt_err("Failed to cleanup rq, rc=%d", rc);
672
673         /* Cleanup ROC CQ */
674         cq = &dev->cqs[qid];
675         rc = roc_nix_cq_fini(cq);
676         if (rc)
677                 plt_err("Failed to cleanup cq, rc=%d", rc);
678
679         /* Finally free fast path area */
680         plt_free(rxq_sp);
681 }
682
683 uint32_t
684 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
685                        uint8_t rss_level)
686 {
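        /*
         * Per RSS level flow key selection: row 0 hashes on outer headers,
         * row 1 on inner headers and row 2 on both. Columns are indexed by
         * the RSS_*_INDEX macros used below (IPv4, IPv6, TCP, UDP, SCTP,
         * DMAC).
         */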
687         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
688                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
689                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
690                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
691                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
692                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
693                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
694                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
695                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
696                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
697                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
698                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
699         };
700         uint32_t flowkey_cfg = 0;
701
702         dev->ethdev_rss_hf = ethdev_rss;
703
704         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
705             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
706                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
707         }
708
709         if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
710                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
711
712         if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
713                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
714
715         if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
716                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
717
718         if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
719                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
720
721         if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
722                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
723
724         if (ethdev_rss & RSS_IPV4_ENABLE)
725                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
726
727         if (ethdev_rss & RSS_IPV6_ENABLE)
728                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
729
730         if (ethdev_rss & RTE_ETH_RSS_TCP)
731                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
732
733         if (ethdev_rss & RTE_ETH_RSS_UDP)
734                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
735
736         if (ethdev_rss & RTE_ETH_RSS_SCTP)
737                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
738
739         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
740                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
741
742         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
743                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
744
745         if (ethdev_rss & RTE_ETH_RSS_PORT)
746                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
747
748         if (ethdev_rss & RTE_ETH_RSS_NVGRE)
749                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
750
751         if (ethdev_rss & RTE_ETH_RSS_VXLAN)
752                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
753
754         if (ethdev_rss & RTE_ETH_RSS_GENEVE)
755                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
756
757         if (ethdev_rss & RTE_ETH_RSS_GTPU)
758                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
759
760         return flowkey_cfg;
761 }
762
763 static void
764 nix_free_queue_mem(struct cnxk_eth_dev *dev)
765 {
766         plt_free(dev->rqs);
767         plt_free(dev->cqs);
768         plt_free(dev->sqs);
769         dev->rqs = NULL;
770         dev->cqs = NULL;
771         dev->sqs = NULL;
772 }
773
774 static int
775 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
776 {
777         struct rte_eth_dev *eth_dev = dev->eth_dev;
778         int rc = 0;
779
780         TAILQ_INIT(&dev->mtr_profiles);
781         TAILQ_INIT(&dev->mtr_policy);
782         TAILQ_INIT(&dev->mtr);
783
784         if (eth_dev->dev_ops->mtr_ops_get == NULL)
785                 return rc;
786
787         return nix_mtr_capabilities_init(eth_dev);
788 }
789
790 static int
791 nix_rss_default_setup(struct cnxk_eth_dev *dev)
792 {
793         struct rte_eth_dev *eth_dev = dev->eth_dev;
794         uint8_t rss_hash_level;
795         uint32_t flowkey_cfg;
796         uint64_t rss_hf;
797
798         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
799         rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
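        /* RTE_ETH_RSS_LEVEL(): 0 = PMD default, 1 = outermost, 2 = innermost.
         * Map both 0 and 1 to row 0 of the flow key table (outer headers) and
         * shift the remaining levels down by one.
         */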
800         if (rss_hash_level)
801                 rss_hash_level -= 1;
802
803         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
804         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
805 }
806
807 static int
808 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
809 {
810         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
811         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
812         struct cnxk_eth_qconf *tx_qconf = NULL;
813         struct cnxk_eth_qconf *rx_qconf = NULL;
814         struct cnxk_eth_rxq_sp *rxq_sp;
815         struct cnxk_eth_txq_sp *txq_sp;
816         int i, nb_rxq, nb_txq;
817         void **txq, **rxq;
818
819         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
820         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
821
822         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
823         if (tx_qconf == NULL) {
824                 plt_err("Failed to allocate memory for tx_qconf");
825                 goto fail;
826         }
827
828         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
829         if (rx_qconf == NULL) {
830                 plt_err("Failed to allocate memory for rx_qconf");
831                 goto fail;
832         }
833
834         txq = eth_dev->data->tx_queues;
835         for (i = 0; i < nb_txq; i++) {
836                 if (txq[i] == NULL) {
837                         tx_qconf[i].valid = false;
838                         plt_info("txq[%d] is already released", i);
839                         continue;
840                 }
841                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
842                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
843                 tx_qconf[i].valid = true;
844                 dev_ops->tx_queue_release(eth_dev, i);
845                 eth_dev->data->tx_queues[i] = NULL;
846         }
847
848         rxq = eth_dev->data->rx_queues;
849         for (i = 0; i < nb_rxq; i++) {
850                 if (rxq[i] == NULL) {
851                         rx_qconf[i].valid = false;
852                         plt_info("rxq[%d] is already released", i);
853                         continue;
854                 }
855                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
856                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
857                 rx_qconf[i].valid = true;
858                 dev_ops->rx_queue_release(eth_dev, i);
859                 eth_dev->data->rx_queues[i] = NULL;
860         }
861
862         dev->tx_qconf = tx_qconf;
863         dev->rx_qconf = rx_qconf;
864         return 0;
865
866 fail:
867         free(tx_qconf);
868         free(rx_qconf);
869         return -ENOMEM;
870 }
871
872 static int
873 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
874 {
875         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
876         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
877         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
878         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
879         int rc, i, nb_rxq, nb_txq;
880
881         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
882         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
883
884         rc = -ENOMEM;
885         /* Set up Tx & Rx queues with the previous configuration so that
886          * the queues remain functional when ports are started without
887          * reconfiguring the queues.
888          *
889          * The usual reconfiguration sequence looks like below:
890          * port_configure() {
891          *      if(reconfigure) {
892          *              queue_release()
893          *              queue_setup()
894          *      }
895          *      queue_configure() {
896          *              queue_release()
897          *              queue_setup()
898          *      }
899          * }
900          * port_start()
901          *
902          * In some applications' control path, queue_configure() is
903          * NOT invoked for Tx/Rx queues in port_configure().
904          * In such cases, the queues remain functional after start since
905          * they were already set up in port_configure().
906          */
907         for (i = 0; i < nb_txq; i++) {
908                 if (!tx_qconf[i].valid)
909                         continue;
910                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
911                                              &tx_qconf[i].conf.tx);
912                 if (rc) {
913                         plt_err("Failed to setup tx queue rc=%d", rc);
914                         for (i -= 1; i >= 0; i--)
915                                 dev_ops->tx_queue_release(eth_dev, i);
916                         goto fail;
917                 }
918         }
919
920         free(tx_qconf);
921         tx_qconf = NULL;
922
923         for (i = 0; i < nb_rxq; i++) {
924                 if (!rx_qconf[i].valid)
925                         continue;
926                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
927                                              &rx_qconf[i].conf.rx,
928                                              rx_qconf[i].mp);
929                 if (rc) {
930                         plt_err("Failed to setup rx queue rc=%d", rc);
931                         for (i -= 1; i >= 0; i--)
932                                 dev_ops->rx_queue_release(eth_dev, i);
933                         goto tx_queue_release;
934                 }
935         }
936
937         free(rx_qconf);
938         rx_qconf = NULL;
939
940         return 0;
941
942 tx_queue_release:
943         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
944                 dev_ops->tx_queue_release(eth_dev, i);
945 fail:
946         free(tx_qconf);
947         free(rx_qconf);
948
949         return rc;
950 }
951
952 static void
953 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
954 {
955         /* These dummy functions are required to support applications
956          * that reconfigure queues without stopping the Tx and Rx burst
957          * threads (e.g. the KNI app).
958          * When the queue context is saved, the Tx/Rx queues are released,
959          * which would crash the application since rx/tx burst may still be
960          * running on different lcores.
961          */
962         eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
963         eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
964         rte_mb();
965 }
966
967 static int
968 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
969 {
970         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
971         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
972         struct roc_nix *nix = &dev->nix;
973         int rc;
974
975         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
976         if (rc)
977                 return rc;
978
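        /* Pack one LSO format index per byte: regular tunnel formats
         * (V4V4, V4V6, V6V4, V6V6) in bits [31:0] and UDP tunnel formats in
         * bits [63:32], matching the shifts applied below.
         */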
979         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
980                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
981                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
982                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
983
984         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
985                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
986                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
987                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
988         return 0;
989 }
990
991 static int
992 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
993 {
994         struct roc_nix *nix = &dev->nix;
995         int rc;
996
997         /* Nothing much to do if offload is not enabled */
998         if (!(dev->tx_offloads &
999               (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1000                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1001                 return 0;
1002
1003         /* Set up LSO formats in the AF. It's a no-op if another ethdev
1004          * has already set them up.
1005          */
1006         rc = roc_nix_lso_fmt_setup(nix);
1007         if (rc)
1008                 return rc;
1009
1010         return nix_lso_tun_fmt_update(dev);
1011 }
1012
1013 int
1014 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1015 {
1016         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1017         struct rte_eth_dev_data *data = eth_dev->data;
1018         struct rte_eth_conf *conf = &data->dev_conf;
1019         struct rte_eth_rxmode *rxmode = &conf->rxmode;
1020         struct rte_eth_txmode *txmode = &conf->txmode;
1021         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1022         struct roc_nix_fc_cfg fc_cfg = {0};
1023         struct roc_nix *nix = &dev->nix;
1024         struct rte_ether_addr *ea;
1025         uint8_t nb_rxq, nb_txq;
1026         uint64_t rx_cfg;
1027         void *qs;
1028         int rc;
1029
1030         rc = -EINVAL;
1031
1032         /* Sanity checks */
1033         if (rte_eal_has_hugepages() == 0) {
1034                 plt_err("Huge page is not configured");
1035                 goto fail_configure;
1036         }
1037
1038         if (conf->dcb_capability_en == 1) {
1039                 plt_err("dcb enable is not supported");
1040                 goto fail_configure;
1041         }
1042
1043         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1044                 plt_err("Flow director is not supported");
1045                 goto fail_configure;
1046         }
1047
1048         if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1049             rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1050                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1051                 goto fail_configure;
1052         }
1053
1054         if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1055                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1056                 goto fail_configure;
1057         }
1058
1059         /* Free the resources allocated from the previous configure */
1060         if (dev->configured == 1) {
1061                 /* Unregister queue irq's */
1062                 roc_nix_unregister_queue_irqs(nix);
1063
1064                 /* Unregister CQ irqs if present */
1065                 if (eth_dev->data->dev_conf.intr_conf.rxq)
1066                         roc_nix_unregister_cq_irqs(nix);
1067
1068                 /* Set no-op functions */
1069                 nix_set_nop_rxtx_function(eth_dev);
1070                 /* Store queue config for later */
1071                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1072                 if (rc)
1073                         goto fail_configure;
1074
1075                 /* Disable and free rte_meter entries */
1076                 rc = nix_meter_fini(dev);
1077                 if (rc)
1078                         goto fail_configure;
1079
1080                 /* Cleanup security support */
1081                 rc = nix_security_release(dev);
1082                 if (rc)
1083                         goto fail_configure;
1084
1085                 roc_nix_tm_fini(nix);
1086                 roc_nix_lf_free(nix);
1087         }
1088
1089         dev->rx_offloads = rxmode->offloads;
1090         dev->tx_offloads = txmode->offloads;
1091
1092         /* Prepare rx cfg */
1093         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1094         if (dev->rx_offloads &
1095             (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1096                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1097                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1098         }
1099         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1100                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1101                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1102
1103         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1104                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1105                 /* Disable drop re if rx offload security is enabled and
1106                  * platform does not support it.
1107                  */
1108                 if (dev->ipsecd_drop_re_dis)
1109                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1110         }
1111
1112         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1113         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1114
1115         /* Alloc a nix lf */
1116         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1117         if (rc) {
1118                 plt_err("Failed to init nix_lf rc=%d", rc);
1119                 goto fail_configure;
1120         }
1121
1122         dev->npc.channel = roc_nix_get_base_chan(nix);
1123
1124         nb_rxq = data->nb_rx_queues;
1125         nb_txq = data->nb_tx_queues;
1126         rc = -ENOMEM;
1127         if (nb_rxq) {
1128                 /* Allocate memory for roc rq's and cq's */
1129                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1130                 if (!qs) {
1131                         plt_err("Failed to alloc rqs");
1132                         goto free_nix_lf;
1133                 }
1134                 dev->rqs = qs;
1135
1136                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1137                 if (!qs) {
1138                         plt_err("Failed to alloc cqs");
1139                         goto free_nix_lf;
1140                 }
1141                 dev->cqs = qs;
1142         }
1143
1144         if (nb_txq) {
1145                 /* Allocate memory for roc sq's */
1146                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1147                 if (!qs) {
1148                         plt_err("Failed to alloc sqs");
1149                         goto free_nix_lf;
1150                 }
1151                 dev->sqs = qs;
1152         }
1153
1154         /* Re-enable NIX LF error interrupts */
1155         roc_nix_err_intr_ena_dis(nix, true);
1156         roc_nix_ras_intr_ena_dis(nix, true);
1157
1158         if (nix->rx_ptp_ena &&
1159             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1160                 plt_err("Both PTP and switch header enabled");
1161                 goto free_nix_lf;
1162         }
1163
1164         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1165                                     dev->npc.pre_l2_size_offset,
1166                                     dev->npc.pre_l2_size_offset_mask,
1167                                     dev->npc.pre_l2_size_shift_dir);
1168         if (rc) {
1169                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1170                 goto free_nix_lf;
1171         }
1172
1173         /* Setup LSO if needed */
1174         rc = nix_lso_fmt_setup(dev);
1175         if (rc) {
1176                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1177                 goto free_nix_lf;
1178         }
1179
1180         /* Configure RSS */
1181         rc = nix_rss_default_setup(dev);
1182         if (rc) {
1183                 plt_err("Failed to configure rss rc=%d", rc);
1184                 goto free_nix_lf;
1185         }
1186
1187         /* Init the default TM scheduler hierarchy */
1188         rc = roc_nix_tm_init(nix);
1189         if (rc) {
1190                 plt_err("Failed to init traffic manager, rc=%d", rc);
1191                 goto free_nix_lf;
1192         }
1193
1194         rc = nix_ingress_policer_setup(dev);
1195         if (rc) {
1196                 plt_err("Failed to setup ingress policer rc=%d", rc);
1197                 goto free_nix_lf;
1198         }
1199
1200         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1201         if (rc) {
1202                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1203                 goto tm_fini;
1204         }
1205
1206         /* Register queue IRQs */
1207         rc = roc_nix_register_queue_irqs(nix);
1208         if (rc) {
1209                 plt_err("Failed to register queue interrupts rc=%d", rc);
1210                 goto tm_fini;
1211         }
1212
1213         /* Register cq IRQs */
1214         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1215                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1216                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1217                                 dev->nix.cints);
1218                         goto q_irq_fini;
1219                 }
1220                 /* The Rx interrupt feature cannot work with vector mode
1221                  * because vector mode does not process packets unless at
1222                  * least 4 packets are received, while CQ interrupts are
1223                  * generated even for a single packet in the CQ.
1224                  */
1225                 dev->scalar_ena = true;
1226
1227                 rc = roc_nix_register_cq_irqs(nix);
1228                 if (rc) {
1229                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1230                         goto q_irq_fini;
1231                 }
1232         }
1233
1234         /* Configure loop back mode */
1235         rc = roc_nix_mac_loopback_enable(nix,
1236                                          eth_dev->data->dev_conf.lpbk_mode);
1237         if (rc) {
1238                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1239                 goto cq_fini;
1240         }
1241
1242         /* Setup Inline security support */
1243         rc = nix_security_setup(dev);
1244         if (rc)
1245                 goto cq_fini;
1246
1247         /* Init flow control configuration */
1248         fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1249         fc_cfg.rxchan_cfg.enable = true;
1250         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1251         if (rc) {
1252                 plt_err("Failed to initialize flow control rc=%d", rc);
1253                 goto cq_fini;
1254         }
1255
1256         /* Update flow control configuration to PMD */
1257         rc = nix_init_flow_ctrl_config(eth_dev);
1258         if (rc) {
1259                 plt_err("Failed to initialize flow control rc=%d", rc);
1260                 goto cq_fini;
1261         }
1262
1263         /* Initialize TC to SQ mapping as invalid */
1264         memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
1265         /*
1266          * Restore the queue config for the case where a reconfigure follows
1267          * a reconfigure and the application did not invoke queue configure.
1268          */
1269         if (dev->configured == 1) {
1270                 rc = nix_restore_queue_cfg(eth_dev);
1271                 if (rc)
1272                         goto sec_release;
1273         }
1274
1275         /* Update the mac address */
1276         ea = eth_dev->data->mac_addrs;
1277         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1278         if (rte_is_zero_ether_addr(ea))
1279                 rte_eth_random_addr((uint8_t *)ea);
1280
1281         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1282
1283         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1284                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1285                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1286                     dev->rx_offloads, dev->tx_offloads);
1287
1288         /* All good */
1289         dev->configured = 1;
1290         dev->nb_rxq = data->nb_rx_queues;
1291         dev->nb_txq = data->nb_tx_queues;
1292         return 0;
1293
1294 sec_release:
1295         rc |= nix_security_release(dev);
1296 cq_fini:
1297         roc_nix_unregister_cq_irqs(nix);
1298 q_irq_fini:
1299         roc_nix_unregister_queue_irqs(nix);
1300 tm_fini:
1301         roc_nix_tm_fini(nix);
1302 free_nix_lf:
1303         nix_free_queue_mem(dev);
1304         rc |= roc_nix_lf_free(nix);
1305 fail_configure:
1306         dev->configured = 0;
1307         return rc;
1308 }
1309
1310 int
1311 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1312 {
1313         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1314         struct rte_eth_dev_data *data = eth_dev->data;
1315         struct roc_nix_sq *sq = &dev->sqs[qid];
1316         int rc = -EINVAL;
1317
1318         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1319                 return 0;
1320
1321         rc = roc_nix_tm_sq_aura_fc(sq, true);
1322         if (rc) {
1323                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1324                 goto done;
1325         }
1326
1327         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1328 done:
1329         return rc;
1330 }
1331
1332 int
1333 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1334 {
1335         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1336         struct rte_eth_dev_data *data = eth_dev->data;
1337         struct roc_nix_sq *sq = &dev->sqs[qid];
1338         int rc;
1339
1340         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1341                 return 0;
1342
1343         rc = roc_nix_tm_sq_aura_fc(sq, false);
1344         if (rc) {
1345                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1346                         rc);
1347                 goto done;
1348         }
1349
1350         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1351 done:
1352         return rc;
1353 }
1354
1355 static int
1356 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1357 {
1358         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1359         struct rte_eth_dev_data *data = eth_dev->data;
1360         struct roc_nix_rq *rq = &dev->rqs[qid];
1361         int rc;
1362
1363         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1364                 return 0;
1365
1366         rc = roc_nix_rq_ena_dis(rq, true);
1367         if (rc) {
1368                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1369                 goto done;
1370         }
1371
1372         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1373 done:
1374         return rc;
1375 }
1376
1377 static int
1378 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1379 {
1380         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1381         struct rte_eth_dev_data *data = eth_dev->data;
1382         struct roc_nix_rq *rq = &dev->rqs[qid];
1383         int rc;
1384
1385         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1386                 return 0;
1387
1388         rc = roc_nix_rq_ena_dis(rq, false);
1389         if (rc) {
1390                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1391                 goto done;
1392         }
1393
1394         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1395 done:
1396         return rc;
1397 }
1398
1399 static int
1400 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1401 {
1402         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1403         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1404         struct rte_mbuf *rx_pkts[32];
1405         struct rte_eth_link link;
1406         int count, i, j, rc;
1407         void *rxq;
1408
1409         /* Disable all the NPC entries */
1410         rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
1411         if (rc)
1412                 return rc;
1413
1414         /* Stop link change events */
1415         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1416                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1417
1418         /* Disable Rx via NPC */
1419         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1420
1421         /* Stop rx queues and free up pkts pending */
1422         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1423                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1424                 if (rc)
1425                         continue;
1426
1427                 rxq = eth_dev->data->rx_queues[i];
1428                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1429                 while (count) {
1430                         for (j = 0; j < count; j++)
1431                                 rte_pktmbuf_free(rx_pkts[j]);
1432                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1433                 }
1434         }
1435
1436         /* Stop tx queues  */
1437         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1438                 dev_ops->tx_queue_stop(eth_dev, i);
1439
1440         /* Bring down link status internally */
1441         memset(&link, 0, sizeof(link));
1442         rte_eth_linkstatus_set(eth_dev, &link);
1443
1444         return 0;
1445 }
1446
1447 int
1448 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1449 {
1450         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1451         int rc, i;
1452
1453         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1454                 rc = nix_recalc_mtu(eth_dev);
1455                 if (rc)
1456                         return rc;
1457         }
1458
1459         /* Start rx queues */
1460         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1461                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1462                 if (rc)
1463                         return rc;
1464         }
1465
1466         /* Start tx queues  */
1467         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1468                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1469                 if (rc)
1470                         return rc;
1471         }
1472
1473         /* Update Flow control configuration */
1474         rc = nix_update_flow_ctrl_config(eth_dev);
1475         if (rc) {
1476                 plt_err("Failed to enable flow control. error code(%d)", rc);
1477                 return rc;
1478         }
1479
1480         /* Enable Rx in NPC */
1481         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1482         if (rc) {
1483                 plt_err("Failed to enable NPC rx %d", rc);
1484                 return rc;
1485         }
1486
1487         rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
1488         if (rc) {
1489                 plt_err("Failed to enable NPC entries %d", rc);
1490                 return rc;
1491         }
1492
1493         cnxk_nix_toggle_flag_link_cfg(dev, true);
1494
1495         /* Start link change events */
1496         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1497                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1498                 if (rc) {
1499                         plt_err("Failed to start cgx link event %d", rc);
1500                         goto rx_disable;
1501                 }
1502         }
1503
1504         /* Enable PTP if it is requested by the user or is already
1505          * enabled on the PF owning this VF.
1506          */
1507         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1508         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1509                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1510         else
1511                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1512
1513         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1514                 rc = rte_mbuf_dyn_rx_timestamp_register
1515                         (&dev->tstamp.tstamp_dynfield_offset,
1516                          &dev->tstamp.rx_tstamp_dynflag);
1517                 if (rc != 0) {
1518                         plt_err("Failed to register Rx timestamp field/flag");
1519                         goto rx_disable;
1520                 }
1521         }
1522
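             /* Clear the link configuration in-progress flag */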
1523         cnxk_nix_toggle_flag_link_cfg(dev, false);
1524
1525         return 0;
1526
1527 rx_disable:
1528         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1529         cnxk_nix_toggle_flag_link_cfg(dev, false);
1530         return rc;
1531 }
1532
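     /* Forward declarations; both ops are referenced in the ops table below */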
1533 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1534 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1535
1536 /* CNXK platform-independent eth dev ops */
1537 struct eth_dev_ops cnxk_eth_dev_ops = {
1538         .mtu_set = cnxk_nix_mtu_set,
1539         .mac_addr_add = cnxk_nix_mac_addr_add,
1540         .mac_addr_remove = cnxk_nix_mac_addr_del,
1541         .mac_addr_set = cnxk_nix_mac_addr_set,
1542         .dev_infos_get = cnxk_nix_info_get,
1543         .link_update = cnxk_nix_link_update,
1544         .tx_queue_release = cnxk_nix_tx_queue_release,
1545         .rx_queue_release = cnxk_nix_rx_queue_release,
1546         .dev_stop = cnxk_nix_dev_stop,
1547         .dev_close = cnxk_nix_dev_close,
1548         .dev_reset = cnxk_nix_dev_reset,
1549         .tx_queue_start = cnxk_nix_tx_queue_start,
1550         .rx_queue_start = cnxk_nix_rx_queue_start,
1551         .rx_queue_stop = cnxk_nix_rx_queue_stop,
1552         .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1553         .promiscuous_enable = cnxk_nix_promisc_enable,
1554         .promiscuous_disable = cnxk_nix_promisc_disable,
1555         .allmulticast_enable = cnxk_nix_allmulticast_enable,
1556         .allmulticast_disable = cnxk_nix_allmulticast_disable,
1557         .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1558         .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1559         .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1560         .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1561         .priority_flow_ctrl_queue_config =
1562                                 cnxk_nix_priority_flow_ctrl_queue_config,
1563         .priority_flow_ctrl_queue_info_get =
1564                                 cnxk_nix_priority_flow_ctrl_queue_info_get,
1565         .dev_set_link_up = cnxk_nix_set_link_up,
1566         .dev_set_link_down = cnxk_nix_set_link_down,
1567         .get_module_info = cnxk_nix_get_module_info,
1568         .get_module_eeprom = cnxk_nix_get_module_eeprom,
1569         .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1570         .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1571         .pool_ops_supported = cnxk_nix_pool_ops_supported,
1572         .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1573         .stats_get = cnxk_nix_stats_get,
1574         .stats_reset = cnxk_nix_stats_reset,
1575         .xstats_get = cnxk_nix_xstats_get,
1576         .xstats_get_names = cnxk_nix_xstats_get_names,
1577         .xstats_reset = cnxk_nix_xstats_reset,
1578         .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1579         .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1580         .fw_version_get = cnxk_nix_fw_version_get,
1581         .rxq_info_get = cnxk_nix_rxq_info_get,
1582         .txq_info_get = cnxk_nix_txq_info_get,
1583         .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1584         .flow_ops_get = cnxk_nix_flow_ops_get,
1585         .get_reg = cnxk_nix_dev_get_reg,
1586         .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1587         .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1588         .timesync_read_time = cnxk_nix_timesync_read_time,
1589         .timesync_write_time = cnxk_nix_timesync_write_time,
1590         .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1591         .read_clock = cnxk_nix_read_clock,
1592         .reta_update = cnxk_nix_reta_update,
1593         .reta_query = cnxk_nix_reta_query,
1594         .rss_hash_update = cnxk_nix_rss_hash_update,
1595         .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1596         .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1597         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1598         .tm_ops_get = cnxk_nix_tm_ops_get,
1599         .mtr_ops_get = cnxk_nix_mtr_ops_get,
1600 };
1601
1602 static int
1603 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1604 {
1605         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1606         struct rte_security_ctx *sec_ctx;
1607         struct roc_nix *nix = &dev->nix;
1608         struct rte_pci_device *pci_dev;
1609         int rc, max_entries;
1610
1611         eth_dev->dev_ops = &cnxk_eth_dev_ops;
1612         eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
1613         eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
1614         eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
1615
1616         /* Alloc security context */
1617         sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1618         if (!sec_ctx)
1619                 return -ENOMEM;
1620         sec_ctx->device = eth_dev;
1621         sec_ctx->ops = &cnxk_eth_sec_ops;
1622         sec_ctx->flags =
1623                 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1624         eth_dev->security_ctx = sec_ctx;
1625
1626         /* For secondary processes, the primary has done all the work */
1627         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1628                 return 0;
1629
1630         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1631         rte_eth_copy_pci_info(eth_dev, pci_dev);
1632
1633         /* Parse devargs string */
1634         rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1635         if (rc) {
1636                 plt_err("Failed to parse devargs rc=%d", rc);
1637                 goto error;
1638         }
1639
1640         /* Initialize base roc nix */
1641         nix->pci_dev = pci_dev;
1642         nix->hw_vlan_ins = true;
1643         rc = roc_nix_dev_init(nix);
1644         if (rc) {
1645                 plt_err("Failed to initialize roc nix rc=%d", rc);
1646                 goto error;
1647         }
1648
1649         /* Register link status change callback */
1650         roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1651
1652         /* Register link info get callback */
1653         roc_nix_mac_link_info_get_cb_register(nix,
1654                                               cnxk_eth_dev_link_status_get_cb);
1655
1656         dev->eth_dev = eth_dev;
1657         dev->configured = 0;
1658         dev->ptype_disable = 0;
1659
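             /* Initialize inline inbound/outbound security session lists and their locks */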
1660         TAILQ_INIT(&dev->inb.list);
1661         TAILQ_INIT(&dev->outb.list);
1662         rte_spinlock_init(&dev->inb.lock);
1663         rte_spinlock_init(&dev->outb.lock);
1664
1665         /* For VFs, the returned max_entries will be 0, but one entry must be
1666          * allocated to hold the default MAC address, so set it to 1.
1667          */
1668         if (roc_nix_is_vf_or_sdp(nix))
1669                 max_entries = 1;
1670         else
1671                 max_entries = roc_nix_mac_max_entries_get(nix);
1672
1673         if (max_entries <= 0) {
1674                 plt_err("Failed to get max entries for mac addr");
1675                 rc = -ENOTSUP;
1676                 goto dev_fini;
1677         }
1678
1679         eth_dev->data->mac_addrs =
1680                 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1681         if (eth_dev->data->mac_addrs == NULL) {
1682                 plt_err("Failed to allocate memory for mac addr");
1683                 rc = -ENOMEM;
1684                 goto dev_fini;
1685         }
1686
1687         dev->max_mac_entries = max_entries;
1688         dev->dmac_filter_count = 1;
1689
1690         /* Get mac address */
1691         rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1692         if (rc) {
1693                 plt_err("Failed to get mac addr, rc=%d", rc);
1694                 goto free_mac_addrs;
1695         }
1696
1697         /* Update the mac address */
1698         memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1699
1700         if (!roc_nix_is_vf_or_sdp(nix)) {
1701                 /* Sync same MAC address to CGX/RPM table */
1702                 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1703                 if (rc) {
1704                         plt_err("Failed to set mac addr, rc=%d", rc);
1705                         goto free_mac_addrs;
1706                 }
1707         }
1708
1709         /* Union of all capabilities supported by CNXK.
1710          * Platform-specific capabilities will be
1711          * updated later.
1712          */
1713         dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1714         dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1715         dev->speed_capa = nix_get_speed_capa(dev);
1716
1717         /* Initialize roc npc */
1718         dev->npc.roc_nix = nix;
1719         rc = roc_npc_init(&dev->npc);
1720         if (rc)
1721                 goto free_mac_addrs;
1722
1723         plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1724                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1725                     eth_dev->data->port_id, roc_nix_get_pf(nix),
1726                     roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1727                     dev->rx_offload_capa, dev->tx_offload_capa);
1728         return 0;
1729
1730 free_mac_addrs:
1731         rte_free(eth_dev->data->mac_addrs);
1732 dev_fini:
1733         roc_nix_dev_fini(nix);
1734 error:
1735         plt_err("Failed to init nix eth_dev rc=%d", rc);
1736         return rc;
1737 }
1738
1739 static int
1740 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1741 {
1742         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1743         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1744         struct rte_eth_pfc_queue_conf pfc_conf;
1745         struct roc_nix *nix = &dev->nix;
1746         struct rte_eth_fc_conf fc_conf;
1747         int rc, i;
1748
1749         /* Disable switch hdr pkind */
1750         roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1751
1752         plt_free(eth_dev->security_ctx);
1753         eth_dev->security_ctx = NULL;
1754
1755         /* Nothing to be done for secondary processes */
1756         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1757                 return 0;
1758
1759         /* Clear the flag since we are closing down */
1760         dev->configured = 0;
1761
1762         roc_nix_npc_rx_ena_dis(nix, false);
1763
1764         /* Restore 802.3 Flow control configuration */
1765         memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
1766         memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1767         fc_conf.mode = RTE_ETH_FC_NONE;
1768         rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1769
1770         pfc_conf.mode = RTE_ETH_FC_NONE;
1771         for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
1772                 if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
1773                         pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
1774                         pfc_conf.rx_pause.tc = i;
1775                         pfc_conf.tx_pause.rx_qid = i;
1776                         pfc_conf.tx_pause.tc = i;
1777                         rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
1778                                 &pfc_conf);
1779                         if (rc)
1780                                 plt_err("Failed to reset PFC. error code(%d)",
1781                                         rc);
1782                 }
1783         }
1784
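             /* Leave the port with full 802.3 flow control after clearing per-queue PFC */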
1785         fc_conf.mode = RTE_ETH_FC_FULL;
1786         rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1787
1788         /* Disable and free rte_meter entries */
1789         nix_meter_fini(dev);
1790
1791         /* Disable and free rte_flow entries */
1792         roc_npc_fini(&dev->npc);
1793
1794         /* Disable link status events */
1795         roc_nix_mac_link_event_start_stop(nix, false);
1796
1797         /* Unregister the link update op; this is required to stop VFs from
1798          * receiving link status updates on the exit path.
1799          */
1800         roc_nix_mac_link_cb_unregister(nix);
1801
1802         /* Free up SQs */
1803         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1804                 dev_ops->tx_queue_release(eth_dev, i);
1805                 eth_dev->data->tx_queues[i] = NULL;
1806         }
1807         eth_dev->data->nb_tx_queues = 0;
1808
1809         /* Free up RQs and CQs */
1810         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1811                 dev_ops->rx_queue_release(eth_dev, i);
1812                 eth_dev->data->rx_queues[i] = NULL;
1813         }
1814         eth_dev->data->nb_rx_queues = 0;
1815
1816         /* Free security resources */
1817         nix_security_release(dev);
1818
1819         /* Free tm resources */
1820         roc_nix_tm_fini(nix);
1821
1822         /* Unregister queue irqs */
1823         roc_nix_unregister_queue_irqs(nix);
1824
1825         /* Unregister cq irqs */
1826         if (eth_dev->data->dev_conf.intr_conf.rxq)
1827                 roc_nix_unregister_cq_irqs(nix);
1828
1829         /* Free ROC RQ, SQ and CQ memory */
1830         nix_free_queue_mem(dev);
1831
1832         /* Free nix lf resources */
1833         rc = roc_nix_lf_free(nix);
1834         if (rc)
1835                 plt_err("Failed to free nix lf, rc=%d", rc);
1836
1837         rte_free(eth_dev->data->mac_addrs);
1838         eth_dev->data->mac_addrs = NULL;
1839
1840         rc = roc_nix_dev_fini(nix);
1841         /* Can be freed later by PMD if NPA LF is in use */
1842         if (rc == -EAGAIN) {
1843                 if (!reset)
1844                         eth_dev->data->dev_private = NULL;
1845                 return 0;
1846         } else if (rc) {
1847                 plt_err("Failed in nix dev fini, rc=%d", rc);
1848         }
1849
1850         return rc;
1851 }
1852
1853 static int
1854 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1855 {
1856         cnxk_eth_dev_uninit(eth_dev, false);
1857         return 0;
1858 }
1859
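     /* dev_reset tears down and re-initializes the port; uninit with
      * reset=true preserves dev_private so cnxk_eth_dev_init() can reuse it.
      */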
1860 static int
1861 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1862 {
1863         int rc;
1864
1865         rc = cnxk_eth_dev_uninit(eth_dev, true);
1866         if (rc)
1867                 return rc;
1868
1869         return cnxk_eth_dev_init(eth_dev);
1870 }
1871
1872 int
1873 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1874 {
1875         struct rte_eth_dev *eth_dev;
1876         struct roc_nix *nix;
1877         int rc = -EINVAL;
1878
1879         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1880         if (eth_dev) {
1881                 /* Cleanup eth dev */
1882                 rc = cnxk_eth_dev_uninit(eth_dev, false);
1883                 if (rc)
1884                         return rc;
1885
1886                 rte_eth_dev_release_port(eth_dev);
1887         }
1888
1889         /* Nothing to be done for secondary processes */
1890         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1891                 return 0;
1892
1893         /* Check if this device is hosting the common resource */
1894         nix = roc_idev_npa_nix_get();
1895         if (!nix || nix->pci_dev != pci_dev)
1896                 return 0;
1897
1898         /* Try nix fini now */
1899         rc = roc_nix_dev_fini(nix);
1900         if (rc == -EAGAIN) {
1901                 plt_info("%s: common resource in use by other devices",
1902                          pci_dev->name);
1903                 goto exit;
1904         } else if (rc) {
1905                 plt_err("Failed in nix dev fini, rc=%d", rc);
1906                 goto exit;
1907         }
1908
1909         /* Free device pointer as rte_ethdev does not have it anymore */
1910         rte_free(nix);
1911 exit:
1912         return rc;
1913 }
1914
1915 int
1916 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1917 {
1918         int rc;
1919
1920         RTE_SET_USED(pci_drv);
1921
1922         rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1923                                            cnxk_eth_dev_init);
1924
1925         /* On error in a secondary process, recheck if the port exists in
1926          * the primary or is in the middle of detaching.
1927          */
1928         if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1929                 if (!rte_eth_dev_allocated(pci_dev->device.name))
1930                         return 0;
1931         return rc;
1932 }