net/cnxk: enable packet pool tail drop
[dpdk.git] / drivers / net / cnxk / cnxk_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
6 #include <rte_eventdev.h>
7
8 static inline uint64_t
9 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
10 {
11         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
12
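        /* Rx timestamp offload is not advertised for VF/SDP interfaces or when
         * the HiGig switch header is in use.
         */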
13         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
14             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
15                 capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
16
17         return capa;
18 }
19
20 static inline uint64_t
21 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
22 {
23         RTE_SET_USED(dev);
24         return CNXK_NIX_TX_OFFLOAD_CAPA;
25 }
26
27 static inline uint32_t
28 nix_get_speed_capa(struct cnxk_eth_dev *dev)
29 {
30         uint32_t speed_capa;
31
32         /* Auto negotiation disabled */
33         speed_capa = RTE_ETH_LINK_SPEED_FIXED;
34         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
35                 speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
36                               RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
37                               RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
38         }
39
40         return speed_capa;
41 }
42
43 int
44 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
45 {
46         struct roc_nix *nix = &dev->nix;
47
48         if (dev->inb.inl_dev == use_inl_dev)
49                 return 0;
50
51         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
52                     dev->inb.nb_sess, !!dev->inb.inl_dev);
53
54         /* Change the mode */
55         dev->inb.inl_dev = use_inl_dev;
56
57         /* Update RoC for NPC rule insertion */
58         roc_nix_inb_mode_set(nix, use_inl_dev);
59
60         /* Setup lookup mem */
61         return cnxk_nix_lookup_mem_sa_base_set(dev);
62 }
63
64 static int
65 nix_security_setup(struct cnxk_eth_dev *dev)
66 {
67         struct roc_nix *nix = &dev->nix;
68         int i, rc = 0;
69
70         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
71                 /* Setup Inline Inbound */
72                 rc = roc_nix_inl_inb_init(nix);
73                 if (rc) {
74                         plt_err("Failed to initialize nix inline inb, rc=%d",
75                                 rc);
76                         return rc;
77                 }
78
79                 /* By default, pick the inline device for poll mode.
80                  * This will be overridden when event mode RQs are set up.
81                  */
82                 cnxk_nix_inb_mode_set(dev, true);
83
84                 /* Allocate memory to be used as dptr for CPT ucode
85                  * WRITE_SA op.
86                  */
87                 dev->inb.sa_dptr =
88                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
89                 if (!dev->inb.sa_dptr) {
90                         plt_err("Couldn't allocate memory for SA dptr");
91                         rc = -ENOMEM;
92                         goto cleanup;
93                 }
94         }
95
96         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
97             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
98                 struct plt_bitmap *bmap;
99                 size_t bmap_sz;
100                 void *mem;
101
102                 /* Setup enough descriptors for all tx queues */
103                 nix->outb_nb_desc = dev->outb.nb_desc;
104                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
105
106                 /* Setup Inline Outbound */
107                 rc = roc_nix_inl_outb_init(nix);
108                 if (rc) {
109                         plt_err("Failed to initialize nix inline outb, rc=%d",
110                                 rc);
111                         goto sa_dptr_free;
112                 }
113
114                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
115
116                 /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
117                 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
118                         return 0;
119
120                 /* Allocate memory to be used as dptr for CPT ucode
121                  * WRITE_SA op.
122                  */
123                 dev->outb.sa_dptr =
124                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
125                 if (!dev->outb.sa_dptr) {
126                         plt_err("Couldn't allocate memory for SA dptr");
127                         rc = -ENOMEM;
128                         goto sa_dptr_free;
129                 }
130
131                 rc = -ENOMEM;
132                 /* Allocate a bitmap used to allocate and free SA indexes */
133                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
134                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
135                 if (mem == NULL) {
136                         plt_err("Outbound SA bmap alloc failed");
137
138                         rc |= roc_nix_inl_outb_fini(nix);
139                         goto sa_dptr_free;
140                 }
141
142                 rc = -EIO;
143                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
144                 if (!bmap) {
145                         plt_err("Outbound SA bmap init failed");
146
147                         rc |= roc_nix_inl_outb_fini(nix);
148                         plt_free(mem);
149                         goto sa_dptr_free;
150                 }
151
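                /* All outbound SA indexes start out marked as free in the bitmap;
                 * bits are presumably cleared as SAs get allocated.
                 */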
152                 for (i = 0; i < dev->outb.max_sa; i++)
153                         plt_bitmap_set(bmap, i);
154
155                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
156                 dev->outb.sa_bmap_mem = mem;
157                 dev->outb.sa_bmap = bmap;
158         }
159         return 0;
160
161 sa_dptr_free:
162         if (dev->inb.sa_dptr)
163                 plt_free(dev->inb.sa_dptr);
164         if (dev->outb.sa_dptr)
165                 plt_free(dev->outb.sa_dptr);
166 cleanup:
167         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
168                 rc |= roc_nix_inl_inb_fini(nix);
169         return rc;
170 }
171
172 static int
173 nix_meter_fini(struct cnxk_eth_dev *dev)
174 {
175         struct cnxk_meter_node *next_mtr = NULL;
176         struct roc_nix_bpf_objs profs = {0};
177         struct cnxk_meter_node *mtr = NULL;
178         struct cnxk_mtr *fms = &dev->mtr;
179         struct roc_nix *nix = &dev->nix;
180         struct roc_nix_rq *rq;
181         uint32_t i;
182         int rc = 0;
183
184         RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
185                 for (i = 0; i < mtr->rq_num; i++) {
186                         rq = &dev->rqs[mtr->rq_id[i]];
187                         rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
188                 }
189
190                 profs.level = mtr->level;
191                 profs.count = 1;
192                 profs.ids[0] = mtr->bpf_id;
193                 rc = roc_nix_bpf_free(nix, &profs, 1);
194
195                 if (rc)
196                         return rc;
197
198                 TAILQ_REMOVE(fms, mtr, next);
199                 plt_free(mtr);
200         }
201         return 0;
202 }
203
204 static int
205 nix_security_release(struct cnxk_eth_dev *dev)
206 {
207         struct rte_eth_dev *eth_dev = dev->eth_dev;
208         struct cnxk_eth_sec_sess *eth_sec, *tvar;
209         struct roc_nix *nix = &dev->nix;
210         int rc, ret = 0;
211
212         /* Cleanup Inline inbound */
213         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
214                 /* Destroy inbound sessions */
215                 tvar = NULL;
216                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
217                         cnxk_eth_sec_ops.session_destroy(eth_dev,
218                                                          eth_sec->sess);
219
220                 /* Clear lookup mem */
221                 cnxk_nix_lookup_mem_sa_base_clear(dev);
222
223                 rc = roc_nix_inl_inb_fini(nix);
224                 if (rc)
225                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
226                 ret |= rc;
227
228                 if (dev->inb.sa_dptr) {
229                         plt_free(dev->inb.sa_dptr);
230                         dev->inb.sa_dptr = NULL;
231                 }
232         }
233
234         /* Cleanup Inline outbound */
235         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
236             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
237                 /* Destroy outbound sessions */
238                 tvar = NULL;
239                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
240                         cnxk_eth_sec_ops.session_destroy(eth_dev,
241                                                          eth_sec->sess);
242
243                 rc = roc_nix_inl_outb_fini(nix);
244                 if (rc)
245                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
246                 ret |= rc;
247
248                 plt_bitmap_free(dev->outb.sa_bmap);
249                 plt_free(dev->outb.sa_bmap_mem);
250                 dev->outb.sa_bmap = NULL;
251                 dev->outb.sa_bmap_mem = NULL;
252                 if (dev->outb.sa_dptr) {
253                         plt_free(dev->outb.sa_dptr);
254                         dev->outb.sa_dptr = NULL;
255                 }
256         }
257
258         dev->inb.inl_dev = false;
259         roc_nix_inb_mode_set(nix, false);
260         dev->nb_rxq_sso = 0;
261         dev->inb.nb_sess = 0;
262         dev->outb.nb_sess = 0;
263         return ret;
264 }
265
266 static void
267 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
268 {
269         struct rte_pktmbuf_pool_private *mbp_priv;
270         struct rte_eth_dev *eth_dev;
271         struct cnxk_eth_dev *dev;
272         uint32_t buffsz;
273
274         dev = rxq->dev;
275         eth_dev = dev->eth_dev;
276
277         /* Get rx buffer size */
278         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
279         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
280
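        /* If the configured MTU plus L2 overhead does not fit in a single mbuf
         * data buffer, enable Rx scatter and Tx multi-seg so that jumbo frames
         * can span multiple mbufs.
         */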
281         if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
282                 dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
283                 dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
284         }
285 }
286
287 int
288 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
289 {
290         struct rte_eth_dev_data *data = eth_dev->data;
291         struct cnxk_eth_rxq_sp *rxq;
292         int rc;
293
294         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
295         /* Set up scatter mode if needed for jumbo frames */
296         nix_enable_mseg_on_jumbo(rxq);
297
298         rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
299         if (rc)
300                 plt_err("Failed to set default MTU size, rc=%d", rc);
301
302         return rc;
303 }
304
305 static int
306 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
307 {
308         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
309         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
310         struct rte_eth_fc_conf fc_conf = {0};
311         int rc;
312
313         /* Both Rx & Tx flow control get enabled (RTE_ETH_FC_FULL) in HW
314          * by the AF driver; update that info in the PMD structure.
315          */
316         rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
317         if (rc)
318                 goto exit;
319
320         fc->mode = fc_conf.mode;
321         fc->rx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
322                         (fc_conf.mode == RTE_ETH_FC_RX_PAUSE);
323         fc->tx_pause = (fc_conf.mode == RTE_ETH_FC_FULL) ||
324                         (fc_conf.mode == RTE_ETH_FC_TX_PAUSE);
325
326 exit:
327         return rc;
328 }
329
330 static int
331 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
332 {
333         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
334         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
335         struct rte_eth_fc_conf fc_cfg = {0};
336
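        /* Flow control configuration is skipped for VF/SDP interfaces,
         * with LBK being the exception.
         */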
337         if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
338                 return 0;
339
340         fc_cfg.mode = fc->mode;
341
342         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
343         if (roc_model_is_cn96_ax() &&
344             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
345             (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
346                 fc_cfg.mode =
347                                 (fc_cfg.mode == RTE_ETH_FC_FULL ||
348                                 fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
349                                 RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
350         }
351
352         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
353 }
354
355 uint64_t
356 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
357 {
358         uint16_t port_id = dev->eth_dev->data->port_id;
359         struct rte_mbuf mb_def;
360         uint64_t *tmp;
361
362         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
363         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
364                                  offsetof(struct rte_mbuf, data_off) !=
365                          2);
366         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
367                                  offsetof(struct rte_mbuf, data_off) !=
368                          4);
369         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
370                                  offsetof(struct rte_mbuf, data_off) !=
371                          6);
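        /* The build-time checks above ensure data_off, refcnt, nb_segs and port
         * are laid out contiguously, so they can be initialized in one shot: the
         * 64-bit word returned here is expected to be written into each mbuf's
         * rearm_data by the Rx fast path.
         */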
372         mb_def.nb_segs = 1;
373         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
374                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
375         mb_def.port = port_id;
376         rte_mbuf_refcnt_set(&mb_def, 1);
377
378         /* Prevent compiler reordering: rearm_data covers previous fields */
379         rte_compiler_barrier();
380         tmp = (uint64_t *)&mb_def.rearm_data;
381
382         return *tmp;
383 }
384
385 static inline uint8_t
386 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
387 {
388         /*
389          * A maximum of three segments can be supported with W8, so choose
390          * NIX_MAXSQESZ_W16 for multi-segment offload.
391          */
392         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
393                 return NIX_MAXSQESZ_W16;
394         else
395                 return NIX_MAXSQESZ_W8;
396 }
397
398 int
399 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
400                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
401                         const struct rte_eth_txconf *tx_conf)
402 {
403         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
404         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
405         struct cnxk_eth_txq_sp *txq_sp;
406         struct roc_nix_sq *sq;
407         size_t txq_sz;
408         int rc;
409
410         /* Free memory prior to re-allocation if needed. */
411         if (eth_dev->data->tx_queues[qid] != NULL) {
412                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
413                 dev_ops->tx_queue_release(eth_dev, qid);
414                 eth_dev->data->tx_queues[qid] = NULL;
415         }
416
417         /* When Tx Security offload is enabled, increase tx desc count by
418          * max possible outbound desc count.
419          */
420         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
421                 nb_desc += dev->outb.nb_desc;
422
423         /* Setup ROC SQ */
424         sq = &dev->sqs[qid];
425         sq->qid = qid;
426         sq->nb_desc = nb_desc;
427         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
428
429         rc = roc_nix_sq_init(&dev->nix, sq);
430         if (rc) {
431                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
432                 return rc;
433         }
434
435         rc = -ENOMEM;
436         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
437         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
438         if (!txq_sp) {
439                 plt_err("Failed to alloc tx queue mem");
440                 rc |= roc_nix_sq_fini(sq);
441                 return rc;
442         }
443
444         txq_sp->dev = dev;
445         txq_sp->qid = qid;
446         txq_sp->qconf.conf.tx = *tx_conf;
447         /* Queue config should reflect global offloads */
448         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
449         txq_sp->qconf.nb_desc = nb_desc;
450
451         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
452                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
453                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
454                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
455
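        /* The slow path context (txq_sp) sits immediately before the fast path
         * area handed to ethdev, so it can be recovered later via
         * cnxk_eth_txq_to_sp().
         */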
456         /* Store start of fast path area */
457         eth_dev->data->tx_queues[qid] = txq_sp + 1;
458         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
459         return 0;
460 }
461
462 static void
463 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
464 {
465         void *txq = eth_dev->data->tx_queues[qid];
466         struct cnxk_eth_txq_sp *txq_sp;
467         struct cnxk_eth_dev *dev;
468         struct roc_nix_sq *sq;
469         int rc;
470
471         if (!txq)
472                 return;
473
474         txq_sp = cnxk_eth_txq_to_sp(txq);
475
476         dev = txq_sp->dev;
477
478         plt_nix_dbg("Releasing txq %u", qid);
479
480         /* Cleanup ROC SQ */
481         sq = &dev->sqs[qid];
482         rc = roc_nix_sq_fini(sq);
483         if (rc)
484                 plt_err("Failed to cleanup sq, rc=%d", rc);
485
486         /* Finally free */
487         plt_free(txq_sp);
488 }
489
490 int
491 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
492                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
493                         const struct rte_eth_rxconf *rx_conf,
494                         struct rte_mempool *mp)
495 {
496         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
497         struct roc_nix *nix = &dev->nix;
498         struct cnxk_eth_rxq_sp *rxq_sp;
499         struct rte_mempool_ops *ops;
500         const char *platform_ops;
501         struct roc_nix_rq *rq;
502         struct roc_nix_cq *cq;
503         uint16_t first_skip;
504         int rc = -EINVAL;
505         size_t rxq_sz;
506
507         /* Sanity checks */
508         if (rx_conf->rx_deferred_start == 1) {
509                 plt_err("Deferred Rx start is not supported");
510                 goto fail;
511         }
512
513         platform_ops = rte_mbuf_platform_mempool_ops();
514         /* This driver needs cnxk_npa mempool ops to work */
515         ops = rte_mempool_get_ops(mp->ops_index);
516         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
517                 plt_err("mempool ops should be of cnxk_npa type");
518                 goto fail;
519         }
520
521         if (mp->pool_id == 0) {
522                 plt_err("Invalid pool_id");
523                 goto fail;
524         }
525
526         /* Free memory prior to re-allocation if needed */
527         if (eth_dev->data->rx_queues[qid] != NULL) {
528                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
529
530                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
531                 dev_ops->rx_queue_release(eth_dev, qid);
532                 eth_dev->data->rx_queues[qid] = NULL;
533         }
534
535         /* Clamp the CQ limit to the size of the packet pool aura for LBK
536          * to avoid meta packet drops, as LBK does not currently support
537          * backpressure.
538          */
539         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
540                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
541
542                 /* Use current RQ's aura limit if inl rq is not available */
543                 if (!pkt_pool_limit)
544                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
545                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
546         }
547
548         /* It's a no-op when the inline device is not used */
549         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
550             dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
551                 roc_nix_inl_dev_xaq_realloc(mp->pool_id);
552
553         /* Setup ROC CQ */
554         cq = &dev->cqs[qid];
555         cq->qid = qid;
556         cq->nb_desc = nb_desc;
557         rc = roc_nix_cq_init(&dev->nix, cq);
558         if (rc) {
559                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
560                 goto fail;
561         }
562
563         /* Setup ROC RQ */
564         rq = &dev->rqs[qid];
565         rq->qid = qid;
566         rq->aura_handle = mp->pool_id;
567         rq->flow_tag_width = 32;
568         rq->sso_ena = false;
569
570         /* Calculate first mbuf skip */
571         first_skip = (sizeof(struct rte_mbuf));
572         first_skip += RTE_PKTMBUF_HEADROOM;
573         first_skip += rte_pktmbuf_priv_size(mp);
574         rq->first_skip = first_skip;
575         rq->later_skip = sizeof(struct rte_mbuf);
576         rq->lpb_size = mp->elt_size;
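        /* Packet pool (LPB) tail drop is enabled unless inline security is in
         * use, where drops are presumably handled via the pool/aura limits
         * instead so that inbound meta packets are not lost.
         */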
577         rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
578
579         /* Enable inline IPsec on the RQ; it will not be used in poll mode */
580         if (roc_nix_inl_inb_is_enabled(nix))
581                 rq->ipsech_ena = true;
582
583         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
584         if (rc) {
585                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
586                 goto cq_fini;
587         }
588
589         /* Allocate and setup fast path rx queue */
590         rc = -ENOMEM;
591         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
592         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
593         if (!rxq_sp) {
594                 plt_err("Failed to alloc rx queue for rq=%d", qid);
595                 goto rq_fini;
596         }
597
598         /* Setup slow path fields */
599         rxq_sp->dev = dev;
600         rxq_sp->qid = qid;
601         rxq_sp->qconf.conf.rx = *rx_conf;
602         /* Queue config should reflect global offloads */
603         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
604         rxq_sp->qconf.nb_desc = nb_desc;
605         rxq_sp->qconf.mp = mp;
606
607         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
608                 /* Pass a tag mask used to handle error packets in the inline device.
609                  * The ethdev RQ's tag_mask field will be overwritten later
610                  * when SSO is set up.
611                  */
612                 rq->tag_mask =
613                         0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
614
615                 /* Setup rq reference for inline dev if present */
616                 rc = roc_nix_inl_dev_rq_get(rq);
617                 if (rc)
618                         goto free_mem;
619         }
620
621         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
622                     cq->nb_desc);
623
624         /* Store start of fast path area */
625         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
626         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
627
628         /* Calculate the delta and frequency multiplier between the PTP HI clock
629          * and the TSC. These are needed to derive the raw clock value from the
630          * TSC counter; the read_clock eth op returns this raw clock value.
631          */
632         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
633                 rc = cnxk_nix_tsc_convert(dev);
634                 if (rc) {
635                         plt_err("Failed to calculate delta and freq mult");
636                         goto rq_fini;
637                 }
638         }
639
640         return 0;
641 free_mem:
642         plt_free(rxq_sp);
643 rq_fini:
644         rc |= roc_nix_rq_fini(rq);
645 cq_fini:
646         rc |= roc_nix_cq_fini(cq);
647 fail:
648         return rc;
649 }
650
651 static void
652 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
653 {
654         void *rxq = eth_dev->data->rx_queues[qid];
655         struct cnxk_eth_rxq_sp *rxq_sp;
656         struct cnxk_eth_dev *dev;
657         struct roc_nix_rq *rq;
658         struct roc_nix_cq *cq;
659         int rc;
660
661         if (!rxq)
662                 return;
663
664         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
665         dev = rxq_sp->dev;
666         rq = &dev->rqs[qid];
667
668         plt_nix_dbg("Releasing rxq %u", qid);
669
670         /* Release rq reference for inline dev if present */
671         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
672                 roc_nix_inl_dev_rq_put(rq);
673
674         /* Cleanup ROC RQ */
675         rc = roc_nix_rq_fini(rq);
676         if (rc)
677                 plt_err("Failed to cleanup rq, rc=%d", rc);
678
679         /* Cleanup ROC CQ */
680         cq = &dev->cqs[qid];
681         rc = roc_nix_cq_fini(cq);
682         if (rc)
683                 plt_err("Failed to cleanup cq, rc=%d", rc);
684
685         /* Finally free fast path area */
686         plt_free(rxq_sp);
687 }
688
689 uint32_t
690 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
691                        uint8_t rss_level)
692 {
693         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
694                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
695                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
696                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
697                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
698                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
699                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
700                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
701                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
702                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
703                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
704                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
705         };
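        /* Row 0 selects outer-header key fields, row 1 inner-header fields and
         * row 2 both, indexed by the requested RSS hash level.
         */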
706         uint32_t flowkey_cfg = 0;
707
708         dev->ethdev_rss_hf = ethdev_rss;
709
710         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
711             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
712                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
713         }
714
715         if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
716                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
717
718         if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
719                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
720
721         if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
722                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
723
724         if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
725                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
726
727         if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
728                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
729
730         if (ethdev_rss & RSS_IPV4_ENABLE)
731                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
732
733         if (ethdev_rss & RSS_IPV6_ENABLE)
734                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
735
736         if (ethdev_rss & RTE_ETH_RSS_TCP)
737                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
738
739         if (ethdev_rss & RTE_ETH_RSS_UDP)
740                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
741
742         if (ethdev_rss & RTE_ETH_RSS_SCTP)
743                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
744
745         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
746                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
747
748         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
749                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
750
751         if (ethdev_rss & RTE_ETH_RSS_PORT)
752                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
753
754         if (ethdev_rss & RTE_ETH_RSS_NVGRE)
755                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
756
757         if (ethdev_rss & RTE_ETH_RSS_VXLAN)
758                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
759
760         if (ethdev_rss & RTE_ETH_RSS_GENEVE)
761                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
762
763         if (ethdev_rss & RTE_ETH_RSS_GTPU)
764                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
765
766         return flowkey_cfg;
767 }
768
769 static void
770 nix_free_queue_mem(struct cnxk_eth_dev *dev)
771 {
772         plt_free(dev->rqs);
773         plt_free(dev->cqs);
774         plt_free(dev->sqs);
775         dev->rqs = NULL;
776         dev->cqs = NULL;
777         dev->sqs = NULL;
778 }
779
780 static int
781 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
782 {
783         struct rte_eth_dev *eth_dev = dev->eth_dev;
784         int rc = 0;
785
786         TAILQ_INIT(&dev->mtr_profiles);
787         TAILQ_INIT(&dev->mtr_policy);
788         TAILQ_INIT(&dev->mtr);
789
790         if (eth_dev->dev_ops->mtr_ops_get == NULL)
791                 return rc;
792
793         return nix_mtr_capabilities_init(eth_dev);
794 }
795
796 static int
797 nix_rss_default_setup(struct cnxk_eth_dev *dev)
798 {
799         struct rte_eth_dev *eth_dev = dev->eth_dev;
800         uint8_t rss_hash_level;
801         uint32_t flowkey_cfg;
802         uint64_t rss_hf;
803
804         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
805         rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
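        /* Convert the ethdev RSS level (1 = outermost, 2 = innermost) to a
         * 0-based index into the flow key table; level 0 (PMD default) also
         * maps to outer-header hashing.
         */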
806         if (rss_hash_level)
807                 rss_hash_level -= 1;
808
809         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
810         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
811 }
812
813 static int
814 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
815 {
816         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
817         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
818         struct cnxk_eth_qconf *tx_qconf = NULL;
819         struct cnxk_eth_qconf *rx_qconf = NULL;
820         struct cnxk_eth_rxq_sp *rxq_sp;
821         struct cnxk_eth_txq_sp *txq_sp;
822         int i, nb_rxq, nb_txq;
823         void **txq, **rxq;
824
825         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
826         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
827
828         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
829         if (tx_qconf == NULL) {
830                 plt_err("Failed to allocate memory for tx_qconf");
831                 goto fail;
832         }
833
834         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
835         if (rx_qconf == NULL) {
836                 plt_err("Failed to allocate memory for rx_qconf");
837                 goto fail;
838         }
839
840         txq = eth_dev->data->tx_queues;
841         for (i = 0; i < nb_txq; i++) {
842                 if (txq[i] == NULL) {
843                         tx_qconf[i].valid = false;
844                         plt_info("txq[%d] is already released", i);
845                         continue;
846                 }
847                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
848                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
849                 tx_qconf[i].valid = true;
850                 dev_ops->tx_queue_release(eth_dev, i);
851                 eth_dev->data->tx_queues[i] = NULL;
852         }
853
854         rxq = eth_dev->data->rx_queues;
855         for (i = 0; i < nb_rxq; i++) {
856                 if (rxq[i] == NULL) {
857                         rx_qconf[i].valid = false;
858                         plt_info("rxq[%d] is already released", i);
859                         continue;
860                 }
861                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
862                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
863                 rx_qconf[i].valid = true;
864                 dev_ops->rx_queue_release(eth_dev, i);
865                 eth_dev->data->rx_queues[i] = NULL;
866         }
867
868         dev->tx_qconf = tx_qconf;
869         dev->rx_qconf = rx_qconf;
870         return 0;
871
872 fail:
873         free(tx_qconf);
874         free(rx_qconf);
875         return -ENOMEM;
876 }
877
878 static int
879 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
880 {
881         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
882         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
883         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
884         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
885         int rc, i, nb_rxq, nb_txq;
886
887         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
888         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
889
890         rc = -ENOMEM;
891         /* Set up Tx & Rx queues with the previous configuration so
892          * that the queues remain functional in cases where ports
893          * are started without reconfiguring the queues.
894          *
895          * The usual reconfigure sequence is as below:
896          * port_configure() {
897          *      if(reconfigure) {
898          *              queue_release()
899          *              queue_setup()
900          *      }
901          *      queue_configure() {
902          *              queue_release()
903          *              queue_setup()
904          *      }
905          * }
906          * port_start()
907          *
908          * In some applications' control path, queue_configure() would
909          * NOT be invoked for TXQs/RXQs in port_configure().
910          * In such cases, queues can be functional after start as the
911          * queues were already set up in port_configure().
912          */
913         for (i = 0; i < nb_txq; i++) {
914                 if (!tx_qconf[i].valid)
915                         continue;
916                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
917                                              &tx_qconf[i].conf.tx);
918                 if (rc) {
919                         plt_err("Failed to setup tx queue rc=%d", rc);
920                         for (i -= 1; i >= 0; i--)
921                                 dev_ops->tx_queue_release(eth_dev, i);
922                         goto fail;
923                 }
924         }
925
926         free(tx_qconf);
927         tx_qconf = NULL;
928
929         for (i = 0; i < nb_rxq; i++) {
930                 if (!rx_qconf[i].valid)
931                         continue;
932                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
933                                              &rx_qconf[i].conf.rx,
934                                              rx_qconf[i].mp);
935                 if (rc) {
936                         plt_err("Failed to setup rx queue rc=%d", rc);
937                         for (i -= 1; i >= 0; i--)
938                                 dev_ops->rx_queue_release(eth_dev, i);
939                         goto tx_queue_release;
940                 }
941         }
942
943         free(rx_qconf);
944         rx_qconf = NULL;
945
946         return 0;
947
948 tx_queue_release:
949         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
950                 dev_ops->tx_queue_release(eth_dev, i);
951 fail:
952         free(tx_qconf);
953         free(rx_qconf);
954
955         return rc;
956 }
957
958 static void
959 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
960 {
961         /* These dummy functions are required to support
962          * applications which reconfigure queues without
963          * stopping the Tx and Rx burst threads (e.g. the KNI app).
964          * When the queue context is saved, the txqs/rxqs are released,
965          * which would crash the app since Rx/Tx burst may still be
966          * running on different lcores.
967          */
968         eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
969         eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
970         rte_mb();
971 }
972
973 static int
974 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
975 {
976         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
977         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
978         struct roc_nix *nix = &dev->nix;
979         int rc;
980
981         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
982         if (rc)
983                 return rc;
984
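        /* Pack the LSO format indices for the four IPv4/IPv6 tunnel combinations
         * (non-UDP tunnels in the low 32 bits, UDP tunnels in the high 32 bits),
         * one byte per format, so the Tx path can look them up quickly.
         */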
985         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
986                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
987                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
988                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
989
990         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
991                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
992                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
993                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
994         return 0;
995 }
996
997 static int
998 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
999 {
1000         struct roc_nix *nix = &dev->nix;
1001         int rc;
1002
1003         /* Nothing much to do if offload is not enabled */
1004         if (!(dev->tx_offloads &
1005               (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1006                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1007                 return 0;
1008
1009         /* Set up LSO formats in AF. It's a no-op if another ethdev has
1010          * already set them up.
1011          */
1012         rc = roc_nix_lso_fmt_setup(nix);
1013         if (rc)
1014                 return rc;
1015
1016         return nix_lso_tun_fmt_update(dev);
1017 }
1018
1019 int
1020 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1021 {
1022         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1023         struct rte_eth_dev_data *data = eth_dev->data;
1024         struct rte_eth_conf *conf = &data->dev_conf;
1025         struct rte_eth_rxmode *rxmode = &conf->rxmode;
1026         struct rte_eth_txmode *txmode = &conf->txmode;
1027         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1028         struct roc_nix_fc_cfg fc_cfg = {0};
1029         struct roc_nix *nix = &dev->nix;
1030         struct rte_ether_addr *ea;
1031         uint8_t nb_rxq, nb_txq;
1032         uint64_t rx_cfg;
1033         void *qs;
1034         int rc;
1035
1036         rc = -EINVAL;
1037
1038         /* Sanity checks */
1039         if (rte_eal_has_hugepages() == 0) {
1040                 plt_err("Huge page is not configured");
1041                 goto fail_configure;
1042         }
1043
1044         if (conf->dcb_capability_en == 1) {
1045                 plt_err("dcb enable is not supported");
1046                 goto fail_configure;
1047         }
1048
1049         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1050                 plt_err("Flow director is not supported");
1051                 goto fail_configure;
1052         }
1053
1054         if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1055             rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1056                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1057                 goto fail_configure;
1058         }
1059
1060         if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1061                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1062                 goto fail_configure;
1063         }
1064
1065         /* Free the resources allocated from the previous configure */
1066         if (dev->configured == 1) {
1067                 /* Unregister queue irq's */
1068                 roc_nix_unregister_queue_irqs(nix);
1069
1070                 /* Unregister CQ irqs if present */
1071                 if (eth_dev->data->dev_conf.intr_conf.rxq)
1072                         roc_nix_unregister_cq_irqs(nix);
1073
1074                 /* Set no-op functions */
1075                 nix_set_nop_rxtx_function(eth_dev);
1076                 /* Store queue config for later */
1077                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1078                 if (rc)
1079                         goto fail_configure;
1080
1081                 /* Disable and free rte_meter entries */
1082                 rc = nix_meter_fini(dev);
1083                 if (rc)
1084                         goto fail_configure;
1085
1086                 /* Cleanup security support */
1087                 rc = nix_security_release(dev);
1088                 if (rc)
1089                         goto fail_configure;
1090
1091                 roc_nix_tm_fini(nix);
1092                 roc_nix_lf_free(nix);
1093         }
1094
1095         dev->rx_offloads = rxmode->offloads;
1096         dev->tx_offloads = txmode->offloads;
1097
1098         /* Prepare rx cfg */
1099         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1100         if (dev->rx_offloads &
1101             (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1102                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1103                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1104         }
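        /* Always verify L2/L3/L4 lengths and drop packets with receive errors
         * (DROP_RE) by default.
         */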
1105         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1106                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1107                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1108
1109         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1110                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1111                 /* Disable DROP_RE if Rx security offload is enabled and
1112                  * the platform does not support it in that mode.
1113                  */
1114                 if (dev->ipsecd_drop_re_dis)
1115                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1116         }
1117
1118         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1119         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1120
1121         /* Alloc a nix lf */
1122         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1123         if (rc) {
1124                 plt_err("Failed to init nix_lf rc=%d", rc);
1125                 goto fail_configure;
1126         }
1127
1128         dev->npc.channel = roc_nix_get_base_chan(nix);
1129
1130         nb_rxq = data->nb_rx_queues;
1131         nb_txq = data->nb_tx_queues;
1132         rc = -ENOMEM;
1133         if (nb_rxq) {
1134                 /* Allocate memory for roc rq's and cq's */
1135                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1136                 if (!qs) {
1137                         plt_err("Failed to alloc rqs");
1138                         goto free_nix_lf;
1139                 }
1140                 dev->rqs = qs;
1141
1142                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1143                 if (!qs) {
1144                         plt_err("Failed to alloc cqs");
1145                         goto free_nix_lf;
1146                 }
1147                 dev->cqs = qs;
1148         }
1149
1150         if (nb_txq) {
1151                 /* Allocate memory for roc sq's */
1152                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1153                 if (!qs) {
1154                         plt_err("Failed to alloc sqs");
1155                         goto free_nix_lf;
1156                 }
1157                 dev->sqs = qs;
1158         }
1159
1160         /* Re-enable NIX LF error interrupts */
1161         roc_nix_err_intr_ena_dis(nix, true);
1162         roc_nix_ras_intr_ena_dis(nix, true);
1163
1164         if (nix->rx_ptp_ena &&
1165             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1166                 plt_err("Both PTP and switch header enabled");
1167                 goto free_nix_lf;
1168         }
1169
1170         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1171                                     dev->npc.pre_l2_size_offset,
1172                                     dev->npc.pre_l2_size_offset_mask,
1173                                     dev->npc.pre_l2_size_shift_dir);
1174         if (rc) {
1175                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1176                 goto free_nix_lf;
1177         }
1178
1179         /* Setup LSO if needed */
1180         rc = nix_lso_fmt_setup(dev);
1181         if (rc) {
1182                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1183                 goto free_nix_lf;
1184         }
1185
1186         /* Configure RSS */
1187         rc = nix_rss_default_setup(dev);
1188         if (rc) {
1189                 plt_err("Failed to configure rss rc=%d", rc);
1190                 goto free_nix_lf;
1191         }
1192
1193         /* Init the default TM scheduler hierarchy */
1194         rc = roc_nix_tm_init(nix);
1195         if (rc) {
1196                 plt_err("Failed to init traffic manager, rc=%d", rc);
1197                 goto free_nix_lf;
1198         }
1199
1200         rc = nix_ingress_policer_setup(dev);
1201         if (rc) {
1202                 plt_err("Failed to setup ingress policer rc=%d", rc);
1203                 goto free_nix_lf;
1204         }
1205
1206         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1207         if (rc) {
1208                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1209                 goto tm_fini;
1210         }
1211
1212         /* Register queue IRQs */
1213         rc = roc_nix_register_queue_irqs(nix);
1214         if (rc) {
1215                 plt_err("Failed to register queue interrupts rc=%d", rc);
1216                 goto tm_fini;
1217         }
1218
1219         /* Register cq IRQs */
1220         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1221                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1222                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1223                                 dev->nix.cints);
1224                         goto q_irq_fini;
1225                 }
1226                 /* The Rx interrupt feature cannot work with vector mode because
1227                  * vector mode does not process packets until at least 4 packets
1228                  * are received, while CQ interrupts are generated even for a
1229                  * single packet in the CQ.
1230                  */
1231                 dev->scalar_ena = true;
1232
1233                 rc = roc_nix_register_cq_irqs(nix);
1234                 if (rc) {
1235                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1236                         goto q_irq_fini;
1237                 }
1238         }
1239
1240         /* Configure loop back mode */
1241         rc = roc_nix_mac_loopback_enable(nix,
1242                                          eth_dev->data->dev_conf.lpbk_mode);
1243         if (rc) {
1244                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1245                 goto cq_fini;
1246         }
1247
1248         /* Setup Inline security support */
1249         rc = nix_security_setup(dev);
1250         if (rc)
1251                 goto cq_fini;
1252
1253         /* Init flow control configuration */
1254         fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1255         fc_cfg.rxchan_cfg.enable = true;
1256         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1257         if (rc) {
1258                 plt_err("Failed to initialize flow control rc=%d", rc);
1259                 goto cq_fini;
1260         }
1261
1262         /* Update flow control configuration to PMD */
1263         rc = nix_init_flow_ctrl_config(eth_dev);
1264         if (rc) {
1265                 plt_err("Failed to initialize flow control rc=%d", rc);
1266                 goto cq_fini;
1267         }
1268
1269         /* Initialize TC to SQ mapping as invalid */
1270         memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
1271         /*
1272          * Restore the queue config for the case where a reconfigure follows
1273          * a reconfigure and the application does not invoke queue configure.
1274          */
1275         if (dev->configured == 1) {
1276                 rc = nix_restore_queue_cfg(eth_dev);
1277                 if (rc)
1278                         goto sec_release;
1279         }
1280
1281         /* Update the mac address */
1282         ea = eth_dev->data->mac_addrs;
1283         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1284         if (rte_is_zero_ether_addr(ea))
1285                 rte_eth_random_addr((uint8_t *)ea);
1286
1287         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1288
1289         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1290                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1291                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1292                     dev->rx_offloads, dev->tx_offloads);
1293
1294         /* All good */
1295         dev->configured = 1;
1296         dev->nb_rxq = data->nb_rx_queues;
1297         dev->nb_txq = data->nb_tx_queues;
1298         return 0;
1299
1300 sec_release:
1301         rc |= nix_security_release(dev);
1302 cq_fini:
1303         roc_nix_unregister_cq_irqs(nix);
1304 q_irq_fini:
1305         roc_nix_unregister_queue_irqs(nix);
1306 tm_fini:
1307         roc_nix_tm_fini(nix);
1308 free_nix_lf:
1309         nix_free_queue_mem(dev);
1310         rc |= roc_nix_lf_free(nix);
1311 fail_configure:
1312         dev->configured = 0;
1313         return rc;
1314 }
1315
1316 int
1317 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1318 {
1319         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1320         struct rte_eth_dev_data *data = eth_dev->data;
1321         struct roc_nix_sq *sq = &dev->sqs[qid];
1322         int rc = -EINVAL;
1323
1324         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1325                 return 0;
1326
1327         rc = roc_nix_tm_sq_aura_fc(sq, true);
1328         if (rc) {
1329                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1330                 goto done;
1331         }
1332
1333         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1334 done:
1335         return rc;
1336 }
1337
1338 int
1339 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1340 {
1341         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1342         struct rte_eth_dev_data *data = eth_dev->data;
1343         struct roc_nix_sq *sq = &dev->sqs[qid];
1344         int rc;
1345
1346         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1347                 return 0;
1348
1349         rc = roc_nix_tm_sq_aura_fc(sq, false);
1350         if (rc) {
1351                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1352                         rc);
1353                 goto done;
1354         }
1355
1356         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1357 done:
1358         return rc;
1359 }
1360
1361 static int
1362 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1363 {
1364         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1365         struct rte_eth_dev_data *data = eth_dev->data;
1366         struct roc_nix_rq *rq = &dev->rqs[qid];
1367         int rc;
1368
1369         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1370                 return 0;
1371
1372         rc = roc_nix_rq_ena_dis(rq, true);
1373         if (rc) {
1374                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1375                 goto done;
1376         }
1377
1378         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1379 done:
1380         return rc;
1381 }
1382
1383 static int
1384 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1385 {
1386         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1387         struct rte_eth_dev_data *data = eth_dev->data;
1388         struct roc_nix_rq *rq = &dev->rqs[qid];
1389         int rc;
1390
1391         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1392                 return 0;
1393
1394         rc = roc_nix_rq_ena_dis(rq, false);
1395         if (rc) {
1396                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1397                 goto done;
1398         }
1399
1400         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1401 done:
1402         return rc;
1403 }
1404
1405 static int
1406 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1407 {
1408         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1409         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1410         struct rte_mbuf *rx_pkts[32];
1411         struct rte_eth_link link;
1412         int count, i, j, rc;
1413         void *rxq;
1414
1415         /* Disable all the NPC entries */
1416         rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
1417         if (rc)
1418                 return rc;
1419
1420         /* Stop link change events */
1421         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1422                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1423
1424         /* Disable Rx via NPC */
1425         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1426
1427         /* Stop rx queues and free up pkts pending */
1428         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1429                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1430                 if (rc)
1431                         continue;
1432
1433                 rxq = eth_dev->data->rx_queues[i];
1434                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1435                 while (count) {
1436                         for (j = 0; j < count; j++)
1437                                 rte_pktmbuf_free(rx_pkts[j]);
1438                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1439                 }
1440         }
1441
1442         /* Stop tx queues  */
1443         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1444                 dev_ops->tx_queue_stop(eth_dev, i);
1445
1446         /* Bring down link status internally */
1447         memset(&link, 0, sizeof(link));
1448         rte_eth_linkstatus_set(eth_dev, &link);
1449
1450         return 0;
1451 }
1452
1453 int
1454 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1455 {
1456         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1457         int rc, i;
1458
1459         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1460                 rc = nix_recalc_mtu(eth_dev);
1461                 if (rc)
1462                         return rc;
1463         }
1464
1465         /* Start rx queues */
1466         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1467                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1468                 if (rc)
1469                         return rc;
1470         }
1471
1472         /* Start tx queues  */
1473         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1474                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1475                 if (rc)
1476                         return rc;
1477         }
1478
1479         /* Update Flow control configuration */
1480         rc = nix_update_flow_ctrl_config(eth_dev);
1481         if (rc) {
1482                 plt_err("Failed to enable flow control. error code(%d)", rc);
1483                 return rc;
1484         }
1485
1486         /* Enable Rx in NPC */
1487         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1488         if (rc) {
1489                 plt_err("Failed to enable NPC rx %d", rc);
1490                 return rc;
1491         }
1492
1493         rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
1494         if (rc) {
1495                 plt_err("Failed to enable NPC entries %d", rc);
1496                 return rc;
1497         }
1498
1499         cnxk_nix_toggle_flag_link_cfg(dev, true);
1500
1501         /* Start link change events */
1502         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1503                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1504                 if (rc) {
1505                         plt_err("Failed to start cgx link event %d", rc);
1506                         goto rx_disable;
1507                 }
1508         }
1509
1510         /* Enable PTP if it is requested by the user or already
1511          * enabled on the PF owning this VF.
1512          */
1513         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1514         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1515                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1516         else
1517                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1518
1519         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1520                 rc = rte_mbuf_dyn_rx_timestamp_register
1521                         (&dev->tstamp.tstamp_dynfield_offset,
1522                          &dev->tstamp.rx_tstamp_dynflag);
1523                 if (rc != 0) {
1524                         plt_err("Failed to register Rx timestamp field/flag");
1525                         goto rx_disable;
1526                 }
1527         }
1528
1529         cnxk_nix_toggle_flag_link_cfg(dev, false);
1530
1531         return 0;
1532
1533 rx_disable:
1534         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1535         cnxk_nix_toggle_flag_link_cfg(dev, false);
1536         return rc;
1537 }
1538
1539 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1540 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1541
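     /* Platform-specific drivers (cn9k/cn10k) are expected to override some
      * of these callbacks before device init, roughly as in:
      *
      *     cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
      *     cnxk_eth_dev_ops.tx_queue_setup = cn10k_nix_tx_queue_setup;
      *
      * (illustrative only; see cn9k_ethdev.c/cn10k_ethdev.c for the full set).
      */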
1542 /* CNXK platform independent eth dev ops */
1543 struct eth_dev_ops cnxk_eth_dev_ops = {
1544         .mtu_set = cnxk_nix_mtu_set,
1545         .mac_addr_add = cnxk_nix_mac_addr_add,
1546         .mac_addr_remove = cnxk_nix_mac_addr_del,
1547         .mac_addr_set = cnxk_nix_mac_addr_set,
1548         .dev_infos_get = cnxk_nix_info_get,
1549         .link_update = cnxk_nix_link_update,
1550         .tx_queue_release = cnxk_nix_tx_queue_release,
1551         .rx_queue_release = cnxk_nix_rx_queue_release,
1552         .dev_stop = cnxk_nix_dev_stop,
1553         .dev_close = cnxk_nix_dev_close,
1554         .dev_reset = cnxk_nix_dev_reset,
1555         .tx_queue_start = cnxk_nix_tx_queue_start,
1556         .rx_queue_start = cnxk_nix_rx_queue_start,
1557         .rx_queue_stop = cnxk_nix_rx_queue_stop,
1558         .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1559         .promiscuous_enable = cnxk_nix_promisc_enable,
1560         .promiscuous_disable = cnxk_nix_promisc_disable,
1561         .allmulticast_enable = cnxk_nix_allmulticast_enable,
1562         .allmulticast_disable = cnxk_nix_allmulticast_disable,
1563         .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1564         .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1565         .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1566         .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1567         .priority_flow_ctrl_queue_config =
1568                                 cnxk_nix_priority_flow_ctrl_queue_config,
1569         .priority_flow_ctrl_queue_info_get =
1570                                 cnxk_nix_priority_flow_ctrl_queue_info_get,
1571         .dev_set_link_up = cnxk_nix_set_link_up,
1572         .dev_set_link_down = cnxk_nix_set_link_down,
1573         .get_module_info = cnxk_nix_get_module_info,
1574         .get_module_eeprom = cnxk_nix_get_module_eeprom,
1575         .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1576         .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1577         .pool_ops_supported = cnxk_nix_pool_ops_supported,
1578         .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1579         .stats_get = cnxk_nix_stats_get,
1580         .stats_reset = cnxk_nix_stats_reset,
1581         .xstats_get = cnxk_nix_xstats_get,
1582         .xstats_get_names = cnxk_nix_xstats_get_names,
1583         .xstats_reset = cnxk_nix_xstats_reset,
1584         .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1585         .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1586         .fw_version_get = cnxk_nix_fw_version_get,
1587         .rxq_info_get = cnxk_nix_rxq_info_get,
1588         .txq_info_get = cnxk_nix_txq_info_get,
1589         .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1590         .flow_ops_get = cnxk_nix_flow_ops_get,
1591         .get_reg = cnxk_nix_dev_get_reg,
1592         .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1593         .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1594         .timesync_read_time = cnxk_nix_timesync_read_time,
1595         .timesync_write_time = cnxk_nix_timesync_write_time,
1596         .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1597         .read_clock = cnxk_nix_read_clock,
1598         .reta_update = cnxk_nix_reta_update,
1599         .reta_query = cnxk_nix_reta_query,
1600         .rss_hash_update = cnxk_nix_rss_hash_update,
1601         .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1602         .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1603         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1604         .tm_ops_get = cnxk_nix_tm_ops_get,
1605         .mtr_ops_get = cnxk_nix_mtr_ops_get,
1606 };
1607
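     /* One-time ethdev setup: install dev ops and the security context; in
      * the primary process also parse devargs, initialize the base ROC NIX,
      * register link callbacks, allocate the MAC address table and program
      * the default MAC, derive offload/speed capabilities and init ROC NPC.
      */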
1608 static int
1609 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1610 {
1611         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1612         struct rte_security_ctx *sec_ctx;
1613         struct roc_nix *nix = &dev->nix;
1614         struct rte_pci_device *pci_dev;
1615         int rc, max_entries;
1616
1617         eth_dev->dev_ops = &cnxk_eth_dev_ops;
1618         eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
1619         eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
1620         eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
1621
1622         /* Alloc security context */
1623         sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1624         if (!sec_ctx)
1625                 return -ENOMEM;
1626         sec_ctx->device = eth_dev;
1627         sec_ctx->ops = &cnxk_eth_sec_ops;
1628         sec_ctx->flags =
1629                 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1630         eth_dev->security_ctx = sec_ctx;
1631
1632         /* For secondary processes, the primary has done all the work */
1633         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1634                 return 0;
1635
1636         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1637         rte_eth_copy_pci_info(eth_dev, pci_dev);
1638
1639         /* Parse devargs string */
1640         rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1641         if (rc) {
1642                 plt_err("Failed to parse devargs rc=%d", rc);
1643                 goto error;
1644         }
1645
1646         /* Initialize base roc nix */
1647         nix->pci_dev = pci_dev;
1648         nix->hw_vlan_ins = true;
1649         rc = roc_nix_dev_init(nix);
1650         if (rc) {
1651                 plt_err("Failed to initialize roc nix rc=%d", rc);
1652                 goto error;
1653         }
1654
1655         /* Register link status update callback */
1656         roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1657
1658         /* Register link info get callback */
1659         roc_nix_mac_link_info_get_cb_register(nix,
1660                                               cnxk_eth_dev_link_status_get_cb);
1661
1662         dev->eth_dev = eth_dev;
1663         dev->configured = 0;
1664         dev->ptype_disable = 0;
1665
1666         TAILQ_INIT(&dev->inb.list);
1667         TAILQ_INIT(&dev->outb.list);
1668         rte_spinlock_init(&dev->inb.lock);
1669         rte_spinlock_init(&dev->outb.lock);
1670
1671         /* For VFs, the returned max_entries will be 0, but one entry must
1672          * be allocated to hold the default MAC address, so set it to 1.
1673          */
1674         if (roc_nix_is_vf_or_sdp(nix))
1675                 max_entries = 1;
1676         else
1677                 max_entries = roc_nix_mac_max_entries_get(nix);
1678
1679         if (max_entries <= 0) {
1680                 plt_err("Failed to get max entries for mac addr");
1681                 rc = -ENOTSUP;
1682                 goto dev_fini;
1683         }
1684
1685         eth_dev->data->mac_addrs =
1686                 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1687         if (eth_dev->data->mac_addrs == NULL) {
1688                 plt_err("Failed to allocate memory for mac addr");
1689                 rc = -ENOMEM;
1690                 goto dev_fini;
1691         }
1692
1693         dev->max_mac_entries = max_entries;
1694         dev->dmac_filter_count = 1;
1695
1696         /* Get mac address */
1697         rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1698         if (rc) {
1699                 plt_err("Failed to get mac addr, rc=%d", rc);
1700                 goto free_mac_addrs;
1701         }
1702
1703         /* Update the mac address */
1704         memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1705
1706         if (!roc_nix_is_vf_or_sdp(nix)) {
1707                 /* Sync same MAC address to CGX/RPM table */
1708                 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1709                 if (rc) {
1710                         plt_err("Failed to set mac addr, rc=%d", rc);
1711                         goto free_mac_addrs;
1712                 }
1713         }
1714
1715         /* Union of all capabilities supported by CNXK.
1716          * Platform specific capabilities will be
1717          * updated later.
1718          */
1719         dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1720         dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1721         dev->speed_capa = nix_get_speed_capa(dev);
1722
1723         /* Initialize roc npc */
1724         dev->npc.roc_nix = nix;
1725         rc = roc_npc_init(&dev->npc);
1726         if (rc)
1727                 goto free_mac_addrs;
1728
1729         plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1730                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1731                     eth_dev->data->port_id, roc_nix_get_pf(nix),
1732                     roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1733                     dev->rx_offload_capa, dev->tx_offload_capa);
1734         return 0;
1735
1736 free_mac_addrs:
1737         rte_free(eth_dev->data->mac_addrs);
1738 dev_fini:
1739         roc_nix_dev_fini(nix);
1740 error:
1741         plt_err("Failed to init nix eth_dev rc=%d", rc);
1742         return rc;
1743 }
1744
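     /* Tear down in roughly the reverse order of init: restore flow control
      * defaults, free meters and flows, stop link events, release all Tx/Rx
      * queues, security, TM and IRQ resources, free the NIX LF and finally
      * run roc_nix_dev_fini(). With 'reset' set, the device private data is
      * kept so that cnxk_eth_dev_init() can be re-run.
      */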
1745 static int
1746 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1747 {
1748         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1749         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1750         struct rte_eth_pfc_queue_conf pfc_conf;
1751         struct roc_nix *nix = &dev->nix;
1752         struct rte_eth_fc_conf fc_conf;
1753         int rc, i;
1754
1755         /* Disable switch hdr pkind */
1756         roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1757
1758         plt_free(eth_dev->security_ctx);
1759         eth_dev->security_ctx = NULL;
1760
1761         /* Nothing to be done for secondary processes */
1762         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1763                 return 0;
1764
1765         /* Clear the flag since we are closing down */
1766         dev->configured = 0;
1767
1768         roc_nix_npc_rx_ena_dis(nix, false);
1769
1770         /* Restore 802.3 and priority flow control configuration */
1771         memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
1772         memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1773         fc_conf.mode = RTE_ETH_FC_NONE;
1774         rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1775
1776         pfc_conf.mode = RTE_ETH_FC_NONE;
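             /* Reset PFC only for traffic classes that have an SQ mapped;
              * 0xFFFF in pfc_tc_sq_map[] appears to mark an unused entry.
              */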
1777         for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
1778                 if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
1779                         pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
1780                         pfc_conf.rx_pause.tc = i;
1781                         pfc_conf.tx_pause.rx_qid = i;
1782                         pfc_conf.tx_pause.tc = i;
1783                         rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
1784                                 &pfc_conf);
1785                         if (rc)
1786                                 plt_err("Failed to reset PFC. error code(%d)",
1787                                         rc);
1788                 }
1789         }
1790
1791         fc_conf.mode = RTE_ETH_FC_FULL;
1792         rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1793
1794         /* Disable and free rte_meter entries */
1795         nix_meter_fini(dev);
1796
1797         /* Disable and free rte_flow entries */
1798         roc_npc_fini(&dev->npc);
1799
1800         /* Disable link status events */
1801         roc_nix_mac_link_event_start_stop(nix, false);
1802
1803         /* Unregister the link update callback; this is required to stop
1804          * VFs from receiving link status updates on the exit path.
1805          */
1806         roc_nix_mac_link_cb_unregister(nix);
1807
1808         /* Free up SQs */
1809         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1810                 dev_ops->tx_queue_release(eth_dev, i);
1811                 eth_dev->data->tx_queues[i] = NULL;
1812         }
1813         eth_dev->data->nb_tx_queues = 0;
1814
1815         /* Free up RQ's and CQ's */
1816         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1817                 dev_ops->rx_queue_release(eth_dev, i);
1818                 eth_dev->data->rx_queues[i] = NULL;
1819         }
1820         eth_dev->data->nb_rx_queues = 0;
1821
1822         /* Free security resources */
1823         nix_security_release(dev);
1824
1825         /* Free tm resources */
1826         roc_nix_tm_fini(nix);
1827
1828         /* Unregister queue irqs */
1829         roc_nix_unregister_queue_irqs(nix);
1830
1831         /* Unregister cq irqs */
1832         if (eth_dev->data->dev_conf.intr_conf.rxq)
1833                 roc_nix_unregister_cq_irqs(nix);
1834
1835         /* Free ROC RQ's, SQ's and CQ's memory */
1836         nix_free_queue_mem(dev);
1837
1838         /* Free nix lf resources */
1839         rc = roc_nix_lf_free(nix);
1840         if (rc)
1841                 plt_err("Failed to free nix lf, rc=%d", rc);
1842
1843         rte_free(eth_dev->data->mac_addrs);
1844         eth_dev->data->mac_addrs = NULL;
1845
1846         rc = roc_nix_dev_fini(nix);
1847         /* dev_private may be freed later by the PMD if NPA LF is in use */
1848         if (rc == -EAGAIN) {
1849                 if (!reset)
1850                         eth_dev->data->dev_private = NULL;
1851                 return 0;
1852         } else if (rc) {
1853                 plt_err("Failed in nix dev fini, rc=%d", rc);
1854         }
1855
1856         return rc;
1857 }
1858
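     /* .dev_close callback: full uninit without reset; the uninit return
      * value is not propagated to the ethdev layer.
      */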
1859 static int
1860 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1861 {
1862         cnxk_eth_dev_uninit(eth_dev, false);
1863         return 0;
1864 }
1865
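     /* .dev_reset callback: uninit with the reset flag set, then re-run
      * cnxk_eth_dev_init() on the same device.
      */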
1866 static int
1867 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1868 {
1869         int rc;
1870
1871         rc = cnxk_eth_dev_uninit(eth_dev, true);
1872         if (rc)
1873                 return rc;
1874
1875         return cnxk_eth_dev_init(eth_dev);
1876 }
1877
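     /* PCI remove: uninit and release the port if it exists; in the primary
      * process, also finalize the base ROC NIX when this device hosts the
      * shared NPA resource and free the device private memory kept alive
      * for it.
      */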
1878 int
1879 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1880 {
1881         struct rte_eth_dev *eth_dev;
1882         struct roc_nix *nix;
1883         int rc = -EINVAL;
1884
1885         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1886         if (eth_dev) {
1887                 /* Cleanup eth dev */
1888                 rc = cnxk_eth_dev_uninit(eth_dev, false);
1889                 if (rc)
1890                         return rc;
1891
1892                 rte_eth_dev_release_port(eth_dev);
1893         }
1894
1895         /* Nothing to be done for secondary processes */
1896         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1897                 return 0;
1898
1899         /* Check if this device is hosting common resource */
1900         nix = roc_idev_npa_nix_get();
1901         if (!nix || nix->pci_dev != pci_dev)
1902                 return 0;
1903
1904         /* Try nix fini now */
1905         rc = roc_nix_dev_fini(nix);
1906         if (rc == -EAGAIN) {
1907                 plt_info("%s: common resource in use by other devices",
1908                          pci_dev->name);
1909                 goto exit;
1910         } else if (rc) {
1911                 plt_err("Failed in nix dev fini, rc=%d", rc);
1912                 goto exit;
1913         }
1914
1915         /* Free device pointer as rte_ethdev does not have it anymore */
1916         rte_free(nix);
1917 exit:
1918         return rc;
1919 }
1920
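     /* PCI probe entry point: allocate a cnxk_eth_dev sized ethdev and run
      * cnxk_eth_dev_init() on it.
      */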
1921 int
1922 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1923 {
1924         int rc;
1925
1926         RTE_SET_USED(pci_drv);
1927
1928         rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1929                                            cnxk_eth_dev_init);
1930
1931         /* On failure in a secondary process, treat it as success if the
1932          * port no longer exists in the primary (i.e. it is mid-detach).
1933          */
1934         if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1935                 if (!rte_eth_dev_allocated(pci_dev->device.name))
1936                         return 0;
1937         return rc;
1938 }