drivers/net/cnxk/cnxk_ethdev.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 #include <cnxk_ethdev.h>
5
6 #include <rte_eventdev.h>
7
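/* Report the Rx offload capabilities for this port. The timestamp offload is
 * not advertised on VF/SDP interfaces or when the HiGig switch header is in
 * use, since Rx timestamping is unavailable in those configurations.
 */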
8 static inline uint64_t
9 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
10 {
11         uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
12
13         if (roc_nix_is_vf_or_sdp(&dev->nix) ||
14             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
15                 capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
16
17         return capa;
18 }
19
20 static inline uint64_t
21 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
22 {
23         RTE_SET_USED(dev);
24         return CNXK_NIX_TX_OFFLOAD_CAPA;
25 }
26
27 static inline uint32_t
28 nix_get_speed_capa(struct cnxk_eth_dev *dev)
29 {
30         uint32_t speed_capa;
31
32         /* Auto negotiation disabled */
33         speed_capa = RTE_ETH_LINK_SPEED_FIXED;
34         if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
35                 speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
36                               RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
37                               RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
38         }
39
40         return speed_capa;
41 }
42
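/* Select whether inbound inline IPsec traffic is handled by the inline
 * device or by the ethdev's own inline LF. Updates the RoC layer for NPC
 * rule insertion and refreshes the SA base in the fast-path lookup memory.
 */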
43 int
44 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
45 {
46         struct roc_nix *nix = &dev->nix;
47
48         if (dev->inb.inl_dev == use_inl_dev)
49                 return 0;
50
51         plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
52                     dev->inb.nb_sess, !!dev->inb.inl_dev);
53
54         /* Change the mode */
55         dev->inb.inl_dev = use_inl_dev;
56
57         /* Update RoC for NPC rule insertion */
58         roc_nix_inb_mode_set(nix, use_inl_dev);
59
60         /* Setup lookup mem */
61         return cnxk_nix_lookup_mem_sa_base_set(dev);
62 }
63
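/* Set up inline IPsec support based on the configured Rx/Tx security
 * offloads: initialize the inline inbound/outbound contexts, allocate dptr
 * scratch buffers for the CPT WRITE_SA micro-code op, and create a bitmap
 * used to manage outbound SA indices.
 */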
64 static int
65 nix_security_setup(struct cnxk_eth_dev *dev)
66 {
67         struct roc_nix *nix = &dev->nix;
68         int i, rc = 0;
69
70         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
71                 /* Setup Inline Inbound */
72                 rc = roc_nix_inl_inb_init(nix);
73                 if (rc) {
74                         plt_err("Failed to initialize nix inline inb, rc=%d",
75                                 rc);
76                         return rc;
77                 }
78
 79                 /* By default, use the inline device for poll mode.
 80                  * This is overridden when event mode RQs are set up.
 81                  */
82                 cnxk_nix_inb_mode_set(dev, true);
83
84                 /* Allocate memory to be used as dptr for CPT ucode
85                  * WRITE_SA op.
86                  */
87                 dev->inb.sa_dptr =
88                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
89                 if (!dev->inb.sa_dptr) {
90                         plt_err("Couldn't allocate memory for SA dptr");
91                         rc = -ENOMEM;
92                         goto cleanup;
93                 }
94         }
95
96         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
97             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
98                 struct plt_bitmap *bmap;
99                 size_t bmap_sz;
100                 void *mem;
101
102                 /* Setup enough descriptors for all tx queues */
103                 nix->outb_nb_desc = dev->outb.nb_desc;
104                 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
105
106                 /* Setup Inline Outbound */
107                 rc = roc_nix_inl_outb_init(nix);
108                 if (rc) {
109                         plt_err("Failed to initialize nix inline outb, rc=%d",
110                                 rc);
111                         goto sa_dptr_free;
112                 }
113
114                 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
115
116                 /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
117                 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
118                         return 0;
119
120                 /* Allocate memory to be used as dptr for CPT ucode
121                  * WRITE_SA op.
122                  */
123                 dev->outb.sa_dptr =
124                         plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
125                 if (!dev->outb.sa_dptr) {
126                         plt_err("Couldn't allocate memory for SA dptr");
127                         rc = -ENOMEM;
128                         goto sa_dptr_free;
129                 }
130
131                 rc = -ENOMEM;
132                 /* Allocate a bitmap used to allocate and free SA indices */
133                 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
134                 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
135                 if (mem == NULL) {
136                         plt_err("Outbound SA bmap alloc failed");
137
138                         rc |= roc_nix_inl_outb_fini(nix);
139                         goto sa_dptr_free;
140                 }
141
142                 rc = -EIO;
143                 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
144                 if (!bmap) {
145                         plt_err("Outbound SA bmap init failed");
146
147                         rc |= roc_nix_inl_outb_fini(nix);
148                         plt_free(mem);
149                         goto sa_dptr_free;
150                 }
151
152                 for (i = 0; i < dev->outb.max_sa; i++)
153                         plt_bitmap_set(bmap, i);
154
155                 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
156                 dev->outb.sa_bmap_mem = mem;
157                 dev->outb.sa_bmap = bmap;
158         }
159         return 0;
160
161 sa_dptr_free:
162         if (dev->inb.sa_dptr)
163                 plt_free(dev->inb.sa_dptr);
164         if (dev->outb.sa_dptr)
165                 plt_free(dev->outb.sa_dptr);
166 cleanup:
167         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
168                 rc |= roc_nix_inl_inb_fini(nix);
169         return rc;
170 }
171
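/* Disable and free all ingress meter (bandwidth profile) objects attached to
 * this port's RQs and release their rte_meter list entries.
 */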
172 static int
173 nix_meter_fini(struct cnxk_eth_dev *dev)
174 {
175         struct cnxk_meter_node *next_mtr = NULL;
176         struct roc_nix_bpf_objs profs = {0};
177         struct cnxk_meter_node *mtr = NULL;
178         struct cnxk_mtr *fms = &dev->mtr;
179         struct roc_nix *nix = &dev->nix;
180         struct roc_nix_rq *rq;
181         uint32_t i;
182         int rc = 0;
183
184         RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
185                 for (i = 0; i < mtr->rq_num; i++) {
186                         rq = &dev->rqs[mtr->rq_id[i]];
187                         rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
188                 }
189
190                 profs.level = mtr->level;
191                 profs.count = 1;
192                 profs.ids[0] = mtr->bpf_id;
193                 rc = roc_nix_bpf_free(nix, &profs, 1);
194
195                 if (rc)
196                         return rc;
197
198                 TAILQ_REMOVE(fms, mtr, next);
199                 plt_free(mtr);
200         }
201         return 0;
202 }
203
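/* Tear down the inline IPsec state set up by nix_security_setup(): destroy
 * any remaining inbound/outbound security sessions, clear the SA lookup
 * memory, finalize the inline contexts and free the outbound SA bitmap and
 * the SA dptr buffers.
 */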
204 static int
205 nix_security_release(struct cnxk_eth_dev *dev)
206 {
207         struct rte_eth_dev *eth_dev = dev->eth_dev;
208         struct cnxk_eth_sec_sess *eth_sec, *tvar;
209         struct roc_nix *nix = &dev->nix;
210         int rc, ret = 0;
211
212         /* Cleanup Inline inbound */
213         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
214                 /* Destroy inbound sessions */
215                 tvar = NULL;
216                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
217                         cnxk_eth_sec_ops.session_destroy(eth_dev,
218                                                          eth_sec->sess);
219
220                 /* Clear lookup mem */
221                 cnxk_nix_lookup_mem_sa_base_clear(dev);
222
223                 rc = roc_nix_inl_inb_fini(nix);
224                 if (rc)
225                         plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
226                 ret |= rc;
227
228                 if (dev->inb.sa_dptr) {
229                         plt_free(dev->inb.sa_dptr);
230                         dev->inb.sa_dptr = NULL;
231                 }
232         }
233
234         /* Cleanup Inline outbound */
235         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
236             dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
237                 /* Destroy outbound sessions */
238                 tvar = NULL;
239                 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
240                         cnxk_eth_sec_ops.session_destroy(eth_dev,
241                                                          eth_sec->sess);
242
243                 rc = roc_nix_inl_outb_fini(nix);
244                 if (rc)
245                         plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
246                 ret |= rc;
247
248                 plt_bitmap_free(dev->outb.sa_bmap);
249                 plt_free(dev->outb.sa_bmap_mem);
250                 dev->outb.sa_bmap = NULL;
251                 dev->outb.sa_bmap_mem = NULL;
252                 if (dev->outb.sa_dptr) {
253                         plt_free(dev->outb.sa_dptr);
254                         dev->outb.sa_dptr = NULL;
255                 }
256         }
257
258         dev->inb.inl_dev = false;
259         roc_nix_inb_mode_set(nix, false);
260         dev->nb_rxq_sso = 0;
261         dev->inb.nb_sess = 0;
262         dev->outb.nb_sess = 0;
263         return ret;
264 }
265
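/* Enable Rx scatter and Tx multi-segment offloads when the configured MTU
 * plus L2 overhead no longer fits in a single mbuf data buffer.
 */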
266 static void
267 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
268 {
269         struct rte_pktmbuf_pool_private *mbp_priv;
270         struct rte_eth_dev *eth_dev;
271         struct cnxk_eth_dev *dev;
272         uint32_t buffsz;
273
274         dev = rxq->dev;
275         eth_dev = dev->eth_dev;
276
277         /* Get rx buffer size */
278         mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
279         buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
280
281         if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
282                 dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
283                 dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
284         }
285 }
286
287 int
288 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
289 {
290         struct rte_eth_dev_data *data = eth_dev->data;
291         struct cnxk_eth_rxq_sp *rxq;
292         int rc;
293
294         rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
295         /* Setup scatter mode if needed by jumbo */
296         nix_enable_mseg_on_jumbo(rxq);
297
298         rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
299         if (rc)
300                 plt_err("Failed to set default MTU size, rc=%d", rc);
301
302         return rc;
303 }
304
305 static int
306 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
307 {
308         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
309         enum roc_nix_fc_mode fc_mode = ROC_NIX_FC_FULL;
310         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
311         int rc;
312
313         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
314         if (roc_model_is_cn96_ax() &&
315             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG)
316                 fc_mode = ROC_NIX_FC_TX;
317
318         /* By default enable flow control */
319         rc = roc_nix_fc_mode_set(&dev->nix, fc_mode);
320         if (rc)
321                 return rc;
322
323         fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
324                                                   RTE_ETH_FC_TX_PAUSE;
325         return rc;
326 }
327
328 static int
329 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
330 {
331         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
332         struct cnxk_fc_cfg *fc = &dev->fc_cfg;
333         struct rte_eth_fc_conf fc_cfg = {0};
334
335         if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
336                 return 0;
337
338         fc_cfg.mode = fc->mode;
339
340         /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
341         if (roc_model_is_cn96_ax() &&
342             dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
343             (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
344                 fc_cfg.mode = (fc_cfg.mode == RTE_ETH_FC_FULL ||
345                                fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
346                                       RTE_ETH_FC_TX_PAUSE :
347                                       RTE_ETH_FC_NONE;
348         }
349
350         return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
351 }
352
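/* Build the 64-bit mbuf "rearm" template used by the fast-path Rx routines.
 * The build-time asserts below pin the relative offsets of data_off, refcnt,
 * nb_segs and port within rte_mbuf so that all four fields can be rewritten
 * with a single 64-bit store. A minimal usage sketch (illustrative only, not
 * the exact fast-path code):
 *
 *   uint64_t rearm = cnxk_nix_rxq_mbuf_setup(dev);
 *   *(uint64_t *)(&mbuf->rearm_data) = rearm;
 */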
353 uint64_t
354 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
355 {
356         uint16_t port_id = dev->eth_dev->data->port_id;
357         struct rte_mbuf mb_def;
358         uint64_t *tmp;
359
360         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
361         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
362                                  offsetof(struct rte_mbuf, data_off) !=
363                          2);
364         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
365                                  offsetof(struct rte_mbuf, data_off) !=
366                          4);
367         RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
368                                  offsetof(struct rte_mbuf, data_off) !=
369                          6);
370         mb_def.nb_segs = 1;
371         mb_def.data_off = RTE_PKTMBUF_HEADROOM +
372                           (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
373         mb_def.port = port_id;
374         rte_mbuf_refcnt_set(&mb_def, 1);
375
376         /* Prevent compiler reordering: rearm_data covers previous fields */
377         rte_compiler_barrier();
378         tmp = (uint64_t *)&mb_def.rearm_data;
379
380         return *tmp;
381 }
382
383 static inline uint8_t
384 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
385 {
386         /*
387          * A maximum of three segments is supported with W8; choose
388          * NIX_MAXSQESZ_W16 for multi-segment offload.
389          */
390         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
391                 return NIX_MAXSQESZ_W16;
392         else
393                 return NIX_MAXSQESZ_W8;
394 }
395
396 int
397 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
398                         uint16_t nb_desc, uint16_t fp_tx_q_sz,
399                         const struct rte_eth_txconf *tx_conf)
400 {
401         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
402         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
403         struct cnxk_eth_txq_sp *txq_sp;
404         struct roc_nix_sq *sq;
405         size_t txq_sz;
406         int rc;
407
408         /* Free memory prior to re-allocation if needed. */
409         if (eth_dev->data->tx_queues[qid] != NULL) {
410                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
411                 dev_ops->tx_queue_release(eth_dev, qid);
412                 eth_dev->data->tx_queues[qid] = NULL;
413         }
414
415         /* When Tx Security offload is enabled, increase tx desc count by
416          * max possible outbound desc count.
417          */
418         if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
419                 nb_desc += dev->outb.nb_desc;
420
421         /* Setup ROC SQ */
422         sq = &dev->sqs[qid];
423         sq->qid = qid;
424         sq->nb_desc = nb_desc;
425         sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
426
427         rc = roc_nix_sq_init(&dev->nix, sq);
428         if (rc) {
429                 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
430                 return rc;
431         }
432
433         rc = -ENOMEM;
434         txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
435         txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
436         if (!txq_sp) {
437                 plt_err("Failed to alloc tx queue mem");
438                 rc |= roc_nix_sq_fini(sq);
439                 return rc;
440         }
441
442         txq_sp->dev = dev;
443         txq_sp->qid = qid;
444         txq_sp->qconf.conf.tx = *tx_conf;
445         /* Queue config should reflect global offloads */
446         txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
447         txq_sp->qconf.nb_desc = nb_desc;
448
449         plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
450                     " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
451                     qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
452                     sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
453
454         /* Store start of fast path area */
455         eth_dev->data->tx_queues[qid] = txq_sp + 1;
456         eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
457         return 0;
458 }
459
460 static void
461 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
462 {
463         void *txq = eth_dev->data->tx_queues[qid];
464         struct cnxk_eth_txq_sp *txq_sp;
465         struct cnxk_eth_dev *dev;
466         struct roc_nix_sq *sq;
467         int rc;
468
469         if (!txq)
470                 return;
471
472         txq_sp = cnxk_eth_txq_to_sp(txq);
473
474         dev = txq_sp->dev;
475
476         plt_nix_dbg("Releasing txq %u", qid);
477
478         /* Cleanup ROC SQ */
479         sq = &dev->sqs[qid];
480         rc = roc_nix_sq_fini(sq);
481         if (rc)
482                 plt_err("Failed to cleanup sq, rc=%d", rc);
483
484         /* Finally free */
485         plt_free(txq_sp);
486 }
487
488 int
489 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
490                         uint16_t nb_desc, uint16_t fp_rx_q_sz,
491                         const struct rte_eth_rxconf *rx_conf,
492                         struct rte_mempool *mp)
493 {
494         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
495         struct roc_nix *nix = &dev->nix;
496         struct cnxk_eth_rxq_sp *rxq_sp;
497         struct rte_mempool_ops *ops;
498         const char *platform_ops;
499         struct roc_nix_rq *rq;
500         struct roc_nix_cq *cq;
501         uint16_t first_skip;
502         int rc = -EINVAL;
503         size_t rxq_sz;
504
505         /* Sanity checks */
506         if (rx_conf->rx_deferred_start == 1) {
507                 plt_err("Deferred Rx start is not supported");
508                 goto fail;
509         }
510
511         platform_ops = rte_mbuf_platform_mempool_ops();
512         /* This driver needs cnxk_npa mempool ops to work */
513         ops = rte_mempool_get_ops(mp->ops_index);
514         if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
515                 plt_err("mempool ops should be of cnxk_npa type");
516                 goto fail;
517         }
518
519         if (mp->pool_id == 0) {
520                 plt_err("Invalid pool_id");
521                 goto fail;
522         }
523
524         /* Free memory prior to re-allocation if needed */
525         if (eth_dev->data->rx_queues[qid] != NULL) {
526                 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
527
528                 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
529                 dev_ops->rx_queue_release(eth_dev, qid);
530                 eth_dev->data->rx_queues[qid] = NULL;
531         }
532
533         /* Clamp the CQ limit to the size of the packet pool aura for LBK
534          * to avoid meta packet drops, as LBK does not currently support
535          * backpressure.
536          */
537         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
538                 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
539
540                 /* Use current RQ's aura limit if inl rq is not available */
541                 if (!pkt_pool_limit)
542                         pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
543                 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
544         }
545
546         /* It's a no-op when the inline device is not used */
547         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
548             dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
549                 roc_nix_inl_dev_xaq_realloc(mp->pool_id);
550
551         /* Setup ROC CQ */
552         cq = &dev->cqs[qid];
553         cq->qid = qid;
554         cq->nb_desc = nb_desc;
555         rc = roc_nix_cq_init(&dev->nix, cq);
556         if (rc) {
557                 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
558                 goto fail;
559         }
560
561         /* Setup ROC RQ */
562         rq = &dev->rqs[qid];
563         rq->qid = qid;
564         rq->aura_handle = mp->pool_id;
565         rq->flow_tag_width = 32;
566         rq->sso_ena = false;
567
568         /* Calculate first mbuf skip */
569         first_skip = (sizeof(struct rte_mbuf));
570         first_skip += RTE_PKTMBUF_HEADROOM;
571         first_skip += rte_pktmbuf_priv_size(mp);
572         rq->first_skip = first_skip;
573         rq->later_skip = sizeof(struct rte_mbuf);
574         rq->lpb_size = mp->elt_size;
575         rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
576
577         /* Enable Inline IPSec on RQ, will not be used for Poll mode */
578         if (roc_nix_inl_inb_is_enabled(nix))
579                 rq->ipsech_ena = true;
580
581         rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
582         if (rc) {
583                 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
584                 goto cq_fini;
585         }
586
587         /* Allocate and setup fast path rx queue */
588         rc = -ENOMEM;
589         rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
590         rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
591         if (!rxq_sp) {
592                 plt_err("Failed to alloc rx queue for rq=%d", qid);
593                 goto rq_fini;
594         }
595
596         /* Setup slow path fields */
597         rxq_sp->dev = dev;
598         rxq_sp->qid = qid;
599         rxq_sp->qconf.conf.rx = *rx_conf;
600         /* Queue config should reflect global offloads */
601         rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
602         rxq_sp->qconf.nb_desc = nb_desc;
603         rxq_sp->qconf.mp = mp;
604
605         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
606                 /* Pass a tag mask used to handle error packets in the inline device.
607                  * The ethdev RQ's tag_mask field will be overwritten later
608                  * when SSO is set up.
609                  */
610                 rq->tag_mask =
611                         0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
612
613                 /* Setup rq reference for inline dev if present */
614                 rc = roc_nix_inl_dev_rq_get(rq);
615                 if (rc)
616                         goto free_mem;
617         }
618
619         plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
620                     cq->nb_desc);
621
622         /* Store start of fast path area */
623         eth_dev->data->rx_queues[qid] = rxq_sp + 1;
624         eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
625
626         /* Calculate the delta and frequency multiplier between the PTP HI clock
627          * and the TSC. These are needed to derive the raw clock value from the
628          * TSC counter; the read_clock eth op returns the raw clock value.
629          */
630         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
631                 rc = cnxk_nix_tsc_convert(dev);
632                 if (rc) {
633                         plt_err("Failed to calculate delta and freq mult");
634                         goto rq_fini;
635                 }
636         }
637
638         return 0;
639 free_mem:
640         plt_free(rxq_sp);
641 rq_fini:
642         rc |= roc_nix_rq_fini(rq);
643 cq_fini:
644         rc |= roc_nix_cq_fini(cq);
645 fail:
646         return rc;
647 }
648
649 static void
650 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
651 {
652         void *rxq = eth_dev->data->rx_queues[qid];
653         struct cnxk_eth_rxq_sp *rxq_sp;
654         struct cnxk_eth_dev *dev;
655         struct roc_nix_rq *rq;
656         struct roc_nix_cq *cq;
657         int rc;
658
659         if (!rxq)
660                 return;
661
662         rxq_sp = cnxk_eth_rxq_to_sp(rxq);
663         dev = rxq_sp->dev;
664         rq = &dev->rqs[qid];
665
666         plt_nix_dbg("Releasing rxq %u", qid);
667
668         /* Release rq reference for inline dev if present */
669         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
670                 roc_nix_inl_dev_rq_put(rq);
671
672         /* Cleanup ROC RQ */
673         rc = roc_nix_rq_fini(rq);
674         if (rc)
675                 plt_err("Failed to cleanup rq, rc=%d", rc);
676
677         /* Cleanup ROC CQ */
678         cq = &dev->cqs[qid];
679         rc = roc_nix_cq_fini(cq);
680         if (rc)
681                 plt_err("Failed to cleanup cq, rc=%d", rc);
682
683         /* Finally free fast path area */
684         plt_free(rxq_sp);
685 }
686
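/* Translate ethdev RSS hash types (RTE_ETH_RSS_*) into the NIX RSS flow key
 * configuration. rss_level selects outer (0), inner (1) or outer+inner (2)
 * header fields via the flow_key_type lookup table below.
 *
 * For example (illustrative only), ethdev_rss = RTE_ETH_RSS_IP |
 * RTE_ETH_RSS_TCP with rss_level 0 yields FLOW_KEY_TYPE_IPV4 |
 * FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_TCP.
 */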
687 uint32_t
688 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
689                        uint8_t rss_level)
690 {
691         uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
692                 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
693                  FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
694                 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
695                  FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
696                  FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
697                 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
698                  FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
699                  FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
700                  FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
701                  FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
702                  FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
703         };
704         uint32_t flowkey_cfg = 0;
705
706         dev->ethdev_rss_hf = ethdev_rss;
707
708         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
709             dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
710                 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
711         }
712
713         if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
714                 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
715
716         if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
717                 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
718
719         if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
720                 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
721
722         if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
723                 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
724
725         if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
726                 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
727
728         if (ethdev_rss & RSS_IPV4_ENABLE)
729                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
730
731         if (ethdev_rss & RSS_IPV6_ENABLE)
732                 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
733
734         if (ethdev_rss & RTE_ETH_RSS_TCP)
735                 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
736
737         if (ethdev_rss & RTE_ETH_RSS_UDP)
738                 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
739
740         if (ethdev_rss & RTE_ETH_RSS_SCTP)
741                 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
742
743         if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
744                 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
745
746         if (ethdev_rss & RSS_IPV6_EX_ENABLE)
747                 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
748
749         if (ethdev_rss & RTE_ETH_RSS_PORT)
750                 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
751
752         if (ethdev_rss & RTE_ETH_RSS_NVGRE)
753                 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
754
755         if (ethdev_rss & RTE_ETH_RSS_VXLAN)
756                 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
757
758         if (ethdev_rss & RTE_ETH_RSS_GENEVE)
759                 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
760
761         if (ethdev_rss & RTE_ETH_RSS_GTPU)
762                 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
763
764         return flowkey_cfg;
765 }
766
767 static void
768 nix_free_queue_mem(struct cnxk_eth_dev *dev)
769 {
770         plt_free(dev->rqs);
771         plt_free(dev->cqs);
772         plt_free(dev->sqs);
773         dev->rqs = NULL;
774         dev->cqs = NULL;
775         dev->sqs = NULL;
776 }
777
778 static int
779 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
780 {
781         struct rte_eth_dev *eth_dev = dev->eth_dev;
782         int rc = 0;
783
784         TAILQ_INIT(&dev->mtr_profiles);
785         TAILQ_INIT(&dev->mtr_policy);
786         TAILQ_INIT(&dev->mtr);
787
788         if (eth_dev->dev_ops->mtr_ops_get == NULL)
789                 return rc;
790
791         return nix_mtr_capabilities_init(eth_dev);
792 }
793
794 static int
795 nix_rss_default_setup(struct cnxk_eth_dev *dev)
796 {
797         struct rte_eth_dev *eth_dev = dev->eth_dev;
798         uint8_t rss_hash_level;
799         uint32_t flowkey_cfg;
800         uint64_t rss_hf;
801
802         rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
803         rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
804         if (rss_hash_level)
805                 rss_hash_level -= 1;
806
807         flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
808         return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
809 }
810
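/* Snapshot the current Rx/Tx queue configuration into dev->rx_qconf and
 * dev->tx_qconf and release the queues, so that nix_restore_queue_cfg() can
 * re-create them with the same parameters after a reconfigure.
 */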
811 static int
812 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
813 {
814         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
815         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
816         struct cnxk_eth_qconf *tx_qconf = NULL;
817         struct cnxk_eth_qconf *rx_qconf = NULL;
818         struct cnxk_eth_rxq_sp *rxq_sp;
819         struct cnxk_eth_txq_sp *txq_sp;
820         int i, nb_rxq, nb_txq;
821         void **txq, **rxq;
822
823         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
824         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
825
826         tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
827         if (tx_qconf == NULL) {
828                 plt_err("Failed to allocate memory for tx_qconf");
829                 goto fail;
830         }
831
832         rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
833         if (rx_qconf == NULL) {
834                 plt_err("Failed to allocate memory for rx_qconf");
835                 goto fail;
836         }
837
838         txq = eth_dev->data->tx_queues;
839         for (i = 0; i < nb_txq; i++) {
840                 if (txq[i] == NULL) {
841                         tx_qconf[i].valid = false;
842                         plt_info("txq[%d] is already released", i);
843                         continue;
844                 }
845                 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
846                 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
847                 tx_qconf[i].valid = true;
848                 dev_ops->tx_queue_release(eth_dev, i);
849                 eth_dev->data->tx_queues[i] = NULL;
850         }
851
852         rxq = eth_dev->data->rx_queues;
853         for (i = 0; i < nb_rxq; i++) {
854                 if (rxq[i] == NULL) {
855                         rx_qconf[i].valid = false;
856                         plt_info("rxq[%d] is already released", i);
857                         continue;
858                 }
859                 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
860                 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
861                 rx_qconf[i].valid = true;
862                 dev_ops->rx_queue_release(eth_dev, i);
863                 eth_dev->data->rx_queues[i] = NULL;
864         }
865
866         dev->tx_qconf = tx_qconf;
867         dev->rx_qconf = rx_qconf;
868         return 0;
869
870 fail:
871         free(tx_qconf);
872         free(rx_qconf);
873         return -ENOMEM;
874 }
875
876 static int
877 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
878 {
879         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
880         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
881         struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
882         struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
883         int rc, i, nb_rxq, nb_txq;
884
885         nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
886         nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
887
888         rc = -ENOMEM;
889         /* Set up Tx & Rx queues with the previous configuration so
890          * that the queues remain functional when ports are started
891          * without reconfiguring the queues.
892          *
893          * The usual reconfiguration sequence looks like:
894          * port_configure() {
895          *      if(reconfigure) {
896          *              queue_release()
897          *              queue_setup()
898          *      }
899          *      queue_configure() {
900          *              queue_release()
901          *              queue_setup()
902          *      }
903          * }
904          * port_start()
905          *
906          * In some applications' control paths, queue_configure() is
907          * NOT invoked for TXQs/RXQs in port_configure().
908          * In such cases, the queues remain functional after start since
909          * they were already set up in port_configure().
910          */
911         for (i = 0; i < nb_txq; i++) {
912                 if (!tx_qconf[i].valid)
913                         continue;
914                 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
915                                              &tx_qconf[i].conf.tx);
916                 if (rc) {
917                         plt_err("Failed to setup tx queue rc=%d", rc);
918                         for (i -= 1; i >= 0; i--)
919                                 dev_ops->tx_queue_release(eth_dev, i);
920                         goto fail;
921                 }
922         }
923
924         free(tx_qconf);
925         tx_qconf = NULL;
926
927         for (i = 0; i < nb_rxq; i++) {
928                 if (!rx_qconf[i].valid)
929                         continue;
930                 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
931                                              &rx_qconf[i].conf.rx,
932                                              rx_qconf[i].mp);
933                 if (rc) {
934                         plt_err("Failed to setup rx queue rc=%d", rc);
935                         for (i -= 1; i >= 0; i--)
936                                 dev_ops->rx_queue_release(eth_dev, i);
937                         goto tx_queue_release;
938                 }
939         }
940
941         free(rx_qconf);
942         rx_qconf = NULL;
943
944         return 0;
945
946 tx_queue_release:
947         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
948                 dev_ops->tx_queue_release(eth_dev, i);
949 fail:
950         free(tx_qconf);
951         free(rx_qconf);
952
953         return rc;
954 }
955
956 static void
957 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
958 {
959         /* These dummy functions are required to support applications
960          * that reconfigure queues without stopping the Tx and Rx burst
961          * threads (e.g. the KNI app).
962          * When the queue context is saved, the Tx/Rx queues are released,
963          * which would crash the app since Rx/Tx burst may still be
964          * running on other lcores.
965          */
966         eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
967         eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
968         rte_mb();
969 }
970
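/* Cache the LSO tunnel format indices returned by roc_nix_lso_fmt_get(): the
 * four tun[] entries (V4V4..V6V6) are packed into the low 32 bits of
 * dev->lso_tun_fmt and the four udp_tun[] entries into the high 32 bits, one
 * byte per format, for use in the Tx fast path.
 */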
971 static int
972 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
973 {
974         uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
975         uint8_t tun[ROC_NIX_LSO_TUN_MAX];
976         struct roc_nix *nix = &dev->nix;
977         int rc;
978
979         rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
980         if (rc)
981                 return rc;
982
983         dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
984                             (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
985                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
986                             (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
987
988         dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
989                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
990                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
991                              (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
992         return 0;
993 }
994
995 static int
996 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
997 {
998         struct roc_nix *nix = &dev->nix;
999         int rc;
1000
1001         /* Nothing much to do if offload is not enabled */
1002         if (!(dev->tx_offloads &
1003               (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1004                RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1005                 return 0;
1006
1007         /* Set up LSO formats in the AF. It's a no-op if another ethdev has
1008          * already set them up.
1009          */
1010         rc = roc_nix_lso_fmt_setup(nix);
1011         if (rc)
1012                 return rc;
1013
1014         return nix_lso_tun_fmt_update(dev);
1015 }
1016
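/* Ethdev dev_configure callback. On a reconfigure, the existing queue state
 * is saved and resources from the previous configuration are released first.
 * The function then allocates the NIX LF and queue arrays, and sets up LSO,
 * RSS, the traffic manager, ingress policer, interrupts, loopback, inline
 * security and flow control, before restoring any saved queues.
 */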
1017 int
1018 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1019 {
1020         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1021         struct rte_eth_dev_data *data = eth_dev->data;
1022         struct rte_eth_conf *conf = &data->dev_conf;
1023         struct rte_eth_rxmode *rxmode = &conf->rxmode;
1024         struct rte_eth_txmode *txmode = &conf->txmode;
1025         char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1026         struct roc_nix_fc_cfg fc_cfg = {0};
1027         struct roc_nix *nix = &dev->nix;
1028         struct rte_ether_addr *ea;
1029         uint8_t nb_rxq, nb_txq;
1030         uint64_t rx_cfg;
1031         void *qs;
1032         int rc;
1033
1034         rc = -EINVAL;
1035
1036         /* Sanity checks */
1037         if (rte_eal_has_hugepages() == 0) {
1038                 plt_err("Huge page is not configured");
1039                 goto fail_configure;
1040         }
1041
1042         if (conf->dcb_capability_en == 1) {
1043                 plt_err("dcb enable is not supported");
1044                 goto fail_configure;
1045         }
1046
1047         if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1048                 plt_err("Flow director is not supported");
1049                 goto fail_configure;
1050         }
1051
1052         if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1053             rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1054                 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1055                 goto fail_configure;
1056         }
1057
1058         if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1059                 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1060                 goto fail_configure;
1061         }
1062
1063         /* Free the resources allocated from the previous configure */
1064         if (dev->configured == 1) {
1065                 /* Unregister queue irq's */
1066                 roc_nix_unregister_queue_irqs(nix);
1067
1068                 /* Unregister CQ irqs if present */
1069                 if (eth_dev->data->dev_conf.intr_conf.rxq)
1070                         roc_nix_unregister_cq_irqs(nix);
1071
1072                 /* Set no-op functions */
1073                 nix_set_nop_rxtx_function(eth_dev);
1074                 /* Store queue config for later */
1075                 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1076                 if (rc)
1077                         goto fail_configure;
1078
1079                 /* Disable and free rte_meter entries */
1080                 rc = nix_meter_fini(dev);
1081                 if (rc)
1082                         goto fail_configure;
1083
1084                 /* Cleanup security support */
1085                 rc = nix_security_release(dev);
1086                 if (rc)
1087                         goto fail_configure;
1088
1089                 roc_nix_tm_fini(nix);
1090                 roc_nix_lf_free(nix);
1091         }
1092
1093         dev->rx_offloads = rxmode->offloads;
1094         dev->tx_offloads = txmode->offloads;
1095
1096         /* Prepare rx cfg */
1097         rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1098         if (dev->rx_offloads &
1099             (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1100                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1101                 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1102         }
1103         rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1104                    ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1105                    ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1106
1107         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1108                 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1109                 /* Disable DROP_RE if the Rx security offload is enabled and
1110                  * the platform does not support it.
1111                  */
1112                 if (dev->ipsecd_drop_re_dis)
1113                         rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1114         }
1115
1116         nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1117         nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1118
1119         /* Alloc a nix lf */
1120         rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1121         if (rc) {
1122                 plt_err("Failed to init nix_lf rc=%d", rc);
1123                 goto fail_configure;
1124         }
1125
1126         dev->npc.channel = roc_nix_get_base_chan(nix);
1127
1128         nb_rxq = data->nb_rx_queues;
1129         nb_txq = data->nb_tx_queues;
1130         rc = -ENOMEM;
1131         if (nb_rxq) {
1132                 /* Allocate memory for roc rq's and cq's */
1133                 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1134                 if (!qs) {
1135                         plt_err("Failed to alloc rqs");
1136                         goto free_nix_lf;
1137                 }
1138                 dev->rqs = qs;
1139
1140                 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1141                 if (!qs) {
1142                         plt_err("Failed to alloc cqs");
1143                         goto free_nix_lf;
1144                 }
1145                 dev->cqs = qs;
1146         }
1147
1148         if (nb_txq) {
1149                 /* Allocate memory for roc sq's */
1150                 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1151                 if (!qs) {
1152                         plt_err("Failed to alloc sqs");
1153                         goto free_nix_lf;
1154                 }
1155                 dev->sqs = qs;
1156         }
1157
1158         /* Re-enable NIX LF error interrupts */
1159         roc_nix_err_intr_ena_dis(nix, true);
1160         roc_nix_ras_intr_ena_dis(nix, true);
1161
1162         if (nix->rx_ptp_ena &&
1163             dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1164                 plt_err("Both PTP and switch header enabled");
1165                 goto free_nix_lf;
1166         }
1167
1168         rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1169                                     dev->npc.pre_l2_size_offset,
1170                                     dev->npc.pre_l2_size_offset_mask,
1171                                     dev->npc.pre_l2_size_shift_dir);
1172         if (rc) {
1173                 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1174                 goto free_nix_lf;
1175         }
1176
1177         /* Setup LSO if needed */
1178         rc = nix_lso_fmt_setup(dev);
1179         if (rc) {
1180                 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1181                 goto free_nix_lf;
1182         }
1183
1184         /* Configure RSS */
1185         rc = nix_rss_default_setup(dev);
1186         if (rc) {
1187                 plt_err("Failed to configure rss rc=%d", rc);
1188                 goto free_nix_lf;
1189         }
1190
1191         /* Init the default TM scheduler hierarchy */
1192         rc = roc_nix_tm_init(nix);
1193         if (rc) {
1194                 plt_err("Failed to init traffic manager, rc=%d", rc);
1195                 goto free_nix_lf;
1196         }
1197
1198         rc = nix_ingress_policer_setup(dev);
1199         if (rc) {
1200                 plt_err("Failed to setup ingress policer rc=%d", rc);
1201                 goto free_nix_lf;
1202         }
1203
1204         rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1205         if (rc) {
1206                 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1207                 goto tm_fini;
1208         }
1209
1210         /* Register queue IRQs */
1211         rc = roc_nix_register_queue_irqs(nix);
1212         if (rc) {
1213                 plt_err("Failed to register queue interrupts rc=%d", rc);
1214                 goto tm_fini;
1215         }
1216
1217         /* Register cq IRQs */
1218         if (eth_dev->data->dev_conf.intr_conf.rxq) {
1219                 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1220                         plt_err("Rx interrupt cannot be enabled, rxq > %d",
1221                                 dev->nix.cints);
1222                         goto q_irq_fini;
1223                 }
1224                 /* The Rx interrupt feature cannot work with vector mode because
1225                  * vector mode does not process packets unless at least 4 packets
1226                  * are received, while CQ interrupts are generated even for a
1227                  * single packet in the CQ.
1228                  */
1229                 dev->scalar_ena = true;
1230
1231                 rc = roc_nix_register_cq_irqs(nix);
1232                 if (rc) {
1233                         plt_err("Failed to register CQ interrupts rc=%d", rc);
1234                         goto q_irq_fini;
1235                 }
1236         }
1237
1238         /* Configure loop back mode */
1239         rc = roc_nix_mac_loopback_enable(nix,
1240                                          eth_dev->data->dev_conf.lpbk_mode);
1241         if (rc) {
1242                 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1243                 goto cq_fini;
1244         }
1245
1246         /* Setup Inline security support */
1247         rc = nix_security_setup(dev);
1248         if (rc)
1249                 goto cq_fini;
1250
1251         /* Init flow control configuration */
1252         fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1253         fc_cfg.rxchan_cfg.enable = true;
1254         rc = roc_nix_fc_config_set(nix, &fc_cfg);
1255         if (rc) {
1256                 plt_err("Failed to initialize flow control rc=%d", rc);
1257                 goto cq_fini;
1258         }
1259
1260         /* Update flow control configuration to PMD */
1261         rc = nix_init_flow_ctrl_config(eth_dev);
1262         if (rc) {
1263                 plt_err("Failed to initialize flow control rc=%d", rc);
1264                 goto cq_fini;
1265         }
1266
1267         /* Initialize TC to SQ mapping as invalid */
1268         memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
1269         /*
1270          * Restore the queue config for the case where a reconfigure is
1271          * followed by another reconfigure with no queue setup from the app.
1272          */
1273         if (dev->configured == 1) {
1274                 rc = nix_restore_queue_cfg(eth_dev);
1275                 if (rc)
1276                         goto sec_release;
1277         }
1278
1279         /* Update the mac address */
1280         ea = eth_dev->data->mac_addrs;
1281         memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1282         if (rte_is_zero_ether_addr(ea))
1283                 rte_eth_random_addr((uint8_t *)ea);
1284
1285         rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1286
1287         plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1288                     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1289                     eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1290                     dev->rx_offloads, dev->tx_offloads);
1291
1292         /* All good */
1293         dev->configured = 1;
1294         dev->nb_rxq = data->nb_rx_queues;
1295         dev->nb_txq = data->nb_tx_queues;
1296         return 0;
1297
1298 sec_release:
1299         rc |= nix_security_release(dev);
1300 cq_fini:
1301         roc_nix_unregister_cq_irqs(nix);
1302 q_irq_fini:
1303         roc_nix_unregister_queue_irqs(nix);
1304 tm_fini:
1305         roc_nix_tm_fini(nix);
1306 free_nix_lf:
1307         nix_free_queue_mem(dev);
1308         rc |= roc_nix_lf_free(nix);
1309 fail_configure:
1310         dev->configured = 0;
1311         return rc;
1312 }
1313
1314 int
1315 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1316 {
1317         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1318         struct rte_eth_dev_data *data = eth_dev->data;
1319         struct roc_nix_sq *sq = &dev->sqs[qid];
1320         int rc = -EINVAL;
1321
1322         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1323                 return 0;
1324
1325         rc = roc_nix_tm_sq_aura_fc(sq, true);
1326         if (rc) {
1327                 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1328                 goto done;
1329         }
1330
1331         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1332 done:
1333         return rc;
1334 }
1335
1336 int
1337 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1338 {
1339         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1340         struct rte_eth_dev_data *data = eth_dev->data;
1341         struct roc_nix_sq *sq = &dev->sqs[qid];
1342         int rc;
1343
1344         if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1345                 return 0;
1346
1347         rc = roc_nix_tm_sq_aura_fc(sq, false);
1348         if (rc) {
1349                 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1350                         rc);
1351                 goto done;
1352         }
1353
1354         data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1355 done:
1356         return rc;
1357 }
1358
1359 static int
1360 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1361 {
1362         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1363         struct rte_eth_dev_data *data = eth_dev->data;
1364         struct roc_nix_rq *rq = &dev->rqs[qid];
1365         int rc;
1366
1367         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1368                 return 0;
1369
1370         rc = roc_nix_rq_ena_dis(rq, true);
1371         if (rc) {
1372                 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1373                 goto done;
1374         }
1375
1376         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1377 done:
1378         return rc;
1379 }
1380
1381 static int
1382 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1383 {
1384         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1385         struct rte_eth_dev_data *data = eth_dev->data;
1386         struct roc_nix_rq *rq = &dev->rqs[qid];
1387         int rc;
1388
1389         if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1390                 return 0;
1391
1392         rc = roc_nix_rq_ena_dis(rq, false);
1393         if (rc) {
1394                 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1395                 goto done;
1396         }
1397
1398         data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1399 done:
1400         return rc;
1401 }
1402
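/* Ethdev dev_stop callback: disable the NPC entries and Rx path, stop link
 * events and the outbound soft-expiry poll, stop all queues while draining
 * any pending Rx packets, and bring the link status down.
 */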
1403 static int
1404 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1405 {
1406         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1407         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1408         struct rte_mbuf *rx_pkts[32];
1409         struct rte_eth_link link;
1410         int count, i, j, rc;
1411         void *rxq;
1412
1413         /* Disable all the NPC entries */
1414         rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
1415         if (rc)
1416                 return rc;
1417
1418         /* Stop link change events */
1419         if (!roc_nix_is_vf_or_sdp(&dev->nix))
1420                 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1421
1422         /* Disable Rx via NPC */
1423         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1424
1425         roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
1426
1427         /* Stop rx queues and free up pkts pending */
1428         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1429                 rc = dev_ops->rx_queue_stop(eth_dev, i);
1430                 if (rc)
1431                         continue;
1432
1433                 rxq = eth_dev->data->rx_queues[i];
1434                 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1435                 while (count) {
1436                         for (j = 0; j < count; j++)
1437                                 rte_pktmbuf_free(rx_pkts[j]);
1438                         count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1439                 }
1440         }
1441
1442         /* Stop tx queues  */
1443         for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1444                 dev_ops->tx_queue_stop(eth_dev, i);
1445
1446         /* Bring down link status internally */
1447         memset(&link, 0, sizeof(link));
1448         rte_eth_linkstatus_set(eth_dev, &link);
1449
1450         return 0;
1451 }
1452
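/* Ethdev dev_start callback: recalculate the MTU/scatter setup, start all
 * Rx/Tx queues, apply the flow control configuration, enable Rx via NPC,
 * restart link events, and enable or disable PTP timestamping as requested.
 */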
1453 int
1454 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1455 {
1456         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1457         int rc, i;
1458
1459         if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1460                 rc = nix_recalc_mtu(eth_dev);
1461                 if (rc)
1462                         return rc;
1463         }
1464
1465         /* Start rx queues */
1466         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1467                 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1468                 if (rc)
1469                         return rc;
1470         }
1471
1472         /* Start tx queues  */
1473         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1474                 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1475                 if (rc)
1476                         return rc;
1477         }
1478
1479         /* Update Flow control configuration */
1480         rc = nix_update_flow_ctrl_config(eth_dev);
1481         if (rc) {
1482                 plt_err("Failed to enable flow control. error code(%d)", rc);
1483                 return rc;
1484         }
1485
1486         /* Enable Rx in NPC */
1487         rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1488         if (rc) {
1489                 plt_err("Failed to enable NPC rx %d", rc);
1490                 return rc;
1491         }
1492
1493         rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
1494         if (rc) {
1495                 plt_err("Failed to enable NPC entries %d", rc);
1496                 return rc;
1497         }
1498
1499         cnxk_nix_toggle_flag_link_cfg(dev, true);
1500
1501         /* Start link change events */
1502         if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1503                 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1504                 if (rc) {
1505                         plt_err("Failed to start cgx link event %d", rc);
1506                         goto rx_disable;
1507                 }
1508         }
1509
1510         /* Enable PTP if it is requested by the user or is already
1511          * enabled on the PF owning this VF.
1512          */
1513         memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1514         if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1515                 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1516         else
1517                 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1518
1519         if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1520                 rc = rte_mbuf_dyn_rx_timestamp_register
1521                         (&dev->tstamp.tstamp_dynfield_offset,
1522                          &dev->tstamp.rx_tstamp_dynflag);
1523                 if (rc != 0) {
1524                         plt_err("Failed to register Rx timestamp field/flag");
1525                         goto rx_disable;
1526                 }
1527         }
1528
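             /* Link configuration is done, clear the in-progress flag */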
1529         cnxk_nix_toggle_flag_link_cfg(dev, false);
1530
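             /* Resume soft expiry polling for inline outbound IPsec SAs */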
1531         roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);
1532
1533         return 0;
1534
1535 rx_disable:
1536         roc_nix_npc_rx_ena_dis(&dev->nix, false);
1537         cnxk_nix_toggle_flag_link_cfg(dev, false);
1538         return rc;
1539 }
1540
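     /* Forward declarations for ops defined after the table below */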
1541 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1542 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1543
1544 /* CNXK platform-independent eth dev ops */
1545 struct eth_dev_ops cnxk_eth_dev_ops = {
1546         .mtu_set = cnxk_nix_mtu_set,
1547         .mac_addr_add = cnxk_nix_mac_addr_add,
1548         .mac_addr_remove = cnxk_nix_mac_addr_del,
1549         .mac_addr_set = cnxk_nix_mac_addr_set,
1550         .dev_infos_get = cnxk_nix_info_get,
1551         .link_update = cnxk_nix_link_update,
1552         .tx_queue_release = cnxk_nix_tx_queue_release,
1553         .rx_queue_release = cnxk_nix_rx_queue_release,
1554         .dev_stop = cnxk_nix_dev_stop,
1555         .dev_close = cnxk_nix_dev_close,
1556         .dev_reset = cnxk_nix_dev_reset,
1557         .tx_queue_start = cnxk_nix_tx_queue_start,
1558         .rx_queue_start = cnxk_nix_rx_queue_start,
1559         .rx_queue_stop = cnxk_nix_rx_queue_stop,
1560         .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1561         .promiscuous_enable = cnxk_nix_promisc_enable,
1562         .promiscuous_disable = cnxk_nix_promisc_disable,
1563         .allmulticast_enable = cnxk_nix_allmulticast_enable,
1564         .allmulticast_disable = cnxk_nix_allmulticast_disable,
1565         .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1566         .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1567         .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1568         .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1569         .priority_flow_ctrl_queue_config =
1570                                 cnxk_nix_priority_flow_ctrl_queue_config,
1571         .priority_flow_ctrl_queue_info_get =
1572                                 cnxk_nix_priority_flow_ctrl_queue_info_get,
1573         .dev_set_link_up = cnxk_nix_set_link_up,
1574         .dev_set_link_down = cnxk_nix_set_link_down,
1575         .get_module_info = cnxk_nix_get_module_info,
1576         .get_module_eeprom = cnxk_nix_get_module_eeprom,
1577         .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1578         .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1579         .pool_ops_supported = cnxk_nix_pool_ops_supported,
1580         .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1581         .stats_get = cnxk_nix_stats_get,
1582         .stats_reset = cnxk_nix_stats_reset,
1583         .xstats_get = cnxk_nix_xstats_get,
1584         .xstats_get_names = cnxk_nix_xstats_get_names,
1585         .xstats_reset = cnxk_nix_xstats_reset,
1586         .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1587         .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1588         .fw_version_get = cnxk_nix_fw_version_get,
1589         .rxq_info_get = cnxk_nix_rxq_info_get,
1590         .txq_info_get = cnxk_nix_txq_info_get,
1591         .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1592         .flow_ops_get = cnxk_nix_flow_ops_get,
1593         .get_reg = cnxk_nix_dev_get_reg,
1594         .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1595         .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1596         .timesync_read_time = cnxk_nix_timesync_read_time,
1597         .timesync_write_time = cnxk_nix_timesync_write_time,
1598         .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1599         .read_clock = cnxk_nix_read_clock,
1600         .reta_update = cnxk_nix_reta_update,
1601         .reta_query = cnxk_nix_reta_query,
1602         .rss_hash_update = cnxk_nix_rss_hash_update,
1603         .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1604         .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1605         .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1606         .tm_ops_get = cnxk_nix_tm_ops_get,
1607         .mtr_ops_get = cnxk_nix_mtr_ops_get,
1608 };
1609
1610 static int
1611 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1612 {
1613         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1614         struct rte_security_ctx *sec_ctx;
1615         struct roc_nix *nix = &dev->nix;
1616         struct rte_pci_device *pci_dev;
1617         int rc, max_entries;
1618
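             /* Set eth dev ops, queue count and descriptor status callbacks */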
1619         eth_dev->dev_ops = &cnxk_eth_dev_ops;
1620         eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
1621         eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
1622         eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
1623
1624         /* Alloc security context */
1625         sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1626         if (!sec_ctx)
1627                 return -ENOMEM;
1628         sec_ctx->device = eth_dev;
1629         sec_ctx->ops = &cnxk_eth_sec_ops;
1630         sec_ctx->flags =
1631                 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1632         eth_dev->security_ctx = sec_ctx;
1633
1634         /* For secondary processes, the primary has done all the work */
1635         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1636                 return 0;
1637
1638         pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1639         rte_eth_copy_pci_info(eth_dev, pci_dev);
1640
1641         /* Parse devargs string */
1642         rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1643         if (rc) {
1644                 plt_err("Failed to parse devargs rc=%d", rc);
1645                 goto error;
1646         }
1647
1648         /* Initialize base roc nix */
1649         nix->pci_dev = pci_dev;
1650         nix->hw_vlan_ins = true;
1651         rc = roc_nix_dev_init(nix);
1652         if (rc) {
1653                 plt_err("Failed to initialize roc nix rc=%d", rc);
1654                 goto error;
1655         }
1656
1657         /* Register callback for link status change events */
1658         roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1659
1660         /* Register link info get callback */
1661         roc_nix_mac_link_info_get_cb_register(nix,
1662                                               cnxk_eth_dev_link_status_get_cb);
1663
1664         dev->eth_dev = eth_dev;
1665         dev->configured = 0;
1666         dev->ptype_disable = 0;
1667
1668         TAILQ_INIT(&dev->inb.list);
1669         TAILQ_INIT(&dev->outb.list);
1670         rte_spinlock_init(&dev->inb.lock);
1671         rte_spinlock_init(&dev->outb.lock);
1672
1673         /* For VFs, the returned max_entries will be 0. But one entry must
1674          * still be allocated to hold the default MAC address, so set it to 1.
1675          */
1676         if (roc_nix_is_vf_or_sdp(nix))
1677                 max_entries = 1;
1678         else
1679                 max_entries = roc_nix_mac_max_entries_get(nix);
1680
1681         if (max_entries <= 0) {
1682                 plt_err("Failed to get max entries for mac addr");
1683                 rc = -ENOTSUP;
1684                 goto dev_fini;
1685         }
1686
1687         eth_dev->data->mac_addrs =
1688                 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1689         if (eth_dev->data->mac_addrs == NULL) {
1690                 plt_err("Failed to allocate memory for mac addr");
1691                 rc = -ENOMEM;
1692                 goto dev_fini;
1693         }
1694
1695         dev->max_mac_entries = max_entries;
1696         dev->dmac_filter_count = 1;
1697
1698         /* Get mac address */
1699         rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1700         if (rc) {
1701                 plt_err("Failed to get mac addr, rc=%d", rc);
1702                 goto free_mac_addrs;
1703         }
1704
1705         /* Update the mac address */
1706         memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1707
1708         if (!roc_nix_is_vf_or_sdp(nix)) {
1709                 /* Sync same MAC address to CGX/RPM table */
1710                 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1711                 if (rc) {
1712                         plt_err("Failed to set mac addr, rc=%d", rc);
1713                         goto free_mac_addrs;
1714                 }
1715         }
1716
1717         /* Union of all capabilities supported by CNXK.
1718          * Platform-specific capabilities will be
1719          * updated later.
1720          */
1721         dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1722         dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1723         dev->speed_capa = nix_get_speed_capa(dev);
1724
1725         /* Initialize roc npc */
1726         dev->npc.roc_nix = nix;
1727         rc = roc_npc_init(&dev->npc);
1728         if (rc)
1729                 goto free_mac_addrs;
1730
1731         plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1732                     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1733                     eth_dev->data->port_id, roc_nix_get_pf(nix),
1734                     roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1735                     dev->rx_offload_capa, dev->tx_offload_capa);
1736         return 0;
1737
1738 free_mac_addrs:
1739         rte_free(eth_dev->data->mac_addrs);
1740 dev_fini:
1741         roc_nix_dev_fini(nix);
1742 error:
1743         plt_err("Failed to init nix eth_dev rc=%d", rc);
1744         return rc;
1745 }
1746
1747 static int
1748 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1749 {
1750         struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1751         const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1752         struct rte_eth_pfc_queue_conf pfc_conf;
1753         struct roc_nix *nix = &dev->nix;
1754         struct rte_eth_fc_conf fc_conf;
1755         int rc, i;
1756
1757         /* Disable switch hdr pkind */
1758         roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1759
1760         plt_free(eth_dev->security_ctx);
1761         eth_dev->security_ctx = NULL;
1762
1763         /* Nothing to be done for secondary processes */
1764         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1765                 return 0;
1766
1767         /* Clear the flag since we are closing down */
1768         dev->configured = 0;
1769
1770         roc_nix_npc_rx_ena_dis(nix, false);
1771
1772         /* Restore 802.3 Flow control configuration */
1773         memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
1774         memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1775         fc_conf.mode = RTE_ETH_FC_NONE;
1776         rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1777
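             /* Disable priority flow control on every TC that was mapped to an SQ */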
1778         pfc_conf.mode = RTE_ETH_FC_NONE;
1779         for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
1780                 if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
1781                         pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
1782                         pfc_conf.rx_pause.tc = i;
1783                         pfc_conf.tx_pause.rx_qid = i;
1784                         pfc_conf.tx_pause.tc = i;
1785                         rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
1786                                 &pfc_conf);
1787                         if (rc)
1788                                 plt_err("Failed to reset PFC. error code(%d)",
1789                                         rc);
1790                 }
1791         }
1792
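             /* Set 802.3 flow control back to full mode */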
1793         fc_conf.mode = RTE_ETH_FC_FULL;
1794         rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1795
1796         /* Disable and free rte_meter entries */
1797         nix_meter_fini(dev);
1798
1799         /* Disable and free rte_flow entries */
1800         roc_npc_fini(&dev->npc);
1801
1802         /* Disable link status events */
1803         roc_nix_mac_link_event_start_stop(nix, false);
1804
1805         /* Unregister the link update callback; this is required to stop VFs
1806          * from receiving link status updates on the exit path.
1807          */
1808         roc_nix_mac_link_cb_unregister(nix);
1809
1810         /* Free up SQs */
1811         for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1812                 dev_ops->tx_queue_release(eth_dev, i);
1813                 eth_dev->data->tx_queues[i] = NULL;
1814         }
1815         eth_dev->data->nb_tx_queues = 0;
1816
1817         /* Free up RQs and CQs */
1818         for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1819                 dev_ops->rx_queue_release(eth_dev, i);
1820                 eth_dev->data->rx_queues[i] = NULL;
1821         }
1822         eth_dev->data->nb_rx_queues = 0;
1823
1824         /* Free security resources */
1825         nix_security_release(dev);
1826
1827         /* Free tm resources */
1828         roc_nix_tm_fini(nix);
1829
1830         /* Unregister queue irqs */
1831         roc_nix_unregister_queue_irqs(nix);
1832
1833         /* Unregister cq irqs */
1834         if (eth_dev->data->dev_conf.intr_conf.rxq)
1835                 roc_nix_unregister_cq_irqs(nix);
1836
1837         /* Free ROC RQ, SQ and CQ memory */
1838         nix_free_queue_mem(dev);
1839
1840         /* Free nix lf resources */
1841         rc = roc_nix_lf_free(nix);
1842         if (rc)
1843                 plt_err("Failed to free nix lf, rc=%d", rc);
1844
1845         rte_free(eth_dev->data->mac_addrs);
1846         eth_dev->data->mac_addrs = NULL;
1847
1848         rc = roc_nix_dev_fini(nix);
1849         /* Can be freed later by PMD if NPA LF is in use */
1850         if (rc == -EAGAIN) {
1851                 if (!reset)
1852                         eth_dev->data->dev_private = NULL;
1853                 return 0;
1854         } else if (rc) {
1855                 plt_err("Failed in nix dev fini, rc=%d", rc);
1856         }
1857
1858         return rc;
1859 }
1860
1861 static int
1862 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1863 {
1864         cnxk_eth_dev_uninit(eth_dev, false);
1865         return 0;
1866 }
1867
1868 static int
1869 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1870 {
1871         int rc;
1872
1873         rc = cnxk_eth_dev_uninit(eth_dev, true);
1874         if (rc)
1875                 return rc;
1876
1877         return cnxk_eth_dev_init(eth_dev);
1878 }
1879
1880 int
1881 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1882 {
1883         struct rte_eth_dev *eth_dev;
1884         struct roc_nix *nix;
1885         int rc = -EINVAL;
1886
1887         eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1888         if (eth_dev) {
1889                 /* Cleanup eth dev */
1890                 rc = cnxk_eth_dev_uninit(eth_dev, false);
1891                 if (rc)
1892                         return rc;
1893
1894                 rte_eth_dev_release_port(eth_dev);
1895         }
1896
1897         /* Nothing to be done for secondary processes */
1898         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1899                 return 0;
1900
1901         /* Check if this device is hosting the common resource */
1902         nix = roc_idev_npa_nix_get();
1903         if (nix->pci_dev != pci_dev)
1904                 return 0;
1905
1906         /* Try nix fini now */
1907         rc = roc_nix_dev_fini(nix);
1908         if (rc == -EAGAIN) {
1909                 plt_info("%s: common resource in use by other devices",
1910                          pci_dev->name);
1911                 goto exit;
1912         } else if (rc) {
1913                 plt_err("Failed in nix dev fini, rc=%d", rc);
1914                 goto exit;
1915         }
1916
1917         /* Free device pointer as rte_ethdev does not have it anymore */
1918         rte_free(nix);
1919 exit:
1920         return rc;
1921 }
1922
1923 int
1924 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1925 {
1926         int rc;
1927
1928         RTE_SET_USED(pci_drv);
1929
1930         rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1931                                            cnxk_eth_dev_init);
1932
1933         /* On failure in a secondary process, recheck whether the port still
1934          * exists in the primary or is in the middle of being detached.
1935          */
1936         if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1937                 if (!rte_eth_dev_allocated(pci_dev->device.name))
1938                         return 0;
1939         return rc;
1940 }