1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
4 #include <cnxk_ethdev.h>
6 #include <rte_eventdev.h>
9 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
11 uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
13 if (roc_nix_is_vf_or_sdp(&dev->nix) ||
14 dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
15 capa &= ~RTE_ETH_RX_OFFLOAD_TIMESTAMP;
20 static inline uint64_t
21 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
24 return CNXK_NIX_TX_OFFLOAD_CAPA;
27 static inline uint32_t
28 nix_get_speed_capa(struct cnxk_eth_dev *dev)
32 /* Auto negotiation disabled */
33 speed_capa = RTE_ETH_LINK_SPEED_FIXED;
34 if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
35 speed_capa |= RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G |
36 RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_40G |
37 RTE_ETH_LINK_SPEED_50G | RTE_ETH_LINK_SPEED_100G;
44 cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
46 struct roc_nix *nix = &dev->nix;
48 if (dev->inb.inl_dev == use_inl_dev)
51 plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
52 dev->inb.nb_sess, !!dev->inb.inl_dev);
55 dev->inb.inl_dev = use_inl_dev;
57 /* Update RoC for NPC rule insertion */
58 roc_nix_inb_mode_set(nix, use_inl_dev);
60 /* Setup lookup mem */
61 return cnxk_nix_lookup_mem_sa_base_set(dev);
65 nix_security_setup(struct cnxk_eth_dev *dev)
67 struct roc_nix *nix = &dev->nix;
70 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
71 /* Setup Inline Inbound */
72 rc = roc_nix_inl_inb_init(nix);
74 plt_err("Failed to initialize nix inline inb, rc=%d",
79 /* By default pick using inline device for poll mode.
80 * Will be overridden when event mode RQs are set up.
82 cnxk_nix_inb_mode_set(dev, true);
84 /* Allocate memory to be used as dptr for CPT ucode
88 plt_zmalloc(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ, 0);
89 if (!dev->inb.sa_dptr) {
90 plt_err("Couldn't allocate memory for SA dptr");
96 if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
97 dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
98 struct plt_bitmap *bmap;
102 /* Setup enough descriptors for all tx queues */
103 nix->outb_nb_desc = dev->outb.nb_desc;
104 nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
106 /* Setup Inline Outbound */
107 rc = roc_nix_inl_outb_init(nix);
109 plt_err("Failed to initialize nix inline outb, rc=%d",
114 dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
116 /* Skip the rest if RTE_ETH_TX_OFFLOAD_SECURITY is not enabled */
117 if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY))
120 /* Allocate memory to be used as dptr for CPT ucode
124 plt_zmalloc(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ, 0);
125 if (!dev->outb.sa_dptr) {
126 plt_err("Couldn't allocate memory for SA dptr");
132 /* Allocate a bitmap to alloc and free sa indexes */
133 bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
134 mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
136 plt_err("Outbound SA bmap alloc failed");
138 rc |= roc_nix_inl_outb_fini(nix);
143 bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
145 plt_err("Outbound SA bmap init failed");
147 rc |= roc_nix_inl_outb_fini(nix);
152 for (i = 0; i < dev->outb.max_sa; i++)
153 plt_bitmap_set(bmap, i);
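/* Note: every SA index starts out marked as set (i.e. free). Outbound
 * session creation is expected to scan this bitmap for a set bit, clear
 * it to claim the index, and set it again on session destroy
 * (an assumption based on the alloc/free comment above).
 */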
155 dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
156 dev->outb.sa_bmap_mem = mem;
157 dev->outb.sa_bmap = bmap;
162 if (dev->inb.sa_dptr)
163 plt_free(dev->inb.sa_dptr);
164 if (dev->outb.sa_dptr)
165 plt_free(dev->outb.sa_dptr);
167 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
168 rc |= roc_nix_inl_inb_fini(nix);
173 nix_meter_fini(struct cnxk_eth_dev *dev)
175 struct cnxk_meter_node *next_mtr = NULL;
176 struct roc_nix_bpf_objs profs = {0};
177 struct cnxk_meter_node *mtr = NULL;
178 struct cnxk_mtr *fms = &dev->mtr;
179 struct roc_nix *nix = &dev->nix;
180 struct roc_nix_rq *rq;
184 RTE_TAILQ_FOREACH_SAFE(mtr, fms, next, next_mtr) {
185 for (i = 0; i < mtr->rq_num; i++) {
186 rq = &dev->rqs[mtr->rq_id[i]];
187 rc |= roc_nix_bpf_ena_dis(nix, mtr->bpf_id, rq, false);
190 profs.level = mtr->level;
192 profs.ids[0] = mtr->bpf_id;
193 rc = roc_nix_bpf_free(nix, &profs, 1);
198 TAILQ_REMOVE(fms, mtr, next);
205 nix_security_release(struct cnxk_eth_dev *dev)
207 struct rte_eth_dev *eth_dev = dev->eth_dev;
208 struct cnxk_eth_sec_sess *eth_sec, *tvar;
209 struct roc_nix *nix = &dev->nix;
212 /* Cleanup Inline inbound */
213 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
214 /* Destroy inbound sessions */
216 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
217 cnxk_eth_sec_ops.session_destroy(eth_dev,
220 /* Clear lookup mem */
221 cnxk_nix_lookup_mem_sa_base_clear(dev);
223 rc = roc_nix_inl_inb_fini(nix);
225 plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
228 if (dev->inb.sa_dptr) {
229 plt_free(dev->inb.sa_dptr);
230 dev->inb.sa_dptr = NULL;
234 /* Cleanup Inline outbound */
235 if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
236 dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
237 /* Destroy outbound sessions */
239 RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
240 cnxk_eth_sec_ops.session_destroy(eth_dev,
243 rc = roc_nix_inl_outb_fini(nix);
245 plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
248 plt_bitmap_free(dev->outb.sa_bmap);
249 plt_free(dev->outb.sa_bmap_mem);
250 dev->outb.sa_bmap = NULL;
251 dev->outb.sa_bmap_mem = NULL;
252 if (dev->outb.sa_dptr) {
253 plt_free(dev->outb.sa_dptr);
254 dev->outb.sa_dptr = NULL;
258 dev->inb.inl_dev = false;
259 roc_nix_inb_mode_set(nix, false);
261 dev->inb.nb_sess = 0;
262 dev->outb.nb_sess = 0;
267 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
269 struct rte_pktmbuf_pool_private *mbp_priv;
270 struct rte_eth_dev *eth_dev;
271 struct cnxk_eth_dev *dev;
275 eth_dev = dev->eth_dev;
277 /* Get rx buffer size */
278 mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
279 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
281 if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
282 dev->rx_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
283 dev->tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
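/* i.e. once a frame of MTU + L2 overhead no longer fits in a single
 * mbuf data room, Rx scatter and Tx multi-seg are forced on so that
 * jumbo frames can span multiple mbuf segments.
 */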
288 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
290 struct rte_eth_dev_data *data = eth_dev->data;
291 struct cnxk_eth_rxq_sp *rxq;
294 rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
295 /* Setup scatter mode if needed by jumbo */
296 nix_enable_mseg_on_jumbo(rxq);
298 rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
300 plt_err("Failed to set default MTU size, rc=%d", rc);
306 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
308 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
309 enum roc_nix_fc_mode fc_mode = ROC_NIX_FC_FULL;
310 struct cnxk_fc_cfg *fc = &dev->fc_cfg;
313 /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
314 if (roc_model_is_cn96_ax() &&
315 dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG)
316 fc_mode = ROC_NIX_FC_TX;
318 /* By default enable flow control */
319 rc = roc_nix_fc_mode_set(&dev->nix, fc_mode);
323 fc->mode = (fc_mode == ROC_NIX_FC_FULL) ? RTE_ETH_FC_FULL :
329 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
331 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
332 struct cnxk_fc_cfg *fc = &dev->fc_cfg;
333 struct rte_eth_fc_conf fc_cfg = {0};
335 if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
338 fc_cfg.mode = fc->mode;
340 /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
341 if (roc_model_is_cn96_ax() &&
342 dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
343 (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
345 (fc_cfg.mode == RTE_ETH_FC_FULL ||
346 fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
347 RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
350 return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
354 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
356 uint16_t port_id = dev->eth_dev->data->port_id;
357 struct rte_mbuf mb_def;
360 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
361 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
362 offsetof(struct rte_mbuf, data_off) !=
364 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
365 offsetof(struct rte_mbuf, data_off) !=
367 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
368 offsetof(struct rte_mbuf, data_off) !=
371 mb_def.data_off = RTE_PKTMBUF_HEADROOM +
372 (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
373 mb_def.port = port_id;
374 rte_mbuf_refcnt_set(&mb_def, 1);
376 /* Prevent compiler reordering: rearm_data covers previous fields */
377 rte_compiler_barrier();
378 tmp = (uint64_t *)&mb_def.rearm_data;
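/* data_off, refcnt, nb_segs and port sit back to back inside the 64-bit
 * rearm_data word (the build-time asserts above verify the offsets), so
 * reading rearm_data as a uint64_t yields a template value that the Rx
 * fast path can presumably apply to each mbuf with one 64-bit store.
 */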
383 static inline uint8_t
384 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
387 * A maximum of three segments can be supported with W8; choose
388 * NIX_MAXSQESZ_W16 for multi-segment offload.
390 if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
391 return NIX_MAXSQESZ_W16;
393 return NIX_MAXSQESZ_W8;
397 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
398 uint16_t nb_desc, uint16_t fp_tx_q_sz,
399 const struct rte_eth_txconf *tx_conf)
401 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
402 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
403 struct cnxk_eth_txq_sp *txq_sp;
404 struct roc_nix_sq *sq;
408 /* Free memory prior to re-allocation if needed. */
409 if (eth_dev->data->tx_queues[qid] != NULL) {
410 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
411 dev_ops->tx_queue_release(eth_dev, qid);
412 eth_dev->data->tx_queues[qid] = NULL;
415 /* When Tx Security offload is enabled, increase tx desc count by
416 * max possible outbound desc count.
418 if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
419 nb_desc += dev->outb.nb_desc;
424 sq->nb_desc = nb_desc;
425 sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
427 rc = roc_nix_sq_init(&dev->nix, sq);
429 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
434 txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
435 txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
437 plt_err("Failed to alloc tx queue mem");
438 rc |= roc_nix_sq_fini(sq);
444 txq_sp->qconf.conf.tx = *tx_conf;
445 /* Queue config should reflect global offloads */
446 txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
447 txq_sp->qconf.nb_desc = nb_desc;
449 plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
450 " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
451 qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
452 sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
454 /* Store start of fast path area */
455 eth_dev->data->tx_queues[qid] = txq_sp + 1;
456 eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
461 cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
463 void *txq = eth_dev->data->tx_queues[qid];
464 struct cnxk_eth_txq_sp *txq_sp;
465 struct cnxk_eth_dev *dev;
466 struct roc_nix_sq *sq;
472 txq_sp = cnxk_eth_txq_to_sp(txq);
476 plt_nix_dbg("Releasing txq %u", qid);
480 rc = roc_nix_sq_fini(sq);
482 plt_err("Failed to cleanup sq, rc=%d", rc);
489 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
490 uint16_t nb_desc, uint16_t fp_rx_q_sz,
491 const struct rte_eth_rxconf *rx_conf,
492 struct rte_mempool *mp)
494 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
495 struct roc_nix *nix = &dev->nix;
496 struct cnxk_eth_rxq_sp *rxq_sp;
497 struct rte_mempool_ops *ops;
498 const char *platform_ops;
499 struct roc_nix_rq *rq;
500 struct roc_nix_cq *cq;
506 if (rx_conf->rx_deferred_start == 1) {
507 plt_err("Deferred Rx start is not supported");
511 platform_ops = rte_mbuf_platform_mempool_ops();
512 /* This driver needs cnxk_npa mempool ops to work */
513 ops = rte_mempool_get_ops(mp->ops_index);
514 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
515 plt_err("mempool ops should be of cnxk_npa type");
519 if (mp->pool_id == 0) {
520 plt_err("Invalid pool_id");
524 /* Free memory prior to re-allocation if needed */
525 if (eth_dev->data->rx_queues[qid] != NULL) {
526 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
528 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
529 dev_ops->rx_queue_release(eth_dev, qid);
530 eth_dev->data->rx_queues[qid] = NULL;
533 /* Clamp the cq limit to the size of the packet pool aura for LBK
534 * to avoid meta packet drop as LBK does not currently support
537 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
538 uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
540 /* Use current RQ's aura limit if inl rq is not available */
542 pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
543 nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
546 /* It's a no-op when the inline device is not used */
547 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY ||
548 dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
549 roc_nix_inl_dev_xaq_realloc(mp->pool_id);
554 cq->nb_desc = nb_desc;
555 rc = roc_nix_cq_init(&dev->nix, cq);
557 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
564 rq->aura_handle = mp->pool_id;
565 rq->flow_tag_width = 32;
568 /* Calculate first mbuf skip */
569 first_skip = (sizeof(struct rte_mbuf));
570 first_skip += RTE_PKTMBUF_HEADROOM;
571 first_skip += rte_pktmbuf_priv_size(mp);
572 rq->first_skip = first_skip;
573 rq->later_skip = sizeof(struct rte_mbuf);
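/* first_skip is the byte offset from the buffer start to the packet data
 * of the first segment (mbuf header + headroom + mempool private area),
 * while later_skip only skips the mbuf header for subsequent segments.
 */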
574 rq->lpb_size = mp->elt_size;
575 rq->lpb_drop_ena = !(dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY);
577 /* Enable Inline IPSec on RQ, will not be used for Poll mode */
578 if (roc_nix_inl_inb_is_enabled(nix))
579 rq->ipsech_ena = true;
581 rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
583 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
587 /* Allocate and setup fast path rx queue */
589 rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
590 rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
592 plt_err("Failed to alloc rx queue for rq=%d", qid);
596 /* Setup slow path fields */
599 rxq_sp->qconf.conf.rx = *rx_conf;
600 /* Queue config should reflect global offloads */
601 rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
602 rxq_sp->qconf.nb_desc = nb_desc;
603 rxq_sp->qconf.mp = mp;
605 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
606 /* Pass a tagmask used to handle error packets in inline device.
607 * Ethdev rq's tag_mask field will be overwritten later
611 0x0FF00000 | ((uint32_t)RTE_EVENT_TYPE_ETHDEV << 28);
613 /* Setup rq reference for inline dev if present */
614 rc = roc_nix_inl_dev_rq_get(rq);
619 plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
622 /* Store start of fast path area */
623 eth_dev->data->rx_queues[qid] = rxq_sp + 1;
624 eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
626 /* Calculate the delta and freq mult between the PTP HI clock and tsc.
627 * These are needed to derive the raw clock value from the tsc counter;
628 * the read_clock eth op returns the raw clock value.
630 if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
631 rc = cnxk_nix_tsc_convert(dev);
633 plt_err("Failed to calculate delta and freq mult");
642 rc |= roc_nix_rq_fini(rq);
644 rc |= roc_nix_cq_fini(cq);
650 cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
652 void *rxq = eth_dev->data->rx_queues[qid];
653 struct cnxk_eth_rxq_sp *rxq_sp;
654 struct cnxk_eth_dev *dev;
655 struct roc_nix_rq *rq;
656 struct roc_nix_cq *cq;
662 rxq_sp = cnxk_eth_rxq_to_sp(rxq);
666 plt_nix_dbg("Releasing rxq %u", qid);
668 /* Release rq reference for inline dev if present */
669 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
670 roc_nix_inl_dev_rq_put(rq);
673 rc = roc_nix_rq_fini(rq);
675 plt_err("Failed to cleanup rq, rc=%d", rc);
679 rc = roc_nix_cq_fini(cq);
681 plt_err("Failed to cleanup cq, rc=%d", rc);
683 /* Finally free fast path area */
688 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
691 uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
692 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
693 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
694 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
695 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
696 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
697 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
698 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
699 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
700 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
701 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
702 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
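/* Row 0 holds the outer-header flow key types, row 1 the inner-header
 * variants and row 2 both; rss_level selects the row while the ethdev
 * RSS bits below select the columns (IPv4/IPv6/TCP/UDP/SCTP/DMAC).
 */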
704 uint32_t flowkey_cfg = 0;
706 dev->ethdev_rss_hf = ethdev_rss;
708 if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
709 dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
710 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
713 if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
714 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
716 if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
717 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
719 if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
720 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
722 if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
723 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
725 if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
726 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
728 if (ethdev_rss & RSS_IPV4_ENABLE)
729 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
731 if (ethdev_rss & RSS_IPV6_ENABLE)
732 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
734 if (ethdev_rss & RTE_ETH_RSS_TCP)
735 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
737 if (ethdev_rss & RTE_ETH_RSS_UDP)
738 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
740 if (ethdev_rss & RTE_ETH_RSS_SCTP)
741 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
743 if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
744 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
746 if (ethdev_rss & RSS_IPV6_EX_ENABLE)
747 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
749 if (ethdev_rss & RTE_ETH_RSS_PORT)
750 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
752 if (ethdev_rss & RTE_ETH_RSS_NVGRE)
753 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
755 if (ethdev_rss & RTE_ETH_RSS_VXLAN)
756 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
758 if (ethdev_rss & RTE_ETH_RSS_GENEVE)
759 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
761 if (ethdev_rss & RTE_ETH_RSS_GTPU)
762 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
768 nix_free_queue_mem(struct cnxk_eth_dev *dev)
779 nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
781 struct rte_eth_dev *eth_dev = dev->eth_dev;
784 TAILQ_INIT(&dev->mtr_profiles);
785 TAILQ_INIT(&dev->mtr_policy);
786 TAILQ_INIT(&dev->mtr);
788 if (eth_dev->dev_ops->mtr_ops_get == NULL)
791 return nix_mtr_capabilities_init(eth_dev);
795 nix_rss_default_setup(struct cnxk_eth_dev *dev)
797 struct rte_eth_dev *eth_dev = dev->eth_dev;
798 uint8_t rss_hash_level;
799 uint32_t flowkey_cfg;
802 rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
803 rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
807 flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
808 return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
812 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
814 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
815 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
816 struct cnxk_eth_qconf *tx_qconf = NULL;
817 struct cnxk_eth_qconf *rx_qconf = NULL;
818 struct cnxk_eth_rxq_sp *rxq_sp;
819 struct cnxk_eth_txq_sp *txq_sp;
820 int i, nb_rxq, nb_txq;
823 nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
824 nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
826 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
827 if (tx_qconf == NULL) {
828 plt_err("Failed to allocate memory for tx_qconf");
832 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
833 if (rx_qconf == NULL) {
834 plt_err("Failed to allocate memory for rx_qconf");
838 txq = eth_dev->data->tx_queues;
839 for (i = 0; i < nb_txq; i++) {
840 if (txq[i] == NULL) {
841 tx_qconf[i].valid = false;
842 plt_info("txq[%d] is already released", i);
845 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
846 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
847 tx_qconf[i].valid = true;
848 dev_ops->tx_queue_release(eth_dev, i);
849 eth_dev->data->tx_queues[i] = NULL;
852 rxq = eth_dev->data->rx_queues;
853 for (i = 0; i < nb_rxq; i++) {
854 if (rxq[i] == NULL) {
855 rx_qconf[i].valid = false;
856 plt_info("rxq[%d] is already released", i);
859 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
860 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
861 rx_qconf[i].valid = true;
862 dev_ops->rx_queue_release(eth_dev, i);
863 eth_dev->data->rx_queues[i] = NULL;
866 dev->tx_qconf = tx_qconf;
867 dev->rx_qconf = rx_qconf;
877 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
879 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
880 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
881 struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
882 struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
883 int rc, i, nb_rxq, nb_txq;
885 nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
886 nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
889 /* Setup tx & rx queues with previous configuration so
890 * that the queues can be functional in cases where ports
891 * are started without reconfiguring queues.
893 * The usual reconfig sequence is like below:
899 * queue_configure() {
906 * In some application's control path, queue_configure() would
907 * NOT be invoked for TXQs/RXQs in port_configure().
908 * In such cases, queues can be functional after start as the
909 * queues are already set up in port_configure().
911 for (i = 0; i < nb_txq; i++) {
912 if (!tx_qconf[i].valid)
914 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
915 &tx_qconf[i].conf.tx);
917 plt_err("Failed to setup tx queue rc=%d", rc);
918 for (i -= 1; i >= 0; i--)
919 dev_ops->tx_queue_release(eth_dev, i);
927 for (i = 0; i < nb_rxq; i++) {
928 if (!rx_qconf[i].valid)
930 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
931 &rx_qconf[i].conf.rx,
934 plt_err("Failed to setup rx queue rc=%d", rc);
935 for (i -= 1; i >= 0; i--)
936 dev_ops->rx_queue_release(eth_dev, i);
937 goto tx_queue_release;
947 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
948 dev_ops->tx_queue_release(eth_dev, i);
957 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
959 /* These dummy functions are required to support applications
960 * that reconfigure queues without stopping the tx and rx burst
961 * threads (e.g. the kni app). When the queue context is saved,
962 * txqs/rxqs are released, which would crash the app
963 * since rx/tx burst may still be running
964 * on different lcores.
966 eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
967 eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
972 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
974 uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
975 uint8_t tun[ROC_NIX_LSO_TUN_MAX];
976 struct roc_nix *nix = &dev->nix;
979 rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
983 dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
984 (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
985 (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
986 (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
988 dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
989 (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
990 (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
991 (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
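/* The eight tunnel LSO format indices are packed into one 64-bit word:
 * bytes 0-3 hold the plain tunnel formats (V4V4, V4V6, V6V4, V6V6) and
 * bytes 4-7 the UDP-tunnel variants, presumably so the Tx path can
 * extract the right index with a simple shift and mask.
 */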
996 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
998 struct roc_nix *nix = &dev->nix;
1001 /* Nothing much to do if offload is not enabled */
1002 if (!(dev->tx_offloads &
1003 (RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
1004 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)))
1007 /* Setup LSO formats in AF. It's a no-op if another ethdev has
1010 rc = roc_nix_lso_fmt_setup(nix);
1014 return nix_lso_tun_fmt_update(dev);
1018 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
1020 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1021 struct rte_eth_dev_data *data = eth_dev->data;
1022 struct rte_eth_conf *conf = &data->dev_conf;
1023 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1024 struct rte_eth_txmode *txmode = &conf->txmode;
1025 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1026 struct roc_nix_fc_cfg fc_cfg = {0};
1027 struct roc_nix *nix = &dev->nix;
1028 struct rte_ether_addr *ea;
1029 uint8_t nb_rxq, nb_txq;
1037 if (rte_eal_has_hugepages() == 0) {
1038 plt_err("Huge page is not configured");
1039 goto fail_configure;
1042 if (conf->dcb_capability_en == 1) {
1043 plt_err("dcb enable is not supported");
1044 goto fail_configure;
1047 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1048 plt_err("Flow director is not supported");
1049 goto fail_configure;
1052 if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE &&
1053 rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
1054 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1055 goto fail_configure;
1058 if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
1059 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
1060 goto fail_configure;
1063 /* Free the resources allocated from the previous configure */
1064 if (dev->configured == 1) {
1065 /* Unregister queue irqs */
1066 roc_nix_unregister_queue_irqs(nix);
1068 /* Unregister CQ irqs if present */
1069 if (eth_dev->data->dev_conf.intr_conf.rxq)
1070 roc_nix_unregister_cq_irqs(nix);
1072 /* Set no-op functions */
1073 nix_set_nop_rxtx_function(eth_dev);
1074 /* Store queue config for later */
1075 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1077 goto fail_configure;
1079 /* Disable and free rte_meter entries */
1080 rc = nix_meter_fini(dev);
1082 goto fail_configure;
1084 /* Cleanup security support */
1085 rc = nix_security_release(dev);
1087 goto fail_configure;
1089 roc_nix_tm_fini(nix);
1090 roc_nix_lf_free(nix);
1093 dev->rx_offloads = rxmode->offloads;
1094 dev->tx_offloads = txmode->offloads;
1096 /* Prepare rx cfg */
1097 rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
1098 if (dev->rx_offloads &
1099 (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM)) {
1100 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
1101 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
1103 rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
1104 ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
1105 ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
1107 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
1108 rx_cfg |= ROC_NIX_LF_RX_CFG_IP6_UDP_OPT;
1109 /* Disable drop re if rx offload security is enabled and
1110 * platform does not support it.
1112 if (dev->ipsecd_drop_re_dis)
1113 rx_cfg &= ~(ROC_NIX_LF_RX_CFG_DROP_RE);
1116 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1117 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1119 /* Alloc a nix lf */
1120 rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
1122 plt_err("Failed to init nix_lf rc=%d", rc);
1123 goto fail_configure;
1126 dev->npc.channel = roc_nix_get_base_chan(nix);
1128 nb_rxq = data->nb_rx_queues;
1129 nb_txq = data->nb_tx_queues;
1132 /* Allocate memory for roc rq's and cq's */
1133 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
1135 plt_err("Failed to alloc rqs");
1140 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
1142 plt_err("Failed to alloc cqs");
1149 /* Allocate memory for roc sq's */
1150 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
1152 plt_err("Failed to alloc sqs");
1158 /* Re-enable NIX LF error interrupts */
1159 roc_nix_err_intr_ena_dis(nix, true);
1160 roc_nix_ras_intr_ena_dis(nix, true);
1162 if (nix->rx_ptp_ena &&
1163 dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
1164 plt_err("Both PTP and switch header enabled");
1168 rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type,
1169 dev->npc.pre_l2_size_offset,
1170 dev->npc.pre_l2_size_offset_mask,
1171 dev->npc.pre_l2_size_shift_dir);
1173 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
1177 /* Setup LSO if needed */
1178 rc = nix_lso_fmt_setup(dev);
1180 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
1185 rc = nix_rss_default_setup(dev);
1187 plt_err("Failed to configure rss rc=%d", rc);
1191 /* Init the default TM scheduler hierarchy */
1192 rc = roc_nix_tm_init(nix);
1194 plt_err("Failed to init traffic manager, rc=%d", rc);
1198 rc = nix_ingress_policer_setup(dev);
1200 plt_err("Failed to setup ingress policer rc=%d", rc);
1204 rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
1206 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
1210 /* Register queue IRQs */
1211 rc = roc_nix_register_queue_irqs(nix);
1213 plt_err("Failed to register queue interrupts rc=%d", rc);
1217 /* Register cq IRQs */
1218 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1219 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
1220 plt_err("Rx interrupt cannot be enabled, rxq > %d",
1224 /* The Rx interrupt feature cannot work with vector mode because
1225 * vector mode does not process packets unless a minimum of 4 pkts
1226 * are received, while cq interrupts are generated even for 1 pkt
1229 dev->scalar_ena = true;
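/* Forcing scalar_ena here presumably makes the driver select the scalar
 * Rx/Tx burst routines instead of the vector ones when the fast-path
 * functions are chosen.
 */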
1231 rc = roc_nix_register_cq_irqs(nix);
1233 plt_err("Failed to register CQ interrupts rc=%d", rc);
1238 /* Configure loop back mode */
1239 rc = roc_nix_mac_loopback_enable(nix,
1240 eth_dev->data->dev_conf.lpbk_mode);
1242 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
1246 /* Setup Inline security support */
1247 rc = nix_security_setup(dev);
1251 /* Init flow control configuration */
1252 fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
1253 fc_cfg.rxchan_cfg.enable = true;
1254 rc = roc_nix_fc_config_set(nix, &fc_cfg);
1256 plt_err("Failed to initialize flow control rc=%d", rc);
1260 /* Update flow control configuration to PMD */
1261 rc = nix_init_flow_ctrl_config(eth_dev);
1263 plt_err("Failed to initialize flow control rc=%d", rc);
1267 /* Initialize TC to SQ mapping as invalid */
1268 memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
1270 * Restore queue config when a reconfigure follows an earlier
1271 * configure and the application has not invoked queue configure again.
1273 if (dev->configured == 1) {
1274 rc = nix_restore_queue_cfg(eth_dev);
1279 /* Update the mac address */
1280 ea = eth_dev->data->mac_addrs;
1281 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1282 if (rte_is_zero_ether_addr(ea))
1283 rte_eth_random_addr((uint8_t *)ea);
1285 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1287 plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1288 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
1289 eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
1290 dev->rx_offloads, dev->tx_offloads);
1293 dev->configured = 1;
1294 dev->nb_rxq = data->nb_rx_queues;
1295 dev->nb_txq = data->nb_tx_queues;
1299 rc |= nix_security_release(dev);
1301 roc_nix_unregister_cq_irqs(nix);
1303 roc_nix_unregister_queue_irqs(nix);
1305 roc_nix_tm_fini(nix);
1307 nix_free_queue_mem(dev);
1308 rc |= roc_nix_lf_free(nix);
1310 dev->configured = 0;
1315 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1317 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1318 struct rte_eth_dev_data *data = eth_dev->data;
1319 struct roc_nix_sq *sq = &dev->sqs[qid];
1322 if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1325 rc = roc_nix_tm_sq_aura_fc(sq, true);
1327 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1331 data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1337 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1339 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1340 struct rte_eth_dev_data *data = eth_dev->data;
1341 struct roc_nix_sq *sq = &dev->sqs[qid];
1344 if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1347 rc = roc_nix_tm_sq_aura_fc(sq, false);
1349 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1354 data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1360 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1362 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1363 struct rte_eth_dev_data *data = eth_dev->data;
1364 struct roc_nix_rq *rq = &dev->rqs[qid];
1367 if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1370 rc = roc_nix_rq_ena_dis(rq, true);
1372 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1376 data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1382 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1384 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1385 struct rte_eth_dev_data *data = eth_dev->data;
1386 struct roc_nix_rq *rq = &dev->rqs[qid];
1389 if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1392 rc = roc_nix_rq_ena_dis(rq, false);
1394 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1398 data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1404 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1406 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1407 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1408 struct rte_mbuf *rx_pkts[32];
1409 struct rte_eth_link link;
1410 int count, i, j, rc;
1413 /* Disable all the NPC entries */
1414 rc = roc_npc_mcam_enable_all_entries(&dev->npc, 0);
1418 /* Stop link change events */
1419 if (!roc_nix_is_vf_or_sdp(&dev->nix))
1420 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1422 /* Disable Rx via NPC */
1423 roc_nix_npc_rx_ena_dis(&dev->nix, false);
1425 roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, false);
1427 /* Stop rx queues and free up pkts pending */
1428 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1429 rc = dev_ops->rx_queue_stop(eth_dev, i);
1433 rxq = eth_dev->data->rx_queues[i];
1434 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1436 for (j = 0; j < count; j++)
1437 rte_pktmbuf_free(rx_pkts[j]);
1438 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1442 /* Stop tx queues */
1443 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1444 dev_ops->tx_queue_stop(eth_dev, i);
1446 /* Bring down link status internally */
1447 memset(&link, 0, sizeof(link));
1448 rte_eth_linkstatus_set(eth_dev, &link);
1454 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1456 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1459 if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1460 rc = nix_recalc_mtu(eth_dev);
1465 /* Start rx queues */
1466 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1467 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1472 /* Start tx queues */
1473 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1474 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1479 /* Update Flow control configuration */
1480 rc = nix_update_flow_ctrl_config(eth_dev);
1482 plt_err("Failed to enable flow control. error code(%d)", rc);
1486 /* Enable Rx in NPC */
1487 rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1489 plt_err("Failed to enable NPC rx %d", rc);
1493 rc = roc_npc_mcam_enable_all_entries(&dev->npc, 1);
1495 plt_err("Failed to enable NPC entries %d", rc);
1499 cnxk_nix_toggle_flag_link_cfg(dev, true);
1501 /* Start link change events */
1502 if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1503 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1505 plt_err("Failed to start cgx link event %d", rc);
1510 /* Enable PTP if it is requested by the user or already
1511 * enabled on PF owning this VF
1513 memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1514 if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1515 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1517 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1519 if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
1520 rc = rte_mbuf_dyn_rx_timestamp_register
1521 (&dev->tstamp.tstamp_dynfield_offset,
1522 &dev->tstamp.rx_tstamp_dynflag);
1524 plt_err("Failed to register Rx timestamp field/flag");
1529 cnxk_nix_toggle_flag_link_cfg(dev, false);
1531 roc_nix_inl_outb_soft_exp_poll_switch(&dev->nix, true);
1536 roc_nix_npc_rx_ena_dis(&dev->nix, false);
1537 cnxk_nix_toggle_flag_link_cfg(dev, false);
1541 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1542 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1544 /* CNXK platform independent eth dev ops */
1545 struct eth_dev_ops cnxk_eth_dev_ops = {
1546 .mtu_set = cnxk_nix_mtu_set,
1547 .mac_addr_add = cnxk_nix_mac_addr_add,
1548 .mac_addr_remove = cnxk_nix_mac_addr_del,
1549 .mac_addr_set = cnxk_nix_mac_addr_set,
1550 .dev_infos_get = cnxk_nix_info_get,
1551 .link_update = cnxk_nix_link_update,
1552 .tx_queue_release = cnxk_nix_tx_queue_release,
1553 .rx_queue_release = cnxk_nix_rx_queue_release,
1554 .dev_stop = cnxk_nix_dev_stop,
1555 .dev_close = cnxk_nix_dev_close,
1556 .dev_reset = cnxk_nix_dev_reset,
1557 .tx_queue_start = cnxk_nix_tx_queue_start,
1558 .rx_queue_start = cnxk_nix_rx_queue_start,
1559 .rx_queue_stop = cnxk_nix_rx_queue_stop,
1560 .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1561 .promiscuous_enable = cnxk_nix_promisc_enable,
1562 .promiscuous_disable = cnxk_nix_promisc_disable,
1563 .allmulticast_enable = cnxk_nix_allmulticast_enable,
1564 .allmulticast_disable = cnxk_nix_allmulticast_disable,
1565 .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1566 .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1567 .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1568 .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1569 .priority_flow_ctrl_queue_config =
1570 cnxk_nix_priority_flow_ctrl_queue_config,
1571 .priority_flow_ctrl_queue_info_get =
1572 cnxk_nix_priority_flow_ctrl_queue_info_get,
1573 .dev_set_link_up = cnxk_nix_set_link_up,
1574 .dev_set_link_down = cnxk_nix_set_link_down,
1575 .get_module_info = cnxk_nix_get_module_info,
1576 .get_module_eeprom = cnxk_nix_get_module_eeprom,
1577 .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1578 .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1579 .pool_ops_supported = cnxk_nix_pool_ops_supported,
1580 .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1581 .stats_get = cnxk_nix_stats_get,
1582 .stats_reset = cnxk_nix_stats_reset,
1583 .xstats_get = cnxk_nix_xstats_get,
1584 .xstats_get_names = cnxk_nix_xstats_get_names,
1585 .xstats_reset = cnxk_nix_xstats_reset,
1586 .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1587 .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1588 .fw_version_get = cnxk_nix_fw_version_get,
1589 .rxq_info_get = cnxk_nix_rxq_info_get,
1590 .txq_info_get = cnxk_nix_txq_info_get,
1591 .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1592 .flow_ops_get = cnxk_nix_flow_ops_get,
1593 .get_reg = cnxk_nix_dev_get_reg,
1594 .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1595 .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1596 .timesync_read_time = cnxk_nix_timesync_read_time,
1597 .timesync_write_time = cnxk_nix_timesync_write_time,
1598 .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1599 .read_clock = cnxk_nix_read_clock,
1600 .reta_update = cnxk_nix_reta_update,
1601 .reta_query = cnxk_nix_reta_query,
1602 .rss_hash_update = cnxk_nix_rss_hash_update,
1603 .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1604 .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1605 .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
1606 .tm_ops_get = cnxk_nix_tm_ops_get,
1607 .mtr_ops_get = cnxk_nix_mtr_ops_get,
1611 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1613 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1614 struct rte_security_ctx *sec_ctx;
1615 struct roc_nix *nix = &dev->nix;
1616 struct rte_pci_device *pci_dev;
1617 int rc, max_entries;
1619 eth_dev->dev_ops = &cnxk_eth_dev_ops;
1620 eth_dev->rx_queue_count = cnxk_nix_rx_queue_count;
1621 eth_dev->rx_descriptor_status = cnxk_nix_rx_descriptor_status;
1622 eth_dev->tx_descriptor_status = cnxk_nix_tx_descriptor_status;
1624 /* Alloc security context */
1625 sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
1628 sec_ctx->device = eth_dev;
1629 sec_ctx->ops = &cnxk_eth_sec_ops;
1631 (RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
1632 eth_dev->security_ctx = sec_ctx;
1634 /* For secondary processes, the primary has done all the work */
1635 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1638 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1639 rte_eth_copy_pci_info(eth_dev, pci_dev);
1641 /* Parse devargs string */
1642 rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1644 plt_err("Failed to parse devargs rc=%d", rc);
1648 /* Initialize base roc nix */
1649 nix->pci_dev = pci_dev;
1650 nix->hw_vlan_ins = true;
1651 rc = roc_nix_dev_init(nix);
1653 plt_err("Failed to initialize roc nix rc=%d", rc);
1657 /* Register up msg callbacks */
1658 roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1660 /* Register up msg callbacks */
1661 roc_nix_mac_link_info_get_cb_register(nix,
1662 cnxk_eth_dev_link_status_get_cb);
1664 dev->eth_dev = eth_dev;
1665 dev->configured = 0;
1666 dev->ptype_disable = 0;
1668 TAILQ_INIT(&dev->inb.list);
1669 TAILQ_INIT(&dev->outb.list);
1670 rte_spinlock_init(&dev->inb.lock);
1671 rte_spinlock_init(&dev->outb.lock);
1673 /* For VFs, the returned max_entries will be 0, but one entry must
1674 * still be allocated to hold the default mac address, so set it to 1.
1676 if (roc_nix_is_vf_or_sdp(nix))
1679 max_entries = roc_nix_mac_max_entries_get(nix);
1681 if (max_entries <= 0) {
1682 plt_err("Failed to get max entries for mac addr");
1687 eth_dev->data->mac_addrs =
1688 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1689 if (eth_dev->data->mac_addrs == NULL) {
1690 plt_err("Failed to allocate memory for mac addr");
1695 dev->max_mac_entries = max_entries;
1696 dev->dmac_filter_count = 1;
1698 /* Get mac address */
1699 rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1701 plt_err("Failed to get mac addr, rc=%d", rc);
1702 goto free_mac_addrs;
1705 /* Update the mac address */
1706 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1708 if (!roc_nix_is_vf_or_sdp(nix)) {
1709 /* Sync same MAC address to CGX/RPM table */
1710 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1712 plt_err("Failed to set mac addr, rc=%d", rc);
1713 goto free_mac_addrs;
1717 /* Union of all capabilities supported by CNXK.
1718 * Platform specific capabilities will be
1721 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1722 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1723 dev->speed_capa = nix_get_speed_capa(dev);
1725 /* Initialize roc npc */
1726 dev->npc.roc_nix = nix;
1727 rc = roc_npc_init(&dev->npc);
1729 goto free_mac_addrs;
1731 plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1732 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1733 eth_dev->data->port_id, roc_nix_get_pf(nix),
1734 roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1735 dev->rx_offload_capa, dev->tx_offload_capa);
1739 rte_free(eth_dev->data->mac_addrs);
1741 roc_nix_dev_fini(nix);
1743 plt_err("Failed to init nix eth_dev rc=%d", rc);
1748 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1750 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1751 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1752 struct rte_eth_pfc_queue_conf pfc_conf;
1753 struct roc_nix *nix = &dev->nix;
1754 struct rte_eth_fc_conf fc_conf;
1757 /* Disable switch hdr pkind */
1758 roc_nix_switch_hdr_set(&dev->nix, 0, 0, 0, 0);
1760 plt_free(eth_dev->security_ctx);
1761 eth_dev->security_ctx = NULL;
1763 /* Nothing to be done for secondary processes */
1764 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1767 /* Clear the flag since we are closing down */
1768 dev->configured = 0;
1770 roc_nix_npc_rx_ena_dis(nix, false);
1772 /* Restore 802.3 Flow control configuration */
1773 memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
1774 memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
1775 fc_conf.mode = RTE_ETH_FC_NONE;
1776 rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1778 pfc_conf.mode = RTE_ETH_FC_NONE;
1779 for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
1780 if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
1781 pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
1782 pfc_conf.rx_pause.tc = i;
1783 pfc_conf.tx_pause.rx_qid = i;
1784 pfc_conf.tx_pause.tc = i;
1785 rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
1788 plt_err("Failed to reset PFC. error code(%d)",
1793 fc_conf.mode = RTE_ETH_FC_FULL;
1794 rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
1796 /* Disable and free rte_meter entries */
1797 nix_meter_fini(dev);
1799 /* Disable and free rte_flow entries */
1800 roc_npc_fini(&dev->npc);
1802 /* Disable link status events */
1803 roc_nix_mac_link_event_start_stop(nix, false);
1805 /* Unregister the link update op; this is required to stop VFs from
1806 * receiving link status updates on the exit path.
1808 roc_nix_mac_link_cb_unregister(nix);
1811 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1812 dev_ops->tx_queue_release(eth_dev, i);
1813 eth_dev->data->tx_queues[i] = NULL;
1815 eth_dev->data->nb_tx_queues = 0;
1817 /* Free up RQ's and CQ's */
1818 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1819 dev_ops->rx_queue_release(eth_dev, i);
1820 eth_dev->data->rx_queues[i] = NULL;
1822 eth_dev->data->nb_rx_queues = 0;
1824 /* Free security resources */
1825 nix_security_release(dev);
1827 /* Free tm resources */
1828 roc_nix_tm_fini(nix);
1830 /* Unregister queue irqs */
1831 roc_nix_unregister_queue_irqs(nix);
1833 /* Unregister cq irqs */
1834 if (eth_dev->data->dev_conf.intr_conf.rxq)
1835 roc_nix_unregister_cq_irqs(nix);
1837 /* Free ROC RQ's, SQ's and CQ's memory */
1838 nix_free_queue_mem(dev);
1840 /* Free nix lf resources */
1841 rc = roc_nix_lf_free(nix);
1843 plt_err("Failed to free nix lf, rc=%d", rc);
1845 rte_free(eth_dev->data->mac_addrs);
1846 eth_dev->data->mac_addrs = NULL;
1848 rc = roc_nix_dev_fini(nix);
1849 /* Can be freed later by PMD if NPA LF is in use */
1850 if (rc == -EAGAIN) {
1852 eth_dev->data->dev_private = NULL;
1855 plt_err("Failed in nix dev fini, rc=%d", rc);
1862 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1864 cnxk_eth_dev_uninit(eth_dev, false);
1869 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1873 rc = cnxk_eth_dev_uninit(eth_dev, true);
1877 return cnxk_eth_dev_init(eth_dev);
1881 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1883 struct rte_eth_dev *eth_dev;
1884 struct roc_nix *nix;
1887 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1889 /* Cleanup eth dev */
1890 rc = cnxk_eth_dev_uninit(eth_dev, false);
1894 rte_eth_dev_release_port(eth_dev);
1897 /* Nothing to be done for secondary processes */
1898 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1901 /* Check if this device is hosting common resource */
1902 nix = roc_idev_npa_nix_get();
1903 if (nix->pci_dev != pci_dev)
1906 /* Try nix fini now */
1907 rc = roc_nix_dev_fini(nix);
1908 if (rc == -EAGAIN) {
1909 plt_info("%s: common resource in use by other devices",
1913 plt_err("Failed in nix dev fini, rc=%d", rc);
1917 /* Free device pointer as rte_ethdev does not have it anymore */
1924 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1928 RTE_SET_USED(pci_drv);
1930 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1933 /* On error in a secondary process, recheck whether the port exists
1934 * in the primary or is in the middle of detaching.
1936 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1937 if (!rte_eth_dev_allocated(pci_dev->device.name))