1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
4 #include <cnxk_ethdev.h>
7 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
9 uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
11 if (roc_nix_is_vf_or_sdp(&dev->nix) ||
12 dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
13 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
18 static inline uint64_t
19 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
22 return CNXK_NIX_TX_OFFLOAD_CAPA;
25 static inline uint32_t
26 nix_get_speed_capa(struct cnxk_eth_dev *dev)
30 /* Auto negotiation disabled */
31 speed_capa = ETH_LINK_SPEED_FIXED;
32 if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
33 speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
34 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
35 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
42 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
44 struct rte_pktmbuf_pool_private *mbp_priv;
45 struct rte_eth_dev *eth_dev;
46 struct cnxk_eth_dev *dev;
50 eth_dev = dev->eth_dev;
52 /* Get rx buffer size */
53 mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
54 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
56 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
57 dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
58 dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
63 nix_recalc_mtu(struct rte_eth_dev *eth_dev)
65 struct rte_eth_dev_data *data = eth_dev->data;
66 struct cnxk_eth_rxq_sp *rxq;
70 rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
71 /* Setup scatter mode if needed by jumbo */
72 nix_enable_mseg_on_jumbo(rxq);
74 /* Setup MTU based on max_rx_pkt_len */
75 mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
76 CNXK_NIX_MAX_VTAG_ACT_SIZE;
78 rc = cnxk_nix_mtu_set(eth_dev, mtu);
80 plt_err("Failed to set default MTU size, rc=%d", rc);
86 nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
88 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
89 struct cnxk_fc_cfg *fc = &dev->fc_cfg;
90 struct rte_eth_fc_conf fc_conf = {0};
93 /* Both Rx & Tx flow ctrl are enabled (RTE_FC_FULL) in HW
94 * by the AF driver; reflect that in the PMD structure.
96 rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
100 fc->mode = fc_conf.mode;
101 fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
102 (fc_conf.mode == RTE_FC_RX_PAUSE);
103 fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
104 (fc_conf.mode == RTE_FC_TX_PAUSE);
111 nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
113 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
114 struct cnxk_fc_cfg *fc = &dev->fc_cfg;
115 struct rte_eth_fc_conf fc_cfg = {0};
117 if (roc_nix_is_vf_or_sdp(&dev->nix))
120 fc_cfg.mode = fc->mode;
122 /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
123 if (roc_model_is_cn96_ax() &&
124 dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
125 (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
127 (fc_cfg.mode == RTE_FC_FULL ||
128 fc_cfg.mode == RTE_FC_TX_PAUSE) ?
129 RTE_FC_TX_PAUSE : RTE_FC_NONE;
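/* Net effect of the above ternary: the Rx-pause component of the mode is
 * dropped, and Tx pause is kept only if the mode already included it.
 */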
132 return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
136 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
138 uint16_t port_id = dev->eth_dev->data->port_id;
139 struct rte_mbuf mb_def;
142 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
143 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
144 offsetof(struct rte_mbuf, data_off) !=
146 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
147 offsetof(struct rte_mbuf, data_off) !=
149 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
150 offsetof(struct rte_mbuf, data_off) !=
153 mb_def.data_off = RTE_PKTMBUF_HEADROOM +
154 (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
155 mb_def.port = port_id;
156 rte_mbuf_refcnt_set(&mb_def, 1);
158 /* Prevent compiler reordering: rearm_data covers previous fields */
159 rte_compiler_barrier();
160 tmp = (uint64_t *)&mb_def.rearm_data;
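/* The 8-byte word beginning at data_off (covering data_off, refcnt,
 * nb_segs and port, per the offset checks above) is read out here and
 * presumably serves as the per-port mbuf rearm template for fast-path Rx.
 */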
165 static inline uint8_t
166 nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
169 * A maximum of three segments can be supported with W8; choose
170 * NIX_MAXSQESZ_W16 for multi-segment offload.
172 if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
173 return NIX_MAXSQESZ_W16;
175 return NIX_MAXSQESZ_W8;
179 cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
180 uint16_t nb_desc, uint16_t fp_tx_q_sz,
181 const struct rte_eth_txconf *tx_conf)
183 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
184 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
185 struct cnxk_eth_txq_sp *txq_sp;
186 struct roc_nix_sq *sq;
190 /* Free memory prior to re-allocation if needed. */
191 if (eth_dev->data->tx_queues[qid] != NULL) {
192 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
193 dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
194 eth_dev->data->tx_queues[qid] = NULL;
200 sq->nb_desc = nb_desc;
201 sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
203 rc = roc_nix_sq_init(&dev->nix, sq);
205 plt_err("Failed to init sq=%d, rc=%d", qid, rc);
210 txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
211 txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
213 plt_err("Failed to alloc tx queue mem");
214 rc |= roc_nix_sq_fini(sq);
220 txq_sp->qconf.conf.tx = *tx_conf;
221 txq_sp->qconf.nb_desc = nb_desc;
223 plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
224 " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
225 qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
226 sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
228 /* Store start of fast path area */
229 eth_dev->data->tx_queues[qid] = txq_sp + 1;
230 eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
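/* Note: the slow-path cnxk_eth_txq_sp lives immediately before the
 * fast-path area handed to ethdev, so cnxk_eth_txq_to_sp() can step back
 * one struct to recover it (the Rx path uses the same "- 1" arithmetic,
 * see nix_recalc_mtu()).
 */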
235 cnxk_nix_tx_queue_release(void *txq)
237 struct cnxk_eth_txq_sp *txq_sp;
238 struct cnxk_eth_dev *dev;
239 struct roc_nix_sq *sq;
246 txq_sp = cnxk_eth_txq_to_sp(txq);
250 plt_nix_dbg("Releasing txq %u", qid);
254 rc = roc_nix_sq_fini(sq);
256 plt_err("Failed to cleanup sq, rc=%d", rc);
263 cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
264 uint16_t nb_desc, uint16_t fp_rx_q_sz,
265 const struct rte_eth_rxconf *rx_conf,
266 struct rte_mempool *mp)
268 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
269 struct cnxk_eth_rxq_sp *rxq_sp;
270 struct rte_mempool_ops *ops;
271 const char *platform_ops;
272 struct roc_nix_rq *rq;
273 struct roc_nix_cq *cq;
279 if (rx_conf->rx_deferred_start == 1) {
280 plt_err("Deferred Rx start is not supported");
284 platform_ops = rte_mbuf_platform_mempool_ops();
285 /* This driver needs cnxk_npa mempool ops to work */
286 ops = rte_mempool_get_ops(mp->ops_index);
287 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
288 plt_err("mempool ops should be of cnxk_npa type");
292 if (mp->pool_id == 0) {
293 plt_err("Invalid pool_id");
297 /* Free memory prior to re-allocation if needed */
298 if (eth_dev->data->rx_queues[qid] != NULL) {
299 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
301 plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
302 dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
303 eth_dev->data->rx_queues[qid] = NULL;
309 cq->nb_desc = nb_desc;
310 rc = roc_nix_cq_init(&dev->nix, cq);
312 plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
319 rq->aura_handle = mp->pool_id;
320 rq->flow_tag_width = 32;
323 /* Calculate first mbuf skip */
324 first_skip = (sizeof(struct rte_mbuf));
325 first_skip += RTE_PKTMBUF_HEADROOM;
326 first_skip += rte_pktmbuf_priv_size(mp);
327 rq->first_skip = first_skip;
328 rq->later_skip = sizeof(struct rte_mbuf);
329 rq->lpb_size = mp->elt_size;
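/* first_skip spans the mbuf header, headroom and mempool private area of
 * the first segment; later_skip covers only the mbuf header of subsequent
 * segments; lpb_size is the mempool element size.
 */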
331 rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
333 plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
337 /* Allocate and setup fast path rx queue */
339 rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
340 rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
342 plt_err("Failed to alloc rx queue for rq=%d", qid);
346 /* Setup slow path fields */
349 rxq_sp->qconf.conf.rx = *rx_conf;
350 rxq_sp->qconf.nb_desc = nb_desc;
351 rxq_sp->qconf.mp = mp;
353 plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
356 /* Store start of fast path area */
357 eth_dev->data->rx_queues[qid] = rxq_sp + 1;
358 eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
360 /* Calculate the delta and frequency multiplier between the PTP HI clock
361 * and the TSC. These are needed to derive the raw clock value from the
362 * TSC counter; the read_clock eth op returns that raw clock value.
364 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
365 rc = cnxk_nix_tsc_convert(dev);
367 plt_err("Failed to calculate delta and freq mult");
374 rc |= roc_nix_rq_fini(rq);
376 rc |= roc_nix_cq_fini(cq);
382 cnxk_nix_rx_queue_release(void *rxq)
384 struct cnxk_eth_rxq_sp *rxq_sp;
385 struct cnxk_eth_dev *dev;
386 struct roc_nix_rq *rq;
387 struct roc_nix_cq *cq;
394 rxq_sp = cnxk_eth_rxq_to_sp(rxq);
398 plt_nix_dbg("Releasing rxq %u", qid);
402 rc = roc_nix_rq_fini(rq);
404 plt_err("Failed to cleanup rq, rc=%d", rc);
408 rc = roc_nix_cq_fini(cq);
410 plt_err("Failed to cleanup cq, rc=%d", rc);
412 /* Finally free fast path area */
417 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
420 uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
421 {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
422 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
423 {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
424 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
425 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
426 {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
427 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
428 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
429 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
430 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
431 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
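/* Rows index the RSS hash level (outer, inner, outer+inner) and columns
 * map to the IPv4/IPv6/TCP/UDP/SCTP/DMAC key types selected below via the
 * RSS_*_INDEX values.
 */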
433 uint32_t flowkey_cfg = 0;
435 dev->ethdev_rss_hf = ethdev_rss;
437 if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
438 dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
439 flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
442 if (ethdev_rss & ETH_RSS_C_VLAN)
443 flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
445 if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
446 flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
448 if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
449 flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
451 if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
452 flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
454 if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
455 flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
457 if (ethdev_rss & RSS_IPV4_ENABLE)
458 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
460 if (ethdev_rss & RSS_IPV6_ENABLE)
461 flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
463 if (ethdev_rss & ETH_RSS_TCP)
464 flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
466 if (ethdev_rss & ETH_RSS_UDP)
467 flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
469 if (ethdev_rss & ETH_RSS_SCTP)
470 flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
472 if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
473 flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
475 if (ethdev_rss & RSS_IPV6_EX_ENABLE)
476 flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
478 if (ethdev_rss & ETH_RSS_PORT)
479 flowkey_cfg |= FLOW_KEY_TYPE_PORT;
481 if (ethdev_rss & ETH_RSS_NVGRE)
482 flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
484 if (ethdev_rss & ETH_RSS_VXLAN)
485 flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
487 if (ethdev_rss & ETH_RSS_GENEVE)
488 flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
490 if (ethdev_rss & ETH_RSS_GTPU)
491 flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
497 nix_free_queue_mem(struct cnxk_eth_dev *dev)
508 nix_rss_default_setup(struct cnxk_eth_dev *dev)
510 struct rte_eth_dev *eth_dev = dev->eth_dev;
511 uint8_t rss_hash_level;
512 uint32_t flowkey_cfg;
515 rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
516 rss_hash_level = ETH_RSS_LEVEL(rss_hf);
520 flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
521 return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
525 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
527 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
528 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
529 struct cnxk_eth_qconf *tx_qconf = NULL;
530 struct cnxk_eth_qconf *rx_qconf = NULL;
531 struct cnxk_eth_rxq_sp *rxq_sp;
532 struct cnxk_eth_txq_sp *txq_sp;
533 int i, nb_rxq, nb_txq;
536 nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
537 nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
539 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
540 if (tx_qconf == NULL) {
541 plt_err("Failed to allocate memory for tx_qconf");
545 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
546 if (rx_qconf == NULL) {
547 plt_err("Failed to allocate memory for rx_qconf");
551 txq = eth_dev->data->tx_queues;
552 for (i = 0; i < nb_txq; i++) {
553 if (txq[i] == NULL) {
554 tx_qconf[i].valid = false;
555 plt_info("txq[%d] is already released", i);
558 txq_sp = cnxk_eth_txq_to_sp(txq[i]);
559 memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
560 tx_qconf[i].valid = true;
561 dev_ops->tx_queue_release(txq[i]);
562 eth_dev->data->tx_queues[i] = NULL;
565 rxq = eth_dev->data->rx_queues;
566 for (i = 0; i < nb_rxq; i++) {
567 if (rxq[i] == NULL) {
568 rx_qconf[i].valid = false;
569 plt_info("rxq[%d] is already released", i);
572 rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
573 memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
574 rx_qconf[i].valid = true;
575 dev_ops->rx_queue_release(rxq[i]);
576 eth_dev->data->rx_queues[i] = NULL;
579 dev->tx_qconf = tx_qconf;
580 dev->rx_qconf = rx_qconf;
590 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
592 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
593 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
594 struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
595 struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
596 int rc, i, nb_rxq, nb_txq;
599 nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
600 nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
603 /* Set up Tx & Rx queues with the previous configuration so
604 * that the queues can be functional in cases where ports
605 * are started without reconfiguring the queues.
607 * The usual reconfig sequence is as below:
613 * queue_configure() {
620 * In some applications' control path, queue_configure() would
621 * NOT be invoked for TXQs/RXQs in port_configure().
622 * In such cases, the queues can still be functional after start as
623 * they are already set up in port_configure().
625 for (i = 0; i < nb_txq; i++) {
626 if (!tx_qconf[i].valid)
628 rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
629 &tx_qconf[i].conf.tx);
631 plt_err("Failed to setup tx queue rc=%d", rc);
632 txq = eth_dev->data->tx_queues;
633 for (i -= 1; i >= 0; i--)
634 dev_ops->tx_queue_release(txq[i]);
642 for (i = 0; i < nb_rxq; i++) {
643 if (!rx_qconf[i].valid)
645 rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
646 &rx_qconf[i].conf.rx,
649 plt_err("Failed to setup rx queue rc=%d", rc);
650 rxq = eth_dev->data->rx_queues;
651 for (i -= 1; i >= 0; i--)
652 dev_ops->rx_queue_release(rxq[i]);
653 goto tx_queue_release;
663 txq = eth_dev->data->tx_queues;
664 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
665 dev_ops->tx_queue_release(txq[i]);
676 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
686 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
688 /* These dummy functions are required to support
689 * applications which reconfigure queues without
690 * stopping the Tx and Rx burst threads (e.g. the KNI app).
691 * When the queue context is saved, the txqs/rxqs are released,
692 * which would crash the app since rx/tx burst is still
693 * running on different lcores.
695 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
696 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
701 nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
703 uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
704 uint8_t tun[ROC_NIX_LSO_TUN_MAX];
705 struct roc_nix *nix = &dev->nix;
708 rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
712 dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
713 (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
714 (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
715 (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
717 dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
718 (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
719 (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
720 (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
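/* The eight LSO format indices are packed into one 64-bit word: bytes 0-3
 * hold the IP-in-IP tunnel formats (V4V4..V6V6) and bytes 4-7 the UDP
 * tunnel formats, presumably for quick lookup in the Tx path.
 */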
725 nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
727 struct roc_nix *nix = &dev->nix;
730 /* Nothing much to do if offload is not enabled */
731 if (!(dev->tx_offloads &
732 (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
733 DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
736 /* Setup LSO formats in AF. It's a no-op if another ethdev has
739 rc = roc_nix_lso_fmt_setup(nix);
743 return nix_lso_tun_fmt_update(dev);
747 cnxk_nix_configure(struct rte_eth_dev *eth_dev)
749 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
750 struct rte_eth_dev_data *data = eth_dev->data;
751 struct rte_eth_conf *conf = &data->dev_conf;
752 struct rte_eth_rxmode *rxmode = &conf->rxmode;
753 struct rte_eth_txmode *txmode = &conf->txmode;
754 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
755 struct roc_nix_fc_cfg fc_cfg = {0};
756 struct roc_nix *nix = &dev->nix;
757 struct rte_ether_addr *ea;
758 uint8_t nb_rxq, nb_txq;
766 if (rte_eal_has_hugepages() == 0) {
767 plt_err("Huge page is not configured");
771 if (conf->dcb_capability_en == 1) {
772 plt_err("dcb enable is not supported");
776 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
777 plt_err("Flow director is not supported");
781 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
782 rxmode->mq_mode != ETH_MQ_RX_RSS) {
783 plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
787 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
788 plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
792 /* Free the resources allocated from the previous configure */
793 if (dev->configured == 1) {
794 /* Unregister queue irq's */
795 roc_nix_unregister_queue_irqs(nix);
797 /* Unregister CQ irqs if present */
798 if (eth_dev->data->dev_conf.intr_conf.rxq)
799 roc_nix_unregister_cq_irqs(nix);
801 /* Set no-op functions */
802 nix_set_nop_rxtx_function(eth_dev);
803 /* Store queue config for later */
804 rc = nix_store_queue_cfg_and_then_release(eth_dev);
807 roc_nix_tm_fini(nix);
808 roc_nix_lf_free(nix);
811 dev->rx_offloads = rxmode->offloads;
812 dev->tx_offloads = txmode->offloads;
815 rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
816 if (dev->rx_offloads &
817 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
818 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
819 rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
821 rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
822 ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
823 ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
825 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
826 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
829 rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
831 plt_err("Failed to init nix_lf rc=%d", rc);
835 dev->npc.channel = roc_nix_get_base_chan(nix);
837 nb_rxq = data->nb_rx_queues;
838 nb_txq = data->nb_tx_queues;
841 /* Allocate memory for ROC RQs and CQs */
842 qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
844 plt_err("Failed to alloc rqs");
849 qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
851 plt_err("Failed to alloc cqs");
858 /* Allocate memory for ROC SQs */
859 qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
861 plt_err("Failed to alloc sqs");
867 /* Re-enable NIX LF error interrupts */
868 roc_nix_err_intr_ena_dis(nix, true);
869 roc_nix_ras_intr_ena_dis(nix, true);
871 if (nix->rx_ptp_ena &&
872 dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
873 plt_err("Both PTP and switch header enabled");
877 rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
879 plt_err("Failed to enable switch type nix_lf rc=%d", rc);
883 /* Setup LSO if needed */
884 rc = nix_lso_fmt_setup(dev);
886 plt_err("Failed to setup nix lso format fields, rc=%d", rc);
891 rc = nix_rss_default_setup(dev);
893 plt_err("Failed to configure rss rc=%d", rc);
897 /* Init the default TM scheduler hierarchy */
898 rc = roc_nix_tm_init(nix);
900 plt_err("Failed to init traffic manager, rc=%d", rc);
904 rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
906 plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
910 /* Register queue IRQs */
911 rc = roc_nix_register_queue_irqs(nix);
913 plt_err("Failed to register queue interrupts rc=%d", rc);
917 /* Register cq IRQs */
918 if (eth_dev->data->dev_conf.intr_conf.rxq) {
919 if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
920 plt_err("Rx interrupt cannot be enabled, rxq > %d",
924 /* The Rx interrupt feature cannot work with vector mode because
925 * vector mode does not process packets unless at least 4 pkts are
926 * received, while CQ interrupts are generated even for 1 pkt
929 dev->scalar_ena = true;
931 rc = roc_nix_register_cq_irqs(nix);
933 plt_err("Failed to register CQ interrupts rc=%d", rc);
938 /* Configure loopback mode */
939 rc = roc_nix_mac_loopback_enable(nix,
940 eth_dev->data->dev_conf.lpbk_mode);
942 plt_err("Failed to configure cgx loop back mode rc=%d", rc);
946 /* Init flow control configuration */
947 fc_cfg.cq_cfg_valid = false;
948 fc_cfg.rxchan_cfg.enable = true;
949 rc = roc_nix_fc_config_set(nix, &fc_cfg);
951 plt_err("Failed to initialize flow control rc=%d", rc);
955 /* Update flow control configuration to PMD */
956 rc = nix_init_flow_ctrl_config(eth_dev);
958 plt_err("Failed to initialize flow control rc=%d", rc);
962 * Restore the queue config when a reconfigure is followed by another
963 * reconfigure and no queue configure is invoked by the application.
965 if (dev->configured == 1) {
966 rc = nix_restore_queue_cfg(eth_dev);
971 /* Update the mac address */
972 ea = eth_dev->data->mac_addrs;
973 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
974 if (rte_is_zero_ether_addr(ea))
975 rte_eth_random_addr((uint8_t *)ea);
977 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
979 plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
980 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
981 eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
982 dev->rx_offloads, dev->tx_offloads);
986 dev->nb_rxq = data->nb_rx_queues;
987 dev->nb_txq = data->nb_tx_queues;
991 roc_nix_unregister_cq_irqs(nix);
993 roc_nix_unregister_queue_irqs(nix);
995 roc_nix_tm_fini(nix);
997 nix_free_queue_mem(dev);
998 rc |= roc_nix_lf_free(nix);
1000 dev->configured = 0;
1005 cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1007 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1008 struct rte_eth_dev_data *data = eth_dev->data;
1009 struct roc_nix_sq *sq = &dev->sqs[qid];
1012 if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1015 rc = roc_nix_tm_sq_aura_fc(sq, true);
1017 plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
1021 data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1027 cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1029 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1030 struct rte_eth_dev_data *data = eth_dev->data;
1031 struct roc_nix_sq *sq = &dev->sqs[qid];
1034 if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1037 rc = roc_nix_tm_sq_aura_fc(sq, false);
1039 plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
1044 data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1050 cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
1052 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1053 struct rte_eth_dev_data *data = eth_dev->data;
1054 struct roc_nix_rq *rq = &dev->rqs[qid];
1057 if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
1060 rc = roc_nix_rq_ena_dis(rq, true);
1062 plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
1066 data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
1072 cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
1074 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1075 struct rte_eth_dev_data *data = eth_dev->data;
1076 struct roc_nix_rq *rq = &dev->rqs[qid];
1079 if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
1082 rc = roc_nix_rq_ena_dis(rq, false);
1084 plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
1088 data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
1094 cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
1096 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1097 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1098 struct rte_mbuf *rx_pkts[32];
1099 struct rte_eth_link link;
1100 int count, i, j, rc;
1103 /* Disable switch hdr pkind */
1104 roc_nix_switch_hdr_set(&dev->nix, 0);
1106 /* Stop link change events */
1107 if (!roc_nix_is_vf_or_sdp(&dev->nix))
1108 roc_nix_mac_link_event_start_stop(&dev->nix, false);
1110 /* Disable Rx via NPC */
1111 roc_nix_npc_rx_ena_dis(&dev->nix, false);
1113 /* Stop rx queues and free up pkts pending */
1114 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1115 rc = dev_ops->rx_queue_stop(eth_dev, i);
1119 rxq = eth_dev->data->rx_queues[i];
1120 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1122 for (j = 0; j < count; j++)
1123 rte_pktmbuf_free(rx_pkts[j]);
1124 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1128 /* Stop tx queues */
1129 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1130 dev_ops->tx_queue_stop(eth_dev, i);
1132 /* Bring down link status internally */
1133 memset(&link, 0, sizeof(link));
1134 rte_eth_linkstatus_set(eth_dev, &link);
1140 cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
1142 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1145 if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
1146 rc = nix_recalc_mtu(eth_dev);
1151 /* Start rx queues */
1152 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1153 rc = cnxk_nix_rx_queue_start(eth_dev, i);
1158 /* Start tx queues */
1159 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1160 rc = cnxk_nix_tx_queue_start(eth_dev, i);
1165 /* Update Flow control configuration */
1166 rc = nix_update_flow_ctrl_config(eth_dev);
1168 plt_err("Failed to enable flow control. error code(%d)", rc);
1172 /* Enable Rx in NPC */
1173 rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
1175 plt_err("Failed to enable NPC rx %d", rc);
1179 cnxk_nix_toggle_flag_link_cfg(dev, true);
1181 /* Start link change events */
1182 if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
1183 rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
1185 plt_err("Failed to start cgx link event %d", rc);
1190 /* Enable PTP if it is requested by the user or is already
1191 * enabled on the PF owning this VF
1193 memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
1194 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
1195 cnxk_eth_dev_ops.timesync_enable(eth_dev);
1197 cnxk_eth_dev_ops.timesync_disable(eth_dev);
1199 if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
1200 rc = rte_mbuf_dyn_rx_timestamp_register
1201 (&dev->tstamp.tstamp_dynfield_offset,
1202 &dev->tstamp.rx_tstamp_dynflag);
1204 plt_err("Failed to register Rx timestamp field/flag");
1209 cnxk_nix_toggle_flag_link_cfg(dev, false);
1214 roc_nix_npc_rx_ena_dis(&dev->nix, false);
1215 cnxk_nix_toggle_flag_link_cfg(dev, false);
1219 static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
1220 static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);
1222 /* CNXK platform independent eth dev ops */
1223 struct eth_dev_ops cnxk_eth_dev_ops = {
1224 .mtu_set = cnxk_nix_mtu_set,
1225 .mac_addr_add = cnxk_nix_mac_addr_add,
1226 .mac_addr_remove = cnxk_nix_mac_addr_del,
1227 .mac_addr_set = cnxk_nix_mac_addr_set,
1228 .dev_infos_get = cnxk_nix_info_get,
1229 .link_update = cnxk_nix_link_update,
1230 .tx_queue_release = cnxk_nix_tx_queue_release,
1231 .rx_queue_release = cnxk_nix_rx_queue_release,
1232 .dev_stop = cnxk_nix_dev_stop,
1233 .dev_close = cnxk_nix_dev_close,
1234 .dev_reset = cnxk_nix_dev_reset,
1235 .tx_queue_start = cnxk_nix_tx_queue_start,
1236 .rx_queue_start = cnxk_nix_rx_queue_start,
1237 .rx_queue_stop = cnxk_nix_rx_queue_stop,
1238 .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
1239 .promiscuous_enable = cnxk_nix_promisc_enable,
1240 .promiscuous_disable = cnxk_nix_promisc_disable,
1241 .allmulticast_enable = cnxk_nix_allmulticast_enable,
1242 .allmulticast_disable = cnxk_nix_allmulticast_disable,
1243 .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
1244 .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
1245 .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
1246 .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
1247 .dev_set_link_up = cnxk_nix_set_link_up,
1248 .dev_set_link_down = cnxk_nix_set_link_down,
1249 .get_module_info = cnxk_nix_get_module_info,
1250 .get_module_eeprom = cnxk_nix_get_module_eeprom,
1251 .rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
1252 .rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
1253 .pool_ops_supported = cnxk_nix_pool_ops_supported,
1254 .queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
1255 .stats_get = cnxk_nix_stats_get,
1256 .stats_reset = cnxk_nix_stats_reset,
1257 .xstats_get = cnxk_nix_xstats_get,
1258 .xstats_get_names = cnxk_nix_xstats_get_names,
1259 .xstats_reset = cnxk_nix_xstats_reset,
1260 .xstats_get_by_id = cnxk_nix_xstats_get_by_id,
1261 .xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
1262 .fw_version_get = cnxk_nix_fw_version_get,
1263 .rxq_info_get = cnxk_nix_rxq_info_get,
1264 .txq_info_get = cnxk_nix_txq_info_get,
1265 .tx_done_cleanup = cnxk_nix_tx_done_cleanup,
1266 .flow_ops_get = cnxk_nix_flow_ops_get,
1267 .get_reg = cnxk_nix_dev_get_reg,
1268 .timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
1269 .timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
1270 .timesync_read_time = cnxk_nix_timesync_read_time,
1271 .timesync_write_time = cnxk_nix_timesync_write_time,
1272 .timesync_adjust_time = cnxk_nix_timesync_adjust_time,
1273 .read_clock = cnxk_nix_read_clock,
1274 .reta_update = cnxk_nix_reta_update,
1275 .reta_query = cnxk_nix_reta_query,
1276 .rss_hash_update = cnxk_nix_rss_hash_update,
1277 .rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
1278 .set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
1282 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
1284 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1285 struct roc_nix *nix = &dev->nix;
1286 struct rte_pci_device *pci_dev;
1287 int rc, max_entries;
1289 eth_dev->dev_ops = &cnxk_eth_dev_ops;
1291 /* For secondary processes, the primary has done all the work */
1292 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1295 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1296 rte_eth_copy_pci_info(eth_dev, pci_dev);
1298 /* Parse devargs string */
1299 rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1301 plt_err("Failed to parse devargs rc=%d", rc);
1305 /* Initialize base roc nix */
1306 nix->pci_dev = pci_dev;
1307 nix->hw_vlan_ins = true;
1308 rc = roc_nix_dev_init(nix);
1310 plt_err("Failed to initialize roc nix rc=%d", rc);
1314 /* Register link status update (up msg) callback */
1315 roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);
1317 /* Register link status get callback */
1318 roc_nix_mac_link_info_get_cb_register(nix,
1319 cnxk_eth_dev_link_status_get_cb);
1321 dev->eth_dev = eth_dev;
1322 dev->configured = 0;
1323 dev->ptype_disable = 0;
1325 /* For VFs, the returned max_entries will be 0, but one entry must be
1326 * allocated to hold the default MAC address, so set it to 1.
1328 if (roc_nix_is_vf_or_sdp(nix))
1331 max_entries = roc_nix_mac_max_entries_get(nix);
1333 if (max_entries <= 0) {
1334 plt_err("Failed to get max entries for mac addr");
1339 eth_dev->data->mac_addrs =
1340 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
1341 if (eth_dev->data->mac_addrs == NULL) {
1342 plt_err("Failed to allocate memory for mac addr");
1347 dev->max_mac_entries = max_entries;
1348 dev->dmac_filter_count = 1;
1350 /* Get mac address */
1351 rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
1353 plt_err("Failed to get mac addr, rc=%d", rc);
1354 goto free_mac_addrs;
1357 /* Update the mac address */
1358 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1360 if (!roc_nix_is_vf_or_sdp(nix)) {
1361 /* Sync same MAC address to CGX/RPM table */
1362 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
1364 plt_err("Failed to set mac addr, rc=%d", rc);
1365 goto free_mac_addrs;
1369 /* Union of all capabilities supported by CNXK.
1370 * Platform specific capabilities will be
1373 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1374 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1375 dev->speed_capa = nix_get_speed_capa(dev);
1377 /* Initialize roc npc */
1378 dev->npc.roc_nix = nix;
1379 rc = roc_npc_init(&dev->npc);
1381 goto free_mac_addrs;
1383 plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
1384 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1385 eth_dev->data->port_id, roc_nix_get_pf(nix),
1386 roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
1387 dev->rx_offload_capa, dev->tx_offload_capa);
1391 rte_free(eth_dev->data->mac_addrs);
1393 roc_nix_dev_fini(nix);
1395 plt_err("Failed to init nix eth_dev rc=%d", rc);
1400 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
1402 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
1403 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
1404 struct roc_nix *nix = &dev->nix;
1407 /* Nothing to be done for secondary processes */
1408 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1411 /* Clear the flag since we are closing down */
1412 dev->configured = 0;
1414 roc_nix_npc_rx_ena_dis(nix, false);
1416 /* Disable and free rte_flow entries */
1417 roc_npc_fini(&dev->npc);
1419 /* Disable link status events */
1420 roc_nix_mac_link_event_start_stop(nix, false);
1422 /* Unregister the link update op; this is required to stop VFs from
1423 * receiving link status updates on the exit path.
1425 roc_nix_mac_link_cb_unregister(nix);
1428 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1429 dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
1430 eth_dev->data->tx_queues[i] = NULL;
1432 eth_dev->data->nb_tx_queues = 0;
1434 /* Free up RQ's and CQ's */
1435 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1436 dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
1437 eth_dev->data->rx_queues[i] = NULL;
1439 eth_dev->data->nb_rx_queues = 0;
1441 /* Free tm resources */
1442 roc_nix_tm_fini(nix);
1444 /* Unregister queue irqs */
1445 roc_nix_unregister_queue_irqs(nix);
1447 /* Unregister cq irqs */
1448 if (eth_dev->data->dev_conf.intr_conf.rxq)
1449 roc_nix_unregister_cq_irqs(nix);
1451 /* Free ROC RQ, SQ and CQ memory */
1452 nix_free_queue_mem(dev);
1454 /* Free nix lf resources */
1455 rc = roc_nix_lf_free(nix);
1457 plt_err("Failed to free nix lf, rc=%d", rc);
1459 rte_free(eth_dev->data->mac_addrs);
1460 eth_dev->data->mac_addrs = NULL;
1462 rc = roc_nix_dev_fini(nix);
1463 /* Can be freed later by PMD if NPA LF is in use */
1464 if (rc == -EAGAIN) {
1466 eth_dev->data->dev_private = NULL;
1469 plt_err("Failed in nix dev fini, rc=%d", rc);
1476 cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
1478 cnxk_eth_dev_uninit(eth_dev, false);
1483 cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
1487 rc = cnxk_eth_dev_uninit(eth_dev, true);
1491 return cnxk_eth_dev_init(eth_dev);
1495 cnxk_nix_remove(struct rte_pci_device *pci_dev)
1497 struct rte_eth_dev *eth_dev;
1498 struct roc_nix *nix;
1501 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1503 /* Cleanup eth dev */
1504 rc = cnxk_eth_dev_uninit(eth_dev, false);
1508 rte_eth_dev_release_port(eth_dev);
1511 /* Nothing to be done for secondary processes */
1512 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1515 /* Check if this device is hosting common resource */
1516 nix = roc_idev_npa_nix_get();
1517 if (nix->pci_dev != pci_dev)
1520 /* Try nix fini now */
1521 rc = roc_nix_dev_fini(nix);
1522 if (rc == -EAGAIN) {
1523 plt_info("%s: common resource in use by other devices",
1527 plt_err("Failed in nix dev fini, rc=%d", rc);
1531 /* Free device pointer as rte_ethdev does not have it anymore */
1538 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1542 RTE_SET_USED(pci_drv);
1544 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
1547 /* On error on a secondary process, recheck if the port exists in the
1548 * primary process or is in the middle of detaching.
1550 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1551 if (!rte_eth_dev_allocated(pci_dev->device.name))