/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

static inline uint64_t
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
{
	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;

	if (roc_nix_is_vf_or_sdp(&dev->nix))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return CNXK_NIX_TX_OFFLOAD_CAPA;
}

static inline uint32_t
nix_get_speed_capa(struct cnxk_eth_dev *dev)
{
	uint32_t speed_capa;

	/* Auto negotiation disabled */
	speed_capa = ETH_LINK_SPEED_FIXED;
	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
	}

	return speed_capa;
}
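
/* Translate ethdev RSS offload types (ETH_RSS_*) into a NIX flowkey
 * config. Row 0 of flow_key_type below covers outer headers, row 1
 * inner (tunnel) headers, and row 2 both, indexed by rss_level.
 */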
uint32_t
cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
		       uint8_t rss_level)
{
	uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
		{FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
		 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
		{FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
		{FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
		 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
		 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
		 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
	};
	uint32_t flowkey_cfg = 0;

	dev->ethdev_rss_hf = ethdev_rss;

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;

	if (ethdev_rss & ETH_RSS_C_VLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;

	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;

	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;

	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;

	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;

	if (ethdev_rss & RSS_IPV4_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];

	if (ethdev_rss & RSS_IPV6_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];

	if (ethdev_rss & ETH_RSS_TCP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];

	if (ethdev_rss & ETH_RSS_UDP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];

	if (ethdev_rss & ETH_RSS_SCTP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];

	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;

	if (ethdev_rss & ETH_RSS_PORT)
		flowkey_cfg |= FLOW_KEY_TYPE_PORT;

	if (ethdev_rss & ETH_RSS_NVGRE)
		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;

	if (ethdev_rss & ETH_RSS_VXLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;

	if (ethdev_rss & ETH_RSS_GENEVE)
		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;

	if (ethdev_rss & ETH_RSS_GTPU)
		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;

	return flowkey_cfg;
}

static void
nix_free_queue_mem(struct cnxk_eth_dev *dev)
{
	plt_free(dev->rqs);
	plt_free(dev->cqs);
	plt_free(dev->sqs);
	dev->rqs = NULL;
	dev->cqs = NULL;
	dev->sqs = NULL;
}

static int
nix_rss_default_setup(struct cnxk_eth_dev *dev)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	uint8_t rss_hash_level;
	uint32_t flowkey_cfg;
	uint64_t rss_hf;

	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
	if (rss_hash_level)
		rss_hash_level -= 1;

	flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
	return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
}
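
/* Save each queue's config and release the queues so that the NIX LF
 * can be freed; a later cnxk_nix_configure() recreates them via
 * nix_restore_queue_cfg().
 */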
static int
nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = NULL;
	struct cnxk_eth_qconf *rx_qconf = NULL;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_txq_sp *txq_sp;
	int i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
	if (tx_qconf == NULL) {
		plt_err("Failed to allocate memory for tx_qconf");
		goto fail;
	}

	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
	if (rx_qconf == NULL) {
		plt_err("Failed to allocate memory for rx_qconf");
		goto fail;
	}

	txq = eth_dev->data->tx_queues;
	for (i = 0; i < nb_txq; i++) {
		if (txq[i] == NULL) {
			tx_qconf[i].valid = false;
			plt_info("txq[%d] is already released", i);
			continue;
		}
		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
		tx_qconf[i].valid = true;
		dev_ops->tx_queue_release(txq[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}

	rxq = eth_dev->data->rx_queues;
	for (i = 0; i < nb_rxq; i++) {
		if (rxq[i] == NULL) {
			rx_qconf[i].valid = false;
			plt_info("rxq[%d] is already released", i);
			continue;
		}
		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
		rx_qconf[i].valid = true;
		dev_ops->rx_queue_release(rxq[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}

	dev->tx_qconf = tx_qconf;
	dev->rx_qconf = rx_qconf;
	return 0;

fail:
	free(tx_qconf);
	free(rx_qconf);
	return -ENOMEM;
}
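
/* Recreate the queues released by nix_store_queue_cfg_and_then_release()
 * from the saved per-queue config.
 */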
static int
nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
	int rc, i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	/* Setup tx & rx queues with previous configuration so
	 * that the queues can be functional in cases like ports
	 * are started without reconfiguring queues.
	 *
	 * The usual reconfig sequence is like below:
	 * port_configure() {
	 *      if (reconfigure) {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 *      queue_configure() {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 * }
	 * port_start() {
	 *      start();
	 * }
	 *
	 * In some application's control path, queue_configure() would
	 * NOT be invoked for TXQs/RXQs in port_configure().
	 * In such cases, queues can be functional after start as the
	 * queues are already set up in port_configure().
	 */
	for (i = 0; i < nb_txq; i++) {
		if (!tx_qconf[i].valid)
			continue;
		rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
					     &tx_qconf[i].conf.tx);
		if (rc) {
			plt_err("Failed to setup tx queue rc=%d", rc);
			txq = eth_dev->data->tx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->tx_queue_release(txq[i]);
			goto fail;
		}
	}

	free(tx_qconf);
	tx_qconf = NULL;

	for (i = 0; i < nb_rxq; i++) {
		if (!rx_qconf[i].valid)
			continue;
		rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
					     &rx_qconf[i].conf.rx,
					     rx_qconf[i].mp);
		if (rc) {
			plt_err("Failed to setup rx queue rc=%d", rc);
			rxq = eth_dev->data->rx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->rx_queue_release(rxq[i]);
			goto tx_queue_release;
		}
	}

	free(rx_qconf);
	rx_qconf = NULL;

	return 0;

tx_queue_release:
	txq = eth_dev->data->tx_queues;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_release(txq[i]);
fail:
	free(tx_qconf);
	free(rx_qconf);

	return rc;
}

static uint16_t
nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	return 0;
}

static void
nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
{
	/* These dummy functions are required for supporting
	 * some applications which reconfigure queues without
	 * stopping Tx burst and Rx burst threads (e.g. the kni app).
	 * When the queue context is saved, the txqs/rxqs are released,
	 * which would otherwise crash the app since rx/tx burst may
	 * still be running on different lcores.
	 */
	eth_dev->tx_pkt_burst = nix_eth_nop_burst;
	eth_dev->rx_pkt_burst = nix_eth_nop_burst;
	rte_mb();
}
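
/* The ROC layer hands back one 8-bit LSO format index per tunnel type.
 * Pack all eight indices into the single 64-bit dev->lso_tun_fmt word,
 * one byte per format:
 *
 *   bits  7:0  tun V4V4      bits 39:32 udp_tun V4V4
 *   bits 15:8  tun V4V6      bits 47:40 udp_tun V4V6
 *   bits 23:16 tun V6V4      bits 55:48 udp_tun V6V4
 *   bits 31:24 tun V6V6      bits 63:56 udp_tun V6V6
 *
 * so a format index can later be fetched with a shift and a mask.
 */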
static int
nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
{
	uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
	uint8_t tun[ROC_NIX_LSO_TUN_MAX];
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
	if (rc)
		return rc;

	dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);

	dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
	return 0;
}

static int
nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Nothing much to do if offload is not enabled */
	if (!(dev->tx_offloads &
	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
		return 0;

	/* Setup LSO formats in AF. It's a no-op if another ethdev has
	 * already set them up.
	 */
	rc = roc_nix_lso_fmt_setup(nix);
	if (rc)
		return rc;

	return nix_lso_tun_fmt_update(dev);
}
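
/* Device configure: validate the requested config, tear down any
 * previous LF resources (saving queue configs for restore), allocate
 * a fresh NIX LF with the new Rx/Tx queue counts, then set up LSO,
 * RSS, the TM hierarchy and interrupts before restoring the queues.
 */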
int
cnxk_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct roc_nix *nix = &dev->nix;
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	uint64_t rx_cfg;
	void *qs;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		plt_err("Huge page is not configured");
		goto fail_configure;
	}

	if (conf->dcb_capability_en == 1) {
		plt_err("dcb enable is not supported");
		goto fail_configure;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		plt_err("Flow director is not supported");
		goto fail_configure;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail_configure;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail_configure;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		/* Unregister queue irq's */
		roc_nix_unregister_queue_irqs(nix);

		/* Unregister CQ irqs if present */
		if (eth_dev->data->dev_conf.intr_conf.rxq)
			roc_nix_unregister_cq_irqs(nix);

		/* Set no-op functions */
		nix_set_nop_rxtx_function(eth_dev);
		/* Store queue config for later */
		rc = nix_store_queue_cfg_and_then_release(eth_dev);
		if (rc)
			goto fail_configure;
		roc_nix_tm_fini(nix);
		roc_nix_lf_free(nix);
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;

	/* Prepare rx cfg */
	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
	}
	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
	if (rc) {
		plt_err("Failed to init nix_lf rc=%d", rc);
		goto fail_configure;
	}

	nb_rxq = data->nb_rx_queues;
	nb_txq = data->nb_tx_queues;
	rc = -ENOMEM;
	if (nb_rxq) {
		/* Allocate memory for roc rq's and cq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc rqs");
			goto free_nix_lf;
		}
		dev->rqs = qs;

		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc cqs");
			goto free_nix_lf;
		}
		dev->cqs = qs;
	}

	if (nb_txq) {
		/* Allocate memory for roc sq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
		if (!qs) {
			plt_err("Failed to alloc sqs");
			goto free_nix_lf;
		}
		dev->sqs = qs;
	}

	/* Re-enable NIX LF error interrupts */
	roc_nix_err_intr_ena_dis(nix, true);
	roc_nix_ras_intr_ena_dis(nix, true);

	if (nix->rx_ptp_ena) {
		plt_err("Both PTP and switch header enabled");
		goto free_nix_lf;
	}

	/* Setup LSO if needed */
	rc = nix_lso_fmt_setup(dev);
	if (rc) {
		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
		goto free_nix_lf;
	}

	/* Configure RSS */
	rc = nix_rss_default_setup(dev);
	if (rc) {
		plt_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Init the default TM scheduler hierarchy */
	rc = roc_nix_tm_init(nix);
	if (rc) {
		plt_err("Failed to init traffic manager, rc=%d", rc);
		goto free_nix_lf;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
	if (rc) {
		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
		goto tm_fini;
	}

	/* Register queue IRQs */
	rc = roc_nix_register_queue_irqs(nix);
	if (rc) {
		plt_err("Failed to register queue interrupts rc=%d", rc);
		goto tm_fini;
	}

	/* Register cq IRQs */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
			plt_err("Rx interrupt cannot be enabled, rxq > %d",
				dev->nix.cints);
			goto q_irq_fini;
		}
		/* Rx interrupt feature cannot work with vector mode because
		 * vector mode does not process packets unless at least 4 are
		 * received, while cq interrupts are generated even for a
		 * single packet.
		 */
		dev->scalar_ena = true;

		rc = roc_nix_register_cq_irqs(nix);
		if (rc) {
			plt_err("Failed to register CQ interrupts rc=%d", rc);
			goto q_irq_fini;
		}
	}

	/* Configure loop back mode */
	rc = roc_nix_mac_loopback_enable(nix,
					 eth_dev->data->dev_conf.lpbk_mode);
	if (rc) {
		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
		goto cq_fini;
	}

	/* Restore queue config when reconfigure is followed by another
	 * reconfigure and no queue configure is invoked by the application.
	 */
	if (dev->configured == 1) {
		rc = nix_restore_queue_cfg(eth_dev);
		if (rc)
			goto cq_fini;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64,
		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
		    dev->rx_offloads, dev->tx_offloads);

	/* All good */
	dev->configured = 1;
	dev->nb_rxq = data->nb_rx_queues;
	dev->nb_txq = data->nb_tx_queues;
	return 0;

cq_fini:
	roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
	roc_nix_unregister_queue_irqs(nix);
tm_fini:
	roc_nix_tm_fini(nix);
free_nix_lf:
	nix_free_queue_mem(dev);
	rc |= roc_nix_lf_free(nix);
fail_configure:
	dev->configured = 0;
	return rc;
}

/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
	.dev_infos_get = cnxk_nix_info_get,
};
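
/* Note: the platform specific layers (cn9k/cn10k) are expected to
 * extend these common ops with their own callbacks at init time.
 */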

static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &cnxk_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Parse devargs string */
	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		plt_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	/* Initialize base roc nix */
	nix->pci_dev = pci_dev;
	rc = roc_nix_dev_init(nix);
	if (rc) {
		plt_err("Failed to initialize roc nix rc=%d", rc);
		goto error;
	}

	dev->eth_dev = eth_dev;
	dev->configured = 0;

	/* For VFs, the returned max_entries will be 0, but one entry must
	 * still be allocated to hold the default mac address, so set it to 1.
	 */
	if (roc_nix_is_vf_or_sdp(nix))
		max_entries = 1;
	else
		max_entries = roc_nix_mac_max_entries_get(nix);

	if (max_entries <= 0) {
		plt_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto dev_fini;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		plt_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto dev_fini;
	}

	dev->max_mac_entries = max_entries;

	/* Get mac address */
	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
	if (rc) {
		plt_err("Failed to get mac addr, rc=%d", rc);
		goto free_mac_addrs;
	}

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	if (!roc_nix_is_vf_or_sdp(nix)) {
		/* Sync same MAC address to CGX/RPM table */
		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
		if (rc) {
			plt_err("Failed to set mac addr, rc=%d", rc);
			goto free_mac_addrs;
		}
	}

	/* Union of all capabilities supported by CNXK.
	 * Platform specific capabilities will be
	 * updated later.
	 */
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->speed_capa = nix_get_speed_capa(dev);

	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
		    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		    eth_dev->data->port_id, roc_nix_get_pf(nix),
		    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
		    dev->rx_offload_capa, dev->tx_offload_capa);
	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
dev_fini:
	roc_nix_dev_fini(nix);
error:
	plt_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}
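
/* Tear down in the reverse order of init/configure: queues first, then
 * TM, IRQs and the NIX LF, and finally the mbox/dev itself when
 * mbox_close is set.
 */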
static int
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	roc_nix_npc_rx_ena_dis(nix, false);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	/* Free tm resources */
	roc_nix_tm_fini(nix);

	/* Unregister queue irqs */
	roc_nix_unregister_queue_irqs(nix);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		roc_nix_unregister_cq_irqs(nix);

	/* Free ROC RQ's, SQ's and CQ's memory */
	nix_free_queue_mem(dev);

	/* Free nix lf resources */
	rc = roc_nix_lf_free(nix);
	if (rc)
		plt_err("Failed to free nix lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	rc = roc_nix_dev_fini(nix);
	/* Can be freed later by PMD if NPA LF is in use */
	if (rc == -EAGAIN) {
		eth_dev->data->dev_private = NULL;
		return 0;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
	}

	return rc;
}
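
/* PCI remove: uninit the port, then release the common NIX resources
 * if this device was hosting them (see roc_idev_npa_nix_get()).
 */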
static int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct roc_nix *nix;
	int rc = -EINVAL;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = cnxk_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_release_port(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check if this device is hosting common resource */
	nix = roc_idev_npa_nix_get();
	if (!nix || nix->pci_dev != pci_dev)
		return 0;

	/* Try nix fini now */
	rc = roc_nix_dev_fini(nix);
	if (rc == -EAGAIN) {
		plt_info("%s: common resource in use by other devices",
			 pci_dev->name);
		goto exit;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
		goto exit;
	}

	/* Free device pointer as rte_ethdev does not have it anymore */
	rte_free(nix);
exit:
	return rc;
}

static int
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
					   cnxk_eth_dev_init);

	/* On error on secondary, recheck if the port exists in the primary
	 * or is in the middle of detaching.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			rc = 0;

	return rc;
}