1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
4 #include <cnxk_ethdev.h>
/* Return the Rx offload capability bitmask advertised for this device.
 * Starts from the platform-wide CNXK_NIX_RX_OFFLOAD_CAPA mask and strips
 * bits that this particular port cannot support.
 * NOTE(review): function prologue/epilogue fall outside this chunk.
 */
7 nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
9 uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;
/* VF and SDP interfaces cannot timestamp Rx packets, so the
 * TIMESTAMP offload must not be advertised for them.
 */
11 if (roc_nix_is_vf_or_sdp(&dev->nix))
12 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
/* Return the Tx offload capability bitmask for this device.
 * All CNXK ports currently share the same Tx capabilities; the
 * @dev argument is kept for symmetry with nix_get_rx_offload_capa().
 */
17 static inline uint64_t
18 nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
21 return CNXK_NIX_TX_OFFLOAD_CAPA;
/* Return the link-speed capability mask (ETH_LINK_SPEED_*) for this device.
 * NOTE(review): the closing return falls outside this chunk; presumably
 * returns speed_capa — confirm against the full file.
 */
24 static inline uint32_t
25 nix_get_speed_capa(struct cnxk_eth_dev *dev)
29 /* Auto negotiation disabled */
30 speed_capa = ETH_LINK_SPEED_FIXED;
/* Only physical (PF, non-loopback) ports report real link speeds;
 * VF/SDP and LBK devices have no physical link.
 */
31 if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
32 speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
33 ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
34 ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
40 /* CNXK platform independent eth dev ops */
/* NOTE(review): non-const so entries can be filled in elsewhere — the
 * remainder of this ops table is outside this chunk; only
 * dev_infos_get is visible here.
 */
41 struct eth_dev_ops cnxk_eth_dev_ops = {
42 .dev_infos_get = cnxk_nix_info_get,
/* One-time per-port initialization called from the PCI probe path.
 * Sets up the ops table, parses devargs, brings up the base ROC NIX,
 * allocates the MAC address table and programs the default MAC.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): the error-handling lines between the calls and the
 * cleanup labels at the end (freeing mac_addrs / roc_nix_dev_fini) are
 * truncated in this view — presumably the usual `if (rc) goto ...`
 * unwind pattern; confirm against the full file.
 */
46 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
48 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
49 struct roc_nix *nix = &dev->nix;
50 struct rte_pci_device *pci_dev;
53 eth_dev->dev_ops = &cnxk_eth_dev_ops;
55 /* For secondary processes, the primary has done all the work */
56 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
59 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
60 rte_eth_copy_pci_info(eth_dev, pci_dev);
62 /* Parse devargs string */
63 rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
65 plt_err("Failed to parse devargs rc=%d", rc);
69 /* Initialize base roc nix */
70 nix->pci_dev = pci_dev;
71 rc = roc_nix_dev_init(nix);
73 plt_err("Failed to initialize roc nix rc=%d", rc);
77 dev->eth_dev = eth_dev;
79 /* For vfs, returned max_entries will be 0. but to keep default mac
80 * address, one entry must be allocated. so setting up to 1.
82 if (roc_nix_is_vf_or_sdp(nix))
85 max_entries = roc_nix_mac_max_entries_get(nix);
87 if (max_entries <= 0) {
88 plt_err("Failed to get max entries for mac addr");
/* Zeroed table so unused slots read as the all-zero (invalid) MAC */
93 eth_dev->data->mac_addrs =
94 rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
95 if (eth_dev->data->mac_addrs == NULL) {
96 plt_err("Failed to allocate memory for mac addr");
101 dev->max_mac_entries = max_entries;
103 /* Get mac address */
104 rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
106 plt_err("Failed to get mac addr, rc=%d", rc);
110 /* Update the mac address */
111 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
/* PF ports only: VF/SDP MACs are managed by the PF/kernel side */
113 if (!roc_nix_is_vf_or_sdp(nix)) {
114 /* Sync same MAC address to CGX/RPM table */
115 rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
117 plt_err("Failed to set mac addr, rc=%d", rc);
122 /* Union of all capabilities supported by CNXK.
123 * Platform specific capabilities will be
126 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
127 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
128 dev->speed_capa = nix_get_speed_capa(dev);
130 /* Initialize roc npc */
131 plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
132 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
133 eth_dev->data->port_id, roc_nix_get_pf(nix),
134 roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
135 dev->rx_offload_capa, dev->tx_offload_capa);
/* Error unwind: release MAC table, then tear down the base ROC NIX */
139 rte_free(eth_dev->data->mac_addrs);
141 roc_nix_dev_fini(nix);
143 plt_err("Failed to init nix eth_dev rc=%d", rc);
/* Tear down a port: stop NPC Rx, release all Tx/Rx queues via the ops
 * table, free traffic-manager state, unregister IRQs, free the NIX LF
 * and the MAC table.  @mbox_close selects whether the underlying ROC
 * NIX device (mailbox) is also finalized — false is used when only the
 * ethdev-level state must go away.
 * NOTE(review): several error-path and return lines are truncated in
 * this view; confirm the exact unwind order against the full file.
 */
148 cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
150 struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
151 const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
152 struct roc_nix *nix = &dev->nix;
155 /* Nothing to be done for secondary processes */
156 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
/* Stop NPC-steered Rx before tearing queues down */
159 roc_nix_npc_rx_ena_dis(nix, false);
162 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
163 dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
164 eth_dev->data->tx_queues[i] = NULL;
166 eth_dev->data->nb_tx_queues = 0;
168 /* Free up RQ's and CQ's */
169 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
170 dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
171 eth_dev->data->rx_queues[i] = NULL;
173 eth_dev->data->nb_rx_queues = 0;
175 /* Free tm resources */
176 roc_nix_tm_fini(nix);
178 /* Unregister queue irqs */
179 roc_nix_unregister_queue_irqs(nix);
181 /* Unregister cq irqs */
/* CQ IRQs exist only when Rx interrupts were configured */
182 if (eth_dev->data->dev_conf.intr_conf.rxq)
183 roc_nix_unregister_cq_irqs(nix);
185 /* Free nix lf resources */
186 rc = roc_nix_lf_free(nix);
188 plt_err("Failed to free nix lf, rc=%d", rc);
190 rte_free(eth_dev->data->mac_addrs);
191 eth_dev->data->mac_addrs = NULL;
193 /* Check if mbox close is needed */
197 rc = roc_nix_dev_fini(nix);
198 /* Can be freed later by PMD if NPA LF is in use */
200 eth_dev->data->dev_private = NULL;
203 plt_err("Failed in nix dev fini, rc=%d", rc);
/* PCI hot-remove handler: uninit the ethdev (with mailbox close) and
 * release the port.  If this PCI device was also hosting the shared
 * NPA/NIX resource, attempt the common roc_nix fini as well; that can
 * legitimately be deferred while other devices still use the resource.
 * NOTE(review): early-exit and return lines are truncated in this view.
 */
210 cnxk_nix_remove(struct rte_pci_device *pci_dev)
212 struct rte_eth_dev *eth_dev;
216 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
218 /* Cleanup eth dev */
219 rc = cnxk_eth_dev_uninit(eth_dev, true);
223 rte_eth_dev_release_port(eth_dev);
226 /* Nothing to be done for secondary processes */
227 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
230 /* Check if this device is hosting common resource */
231 nix = roc_idev_npa_nix_get();
232 if (nix->pci_dev != pci_dev)
235 /* Try nix fini now */
236 rc = roc_nix_dev_fini(nix);
/* -EAGAIN-style outcome: another device still holds the common
 * resource; fini will happen when the last user goes away.
 */
238 plt_info("%s: common resource in use by other devices",
242 plt_err("Failed in nix dev fini, rc=%d", rc);
246 /* Free device pointer as rte_ethdev does not have it anymore */
/* PCI probe handler: allocate the ethdev with cnxk_eth_dev private data
 * via the generic PCI probe helper.  On secondary-process failure,
 * checks whether the port simply does not exist in the primary (mid
 * detach) — NOTE(review): the handling after that check is truncated in
 * this view; presumably the error is downgraded. Confirm in full file.
 */
253 cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
257 RTE_SET_USED(pci_drv);
259 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
262 /* On error on secondary, recheck if port exists in primary or
263 * in mid of detach state.
265 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
266 if (!rte_eth_dev_allocated(pci_dev->device.name))