1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_memory.h>
14 #include "txgbe_logs.h"
15 #include "base/txgbe.h"
16 #include "txgbe_ethdev.h"
18 static int txgbe_dev_close(struct rte_eth_dev *dev);
21 * The set of PCI devices this driver supports
23 static const struct rte_pci_id pci_id_txgbe_map[] = {
/* Wangxun 10GbE (txgbe) SFP devices: Raptor and WX1820. */
24 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
25 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
/* Zeroed vendor_id terminates the table for the PCI bus scan. */
26 { .vendor_id = 0, /* sentinel */ },
29 static const struct eth_dev_ops txgbe_eth_dev_ops;
/*
 * Classify the PHY type recorded in @hw as an SFP module or not.
 * The switch below enumerates the SFP PHY variants; the return
 * statements are not visible in this chunk — NOTE(review): presumably
 * these cases return true and the default returns false, as in the
 * upstream driver; confirm against the full file.
 */
32 txgbe_is_sfp(struct txgbe_hw *hw)
34 switch (hw->phy.type) {
35 case txgbe_phy_sfp_avago:
36 case txgbe_phy_sfp_ftl:
37 case txgbe_phy_sfp_intel:
38 case txgbe_phy_sfp_unknown:
39 case txgbe_phy_sfp_tyco_passive:
40 case txgbe_phy_sfp_unknown_passive:
/*
 * Per-device init callback invoked by rte_eth_dev_create() from
 * eth_txgbe_pci_probe(): binds the ops table, maps BAR0, reserves the
 * interrupt status block, runs the base-driver (shared code) init,
 * validates the EEPROM, initializes the MAC and allocates the unicast
 * and hash MAC address arrays.  Error-handling/goto lines are elided
 * in this chunk; only the visible defect is fixed below.
 *
 * FIX(review): line "146" read "ð_dev->..." — the HTML entity
 * "&eth;" (U+00F0 ð) had swallowed the address-of operator; restored
 * to "&eth_dev->data->mac_addrs[0]".
 */
48 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
50 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
51 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
52 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
53 const struct rte_memzone *mz;
57 PMD_INIT_FUNC_TRACE();
59 eth_dev->dev_ops = &txgbe_eth_dev_ops;
61 rte_eth_copy_pci_info(eth_dev, pci_dev);
63 /* Vendor and Device ID need to be set before init of shared code */
64 hw->device_id = pci_dev->id.device_id;
65 hw->vendor_id = pci_dev->id.vendor_id;
66 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
67 hw->allow_unsupported_sfp = 1;
69 /* Reserve memory for interrupt status block */
70 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
71 16, TXGBE_ALIGN, SOCKET_ID_ANY);
75 hw->isb_dma = TMZ_PADDR(mz);
76 hw->isb_mem = TMZ_VADDR(mz);
78 /* Initialize the shared code (base driver) */
79 err = txgbe_init_shared_code(hw);
81 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
85 err = hw->rom.init_params(hw);
87 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
91 /* Make sure we have a good EEPROM before we read from it */
92 err = hw->rom.validate_checksum(hw, &csum);
94 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
98 err = hw->mac.init_hw(hw);
101 * Devices with copper phys will fail to initialise if txgbe_init_hw()
102 * is called too soon after the kernel driver unbinding/binding occurs.
103 * The failure occurs in txgbe_identify_phy() for all devices,
104 * but for non-copper devices, txgbe_identify_sfp_module() is
105 * also called. See txgbe_identify_phy(). The reason for the
106 * failure is not known, and only occurs when virtualisation features
107 * are disabled in the bios. A delay of 200ms was found to be enough by
108 * trial-and-error, and is doubled to be safe.
/* Copper PHY workaround: wait out the unbind/bind race, then retry. */
110 if (err && hw->phy.media_type == txgbe_media_type_copper) {
112 err = hw->mac.init_hw(hw);
115 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
118 if (err == TXGBE_ERR_EEPROM_VERSION) {
119 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
120 "LOM. Please be aware there may be issues associated "
121 "with your hardware.");
122 PMD_INIT_LOG(ERR, "If you are experiencing problems "
123 "please contact your hardware representative "
124 "who provided you with this hardware.");
125 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
126 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
129 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
133 /* Allocate memory for storing MAC addresses */
134 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
135 hw->mac.num_rar_entries, 0);
136 if (eth_dev->data->mac_addrs == NULL) {
138 "Failed to allocate %u bytes needed to store "
140 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
144 /* Copy the permanent MAC address */
145 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
146 &eth_dev->data->mac_addrs[0]);
148 /* Allocate memory for storing hash filter MAC addresses */
149 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
150 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
151 if (eth_dev->data->hash_mac_addrs == NULL) {
153 "Failed to allocate %d bytes needed to store MAC addresses",
154 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
158 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
159 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
160 (int)hw->mac.type, (int)hw->phy.type,
161 (int)hw->phy.sfp_type);
163 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
164 (int)hw->mac.type, (int)hw->phy.type);
166 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
167 eth_dev->data->port_id, pci_dev->id.vendor_id,
168 pci_dev->id.device_id);
170 /* enable uio/vfio intr/eventfd mapping */
171 rte_intr_enable(intr_handle);
/*
 * Per-device teardown callback passed to rte_eth_dev_destroy().
 * Only the primary process owns the hardware, so secondary processes
 * bail out before the close path runs.
 */
177 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
179 PMD_INIT_FUNC_TRACE();
181 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
184 txgbe_dev_close(eth_dev);
/*
 * PCI probe entry point: parses optional devargs, then creates the PF
 * ethdev via rte_eth_dev_create() with eth_txgbe_dev_init() as the
 * per-device init callback.  Representor handling and several
 * error-return lines are elided in this chunk.
 *
 * FIX(review): line "203" read "memset(ð_da, ...)" — the HTML
 * entity "&eth;" (U+00F0 ð) had swallowed the address-of operator;
 * restored to "memset(&eth_da, ...)".
 */
190 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
191 struct rte_pci_device *pci_dev)
193 struct rte_eth_dev *pf_ethdev;
194 struct rte_eth_devargs eth_da;
197 if (pci_dev->device.devargs) {
198 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
/* No devargs: start from a zeroed rte_eth_devargs. */
203 memset(&eth_da, 0, sizeof(eth_da));
206 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
207 sizeof(struct txgbe_adapter),
208 eth_dev_pci_specific_init, pci_dev,
209 eth_txgbe_dev_init, NULL);
211 if (retval || eth_da.nb_representor_ports < 1)
214 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
215 if (pf_ethdev == NULL)
/*
 * PCI remove entry point: looks up the ethdev allocated for this PCI
 * device by name and destroys it, routing per-device teardown through
 * eth_txgbe_dev_uninit().  The NULL-check on the lookup result is
 * elided in this chunk.
 */
221 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
223 struct rte_eth_dev *ethdev;
225 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
229 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/*
 * PCI driver descriptor registered below via RTE_PMD_REGISTER_PCI().
 * NEED_MAPPING requests BAR mapping by the PCI bus; INTR_LSC enables
 * link-status-change interrupt support.
 */
232 static struct rte_pci_driver rte_txgbe_pmd = {
233 .id_table = pci_id_txgbe_map,
234 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
235 RTE_PCI_DRV_INTR_LSC,
236 .probe = eth_txgbe_pci_probe,
237 .remove = eth_txgbe_pci_remove,
241 * Reset and stop device.
244 txgbe_dev_close(struct rte_eth_dev *dev)
246 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
247 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
249 PMD_INIT_FUNC_TRACE();
251 /* disable uio intr before callback unregister */
252 rte_intr_disable(intr_handle);
/* Release the MAC address arrays allocated in eth_txgbe_dev_init();
 * pointers are NULLed so a repeated close cannot double-free.
 */
254 rte_free(dev->data->mac_addrs);
255 dev->data->mac_addrs = NULL;
257 rte_free(dev->data->hash_mac_addrs);
258 dev->data->hash_mac_addrs = NULL;
/* Ethdev callback table bound in eth_txgbe_dev_init(); its member
 * initializers are elided in this chunk.
 */
263 static const struct eth_dev_ops txgbe_eth_dev_ops = {
/* Register the PMD with the PCI bus, export its device-ID table, and
 * declare the kernel modules (igb_uio/uio_pci_generic/vfio-pci) it can
 * bind against.
 */
266 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
267 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
268 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
/* Log types: init/driver default to NOTICE; rx/tx/tx_free logs are
 * compiled in (at DEBUG) only when the matching build option is set.
 */
270 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
271 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
273 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
274 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
276 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
277 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
280 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
281 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);