1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_memory.h>
14 #include "txgbe_logs.h"
15 #include "base/txgbe.h"
16 #include "txgbe_ethdev.h"
/* Forward declaration: txgbe_dev_close() is defined near the bottom of the
 * file but is called by eth_txgbe_dev_uninit() above its definition. */
18 static int txgbe_dev_close(struct rte_eth_dev *dev);
21 * The set of PCI devices this driver supports
/* PCI ID match table consumed by rte_txgbe_pmd.id_table and exported via
 * RTE_PMD_REGISTER_PCI_TABLE below.  The zero vendor_id entry terminates
 * the table. */
23 static const struct rte_pci_id pci_id_txgbe_map[] = {
24 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
25 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
26 { .vendor_id = 0, /* sentinel */ },
/* Forward declaration of the ethdev ops table; eth_txgbe_dev_init() takes
 * its address before the table's definition at the bottom of the file. */
29 static const struct eth_dev_ops txgbe_eth_dev_ops;
/* Report whether the PHY type recorded in @hw is an SFP module variant.
 * Used by eth_txgbe_dev_init() to pick which DEBUG banner to log.
 * NOTE(review): the return statements and closing brace for both the
 * SFP and non-SFP cases are elided from this listing chunk. */
32 txgbe_is_sfp(struct txgbe_hw *hw)
34 switch (hw->phy.type) {
35 case txgbe_phy_sfp_avago:
36 case txgbe_phy_sfp_ftl:
37 case txgbe_phy_sfp_intel:
38 case txgbe_phy_sfp_unknown:
39 case txgbe_phy_sfp_tyco_passive:
40 case txgbe_phy_sfp_unknown_passive:
/*
 * Per-port init callback, passed to rte_eth_dev_create() by
 * eth_txgbe_pci_probe().  Wires up the ops table, seeds the base-driver
 * hw struct from PCI config/BAR0, reserves the interrupt status block,
 * allocates the unicast and hash-filter MAC address arrays, and enables
 * interrupt mapping.
 *
 * Fix: the destination argument of rte_ether_addr_copy() below had been
 * corrupted by an "&eth" -> "ð" HTML-entity mangling; restored to
 * "&eth_dev->...".
 *
 * NOTE(review): several error-path lines (NULL/err checks, returns) and
 * the declaration of `err` are elided from this listing chunk.
 */
48 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
50 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
51 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
52 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
53 const struct rte_memzone *mz;
56 PMD_INIT_FUNC_TRACE();
/* Install the ethdev ops table defined at the bottom of the file. */
58 eth_dev->dev_ops = &txgbe_eth_dev_ops;
60 rte_eth_copy_pci_info(eth_dev, pci_dev);
62 /* Vendor and Device ID need to be set before init of shared code */
63 hw->device_id = pci_dev->id.device_id;
64 hw->vendor_id = pci_dev->id.vendor_id;
/* BAR0 is the register space used by the base driver. */
65 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
66 hw->allow_unsupported_sfp = 1;
68 /* Reserve memory for interrupt status block */
69 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
70 16, TXGBE_ALIGN, SOCKET_ID_ANY);
/* Record both the IOVA (for the NIC) and the VA (for the driver). */
74 hw->isb_dma = TMZ_PADDR(mz);
75 hw->isb_mem = TMZ_VADDR(mz);
77 /* Initialize the shared code (base driver) */
78 err = txgbe_init_shared_code(hw);
80 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
84 /* Allocate memory for storing MAC addresses */
85 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
86 hw->mac.num_rar_entries, 0);
87 if (eth_dev->data->mac_addrs == NULL) {
89 "Failed to allocate %u bytes needed to store "
91 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
95 /* Copy the permanent MAC address */
96 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
97 &eth_dev->data->mac_addrs[0]);
99 /* Allocate memory for storing hash filter MAC addresses */
100 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
101 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
102 if (eth_dev->data->hash_mac_addrs == NULL) {
104 "Failed to allocate %d bytes needed to store MAC addresses",
105 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
/* Log an SFP banner only when a module is actually present. */
109 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
110 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
111 (int)hw->mac.type, (int)hw->phy.type,
112 (int)hw->phy.sfp_type);
114 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
115 (int)hw->mac.type, (int)hw->phy.type);
117 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
118 eth_dev->data->port_id, pci_dev->id.vendor_id,
119 pci_dev->id.device_id);
121 /* enable uio/vfio intr/eventfd mapping */
122 rte_intr_enable(intr_handle);
/* Per-port uninit callback passed to rte_eth_dev_destroy() by
 * eth_txgbe_pci_remove(); delegates the actual teardown to
 * txgbe_dev_close().  Only the primary process performs the teardown
 * (secondary processes bail out on the process-type check below). */
128 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
130 PMD_INIT_FUNC_TRACE();
132 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
135 txgbe_dev_close(eth_dev);
/*
 * PCI probe callback for rte_txgbe_pmd: parses device devargs (when
 * present), creates the PF ethdev via rte_eth_dev_create() with
 * eth_txgbe_dev_init as the per-port init hook, then looks the port
 * back up by PCI device name.
 *
 * Fix: the memset() argument below had been corrupted by an
 * "&eth" -> "ð" HTML-entity mangling; restored to "&eth_da".
 *
 * NOTE(review): the declaration of `retval`, the remaining
 * rte_eth_devargs_parse() arguments, and the error-path returns are
 * elided from this listing chunk.
 */
141 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
142 struct rte_pci_device *pci_dev)
144 struct rte_eth_dev *pf_ethdev;
145 struct rte_eth_devargs eth_da;
/* Parse devargs if the user supplied any; otherwise zero eth_da. */
148 if (pci_dev->device.devargs) {
149 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
154 memset(&eth_da, 0, sizeof(eth_da));
157 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
158 sizeof(struct txgbe_adapter),
159 eth_dev_pci_specific_init, pci_dev,
160 eth_txgbe_dev_init, NULL);
162 if (retval || eth_da.nb_representor_ports < 1)
165 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
166 if (pf_ethdev == NULL)
/* PCI remove callback for rte_txgbe_pmd: looks up the ethdev allocated
 * for this PCI device by name and destroys it, running
 * eth_txgbe_dev_uninit() as the per-port uninit hook. */
172 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
174 struct rte_eth_dev *ethdev;
176 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
180 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/* PCI driver descriptor registered via RTE_PMD_REGISTER_PCI below.
 * NEED_MAPPING requests BAR mapping by the PCI bus; INTR_LSC enables
 * link-status-change interrupt support. */
183 static struct rte_pci_driver rte_txgbe_pmd = {
184 .id_table = pci_id_txgbe_map,
185 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
186 RTE_PCI_DRV_INTR_LSC,
187 .probe = eth_txgbe_pci_probe,
188 .remove = eth_txgbe_pci_remove,
192 * Reset and stop device.
195 txgbe_dev_close(struct rte_eth_dev *dev)
197 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
198 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
200 PMD_INIT_FUNC_TRACE();
202 /* disable uio intr before callback unregister */
203 rte_intr_disable(intr_handle);
/* Release the MAC address arrays allocated in eth_txgbe_dev_init();
 * NULL the pointers afterwards to guard against double free. */
205 rte_free(dev->data->mac_addrs);
206 dev->data->mac_addrs = NULL;
208 rte_free(dev->data->hash_mac_addrs);
209 dev->data->hash_mac_addrs = NULL;
/* Definition of the ethdev ops table installed in eth_txgbe_dev_init().
 * NOTE(review): its initializer entries are elided from this listing chunk. */
214 static const struct eth_dev_ops txgbe_eth_dev_ops = {
/* Register the driver, its PCI ID table, and the kernel modules it can
 * bind to; then register the PMD's log types at NOTICE level, plus
 * optional per-path data-plane log types gated by build-time debug flags.
 * NOTE(review): the matching #endif lines for the #ifdef blocks below are
 * elided from this listing chunk. */
217 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
218 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
219 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
221 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
222 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
224 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
225 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
227 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
228 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
231 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
232 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);