1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
9 #include <rte_common.h>
10 #include <rte_ethdev_pci.h>
12 #include <rte_memory.h>
14 #include "txgbe_logs.h"
15 #include "base/txgbe.h"
16 #include "txgbe_ethdev.h"
17 #include "txgbe_rxtx.h"
19 static int txgbe_dev_close(struct rte_eth_dev *dev);
22 * The set of PCI devices this driver supports
/*
 * PCI ID table: the set of Wangxun devices this PMD binds to.
 * Scanned by the PCI bus layer; must end with a zeroed sentinel entry.
 */
24 static const struct rte_pci_id pci_id_txgbe_map[] = {
25 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
26 { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
27 { .vendor_id = 0, /* sentinel */ },
/* Rx descriptor ring limits, reported to applications via
 * txgbe_dev_info_get() (dev_info->rx_desc_lim).
 */
30 static const struct rte_eth_desc_lim rx_desc_lim = {
31 .nb_max = TXGBE_RING_DESC_MAX,
32 .nb_min = TXGBE_RING_DESC_MIN,
33 .nb_align = TXGBE_RXD_ALIGN,
/* Tx descriptor ring limits, reported to applications via
 * txgbe_dev_info_get() (dev_info->tx_desc_lim); also bounds the number
 * of segments per packet / per MTU-sized burst.
 */
36 static const struct rte_eth_desc_lim tx_desc_lim = {
37 .nb_max = TXGBE_RING_DESC_MAX,
38 .nb_min = TXGBE_RING_DESC_MIN,
39 .nb_align = TXGBE_TXD_ALIGN,
40 .nb_seg_max = TXGBE_TX_MAX_SEG,
41 .nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
44 static const struct eth_dev_ops txgbe_eth_dev_ops;
/*
 * Report whether the port's PHY is an SFP module, based on the PHY type
 * identified by the base code (hw->phy.type).  All known SFP vendor
 * variants, plus the "unknown" SFP types, are treated as SFP.
 */
47 txgbe_is_sfp(struct txgbe_hw *hw)
49 switch (hw->phy.type) {
50 case txgbe_phy_sfp_avago:
51 case txgbe_phy_sfp_ftl:
52 case txgbe_phy_sfp_intel:
53 case txgbe_phy_sfp_unknown:
54 case txgbe_phy_sfp_tyco_passive:
55 case txgbe_phy_sfp_unknown_passive:
/*
 * Per-port init callback, invoked through rte_eth_dev_create() from
 * eth_txgbe_pci_probe().  Wires up dev_ops, maps BAR0 registers into the
 * HW struct, reserves the interrupt status block (ISB) DMA zone,
 * initializes the base-code (shared) layer, EEPROM and MAC, allocates
 * the unicast and hash-filter MAC address tables, and enables the
 * uio/vfio interrupt/eventfd mapping.
 */
63 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
65 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
66 struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
67 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
68 const struct rte_memzone *mz;
72 PMD_INIT_FUNC_TRACE();
74 eth_dev->dev_ops = &txgbe_eth_dev_ops;
76 rte_eth_copy_pci_info(eth_dev, pci_dev);
78 /* Vendor and Device ID need to be set before init of shared code */
79 hw->device_id = pci_dev->id.device_id;
80 hw->vendor_id = pci_dev->id.vendor_id;
/* BAR0 holds the device register space used by the base driver. */
81 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
82 hw->allow_unsupported_sfp = 1;
84 /* Reserve memory for interrupt status block */
85 mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
86 16, TXGBE_ALIGN, SOCKET_ID_ANY);
/* Record both the IOVA (programmed into the NIC) and the VA (read by
 * the driver) of the interrupt status block.
 */
90 hw->isb_dma = TMZ_PADDR(mz);
91 hw->isb_mem = TMZ_VADDR(mz);
93 /* Initialize the shared code (base driver) */
94 err = txgbe_init_shared_code(hw);
96 PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
/* Bring up the EEPROM access functions before any EEPROM reads. */
100 err = hw->rom.init_params(hw);
102 PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
106 /* Make sure we have a good EEPROM before we read from it */
107 err = hw->rom.validate_checksum(hw, &csum);
109 PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
113 err = hw->mac.init_hw(hw);
116 * Devices with copper phys will fail to initialise if txgbe_init_hw()
117 * is called too soon after the kernel driver unbinding/binding occurs.
118 * The failure occurs in txgbe_identify_phy() for all devices,
119 * but for non-copper devices, txgbe_identify_sfp_module() is
120 * also called. See txgbe_identify_phy(). The reason for the
121 * failure is not known, and only occurs when virtualisation features
122 * are disabled in the bios. A delay of 200ms was found to be enough by
123 * trial-and-error, and is doubled to be safe.
/* Retry MAC init once for copper PHYs (see the note above). */
125 if (err && hw->phy.media_type == txgbe_media_type_copper) {
127 err = hw->mac.init_hw(hw);
/* Absent SFP module is tolerated here; the remaining codes are fatal. */
130 if (err == TXGBE_ERR_SFP_NOT_PRESENT)
133 if (err == TXGBE_ERR_EEPROM_VERSION) {
134 PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
135 "LOM. Please be aware there may be issues associated "
136 "with your hardware.")
137 PMD_INIT_LOG(ERR, "If you are experiencing problems "
138 "please contact your hardware representative "
139 "who provided you with this hardware.");
140 } else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
141 PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
144 PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
148 /* Allocate memory for storing MAC addresses */
149 eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
150 hw->mac.num_rar_entries, 0);
151 if (eth_dev->data->mac_addrs == NULL) {
153 "Failed to allocate %u bytes needed to store "
155 RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
159 /* Copy the permanent MAC address */
160 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
/* NOTE(review): "ð_dev" below looks like an HTML-entity corruption of
 * "&eth_dev" ("&eth;" -> U+00F0) — verify against the upstream source.
 */
161 ð_dev->data->mac_addrs[0]);
163 /* Allocate memory for storing hash filter MAC addresses */
164 eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
165 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
166 if (eth_dev->data->hash_mac_addrs == NULL) {
168 "Failed to allocate %d bytes needed to store MAC addresses",
169 RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
/* Log the identified MAC/PHY combination, including SFP type when an
 * SFP module is present.
 */
173 if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
174 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
175 (int)hw->mac.type, (int)hw->phy.type,
176 (int)hw->phy.sfp_type);
178 PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
179 (int)hw->mac.type, (int)hw->phy.type);
181 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
182 eth_dev->data->port_id, pci_dev->id.vendor_id,
183 pci_dev->id.device_id);
185 /* enable uio/vfio intr/eventfd mapping */
186 rte_intr_enable(intr_handle);
/*
 * Per-port uninit callback (passed to rte_eth_dev_destroy()).  Only the
 * primary process tears the device down; secondaries skip the close.
 */
192 eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
194 PMD_INIT_FUNC_TRACE();
196 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
199 txgbe_dev_close(eth_dev);
/*
 * PCI probe callback: parse optional devargs, then create the PF ethdev
 * (with a txgbe_adapter private area) via rte_eth_dev_create(), which
 * invokes eth_txgbe_dev_init() on the new port.
 */
205 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
206 struct rte_pci_device *pci_dev)
208 struct rte_eth_dev *pf_ethdev;
209 struct rte_eth_devargs eth_da;
/* Devargs are optional; when absent, eth_da is simply zeroed below. */
212 if (pci_dev->device.devargs) {
213 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
/* NOTE(review): "ð_da" looks like an HTML-entity corruption of
 * "&eth_da" — verify against the upstream source.
 */
218 memset(ð_da, 0, sizeof(eth_da));
221 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
222 sizeof(struct txgbe_adapter),
223 eth_dev_pci_specific_init, pci_dev,
224 eth_txgbe_dev_init, NULL);
226 if (retval || eth_da.nb_representor_ports < 1)
/* Look the PF port back up by name for any follow-up setup. */
229 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
230 if (pf_ethdev == NULL)
/*
 * PCI remove callback: find the ethdev created for this PCI device by
 * name and destroy it, running eth_txgbe_dev_uninit() on the port.
 */
236 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
238 struct rte_eth_dev *ethdev;
240 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
244 return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
/*
 * PCI driver descriptor registered below via RTE_PMD_REGISTER_PCI.
 * Requests BAR mapping and link-state-change interrupt support.
 */
247 static struct rte_pci_driver rte_txgbe_pmd = {
248 .id_table = pci_id_txgbe_map,
249 .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
250 RTE_PCI_DRV_INTR_LSC,
251 .probe = eth_txgbe_pci_probe,
252 .remove = eth_txgbe_pci_remove,
256 * Reset and stop device.
/*
 * dev_close path: disable the uio/vfio interrupt before any callback
 * unregistration, then release the MAC address tables allocated in
 * eth_txgbe_dev_init() (pointers are NULLed to avoid double free).
 */
259 txgbe_dev_close(struct rte_eth_dev *dev)
261 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
262 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
264 PMD_INIT_FUNC_TRACE();
266 /* disable uio intr before callback unregister */
267 rte_intr_disable(intr_handle);
269 rte_free(dev->data->mac_addrs);
270 dev->data->mac_addrs = NULL;
272 rte_free(dev->data->hash_mac_addrs);
273 dev->data->hash_mac_addrs = NULL;
/*
 * dev_infos_get callback: report device capabilities and defaults —
 * queue counts, offload capabilities, descriptor limits, RSS
 * parameters, link speeds and preferred ring/burst sizes.
 */
279 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
281 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
282 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
284 dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
285 dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
286 dev_info->min_rx_bufsize = 1024;
287 dev_info->max_rx_pktlen = 15872;
288 dev_info->max_mac_addrs = hw->mac.num_rar_entries;
289 dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
290 dev_info->max_vfs = pci_dev->max_vfs;
291 dev_info->max_vmdq_pools = ETH_64_POOLS;
292 dev_info->vmdq_queue_num = dev_info->max_rx_queues;
/* Port-level Rx capa is the union of port and per-queue offloads. */
293 dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
294 dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
295 dev_info->rx_queue_offload_capa);
296 dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
297 dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);
/* Default Rx configuration applied when the app passes NULL rxconf. */
299 dev_info->default_rxconf = (struct rte_eth_rxconf) {
301 .pthresh = TXGBE_DEFAULT_RX_PTHRESH,
302 .hthresh = TXGBE_DEFAULT_RX_HTHRESH,
303 .wthresh = TXGBE_DEFAULT_RX_WTHRESH,
305 .rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
/* Default Tx configuration applied when the app passes NULL txconf. */
310 dev_info->default_txconf = (struct rte_eth_txconf) {
312 .pthresh = TXGBE_DEFAULT_TX_PTHRESH,
313 .hthresh = TXGBE_DEFAULT_TX_HTHRESH,
314 .wthresh = TXGBE_DEFAULT_TX_WTHRESH,
316 .tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
320 dev_info->rx_desc_lim = rx_desc_lim;
321 dev_info->tx_desc_lim = tx_desc_lim;
323 dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
324 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
325 dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
327 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
328 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
330 /* Driver-preferred Rx/Tx parameters */
331 dev_info->default_rxportconf.burst_size = 32;
332 dev_info->default_txportconf.burst_size = 32;
333 dev_info->default_rxportconf.nb_queues = 1;
334 dev_info->default_txportconf.nb_queues = 1;
335 dev_info->default_rxportconf.ring_size = 256;
336 dev_info->default_txportconf.ring_size = 256;
/* ethdev ops table installed on each port in eth_txgbe_dev_init()
 * (forward-declared near the top of the file).
 */
341 static const struct eth_dev_ops txgbe_eth_dev_ops = {
342 .dev_infos_get = txgbe_dev_info_get,
/* Register the PMD with the PCI bus under the name "net_txgbe", export
 * its PCI ID table, and declare the kernel modules it can bind through.
 */
345 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
346 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
347 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
/* Dynamic log types: init/driver default to NOTICE; the datapath log
 * types below are compiled in (at DEBUG) only when their build-time
 * debug options are enabled.
 */
349 RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
350 RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
352 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
353 RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
355 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
356 RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
359 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
360 RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);