/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <rte_common.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_memory.h>

#include "txgbe_logs.h"
#include "base/txgbe.h"
#include "txgbe_ethdev.h"
#include "txgbe_rxtx.h"

static int txgbe_dev_close(struct rte_eth_dev *dev);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_txgbe_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
	{ .vendor_id = 0, /* sentinel */ },
};

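/*
 * Rx/Tx descriptor ring limits advertised to applications through
 * txgbe_dev_info_get() below.
 */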
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = TXGBE_RING_DESC_MAX,
	.nb_min = TXGBE_RING_DESC_MIN,
	.nb_align = TXGBE_TXD_ALIGN,
	.nb_seg_max = TXGBE_TX_MAX_SEG,
	.nb_mtu_seg_max = TXGBE_TX_MAX_SEG,
};

static const struct eth_dev_ops txgbe_eth_dev_ops;

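/*
 * Return 1 if the attached PHY is an SFP module (including unknown and
 * passive variants), 0 otherwise.
 */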
static inline int
txgbe_is_sfp(struct txgbe_hw *hw)
{
	switch (hw->phy.type) {
	case txgbe_phy_sfp_avago:
	case txgbe_phy_sfp_ftl:
	case txgbe_phy_sfp_intel:
	case txgbe_phy_sfp_unknown:
	case txgbe_phy_sfp_tyco_passive:
	case txgbe_phy_sfp_unknown_passive:
		return 1;
	default:
		return 0;
	}
}

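/*
 * Per-port init: record PCI IDs, map BAR 0 as the register space, reserve
 * a DMA zone for the interrupt status block, run the base-code EEPROM,
 * checksum and MAC init, then allocate the exact-match and hash-filter
 * MAC address tables before enabling interrupts.
 */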
static int
eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	const struct rte_memzone *mz;
	uint16_t csum;
	int err;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &txgbe_eth_dev_ops;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
	hw->allow_unsupported_sfp = 1;

	/* Reserve memory for interrupt status block */
	mz = rte_eth_dma_zone_reserve(eth_dev, "txgbe_driver", -1,
		16, TXGBE_ALIGN, SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;

	hw->isb_dma = TMZ_PADDR(mz);
	hw->isb_mem = TMZ_VADDR(mz);

	/* Initialize the shared code (base driver) */
	err = txgbe_init_shared_code(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", err);
		return -EIO;
	}

	err = hw->rom.init_params(hw);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
		return -EIO;
	}

	/* Make sure we have a good EEPROM before we read from it */
	err = hw->rom.validate_checksum(hw, &csum);
	if (err != 0) {
		PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", err);
		return -EIO;
	}

	err = hw->mac.init_hw(hw);

	/*
	 * Devices with copper PHYs will fail to initialize if txgbe_init_hw()
	 * is called too soon after the kernel driver is unbound and rebound.
	 * The failure occurs in txgbe_identify_phy() for all devices, but for
	 * non-copper devices txgbe_identify_sfp_module() is also called. See
	 * txgbe_identify_phy(). The reason for the failure is not known, and
	 * it only occurs when virtualization features are disabled in the
	 * BIOS. A delay of 200 ms was found to be enough by trial and error,
	 * and is doubled here to be safe.
	 */
	if (err && hw->phy.media_type == txgbe_media_type_copper) {
		rte_delay_ms(200);
		err = hw->mac.init_hw(hw);
	}

	if (err == TXGBE_ERR_SFP_NOT_PRESENT)
		err = 0;

	if (err == TXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
			     "LOM.  Please be aware there may be issues associated "
			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
			     "please contact your hardware representative "
			     "who provided you with this hardware.");
	} else if (err == TXGBE_ERR_SFP_NOT_SUPPORTED) {
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	}
	if (err) {
		PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", err);
		return -EIO;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("txgbe", RTE_ETHER_ADDR_LEN *
					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %u bytes needed to store "
			     "MAC addresses",
			     RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("txgbe",
			RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * TXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

	if (txgbe_is_sfp(hw) && hw->phy.sfp_type != txgbe_sfp_type_not_present)
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
			     (int)hw->mac.type, (int)hw->phy.type,
			     (int)hw->phy.sfp_type);
	else
		PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
			     (int)hw->mac.type, (int)hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	return 0;
}

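/*
 * Per-port teardown. Secondary processes share the primary's device
 * state, so only the primary process performs the close.
 */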
static int
eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	txgbe_dev_close(eth_dev);

	return 0;
}

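/*
 * PCI probe callback: parse devargs (if any) attached to the device,
 * then create an ethdev backed by a struct txgbe_adapter.
 */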
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *pf_ethdev;
	struct rte_eth_devargs eth_da;
	int retval;

	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
				&eth_da);
		if (retval)
			return retval;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct txgbe_adapter),
			eth_dev_pci_specific_init, pci_dev,
			eth_txgbe_dev_init, NULL);

	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;

	return 0;
}

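/* PCI remove callback: look up the ethdev by PCI name and destroy it. */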
static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;

	return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
}

static struct rte_pci_driver rte_txgbe_pmd = {
	.id_table = pci_id_txgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING |
		     RTE_PCI_DRV_INTR_LSC,
	.probe = eth_txgbe_pci_probe,
	.remove = eth_txgbe_pci_remove,
};

/*
 * Reset and stop device.
 */
static int
txgbe_dev_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	rte_free(dev->data->hash_mac_addrs);
	dev->data->hash_mac_addrs = NULL;

	return 0;
}

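/*
 * Fill in device capabilities: queue counts, offloads, descriptor limits,
 * RSS parameters, supported link speeds and the preferred defaults below.
 */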
static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);

	dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
	dev_info->min_rx_bufsize = 1024;
	dev_info->max_rx_pktlen = 15872;
	dev_info->max_mac_addrs = hw->mac.num_rar_entries;
	dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
	dev_info->max_vfs = pci_dev->max_vfs;
	dev_info->max_vmdq_pools = ETH_64_POOLS;
	dev_info->vmdq_queue_num = dev_info->max_rx_queues;
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
				     dev_info->rx_queue_offload_capa);
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev);

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = TXGBE_DEFAULT_RX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_RX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = TXGBE_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = TXGBE_DEFAULT_TX_PTHRESH,
			.hthresh = TXGBE_DEFAULT_TX_HTHRESH,
			.wthresh = TXGBE_DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = TXGBE_DEFAULT_TX_FREE_THRESH,
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
	dev_info->speed_capa |= ETH_LINK_SPEED_100M;

	/* Driver-preferred Rx/Tx parameters */
	dev_info->default_rxportconf.burst_size = 32;
	dev_info->default_txportconf.burst_size = 32;
	dev_info->default_rxportconf.nb_queues = 1;
	dev_info->default_txportconf.nb_queues = 1;
	dev_info->default_rxportconf.ring_size = 256;
	dev_info->default_txportconf.ring_size = 256;

	return 0;
}

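/* Device operations exported to ethdev; only dev_infos_get is wired up. */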
static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_infos_get              = txgbe_dev_info_get,
};

RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");

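/*
 * A typical flow (illustrative, not part of the driver): bind the NIC to
 * one of the kernel modules listed above before launching a DPDK app,
 * e.g. "dpdk-devbind.py --bind=vfio-pci 0000:01:00.0".
 */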
RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);

#ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
	RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
#endif
#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
	RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
#endif

#ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
	RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
#endif