net/igc: support device initialization
drivers/net/igc/igc_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <stdint.h>

#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>

#include "igc_logs.h"
#include "igc_ethdev.h"

#define IGC_INTEL_VENDOR_ID		0x8086

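/*
 * Default flow-control pause time programmed into the hardware. The unit is
 * presumably the usual 802.3x pause quantum (512 bit times); the base code
 * consumes the raw value as-is.
 */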
#define IGC_FC_PAUSE_TIME		0x0680

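/* PCI device IDs handled by this driver: the I225 LM/V/I/K variants. */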
static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static void eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info);
static int
eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);
static int
eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);

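/*
 * Device operations exported to the ethdev layer. In this initial version
 * most of them are minimal placeholders that only trace the call and report
 * success.
 */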
static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.dev_infos_get		= eth_igc_infos_get,
	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
};

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}

static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	RTE_SET_USED(wait_to_complete);
	return 0;
}

static void
eth_igc_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}

/*
 * Get the hardware Rx packet buffer size in bytes: bits [5:0] of the RXPBS
 * register hold the size in KB, so shift left by 10 to convert to bytes.
 */
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

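/*
 * Bring the hardware to a known good state: take control from the firmware,
 * issue a global reset, disable wake-up, program flow control and run the
 * base-code hardware initialization.
 */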
static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard-size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total size reduces the buffer
	 *   by 1500.
	 */
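	/*
	 * Worked example with hypothetical numbers: for a 32 KB Rx packet
	 * buffer, high_water = 32768 - 2 * 1518 = 29732 bytes and
	 * low_water = 29732 - 1500 = 28232 bytes.
	 */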
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}

static int
eth_igc_start(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}

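/*
 * Release any software/firmware semaphores that a previous run of the
 * application may have left held after an improper exit.
 */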
static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * Taking the SMBI lock should not fail at this early stage. If it
	 * does, it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Taking the PHY lock should not fail at this early stage.
		 * If it does, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				    hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is trickier since it is common to all ports; but
		 * the swfw_sync retries last long enough (1 s) to be almost
		 * sure that, if the lock cannot be taken, it is due to an
		 * improper hold of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}

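/*
 * Close the port: reset the PHY, hand control of the hardware back to the
 * firmware and clear any semaphore left pending.
 */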
static void
eth_igc_close(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	PMD_INIT_FUNC_TRACE();

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);
}

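/* Cache the PCI identifiers of the device in the shared hardware structure. */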
static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}

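/*
 * Per-port initialization: identify the hardware, set up the base-code
 * function pointers, clear stale locks, reset the MAC, validate the NVM
 * checksum, read the permanent MAC address and finish with the full
 * hardware initialization.
 */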
static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;

	/*
	 * For secondary processes, don't initialize any further, as the
	 * primary process has already done this work. Only check that we
	 * don't need a different Rx function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state; this is important for reading the NVM
	 * and MAC address from it.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCIe parts fail the first check due to the link being
		 * in a sleep state. Call it again; if it fails a second time,
		 * it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	/* Tell rte_eth_dev_close() that it should also release the private
	 * port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}

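/* Per-port teardown; only the primary process releases the hardware. */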
static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_igc_close(eth_dev);
	return 0;
}

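/* dev_reset callback: uninitialize the port, then run the init sequence again. */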
static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}

static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}

static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	return 0;
}

static int
eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	RTE_SET_USED(rx_queue_id);
	RTE_SET_USED(nb_rx_desc);
	RTE_SET_USED(socket_id);
	RTE_SET_USED(rx_conf);
	RTE_SET_USED(mb_pool);
	return 0;
}

static int
eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_idx);
	RTE_SET_USED(nb_desc);
	RTE_SET_USED(socket_id);
	RTE_SET_USED(tx_conf);
	return 0;
}

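/*
 * PCI bus glue: allocate an igc_adapter-sized private data area per port and
 * run the generic ethdev PCI probe/remove helpers with the init/uninit
 * callbacks above.
 */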
static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

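/* Register the PMD, its PCI ID table and the kernel modules it can work with. */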
RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");