/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
8 #include <rte_bus_pci.h>
9 #include <rte_ethdev_driver.h>
10 #include <rte_ethdev_pci.h>
11 #include <rte_malloc.h>
14 #include "igc_ethdev.h"
16 #define IGC_INTEL_VENDOR_ID 0x8086
18 #define IGC_FC_PAUSE_TIME 0x0680
20 static const struct rte_pci_id pci_id_igc_map[] = {
21 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
22 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
23 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
24 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
25 { .vendor_id = 0, /* sentinel */ },
/* Forward declarations for the eth_dev_ops table below. */
static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static void eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info);
static int
eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);
static int
eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf);
48 static const struct eth_dev_ops eth_igc_ops = {
49 .dev_configure = eth_igc_configure,
50 .link_update = eth_igc_link_update,
51 .dev_stop = eth_igc_stop,
52 .dev_start = eth_igc_start,
53 .dev_close = eth_igc_close,
54 .dev_reset = eth_igc_reset,
55 .promiscuous_enable = eth_igc_promiscuous_enable,
56 .promiscuous_disable = eth_igc_promiscuous_disable,
57 .dev_infos_get = eth_igc_infos_get,
58 .rx_queue_setup = eth_igc_rx_queue_setup,
59 .tx_queue_setup = eth_igc_tx_queue_setup,
/* Configure the device. Placeholder: nothing to program at this stage. */
static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}
/* Update the link status in dev->data. Placeholder implementation. */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	RTE_SET_USED(wait_to_complete);
	return 0;
}
/* Stop the device. Placeholder implementation. */
static void
eth_igc_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}
87 * Get hardware rx-buffer size.
90 igc_get_rx_buffer_size(struct igc_hw *hw)
92 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
96 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
97 * For ASF and Pass Through versions of f/w this means
98 * that the driver is loaded.
101 igc_hw_control_acquire(struct igc_hw *hw)
105 /* Let firmware know the driver has taken over */
106 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
107 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
111 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
112 * For ASF and Pass Through versions of f/w this means that the
113 * driver is no longer loaded.
116 igc_hw_control_release(struct igc_hw *hw)
120 /* Let firmware taken over control of h/w */
121 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
122 IGC_WRITE_REG(hw, IGC_CTRL_EXT,
123 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
127 igc_hardware_init(struct igc_hw *hw)
129 uint32_t rx_buf_size;
132 /* Let the firmware know the OS is in control */
133 igc_hw_control_acquire(hw);
135 /* Issue a global reset */
138 /* disable all wake up */
139 IGC_WRITE_REG(hw, IGC_WUC, 0);
142 * Hardware flow control
143 * - High water mark should allow for at least two standard size (1518)
144 * frames to be received after sending an XOFF.
145 * - Low water mark works best when it is very near the high water mark.
146 * This allows the receiver to restart by sending XON when it has
147 * drained a bit. Here we use an arbitrary value of 1500 which will
148 * restart after one full frame is pulled from the buffer. There
149 * could be several smaller frames in the buffer and if so they will
150 * not trigger the XON until their total number reduces the buffer
153 rx_buf_size = igc_get_rx_buffer_size(hw);
154 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
155 hw->fc.low_water = hw->fc.high_water - 1500;
156 hw->fc.pause_time = IGC_FC_PAUSE_TIME;
158 hw->fc.requested_mode = igc_fc_full;
160 diag = igc_init_hw(hw);
164 igc_get_phy_info(hw);
165 igc_check_for_link(hw);
/* Start the device. Placeholder implementation. */
static int
eth_igc_start(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}
179 igc_reset_swfw_lock(struct igc_hw *hw)
184 * Do mac ops initialization manually here, since we will need
185 * some function pointers set by this call.
187 ret_val = igc_init_mac_params(hw);
192 * SMBI lock should not fail in this early stage. If this is the case,
193 * it is due to an improper exit of the application.
194 * So force the release of the faulty lock.
196 if (igc_get_hw_semaphore_generic(hw) < 0)
197 PMD_DRV_LOG(DEBUG, "SMBI lock released");
199 igc_put_hw_semaphore_generic(hw);
201 if (hw->mac.ops.acquire_swfw_sync != NULL) {
205 * Phy lock should not fail in this early stage.
206 * If this is the case, it is due to an improper exit of the
207 * application. So force the release of the faulty lock.
209 mask = IGC_SWFW_PHY0_SM;
210 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
211 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
214 hw->mac.ops.release_swfw_sync(hw, mask);
217 * This one is more tricky since it is common to all ports; but
218 * swfw_sync retries last long enough (1s) to be almost sure
219 * that if lock can not be taken it is due to an improper lock
222 mask = IGC_SWFW_EEP_SM;
223 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
224 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
226 hw->mac.ops.release_swfw_sync(hw, mask);
/* Close the device: reset the PHY, hand h/w control back to f/w,
 * and clear any stale SW/FW semaphores.
 */
static void
eth_igc_close(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	PMD_INIT_FUNC_TRACE();

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);
}
247 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
249 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
251 hw->vendor_id = pci_dev->id.vendor_id;
252 hw->device_id = pci_dev->id.device_id;
253 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
254 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
258 eth_igc_dev_init(struct rte_eth_dev *dev)
260 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
261 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
264 PMD_INIT_FUNC_TRACE();
265 dev->dev_ops = ð_igc_ops;
268 * for secondary processes, we don't initialize any further as primary
269 * has already done this work. Only check we don't need a different
272 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
275 rte_eth_copy_pci_info(dev, pci_dev);
278 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
280 igc_identify_hardware(dev, pci_dev);
281 if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
286 igc_get_bus_info(hw);
288 /* Reset any pending lock */
289 if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
294 /* Finish initialization */
295 if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
301 hw->phy.autoneg_wait_to_complete = 0;
302 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
305 if (hw->phy.media_type == igc_media_type_copper) {
306 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
307 hw->phy.disable_polarity_correction = 0;
308 hw->phy.ms_type = igc_ms_hw_default;
312 * Start from a known state, this is important in reading the nvm
317 /* Make sure we have a good EEPROM before we read from it */
318 if (igc_validate_nvm_checksum(hw) < 0) {
320 * Some PCI-E parts fail the first check due to
321 * the link being in sleep state, call it again,
322 * if it fails a second time its a real issue.
324 if (igc_validate_nvm_checksum(hw) < 0) {
325 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
331 /* Read the permanent MAC address out of the EEPROM */
332 if (igc_read_mac_addr(hw) != 0) {
333 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
338 /* Allocate memory for storing MAC addresses */
339 dev->data->mac_addrs = rte_zmalloc("igc",
340 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
341 if (dev->data->mac_addrs == NULL) {
342 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
343 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
348 /* Copy the permanent MAC address */
349 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
350 &dev->data->mac_addrs[0]);
352 /* Now initialize the hardware */
353 if (igc_hardware_init(hw) != 0) {
354 PMD_INIT_LOG(ERR, "Hardware initialization failed");
355 rte_free(dev->data->mac_addrs);
356 dev->data->mac_addrs = NULL;
361 /* Pass the information to the rte_eth_dev_close() that it should also
362 * release the private port resources.
364 dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
366 hw->mac.get_link_status = 1;
368 /* Indicate SOL/IDER usage */
369 if (igc_check_reset_block(hw) < 0)
371 "PHY reset is blocked due to SOL/IDER session.");
373 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
374 dev->data->port_id, pci_dev->id.vendor_id,
375 pci_dev->id.device_id);
380 igc_hw_control_release(hw);
385 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
387 PMD_INIT_FUNC_TRACE();
389 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
392 eth_igc_close(eth_dev);
/* Reset the port by running a full uninit/init cycle. */
static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}
/* Enable promiscuous mode. Placeholder implementation. */
static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}
/* Disable promiscuous mode. Placeholder implementation. */
static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	return 0;
}
427 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
429 PMD_INIT_FUNC_TRACE();
431 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
432 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
/* Set up an Rx queue. Placeholder implementation. */
static int
eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	RTE_SET_USED(rx_queue_id);
	RTE_SET_USED(nb_rx_desc);
	RTE_SET_USED(socket_id);
	RTE_SET_USED(rx_conf);
	RTE_SET_USED(mb_pool);
	return 0;
}
/* Set up a Tx queue. Placeholder implementation. */
static int
eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		uint16_t nb_desc, unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_idx);
	RTE_SET_USED(nb_desc);
	RTE_SET_USED(socket_id);
	RTE_SET_USED(tx_conf);
	return 0;
}
467 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
468 struct rte_pci_device *pci_dev)
470 PMD_INIT_FUNC_TRACE();
471 return rte_eth_dev_pci_generic_probe(pci_dev,
472 sizeof(struct igc_adapter), eth_igc_dev_init);
476 eth_igc_pci_remove(struct rte_pci_device *pci_dev)
478 PMD_INIT_FUNC_TRACE();
479 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
482 static struct rte_pci_driver rte_igc_pmd = {
483 .id_table = pci_id_igc_map,
484 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
485 .probe = eth_igc_pci_probe,
486 .remove = eth_igc_pci_remove,
489 RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
490 RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
491 RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");