1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Intel Corporation
9 #include <rte_bus_pci.h>
10 #include <rte_ethdev_driver.h>
11 #include <rte_ethdev_pci.h>
12 #include <rte_malloc.h>
15 #include "igc_ethdev.h"
17 #define IGC_INTEL_VENDOR_ID 0x8086
20 * The overhead from MTU to max frame size.
21 * The VLAN tag is included, so it must be counted in the overhead.
23 #define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
24 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
26 #define IGC_FC_PAUSE_TIME 0x0680
27 #define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
28 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
30 #define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
31 #define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
32 #define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */
33 #define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */
35 #define IGC_DEFAULT_RX_FREE_THRESH 32
37 #define IGC_DEFAULT_RX_PTHRESH 8
38 #define IGC_DEFAULT_RX_HTHRESH 8
39 #define IGC_DEFAULT_RX_WTHRESH 4
41 #define IGC_DEFAULT_TX_PTHRESH 8
42 #define IGC_DEFAULT_TX_HTHRESH 1
43 #define IGC_DEFAULT_TX_WTHRESH 16
/*
 * NOTE(review): a duplicate definition of IGC_MSIX_OTHER_INTR_VEC was removed
 * here; the macro is already defined earlier in this file with the identical
 * value (0). Benign macro redefinition is legal C but is a maintenance hazard:
 * if one copy is ever changed, the build breaks (or worse, silently diverges).
 */
/* PCI device-ID table: Intel I225 LM/V/I/K variants handled by this PMD.
 * The zeroed entry terminates the table for the PCI bus scan.
 */
48 static const struct rte_pci_id pci_id_igc_map[] = {
49 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
50 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
51 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
52 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
53 { .vendor_id = 0, /* sentinel */ },
/* Forward declarations for the eth_dev_ops callbacks implemented below. */
56 static int eth_igc_configure(struct rte_eth_dev *dev);
57 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
58 static void eth_igc_stop(struct rte_eth_dev *dev);
59 static int eth_igc_start(struct rte_eth_dev *dev);
60 static int eth_igc_set_link_up(struct rte_eth_dev *dev);
61 static int eth_igc_set_link_down(struct rte_eth_dev *dev);
62 static void eth_igc_close(struct rte_eth_dev *dev);
63 static int eth_igc_reset(struct rte_eth_dev *dev);
64 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
65 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
66 static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
67 char *fw_version, size_t fw_size);
68 static int eth_igc_infos_get(struct rte_eth_dev *dev,
69 struct rte_eth_dev_info *dev_info);
70 static int eth_igc_led_on(struct rte_eth_dev *dev);
71 static int eth_igc_led_off(struct rte_eth_dev *dev);
72 static void eth_igc_tx_queue_release(void *txq);
73 static void eth_igc_rx_queue_release(void *rxq);
75 eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
76 uint16_t nb_rx_desc, unsigned int socket_id,
77 const struct rte_eth_rxconf *rx_conf,
78 struct rte_mempool *mb_pool);
80 eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
81 uint16_t nb_desc, unsigned int socket_id,
82 const struct rte_eth_txconf *tx_conf);
/* ethdev callback table exported to the rte_ethdev layer via dev->dev_ops. */
84 static const struct eth_dev_ops eth_igc_ops = {
85 .dev_configure = eth_igc_configure,
86 .link_update = eth_igc_link_update,
87 .dev_stop = eth_igc_stop,
88 .dev_start = eth_igc_start,
89 .dev_close = eth_igc_close,
90 .dev_reset = eth_igc_reset,
91 .dev_set_link_up = eth_igc_set_link_up,
92 .dev_set_link_down = eth_igc_set_link_down,
93 .promiscuous_enable = eth_igc_promiscuous_enable,
94 .promiscuous_disable = eth_igc_promiscuous_disable,
96 .fw_version_get = eth_igc_fw_version_get,
97 .dev_infos_get = eth_igc_infos_get,
98 .dev_led_on = eth_igc_led_on,
99 .dev_led_off = eth_igc_led_off,
101 .rx_queue_setup = eth_igc_rx_queue_setup,
102 .rx_queue_release = eth_igc_rx_queue_release,
103 .tx_queue_setup = eth_igc_tx_queue_setup,
104 .tx_queue_release = eth_igc_tx_queue_release,
108 * multiple queue mode checking
/* Validate the RX/TX multi-queue modes requested in dev_conf.
 * Only RX modes NONE and RSS are accepted; SR-IOV is rejected outright.
 * An unsupported TX mode is only warned about, not rejected, to avoid
 * breaking applications that pass an invalid mode.
 */
111 igc_check_mq_mode(struct rte_eth_dev *dev)
113 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
114 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
116 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
117 PMD_INIT_LOG(ERR, "SRIOV is not supported.");
121 if (rx_mq_mode != ETH_MQ_RX_NONE &&
122 rx_mq_mode != ETH_MQ_RX_RSS) {
123 /* RSS together with VMDq is not supported */
124 PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
129 /* To not break software that sets an invalid mode, only display
130 * a warning if an invalid mode is used.
132 if (tx_mq_mode != ETH_MQ_TX_NONE)
133 PMD_INIT_LOG(WARNING,
134 "TX mode %d is not supported. Due to meaningless in this driver, just ignore",
/* dev_configure callback: validate the multi-queue configuration and flag
 * that the link status must be refreshed on the next interrupt/start.
 */
141 eth_igc_configure(struct rte_eth_dev *dev)
143 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
146 PMD_INIT_FUNC_TRACE();
148 ret = igc_check_mq_mode(dev);
152 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
/* dev_set_link_up callback: power up the PHY for copper media, otherwise
 * power up the fiber/serdes link.
 */
157 eth_igc_set_link_up(struct rte_eth_dev *dev)
159 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
161 if (hw->phy.media_type == igc_media_type_copper)
162 igc_power_up_phy(hw);
164 igc_power_up_fiber_serdes_link(hw);
/* dev_set_link_down callback: mirror of eth_igc_set_link_up — power down
 * the copper PHY or shut down the fiber/serdes link.
 */
169 eth_igc_set_link_down(struct rte_eth_dev *dev)
171 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
173 if (hw->phy.media_type == igc_media_type_copper)
174 igc_power_down_phy(hw);
176 igc_shutdown_fiber_serdes_link(hw);
181 * disable other interrupt
/* Disable the "other cause" (non-queue) interrupt. In MSI-X multi-vector
 * mode with LSC enabled, mask only the misc vector in EIMC; the IMC write
 * masks all legacy interrupt causes.
 */
184 igc_intr_other_disable(struct rte_eth_dev *dev)
186 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
187 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
188 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
190 if (rte_intr_allow_others(intr_handle) &&
191 dev->data->dev_conf.intr_conf.lsc) {
192 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
195 IGC_WRITE_REG(hw, IGC_IMC, ~0);
200 * enable other interrupt
/* Enable the "other cause" (non-queue) interrupt: set the misc vector in
 * EIMS when MSI-X multi-vector + LSC is in use, then unmask the causes
 * recorded in intr->mask via IMS.
 */
203 igc_intr_other_enable(struct rte_eth_dev *dev)
205 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
206 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
207 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
208 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
210 if (rte_intr_allow_others(intr_handle) &&
211 dev->data->dev_conf.intr_conf.lsc) {
212 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
215 IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
220 * It reads ICR and gets interrupt causes, check it and set a bit flag
221 * to update link status.
/* Read ICR (read-on-clear) and record a link-status-change cause by setting
 * IGC_FLAG_NEED_LINK_UPDATE, to be serviced by eth_igc_interrupt_action().
 */
224 eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
227 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
228 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
230 /* read-on-clear nic registers here */
231 icr = IGC_READ_REG(hw, IGC_ICR);
234 if (icr & IGC_ICR_LSC)
235 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
238 /* return 0 means link status changed, -1 means not changed */
/* link_update callback: poll the hardware link state (per media type) for
 * up to IGC_LINK_UPDATE_CHECK_TIMEOUT * IGC_LINK_UPDATE_CHECK_INTERVAL ms
 * when wait_to_complete is set, then publish the result through
 * rte_eth_linkstatus_set() (returns 0 if the status changed, -1 otherwise).
 */
240 eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
242 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
243 struct rte_eth_link link;
244 int link_check, count;
247 hw->mac.get_link_status = 1;
249 /* possible wait-to-complete in up to 9 seconds */
250 for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
251 /* Read the real link status */
252 switch (hw->phy.media_type) {
253 case igc_media_type_copper:
254 /* Do the work to read phy */
255 igc_check_for_link(hw);
256 link_check = !hw->mac.get_link_status;
259 case igc_media_type_fiber:
260 igc_check_for_link(hw);
261 link_check = (IGC_READ_REG(hw, IGC_STATUS) &
265 case igc_media_type_internal_serdes:
266 igc_check_for_link(hw);
267 link_check = hw->mac.serdes_has_link;
273 if (link_check || wait_to_complete == 0)
275 rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
277 memset(&link, 0, sizeof(link));
279 /* Now we check if a transition has happened */
281 uint16_t duplex, speed;
282 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
283 link.link_duplex = (duplex == FULL_DUPLEX) ?
284 ETH_LINK_FULL_DUPLEX :
285 ETH_LINK_HALF_DUPLEX;
286 link.link_speed = speed;
287 link.link_status = ETH_LINK_UP;
288 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
289 ETH_LINK_SPEED_FIXED);
/* At 2.5Gbps the inter-packet gap (TIPG.IPGT) is forced to 0x0b —
 * NOTE(review): presumably a hardware workaround; confirm against the
 * I225 datasheet/errata.
 */
291 if (speed == SPEED_2500) {
292 uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
293 if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
294 tipg &= ~IGC_TIPG_IPGT_MASK;
296 IGC_WRITE_REG(hw, IGC_TIPG, tipg);
/* Link down: report half-duplex / fixed, matching a zeroed link struct. */
301 link.link_duplex = ETH_LINK_HALF_DUPLEX;
302 link.link_status = ETH_LINK_DOWN;
303 link.link_autoneg = ETH_LINK_FIXED;
306 return rte_eth_linkstatus_set(dev, &link);
310 * It executes link_update after knowing an interrupt is present.
/* Service the causes recorded by eth_igc_interrupt_get_status(): on a
 * pending link update, refresh the link, log the new state, and notify
 * applications through the RTE_ETH_EVENT_INTR_LSC callback.
 */
313 eth_igc_interrupt_action(struct rte_eth_dev *dev)
315 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
316 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
317 struct rte_eth_link link;
320 if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
321 intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
323 /* set get_link_status to check register later */
324 ret = eth_igc_link_update(dev, 0);
326 /* check if link has changed */
330 rte_eth_linkstatus_get(dev, &link);
331 if (link.link_status)
333 " Port %d: Link Up - speed %u Mbps - %s",
335 (unsigned int)link.link_speed,
336 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
337 "full-duplex" : "half-duplex",
339 PMD_DRV_LOG(INFO, " Port %d: Link Down",
342 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
343 pci_dev->addr.domain,
346 pci_dev->addr.function);
347 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
353 * Interrupt handler which shall be registered at first.
356 * Pointer to interrupt handle.
358 * The address of parameter (struct rte_eth_dev *) registered before.
/* Top-level interrupt handler registered with the EAL interrupt thread.
 * param is the struct rte_eth_dev * supplied at registration time.
 */
361 eth_igc_interrupt_handler(void *param)
363 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
365 eth_igc_interrupt_get_status(dev);
366 eth_igc_interrupt_action(dev);
370 * This routine disables all traffic on the adapter by issuing a
371 * global reset on the MAC.
/* dev_stop callback: quiesce the adapter — mask and clear all MSI-X and
 * legacy interrupts, disable wake-up, force the link down, clear the
 * recorded link status, and tear down the RX interrupt event fds.
 */
374 eth_igc_stop(struct rte_eth_dev *dev)
376 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
377 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
378 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
379 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
380 struct rte_eth_link link;
382 adapter->stopped = 1;
384 /* disable all MSI-X interrupts */
385 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
388 /* clear all MSI-X interrupts */
389 IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
391 igc_intr_other_disable(dev);
393 /* disable intr eventfd mapping */
394 rte_intr_disable(intr_handle);
398 /* disable all wake up */
399 IGC_WRITE_REG(hw, IGC_WUC, 0);
401 /* Set bit for Go Link disconnect */
402 igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
403 IGC_82580_PM_GO_LINKD);
405 /* Power down the phy. Needed to make the link go Down */
406 eth_igc_set_link_down(dev);
408 /* clear the recorded link status */
409 memset(&link, 0, sizeof(link));
410 rte_eth_linkstatus_set(dev, &link);
/* Single-vector mode: restore the default LSC handler that was
 * unregistered in eth_igc_start().
 */
412 if (!rte_intr_allow_others(intr_handle))
413 /* resume to the default handler */
414 rte_intr_callback_register(intr_handle,
415 eth_igc_interrupt_handler,
418 /* Clean datapath event and queue/vec mapping */
419 rte_intr_efd_disable(intr_handle);
422 /* Sets up the hardware to generate MSI-X interrupts properly
424 * board private structure
/* Program the MSI-X registers (GPIE/EIAC/IVAR_MISC/EIAM) so the "other
 * cause" interrupt is delivered on IGC_MSIX_OTHER_INTR_VEC. Skipped when
 * no intr-vector/eventfd mapping exists or LSC interrupts are disabled.
 */
427 igc_configure_msix_intr(struct rte_eth_dev *dev)
429 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
430 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
431 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
435 /* won't configure msix register if no mapping is done
436 * between intr vector and event fd
438 if (!rte_intr_dp_is_en(intr_handle) ||
439 !dev->data->dev_conf.intr_conf.lsc)
442 /* turn on MSI-X capability first */
443 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
444 IGC_GPIE_PBA | IGC_GPIE_EIAME |
447 intr_mask = (1u << IGC_MSIX_OTHER_INTR_VEC);
449 /* enable msix auto-clear */
450 igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);
452 /* set other cause interrupt vector */
453 igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
454 (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);
456 /* enable auto-mask */
457 igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);
463 * It enables the interrupt mask and then enable the interrupt.
466 * Pointer to struct rte_eth_dev.
/* Add or remove the link-status-change cause (IGC_ICR_LSC) in the software
 * interrupt mask; the mask is applied to hardware by igc_intr_other_enable().
 */
471 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
473 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
476 intr->mask |= IGC_ICR_LSC;
478 intr->mask &= ~IGC_ICR_LSC;
482 * Get hardware rx-buffer size.
/* Return the hardware RX packet buffer size in bytes: the RXPBS field is
 * expressed in KB (low 6 bits), hence the shift left by 10.
 */
485 igc_get_rx_buffer_size(struct igc_hw *hw)
487 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
491 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
492 * For ASF and Pass Through versions of f/w this means
493 * that the driver is loaded.
/* Set CTRL_EXT:DRV_LOAD to tell ASF/pass-through firmware the driver owns
 * the hardware (paired with igc_hw_control_release()).
 */
496 igc_hw_control_acquire(struct igc_hw *hw)
500 /* Let firmware know the driver has taken over */
501 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
502 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
506 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
507 * For ASF and Pass Through versions of f/w this means that the
508 * driver is no longer loaded.
/* Clear CTRL_EXT:DRV_LOAD to hand hardware control back to firmware
 * (inverse of igc_hw_control_acquire()).
 */
511 igc_hw_control_release(struct igc_hw *hw)
515 /* Let firmware taken over control of h/w */
516 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
517 IGC_WRITE_REG(hw, IGC_CTRL_EXT,
518 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
/* Bring the MAC to a known state: take control from firmware, reset,
 * disable wake-up, program flow-control watermarks from the RX buffer
 * size, and run the base-code igc_init_hw() sequence.
 */
522 igc_hardware_init(struct igc_hw *hw)
524 uint32_t rx_buf_size;
527 /* Let the firmware know the OS is in control */
528 igc_hw_control_acquire(hw);
530 /* Issue a global reset */
533 /* disable all wake up */
534 IGC_WRITE_REG(hw, IGC_WUC, 0);
537 * Hardware flow control
538 * - High water mark should allow for at least two standard size (1518)
539 * frames to be received after sending an XOFF.
540 * - Low water mark works best when it is very near the high water mark.
541 * This allows the receiver to restart by sending XON when it has
542 * drained a bit. Here we use an arbitrary value of 1500 which will
543 * restart after one full frame is pulled from the buffer. There
544 * could be several smaller frames in the buffer and if so they will
545 * not trigger the XON until their total number reduces the buffer
548 rx_buf_size = igc_get_rx_buffer_size(hw);
549 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
550 hw->fc.low_water = hw->fc.high_water - 1500;
551 hw->fc.pause_time = IGC_FC_PAUSE_TIME;
553 hw->fc.requested_mode = igc_fc_full;
555 diag = igc_init_hw(hw);
559 igc_get_phy_info(hw);
560 igc_check_for_link(hw);
/* dev_start callback: quiesce interrupts, power up the PHY, program the
 * MAC address, re-initialize the hardware, configure MSI-X, translate the
 * requested link_speeds bitmap into PHY autoneg advertisement (or a forced
 * speed/duplex), wire up LSC interrupt handling, and refresh link status.
 */
566 eth_igc_start(struct rte_eth_dev *dev)
568 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
569 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
570 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
571 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
576 PMD_INIT_FUNC_TRACE();
578 /* disable all MSI-X interrupts */
579 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
582 /* clear all MSI-X interrupts */
583 IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
585 /* disable uio/vfio intr/eventfd mapping */
586 if (!adapter->stopped)
587 rte_intr_disable(intr_handle);
589 /* Power up the phy. Needed to make the link go Up */
590 eth_igc_set_link_up(dev);
592 /* Put the address into the Receive Address Array */
593 igc_rar_set(hw, hw->mac.addr, 0);
595 /* Initialize the hardware */
596 if (igc_hardware_init(hw)) {
597 PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
600 adapter->stopped = 0;
602 /* configure msix for rx interrupt */
603 igc_configure_msix_intr(dev);
605 igc_clear_hw_cntrs_base_generic(hw);
607 /* Setup link speed and duplex */
608 speeds = &dev->data->dev_conf.link_speeds;
609 if (*speeds == ETH_LINK_SPEED_AUTONEG) {
610 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
/* Explicit speed list: validate it, then advertise each selected
 * speed/duplex pair. ETH_LINK_SPEED_FIXED requires exactly one speed.
 */
614 autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
617 hw->phy.autoneg_advertised = 0;
619 if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
620 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
621 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
622 ETH_LINK_SPEED_FIXED)) {
624 goto error_invalid_config;
626 if (*speeds & ETH_LINK_SPEED_10M_HD) {
627 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
630 if (*speeds & ETH_LINK_SPEED_10M) {
631 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
634 if (*speeds & ETH_LINK_SPEED_100M_HD) {
635 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
638 if (*speeds & ETH_LINK_SPEED_100M) {
639 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
642 if (*speeds & ETH_LINK_SPEED_1G) {
643 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
646 if (*speeds & ETH_LINK_SPEED_2_5G) {
647 hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
650 if (num_speeds == 0 || (!autoneg && num_speeds > 1))
651 goto error_invalid_config;
653 /* Set/reset the mac.autoneg based on the link speed,
658 hw->mac.forced_speed_duplex =
659 hw->phy.autoneg_advertised;
/* Multi-vector: program LSC per configuration. Single-vector: the
 * handler is unregistered so the vector can serve the datapath, and
 * LSC cannot be supported.
 */
667 if (rte_intr_allow_others(intr_handle)) {
668 /* check if lsc interrupt is enabled */
669 if (dev->data->dev_conf.intr_conf.lsc)
670 igc_lsc_interrupt_setup(dev, 1);
672 igc_lsc_interrupt_setup(dev, 0);
674 rte_intr_callback_unregister(intr_handle,
675 eth_igc_interrupt_handler,
677 if (dev->data->dev_conf.intr_conf.lsc)
679 "LSC won't enable because of no intr multiplex");
682 /* enable uio/vfio intr/eventfd mapping */
683 rte_intr_enable(intr_handle);
685 /* resume enabled intr since hw reset */
686 igc_intr_other_enable(dev);
688 eth_igc_link_update(dev, 0);
692 error_invalid_config:
693 PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
694 dev->data->dev_conf.link_speeds, dev->data->port_id);
/* Force-release hardware semaphores (SMBI, SWFW PHY, SWFW EEPROM) that a
 * crashed/improperly-exited application may have left held; must run early,
 * so MAC ops are initialized manually first.
 */
699 igc_reset_swfw_lock(struct igc_hw *hw)
704 * Do mac ops initialization manually here, since we will need
705 * some function pointers set by this call.
707 ret_val = igc_init_mac_params(hw);
712 * SMBI lock should not fail in this early stage. If this is the case,
713 * it is due to an improper exit of the application.
714 * So force the release of the faulty lock.
716 if (igc_get_hw_semaphore_generic(hw) < 0)
717 PMD_DRV_LOG(DEBUG, "SMBI lock released");
719 igc_put_hw_semaphore_generic(hw);
721 if (hw->mac.ops.acquire_swfw_sync != NULL) {
725 * Phy lock should not fail in this early stage.
726 * If this is the case, it is due to an improper exit of the
727 * application. So force the release of the faulty lock.
729 mask = IGC_SWFW_PHY0_SM;
730 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
731 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
734 hw->mac.ops.release_swfw_sync(hw, mask);
737 * This one is more tricky since it is common to all ports; but
738 * swfw_sync retries last long enough (1s) to be almost sure
739 * that if lock can not be taken it is due to an improper lock
742 mask = IGC_SWFW_EEP_SM;
743 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
744 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
746 hw->mac.ops.release_swfw_sync(hw, mask);
/* dev_close callback: stop the port if still running, disable interrupts,
 * unregister the interrupt callback (retrying up to 5 times, since the
 * callback may still be executing), reset the PHY, return hardware control
 * to firmware, and clear any pending semaphore locks.
 */
753 eth_igc_close(struct rte_eth_dev *dev)
755 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
756 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
757 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
758 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
761 PMD_INIT_FUNC_TRACE();
763 if (!adapter->stopped)
766 igc_intr_other_disable(dev);
768 int ret = rte_intr_callback_unregister(intr_handle,
769 eth_igc_interrupt_handler, dev);
770 if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
773 PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
774 DELAY(200 * 1000); /* delay 200ms */
775 } while (retry++ < 5);
777 igc_phy_hw_reset(hw);
778 igc_hw_control_release(hw);
780 /* Reset any pending lock */
781 igc_reset_swfw_lock(hw);
/* Copy the PCI identity (vendor/device/subsystem IDs) from the probed PCI
 * device into the base-code hw struct so igc_setup_init_funcs() can
 * select the correct MAC/PHY/NVM function tables.
 */
785 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
787 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
789 hw->vendor_id = pci_dev->id.vendor_id;
790 hw->device_id = pci_dev->id.device_id;
791 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
792 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
/* Per-port init invoked from the PCI probe path: map BAR0, identify the
 * device, clear stale semaphore locks, validate NVM, read the permanent
 * MAC address, initialize the hardware, and register/enable the interrupt
 * handler. Secondary processes only attach to the primary's state.
 */
796 eth_igc_dev_init(struct rte_eth_dev *dev)
798 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
799 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
800 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
803 PMD_INIT_FUNC_TRACE();
804 dev->dev_ops = &eth_igc_ops;
807 * for secondary processes, we don't initialize any further as primary
808 * has already done this work. Only check we don't need a different
811 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
814 rte_eth_copy_pci_info(dev, pci_dev);
817 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
819 igc_identify_hardware(dev, pci_dev);
820 if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
825 igc_get_bus_info(hw);
827 /* Reset any pending lock */
828 if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
833 /* Finish initialization */
834 if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
840 hw->phy.autoneg_wait_to_complete = 0;
841 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
844 if (hw->phy.media_type == igc_media_type_copper) {
845 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
846 hw->phy.disable_polarity_correction = 0;
847 hw->phy.ms_type = igc_ms_hw_default;
851 * Start from a known state, this is important in reading the nvm
856 /* Make sure we have a good EEPROM before we read from it */
857 if (igc_validate_nvm_checksum(hw) < 0) {
859 * Some PCI-E parts fail the first check due to
860 * the link being in sleep state, call it again,
861 * if it fails a second time its a real issue.
863 if (igc_validate_nvm_checksum(hw) < 0) {
864 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
870 /* Read the permanent MAC address out of the EEPROM */
871 if (igc_read_mac_addr(hw) != 0) {
872 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
877 /* Allocate memory for storing MAC addresses */
878 dev->data->mac_addrs = rte_zmalloc("igc",
879 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
880 if (dev->data->mac_addrs == NULL) {
881 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
882 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
887 /* Copy the permanent MAC address */
888 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
889 &dev->data->mac_addrs[0]);
891 /* Now initialize the hardware */
892 if (igc_hardware_init(hw) != 0) {
893 PMD_INIT_LOG(ERR, "Hardware initialization failed");
894 rte_free(dev->data->mac_addrs);
895 dev->data->mac_addrs = NULL;
900 /* Pass the information to the rte_eth_dev_close() that it should also
901 * release the private port resources.
903 dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
905 hw->mac.get_link_status = 1;
908 /* Indicate SOL/IDER usage */
909 if (igc_check_reset_block(hw) < 0)
911 "PHY reset is blocked due to SOL/IDER session.");
913 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
914 dev->data->port_id, pci_dev->id.vendor_id,
915 pci_dev->id.device_id);
917 rte_intr_callback_register(&pci_dev->intr_handle,
918 eth_igc_interrupt_handler, (void *)dev);
920 /* enable uio/vfio intr/eventfd mapping */
921 rte_intr_enable(&pci_dev->intr_handle);
923 /* enable support intr */
924 igc_intr_other_enable(dev);
/* Error path: hand hardware control back to firmware. */
929 igc_hw_control_release(hw);
/* Per-port uninit: primary process only; all teardown is delegated to
 * eth_igc_close().
 */
934 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
936 PMD_INIT_FUNC_TRACE();
938 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
941 eth_igc_close(eth_dev);
/* dev_reset callback: full software reset implemented as uninit followed
 * by re-init of the port.
 */
946 eth_igc_reset(struct rte_eth_dev *dev)
950 PMD_INIT_FUNC_TRACE();
952 ret = eth_igc_dev_uninit(dev);
956 return eth_igc_dev_init(dev);
/* promiscuous_enable callback — NOTE(review): body not fully visible here;
 * appears to be a stub at this stage of the driver.
 */
960 eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
962 PMD_INIT_FUNC_TRACE();
/* promiscuous_disable callback — NOTE(review): body not fully visible here;
 * appears to be a stub at this stage of the driver.
 */
968 eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
970 PMD_INIT_FUNC_TRACE();
/* fw_version_get callback: format the NVM/option-ROM firmware version into
 * fw_version. Returns the number of extra bytes needed (including the NUL)
 * if fw_size is too small, per the rte_eth_dev_fw_version_get() contract.
 */
976 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
979 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
980 struct igc_fw_version fw;
983 igc_get_fw_version(hw, &fw);
985 /* if option rom is valid, display its version too */
987 ret = snprintf(fw_version, fw_size,
988 "%d.%d, 0x%08x, %d.%d.%d",
989 fw.eep_major, fw.eep_minor, fw.etrack_id,
990 fw.or_major, fw.or_build, fw.or_patch);
/* No option ROM: include the etrack id only when present. */
993 if (fw.etrack_id != 0X0000) {
994 ret = snprintf(fw_version, fw_size,
996 fw.eep_major, fw.eep_minor,
999 ret = snprintf(fw_version, fw_size,
1001 fw.eep_major, fw.eep_minor,
1006 ret += 1; /* add the size of '\0' */
1007 if (fw_size < (u32)ret)
/* dev_infos_get callback: report static device capabilities — buffer and
 * frame-size limits, queue counts, supported link speeds, and MTU range.
 */
1014 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1016 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1018 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1019 dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
1020 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1021 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
1022 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
1023 dev_info->max_vmdq_pools = 0;
1025 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
1026 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
1027 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
1029 dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
1030 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
/* dev_led_on callback: turn the port identification LED on. */
1035 eth_igc_led_on(struct rte_eth_dev *dev)
1037 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1039 return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
/* dev_led_off callback: turn the port identification LED off. */
1043 eth_igc_led_off(struct rte_eth_dev *dev)
1045 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1047 return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
/* rx_queue_setup callback — placeholder: the RX datapath is not implemented
 * yet, so all parameters are marked used to silence compiler warnings.
 */
1051 eth_igc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1052 uint16_t nb_rx_desc, unsigned int socket_id,
1053 const struct rte_eth_rxconf *rx_conf,
1054 struct rte_mempool *mb_pool)
1056 PMD_INIT_FUNC_TRACE();
1058 RTE_SET_USED(rx_queue_id);
1059 RTE_SET_USED(nb_rx_desc);
1060 RTE_SET_USED(socket_id);
1061 RTE_SET_USED(rx_conf);
1062 RTE_SET_USED(mb_pool);
/* tx_queue_setup callback — placeholder: the TX datapath is not implemented
 * yet, so all parameters are marked used to silence compiler warnings.
 */
1067 eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1068 uint16_t nb_desc, unsigned int socket_id,
1069 const struct rte_eth_txconf *tx_conf)
1071 PMD_INIT_FUNC_TRACE();
1073 RTE_SET_USED(queue_idx);
1074 RTE_SET_USED(nb_desc);
1075 RTE_SET_USED(socket_id);
1076 RTE_SET_USED(tx_conf);
/* tx_queue_release callback — placeholder until the TX path exists. */
1080 static void eth_igc_tx_queue_release(void *txq)
/* rx_queue_release callback — placeholder until the RX path exists. */
1085 static void eth_igc_rx_queue_release(void *rxq)
/* PCI probe entry point: allocate an ethdev with an igc_adapter private
 * area and run eth_igc_dev_init() on it.
 */
1091 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1092 struct rte_pci_device *pci_dev)
1094 PMD_INIT_FUNC_TRACE();
1095 return rte_eth_dev_pci_generic_probe(pci_dev,
1096 sizeof(struct igc_adapter), eth_igc_dev_init);
/* PCI remove entry point: tear the port down via eth_igc_dev_uninit(). */
1100 eth_igc_pci_remove(struct rte_pci_device *pci_dev)
1102 PMD_INIT_FUNC_TRACE();
1103 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
/* PCI driver registration record: requires BAR mapping and advertises
 * link-status-change interrupt support.
 */
1106 static struct rte_pci_driver rte_igc_pmd = {
1107 .id_table = pci_id_igc_map,
1108 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1109 .probe = eth_igc_pci_probe,
1110 .remove = eth_igc_pci_remove,
1113 RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
1114 RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
1115 RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");