+
+ /* disable all MSI-X interrupts */
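+	/* 0x1f sets one bit per vector: presumably the four queue
+	 * vectors plus the misc/link vector
+	 */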
+ IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
+ IGC_WRITE_FLUSH(hw);
+
+ /* clear all MSI-X interrupts */
+ IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
+
+ /* disable uio/vfio intr/eventfd mapping */
+ if (!adapter->stopped)
+ rte_intr_disable(intr_handle);
+
+	/* Power up the PHY. Needed to make the link go up */
+ eth_igc_set_link_up(dev);
+
+ /* Put the address into the Receive Address Array */
+ igc_rar_set(hw, hw->mac.addr, 0);
+
+ /* Initialize the hardware */
+ if (igc_hardware_init(hw)) {
+ PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
+ return -EIO;
+ }
+ adapter->stopped = 0;
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq) {
+ uint32_t intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
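+	/* intr_vec maps each RX queue to its interrupt vector */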
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec = rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* configure msix for rx interrupt */
+ igc_configure_msix_intr(dev);
+
+ igc_tx_init(dev);
+
+ /* This can fail when allocating mbufs for descriptor rings */
+ ret = igc_rx_init(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
+ igc_dev_clear_queues(dev);
+ return ret;
+ }
+
+ igc_clear_hw_cntrs_base_generic(hw);
+
+ /* VLAN Offload Settings */
+ eth_igc_vlan_offload_set(dev,
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
+
+ /* Setup link speed and duplex */
+ speeds = &dev->data->dev_conf.link_speeds;
+ if (*speeds == ETH_LINK_SPEED_AUTONEG) {
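+		/* advertise all supported speed/duplex pairs up to 2.5G */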
+ hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
+ hw->mac.autoneg = 1;
+ } else {
+ int num_speeds = 0;
+ bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+
+ /* Reset */
+ hw->phy.autoneg_advertised = 0;
+
+ if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_FIXED)) {
+ num_speeds = -1;
+ goto error_invalid_config;
+ }
+ if (*speeds & ETH_LINK_SPEED_10M_HD) {
+ hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_10M) {
+ hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_100M_HD) {
+ hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_100M) {
+ hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_1G) {
+ hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
+ num_speeds++;
+ }
+ if (*speeds & ETH_LINK_SPEED_2_5G) {
+ hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
+ num_speeds++;
+ }
+ if (num_speeds == 0 || (!autoneg && num_speeds > 1))
+ goto error_invalid_config;
+
+ /* Set/reset the mac.autoneg based on the link speed,
+ * fixed or not
+ */
+ if (!autoneg) {
+ hw->mac.autoneg = 0;
+ hw->mac.forced_speed_duplex =
+ hw->phy.autoneg_advertised;
+ } else {
+ hw->mac.autoneg = 1;
+ }
+ }
+
+ igc_setup_link(hw);
+
+ if (rte_intr_allow_others(intr_handle)) {
+ /* check if lsc interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc)
+ igc_lsc_interrupt_setup(dev, 1);
+ else
+ igc_lsc_interrupt_setup(dev, 0);
+ } else {
+ rte_intr_callback_unregister(intr_handle,
+ eth_igc_interrupt_handler,
+ (void *)dev);
+ if (dev->data->dev_conf.intr_conf.lsc)
+ PMD_DRV_LOG(INFO,
+ "LSC won't enable because of no intr multiplex");
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
+
+ rte_eal_alarm_set(IGC_ALARM_INTERVAL,
+ igc_update_queue_stats_handler, dev);
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq &&
+ rte_intr_dp_is_en(intr_handle))
+ igc_rxq_interrupt_setup(dev);
+
+	/* re-enable the interrupts that were enabled before the hw reset */
+ igc_intr_other_enable(dev);
+
+ eth_igc_rxtx_control(dev, true);
+ eth_igc_link_update(dev, 0);
+
+ /* configure MAC-loopback mode */
+ if (dev->data->dev_conf.lpbk_mode == 1) {
+ uint32_t reg_val;
+
+ reg_val = IGC_READ_REG(hw, IGC_CTRL);
+ reg_val &= ~IGC_CTRL_SPEED_MASK;
+ reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
+ IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
+ IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
+
+ igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+ }
+
+ return 0;
+
+error_invalid_config:
+ PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
+ dev->data->dev_conf.link_speeds, dev->data->port_id);
+ igc_dev_clear_queues(dev);
+ return -EINVAL;
+}
+
+static int
+igc_reset_swfw_lock(struct igc_hw *hw)
+{
+ int ret_val;
+
+ /*
+ * Do mac ops initialization manually here, since we will need
+ * some function pointers set by this call.
+ */
+ ret_val = igc_init_mac_params(hw);
+ if (ret_val)
+ return ret_val;
+
+	/*
+	 * Acquiring the SMBI lock should not fail at this early stage.
+	 * If it does, it is due to an improper exit of the application,
+	 * so force the release of the faulty lock.
+	 */
+ if (igc_get_hw_semaphore_generic(hw) < 0)
+ PMD_DRV_LOG(DEBUG, "SMBI lock released");
+
+ igc_put_hw_semaphore_generic(hw);
+
+ if (hw->mac.ops.acquire_swfw_sync != NULL) {
+ uint16_t mask;
+
+		/*
+		 * The PHY lock should not fail at this early stage. If it
+		 * does, it is due to an improper exit of the application,
+		 * so force the release of the faulty lock.
+		 */
+ mask = IGC_SWFW_PHY0_SM;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+ PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
+ hw->bus.func);
+ }
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+		/*
+		 * This one is trickier since it is common to all ports; but
+		 * swfw_sync retries last long enough (1s) to make it almost
+		 * certain that, if the lock cannot be taken, it is due to an
+		 * improper lock of the semaphore.
+		 */
+ mask = IGC_SWFW_EEP_SM;
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
+ PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ }
+
+ return IGC_SUCCESS;
+}
+
+/*
+ * free all rx/tx queues.
+ */
+static void
+igc_dev_free_queues(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_igc_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_igc_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+static void
+eth_igc_close(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
+ int retry = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!adapter->stopped)
+ eth_igc_stop(dev);
+
+ igc_flow_flush(dev, NULL);
+ igc_clear_all_filter(dev);
+
+ igc_intr_other_disable(dev);
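+	/* retry unregistering: the call may keep returning -EAGAIN
+	 * while the interrupt callback is still executing
+	 */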
+ do {
+ int ret = rte_intr_callback_unregister(intr_handle,
+ eth_igc_interrupt_handler, dev);
+ if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
+ break;
+
+ PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
+ DELAY(200 * 1000); /* delay 200ms */
+ } while (retry++ < 5);
+
+ igc_phy_hw_reset(hw);
+ igc_hw_control_release(hw);
+ igc_dev_free_queues(dev);
+
+ /* Reset any pending lock */
+ igc_reset_swfw_lock(hw);
+}
+
+static void
+igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+}
+
+static int
+eth_igc_dev_init(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ int i, error = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ dev->dev_ops = ð_igc_ops;
+
+	/*
+	 * For secondary processes, we don't initialize any further as the
+	 * primary has already done this work. Only check that we don't need
+	 * a different RX function.
+	 */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ hw->back = pci_dev;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+
+ igc_identify_hardware(dev, pci_dev);
+ if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ igc_get_bus_info(hw);
+
+ /* Reset any pending lock */
+ if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Finish initialization */
+ if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
+
+ /* Copper options */
+ if (hw->phy.media_type == igc_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = igc_ms_hw_default;
+ }
+
+	/*
+	 * Start from a known state; this is important for reading the
+	 * NVM and MAC address from it.
+	 */
+ igc_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (igc_validate_nvm_checksum(hw) < 0) {
+		/*
+		 * Some PCI-E parts fail the first check due to the link
+		 * being in sleep state; call it again and, if it fails a
+		 * second time, it's a real issue.
+		 */
+ if (igc_validate_nvm_checksum(hw) < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ error = -EIO;
+ goto err_late;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ if (igc_read_mac_addr(hw) != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ dev->data->mac_addrs = rte_zmalloc("igc",
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+ if (dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
+ RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ error = -ENOMEM;
+ goto err_late;
+ }
+
+ /* Copy the permanent MAC address */
+ rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
+ &dev->data->mac_addrs[0]);
+
+ /* Now initialize the hardware */
+ if (igc_hardware_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+ error = -ENODEV;
+ goto err_late;
+ }
+
+ /* Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ hw->mac.get_link_status = 1;
+ igc->stopped = 0;
+
+ /* Indicate SOL/IDER usage */
+ if (igc_check_reset_block(hw) < 0)
+ PMD_INIT_LOG(ERR,
+ "PHY reset is blocked due to SOL/IDER session.");
+
+ PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
+ dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ eth_igc_interrupt_handler, (void *)dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* enable support intr */
+ igc_intr_other_enable(dev);
+
+	/* initialize the queue stats mapping */
+ for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
+ igc->txq_stats_map[i] = -1;
+ igc->rxq_stats_map[i] = -1;
+ }
+
+ igc_flow_init(dev);
+ igc_clear_all_filter(dev);
+ return 0;
+
+err_late:
+ igc_hw_control_release(hw);
+ return error;
+}
+
+static int
+eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ eth_igc_close(eth_dev);
+ return 0;
+}
+
+static int
+eth_igc_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = eth_igc_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ return eth_igc_dev_init(dev);
+}
+
+static int
+eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t rctl;
+
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+ rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ return 0;
+}
+
+static int
+eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t rctl;
+
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+ rctl &= (~IGC_RCTL_UPE);
+ if (dev->data->all_multicast == 1)
+ rctl |= IGC_RCTL_MPE;
+ else
+ rctl &= (~IGC_RCTL_MPE);
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ return 0;
+}
+
+static int
+eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t rctl;
+
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+ rctl |= IGC_RCTL_MPE;
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ return 0;
+}
+
+static int
+eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t rctl;
+
+ if (dev->data->promiscuous == 1)
+ return 0; /* must remain in all_multicast mode */
+
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+ rctl &= (~IGC_RCTL_MPE);
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ return 0;
+}
+
+static int
+eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_fw_version fw;
+ int ret;
+
+ igc_get_fw_version(hw, &fw);
+
+ /* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+	} else {
+		/* no option rom */
+		if (fw.etrack_id != 0x0000) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor,
+ fw.etrack_id);
+ } else {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor,
+ fw.eep_build);
+ }
+ }
+
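+	/* snprintf() returns the length it would have written,
+	 * excluding the terminating '\0'
+	 */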
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
+static int
+eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+ dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
+ dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
+ dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
+ dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
+ dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
+ dev_info->max_vmdq_pools = 0;
+
+ dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+ dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = IGC_DEFAULT_RX_PTHRESH,
+ .hthresh = IGC_DEFAULT_RX_HTHRESH,
+ .wthresh = IGC_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = IGC_DEFAULT_TX_PTHRESH,
+ .hthresh = IGC_DEFAULT_TX_HTHRESH,
+ .wthresh = IGC_DEFAULT_TX_WTHRESH,
+ },
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+ ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+ ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
+
+ dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ return 0;
+}
+
+static int
+eth_igc_led_on(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static int
+eth_igc_led_off(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
+}
+
+static const uint32_t *
+eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /* refers to rx_desc_pkt_info_to_pkt_type() */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_TUNNEL_IP,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
+static int
+eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
+ uint32_t rctl;
+
+	/* if extended VLAN has been enabled */
+ if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
+ frame_size += VLAN_TAG_SIZE;
+
+ /* check that mtu is within the allowed range */
+ if (mtu < RTE_ETHER_MIN_MTU ||
+ frame_size > MAX_RX_JUMBO_FRAME_SIZE)
+ return -EINVAL;
+
+ /*
+ * refuse mtu that requires the support of scattered packets when
+ * this feature has not been enabled before.
+ */
+ if (!dev->data->scattered_rx &&
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ return -EINVAL;
+
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+
+ /* switch to jumbo mode if needed */
+ if (mtu > RTE_ETHER_MTU) {
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rctl |= IGC_RCTL_LPE;
+ } else {
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ rctl &= ~IGC_RCTL_LPE;
+ }
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
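+	/* RLPML caps the maximum receive packet length accepted by hw */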
+ IGC_WRITE_REG(hw, IGC_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+ return 0;
+}
+
+static int
+eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ igc_rar_set(hw, mac_addr->addr_bytes, index);
+ RTE_SET_USED(pool);
+ return 0;
+}
+
+static void
+eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+ uint8_t addr[RTE_ETHER_ADDR_LEN];
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ memset(addr, 0, sizeof(addr));
+ igc_rar_set(hw, addr, index);
+}
+
+static int
+eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ igc_rar_set(hw, addr->addr_bytes, 0);
+ return 0;
+}
+
+static int
+eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
+ return 0;
+}
+
+/*
+ * Read hardware registers
+ */
+static void
+igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
+{
+ int pause_frames;
+
+ uint64_t old_gprc = stats->gprc;
+ uint64_t old_gptc = stats->gptc;
+ uint64_t old_tpr = stats->tpr;
+ uint64_t old_tpt = stats->tpt;
+ uint64_t old_rpthc = stats->rpthc;
+ uint64_t old_hgptc = stats->hgptc;
+
+ stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
+ stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
+ stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
+ stats->mpc += IGC_READ_REG(hw, IGC_MPC);
+ stats->scc += IGC_READ_REG(hw, IGC_SCC);
+ stats->ecol += IGC_READ_REG(hw, IGC_ECOL);
+
+ stats->mcc += IGC_READ_REG(hw, IGC_MCC);
+ stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
+ stats->colc += IGC_READ_REG(hw, IGC_COLC);
+
+ stats->dc += IGC_READ_REG(hw, IGC_DC);
+ stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
+ stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
+ stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
+ stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
+ stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);
+
+ /*
+ * For watchdog management we need to know if we have been
+ * paused during the last interval, so capture that here.
+ */
+ pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
+ stats->xoffrxc += pause_frames;
+ stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
+ stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
+ stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
+ stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
+ stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
+ stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
+ stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
+ stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
+ stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
+ stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
+ stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
+ stats->gptc += IGC_READ_REG(hw, IGC_GPTC);
+
+ /* For the 64-bit byte counters the low dword must be read first. */
+ /* Both registers clear on the read of the high dword */
+
+ /* Workaround CRC bytes included in size, take away 4 bytes/packet */
+ stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
+ stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
+ stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
+ stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
+ stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
+ stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
+
+ stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
+ stats->ruc += IGC_READ_REG(hw, IGC_RUC);
+ stats->rfc += IGC_READ_REG(hw, IGC_RFC);
+ stats->roc += IGC_READ_REG(hw, IGC_ROC);
+ stats->rjc += IGC_READ_REG(hw, IGC_RJC);
+
+ stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
+ stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
+ stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
+ stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
+ stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
+ stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
+ stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);
+
+ stats->tpr += IGC_READ_REG(hw, IGC_TPR);
+ stats->tpt += IGC_READ_REG(hw, IGC_TPT);
+
+ stats->tor += IGC_READ_REG(hw, IGC_TORL);
+ stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
+ stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
+ stats->tot += IGC_READ_REG(hw, IGC_TOTL);
+ stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
+ stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
+
+ stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
+ stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
+ stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
+ stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
+ stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
+ stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
+ stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
+ stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
+ stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);
+
+ stats->iac += IGC_READ_REG(hw, IGC_IAC);
+ stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
+ stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
+ stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);
+
+ /* Host to Card Statistics */
+ stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
+ stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
+ stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
+ stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
+ stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
+ stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
+ stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
+}
+
+/*
+ * Write 0 to all queue stats registers
+ */
+static void
+igc_reset_queue_stats_register(struct igc_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
+ IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
+ IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
+ IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
+ IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
+ IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
+ IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
+ IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
+ }
+}
+
+/*
+ * Read all hardware queue stats registers
+ */
+static void
+igc_read_queue_stats_register(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_hw_queue_stats *queue_stats =
+ IGC_DEV_PRIVATE_QUEUE_STATS(dev);
+ int i;
+
+ /*
+ * This register is not cleared on read. Furthermore, the register wraps
+ * around back to 0x00000000 on the next increment when reaching a value
+ * of 0xFFFFFFFF and then continues normal count operation.
+ */
+ for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
+ union {
+ u64 ddword;
+ u32 dword[2];
+ } value;
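+		/* U32_0_IN_U64/U32_1_IN_U64 select the low/high dword of
+		 * the union in an endianness-safe way
+		 */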
+ u32 tmp;
+
+		/*
+		 * Read the register first; if the value is smaller than the
+		 * previous read, the register has overflowed, so increment
+		 * the high 4 bytes by 1 and replace the low 4 bytes with the
+		 * new value.
+		 */
+ tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
+ value.ddword = queue_stats->pqgprc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->pqgprc[i] = value.ddword;
+
+ tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
+ value.ddword = queue_stats->pqgptc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->pqgptc[i] = value.ddword;
+
+ tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
+ value.ddword = queue_stats->pqgorc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->pqgorc[i] = value.ddword;
+
+ tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
+ value.ddword = queue_stats->pqgotc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->pqgotc[i] = value.ddword;
+
+ tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
+ value.ddword = queue_stats->pqmprc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->pqmprc[i] = value.ddword;
+
+ tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
+ value.ddword = queue_stats->rqdpc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->rqdpc[i] = value.ddword;
+
+ tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
+ value.ddword = queue_stats->tqdpc[i];
+ if (value.dword[U32_0_IN_U64] > tmp)
+ value.dword[U32_1_IN_U64]++;
+ value.dword[U32_0_IN_U64] = tmp;
+ queue_stats->tqdpc[i] = value.ddword;
+ }
+}
+
+static int
+eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+ struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
+ struct igc_hw_queue_stats *queue_stats =
+ IGC_DEV_PRIVATE_QUEUE_STATS(dev);
+ int i;
+
+ /*
+ * Cancel status handler since it will read the queue status registers
+ */
+ rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
+
+	/* Read the stats registers */
+ igc_read_queue_stats_register(dev);
+ igc_read_stats_registers(hw, stats);
+
+ if (rte_stats == NULL) {
+ /* Restart queue status handler */
+ rte_eal_alarm_set(IGC_ALARM_INTERVAL,
+ igc_update_queue_stats_handler, dev);
+ return -EINVAL;
+ }
+
+ /* Rx Errors */
+ rte_stats->imissed = stats->mpc;
+ rte_stats->ierrors = stats->crcerrs +
+ stats->rlec + stats->ruc + stats->roc +
+ stats->rxerrc + stats->algnerrc;
+
+ /* Tx Errors */
+ rte_stats->oerrors = stats->ecol + stats->latecol;
+
+ rte_stats->ipackets = stats->gprc;
+ rte_stats->opackets = stats->gptc;
+ rte_stats->ibytes = stats->gorc;
+ rte_stats->obytes = stats->gotc;
+
+	/* Get per-queue stats */
+	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
+		/* Get TX queue stats */
+ int map_id = igc->txq_stats_map[i];
+ if (map_id >= 0) {
+ rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
+ rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
+ }
+		/* Get RX queue stats */
+ map_id = igc->rxq_stats_map[i];
+ if (map_id >= 0) {
+ rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
+ rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
+ rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
+ }
+ }
+
+ /* Restart queue status handler */
+ rte_eal_alarm_set(IGC_ALARM_INTERVAL,
+ igc_update_queue_stats_handler, dev);
+ return 0;
+}
+
+static int
+eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_hw_stats *hw_stats =
+ IGC_DEV_PRIVATE_STATS(dev);
+ unsigned int i;
+
+ igc_read_stats_registers(hw, hw_stats);
+
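+	/* per the ethdev convention, report the required array size
+	 * when the caller's buffer is too small
+	 */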
+ if (n < IGC_NB_XSTATS)
+ return IGC_NB_XSTATS;
+
+	/* If this is a reset, xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!xstats)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGC_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+ rte_igc_stats_strings[i].offset);
+ }
+
+ return IGC_NB_XSTATS;
+}
+
+static int
+eth_igc_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
+ struct igc_hw_queue_stats *queue_stats =
+ IGC_DEV_PRIVATE_QUEUE_STATS(dev);
+
+	/* Cancel the queue stats handler to avoid a conflict */
+ rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
+
+ /* HW registers are cleared on read */
+ igc_reset_queue_stats_register(hw);
+ igc_read_stats_registers(hw, hw_stats);
+
+ /* Reset software totals */
+ memset(hw_stats, 0, sizeof(*hw_stats));
+ memset(queue_stats, 0, sizeof(*queue_stats));
+
+ /* Restart the queue status handler */
+ rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
+ dev);
+
+ return 0;
+}
+
+static int
+eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int size)
+{
+ unsigned int i;
+
+ if (xstats_names == NULL)
+ return IGC_NB_XSTATS;
+
+ if (size < IGC_NB_XSTATS) {
+ PMD_DRV_LOG(ERR, "not enough buffers!");
+ return IGC_NB_XSTATS;
+ }
+
+ for (i = 0; i < IGC_NB_XSTATS; i++)
+ strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
+ sizeof(xstats_names[i].name));
+
+ return IGC_NB_XSTATS;
+}
+
+static int
+eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (!ids)
+ return eth_igc_xstats_get_names(dev, xstats_names, limit);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= IGC_NB_XSTATS) {
+ PMD_DRV_LOG(ERR, "id value isn't valid");
+ return -EINVAL;
+ }
+ strlcpy(xstats_names[i].name,
+ rte_igc_stats_strings[ids[i]].name,
+ sizeof(xstats_names[i].name));
+ }
+ return limit;
+}
+
+static int
+eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
+ unsigned int i;
+
+ igc_read_stats_registers(hw, hw_stats);
+
+ if (!ids) {
+ if (n < IGC_NB_XSTATS)
+ return IGC_NB_XSTATS;
+
+		/* If this is a reset, values is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!values)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGC_NB_XSTATS; i++)
+ values[i] = *(uint64_t *)(((char *)hw_stats) +
+ rte_igc_stats_strings[i].offset);
+
+ return IGC_NB_XSTATS;
+
+ } else {
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= IGC_NB_XSTATS) {
+ PMD_DRV_LOG(ERR, "id value isn't valid");
+ return -EINVAL;
+ }
+ values[i] = *(uint64_t *)(((char *)hw_stats) +
+ rte_igc_stats_strings[ids[i]].offset);
+ }
+ return n;
+ }
+}
+
+static int
+eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
+ uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
+{
+ struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
+
+ /* check queue id is valid */
+ if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
+ PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
+ queue_id, IGC_QUEUE_PAIRS_NUM - 1);
+ return -EINVAL;
+ }
+
+	/* store the stats mapping index */
+ if (is_rx)
+ igc->rxq_stats_map[queue_id] = stat_idx;
+ else
+ igc->txq_stats_map[queue_id] = stat_idx;
+
+ return 0;
+}
+
+static int
+eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = IGC_MISC_VEC_ID;
+
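+	/* queue vectors start after the misc vector when non-RX
+	 * interrupts are multiplexed on their own vector
+	 */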
+ if (rte_intr_allow_others(intr_handle))
+ vec = IGC_RX_VEC_START;
+
+ uint32_t mask = 1u << (queue_id + vec);
+
+ IGC_WRITE_REG(hw, IGC_EIMC, mask);
+ IGC_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = IGC_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = IGC_RX_VEC_START;
+
+ uint32_t mask = 1u << (queue_id + vec);
+
+ IGC_WRITE_REG(hw, IGC_EIMS, mask);
+ IGC_WRITE_FLUSH(hw);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t ctrl;
+ int tx_pause;
+ int rx_pause;
+
+ fc_conf->pause_time = hw->fc.pause_time;
+ fc_conf->high_water = hw->fc.high_water;
+ fc_conf->low_water = hw->fc.low_water;
+ fc_conf->send_xon = hw->fc.send_xon;
+ fc_conf->autoneg = hw->mac.autoneg;
+
+ /*
+ * Return rx_pause and tx_pause status according to actual setting of
+ * the TFCE and RFCE bits in the CTRL register.
+ */
+ ctrl = IGC_READ_REG(hw, IGC_CTRL);
+ if (ctrl & IGC_CTRL_TFCE)
+ tx_pause = 1;
+ else
+ tx_pause = 0;
+
+ if (ctrl & IGC_CTRL_RFCE)
+ rx_pause = 1;
+ else
+ rx_pause = 0;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+ return 0;
+}
+
+static int
+eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t rx_buf_size;
+ uint32_t max_high_water;
+ uint32_t rctl;
+ int err;
+
+ if (fc_conf->autoneg != hw->mac.autoneg)
+ return -ENOTSUP;
+
+ rx_buf_size = igc_get_rx_buffer_size(hw);
+ PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+	/* Reserve at least one Ethernet frame for the watermark */
+ max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
+ if (fc_conf->high_water > max_high_water ||
+ fc_conf->high_water < fc_conf->low_water) {
+ PMD_DRV_LOG(ERR,
+ "Incorrect high(%u)/low(%u) water value, max is %u",
+ fc_conf->high_water, fc_conf->low_water,
+ max_high_water);
+ return -EINVAL;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ hw->fc.requested_mode = igc_fc_none;
+ break;
+ case RTE_FC_RX_PAUSE:
+ hw->fc.requested_mode = igc_fc_rx_pause;
+ break;
+ case RTE_FC_TX_PAUSE:
+ hw->fc.requested_mode = igc_fc_tx_pause;
+ break;
+ case RTE_FC_FULL:
+ hw->fc.requested_mode = igc_fc_full;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
+ return -EINVAL;
+ }
+
+ hw->fc.pause_time = fc_conf->pause_time;
+ hw->fc.high_water = fc_conf->high_water;
+ hw->fc.low_water = fc_conf->low_water;
+ hw->fc.send_xon = fc_conf->send_xon;
+
+ err = igc_setup_link_generic(hw);
+ if (err == IGC_SUCCESS) {
+		/*
+		 * Check if we want to forward MAC frames; the driver doesn't
+		 * have a native capability to do that, so write the registers
+		 * ourselves.
+		 */
+ rctl = IGC_READ_REG(hw, IGC_RCTL);
+
+		/* set or clear the RCTL.PMCF bit depending on configuration */
+ if (fc_conf->mac_ctrl_frame_fwd != 0)
+ rctl |= IGC_RCTL_PMCF;
+ else
+ rctl &= ~IGC_RCTL_PMCF;
+
+ IGC_WRITE_REG(hw, IGC_RCTL, rctl);
+ IGC_WRITE_FLUSH(hw);
+
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
+ return -EIO;
+}
+
+static int
+eth_igc_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint16_t i;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR,
+ "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
+ reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
+ /* set redirection table */
+ for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ union igc_rss_reta_reg reta, reg;
+ uint16_t idx, shift;
+ uint8_t j, mask;
+
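+		/* each reta_conf[] entry covers RTE_RETA_GROUP_SIZE (64)
+		 * redirection-table entries; the mask selects the entries
+		 * to update
+		 */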
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGC_RSS_RDT_REG_SIZE_MASK);
+
+		/* skip this register if no update is needed */
+ if (!mask ||
+ shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ continue;
+
+		/* check whether the old register value must be read first */
+ if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
+ reg.dword = 0;
+ else
+ reg.dword = IGC_READ_REG_LE_VALUE(hw,
+ IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
+
+ /* update the register */
+ RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
+ for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
+ if (mask & (1u << j))
+ reta.bytes[j] =
+ (uint8_t)reta_conf[idx].reta[shift + j];
+ else
+ reta.bytes[j] = reg.bytes[j];
+ }
+ IGC_WRITE_REG_LE_VALUE(hw,
+ IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
+ }
+
+ return 0;
+}
+
+static int
+eth_igc_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint16_t i;
+
+ if (reta_size != ETH_RSS_RETA_SIZE_128) {
+ PMD_DRV_LOG(ERR,
+ "The size of RSS redirection table configured(%d) doesn't match the number hardware can supported(%d)",
+ reta_size, ETH_RSS_RETA_SIZE_128);
+ return -EINVAL;
+ }
+
+ RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
+ /* read redirection table */
+ for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
+ union igc_rss_reta_reg reta;
+ uint16_t idx, shift;
+ uint8_t j, mask;
+
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+ IGC_RSS_RDT_REG_SIZE_MASK);
+
+		/* skip this register if none of its entries are requested */
+ if (!mask ||
+ shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
+ continue;
+
+ /* read register and get the queue index */
+ RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
+ reta.dword = IGC_READ_REG_LE_VALUE(hw,
+ IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
+ for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
+ if (mask & (1u << j))
+ reta_conf[idx].reta[shift + j] = reta.bytes[j];
+ }
+ }
+
+ return 0;
+}
+
+static int
+eth_igc_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ igc_hw_rss_hash_set(hw, rss_conf);
+ return 0;
+}
+
+static int
+eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
+ uint32_t mrqc;
+ uint64_t rss_hf;
+
+ if (hash_key != NULL) {
+ int i;
+
+		/* if not enough space to store the hash key */
+ if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
+ rss_conf->rss_key_len, IGC_HKEY_SIZE);
+ return -EINVAL;
+ }
+
+ /* read RSS key from register */
+ for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
+ hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
+ }
+
+ /* get RSS functions configured in MRQC register */
+ mrqc = IGC_READ_REG(hw, IGC_MRQC);
+ if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
+ return 0;
+
+ rss_hf = 0;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
+ rss_hf |= ETH_RSS_IPV4;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
+ rss_hf |= ETH_RSS_IPV6_UDP_EX;
+
+ rss_conf->rss_hf |= rss_hf;
+ return 0;
+}
+
+static int
+eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
+ uint32_t vfta;
+ uint32_t vid_idx;
+ uint32_t vid_bit;
+
+ vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
+ vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
+ vfta = shadow_vfta->vfta[vid_idx];
+ if (on)
+ vfta |= vid_bit;
+ else
+ vfta &= ~vid_bit;
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);
+
+ /* update local VFTA copy */
+ shadow_vfta->vfta[vid_idx] = vfta;
+
+ return 0;
+}
+
+static void
+igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ igc_read_reg_check_clear_bits(hw, IGC_RCTL,
+ IGC_RCTL_CFIEN | IGC_RCTL_VFE);
+}
+
+static void
+igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
+ uint32_t reg_val;
+ int i;
+
+ /* Filter Table Enable, CFI not used for packet acceptance */
+ reg_val = IGC_READ_REG(hw, IGC_RCTL);
+ reg_val &= ~IGC_RCTL_CFIEN;
+ reg_val |= IGC_RCTL_VFE;
+ IGC_WRITE_REG(hw, IGC_RCTL, reg_val);
+
+ /* restore VFTA table */
+ for (i = 0; i < IGC_VFTA_SIZE; i++)
+ IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
+}
+
+static void
+igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+ igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
+}
+
+static int
+igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t ctrl_ext;
+
+ ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+	/* if extended VLAN hasn't been enabled */
+ if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
+ return 0;
+
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+ goto write_ext_vlan;
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
+ RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
+ PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
+ return -EINVAL;
+ }
+ dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
+ IGC_WRITE_REG(hw, IGC_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+write_ext_vlan:
+ IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
+ return 0;
+}
+
+static int
+igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t ctrl_ext;
+
+ ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
+
+	/* if extended VLAN has been enabled */
+ if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
+ return 0;
+
+ if ((dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
+ goto write_ext_vlan;
+
+ /* Update maximum packet length */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+ MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
+ PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
+ return -EINVAL;
+ }
+ dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
+ IGC_WRITE_REG(hw, IGC_RLPML,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+write_ext_vlan:
+ IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
+ return 0;
+}
+
+static int
+eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ igc_vlan_hw_strip_enable(dev);
+ else
+ igc_vlan_hw_strip_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ igc_vlan_hw_filter_enable(dev);
+ else
+ igc_vlan_hw_filter_disable(dev);
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ return igc_vlan_hw_extend_enable(dev);
+ else
+ return igc_vlan_hw_extend_disable(dev);
+ }
+
+ return 0;
+}
+
+static int
+eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
+{
+ struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t reg_val;
+
+	/* only the outer TPID of double VLAN can be configured */
+ if (vlan_type == ETH_VLAN_TYPE_OUTER) {
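+		/* the outer TPID is presumably held in the IGC_VET_EXT
+		 * field (upper bits) of the VET register
+		 */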
+ reg_val = IGC_READ_REG(hw, IGC_VET);
+ reg_val = (reg_val & (~IGC_VET_EXT)) |
+ ((uint32_t)tpid << IGC_VET_EXT_SHIFT);
+ IGC_WRITE_REG(hw, IGC_VET, reg_val);
+
+ return 0;
+ }
+
+	/* all other TPID values are read-only */
+ PMD_DRV_LOG(ERR, "Not supported");
+ return -ENOTSUP;