#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
+#include "igb_regs.h"
/*
* Default values for port configuration
#define IGB_DEFAULT_TX_HTHRESH 0
#define IGB_DEFAULT_TX_WTHRESH 0
+#define IGB_HKEY_MAX_INDEX 10
+
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
#define IGB_8_BIT_WIDTH CHAR_BIT
#define IGB_8_BIT_MASK UINT8_MAX
+/* Additional timesync values. */
+#define E1000_ETQF_FILTER_1588 3
+#define E1000_TIMINCA_INCVALUE 16000000
+#define E1000_TIMINCA_INIT ((0x02 << E1000_TIMINCA_16NS_SHIFT) \
+ | E1000_TIMINCA_INCVALUE)
+#define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
+
static int eth_igb_configure(struct rte_eth_dev *dev);
static int eth_igb_start(struct rte_eth_dev *dev);
static void eth_igb_stop(struct rte_eth_dev *dev);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
static void igbvf_intr_disable(struct e1000_hw *hw);
static int igbvf_dev_configure(struct rte_eth_dev *dev);
uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
+static int igbvf_get_reg_length(struct rte_eth_dev *dev);
+static int igbvf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+
static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
-
+static int eth_igb_get_reg_length(struct rte_eth_dev *dev);
+static int eth_igb_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs);
+static int eth_igb_get_eeprom_length(struct rte_eth_dev *dev);
+static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
+static int igb_timesync_enable(struct rte_eth_dev *dev);
+static int igb_timesync_disable(struct rte_eth_dev *dev);
+static int igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static int eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id);
+static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector);
+static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
+ uint8_t index, uint8_t offset);
+static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
.vlan_tpid_set = eth_igb_vlan_tpid_set,
.vlan_offload_set = eth_igb_vlan_offload_set,
.rx_queue_setup = eth_igb_rx_queue_setup,
+ .rx_queue_intr_enable = eth_igb_rx_queue_intr_enable,
+ .rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
.rx_queue_release = eth_igb_rx_queue_release,
.rx_queue_count = eth_igb_rx_queue_count,
.rx_descriptor_done = eth_igb_rx_descriptor_done,
.flow_ctrl_set = eth_igb_flow_ctrl_set,
.mac_addr_add = eth_igb_rar_set,
.mac_addr_remove = eth_igb_rar_clear,
+ .mac_addr_set = eth_igb_default_mac_addr_set,
.reta_update = eth_igb_rss_reta_update,
.reta_query = eth_igb_rss_reta_query,
.rss_hash_update = eth_igb_rss_hash_update,
.rss_hash_conf_get = eth_igb_rss_hash_conf_get,
.filter_ctrl = eth_igb_filter_ctrl,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .timesync_enable = igb_timesync_enable,
+ .timesync_disable = igb_timesync_disable,
+ .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
+ .get_reg_length = eth_igb_get_reg_length,
+ .get_reg = eth_igb_get_regs,
+ .get_eeprom_length = eth_igb_get_eeprom_length,
+ .get_eeprom = eth_igb_get_eeprom,
+ .set_eeprom = eth_igb_set_eeprom,
};
/*
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
+ .mac_addr_set = igbvf_default_mac_addr_set,
+ .get_reg_length = igbvf_get_reg_length,
+ .get_reg = igbvf_get_regs,
};
/**
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
- E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
uint32_t ctrl_ext;
pci_dev = eth_dev->pci_dev;
goto err_late;
}
hw->mac.get_link_status = 1;
+ adapter->stopped = 0;
/* Indicate SOL/IDER usage */
if (e1000_check_reset_block(hw) < 0) {
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
E1000_WRITE_FLUSH(hw);
- PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
+ PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&(pci_dev->intr_handle),
- eth_igb_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
-
/* enable support intr */
igb_intr_enable(eth_dev);
return (error);
}
+static int
+eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ pci_dev = eth_dev->pci_dev;
+
+ if (adapter->stopped == 0)
+ eth_igb_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ /* Reset any pending lock */
+ igb_reset_swfw_lock(hw);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ /* uninitialize PF if max_vfs not zero */
+ igb_pf_host_uninit(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_callback_unregister(&(pci_dev->intr_handle),
+ eth_igb_interrupt_handler, (void *)eth_dev);
+
+ return 0;
+}
+
/*
* Virtual Function device init
*/
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
int diag;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ adapter->stopped = 0;
/* Initialize the shared code (base driver) */
diag = e1000_setup_init_funcs(hw, TRUE);
return 0;
}
+static int
+eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (adapter->stopped == 0)
+ igbvf_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
static struct eth_driver rte_igb_pmd = {
.pci_drv = {
.name = "rte_igb_pmd",
.id_table = pci_id_igb_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_igb_dev_init,
+ .eth_dev_uninit = eth_igb_dev_uninit,
.dev_private_size = sizeof(struct e1000_adapter),
};
.pci_drv = {
.name = "rte_igbvf_pmd",
.id_table = pci_id_igbvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
},
.eth_dev_init = eth_igbvf_dev_init,
+ .eth_dev_uninit = eth_igbvf_dev_uninit,
.dev_private_size = sizeof(struct e1000_adapter),
};
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret, i, mask;
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ int ret, mask;
+ uint32_t intr_vector = 0;
uint32_t ctrl_ext;
PMD_INIT_FUNC_TRACE();
PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
return (-EIO);
}
+ adapter->stopped = 0;
E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
/* configure PF module if SRIOV enabled */
igb_pf_host_configure(dev);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ intr_vector = dev->data->nb_rx_queues;
+
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+
+ if (rte_intr_dp_is_en(intr_handle)) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec\n", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ /* configure MSI-X for rx interrupts */
+ eth_igb_configure_msix_intr(dev);
+
/* Configure for OS presence */
igb_init_manageability(hw);
igb_vmdq_vlan_hw_filter_enable(dev);
}
- /*
- * Configure the Interrupt Moderation register (EITR) with the maximum
- * possible value (0xFFFF) to minimize "System Partial Write" issued by
- * spurious [DMA] memory updates of RX and TX ring descriptors.
- *
- * With a EITR granularity of 2 microseconds in the 82576, only 7/8
- * spurious memory updates per second should be expected.
- * ((65535 * 2) / 1000.1000 ~= 0.131 second).
- *
- * Because interrupts are not used at all, the MSI-X is not activated
- * and interrupt moderation is controlled by EITR[0].
- *
- * Note that having [almost] disabled memory updates of RX and TX ring
- * descriptors through the Interrupt Moderation mechanism, memory
- * updates of ring descriptors are now moderated by the configurable
- * value of Write-Back Threshold registers.
- */
if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
(hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) {
- uint32_t ivar;
-
- /* Enable all RX & TX queues in the IVAR registers */
- ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
- for (i = 0; i < 8; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
-
/* Configure EITR with the maximum possible value (0xFFFF) */
E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
}
e1000_setup_link(hw);
/* check if lsc interrupt feature is enabled */
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- ret = eth_igb_lsc_interrupt_setup(dev);
+ if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ if (rte_intr_allow_others(intr_handle)) {
+ rte_intr_callback_register(intr_handle,
+ eth_igb_interrupt_handler,
+ (void *)dev);
+ eth_igb_lsc_interrupt_setup(dev);
+ } else
+ PMD_INIT_LOG(INFO, "lsc won't enable because of"
+ " no intr multiplex\n");
+ }
+
+ /* check if rxq interrupt is enabled */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ eth_igb_rxq_interrupt_setup(dev);
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(intr_handle);
/* resume enabled intr since hw reset */
igb_intr_enable(dev);
struct e1000_flex_filter *p_flex;
struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
igb_intr_disable(hw);
+
+ /* disable intr eventfd mapping */
+ rte_intr_disable(intr_handle);
+
igb_pf_reset_hw(hw);
E1000_WRITE_REG(hw, E1000_WUC, 0);
rte_free(p_2tuple);
}
filter_info->twotuple_mask = 0;
+
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
}
static void
eth_igb_close(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
+ struct rte_pci_device *pci_dev;
eth_igb_stop(dev);
+ adapter->stopped = 1;
+
e1000_phy_hw_reset(hw);
igb_release_manageability(hw);
igb_hw_control_release(hw);
E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
}
- igb_dev_clear_queues(dev);
+ igb_dev_free_queues(dev);
+
+ pci_dev = dev->pci_dev;
+ if (pci_dev->intr_handle.intr_vec) {
+ rte_free(pci_dev->intr_handle.intr_vec);
+ pci_dev->intr_handle.intr_vec = NULL;
+ }
memset(&link, 0, sizeof(link));
rte_igb_dev_atomic_write_link_status(dev, &link);
/* Should not happen */
break;
}
+ dev_info->hash_key_size = IGB_HKEY_MAX_INDEX * sizeof(uint32_t);
dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
dev_info->flow_type_rss_offloads = IGB_RSS_OFFLOAD_ALL;
return 0;
}
+/* It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+ uint32_t mask, regval;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_dev_info dev_info;
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ eth_igb_infos_get(dev, &dev_info);
+
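+ /* build a mask with one bit per possible RX queue and unmask those bits in EIMS */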
+ mask = 0xFFFFFFFF >> (32 - dev_info.max_rx_queues);
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
+
+ return 0;
+}
+
/*
* It reads ICR and gets interrupt causes, check it and set a bit flag
* to update link status.
PMD_INIT_LOG(INFO, " Port %d: Link Down",
dev->data->port_id);
}
- PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+
+ PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
dev->pci_dev->addr.domain,
dev->pci_dev->addr.bus,
dev->pci_dev->addr.devid,
e1000_rar_set(hw, addr, index);
}
+static void
+eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ eth_igb_rar_clear(dev, 0);
+
+ eth_igb_rar_set(dev, (void *)addr, 0, 0);
+}
/*
* Virtual Function operations
*/
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
if (!conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+ PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
conf->rxmode.hw_strip_crc = 1;
}
#else
if (conf->rxmode.hw_strip_crc) {
- PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+ PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
conf->rxmode.hw_strip_crc = 0;
}
#endif
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
int ret;
PMD_INIT_FUNC_TRACE();
hw->mac.ops.reset_hw(hw);
+ adapter->stopped = 0;
/* Set all vfta */
igbvf_set_vfta_all(dev,1);
igbvf_dev_close(struct rte_eth_dev *dev)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_adapter *adapter =
+ E1000_DEV_PRIVATE(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
e1000_reset_hw(hw);
igbvf_dev_stop(dev);
+ adapter->stopped = 1;
+ igb_dev_free_queues(dev);
}
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
return 0;
}
+static void
+igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* index is not used by rar_set() */
+ hw->mac.ops.rar_set(hw, (void *)addr, 0);
+}
+
+
static int
eth_igb_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
return 0;
}
+static int
+igb_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+ uint32_t tsauxc;
+
+ /* Enable the system time register since it isn't on by default. */
+ tsauxc = E1000_READ_REG(hw, E1000_TSAUXC);
+ tsauxc &= ~E1000_TSAUXC_DISABLE_SYSTIME;
+ E1000_WRITE_REG(hw, E1000_TSAUXC, tsauxc);
+
+ /* Start incrementing the register used to timestamp PTP packets. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, E1000_TIMINCA_INIT);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588),
+ (ETHER_TYPE_1588 |
+ E1000_ETQF_FILTER_ENABLE |
+ E1000_ETQF_1588));
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ tsync_ctl |= E1000_TSYNCRXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
+
+ /* Enable timestamping of transmitted PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ tsync_ctl |= E1000_TSYNCTXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
+
+ return 0;
+}
+
+static int
+igb_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ tsync_ctl &= ~E1000_TSYNCTXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ tsync_ctl &= ~E1000_TSYNCRXCTL_ENABLED;
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
+
+ /* Stop incrementing the System Time registers. */
+ E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
+
+ return 0;
+}
+
+static int
+igb_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_rxctl;
+ uint32_t rx_stmpl;
+ uint32_t rx_stmph;
+
+ tsync_rxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
+ if ((tsync_rxctl & E1000_TSYNCRXCTL_VALID) == 0)
+ return -EINVAL;
+
+ rx_stmpl = E1000_READ_REG(hw, E1000_RXSTMPL);
+ rx_stmph = E1000_READ_REG(hw, E1000_RXSTMPH);
+
+ timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
+ timestamp->tv_nsec = 0;
+
+ return 0;
+}
+
+static int
+igb_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_txctl;
+ uint32_t tx_stmpl;
+ uint32_t tx_stmph;
+
+ tsync_txctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
+ if ((tsync_txctl & E1000_TSYNCTXCTL_VALID) == 0)
+ return -EINVAL;
+
+ tx_stmpl = E1000_READ_REG(hw, E1000_TXSTMPL);
+ tx_stmph = E1000_READ_REG(hw, E1000_TXSTMPH);
+
+ timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
+ timestamp->tv_nsec = 0;
+
+ return 0;
+}
+
+static int
+eth_igb_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = igb_regs[g_ind++]))
+ count += igb_reg_group_count(reg_group);
+
+ return count;
+}
+
+static int
+igbvf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = igbvf_regs[g_ind++]))
+ count += igb_reg_group_count(reg_group);
+
+ return count;
+}
+
+static int
+eth_igb_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = igb_regs[g_ind++]))
+ count += igb_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+igbvf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = igbvf_regs[g_ind++]))
+ count += igb_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+eth_igb_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Return unit is byte count */
+ return hw->nvm.word_size * 2;
+}
+
+static int
+eth_igb_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
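+ /* offset and length are given in bytes; the NVM is addressed in 16-bit words */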
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first >= hw->nvm.word_size) ||
+ ((first + length) >= hw->nvm.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id |
+ ((uint32_t)hw->device_id << 16);
+
+ if ((nvm->ops.read) == NULL)
+ return -ENOTSUP;
+
+ return nvm->ops.read(hw, first, length, data);
+}
+
+static int
+eth_igb_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first >= hw->nvm.word_size) ||
+ ((first + length) >= hw->nvm.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = (uint32_t)hw->vendor_id |
+ ((uint32_t)hw->device_id << 16);
+
+ if ((nvm->ops.write) == NULL)
+ return -ENOTSUP;
+ return nvm->ops.write(hw, first, length, data);
+}
+
static struct rte_driver pmd_igb_drv = {
.type = PMD_PDEV,
.init = rte_igb_pmd_init,
.init = rte_igbvf_pmd_init,
};
+static int
+eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mask = 1 << queue_id;
+
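+ /* setting the queue bit in EIMC masks (disables) that queue's interrupt */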
+ E1000_WRITE_REG(hw, E1000_EIMC, mask);
+ E1000_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t mask = 1 << queue_id;
+ uint32_t regval;
+
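+ /* setting the queue bit in EIMS unmasks (re-enables) that queue's interrupt */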
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
+ E1000_WRITE_FLUSH(hw);
+
+ rte_intr_enable(&dev->pci_dev->intr_handle);
+
+ return 0;
+}
+
+static void
+eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
+ uint8_t index, uint8_t offset)
+{
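+ /* Each IVAR register holds four 8-bit vector entries: 'index' selects
+ * the register and 'offset' the byte (entry) within it.
+ */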
+ uint32_t val = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+
+ /* clear bits */
+ val &= ~((uint32_t)0xFF << offset);
+
+ /* write vector and valid bit */
+ val |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, val);
+}
+
+static void
+eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp = 0;
+
+ if (hw->mac.type == e1000_82575) {
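+ /* 82575: map the queue cause through this vector's MSIXBM bitmap register */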
+ if (direction == 0)
+ tmp = E1000_EICR_RX_QUEUE0 << queue;
+ else if (direction == 1)
+ tmp = E1000_EICR_TX_QUEUE0 << queue;
+ E1000_WRITE_REG(hw, E1000_MSIXBM(msix_vector), tmp);
+ } else if (hw->mac.type == e1000_82576) {
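+ /* 82576: IVAR entry selected by queue mod 8, with RX/TX and queues 8-15 in the upper bytes */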
+ if ((direction == 0) || (direction == 1))
+ eth_igb_write_ivar(hw, msix_vector, queue & 0x7,
+ ((queue & 0x8) << 1) +
+ 8 * direction);
+ } else if ((hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354) ||
+ (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
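+ /* 82580 and newer: each IVAR register covers a pair of queues, RX/TX entries interleaved */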
+ if ((direction == 0) || (direction == 1))
+ eth_igb_write_ivar(hw, msix_vector,
+ queue >> 1,
+ ((queue & 0x1) << 4) +
+ 8 * direction);
+ }
+}
+
+/* Sets up the hardware to generate MSI-X interrupts properly.
+ * @dev
+ * Pointer to struct rte_eth_dev.
+ */
+static void
+eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
+{
+ int queue_id;
+ uint32_t tmpval, regval, intr_mask;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vec = 0;
+ struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
+ /* don't configure the MSI-X registers if no mapping has been set up
+ * between interrupt vectors and event fds
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ /* set interrupt vector for other causes */
+ if (hw->mac.type == e1000_82575) {
+ tmpval = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ /* enable MSI-X PBA support */
+ tmpval |= E1000_CTRL_EXT_PBA_CLR;
+
+ /* Auto-Mask interrupts upon ICR read */
+ tmpval |= E1000_CTRL_EXT_EIAME;
+ tmpval |= E1000_CTRL_EXT_IRCA;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmpval);
+
+ /* enable msix_other interrupt */
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), 0, E1000_EIMS_OTHER);
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | E1000_EIMS_OTHER);
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | E1000_EIMS_OTHER);
+ } else if ((hw->mac.type == e1000_82576) ||
+ (hw->mac.type == e1000_82580) ||
+ (hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354) ||
+ (hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) {
+ /* turn on MSI-X capability first */
+ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
+
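+ /* auto-clear (EIAC) all possible vectors so their cause bits clear when the vector fires */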
+ intr_mask = (1 << intr_handle->max_intr) - 1;
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval | intr_mask);
+
+ /* enable msix_other interrupt */
+ regval = E1000_READ_REG(hw, E1000_EIMS);
+ E1000_WRITE_REG(hw, E1000_EIMS, regval | intr_mask);
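+ /* route the 'other causes' interrupt (e.g. link status change) to the vector after the RX queue vectors */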
+ tmpval = (dev->data->nb_rx_queues | E1000_IVAR_VALID) << 8;
+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmpval);
+ }
+
+ /* use EIAM to auto-mask when an MSI-X interrupt is asserted;
+ * this saves a register write for every interrupt
+ */
+ intr_mask = (1 << intr_handle->nb_efd) - 1;
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIAM, regval | intr_mask);
+
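+ /* give each RX queue its own vector; extra queues share the last vector if they outnumber event fds */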
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues; queue_id++) {
+ eth_igb_assign_msix_vector(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ E1000_WRITE_FLUSH(hw);
+}
+
PMD_REGISTER_DRIVER(pmd_igb_drv);
PMD_REGISTER_DRIVER(pmd_igbvf_drv);