diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 95d1711765..80025dec4f 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -86,9 +87,24 @@
 #define E1000_INCVALUE_82576         (16 << IGB_82576_TSYNC_SHIFT)
 #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000
 
+#define E1000_VTIVAR_MISC                0x01740
+#define E1000_VTIVAR_MISC_MASK           0xFF
+#define E1000_VTIVAR_VALID               0x80
+#define E1000_VTIVAR_MISC_MAILBOX        0
+#define E1000_VTIVAR_MISC_INTR_MASK      0x3
+
+/* External VLAN Enable bit mask */
+#define E1000_CTRL_EXT_EXT_VLAN      (1 << 26)
+
+/* External VLAN Ether Type bit mask and shift */
+#define E1000_VET_VET_EXT            0xFFFF0000
+#define E1000_VET_VET_EXT_SHIFT      16
+
 static int  eth_igb_configure(struct rte_eth_dev *dev);
 static int  eth_igb_start(struct rte_eth_dev *dev);
 static void eth_igb_stop(struct rte_eth_dev *dev);
+static int  eth_igb_dev_set_link_up(struct rte_eth_dev *dev);
+static int  eth_igb_dev_set_link_down(struct rte_eth_dev *dev);
 static void eth_igb_close(struct rte_eth_dev *dev);
 static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
 static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
@@ -99,9 +115,20 @@ static int eth_igb_link_update(struct rte_eth_dev *dev,
 static void eth_igb_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
 static int eth_igb_xstats_get(struct rte_eth_dev *dev,
-			      struct rte_eth_xstats *xstats, unsigned n);
+			      struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
+		const uint64_t *ids,
+		uint64_t *values, unsigned int n);
+static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
+				    struct rte_eth_xstat_name *xstats_names,
+				    unsigned int size);
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+		unsigned int limit);
 static void eth_igb_stats_reset(struct rte_eth_dev *dev);
 static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
+static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
+				  char *fw_version, size_t fw_size);
 static void eth_igb_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
 static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -114,9 +141,9 @@ static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
 static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
-static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
-				      void *param);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
+				    struct rte_intr_handle *handle);
+static void eth_igb_interrupt_handler(void *param);
 static int  igb_hardware_init(struct e1000_hw *hw);
 static void igb_hw_control_acquire(struct e1000_hw *hw);
 static void igb_hw_control_release(struct e1000_hw *hw);
@@ -144,9 +171,9 @@ static int eth_igb_led_off(struct rte_eth_dev *dev);
 static void igb_intr_disable(struct e1000_hw *hw);
 static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_igb_rar_set(struct rte_eth_dev *dev,
-		struct ether_addr *mac_addr,
-		uint32_t index, uint32_t pool);
+static int eth_igb_rar_set(struct rte_eth_dev *dev,
+			   struct ether_addr *mac_addr,
+			   uint32_t index, uint32_t pool);
 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
 static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
					 struct ether_addr *addr);
@@ -164,7 +191,10 @@ static int eth_igbvf_link_update(struct e1000_hw *hw);
 static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *rte_stats);
 static int eth_igbvf_xstats_get(struct rte_eth_dev *dev,
-				struct rte_eth_xstats *xstats, unsigned n);
+				struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev,
+				      struct rte_eth_xstat_name *xstats_names,
+				      unsigned limit);
 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
 static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
		uint16_t vlan_id, int on);
@@ -259,6 +289,9 @@ static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
 static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
			       uint8_t index, uint8_t offset);
 static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
+static void eth_igbvf_interrupt_handler(void *param);
+static void igbvf_mbx_process(struct rte_eth_dev *dev);
+static int igb_filter_restore(struct rte_eth_dev *dev);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -282,22 +315,57 @@ static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_igb_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
+
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
+
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
+	{ .vendor_id = 0, /* sentinel */ },
 };
 
 /*
  * The set of PCI devices this driver supports (for 82576&I350 VF)
  */
 static const struct rte_pci_id pci_id_igbvf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
+	{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
+	{ .vendor_id = 0, /* sentinel */ },
 };
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -310,12 +378,16 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = E1000_MAX_RING_DESC,
	.nb_min = E1000_MIN_RING_DESC,
	.nb_align = IGB_RXD_ALIGN,
+	.nb_seg_max = IGB_TX_MAX_SEG,
+	.nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
 };
 
 static const struct eth_dev_ops eth_igb_ops = {
	.dev_configure        = eth_igb_configure,
	.dev_start            = eth_igb_start,
	.dev_stop             = eth_igb_stop,
+	.dev_set_link_up      = eth_igb_dev_set_link_up,
+	.dev_set_link_down    = eth_igb_dev_set_link_down,
	.dev_close            = eth_igb_close,
	.promiscuous_enable   = eth_igb_promiscuous_enable,
	.promiscuous_disable  = eth_igb_promiscuous_disable,
@@ -324,8 +396,12 @@ static const struct eth_dev_ops eth_igb_ops = {
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igb_stats_get,
	.xstats_get           = eth_igb_xstats_get,
+	.xstats_get_by_id     = eth_igb_xstats_get_by_id,
+	.xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
+	.xstats_get_names     = eth_igb_xstats_get_names,
	.stats_reset          = eth_igb_stats_reset,
	.xstats_reset         = eth_igb_xstats_reset,
+	.fw_version_get       = eth_igb_fw_version_get,
	.dev_infos_get        = eth_igb_infos_get,
	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
	.mtu_set              = eth_igb_mtu_set,
@@ -338,8 +414,11 @@ static const struct eth_dev_ops eth_igb_ops = {
	.rx_queue_release     = eth_igb_rx_queue_release,
	.rx_queue_count       = eth_igb_rx_queue_count,
	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
+	.rx_descriptor_status = eth_igb_rx_descriptor_status,
+	.tx_descriptor_status = eth_igb_tx_descriptor_status,
	.tx_queue_setup       = eth_igb_tx_queue_setup,
	.tx_queue_release     = eth_igb_tx_queue_release,
+	.tx_done_cleanup      = eth_igb_tx_done_cleanup,
	.dev_led_on           = eth_igb_led_on,
	.dev_led_off          = eth_igb_led_off,
	.flow_ctrl_get        = eth_igb_flow_ctrl_get,
@@ -359,7 +438,6 @@ static const struct eth_dev_ops eth_igb_ops = {
	.timesync_disable     = igb_timesync_disable,
	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
-	.get_reg_length       = eth_igb_get_reg_length,
	.get_reg              = eth_igb_get_regs,
	.get_eeprom_length    = eth_igb_get_eeprom_length,
	.get_eeprom           = eth_igb_get_eeprom,
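As a usage sketch for the rx/tx descriptor-status and tx-done-cleanup callbacks wired into the ops table above (a minimal sketch against the 17.05-era ethdev API; the port id, queue id and offset are illustrative, not part of this patch):

#include <stdio.h>
#include <rte_ethdev.h>

static void
check_ring_progress(uint16_t port_id, uint16_t queue_id)
{
	/* Has the descriptor 64 slots ahead of the next RX entry completed? */
	if (rte_eth_rx_descriptor_status(port_id, queue_id, 64) ==
			RTE_ETH_RX_DESC_DONE)
		printf("port %u: at least 64 packets are waiting\n", port_id);

	/* Is the TX ring still owned by hardware at the same offset? */
	if (rte_eth_tx_descriptor_status(port_id, queue_id, 64) ==
			RTE_ETH_TX_DESC_FULL)
		printf("port %u: TX queue %u is backed up\n", port_id, queue_id);

	/* Ask the PMD to reclaim up to 32 already-sent mbufs */
	rte_eth_tx_done_cleanup(port_id, queue_id, 32);
}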
@@ -385,6 +463,7 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.link_update          = eth_igb_link_update,
	.stats_get            = eth_igbvf_stats_get,
	.xstats_get           = eth_igbvf_xstats_get,
+	.xstats_get_names     = eth_igbvf_xstats_get_names,
	.stats_reset          = eth_igbvf_stats_reset,
	.xstats_reset         = eth_igbvf_stats_reset,
	.vlan_filter_set      = igbvf_vlan_filter_set,
@@ -398,7 +477,6 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
	.rxq_info_get         = igb_rxq_info_get,
	.txq_info_get         = igb_txq_info_get,
	.mac_addr_set         = igbvf_default_mac_addr_set,
-	.get_reg_length       = igbvf_get_reg_length,
	.get_reg              = igbvf_get_regs,
 };
 
@@ -554,6 +632,41 @@ igb_intr_disable(struct e1000_hw *hw)
	E1000_WRITE_FLUSH(hw);
 }
 
+static inline void
+igbvf_intr_enable(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* only for mailbox */
+	E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX);
+	E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX);
+	E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/* only for mailbox now. If RX/TX needed, should extend this function. */
+static void
+igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector)
+{
+	uint32_t tmp = 0;
+
+	/* mailbox */
+	tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK);
+	tmp |= E1000_VTIVAR_VALID;
+	E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp);
+}
+
+static void
+eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Configure VF other cause ivar */
+	igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX);
+}
+
 static inline int32_t
 igb_pf_reset_hw(struct e1000_hw *hw)
 {
@@ -572,15 +685,16 @@
 }
 
 static void
-igb_identify_hardware(struct rte_eth_dev *dev)
+igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
 {
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	hw->vendor_id = dev->pci_dev->id.vendor_id;
-	hw->device_id = dev->pci_dev->id.device_id;
-	hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
-	hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+	hw->vendor_id = pci_dev->id.vendor_id;
+	hw->device_id = pci_dev->id.device_id;
+	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
 
	e1000_set_mac_type(hw);
 
@@ -643,11 +757,35 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
	return E1000_SUCCESS;
 }
 
+/* Remove all ntuple filters of the device */
+static int igb_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	struct e1000_5tuple_filter *p_5tuple;
+	struct e1000_2tuple_filter *p_2tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+		TAILQ_REMOVE(&filter_info->fivetuple_list,
+			     p_5tuple, entries);
+		rte_free(p_5tuple);
+	}
+	filter_info->fivetuple_mask = 0;
+	while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list))) {
+		TAILQ_REMOVE(&filter_info->twotuple_list,
+			     p_2tuple, entries);
+		rte_free(p_2tuple);
+	}
+	filter_info->twotuple_mask = 0;
+
+	return 0;
+}
+
 static int
 eth_igb_dev_init(struct rte_eth_dev *eth_dev)
 {
	int error = 0;
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct e1000_vfta *shadow_vfta =
@@ -659,11 +797,10 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
 
	uint32_t ctrl_ext;
 
-	pci_dev = eth_dev->pci_dev;
-
	eth_dev->dev_ops = &eth_igb_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 
	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
@@ -675,10 +812,11 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
	}
 
	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
	hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
 
-	igb_identify_hardware(eth_dev);
+	igb_identify_hardware(eth_dev, pci_dev);
	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
		error = -EIO;
		goto err_late;
@@ -793,12 +931,13 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
	/* enable support intr */
	igb_intr_enable(eth_dev);
 
+	/* initialize filter info */
+	memset(filter_info, 0,
+	       sizeof(struct e1000_filter_info));
+
	TAILQ_INIT(&filter_info->flex_list);
-	filter_info->flex_mask = 0;
	TAILQ_INIT(&filter_info->twotuple_list);
-	filter_info->twotuple_mask = 0;
	TAILQ_INIT(&filter_info->fivetuple_list);
-	filter_info->fivetuple_mask = 0;
 
	return 0;
 
@@ -812,9 +951,12 @@ static int
 eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
 {
	struct rte_pci_device *pci_dev;
+	struct rte_intr_handle *intr_handle;
	struct e1000_hw *hw;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
 
	PMD_INIT_FUNC_TRACE();
 
@@ -822,7 +964,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
		return -EPERM;
 
	hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	pci_dev = eth_dev->pci_dev;
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	intr_handle = &pci_dev->intr_handle;
 
	if (adapter->stopped == 0)
		eth_igb_close(eth_dev);
@@ -841,9 +984,20 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
	igb_pf_host_uninit(eth_dev);
 
	/* disable uio intr before callback unregister */
-	rte_intr_disable(&(pci_dev->intr_handle));
-	rte_intr_callback_unregister(&(pci_dev->intr_handle),
-		eth_igb_interrupt_handler, (void *)eth_dev);
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     eth_igb_interrupt_handler, eth_dev);
+
+	/* clear the SYN filter info */
+	filter_info->syn_info = 0;
+
+	/* clear the ethertype filters info */
+	filter_info->ethertype_mask = 0;
+	memset(filter_info->ethertype_filters, 0,
+	       E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter));
+
+	/* remove all ntuple filters of the device */
+	igb_ntuple_filter_uninit(eth_dev);
 
	return 0;
 }
@@ -855,6 +1009,7 @@ static int
 eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
 {
	struct rte_pci_device *pci_dev;
+	struct rte_intr_handle *intr_handle;
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
	struct e1000_hw *hw =
@@ -867,6 +1022,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
	eth_dev->dev_ops = &igbvf_eth_dev_ops;
	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 
	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
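The tx_pkt_prepare hook installed above (for both PF and VF) backs rte_eth_tx_prepare(); a hedged sketch of the check-then-send pattern it enables (queue 0 and the error handling are illustrative, and the caller owns the un-sent tail of the burst):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
send_with_prepare(uint16_t port_id, struct rte_mbuf **pkts, uint16_t n)
{
	/* Validate offload requests (TSO, checksums, segment counts)
	 * against what the igb TX path can actually do. */
	uint16_t nb_ok = rte_eth_tx_prepare(port_id, 0, pkts, n);

	if (nb_ok != n)
		/* pkts[nb_ok] violated a constraint; rte_errno says why.
		 * A real caller would repair or drop the remainder. */
		rte_pktmbuf_free(pkts[nb_ok]);

	return rte_eth_tx_burst(port_id, 0, pkts, nb_ok);
}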
@@ -877,9 +1033,9 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
		return 0;
	}
 
-	pci_dev = eth_dev->pci_dev;
-
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
@@ -916,12 +1072,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
	/* Generate a random MAC address, if none was assigned by PF. */
	if (is_zero_ether_addr(perm_addr)) {
		eth_random_addr(perm_addr->addr_bytes);
-		diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
-		if (diag) {
-			rte_free(eth_dev->data->mac_addrs);
-			eth_dev->data->mac_addrs = NULL;
-			return diag;
-		}
		PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
		PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
			     "%02x:%02x:%02x:%02x:%02x:%02x",
@@ -933,6 +1083,12 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
			     perm_addr->addr_bytes[5]);
	}
 
+	diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+	if (diag) {
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+		return diag;
+	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
			&eth_dev->data->mac_addrs[0]);
@@ -942,6 +1098,10 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "igb_mac_82576_vf");
 
+	intr_handle = &pci_dev->intr_handle;
+	rte_intr_callback_register(intr_handle,
+				   eth_igbvf_interrupt_handler, eth_dev);
+
	return 0;
 }
 
@@ -950,6 +1110,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
	PMD_INIT_FUNC_TRACE();
 
@@ -966,42 +1127,57 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
 
+	/* disable uio intr before callback unregister */
+	rte_intr_disable(&pci_dev->intr_handle);
+	rte_intr_callback_unregister(&pci_dev->intr_handle,
+				     eth_igbvf_interrupt_handler,
+				     (void *)eth_dev);
+
	return 0;
 }
 
-static struct eth_driver rte_igb_pmd = {
-	.pci_drv = {
-		.name = "rte_igb_pmd",
-		.id_table = pci_id_igb_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-			RTE_PCI_DRV_DETACHABLE,
-	},
-	.eth_dev_init = eth_igb_dev_init,
-	.eth_dev_uninit = eth_igb_dev_uninit,
-	.dev_private_size = sizeof(struct e1000_adapter),
+static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct e1000_adapter), eth_igb_dev_init);
+}
+
+static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
+}
+
+static struct rte_pci_driver rte_igb_pmd = {
+	.id_table = pci_id_igb_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.probe = eth_igb_pci_probe,
+	.remove = eth_igb_pci_remove,
 };
+
+static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct e1000_adapter), eth_igbvf_dev_init);
+}
+
+static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
+}
+
 /*
  * virtual function driver struct
  */
-static struct eth_driver rte_igbvf_pmd = {
-	.pci_drv = {
"rte_igbvf_pmd", - .id_table = pci_id_igbvf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, - }, - .eth_dev_init = eth_igbvf_dev_init, - .eth_dev_uninit = eth_igbvf_dev_uninit, - .dev_private_size = sizeof(struct e1000_adapter), +static struct rte_pci_driver rte_igbvf_pmd = { + .id_table = pci_id_igbvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_igbvf_pci_probe, + .remove = eth_igbvf_pci_remove, }; -static int -rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused) -{ - rte_eth_driver_register(&rte_igb_pmd); - return 0; -} - static void igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) { @@ -1013,20 +1189,6 @@ igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) E1000_WRITE_REG(hw, E1000_RCTL, rctl); } -/* - * VF Driver initialization routine. - * Invoked one at EAL init time. - * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices. - */ -static int -rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - - rte_eth_driver_register(&rte_igbvf_pmd); - return 0; -} - static int igb_check_mq_mode(struct rte_eth_dev *dev) { @@ -1129,10 +1291,14 @@ eth_igb_start(struct rte_eth_dev *dev) E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int ret, mask; uint32_t intr_vector = 0; uint32_t ctrl_ext; + uint32_t *speeds; + int num_speeds; + bool autoneg; PMD_INIT_FUNC_TRACE(); @@ -1140,7 +1306,7 @@ eth_igb_start(struct rte_eth_dev *dev) rte_intr_disable(intr_handle); /* Power up the phy. 
	/* Power up the phy. Needed to make the link go Up */
-	e1000_power_up_phy(hw);
+	eth_igb_dev_set_link_up(dev);
 
	/*
	 * Packet Buffer Allocation (PBA)
@@ -1190,7 +1356,7 @@ eth_igb_start(struct rte_eth_dev *dev)
				   dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-				     " intr_vec\n", dev->data->nb_rx_queues);
+				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
@@ -1233,48 +1399,58 @@ eth_igb_start(struct rte_eth_dev *dev)
	}
 
	/* Setup link speed and duplex */
-	switch (dev->data->dev_conf.link_speed) {
-	case ETH_LINK_SPEED_AUTONEG:
-		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-			hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-			hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
-		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-			hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
-		else
-			goto error_invalid_config;
-		break;
-	case ETH_SPEED_NUM_10M:
-		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-			hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
-		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-			hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
-		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-			hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
-		else
-			goto error_invalid_config;
-		break;
-	case ETH_SPEED_NUM_100M:
-		if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
-			hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
-		else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
-			hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
-		else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
-			hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
-		else
+	speeds = &dev->data->dev_conf.link_speeds;
+	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
+		hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+		hw->mac.autoneg = 1;
+	} else {
+		num_speeds = 0;
+		autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
+
+		/* Reset */
+		hw->phy.autoneg_advertised = 0;
+
+		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
+				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
+				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED)) {
+			num_speeds = -1;
			goto error_invalid_config;
-		break;
-	case ETH_SPEED_NUM_1G:
-		if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
-				(dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
-			hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
-		else
+		}
+		if (*speeds & ETH_LINK_SPEED_10M_HD) {
+			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
+			num_speeds++;
+		}
+		if (*speeds & ETH_LINK_SPEED_10M) {
+			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
+			num_speeds++;
+		}
+		if (*speeds & ETH_LINK_SPEED_100M_HD) {
+			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
+			num_speeds++;
+		}
+		if (*speeds & ETH_LINK_SPEED_100M) {
+			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
+			num_speeds++;
+		}
+		if (*speeds & ETH_LINK_SPEED_1G) {
+			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
+			num_speeds++;
+		}
+		if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
			goto error_invalid_config;
-		break;
-	case ETH_SPEED_NUM_10G:
-	default:
-		goto error_invalid_config;
+
+		/* Set/reset the mac.autoneg based on the link speed,
+		 * fixed or not
+		 */
+		if (!autoneg) {
+			hw->mac.autoneg = 0;
+			hw->mac.forced_speed_duplex =
+					hw->phy.autoneg_advertised;
+		} else {
+			hw->mac.autoneg = 1;
+		}
	}
+
	e1000_setup_link(hw);
 
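The bitmap logic above replaces the old link_speed/link_duplex pair: one or more ETH_LINK_SPEED_* bits advertise speeds, and ETH_LINK_SPEED_FIXED with exactly one speed bit forces the link. An application-side sketch (assuming the post-16.04 rte_eth_conf layout; the port id and queue counts are illustrative):

#include <string.h>
#include <rte_ethdev.h>

static int
configure_forced_100m(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* one speed bit + FIXED: eth_igb_start() disables autoneg and
	 * forces the PHY; more than one bit in fixed mode is rejected */
	conf.link_speeds = ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_100M;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}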
	if (rte_intr_allow_others(intr_handle)) {
@@ -1287,7 +1463,7 @@ eth_igb_start(struct rte_eth_dev *dev)
					     (void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
-				     " no intr multiplex\n");
+				     " no intr multiplex");
	}
 
	/* check if rxq interrupt is enabled */
@@ -1301,14 +1477,16 @@ eth_igb_start(struct rte_eth_dev *dev)
	/* resume enabled intr since hw reset */
	igb_intr_enable(dev);
 
+	/* restore all types filter */
+	igb_filter_restore(dev);
+
	PMD_INIT_LOG(DEBUG, "<<");
 
	return 0;
 
error_invalid_config:
-	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
-		     dev->data->dev_conf.link_speed,
-		     dev->data->dev_conf.link_duplex, dev->data->port_id);
+	PMD_INIT_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		     dev->data->dev_conf.link_speeds, dev->data->port_id);
	igb_dev_clear_queues(dev);
	return -EINVAL;
 }
@@ -1325,11 +1503,10 @@ eth_igb_stop(struct rte_eth_dev *dev)
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	struct e1000_flex_filter *p_flex;
-	struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
-	struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
	igb_intr_disable(hw);
 
@@ -1349,10 +1526,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
	}
 
	/* Power down the phy. Needed to make the link go Down */
-	if (hw->phy.media_type == e1000_media_type_copper)
-		e1000_power_down_phy(hw);
-	else
-		e1000_shutdown_fiber_serdes_link(hw);
+	eth_igb_dev_set_link_down(dev);
 
	igb_dev_clear_queues(dev);
 
@@ -1367,24 +1541,6 @@ eth_igb_stop(struct rte_eth_dev *dev)
	}
	filter_info->flex_mask = 0;
 
-	/* Remove all ntuple filters of the device */
-	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
-	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
-		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
-		TAILQ_REMOVE(&filter_info->fivetuple_list,
-			     p_5tuple, entries);
-		rte_free(p_5tuple);
-	}
-	filter_info->fivetuple_mask = 0;
-	for (p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list);
-	     p_2tuple != NULL; p_2tuple = p_2tuple_next) {
-		p_2tuple_next = TAILQ_NEXT(p_2tuple, entries);
-		TAILQ_REMOVE(&filter_info->twotuple_list,
-			     p_2tuple, entries);
-		rte_free(p_2tuple);
-	}
-	filter_info->twotuple_mask = 0;
-
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
@@ -1399,6 +1555,32 @@ eth_igb_stop(struct rte_eth_dev *dev)
	}
 }
 
+static int
+eth_igb_dev_set_link_up(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->phy.media_type == e1000_media_type_copper)
+		e1000_power_up_phy(hw);
+	else
+		e1000_power_up_fiber_serdes_link(hw);
+
+	return 0;
+}
+
+static int
+eth_igb_dev_set_link_down(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (hw->phy.media_type == e1000_media_type_copper)
+		e1000_power_down_phy(hw);
+	else
+		e1000_shutdown_fiber_serdes_link(hw);
+
+	return 0;
+}
+
 static void
 eth_igb_close(struct rte_eth_dev *dev)
 {
@@ -1406,7 +1588,8 @@ eth_igb_close(struct rte_eth_dev *dev)
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
	struct rte_eth_link link;
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
	eth_igb_stop(dev);
	adapter->stopped = 1;
@@ -1426,10 +1609,9 @@ eth_igb_close(struct rte_eth_dev *dev)
 
	igb_dev_free_queues(dev);
 
-	pci_dev = dev->pci_dev;
-	if (pci_dev->intr_handle.intr_vec) {
-		rte_free(pci_dev->intr_handle.intr_vec);
-		pci_dev->intr_handle.intr_vec = NULL;
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
	}
 
	memset(&link, 0, sizeof(link));
@@ -1691,8 +1873,62 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev)
	memset(stats, 0, sizeof(*stats));
 }
 
+static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names,
+	__rte_unused unsigned int size)
+{
+	unsigned i;
+
+	if (xstats_names == NULL)
+		return IGB_NB_XSTATS;
+
+	/* Note: limit checked in rte_eth_xstats_names() */
+
+	for (i = 0; i < IGB_NB_XSTATS; i++) {
+		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+			 "%s", rte_igb_stats_strings[i].name);
+	}
+
+	return IGB_NB_XSTATS;
+}
+
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+	unsigned int limit)
+{
+	unsigned int i;
+
+	if (!ids) {
+		if (xstats_names == NULL)
+			return IGB_NB_XSTATS;
+
+		for (i = 0; i < IGB_NB_XSTATS; i++)
+			snprintf(xstats_names[i].name,
+				 sizeof(xstats_names[i].name),
+				 "%s", rte_igb_stats_strings[i].name);
+
+		return IGB_NB_XSTATS;
+
+	} else {
+		struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
+
+		eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+				IGB_NB_XSTATS);
+
+		for (i = 0; i < limit; i++) {
+			if (ids[i] >= IGB_NB_XSTATS) {
+				PMD_INIT_LOG(ERR, "id value isn't valid");
+				return -1;
+			}
+			strcpy(xstats_names[i].name,
+			       xstats_names_copy[ids[i]].name);
+		}
+		return limit;
	}
+}
+
 static int
-eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		   unsigned n)
 {
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1713,8 +1949,7 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
	/* Extended stats */
	for (i = 0; i < IGB_NB_XSTATS; i++) {
-		snprintf(xstats[i].name, sizeof(xstats[i].name),
-			 "%s", rte_igb_stats_strings[i].name);
+		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igb_stats_strings[i].offset);
	}
 
	return IGB_NB_XSTATS;
 }
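These per-id callbacks are reached through rte_eth_xstats_get_names_by_id() and rte_eth_xstats_get_by_id(); a hedged application sketch (note the asymmetric parameter order of the two ethdev wrappers is per the 17.05 API and is an assumption here; ids and counts are illustrative):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_two_xstats(uint16_t port_id)
{
	uint64_t ids[2] = { 0, 3 };	/* indices into the xstats table */
	struct rte_eth_xstat_name names[2];
	uint64_t values[2];

	/* passing ids == NULL instead returns/fills the full table */
	rte_eth_xstats_get_names_by_id(port_id, names, 2, ids);
	rte_eth_xstats_get_by_id(port_id, ids, values, 2);

	for (unsigned int i = 0; i < 2; i++)
		printf("%s = %" PRIu64 "\n", names[i].name, values[i]);
}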
@@ -1722,6 +1957,53 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+static int
+eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+		uint64_t *values, unsigned int n)
+{
+	unsigned int i;
+
+	if (!ids) {
+		struct e1000_hw *hw =
+			E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+		struct e1000_hw_stats *hw_stats =
+			E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+		if (n < IGB_NB_XSTATS)
+			return IGB_NB_XSTATS;
+
+		igb_read_stats_registers(hw, hw_stats);
+
+		/* If this is a reset xstats is NULL, and we have cleared the
+		 * registers by reading them.
+		 */
+		if (!values)
+			return 0;
+
+		/* Extended stats */
+		for (i = 0; i < IGB_NB_XSTATS; i++)
+			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igb_stats_strings[i].offset);
+
+		return IGB_NB_XSTATS;
+
+	} else {
+		uint64_t values_copy[IGB_NB_XSTATS];
+
+		eth_igb_xstats_get_by_id(dev, NULL, values_copy,
+				IGB_NB_XSTATS);
+
+		for (i = 0; i < n; i++) {
+			if (ids[i] >= IGB_NB_XSTATS) {
+				PMD_INIT_LOG(ERR, "id value isn't valid");
+				return -1;
+			}
+			values[i] = values_copy[ids[i]];
+		}
+		return n;
+	}
+}
+
 static void
 igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
 {
@@ -1762,8 +2044,23 @@ igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
			  hw_stats->last_gotlbc, hw_stats->gotlbc);
 }
 
+static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+				      struct rte_eth_xstat_name *xstats_names,
+				      __rte_unused unsigned limit)
+{
+	unsigned i;
+
+	if (xstats_names != NULL)
+		for (i = 0; i < IGBVF_NB_XSTATS; i++) {
+			snprintf(xstats_names[i].name,
+				 sizeof(xstats_names[i].name), "%s",
+				 rte_igbvf_stats_strings[i].name);
+		}
+	return IGBVF_NB_XSTATS;
+}
+
 static int
-eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned n)
 {
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1780,8 +2077,7 @@ eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		return 0;
 
	for (i = 0; i < IGBVF_NB_XSTATS; i++) {
-		snprintf(xstats[i].name, sizeof(xstats[i].name), "%s",
-			 rte_igbvf_stats_strings[i].name);
+		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igbvf_stats_strings[i].offset);
	}
@@ -1805,11 +2101,6 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
	rte_stats->ibytes = hw_stats->gorc;
	rte_stats->opackets = hw_stats->gptc;
	rte_stats->obytes = hw_stats->gotc;
-	rte_stats->imcasts = hw_stats->mprc;
-	rte_stats->ilbpackets = hw_stats->gprlbc;
-	rte_stats->ilbbytes = hw_stats->gorlbc;
-	rte_stats->olbpackets = hw_stats->gptlbc;
-	rte_stats->olbbytes = hw_stats->gotlbc;
 }
 
 static void
@@ -1826,11 +2117,64 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev)
		 offsetof(struct e1000_vf_stats, gprc));
 }
 
+static int
+eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+		       size_t fw_size)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_fw_version fw;
+	int ret;
+
+	e1000_get_fw_version(hw, &fw);
+
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (!(e1000_get_flash_presence_i210(hw))) {
+			ret = snprintf(fw_version, fw_size,
+				 "%2d.%2d-%d",
+				 fw.invm_major, fw.invm_minor,
+				 fw.invm_img_type);
+			break;
+		}
+		/* fall through */
+	default:
+		/* if option rom is valid, display its version too */
+		if (fw.or_valid) {
+			ret = snprintf(fw_version, fw_size,
+				 "%d.%d, 0x%08x, %d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.etrack_id,
+				 fw.or_major, fw.or_build, fw.or_patch);
+		/* no option rom */
+		} else {
+			if (fw.etrack_id != 0X0000) {
+				ret = snprintf(fw_version, fw_size,
+					 "%d.%d, 0x%08x",
+					 fw.eep_major, fw.eep_minor,
+					 fw.etrack_id);
+			} else {
+				ret = snprintf(fw_version, fw_size,
+					 "%d.%d.%d",
+					 fw.eep_major, fw.eep_minor,
+					 fw.eep_build);
+			}
+		}
+		break;
+	}
+
+	ret += 1; /* add the size of '\0' */
+	if (fw_size < (u32)ret)
+		return ret;
+	else
+		return 0;
+}
+
 static void
 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
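Per the callback above, a positive return value is the buffer size the caller actually needs (including the terminator), so the application-side pattern is a single call with a comfortably sized buffer, or a retry (buffer size here is arbitrary):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_fw_version(uint16_t port_id)
{
	char fw[64];

	/* 0 on success; a positive value means fw was too small and
	 * tells us how many bytes would have been required */
	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
		printf("port %u firmware: %s\n", port_id, fw);
}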
@@ -1959,6 +2303,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2061,13 +2406,20 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 
	/* Now we check if a transition has happened */
	if (link_check) {
-		hw->mac.ops.get_link_up_info(hw, &link.link_speed,
-					     &link.link_duplex);
+		uint16_t duplex, speed;
+		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+		link.link_duplex = (duplex == FULL_DUPLEX) ?
+				ETH_LINK_FULL_DUPLEX :
+				ETH_LINK_HALF_DUPLEX;
+		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
+		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+				ETH_LINK_SPEED_FIXED);
	} else if (!link_check) {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
+		link.link_autoneg = ETH_LINK_SPEED_FIXED;
	}
	rte_igb_dev_atomic_write_link_status(dev, &link);
@@ -2235,21 +2587,25 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev,
 {
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t reg = ETHER_TYPE_VLAN;
-	int ret = 0;
+	uint32_t reg, qinq;
+
+	qinq = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	qinq &= E1000_CTRL_EXT_EXT_VLAN;
 
-	switch (vlan_type) {
-	case ETH_VLAN_TYPE_INNER:
-		reg |= (tpid << 16);
+	/* only outer TPID of double VLAN can be configured*/
+	if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) {
+		reg = E1000_READ_REG(hw, E1000_VET);
+		reg = (reg & (~E1000_VET_VET_EXT)) |
+			((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT);
		E1000_WRITE_REG(hw, E1000_VET, reg);
-		break;
-	default:
-		ret = -EINVAL;
-		PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
-		break;
+
+		return 0;
	}
 
-	return ret;
+	/* all other TPID values are read-only*/
+	PMD_DRV_LOG(ERR, "Not supported");
+
+	return -ENOTSUP;
 }
 
 static void
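Per the rewritten handler, only the outer TPID can be changed, and only while extended (double) VLAN is active in CTRL_EXT; the application-side call looks like this (0x88A8 is the usual 802.1ad value, used here only as an example):

#include <stdio.h>
#include <rte_ethdev.h>

static void
set_outer_tpid(uint16_t port_id)
{
	/* program the outer VLAN TPID used for QinQ; the igb handler
	 * above returns -ENOTSUP unless QinQ is enabled */
	int ret = rte_eth_dev_set_vlan_ether_type(port_id,
						  ETH_VLAN_TYPE_OUTER, 0x88A8);
	if (ret != 0)
		printf("port %u: outer TPID not configurable (%d)\n",
		       port_id, ret);
}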
@@ -2472,12 +2828,14 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
  * - On failure, a negative value.
  */
 static int
-eth_igb_interrupt_action(struct rte_eth_dev *dev)
+eth_igb_interrupt_action(struct rte_eth_dev *dev,
+			 struct rte_intr_handle *intr_handle)
 {
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_interrupt *intr =
		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t tctl, rctl;
	struct rte_eth_link link;
	int ret;
@@ -2488,7 +2846,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
	}
 
	igb_intr_enable(dev);
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	rte_intr_enable(intr_handle);
 
	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
@@ -2516,10 +2874,10 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
		}
 
		PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
-			     dev->pci_dev->addr.domain,
-			     dev->pci_dev->addr.bus,
-			     dev->pci_dev->addr.devid,
-			     dev->pci_dev->addr.function);
+			     pci_dev->addr.domain,
+			     pci_dev->addr.bus,
+			     pci_dev->addr.devid,
+			     pci_dev->addr.function);
		tctl = E1000_READ_REG(hw, E1000_TCTL);
		rctl = E1000_READ_REG(hw, E1000_RCTL);
		if (link.link_status) {
@@ -2534,7 +2892,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
		E1000_WRITE_REG(hw, E1000_TCTL, tctl);
		E1000_WRITE_REG(hw, E1000_RCTL, rctl);
		E1000_WRITE_FLUSH(hw);
-		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
 
	return 0;
@@ -2552,23 +2910,84 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
  * void
  */
 static void
-eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
-			  void *param)
+eth_igb_interrupt_handler(void *param)
 {
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
	eth_igb_interrupt_get_status(dev);
-	eth_igb_interrupt_action(dev);
+	eth_igb_interrupt_action(dev, dev->intr_handle);
 }
 
 static int
-eth_igb_led_on(struct rte_eth_dev *dev)
+eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev)
 {
-	struct e1000_hw *hw;
+	uint32_t eicr;
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_interrupt *intr =
+		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
-}
+	igbvf_intr_disable(hw);
+
+	/* read-on-clear nic registers here */
+	eicr = E1000_READ_REG(hw, E1000_EICR);
+	intr->flags = 0;
+
+	if (eicr == E1000_VTIVAR_MISC_MAILBOX)
+		intr->flags |= E1000_FLAG_MAILBOX;
+
+	return 0;
+}
+
+void igbvf_mbx_process(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 in_msg = 0;
+
+	if (mbx->ops.read(hw, &in_msg, 1, 0))
+		return;
+
+	/* PF reset VF event */
+	if (in_msg == E1000_PF_CONTROL_MSG)
+		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
+}
+
+static int
+eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
+{
+	struct e1000_interrupt *intr =
+		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+	if (intr->flags & E1000_FLAG_MAILBOX) {
+		igbvf_mbx_process(dev);
+		intr->flags &= ~E1000_FLAG_MAILBOX;
+	}
+
+	igbvf_intr_enable(dev);
+	rte_intr_enable(intr_handle);
+
+	return 0;
+}
+
+static void
+eth_igbvf_interrupt_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+	eth_igbvf_interrupt_get_status(dev);
+	eth_igbvf_interrupt_action(dev, dev->intr_handle);
+}
+
+static int
+eth_igb_led_on(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw;
+
+	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
+}
 
 static int
 eth_igb_led_off(struct rte_eth_dev *dev)
@@ -2682,9 +3101,9 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 }
 
 #define E1000_RAH_POOLSEL_SHIFT      (18)
-static void
+static int
 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
-		uint32_t index, __rte_unused uint32_t pool)
+		uint32_t index, uint32_t pool)
 {
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t rah;
@@ -2693,6 +3112,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
	rah = E1000_READ_REG(hw, E1000_RAH(index));
	rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
	E1000_WRITE_REG(hw, E1000_RAH(index), rah);
+	return 0;
 }
 
 static void
@@ -2831,7 +3251,10 @@ igbvf_dev_start(struct rte_eth_dev *dev)
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_adapter *adapter =
		E1000_DEV_PRIVATE(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int ret;
+	uint32_t intr_vector = 0;
 
	PMD_INIT_FUNC_TRACE();
 
@@ -2851,12 +3274,42 @@ igbvf_dev_start(struct rte_eth_dev *dev)
		return ret;
	}
 
+	/* check and configure queue intr-vector mapping */
+	if (dev->data->dev_conf.intr_conf.rxq != 0) {
+		intr_vector = dev->data->nb_rx_queues;
+		ret = rte_intr_efd_enable(intr_handle, intr_vector);
+		if (ret)
+			return ret;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (!intr_handle->intr_vec) {
+			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+				     " intr_vec", dev->data->nb_rx_queues);
+			return -ENOMEM;
+		}
+	}
+
+	eth_igbvf_configure_msix_intr(dev);
+
+	/* enable uio/vfio intr/eventfd mapping */
+	rte_intr_enable(intr_handle);
+
+	/* resume enabled intr since hw reset */
+	igbvf_intr_enable(dev);
+
	return 0;
 }
 
 static void
 igbvf_dev_stop(struct rte_eth_dev *dev)
 {
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
	PMD_INIT_FUNC_TRACE();
 
	igbvf_stop_adapter(dev);
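igbvf_mbx_process() above surfaces a PF-initiated reset to the application as RTE_ETH_EVENT_INTR_RESET; a sketch of catching it (assuming the four-argument callback prototype introduced alongside this change; the port id width is uint8_t on releases contemporary with this patch):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int
vf_reset_cb(uint16_t port_id, enum rte_eth_event_type type,
	    void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (type == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: PF requested a VF reset\n", port_id);
	/* a real handler would stop, reconfigure and restart the port */
	return 0;
}

/* at init time:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *			      vf_reset_cb, NULL);
 */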
@@ -2868,6 +3321,16 @@ igbvf_dev_stop(struct rte_eth_dev *dev)
	igbvf_set_vfta_all(dev,0);
 
	igb_dev_clear_queues(dev);
+
+	/* disable intr eventfd mapping */
+	rte_intr_disable(intr_handle);
+
+	/* Clean datapath event and queue/vec mapping */
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
 }
 
 static void
@@ -3044,7 +3507,7 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
-			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}
 
@@ -3085,7 +3548,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can supported "
-			"(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+			"(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}
 
@@ -3110,7 +3573,8 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
 
 #define MAC_TYPE_FILTER_SUP(type)    do {\
	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
-		(type) != e1000_82576)\
+		(type) != e1000_82576 && (type) != e1000_i210 &&\
+		(type) != e1000_i211)\
		return -ENOTSUP;\
 } while (0)
 
@@ -3120,6 +3584,8 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev,
			bool add)
 {
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	uint32_t synqf, rfctl;
 
	if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
@@ -3147,6 +3613,7 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev,
		synqf = 0;
	}
 
+	filter_info->syn_info = synqf;
	E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
	E1000_WRITE_FLUSH(hw);
	return 0;
@@ -3206,7 +3673,7 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
				(struct rte_eth_syn_filter *)arg);
		break;
	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+		PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
@@ -3215,7 +3682,8 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
 }
 
 #define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
-	if ((type) != e1000_82580 && (type) != e1000_i350)\
+	if ((type) != e1000_82580 && (type) != e1000_i350 &&\
+		(type) != e1000_i210 && (type) != e1000_i211)\
		return -ENOSYS; \
 } while (0)
 
@@ -3281,6 +3749,54 @@ igb_2tuple_filter_lookup(struct e1000_2tuple_filter_list *filter_list,
	return NULL;
 }
 
+/* inject a igb 2tuple filter to HW */
+static inline void
+igb_inject_2uple_filter(struct rte_eth_dev *dev,
+			struct e1000_2tuple_filter *filter)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
+	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+	int i;
+
+	i = filter->index;
+	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+		imir |= E1000_IMIR_PORT_BP;
+	else
+		imir &= ~E1000_IMIR_PORT_BP;
+
+	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+	ttqf |= E1000_TTQF_QUEUE_ENABLE;
+	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
+	ttqf |= (uint32_t)(filter->filter_info.proto &
+						E1000_TTQF_PROTOCOL_MASK);
+	if (filter->filter_info.proto_mask == 0)
+		ttqf &= ~E1000_TTQF_MASK_ENABLE;
+
+	/* tcp flags bits setting. */
+	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_URG;
+		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_ACK;
+		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_PSH;
+		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_RST;
+		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_SYN;
+		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_FIN;
+	} else {
+		imir_ext |= E1000_IMIREXT_CTRL_BP;
+	}
+	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
+	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
 /*
  * igb_add_2tuple_filter - add a 2tuple filter
  *
@@ -3296,12 +3812,9 @@ static int
 igb_add_2tuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
 {
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_2tuple_filter *filter;
-	uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
-	uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	int i, ret;
 
	filter = rte_zmalloc("e1000_2tuple_filter",
@@ -3343,39 +3856,7 @@ igb_add_2tuple_filter(struct rte_eth_dev *dev,
		return -ENOSYS;
	}
 
-	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
-	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
-		imir |= E1000_IMIR_PORT_BP;
-	else
-		imir &= ~E1000_IMIR_PORT_BP;
-
-	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
-
-	ttqf |= E1000_TTQF_QUEUE_ENABLE;
-	ttqf |= (uint32_t)(filter->queue << E1000_TTQF_QUEUE_SHIFT);
-	ttqf |= (uint32_t)(filter->filter_info.proto & E1000_TTQF_PROTOCOL_MASK);
-	if (filter->filter_info.proto_mask == 0)
-		ttqf &= ~E1000_TTQF_MASK_ENABLE;
-
-	/* tcp flags bits setting. */
-	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
-		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_URG;
-		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_ACK;
-		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_PSH;
-		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_RST;
-		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_SYN;
-		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_FIN;
-	} else
-		imir_ext |= E1000_IMIREXT_CTRL_BP;
-	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
-	E1000_WRITE_REG(hw, E1000_TTQF(i), ttqf);
-	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+	igb_inject_2uple_filter(dev, filter);
	return 0;
 }
 
@@ -3471,10 +3952,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
	}
	wufc = E1000_READ_REG(hw, E1000_WUFC);
-	if (flex_filter->index < E1000_MAX_FHFT)
-		reg_off = E1000_FHFT(flex_filter->index);
-	else
-		reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
 
	if (add) {
		if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
@@ -3504,6 +3981,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			return -ENOSYS;
		}
 
+		if (flex_filter->index < E1000_MAX_FHFT)
+			reg_off = E1000_FHFT(flex_filter->index);
+		else
+			reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
+
		E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
				(E1000_WUFC_FLX0 << flex_filter->index));
		queueing = filter->len |
@@ -3532,6 +4014,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
			return -ENOENT;
		}
 
+		if (it->index < E1000_MAX_FHFT)
+			reg_off = E1000_FHFT(it->index);
+		else
+			reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
		for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
			E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
		E1000_WRITE_REG(hw, E1000_WUFC, wufc &
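The 2-tuple and 5-tuple programming factored into the inject helpers around here is driven from the application through the ntuple filter API; a hedged sketch (field values are illustrative, and byte-order/mask conventions follow rte_eth_ctrl.h):

#include <string.h>
#include <netinet/in.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
steer_udp_to_queue(uint16_t port_id)
{
	struct rte_eth_ntuple_filter f;

	memset(&f, 0, sizeof(f));
	f.flags = RTE_5TUPLE_FLAGS;	/* RTE_2TUPLE_FLAGS lands here too */
	f.proto = IPPROTO_UDP;
	f.proto_mask = UINT8_MAX;	/* compare the protocol field */
	f.priority = 1;
	f.queue = 2;			/* steer matches to RX queue 2 */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &f);
}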
@@ -3743,6 +4230,64 @@ igb_5tuple_filter_lookup_82576(struct e1000_5tuple_filter_list *filter_list,
	return NULL;
 }
 
+/* inject a igb 5-tuple filter to HW */
+static inline void
+igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
+			       struct e1000_5tuple_filter *filter)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
+	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
+	uint8_t i;
+
+	i = filter->index;
+	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
+	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
+		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
+	if (filter->filter_info.dst_ip_mask == 0)
+		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
+	if (filter->filter_info.src_port_mask == 0)
+		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
+	if (filter->filter_info.proto_mask == 0)
+		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
+	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
+		E1000_FTQF_QUEUE_MASK;
+	ftqf |= E1000_FTQF_QUEUE_ENABLE;
+	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
+	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
+	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
+
+	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
+	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
+
+	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
+	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
+		imir |= E1000_IMIR_PORT_BP;
+	else
+		imir &= ~E1000_IMIR_PORT_BP;
+	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
+
+	/* tcp flags bits setting. */
+	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
+		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_URG;
+		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_ACK;
+		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_PSH;
+		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_RST;
+		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_SYN;
+		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
+			imir_ext |= E1000_IMIREXT_CTRL_FIN;
+	} else {
+		imir_ext |= E1000_IMIREXT_CTRL_BP;
+	}
+	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
+	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+}
+
 /*
  * igb_add_5tuple_filter_82576 - add a 5tuple filter
  *
@@ -3758,12 +4303,9 @@ static int
 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *ntuple_filter)
 {
-	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_5tuple_filter *filter;
-	uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
-	uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
	uint8_t i;
	int ret;
 
@@ -3807,50 +4349,7 @@ igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
		return -ENOSYS;
	}
 
-	ftqf |= filter->filter_info.proto & E1000_FTQF_PROTOCOL_MASK;
-	if (filter->filter_info.src_ip_mask == 0) /* 0b means compare. */
-		ftqf &= ~E1000_FTQF_MASK_SOURCE_ADDR_BP;
-	if (filter->filter_info.dst_ip_mask == 0)
-		ftqf &= ~E1000_FTQF_MASK_DEST_ADDR_BP;
-	if (filter->filter_info.src_port_mask == 0)
-		ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
-	if (filter->filter_info.proto_mask == 0)
-		ftqf &= ~E1000_FTQF_MASK_PROTO_BP;
-	ftqf |= (filter->queue << E1000_FTQF_QUEUE_SHIFT) &
-		E1000_FTQF_QUEUE_MASK;
-	ftqf |= E1000_FTQF_QUEUE_ENABLE;
-	E1000_WRITE_REG(hw, E1000_FTQF(i), ftqf);
-	E1000_WRITE_REG(hw, E1000_DAQF(i), filter->filter_info.dst_ip);
-	E1000_WRITE_REG(hw, E1000_SAQF(i), filter->filter_info.src_ip);
-
-	spqf = filter->filter_info.src_port & E1000_SPQF_SRCPORT;
-	E1000_WRITE_REG(hw, E1000_SPQF(i), spqf);
-
-	imir = (uint32_t)(filter->filter_info.dst_port & E1000_IMIR_DSTPORT);
-	if (filter->filter_info.dst_port_mask == 1) /* 1b means not compare. */
-		imir |= E1000_IMIR_PORT_BP;
-	else
-		imir &= ~E1000_IMIR_PORT_BP;
-	imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
-
-	/* tcp flags bits setting. */
-	if (filter->filter_info.tcp_flags & TCP_FLAG_ALL) {
-		if (filter->filter_info.tcp_flags & TCP_URG_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_URG;
-		if (filter->filter_info.tcp_flags & TCP_ACK_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_ACK;
-		if (filter->filter_info.tcp_flags & TCP_PSH_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_PSH;
-		if (filter->filter_info.tcp_flags & TCP_RST_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_RST;
-		if (filter->filter_info.tcp_flags & TCP_SYN_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_SYN;
-		if (filter->filter_info.tcp_flags & TCP_FIN_FLAG)
-			imir_ext |= E1000_IMIREXT_CTRL_FIN;
-	} else
-		imir_ext |= E1000_IMIREXT_CTRL_BP;
-	E1000_WRITE_REG(hw, E1000_IMIR(i), imir);
-	E1000_WRITE_REG(hw, E1000_IMIREXT(i), imir_ext);
+	igb_inject_5tuple_filter_82576(dev, filter);
	return 0;
 }
 
@@ -3987,7 +4486,9 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
		break;
	case RTE_2TUPLE_FLAGS:
	case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
-		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350)
+		if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
+			hw->mac.type != e1000_i210 &&
+			hw->mac.type != e1000_i211)
			return -ENOTSUP;
		if (add)
			ret = igb_add_2tuple_filter(dev, ntuple_filter);
@@ -4129,7 +4630,7 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
	int i;
 
	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
-		if (filter_info->ethertype_filters[i] == ethertype &&
+		if (filter_info->ethertype_filters[i].ethertype == ethertype &&
			(filter_info->ethertype_mask & (1 << i)))
			return i;
	}
@@ -4138,33 +4639,35 @@ igb_ethertype_filter_lookup(struct e1000_filter_info *filter_info,
 
 static inline int
 igb_ethertype_filter_insert(struct e1000_filter_info *filter_info,
-			uint16_t ethertype)
+			uint16_t ethertype, uint32_t etqf)
 {
	int i;
 
	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
		if (!(filter_info->ethertype_mask & (1 << i))) {
			filter_info->ethertype_mask |= 1 << i;
-			filter_info->ethertype_filters[i] = ethertype;
+			filter_info->ethertype_filters[i].ethertype = ethertype;
+			filter_info->ethertype_filters[i].etqf = etqf;
			return i;
		}
	}
	return -1;
 }
 
-static inline int
+static int
 igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
			uint8_t idx)
 {
	if (idx >= E1000_MAX_ETQF_FILTERS)
		return -1;
	filter_info->ethertype_mask &= ~(1 << idx);
-	filter_info->ethertype_filters[idx] = 0;
+	filter_info->ethertype_filters[idx].ethertype = 0;
+	filter_info->ethertype_filters[idx].etqf = 0;
	return idx;
 }
 
-static int
+int
 igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
			struct rte_eth_ethertype_filter *filter,
			bool add)
@@ -4204,16 +4707,15 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
	}
 
	if (add) {
+		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
+		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
+		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
		ret = igb_ethertype_filter_insert(filter_info,
-			filter->ether_type);
+						  filter->ether_type, etqf);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "ethertype filters are full.");
			return -ENOSYS;
		}
-
-		etqf |= E1000_ETQF_FILTER_ENABLE | E1000_ETQF_QUEUE_ENABLE;
-		etqf |= (uint32_t)(filter->ether_type & E1000_ETQF_ETHERTYPE);
-		etqf |= filter->queue << E1000_ETQF_QUEUE_SHIFT;
	} else {
		ret = igb_ethertype_filter_remove(filter_info, (uint8_t)ret);
		if (ret < 0)
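With the ETQF state now cached per slot in filter_info (so igb_filter_restore() can replay it after a restart), the application still adds an ethertype filter the usual way; a sketch (the EtherType value is illustrative, and the handler rejects IPv4/IPv6):

#include <string.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
steer_ptp_to_queue(uint16_t port_id)
{
	struct rte_eth_ethertype_filter ef;

	memset(&ef, 0, sizeof(ef));
	ef.ether_type = 0x88F7;		/* e.g. PTP over Ethernet */
	ef.queue = 1;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &ef);
}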
@@ -4706,6 +5208,12 @@ eth_igb_get_regs(struct rte_eth_dev *dev,
 	int count = 0;
 	const struct reg_info *reg_group;
 
+	if (data == NULL) {
+		regs->length = eth_igb_get_reg_length(dev);
+		regs->width = sizeof(uint32_t);
+		return 0;
+	}
+
 	/* Support only full register dump */
 	if ((regs->length == 0) ||
 	    (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
@@ -4730,6 +5238,12 @@ igbvf_get_regs(struct rte_eth_dev *dev,
 	int count = 0;
 	const struct reg_info *reg_group;
 
+	if (data == NULL) {
+		regs->length = igbvf_get_reg_length(dev);
+		regs->width = sizeof(uint32_t);
+		return 0;
+	}
+
 	/* Support only full register dump */
 	if ((regs->length == 0) ||
 	    (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
@@ -4800,16 +5314,6 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
 	return nvm->ops.write(hw, first, length, data);
 }
 
-static struct rte_driver pmd_igb_drv = {
-	.type = PMD_PDEV,
-	.init = rte_igb_pmd_init,
-};
-
-static struct rte_driver pmd_igbvf_drv = {
-	.type = PMD_PDEV,
-	.init = rte_igbvf_pmd_init,
-};
-
 static int
 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -4828,6 +5332,8 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t mask = 1 << queue_id;
 	uint32_t regval;
 
@@ -4835,7 +5341,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
 	E1000_WRITE_FLUSH(hw);
 
-	rte_intr_enable(&dev->pci_dev->intr_handle);
+	rte_intr_enable(intr_handle);
 
 	return 0;
 }
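The intr_handle rework above (fetching the handle through RTE_ETH_DEV_TO_PCI() instead of the removed dev->pci_dev field) serves the Rx-interrupt mode. A minimal sketch of the application side, assuming the rte_epoll API from rte_interrupts.h and a device configured with intr_conf.rxq = 1; port and queue are illustrative:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Block until the given Rx queue has traffic, then return so the
 * caller can poll it. */
static void
wait_for_rx(uint8_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;

	/* Bind the queue's interrupt event fd into this thread's epoll set. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	/* Arm the interrupt; this reaches eth_igb_rx_queue_intr_enable()
	 * above, which sets the queue bit in EIMS. Then sleep. */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);

	/* Disarm before switching back to polling the queue. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}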
@@ -4899,8 +5405,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	uint32_t vec = E1000_MISC_VEC_ID;
 	uint32_t base = E1000_MISC_VEC_ID;
 	uint32_t misc_shift = 0;
-
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
 	/* won't configure msix register if no mapping is done
 	 * between intr vector and event fd
@@ -4971,5 +5477,73 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
 	E1000_WRITE_FLUSH(hw);
 }
 
-PMD_REGISTER_DRIVER(pmd_igb_drv);
-PMD_REGISTER_DRIVER(pmd_igbvf_drv);
+/* restore n-tuple filter */
+static inline void
+igb_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct e1000_5tuple_filter *p_5tuple;
+	struct e1000_2tuple_filter *p_2tuple;
+
+	TAILQ_FOREACH(p_5tuple, &filter_info->fivetuple_list, entries) {
+		igb_inject_5tuple_filter_82576(dev, p_5tuple);
+	}
+
+	TAILQ_FOREACH(p_2tuple, &filter_info->twotuple_list, entries) {
+		igb_inject_2uple_filter(dev, p_2tuple);
+	}
+}
+
+/* restore SYN filter */
+static inline void
+igb_syn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t synqf;
+
+	synqf = filter_info->syn_info;
+
+	if (synqf & E1000_SYN_FILTER_ENABLE) {
+		E1000_WRITE_REG(hw, E1000_SYNQF(0), synqf);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/* restore ethernet type filter */
+static inline void
+igb_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_filter_info *filter_info =
+		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i;
+
+	for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_mask & (1 << i)) {
+			E1000_WRITE_REG(hw, E1000_ETQF(i),
+				filter_info->ethertype_filters[i].etqf);
+			E1000_WRITE_FLUSH(hw);
+		}
+	}
+}
+
+/* restore all types filter */
+static int
+igb_filter_restore(struct rte_eth_dev *dev)
+{
+	igb_ntuple_filter_restore(dev);
+	igb_ethertype_filter_restore(dev);
+	igb_syn_filter_restore(dev);
+
+	return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
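These restore helpers work because every accepted filter is first mirrored into driver-private state (the 2/5-tuple TAILQs, the cached syn_info word, and the per-slot etqf words added in this patch) before it is written to hardware, so after a reset the driver only has to replay that cache. Below is a self-contained model of the slot-cache idiom used by igb_ethertype_filter_insert()/_restore(); the names, slot count, and printf stand-in for the register write are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 8	/* illustrative; the driver uses E1000_MAX_ETQF_FILTERS */

struct slot_cache {
	uint8_t mask;			/* bit i set => hardware slot i occupied */
	uint32_t etqf[MAX_SLOTS];	/* cached register image per slot */
};

/* Claim the first free slot and remember the register image for it;
 * the returned index doubles as the ETQF(i) register index. */
static int
slot_insert(struct slot_cache *c, uint32_t etqf)
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++) {
		if (!(c->mask & (1u << i))) {
			c->mask |= 1u << i;
			c->etqf[i] = etqf;
			return i;
		}
	}
	return -1;	/* all hardware slots in use */
}

/* Replay the cache into "hardware" after a reset, exactly as
 * igb_ethertype_filter_restore() walks its occupied-slot mask. */
static void
slot_restore(const struct slot_cache *c)
{
	int i;

	for (i = 0; i < MAX_SLOTS; i++)
		if (c->mask & (1u << i))
			printf("write ETQF(%d) = 0x%08x\n", i, c->etqf[i]);
}

int
main(void)
{
	struct slot_cache c = { 0, {0} };

	slot_insert(&c, 0x12345678);
	slot_restore(&c);
	return 0;
}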