diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index 26f87881e7..6e835c3af0 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
 */

#include
@@ -52,13 +51,13 @@
 #include
 #include
 #include
+#include

 #include "e1000_logs.h"
 #include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"

-static int  eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
-		uint16_t nb_tx_q);
+static int  eth_igb_configure(struct rte_eth_dev *dev);
 static int  eth_igb_start(struct rte_eth_dev *dev);
 static void eth_igb_stop(struct rte_eth_dev *dev);
 static void eth_igb_close(struct rte_eth_dev *dev);
@@ -75,7 +74,7 @@ static void eth_igb_infos_get(struct rte_eth_dev *dev,
 				struct rte_eth_dev_info *dev_info);
 static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
 				struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int  eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
 static int  eth_igb_interrupt_action(struct rte_eth_dev *dev);
 static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
@@ -85,11 +84,19 @@ static void igb_hw_control_acquire(struct e1000_hw *hw);
 static void igb_hw_control_release(struct e1000_hw *hw);
 static void igb_init_manageability(struct e1000_hw *hw);
 static void igb_release_manageability(struct e1000_hw *hw);
-static void igb_vlan_hw_support_enable(struct rte_eth_dev *dev);
-static void igb_vlan_hw_support_disable(struct rte_eth_dev *dev);
-static void eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
-				uint16_t vlan_id,
-				int on);
+
+static int eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+		uint16_t vlan_id, int on);
+static void eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static void eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+
+static void igb_vlan_hw_filter_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_filter_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_strip_disable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_extend_disable(struct rte_eth_dev *dev);
+
 static int eth_igb_led_on(struct rte_eth_dev *dev);
 static int eth_igb_led_off(struct rte_eth_dev *dev);
@@ -108,10 +115,14 @@ static void igbvf_dev_close(struct rte_eth_dev *dev);
 static int eth_igbvf_link_update(struct e1000_hw *hw);
 static void eth_igbvf_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *rte_stats);
 static void eth_igbvf_stats_reset(struct rte_eth_dev *dev);
-static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, 
+static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+		struct rte_eth_rss_reta *reta_conf);
+static int eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+		struct rte_eth_rss_reta *reta_conf);

 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -128,6 +139,8 @@ static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 #define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
 #define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

+#define IGBVF_PMD_NAME "rte_igbvf_pmd"	/* PMD name */
+
 static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;

 /*
@@ -166,13 +179,23 @@ static struct eth_dev_ops eth_igb_ops = {
 	.stats_reset          = eth_igb_stats_reset,
 	.dev_infos_get        = eth_igb_infos_get,
 	.vlan_filter_set      = eth_igb_vlan_filter_set,
+	.vlan_tpid_set        = eth_igb_vlan_tpid_set,
+	.vlan_offload_set     = eth_igb_vlan_offload_set,
 	.rx_queue_setup       = eth_igb_rx_queue_setup,
+	.rx_queue_release     = eth_igb_rx_queue_release,
+	.rx_queue_count       = eth_igb_rx_queue_count,
+	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
+	.tx_queue_release     = eth_igb_tx_queue_release,
 	.dev_led_on           = eth_igb_led_on,
 	.dev_led_off          = eth_igb_led_off,
 	.flow_ctrl_set        = eth_igb_flow_ctrl_set,
 	.mac_addr_add         = eth_igb_rar_set,
 	.mac_addr_remove      = eth_igb_rar_clear,
+	.reta_update          = eth_igb_rss_reta_update,
+	.reta_query           = eth_igb_rss_reta_query,
+	.rss_hash_update      = eth_igb_rss_hash_update,
+	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
 };

 /*
@@ -247,6 +270,42 @@ rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
 	return 0;
 }

+static inline void
+igb_intr_enable(struct rte_eth_dev *dev)
+{
+	struct e1000_interrupt *intr =
+		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
+	E1000_WRITE_FLUSH(hw);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+	E1000_WRITE_REG(hw, E1000_IMC, ~0);
+	E1000_WRITE_FLUSH(hw);
+}
+
+static inline int32_t
+igb_pf_reset_hw(struct e1000_hw *hw)
+{
+	uint32_t ctrl_ext;
+	int32_t status;
+
+	status = e1000_reset_hw(hw);
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	return status;
+}
+
 static void
 igb_identify_hardware(struct rte_eth_dev *dev)
 {
@@ -263,6 +322,61 @@ igb_identify_hardware(struct rte_eth_dev *dev)
 	/* need to check if it is a vf device below */
 }

+static int
+igb_reset_swfw_lock(struct e1000_hw *hw)
+{
+	int ret_val;
+
+	/*
+	 * Do mac ops initialization manually here, since we will need
+	 * some function pointers set by this call.
+	 */
+	ret_val = e1000_init_mac_params(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * SMBI lock should not fail in this early stage. If this is the case,
+	 * it is due to an improper exit of the application.
+	 * So force the release of the faulty lock.
+	 */
+	if (e1000_get_hw_semaphore_generic(hw) < 0) {
+		DEBUGOUT("SMBI lock released");
+	}
+	e1000_put_hw_semaphore_generic(hw);
+
+	if (hw->mac.ops.acquire_swfw_sync != NULL) {
+		uint16_t mask;
+
+		/*
+		 * Phy lock should not fail in this early stage. If this is the case,
+		 * it is due to an improper exit of the application.
+		 * So force the release of the faulty lock.
+		 */
+		mask = E1000_SWFW_PHY0_SM << hw->bus.func;
+		if (hw->bus.func > E1000_FUNC_1)
+			mask <<= 2;
+		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+			DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+		}
+		hw->mac.ops.release_swfw_sync(hw, mask);
+
+		/*
+		 * This one is more tricky since it is common to all ports; but
+		 * swfw_sync retries last long enough (1s) to be almost sure that if
+		 * lock can not be taken it is due to an improper lock of the
+		 * semaphore.
+		 */
+		mask = E1000_SWFW_EEP_SM;
+		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
+			DEBUGOUT("SWFW common locks released");
+		}
+		hw->mac.ops.release_swfw_sync(hw, mask);
+	}
+
+	return E1000_SUCCESS;
+}
+
 static int
 eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		 struct rte_eth_dev *eth_dev)
@@ -272,7 +386,8 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct e1000_vfta * shadow_vfta =
-			E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+		E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+	uint32_t ctrl_ext;

 	pci_dev = eth_dev->pci_dev;
 	eth_dev->dev_ops = &eth_igb_ops;
@@ -288,17 +403,28 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 		return 0;
 	}

-	hw->hw_addr= (void *)pci_dev->mem_resource.addr;
+	hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
 	igb_identify_hardware(eth_dev);
-
-	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+	if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
 		error = -EIO;
 		goto err_late;
 	}

 	e1000_get_bus_info(hw);

+	/* Reset any pending lock */
+	if (igb_reset_swfw_lock(hw) != E1000_SUCCESS) {
+		error = -EIO;
+		goto err_late;
+	}
+
+	/* Finish initialization */
+	if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+		error = -EIO;
+		goto err_late;
+	}
+
 	hw->mac.autoneg = 1;
 	hw->phy.autoneg_wait_to_complete = 0;
 	hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
@@ -314,7 +440,7 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	 * Start from a known state, this is important in reading the nvm
 	 * and mac from that.
 	 */
-	e1000_reset_hw(hw);
+	igb_pf_reset_hw(hw);

 	/* Make sure we have a good EEPROM before we read from it */
 	if (e1000_validate_nvm_checksum(hw) < 0) {
@@ -370,6 +496,15 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 				     "SOL/IDER session");
 	}

+	/* initialize PF if max_vfs not zero */
+	igb_pf_host_init(eth_dev);
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
 	PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id);
@@ -377,6 +512,12 @@ eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 	rte_intr_callback_register(&(pci_dev->intr_handle),
 		eth_igb_interrupt_handler, (void *)eth_dev);

+	/* enable uio intr after callback register */
+	rte_intr_enable(&(pci_dev->intr_handle));
+
+	/* enable support intr */
+	igb_intr_enable(eth_dev);
+
 	return 0;

 err_late:
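The igb_reset_swfw_lock() addition above frees semaphores leaked by an improperly exited process: each lock is acquired (logging if that fails) and then unconditionally released. A minimal self-contained sketch of that idiom, with a toy lock model standing in for the e1000 SMBI/SWFW semaphores; the names and model here are illustrative assumptions, not driver code:

#include <stdio.h>
#include <stdbool.h>

/* Toy model of a hardware semaphore (assumption, not the real
 * register protocol used by e1000_get_hw_semaphore_generic()). */
struct hw_lock { bool taken; };

static int try_acquire(struct hw_lock *l)
{
	if (l->taken)
		return -1;
	l->taken = true;
	return 0;
}

static void release(struct hw_lock *l) { l->taken = false; }

/* The idiom from igb_reset_swfw_lock(): at early init, acquisition
 * should always succeed; a failure means a dead process leaked the
 * lock, so we release it unconditionally to break the stale hold. */
static void reset_stale_lock(struct hw_lock *l)
{
	if (try_acquire(l) < 0)
		printf("stale lock detected, forcing release\n");
	release(l);
}

int main(void)
{
	struct hw_lock l = { .taken = true }; /* simulate a leaked lock */
	reset_stale_lock(&l);
	printf("lock taken: %d\n", l.taken);  /* 0: usable again */
	return 0;
}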
@@ -400,11 +541,23 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,

 	PMD_INIT_LOG(DEBUG, "eth_igbvf_dev_init");

 	eth_dev->dev_ops = &igbvf_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+	eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX function */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+		if (eth_dev->data->scattered_rx)
+			eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+		return 0;
+	}
+
 	pci_dev = eth_dev->pci_dev;

 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
-	hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

 	/* Initialize the shared code */
 	diag = e1000_setup_init_funcs(hw, TRUE);
@@ -432,6 +585,7 @@ eth_igbvf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
 			ETHER_ADDR_LEN * hw->mac.rar_entry_count);
 		return -ENOMEM;
 	}
+
 	/* Copy the permanent MAC address */
 	ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
 		&eth_dev->data->mac_addrs[0]);
@@ -468,20 +622,31 @@ static struct eth_driver rte_igbvf_pmd = {
 	.dev_private_size = sizeof(struct e1000_adapter),
 };

-int
-rte_igb_pmd_init(void)
+static int
+rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
 {
 	rte_eth_driver_register(&rte_igb_pmd);
 	return 0;
 }

+static void
+igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	/* RCTL: enable VLAN filter since VMDq always use VLAN filter */
+	uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl |= E1000_RCTL_VFE;
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
 /*
  * VF Driver initialization routine.
  * Invoked one at EAL init time.
  * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
  */
-int
-rte_igbvf_pmd_init(void)
+static int
+rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
 {
 	DEBUGFUNC("rte_igbvf_pmd_init");
@@ -490,35 +655,15 @@ rte_igbvf_pmd_init(void)
 }

 static int
-eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+eth_igb_configure(struct rte_eth_dev *dev)
 {
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	int diag;

 	PMD_INIT_LOG(DEBUG, ">>");

 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;

-	/* Allocate the array of pointers to RX structures */
-	diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
-	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
-			" pointers to RX queues failed",
-			dev->data->port_id, nb_rx_q);
-		return diag;
-	}
-
-	/* Allocate the array of pointers to TX structures */
-	diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
-	if (diag != 0) {
-		PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
-			" pointers to TX queues failed",
-			dev->data->port_id, nb_tx_q);
-
-		return diag;
-	}
-
 	PMD_INIT_LOG(DEBUG, "<<");

 	return (0);
@@ -529,12 +674,11 @@ eth_igb_start(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret, i;
+	int ret, i, mask;
+	uint32_t ctrl_ext;

 	PMD_INIT_LOG(DEBUG, ">>");

-	igb_intr_disable(hw);
-
 	/* Power up the phy. Needed to make the link go Up */
 	e1000_power_up_phy(hw);

@@ -556,10 +700,19 @@ eth_igb_start(struct rte_eth_dev *dev)
 	/* Initialize the hardware */
 	if (igb_hardware_init(hw)) {
 		PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
-		return (-1);
+		return (-EIO);
 	}

-	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext |= E1000_CTRL_EXT_PFRSTD;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	/* configure PF module if SRIOV enabled */
+	igb_pf_host_configure(dev);

 	/* Configure for OS presence */
 	igb_init_manageability(hw);
@@ -570,19 +723,23 @@ eth_igb_start(struct rte_eth_dev *dev)
 	ret = eth_igb_rx_init(dev);
 	if (ret) {
 		PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+		igb_dev_clear_queues(dev);
 		return ret;
 	}

 	e1000_clear_hw_cntrs_base_generic(hw);

 	/*
-	 * If VLAN filtering is enabled, set up VLAN tag offload and filtering
-	 * and restore the VFTA.
+	 * VLAN Offload Settings
 	 */
-	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
-		igb_vlan_hw_support_enable(dev);
-	else
-		igb_vlan_hw_support_disable(dev);
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+			ETH_VLAN_EXTEND_MASK;
+	eth_igb_vlan_offload_set(dev, mask);
+
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+		/* Enable VLAN filter since VMDq always use VLAN filter */
+		igb_vmdq_vlan_hw_filter_enable(dev);
+	}

 	/*
 	 * Configure the Interrupt Moderation register (EITR) with the maximum
@@ -660,14 +817,11 @@ eth_igb_start(struct rte_eth_dev *dev)
 	e1000_setup_link(hw);

 	/* check if lsc interrupt feature is enabled */
-	if (dev->data->dev_conf.intr_conf.lsc != 0) {
-		ret = eth_igb_interrupt_setup(dev);
-		if (ret) {
-			PMD_INIT_LOG(ERR, "Unable to setup interrupts");
-			igb_dev_clear_queues(dev);
-			return ret;
-		}
-	}
+	if (dev->data->dev_conf.intr_conf.lsc != 0)
+		ret = eth_igb_lsc_interrupt_setup(dev);
+
+	/* resume enabled intr since hw reset */
+	igb_intr_enable(dev);

 	PMD_INIT_LOG(DEBUG, "<<");

@@ -677,7 +831,8 @@ error_invalid_config:
 	PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
 			dev->data->dev_conf.link_speed,
 			dev->data->dev_conf.link_duplex, dev->data->port_id);
-	return -1;
+	igb_dev_clear_queues(dev);
+	return (-EINVAL);
 }

 /*********************************************************************
@@ -693,9 +848,18 @@ eth_igb_stop(struct rte_eth_dev *dev)
 	struct rte_eth_link link;

 	igb_intr_disable(hw);
-	e1000_reset_hw(hw);
+	igb_pf_reset_hw(hw);
 	E1000_WRITE_REG(hw, E1000_WUC, 0);

+	/* Set bit for Go Link disconnect */
+	if (hw->mac.type >= e1000_82580) {
+		uint32_t phpm_reg;
+
+		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+		phpm_reg |= E1000_82580_PM_GO_LINKD;
+		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+	}
+
 	/* Power down the phy. Needed to make the link go Down */
 	e1000_power_down_phy(hw);

@@ -717,6 +881,15 @@ eth_igb_close(struct rte_eth_dev *dev)
 	igb_release_manageability(hw);
 	igb_hw_control_release(hw);

+	/* Clear bit for Go Link disconnect */
+	if (hw->mac.type >= e1000_82580) {
+		uint32_t phpm_reg;
+
+		phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+		E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+	}
+
 	igb_dev_clear_queues(dev);

 	memset(&link, 0, sizeof(link));
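The igb_hardware_init() hunk that follows touches the flow-control comment, which reasons about high and low water marks. As a worked illustration of that reasoning (the arithmetic below is an assumption modeled on e1000-class drivers, not copied from this diff):

#include <stdint.h>
#include <stdio.h>

#define ETHER_MAX_LEN 1518 /* standard max frame length, as in DPDK */

/* Sketch of the water-mark reasoning in igb_hardware_init()'s comment:
 * leave room for two full frames above the high water mark so in-flight
 * frames still fit after XOFF is sent, and place the low water mark
 * ~1500 bytes (about one full frame) below it so XON is sent once a
 * frame has drained. Formulas are assumptions, not driver code. */
int main(void)
{
	uint32_t rx_buf_size = 32 * 1024; /* example Rx FIFO size */
	uint32_t high_water  = rx_buf_size - (ETHER_MAX_LEN * 2);
	uint32_t low_water   = high_water - 1500;

	printf("high=%u low=%u\n", high_water, low_water);
	return 0;
}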
@@ -764,7 +937,7 @@ igb_hardware_init(struct e1000_hw *hw)
 	 *   frames to be received after sending an XOFF.
 	 * - Low water mark works best when it is very near the high water mark.
 	 *   This allows the receiver to restart by sending XON when it has
-	 *   drained a bit. Here we use an arbitary value of 1500 which will
+	 *   drained a bit. Here we use an arbitrary value of 1500 which will
 	 *   restart after one full frame is pulled from the buffer. There
 	 *   could be several smaller frames in the buffer and if so they will
 	 *   not trigger the XON until their total number reduces the buffer
@@ -785,14 +958,14 @@ igb_hardware_init(struct e1000_hw *hw)
 	hw->fc.requested_mode = e1000_fc_none;

 	/* Issue a global reset */
-	e1000_reset_hw(hw);
+	igb_pf_reset_hw(hw);
 	E1000_WRITE_REG(hw, E1000_WUC, 0);

 	diag = e1000_init_hw(hw);
 	if (diag < 0)
 		return (diag);

-	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+	E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
 	e1000_get_phy_info(hw);
 	e1000_check_for_link(hw);

@@ -920,6 +1093,12 @@ eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 	/* Tx Errors */
 	rte_stats->oerrors = stats->ecol + stats->latecol;

+	/* XON/XOFF pause frames */
+	rte_stats->tx_pause_xon  = stats->xontxc;
+	rte_stats->rx_pause_xon  = stats->xonrxc;
+	rte_stats->tx_pause_xoff = stats->xofftxc;
+	rte_stats->rx_pause_xoff = stats->xoffrxc;
+
 	rte_stats->ipackets = stats->gprc;
 	rte_stats->opackets = stats->gptc;
 	rte_stats->ibytes   = stats->gorc;
@@ -1022,24 +1201,44 @@ eth_igb_infos_get(struct rte_eth_dev *dev,
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+	dev_info->rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM  |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+	dev_info->tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM;

 	switch (hw->mac.type) {
 	case e1000_82575:
 		dev_info->max_rx_queues = 4;
 		dev_info->max_tx_queues = 4;
+		dev_info->max_vmdq_pools = 0;
 		break;

 	case e1000_82576:
 		dev_info->max_rx_queues = 16;
 		dev_info->max_tx_queues = 16;
+		dev_info->max_vmdq_pools = ETH_8_POOLS;
 		break;

 	case e1000_82580:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
+		dev_info->max_vmdq_pools = ETH_8_POOLS;
 		break;

 	case e1000_i350:
+		dev_info->max_rx_queues = 8;
+		dev_info->max_tx_queues = 8;
+		dev_info->max_vmdq_pools = ETH_8_POOLS;
+		break;
+
+	case e1000_i354:
 		dev_info->max_rx_queues = 8;
 		dev_info->max_tx_queues = 8;
 		break;
@@ -1047,22 +1246,26 @@ eth_igb_infos_get(struct rte_eth_dev *dev,
 	case e1000_i210:
 		dev_info->max_rx_queues = 4;
 		dev_info->max_tx_queues = 4;
+		dev_info->max_vmdq_pools = 0;
 		break;

 	case e1000_vfadapt:
 		dev_info->max_rx_queues = 2;
 		dev_info->max_tx_queues = 2;
+		dev_info->max_vmdq_pools = 0;
 		break;

 	case e1000_vfadapt_i350:
 		dev_info->max_rx_queues = 1;
 		dev_info->max_tx_queues = 1;
+		dev_info->max_vmdq_pools = 0;
 		break;

 	default:
 		/* Should not happen */
 		dev_info->max_rx_queues = 0;
 		dev_info->max_tx_queues = 0;
+		dev_info->max_vmdq_pools = 0;
 	}
 }

@@ -1258,7 +1461,7 @@ eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 }

-static void
+static int
 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	struct e1000_hw *hw =
@@ -1281,10 +1484,37 @@ eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)

 	/* update local VFTA copy */
 	shadow_vfta->vfta[vid_idx] = vfta;
+
+	return 0;
+}
+
+static void
+eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg = ETHER_TYPE_VLAN ;
+
+	reg |= (tpid << 16);
+	E1000_WRITE_REG(hw, E1000_VET, reg);
+}
+
+static void
+igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* Filter Table Disable */
+	reg = E1000_READ_REG(hw, E1000_RCTL);
+	reg &= ~E1000_RCTL_CFIEN;
+	reg &= ~E1000_RCTL_VFE;
+	E1000_WRITE_REG(hw, E1000_RCTL, reg);
 }

 static void
-igb_vlan_hw_support_enable(struct rte_eth_dev *dev)
+igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1293,47 +1523,107 @@ igb_vlan_hw_support_enable(struct rte_eth_dev *dev)
 	uint32_t reg;
 	int i;

-	/* VLAN Mode Enable */
-	reg = E1000_READ_REG(hw, E1000_CTRL);
-	reg |= E1000_CTRL_VME;
-	E1000_WRITE_REG(hw, E1000_CTRL, reg);
-
-	/* Filter Table Enable */
+	/* Filter Table Enable, CFI not used for packet acceptance */
 	reg = E1000_READ_REG(hw, E1000_RCTL);
 	reg &= ~E1000_RCTL_CFIEN;
 	reg |= E1000_RCTL_VFE;
 	E1000_WRITE_REG(hw, E1000_RCTL, reg);

-	/* Update maximum frame size */
-	reg = E1000_READ_REG(hw, E1000_RLPML);
-	reg += VLAN_TAG_SIZE;
-	E1000_WRITE_REG(hw, E1000_RLPML, reg);
-
 	/* restore VFTA table */
-	for (i = 0; i < E1000_VFTA_SIZE; i++)
+	for (i = 0; i < IGB_VFTA_SIZE; i++)
 		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
 }

 static void
-igb_vlan_hw_support_disable(struct rte_eth_dev *dev)
+igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;

-	/* VLAN Mode disable */
+	/* VLAN Mode Disable */
 	reg = E1000_READ_REG(hw, E1000_CTRL);
 	reg &= ~E1000_CTRL_VME;
 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
 }

 static void
-igb_intr_disable(struct e1000_hw *hw)
+igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 {
-	E1000_WRITE_REG(hw, E1000_IMC, ~0);
-	E1000_WRITE_FLUSH(hw);
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* VLAN Mode Enable */
+	reg = E1000_READ_REG(hw, E1000_CTRL);
+	reg |= E1000_CTRL_VME;
+	E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+static void
+igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* CTRL_EXT: Extended VLAN */
+	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	reg &= ~E1000_CTRL_EXT_EXTEND_VLAN;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+	/* Update maximum packet length */
+	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+		E1000_WRITE_REG(hw, E1000_RLPML,
+			dev->data->dev_conf.rxmode.max_rx_pkt_len +
+						VLAN_TAG_SIZE);
+}
+
+static void
+igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t reg;
+
+	/* CTRL_EXT: Extended VLAN */
+	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	reg |= E1000_CTRL_EXT_EXTEND_VLAN;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+	/* Update maximum packet length */
+	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+		E1000_WRITE_REG(hw, E1000_RLPML,
+			dev->data->dev_conf.rxmode.max_rx_pkt_len +
+						2 * VLAN_TAG_SIZE);
+}
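eth_igb_vlan_offload_set(), added in the next hunk, dispatches on ETH_VLAN_*_MASK bits and re-applies whichever rxmode settings the mask selects. A standalone model of that dispatch; the constants and struct are redefined locally so the sketch compiles on its own (values mirror rte_ethdev.h, but treat them as assumptions here):

#include <stdio.h>

#define ETH_VLAN_STRIP_MASK  0x0001
#define ETH_VLAN_FILTER_MASK 0x0002
#define ETH_VLAN_EXTEND_MASK 0x0004

/* Stand-in for dev->data->dev_conf.rxmode */
struct rxmode { int hw_vlan_strip, hw_vlan_filter, hw_vlan_extend; };

/* Mirrors the shape of eth_igb_vlan_offload_set(): each mask bit says
 * "re-apply this setting"; the setting itself lives in rxmode. */
static void apply_vlan_offloads(const struct rxmode *rx, int mask)
{
	if (mask & ETH_VLAN_STRIP_MASK)
		printf("strip  -> %s\n", rx->hw_vlan_strip ? "enable" : "disable");
	if (mask & ETH_VLAN_FILTER_MASK)
		printf("filter -> %s\n", rx->hw_vlan_filter ? "enable" : "disable");
	if (mask & ETH_VLAN_EXTEND_MASK)
		printf("extend -> %s\n", rx->hw_vlan_extend ? "enable" : "disable");
}

int main(void)
{
	struct rxmode rx = { .hw_vlan_strip = 1 }; /* filter/extend off */
	apply_vlan_offloads(&rx, ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
	return 0;
}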
+static void
+eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+	if(mask & ETH_VLAN_STRIP_MASK){
+		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+			igb_vlan_hw_strip_enable(dev);
+		else
+			igb_vlan_hw_strip_disable(dev);
+	}
+
+	if(mask & ETH_VLAN_FILTER_MASK){
+		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+			igb_vlan_hw_filter_enable(dev);
+		else
+			igb_vlan_hw_filter_disable(dev);
+	}
+
+	if(mask & ETH_VLAN_EXTEND_MASK){
+		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+			igb_vlan_hw_extend_enable(dev);
+		else
+			igb_vlan_hw_extend_disable(dev);
+	}
+}
+
+
 /**
  * It enables the interrupt mask and then enable the interrupt.
  *
@@ -1345,14 +1635,12 @@ igb_intr_disable(struct e1000_hw *hw)
  *  - On failure, a negative value.
  */
 static int
-eth_igb_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
-	struct e1000_hw *hw =
-		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct e1000_interrupt *intr =
+		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

-	E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
-	E1000_WRITE_FLUSH(hw);
-	rte_intr_enable(&(dev->pci_dev->intr_handle));
+	intr->mask |= E1000_ICR_LSC;

 	return 0;
 }
@@ -1377,12 +1665,19 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
 	struct e1000_interrupt *intr =
 		E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

+	igb_intr_disable(hw);
+
 	/* read-on-clear nic registers here */
 	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	intr->flags = 0;
 	if (icr & E1000_ICR_LSC) {
 		intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
 	}

+	if (icr & E1000_ICR_VMMB)
+		intr->flags |= E1000_FLAG_MAILBOX;
+
 	return 0;
 }

@@ -1407,51 +1702,58 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
 	struct rte_eth_link link;
 	int ret;

-	if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
-		return -1;
+	if (intr->flags & E1000_FLAG_MAILBOX) {
+		igb_pf_mbx_process(dev);
+		intr->flags &= ~E1000_FLAG_MAILBOX;
+	}

-	intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+	igb_intr_enable(dev);
 	rte_intr_enable(&(dev->pci_dev->intr_handle));

-	/* set get_link_status to check register later */
-	hw->mac.get_link_status = 1;
-	ret = eth_igb_link_update(dev, 0);
-
-	/* check if link has changed */
-	if (ret < 0)
-		return 0;
-
-	memset(&link, 0, sizeof(link));
-	rte_igb_dev_atomic_read_link_status(dev, &link);
-	if (link.link_status) {
-		PMD_INIT_LOG(INFO,
-			" Port %d: Link Up - speed %u Mbps - %s\n",
-			dev->data->port_id, (unsigned)link.link_speed,
-			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
-				"full-duplex" : "half-duplex");
-	} else {
-		PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
-					dev->data->port_id);
-	}
-	PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
-				dev->pci_dev->addr.domain,
-				dev->pci_dev->addr.bus,
-				dev->pci_dev->addr.devid,
-				dev->pci_dev->addr.function);
-	tctl = E1000_READ_REG(hw, E1000_TCTL);
-	rctl = E1000_READ_REG(hw, E1000_RCTL);
-	if (link.link_status) {
-		/* enable Tx/Rx */
-		tctl |= E1000_TCTL_EN;
-		rctl |= E1000_RCTL_EN;
-	} else {
-		/* disable Tx/Rx */
-		tctl &= ~E1000_TCTL_EN;
-		rctl &= ~E1000_RCTL_EN;
+	if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
+		intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+
+		/* set get_link_status to check register later */
+		hw->mac.get_link_status = 1;
+		ret = eth_igb_link_update(dev, 0);
+
+		/* check if link has changed */
+		if (ret < 0)
+			return 0;
+
+		memset(&link, 0, sizeof(link));
+		rte_igb_dev_atomic_read_link_status(dev, &link);
+		if (link.link_status) {
+			PMD_INIT_LOG(INFO,
+				" Port %d: Link Up - speed %u Mbps - %s\n",
+				dev->data->port_id, (unsigned)link.link_speed,
+				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down\n", + dev->data->port_id); + } + PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + if (link.link_status) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); } - E1000_WRITE_REG(hw, E1000_TCTL, tctl); - E1000_WRITE_REG(hw, E1000_RCTL, rctl); - E1000_WRITE_FLUSH(hw); return 0; } @@ -1468,13 +1770,13 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev) * void */ static void -eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param) +eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; eth_igb_interrupt_get_status(dev); eth_igb_interrupt_action(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); } static int @@ -1508,6 +1810,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) }; uint32_t rx_buf_size; uint32_t max_high_water; + uint32_t rctl; hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); rx_buf_size = igb_get_rx_buffer_size(hw); @@ -1530,6 +1833,21 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) err = e1000_setup_link_generic(hw); if (err == E1000_SUCCESS) { + + /* check if we want to forward MAC frames - driver doesn't have native + * capability to do that, so we'll write the registers ourselves */ + + rctl = E1000_READ_REG(hw, E1000_RCTL); + + /* set or clear MFLCN.PMCF bit depending on configuration */ + if (fc_conf->mac_ctrl_frame_fwd != 0) + rctl |= E1000_RCTL_PMCF; + else + rctl &= ~E1000_RCTL_PMCF; + + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + return 0; } @@ -1537,13 +1855,18 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) return (-EIO); } +#define E1000_RAH_POOLSEL_SHIFT (18) static void eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, __rte_unused uint32_t pool) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rah; e1000_rar_set(hw, mac_addr->addr_bytes, index); + rah = E1000_READ_REG(hw, E1000_RAH(index)); + rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool)); + E1000_WRITE_REG(hw, E1000_RAH(index), rah); } static void @@ -1566,7 +1889,7 @@ igbvf_intr_disable(struct e1000_hw *hw) PMD_INIT_LOG(DEBUG, "igbvf_intr_disable"); /* Clear interrupt mask to stop from interrupts being generated */ - E1000_WRITE_REG(hw, E1000_EIMC, ~0); + E1000_WRITE_REG(hw, E1000_EIMC, 0xFFFF); E1000_WRITE_FLUSH(hw); } @@ -1583,7 +1906,7 @@ igbvf_stop_adapter(struct rte_eth_dev *dev) eth_igb_infos_get(dev, &dev_info); /* Clear interrupt mask to stop from interrupts being generated */ - E1000_WRITE_REG(hw, E1000_EIMC, ~0); + igbvf_intr_disable(hw); /* Clear any pending interrupts, flush previous writes */ E1000_READ_REG(hw, E1000_EICR); @@ -1670,13 +1993,17 @@ igbvf_dev_configure(struct rte_eth_dev *dev) static int igbvf_dev_start(struct rte_eth_dev *dev) { + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); int 

 	PMD_INIT_LOG(DEBUG, "igbvf_dev_start");

+	hw->mac.ops.reset_hw(hw);
+
 	/* Set all vfta */
 	igbvf_set_vfta_all(dev,1);
-	
+
 	eth_igbvf_tx_init(dev);

 	/* This can fail when allocating mbufs for descriptor rings */
@@ -1696,9 +2023,9 @@ igbvf_dev_stop(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "igbvf_dev_stop");

 	igbvf_stop_adapter(dev);
-	
-	/* 
-	 * Clear what we set, but we still keep shadow_vfta to 
+
+	/*
+	 * Clear what we set, but we still keep shadow_vfta to
 	 * restore after device starts
 	 */
 	igbvf_set_vfta_all(dev,0);
@@ -1723,7 +2050,7 @@ static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
 	struct e1000_mbx_info *mbx = &hw->mbx;
 	uint32_t msgbuf[2];

-	/* After set vlan, vlan strip will also be enabled in igb driver*/ 
+	/* After set vlan, vlan strip will also be enabled in igb driver*/
 	msgbuf[0] = E1000_VF_SET_VLAN;
 	msgbuf[1] = vid;
 	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
@@ -1735,7 +2062,7 @@ static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)

 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
-	struct e1000_hw *hw = 
+	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_vfta * shadow_vfta =
 		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
@@ -1747,7 +2074,8 @@ static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 		mask = 1;
 		for (j = 0; j < 32; j++){
 			if(vfta & mask)
-				igbvf_set_vfta(hw, (i<<5)+j, on);
+				igbvf_set_vfta(hw,
+					(uint16_t)((i<<5)+j), on);
 			mask<<=1;
 		}
 	}
@@ -1758,14 +2086,14 @@ static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 static int
 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
-	struct e1000_hw *hw = 
+	struct e1000_hw *hw =
 		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct e1000_vfta * shadow_vfta =
 		E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
 	uint32_t vid_idx = 0;
 	uint32_t vid_bit = 0;
 	int ret = 0;
-	
+
 	PMD_INIT_LOG(DEBUG, "igbvf_vlan_filter_set");

 	/*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
@@ -1786,3 +2114,87 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)

 	return 0;
 }
+
+static int
+eth_igb_rss_reta_update(struct rte_eth_dev *dev,
+				struct rte_eth_rss_reta *reta_conf)
+{
+	uint8_t i,j,mask;
+	uint32_t reta;
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Update Redirection Table RETA[n],n=0...31,The redirection table has
+	 * 128-entries in 32 registers
+	 */
+	for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
+		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
+			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+		else
+			mask = (uint8_t)((reta_conf->mask_hi >>
+				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
+		if (mask != 0) {
+			reta = 0;
+			/* If all 4 entries were set,don't need read RETA register */
+			if (mask != 0xF)
+				reta = E1000_READ_REG(hw,E1000_RETA(i >> 2));
+
+			for (j = 0; j < 4; j++) {
+				if (mask & (0x1 << j)) {
+					if (mask != 0xF)
+						reta &= ~(0xFF << 8 * j);
+					reta |= reta_conf->reta[i + j] << 8 * j;
+				}
+			}
+			E1000_WRITE_REG(hw, E1000_RETA(i >> 2),reta);
+		}
+	}
+
+	return 0;
+}
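The update loop above packs four one-byte redirection entries into each 32-bit RETA register (128 entries across 32 registers), using a 4-bit mask per register to decide which bytes to rewrite. The packing step in isolation, as a self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the byte-packing in eth_igb_rss_reta_update():
 * four 8-bit RETA entries per 32-bit register, a 4-bit mask selecting
 * which of the four bytes to overwrite. */
static uint32_t
pack_reta(uint32_t old_reg, const uint8_t entries[4], uint8_t mask4)
{
	uint32_t reg = old_reg;
	int j;

	for (j = 0; j < 4; j++) {
		if (mask4 & (1u << j)) {
			reg &= ~(0xFFu << (8 * j));             /* clear byte j  */
			reg |= (uint32_t)entries[j] << (8 * j); /* insert entry j */
		}
	}
	return reg;
}

int main(void)
{
	const uint8_t e[4] = { 0x01, 0x02, 0x03, 0x04 };

	/* mask 0x5 selects bytes 0 and 2: 0xAABBCCDD -> 0xAA03CC01 */
	printf("0x%08X\n", pack_reta(0xAABBCCDDu, e, 0x5));
	return 0;
}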
+
+static int
+eth_igb_rss_reta_query(struct rte_eth_dev *dev,
+				struct rte_eth_rss_reta *reta_conf)
+{
+	uint8_t i,j,mask;
+	uint32_t reta;
+	struct e1000_hw *hw =
+		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/*
+	 * Read Redirection Table RETA[n],n=0...31,The redirection table has
+	 * 128-entries in 32 registers
+	 */
+	for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
+		if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
+			mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+		else
+			mask = (uint8_t)((reta_conf->mask_hi >>
+				(i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
+
+		if (mask != 0) {
+			reta = E1000_READ_REG(hw,E1000_RETA(i >> 2));
+			for (j = 0; j < 4; j++) {
+				if (mask & (0x1 << j))
+					reta_conf->reta[i + j] =
+						(uint8_t)((reta >> 8 * j) & 0xFF);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct rte_driver pmd_igb_drv = {
+	.type = PMD_PDEV,
+	.init = rte_igb_pmd_init,
+};
+
+static struct rte_driver pmd_igbvf_drv = {
+	.type = PMD_PDEV,
+	.init = rte_igbvf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(pmd_igb_drv);
+PMD_REGISTER_DRIVER(pmd_igbvf_drv);
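The closing hunk moves both PMDs to rte_driver registration via PMD_REGISTER_DRIVER, so each driver is recorded in a list at load time and its .init is invoked from rte_eal_init(). A simplified, self-contained sketch of that constructor-based registration pattern; the real macro lives in DPDK's rte_dev.h and its exact expansion may differ:

#include <stdio.h>

/* Simplified model of struct rte_driver and its registration list. */
struct drv {
	const char *name;
	int (*init)(const char *name, const char *params);
	struct drv *next;
};

static struct drv *drv_list;

static void drv_register(struct drv *d)
{
	d->next = drv_list;
	drv_list = d;
}

/* Constructor mechanism: the registration function runs before main(),
 * which is how PMD_REGISTER_DRIVER makes drivers visible to the EAL. */
#define REGISTER_DRIVER(d) \
static void __attribute__((constructor)) register_##d(void) { drv_register(&d); }

static int igb_init(const char *name, const char *params)
{
	(void)name; (void)params;
	puts("igb init");
	return 0;
}

static struct drv igb_drv = { .name = "igb", .init = igb_init };
REGISTER_DRIVER(igb_drv)

/* Stand-in for the EAL walking the registered drivers at rte_eal_init(). */
int main(void)
{
	for (struct drv *d = drv_list; d != NULL; d = d->next)
		d->init(d->name, NULL);
	return 0;
}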