X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_ixgbe%2Fixgbe_ethdev.c;h=a50bce4cb29681cb336a52362b530d2068781f93;hb=88fccb7a05c64cbb10f2c82d61fafc68f2f2150d;hp=6770c2256fc33acb0ef1af52896617a1f7231670;hpb=a974564b341e00c806c4421c2c746e9e98675c10;p=dpdk.git

diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 6770c2256f..a50bce4cb2 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
  *
- *     * Redistributions of source code must retain the above copyright
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
 *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
  */

 #include 
@@ -59,12 +58,14 @@
 #include 
 #include 
 #include 
+#include 

 #include "ixgbe_logs.h"
 #include "ixgbe/ixgbe_api.h"
 #include "ixgbe/ixgbe_vf.h"
 #include "ixgbe/ixgbe_common.h"
 #include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"

 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
@@ -84,6 +85,10 @@
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */

+#define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
+
+
+#define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */

 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

@@ -116,10 +121,6 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev,
         uint16_t queue, int on);
 static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-static void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
@@ -131,8 +132,12 @@ static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
         struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
         struct rte_eth_pfc_conf *pfc_conf);
+static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+        struct rte_eth_rss_reta *reta_conf);
+static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+        struct rte_eth_rss_reta *reta_conf);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -161,6 +166,21 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);

+/* For Eth VMDQ APIs support */
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
+        ether_addr* mac_addr,uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+        uint16_t rx_mask, uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+        uint64_t pool_mask,uint8_t vlan_on);
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+        struct rte_eth_vmdq_mirror_conf *mirror_conf,
+        uint8_t rule_id, uint8_t on);
+static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
+        uint8_t rule_id);

 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -242,6 +262,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
     .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
     .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
     .rx_queue_release     = ixgbe_dev_rx_queue_release,
+    .rx_queue_count       = ixgbe_dev_rx_queue_count,
+    .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
     .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
     .tx_queue_release     = ixgbe_dev_tx_queue_release,
     .dev_led_on           = ixgbe_dev_led_on,
@@ -250,6 +272,14 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
     .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
     .mac_addr_add         = ixgbe_add_rar,
     .mac_addr_remove      = ixgbe_remove_rar,
+    .uc_hash_table_set    = ixgbe_uc_hash_table_set,
+    .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
+    .mirror_rule_set      = ixgbe_mirror_rule_set,
+    .mirror_rule_reset    = ixgbe_mirror_rule_reset,
+    .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
+    .set_vf_rx            = ixgbe_set_pool_rx,
+    .set_vf_tx            = ixgbe_set_pool_tx,
+    .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
     .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
     .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
     .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
@@ -258,6 +288,19 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
     .fdir_update_perfect_filter   = ixgbe_fdir_update_perfect_filter,
     .fdir_remove_perfect_filter   = ixgbe_fdir_remove_perfect_filter,
     .fdir_set_masks               = ixgbe_fdir_set_masks,
+    .reta_update          = ixgbe_dev_rss_reta_update,
+    .reta_query           = ixgbe_dev_rss_reta_query,
+#ifdef RTE_NIC_BYPASS
+    .bypass_init          = ixgbe_bypass_init,
+    .bypass_state_set     = ixgbe_bypass_state_store,
+    .bypass_state_show    = ixgbe_bypass_state_show,
+    .bypass_event_set     = ixgbe_bypass_event_store,
+    .bypass_event_show    = ixgbe_bypass_event_show,
+    .bypass_wd_timeout_set  = ixgbe_bypass_wd_timeout_store,
+    .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show,
+    .bypass_ver_show      = ixgbe_bypass_ver_show,
+    .bypass_wd_reset      = ixgbe_bypass_wd_reset,
+#endif /* RTE_NIC_BYPASS */
 };

 /*
@@ -354,6 +397,35 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
     }
 }

+static inline int32_t
+ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
+{
+    uint32_t ctrl_ext;
+    int32_t status;
+
+    status = ixgbe_reset_hw(hw);
+
+    ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+    /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+    ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+    IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+    IXGBE_WRITE_FLUSH(hw);
+
+    return status;
+}
+
+static inline void
+ixgbe_enable_intr(struct rte_eth_dev *dev)
+{
+    struct ixgbe_interrupt *intr =
+        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
+    IXGBE_WRITE_FLUSH(hw);
+}
+
 /*
  * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
  */
@@ -515,6 +587,39 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
     }
 }

+/*
+ * Ensure that all locks are released before first NVM or PHY access
+ */
+static void
+ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+{
+    uint16_t mask;
+
+    /*
+     * Phy lock should not fail in this early stage. If this is the case,
+     * it is due to an improper exit of the application.
+     * So force the release of the faulty lock. Release of common lock
+     * is done automatically by swfw_sync function.
+     */
+    mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
+    if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+        DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+    }
+    ixgbe_release_swfw_semaphore(hw, mask);
+
+    /*
+     * These ones are more tricky since they are common to all ports; but
+     * swfw_sync retries last long enough (1s) to be almost sure that if
+     * lock can not be taken it is due to an improper lock of the
+     * semaphore.
+     */
+    mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
+    if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+        DEBUGOUT("SWFW common locks released");
+    }
+    ixgbe_release_swfw_semaphore(hw, mask);
+}
+
 /*
  * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
  * It returns 0 on success.
@@ -555,15 +660,29 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     /* Vendor and Device ID need to be set before init of shared code */
     hw->device_id = pci_dev->id.device_id;
     hw->vendor_id = pci_dev->id.vendor_id;
-    hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+    hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
+    hw->allow_unsupported_sfp = 1;
+#endif

     /* Initialize the shared code */
+#ifdef RTE_NIC_BYPASS
+    diag = ixgbe_bypass_init_shared_code(hw);
+#else
     diag = ixgbe_init_shared_code(hw);
+#endif /* RTE_NIC_BYPASS */
+
     if (diag != IXGBE_SUCCESS) {
         PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
         return -EIO;
     }

+    /* pick up the PCI bus settings for reporting later */
+    ixgbe_get_bus_info(hw);
+
+    /* Unlock any pending hardware semaphore */
+    ixgbe_swfw_lock_reset(hw);
+
     /* Initialize DCB configuration*/
     memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
     ixgbe_dcb_init(hw,dcb_config);
@@ -577,8 +696,6 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     }
     hw->fc.send_xon = 1;

-    ixgbe_disable_intr(hw);
-
     /* Make sure we have a good EEPROM before we read from it */
     diag = ixgbe_validate_eeprom_checksum(hw, &csum);
     if (diag != IXGBE_SUCCESS) {
@@ -586,7 +703,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
         return -EIO;
     }

+#ifdef RTE_NIC_BYPASS
+    diag = ixgbe_bypass_init_hw(hw);
+#else
     diag = ixgbe_init_hw(hw);
+#endif /* RTE_NIC_BYPASS */

     /*
      * Devices with copper phys will fail to initialise if ixgbe_init_hw()
@@ -616,8 +737,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
         return -EIO;
     }

-    /* pick up the PCI bus settings for reporting later */
-    ixgbe_get_bus_info(hw);
+    /* disable interrupt */
+    ixgbe_disable_intr(hw);

     /* reset mappings for queue statistics hw counters*/
     ixgbe_reset_qstat_mappings(hw);
@@ -627,13 +748,24 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
             hw->mac.num_rar_entries, 0);
     if (eth_dev->data->mac_addrs == NULL) {
         PMD_INIT_LOG(ERR,
-            "Failed to allocate %d bytes needed to store MAC addresses",
+            "Failed to allocate %u bytes needed to store "
+            "MAC addresses",
             ETHER_ADDR_LEN * hw->mac.num_rar_entries);
         return -ENOMEM;
     }
     /* Copy the permanent MAC address */
     ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
             &eth_dev->data->mac_addrs[0]);
+
+    /* Allocate memory for storing hash filter MAC addresses */
+    eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+            IXGBE_VMDQ_NUM_UC_MAC, 0);
+    if (eth_dev->data->hash_mac_addrs == NULL) {
+        PMD_INIT_LOG(ERR,
+            "Failed to allocate %d bytes needed to store MAC addresses",
+            ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+        return -ENOMEM;
+    }

     /* initialize the vfta */
     memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -641,10 +773,16 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     /* initialize the hw strip bitmap*/
     memset(hwstrip, 0, sizeof(*hwstrip));

-    /* let hardware know driver is loaded */
+    /* initialize PF if max_vfs not zero */
+    ixgbe_pf_host_init(eth_dev);
+
     ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+    /* let hardware know driver is loaded */
     ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+    /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+    ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
     IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+    IXGBE_WRITE_FLUSH(hw);

     if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
         PMD_INIT_LOG(DEBUG,
@@ -662,9 +800,41 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     rte_intr_callback_register(&(pci_dev->intr_handle),
         ixgbe_dev_interrupt_handler, (void *)eth_dev);

+    /* enable uio intr after callback register */
+    rte_intr_enable(&(pci_dev->intr_handle));
+
+    /* enable support intr */
+    ixgbe_enable_intr(eth_dev);
+
     return 0;
 }
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (ixgbe_mbox_api_10).
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
+{
+    int32_t i;
+
+    /* start with highest supported, proceed down */
+    static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+        ixgbe_mbox_api_11,
+        ixgbe_mbox_api_10,
+    };
+
+    for (i = 0;
+            i != RTE_DIM(sup_ver) &&
+            ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+            i++)
+        ;
+}

 /*
  * Virtual Function device init
  */
@@ -672,9 +842,11 @@ static int
 eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
              struct rte_eth_dev *eth_dev)
 {
-    struct rte_pci_device *pci_dev;
-    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
     int diag;
+    uint32_t tc, tcs;
+    struct rte_pci_device *pci_dev;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
     struct ixgbe_vfta * shadow_vfta =
         IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
     struct ixgbe_hwstrip *hwstrip =
@@ -683,11 +855,23 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");

     eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+    eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+    eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+    /* for secondary processes, we don't initialise any further as primary
+     * has already done this work. Only check we don't need a different
+     * RX function */
+    if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+        if (eth_dev->data->scattered_rx)
+            eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+        return 0;
+    }
+
     pci_dev = eth_dev->pci_dev;

     hw->device_id = pci_dev->id.device_id;
     hw->vendor_id = pci_dev->id.vendor_id;
-    hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+    hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

     /* initialize the vfta */
     memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -710,20 +894,34 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
     hw->mac.num_rar_entries = hw->mac.max_rx_queues;
     diag = hw->mac.ops.reset_hw(hw);
+
     if (diag != IXGBE_SUCCESS) {
         PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+        RTE_LOG(ERR, PMD, "\tThe MAC address is not valid.\n"
+            "\tThe most likely cause of this error is that the VM host\n"
+            "\thas not assigned a valid MAC address to this VF device.\n"
+            "\tPlease consult the DPDK Release Notes (FAQ section) for\n"
+            "\ta possible solution to this problem.\n");
         return (diag);
     }

+    /* negotiate mailbox API version to use with the PF. */
+    ixgbevf_negotiate_api(hw);
+
+    /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+    ixgbevf_get_queues(hw, &tcs, &tc);
+
     /* Allocate memory for storing MAC addresses */
     eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
             hw->mac.num_rar_entries, 0);
     if (eth_dev->data->mac_addrs == NULL) {
         PMD_INIT_LOG(ERR,
-            "Failed to allocate %d bytes needed to store MAC addresses",
+            "Failed to allocate %u bytes needed to store "
+            "MAC addresses",
             ETHER_ADDR_LEN * hw->mac.num_rar_entries);
         return -ENOMEM;
     }
+
     /* Copy the permanent MAC address */
     ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
             &eth_dev->data->mac_addrs[0]);
@@ -774,8 +972,8 @@ static struct eth_driver rte_ixgbevf_pmd = {
  * Invoked once at EAL init time.
  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
  */
-int
-rte_ixgbe_pmd_init(void)
+static int
+rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
 {
     PMD_INIT_FUNC_TRACE();

@@ -788,8 +986,8 @@ rte_ixgbe_pmd_init(void)
  * Invoked once at EAL init time.
  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
  */
-int
-rte_ixgbevf_pmd_init(void)
+static int
+rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
 {
     DEBUGFUNC("rte_ixgbevf_pmd_init");

@@ -842,7 +1040,7 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
     IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
 }

-static void
+void
 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw =
@@ -858,7 +1056,7 @@ ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
     IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 }

-static void
+void
 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw =
@@ -945,7 +1143,7 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
     ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }

-static void
+void
 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw =
@@ -973,7 +1171,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
     }
 }

-static void
+void
 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
 {
     struct ixgbe_hw *hw =
@@ -1072,6 +1270,17 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
     }
 }

+static void
+ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
+    uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+    vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+}
+
 static int
 ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1098,13 +1307,14 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
     int err, link_up = 0, negotiate = 0;
     uint32_t speed = 0;
     int mask = 0;
+    int status;

     PMD_INIT_FUNC_TRACE();

     /* IXGBE devices don't support half duplex */
     if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
             (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-        PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
+        PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
             dev->data->dev_conf.link_duplex,
             dev->data->port_id);
         return -EINVAL;
@@ -1116,11 +1326,17 @@ ixgbe_dev_start(struct rte_eth_dev *dev)

     /* reinitialize adapter
      * this calls reset and start */
-    ixgbe_init_hw(hw);
+    status = ixgbe_pf_reset_hw(hw);
+    if (status != 0)
+        return -1;
+    hw->mac.ops.start_hw(hw);
+
+    /* configure PF module if SRIOV enabled */
+    ixgbe_pf_host_configure(dev);

     /* initialize transmission unit */
     ixgbe_dev_tx_init(dev);
-
+
     /* This can fail when allocating mbufs for descriptor rings */
     err = ixgbe_dev_rx_init(dev);
     if (err) {
@@ -1137,8 +1353,12 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
     }

     /* Turn on the laser */
-    if (hw->phy.multispeed_fiber)
-        ixgbe_enable_tx_laser(hw);
+    ixgbe_enable_tx_laser(hw);
+
+    /* Skip link setup if loopback mode is enabled for 82599. */
+    if (hw->mac.type == ixgbe_mac_82599EB &&
+            dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+        goto skip_link_setup;

     err = ixgbe_check_link(hw, &speed, &link_up, 0);
     if (err)
@@ -1167,8 +1387,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
         speed = IXGBE_LINK_SPEED_10GB_FULL;
         break;
     default:
-        PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
-                dev->data->dev_conf.link_speed, dev->data->port_id);
+        PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
+                dev->data->dev_conf.link_speed,
+                dev->data->port_id);
         goto error;
     }

@@ -1176,17 +1397,24 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
     if (err)
         goto error;

+skip_link_setup:
+
     /* check if lsc interrupt is enabled */
-    if (dev->data->dev_conf.intr_conf.lsc != 0) {
-        err = ixgbe_dev_interrupt_setup(dev);
-        if (err)
-            goto error;
-    }
+    if (dev->data->dev_conf.intr_conf.lsc != 0)
+        ixgbe_dev_lsc_interrupt_setup(dev);
+
+    /* resume enabled intr since hw reset */
+    ixgbe_enable_intr(dev);

     mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
         ETH_VLAN_EXTEND_MASK;
     ixgbe_vlan_offload_set(dev, mask);
-
+
+    if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+        /* Enable vlan filtering for VMDq */
+        ixgbe_vmdq_vlan_hw_filter_enable(dev);
+    }
+
     /* Configure DCB hw */
     ixgbe_configure_dcb(dev);
@@ -1215,6 +1443,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
     struct rte_eth_link link;
     struct ixgbe_hw *hw =
         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_vf_info *vfinfo =
+        *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+    int vf;

     PMD_INIT_FUNC_TRACE();

@@ -1222,15 +1453,18 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
     ixgbe_disable_intr(hw);

     /* reset the NIC */
-    ixgbe_reset_hw(hw);
+    ixgbe_pf_reset_hw(hw);
     hw->adapter_stopped = FALSE;

     /* stop adapter */
     ixgbe_stop_adapter(hw);

+    for (vf = 0; vfinfo != NULL &&
+            vf < dev->pci_dev->max_vfs; vf++)
+        vfinfo[vf].clear_to_send = false;
+
     /* Turn off the laser */
-    if (hw->phy.multispeed_fiber)
-        ixgbe_disable_tx_laser(hw);
+    ixgbe_disable_tx_laser(hw);

     ixgbe_dev_clear_queues(dev);
@@ -1250,8 +1484,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)

     PMD_INIT_FUNC_TRACE();

-    ixgbe_reset_hw(hw);
-
+    ixgbe_pf_reset_hw(hw);
     ixgbe_dev_stop(dev);
     hw->adapter_stopped = 1;
@@ -1427,6 +1660,12 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)

     stats->oerrors = 0;

+    /* XON/XOFF pause frames */
+    stats->tx_pause_xon  = hw_stats->lxontxc;
+    stats->rx_pause_xon  = hw_stats->lxonrxc;
+    stats->tx_pause_xoff = hw_stats->lxofftxc;
+    stats->rx_pause_xoff = hw_stats->lxoffrxc;
+
     /* Flow Director Stats registers */
     hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
     hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
@@ -1513,6 +1752,23 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
     dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
     dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
     dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+    dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+    dev_info->max_vfs = dev->pci_dev->max_vfs;
+    if (hw->mac.type == ixgbe_mac_82598EB)
+        dev_info->max_vmdq_pools = ETH_16_POOLS;
+    else
+        dev_info->max_vmdq_pools = ETH_64_POOLS;
+    dev_info->rx_offload_capa =
+        DEV_RX_OFFLOAD_VLAN_STRIP |
+        DEV_RX_OFFLOAD_IPV4_CKSUM |
+        DEV_RX_OFFLOAD_UDP_CKSUM  |
+        DEV_RX_OFFLOAD_TCP_CKSUM;
+    dev_info->tx_offload_capa =
+        DEV_TX_OFFLOAD_VLAN_INSERT |
+        DEV_TX_OFFLOAD_IPV4_CKSUM  |
+        DEV_TX_OFFLOAD_UDP_CKSUM   |
+        DEV_TX_OFFLOAD_TCP_CKSUM   |
+        DEV_TX_OFFLOAD_SCTP_CKSUM;
 }

 /* return 0 means link status changed, -1 means not changed */
@@ -1644,14 +1900,13 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
+ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
-    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_interrupt *intr =
+        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

     ixgbe_dev_link_status_print(dev);
-    IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
-    IXGBE_WRITE_FLUSH(hw);
-    rte_intr_enable(&(dev->pci_dev->intr_handle));
+    intr->mask |= IXGBE_EICR_LSC;

     return 0;
 }
@@ -1674,17 +1929,22 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

-    IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
-    IXGBE_WRITE_FLUSH(hw);
+    /* clear all cause mask */
+    ixgbe_disable_intr(hw);

     /* read-on-clear nic registers here */
     eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-    PMD_INIT_LOG(INFO, "eicr %x", eicr);
+    PMD_DRV_LOG(INFO, "eicr %x", eicr);
+
+    intr->flags = 0;
     if (eicr & IXGBE_EICR_LSC) {
         /* set flag for async link update */
         intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
     }

+    if (eicr & IXGBE_EICR_MAILBOX)
+        intr->flags |= IXGBE_FLAG_MAILBOX;
+
     return 0;
 }
@@ -1723,7 +1983,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 }

 /*
- * It executes link_update after knowing an interrupt occured.
+ * It executes link_update after knowing an interrupt occurred.
  *
  * @param dev
  *  Pointer to struct rte_eth_dev.
@@ -1737,11 +1997,48 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 {
     struct ixgbe_interrupt *intr =
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+    int64_t timeout;
+    struct rte_eth_link link;
+    int intr_enable_delay = false;

-    if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) {
-        return -1;
+    PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+
+    if (intr->flags & IXGBE_FLAG_MAILBOX) {
+        ixgbe_pf_mbx_process(dev);
+        intr->flags &= ~IXGBE_FLAG_MAILBOX;
+    }
+
+    if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+        /* get the link status before link update, for predicting later */
+        memset(&link, 0, sizeof(link));
+        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+        ixgbe_dev_link_update(dev, 0);
+
+        /* likely to up */
+        if (!link.link_status)
+            /* handle it 1 sec later, wait it being stable */
+            timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+        /* likely to down */
+        else
+            /* handle it 4 sec later, wait it being stable */
+            timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+        ixgbe_dev_link_status_print(dev);
+
+        intr_enable_delay = true;
+    }
+
+    if (intr_enable_delay) {
+        if (rte_eal_alarm_set(timeout * 1000,
+                ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+            PMD_DRV_LOG(ERR, "Error setting alarm");
+    } else {
+        PMD_DRV_LOG(DEBUG, "enable intr immediately");
+        ixgbe_enable_intr(dev);
+        rte_intr_enable(&(dev->pci_dev->intr_handle));
     }
-    ixgbe_dev_link_update(dev, 0);
+
     return 0;
 }
@@ -1768,17 +2065,22 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
         IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
     struct ixgbe_hw *hw =
         IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    uint32_t eicr;
+
+    eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+    if (eicr & IXGBE_EICR_MAILBOX)
+        ixgbe_pf_mbx_process(dev);

-    IXGBE_READ_REG(hw, IXGBE_EICR);
-    ixgbe_dev_interrupt_action(dev);
     if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+        ixgbe_dev_link_update(dev, 0);
         intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
-        rte_intr_enable(&(dev->pci_dev->intr_handle));
-        IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
-        IXGBE_WRITE_FLUSH(hw);
         ixgbe_dev_link_status_print(dev);
         _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
     }
+
+    PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+    ixgbe_enable_intr(dev);
+    rte_intr_enable(&(dev->pci_dev->intr_handle));
 }

 /**
@@ -1797,34 +2099,9 @@ static void
 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                             void *param)
 {
-    int64_t timeout;
-    struct rte_eth_link link;
     struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-    struct ixgbe_interrupt *intr =
-        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-    /* get the link status before link update, for predicting later */
-    memset(&link, 0, sizeof(link));
-    rte_ixgbe_dev_atomic_read_link_status(dev, &link);

     ixgbe_dev_interrupt_get_status(dev);
     ixgbe_dev_interrupt_action(dev);
-
-    if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
-        return;
-
-    /* likely to up */
-    if (!link.link_status)
-        /* handle it 1 sec later, wait it being stable */
-        timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
-    /* likely to down */
-    else
-        /* handle it 4 sec later, wait it being stable */
-        timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-
-    ixgbe_dev_link_status_print(dev);
-    if (rte_eal_alarm_set(timeout * 1000,
-            ixgbe_dev_interrupt_delayed_handler, param) < 0)
-        PMD_INIT_LOG(ERR, "Error setting alarm");
 }

 static int
@@ -1852,6 +2129,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
     int err;
     uint32_t rx_buf_size;
     uint32_t max_high_water;
+    uint32_t mflcn;
    enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
         ixgbe_fc_none,
         ixgbe_fc_rx_pause,
@@ -1884,8 +2162,24 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
     hw->fc.send_xon = fc_conf->send_xon;
     err = ixgbe_fc_enable(hw);
+
     /* Not negotiated is not an error case */
     if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+
+        /* check if we want to forward MAC frames - driver doesn't have native
+         * capability to do that, so we'll write the registers ourselves */
+
+        mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+        /* set or clear MFLCN.PMCF bit depending on configuration */
+        if (fc_conf->mac_ctrl_frame_fwd != 0)
+            mflcn |= IXGBE_MFLCN_PMCF;
+        else
+            mflcn &= ~IXGBE_MFLCN_PMCF;
+
+        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn);
+        IXGBE_WRITE_FLUSH(hw);
+
         return 0;
     }
@@ -2098,6 +2392,79 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
     return -EIO;
 }

+static int
+ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+            struct rte_eth_rss_reta *reta_conf)
+{
+    uint8_t i,j,mask;
+    uint32_t reta;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    PMD_INIT_FUNC_TRACE();
+    /*
+     * Update Redirection Table RETA[n],n=0...31,The redirection table has
+     * 128-entries in 32 registers
+     */
+    for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
+        if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
+            mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+        else
+            mask = (uint8_t)((reta_conf->mask_hi >>
+                (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
+        if (mask != 0) {
+            reta = 0;
+            if (mask != 0xF)
+                reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
+
+            for (j = 0; j < 4; j++) {
+                if (mask & (0x1 << j)) {
+                    if (mask != 0xF)
+                        reta &= ~(0xFF << 8 * j);
+                    reta |= reta_conf->reta[i + j] << 8*j;
+                }
+            }
+            IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta);
+        }
+    }
+
+    return 0;
+}
+
+static int
+ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+            struct rte_eth_rss_reta *reta_conf)
+{
+    uint8_t i,j,mask;
+    uint32_t reta;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    PMD_INIT_FUNC_TRACE();
+    /*
+     * Read Redirection Table RETA[n],n=0...31,The redirection table has
+     * 128-entries in 32 registers
+     */
+    for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
+        if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
+            mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+        else
+            mask = (uint8_t)((reta_conf->mask_hi >>
+                (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
+
+        if (mask != 0) {
+            reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
+            for (j = 0; j < 4; j++) {
+                if (mask & (0x1 << j))
+                    reta_conf->reta[i + j] =
+                        (uint8_t)((reta >> 8 * j) & 0xFF);
+            }
+        }
+    }
+
+    return 0;
+}
+
 static void
 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                 uint32_t index, uint32_t pool)
@@ -2160,10 +2527,17 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
     int err, mask = 0;

     PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");

+    hw->mac.ops.reset_hw(hw);
+
+    /* negotiate mailbox API version to use with the PF. */
+    ixgbevf_negotiate_api(hw);
+
     ixgbevf_dev_tx_init(dev);

     /* This can fail when allocating mbufs for descriptor rings */
@@ -2311,3 +2685,411 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
             ixgbevf_vlan_strip_queue_set(dev,i,on);
     }
 }
+
+static int
+ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+{
+    uint32_t reg_val;
+
+    /* we only need to do this if VMDq is enabled */
+    reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+    if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+        PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+        return (-1);
+    }
+
+    return 0;
+}
+
+static uint32_t
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+{
+    uint32_t vector = 0;
+    switch (hw->mac.mc_filter_type) {
+    case 0:   /* use bits [47:36] of the address */
+        vector = ((uc_addr->addr_bytes[4] >> 4) |
+            (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+        break;
+    case 1:   /* use bits [46:35] of the address */
+        vector = ((uc_addr->addr_bytes[4] >> 3) |
+            (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+        break;
+    case 2:   /* use bits [45:34] of the address */
+        vector = ((uc_addr->addr_bytes[4] >> 2) |
+            (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+        break;
+    case 3:   /* use bits [43:32] of the address */
+        vector = ((uc_addr->addr_bytes[4]) |
+            (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+        break;
+    default:  /* Invalid mc_filter_type */
+        break;
+    }
+
+    /* vector can only be 12-bits or boundary will be exceeded */
+    vector &= 0xFFF;
+    return vector;
+}
+
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
+            uint8_t on)
+{
+    uint32_t vector;
+    uint32_t uta_idx;
+    uint32_t reg_val;
+    uint32_t uta_shift;
+    uint32_t rc;
+    const uint32_t ixgbe_uta_idx_mask = 0x7F;
+    const uint32_t ixgbe_uta_bit_shift = 5;
+    const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+    const uint32_t bit1 = 0x1;
+
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_uta_info *uta_info =
+        IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+    /* The UTA table only exists on 82599 hardware and newer */
+    if (hw->mac.type < ixgbe_mac_82599EB)
+        return (-ENOTSUP);
+
+    vector = ixgbe_uta_vector(hw,mac_addr);
+    uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+    uta_shift = vector & ixgbe_uta_bit_mask;
+
+    rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
+    if(rc == on)
+        return 0;
+
+    reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+    if (on) {
+        uta_info->uta_in_use++;
+        reg_val |= (bit1 << uta_shift);
+        uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+    } else {
+        uta_info->uta_in_use--;
+        reg_val &= ~(bit1 << uta_shift);
+        uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+    }
+
+    IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+
+    if (uta_info->uta_in_use > 0)
+        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+            IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+    else
+        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+
+    return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+    int i;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_uta_info *uta_info =
+        IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+    /* The UTA table only exists on 82599 hardware and newer */
+    if (hw->mac.type < ixgbe_mac_82599EB)
+        return (-ENOTSUP);
+
+    if(on) {
+        for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+            uta_info->uta_shadow[i] = ~0;
+            IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
+        }
+    } else {
+        for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+            uta_info->uta_shadow[i] = 0;
+            IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
+        }
+    }
+    return 0;
+
+}
+static int
+ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+            uint16_t rx_mask, uint8_t on)
+{
+    int val = 0;
+
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+
+    if (hw->mac.type == ixgbe_mac_82598EB) {
+        PMD_INIT_LOG(ERR, "setting VF receive mode should be done"
+            " on 82599 hardware and newer\n");
+        return (-ENOTSUP);
+    }
+    if (ixgbe_vmdq_mode_check(hw) < 0)
+        return (-ENOTSUP);
+
+    if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG )
+        val |= IXGBE_VMOLR_AUPE;
+    if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC )
+        val |= IXGBE_VMOLR_ROMPE;
+    if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+        val |= IXGBE_VMOLR_ROPE;
+    if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+        val |= IXGBE_VMOLR_BAM;
+    if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+        val |= IXGBE_VMOLR_MPE;
+
+    if (on)
+        vmolr |= val;
+    else
+        vmolr &= ~val;
+
+    IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+
+    return 0;
+}
+
+static int
+ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+    uint32_t reg,addr;
+    uint32_t val;
+    const uint8_t bit1 = 0x1;
+
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    if (ixgbe_vmdq_mode_check(hw) < 0)
+        return (-ENOTSUP);
+
+    addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+    reg = IXGBE_READ_REG(hw, addr);
+    val = bit1 << pool;
+
+    if (on)
+        reg |= val;
+    else
+        reg &= ~val;
+
+    IXGBE_WRITE_REG(hw, addr,reg);
+
+    return 0;
+}
+
+static int
+ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+    uint32_t reg,addr;
+    uint32_t val;
+    const uint8_t bit1 = 0x1;
+
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    if (ixgbe_vmdq_mode_check(hw) < 0)
+        return (-ENOTSUP);
+
+    addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+    reg = IXGBE_READ_REG(hw, addr);
+    val = bit1 << pool;
+
+    if (on)
+        reg |= val;
+    else
+        reg &= ~val;
+
+    IXGBE_WRITE_REG(hw, addr,reg);
+
+    return 0;
+}
+
+static int
+ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+            uint64_t pool_mask, uint8_t vlan_on)
+{
+    int ret = 0;
+    uint16_t pool_idx;
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    if (ixgbe_vmdq_mode_check(hw) < 0)
+        return (-ENOTSUP);
+    for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
+        if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
+            ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+            if (ret < 0)
+                return ret;
+    }
+
+    return ret;
+}
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+        struct rte_eth_vmdq_mirror_conf *mirror_conf,
+        uint8_t rule_id, uint8_t on)
+{
+    uint32_t mr_ctl,vlvf;
+    uint32_t mp_lsb = 0;
+    uint32_t mv_msb = 0;
+    uint32_t mv_lsb = 0;
+    uint32_t mp_msb = 0;
+    uint8_t i = 0;
+    int reg_index = 0;
+    uint64_t vlan_mask = 0;
+
+    const uint8_t pool_mask_offset = 32;
+    const uint8_t vlan_mask_offset = 32;
+    const uint8_t dst_pool_offset = 8;
+    const uint8_t rule_mr_offset  = 4;
+    const uint8_t mirror_rule_mask= 0x0F;
+
+    struct ixgbe_mirror_info *mr_info =
+            (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+    if (ixgbe_vmdq_mode_check(hw) < 0)
+        return (-ENOTSUP);
+
+    /* Check if vlan mask is valid */
+    if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
+        if (mirror_conf->vlan.vlan_mask == 0)
+            return (-EINVAL);
+    }
+
+    /* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
+    if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+        for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+            if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+                /* search vlan id related pool vlan filter index */
+                reg_index = ixgbe_find_vlvf_slot(hw,
+                    mirror_conf->vlan.vlan_id[i]);
+                if(reg_index < 0)
+                    return (-EINVAL);
+                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+                if ((vlvf & IXGBE_VLVF_VIEN) &&
+                    ((vlvf & IXGBE_VLVF_VLANID_MASK)
+                        == mirror_conf->vlan.vlan_id[i]))
+                    vlan_mask |= (1ULL << reg_index);
+                else
+                    return (-EINVAL);
+            }
+        }
+
+        if (on) {
+            mv_lsb = vlan_mask & 0xFFFFFFFF;
+            mv_msb = vlan_mask >> vlan_mask_offset;
+
+            mr_info->mr_conf[rule_id].vlan.vlan_mask =
+                        mirror_conf->vlan.vlan_mask;
+            for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+                if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+                    mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+                        mirror_conf->vlan.vlan_id[i];
+            }
+        } else {
+            mv_lsb = 0;
+            mv_msb = 0;
+            mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+            for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+                mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+        }
+    }
+
+    /*
+     * if enable pool mirror, write related pool mask register,if disable
+     * pool mirror, clear PFMRVM register
+     */
+    if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+        if (on) {
+            mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+            mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+            mr_info->mr_conf[rule_id].pool_mask =
+                    mirror_conf->pool_mask;
+
+        } else {
+            mp_lsb = 0;
+            mp_msb = 0;
+            mr_info->mr_conf[rule_id].pool_mask = 0;
+        }
+    }
+
+    /* read  mirror control register and recalculate it */
+    mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
+
+    if (on) {
+        mr_ctl |= mirror_conf->rule_type_mask;
+        mr_ctl &= mirror_rule_mask;
+        mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+    } else
+        mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+
+    mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+    mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+    /* write mirror control register */
+    IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+    /* write pool mirror control register */
+    if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+        IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+        IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+                mp_msb);
+    }
+
+    /* write VLAN mirror control register */
+    if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+                mv_msb);
+    }
+
+    return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+    int mr_ctl = 0;
+    uint32_t lsb_val = 0;
+    uint32_t msb_val = 0;
+    const uint8_t rule_mr_offset = 4;
+
+    struct ixgbe_hw *hw =
+        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+    struct ixgbe_mirror_info *mr_info =
+        (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+    if (ixgbe_vmdq_mode_check(hw) < 0)
+        return (-ENOTSUP);
+
+    memset(&mr_info->mr_conf[rule_id], 0,
+        sizeof(struct rte_eth_vmdq_mirror_conf));
+
+    /* clear PFVMCTL register */
+    IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+    /* clear pool mask register */
+    IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+    IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+    /* clear vlan mask register */
+    IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+    IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+    return 0;
+}
+
+static struct rte_driver rte_ixgbe_driver = {
+    .type = PMD_PDEV,
+    .init = rte_ixgbe_pmd_init,
+};
+
+static struct rte_driver rte_ixgbevf_driver = {
+    .type = PMD_PDEV,
+    .init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
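
For context, a minimal usage sketch (not part of the patch above) of the RSS RETA ops this diff wires into ixgbe_eth_dev_ops. It assumes the generic rte_eth_dev_rss_reta_update() wrapper of this DPDK generation, which dispatches to the reta_update dev_op, and it relies only on the struct rte_eth_rss_reta fields visible in the diff (mask_lo, mask_hi, reta[], with ETH_RSS_RETA_NUM_ENTRIES = 128); names should be checked against the headers of the matching release before reuse.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: spread RETA entries 0..63 across queues 0 and 1. */
static int
example_reta_setup(uint8_t port_id)
{
    struct rte_eth_rss_reta reta_conf;
    int i;

    memset(&reta_conf, 0, sizeof(reta_conf));
    reta_conf.mask_lo = ~0ULL;  /* update entries 0..63 */
    reta_conf.mask_hi = 0;      /* leave entries 64..127 untouched */

    for (i = 0; i < 64; i++)
        reta_conf.reta[i] = i % 2;

    /* On an ixgbe port this ends up in ixgbe_dev_rss_reta_update(). */
    return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}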