X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=1cb85b367bba611c59cad2761eb279c42189ad3c;hb=5e59e39a1db781e2f04f76f5b651b040e171e5ba;hp=504e951595ff51b4548ef61f3b5607163913ddaa;hpb=e261265e42a15164aa649521227f7c2c4740c703;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 504e951595..1cb85b367b 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include 
@@ -48,20 +19,21 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
+#ifdef RTE_LIBRTE_SECURITY
+#include 
+#endif
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -93,6 +65,9 @@
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
+/*Default value of Max Rx Queue*/
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr.
*/ @@ -170,13 +145,14 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev); static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); static void ixgbe_dev_close(struct rte_eth_dev *dev); +static int ixgbe_dev_reset(struct rte_eth_dev *dev); static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); static int ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); -static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, +static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n); @@ -219,7 +195,7 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); -static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); @@ -240,7 +216,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); -static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); @@ -262,18 +238,21 @@ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbevf_dev_configure(struct rte_eth_dev *dev); static int ixgbevf_dev_start(struct rte_eth_dev *dev); +static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); static void ixgbevf_dev_stop(struct rte_eth_dev *dev); static void ixgbevf_dev_close(struct rte_eth_dev *dev); +static int ixgbevf_dev_reset(struct rte_eth_dev *dev); static void ixgbevf_intr_disable(struct ixgbe_hw *hw); static void ixgbevf_intr_enable(struct ixgbe_hw *hw); -static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, +static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); -static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -302,9 +281,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); static void ixgbe_configure_msix(struct rte_eth_dev *dev); -static 
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate); - static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, uint32_t index, uint32_t pool); @@ -424,6 +400,9 @@ static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); (r) = (h)->bitmap[idx] >> bit & 1;\ } while (0) +int ixgbe_logtype_init; +int ixgbe_logtype_driver; + /* * The set of PCI devices this driver supports */ @@ -444,13 +423,8 @@ static const struct rte_pci_id pci_id_ixgbe_map[] = { { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) }, - { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, @@ -525,6 +499,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .dev_set_link_up = ixgbe_dev_set_link_up, .dev_set_link_down = ixgbe_dev_set_link_down, .dev_close = ixgbe_dev_close, + .dev_reset = ixgbe_dev_reset, .promiscuous_enable = ixgbe_dev_promiscuous_enable, .promiscuous_disable = ixgbe_dev_promiscuous_disable, .allmulticast_enable = ixgbe_dev_allmulticast_enable, @@ -597,6 +572,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, + .tm_ops_get = ixgbe_tm_ops_get, }; /* @@ -607,13 +583,14 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .dev_configure = ixgbevf_dev_configure, .dev_start = ixgbevf_dev_start, .dev_stop = ixgbevf_dev_stop, - .link_update = ixgbe_dev_link_update, + .link_update = ixgbevf_dev_link_update, .stats_get = ixgbevf_dev_stats_get, .xstats_get = ixgbevf_dev_xstats_get, .stats_reset = ixgbevf_dev_stats_reset, .xstats_reset = ixgbevf_dev_stats_reset, .xstats_get_names = ixgbevf_dev_xstats_get_names, .dev_close = ixgbevf_dev_close, + .dev_reset = ixgbevf_dev_reset, .allmulticast_enable = ixgbevf_dev_allmulticast_enable, .allmulticast_disable = ixgbevf_dev_allmulticast_disable, .dev_infos_get = ixgbevf_dev_info_get, @@ -809,58 +786,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ sizeof(rte_ixgbevf_stats_strings[0])) -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. 
- */ -static inline int -rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - /* * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. */ @@ -1170,7 +1095,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) } rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; /* Vendor and Device ID need to be set before init of shared code */ hw->device_id = pci_dev->id.device_id; @@ -1196,6 +1120,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Unlock any pending hardware semaphore */ ixgbe_swfw_lock_reset(hw); +#ifdef RTE_LIBRTE_SECURITY + /* Initialize security_ctx only for primary process*/ + if (ixgbe_ipsec_ctx_create(eth_dev)) + return -ENOMEM; +#endif + /* Initialize DCB configuration*/ memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); ixgbe_dcb_init(hw, dcb_config); @@ -1338,16 +1268,15 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* initialize l2 tunnel filter list & hash */ ixgbe_l2_tn_filter_init(eth_dev); - TAILQ_INIT(&filter_ntuple_list); - TAILQ_INIT(&filter_ethertype_list); - TAILQ_INIT(&filter_syn_list); - TAILQ_INIT(&filter_fdir_list); - TAILQ_INIT(&filter_l2_tunnel_list); - TAILQ_INIT(&ixgbe_flow_list); + /* initialize flow filter lists */ + ixgbe_filterlist_init(); /* initialize bandwidth configuration info */ memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); + /* initialize Traffic Manager configuration */ + ixgbe_tm_conf_init(eth_dev); + return 0; } @@ -1401,6 +1330,13 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) /* clear all the filters list */ ixgbe_filterlist_flush(); + /* Remove all Traffic Manager configuration */ + ixgbe_tm_conf_uninit(eth_dev); + +#ifdef RTE_LIBRTE_SECURITY + rte_free(eth_dev->security_ctx); +#endif + return 0; } @@ -1480,7 +1416,7 @@ static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) TAILQ_INIT(&fdir_info->fdir_list); snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, - "fdir_%s", eth_dev->data->name); + "fdir_%s", eth_dev->device->name); fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); if (!fdir_info->hash_handle) { PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); @@ -1516,7 +1452,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) TAILQ_INIT(&l2_tn_info->l2_tn_list); snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, - "l2_tn_%s", eth_dev->data->name); + "l2_tn_%s", eth_dev->device->name); l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); if (!l2_tn_info->hash_handle) { PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); @@ -1627,7 +1563,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) } rte_eth_copy_pci_info(eth_dev, pci_dev); - 
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; @@ -1781,7 +1716,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) static struct rte_pci_driver rte_ixgbe_pmd = { .id_table = pci_id_ixgbe_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_IOVA_AS_VA, .probe = eth_ixgbe_pci_probe, .remove = eth_ixgbe_pci_remove, }; @@ -1803,7 +1739,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) */ static struct rte_pci_driver rte_ixgbevf_pmd = { .id_table = pci_id_ixgbevf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, .probe = eth_ixgbevf_pci_probe, .remove = eth_ixgbevf_pci_remove, }; @@ -1959,9 +1895,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) rxq = dev->data->rx_queues[queue]; if (on) - rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; + rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; else - rxq->vlan_flags = PKT_RX_VLAN_PKT; + rxq->vlan_flags = PKT_RX_VLAN; } static void @@ -2125,7 +2061,7 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) */ } -static void +static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { if (mask & ETH_VLAN_STRIP_MASK) { @@ -2148,6 +2084,8 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) else ixgbe_vlan_hw_extend_disable(dev); } + + return 0; } static void @@ -2179,9 +2117,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) return -EINVAL; } - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; - RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q; - + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = + IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = + pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; return 0; } @@ -2221,8 +2160,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) case ETH_MQ_RX_NONE: /* if nothing mq mode configure, use default scheme */ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; - if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; break; default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ /* SRIOV only works in VMDq enable mode */ @@ -2494,6 +2431,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) int status; uint16_t vf, idx; uint32_t *link_speeds; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -2502,8 +2441,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev) * - fixed speed: TODO implement */ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { - PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported", - dev->data->port_id); + PMD_INIT_LOG(ERR, + "Invalid link_speeds for port %u, fix speed not supported", + dev->data->port_id); return -EINVAL; } @@ -2566,9 +2506,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto error; } - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - ixgbe_vlan_offload_set(dev, mask); + err = ixgbe_vlan_offload_set(dev, mask); + if (err) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); + goto error; + } if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { /* Enable vlan filtering for VMDq */ @@ -2640,9 +2584,22 @@ ixgbe_dev_start(struct 
rte_eth_dev *dev) speed = 0x0; if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { - speed = (hw->mac.type != ixgbe_mac_82598EB) ? - IXGBE_LINK_SPEED_82599_AUTONEG : - IXGBE_LINK_SPEED_82598_AUTONEG; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + speed = IXGBE_LINK_SPEED_82598_AUTONEG; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + speed = IXGBE_LINK_SPEED_X550_AUTONEG; + break; + default: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + } } else { if (*link_speeds & ETH_LINK_SPEED_10G) speed |= IXGBE_LINK_SPEED_10GB_FULL; @@ -2661,7 +2618,9 @@ skip_link_setup: if (rte_intr_allow_others(intr_handle)) { /* check if lsc interrupt is enabled */ if (dev->data->dev_conf.intr_conf.lsc != 0) - ixgbe_dev_lsc_interrupt_setup(dev); + ixgbe_dev_lsc_interrupt_setup(dev, TRUE); + else + ixgbe_dev_lsc_interrupt_setup(dev, FALSE); ixgbe_dev_macsec_interrupt_setup(dev); } else { rte_intr_callback_unregister(intr_handle, @@ -2684,6 +2643,11 @@ skip_link_setup: ixgbe_l2_tunnel_conf(dev); ixgbe_filter_restore(dev); + if (tm_conf->root && !tm_conf->committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); + return 0; error: @@ -2706,6 +2670,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); @@ -2738,7 +2704,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* Clear recorded link status */ memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_write_link_status(dev, &link); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -2752,6 +2718,9 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + /* reset hierarchy commit */ + tm_conf->committed = false; } /* @@ -2815,7 +2784,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) } /* - * Reest and stop device. + * Reset and stop device. */ static void ixgbe_dev_close(struct rte_eth_dev *dev) @@ -2838,6 +2807,32 @@ ixgbe_dev_close(struct rte_eth_dev *dev) ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } +/* + * Reset PF device. + */ +static int +ixgbe_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to ixgbe PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. 
+ */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_ixgbe_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_ixgbe_dev_init(dev); + + return ret; +} + static void ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats *hw_stats, @@ -3050,7 +3045,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, /* * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c */ -static void +static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct ixgbe_hw *hw = @@ -3072,7 +3067,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) &total_qbrc, &total_qprc, &total_qprdc); if (stats == NULL) - return; + return -EINVAL; /* Fill out the rte_eth_stats statistics structure */ stats->ipackets = total_qprc; @@ -3103,6 +3098,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Tx Errors */ stats->oerrors = 0; + return 0; } static void @@ -3514,7 +3510,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, return IXGBEVF_NB_XSTATS; } -static void +static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) @@ -3523,12 +3519,13 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) ixgbevf_update_stats(dev); if (stats == NULL) - return; + return -EINVAL; stats->ipackets = hw_stats->vfgprc; stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; stats->obytes = hw_stats->vfgotc; + return 0; } static void @@ -3601,7 +3598,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP; /* * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV @@ -3638,6 +3636,13 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw->mac.type == ixgbe_mac_X550EM_a) dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; +#ifdef RTE_LIBRTE_SECURITY + if (dev->security_ctx) { + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; + } +#endif + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = IXGBE_DEFAULT_RX_PTHRESH, @@ -3674,6 +3679,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) hw->mac.type == ixgbe_mac_X550_vf) { dev_info->speed_capa |= ETH_LINK_SPEED_100M; } + if (hw->mac.type == ixgbe_mac_X550) { + dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; + dev_info->speed_capa |= ETH_LINK_SPEED_5G; + } } static const uint32_t * @@ -3706,6 +3715,12 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) return ptypes; + +#if defined(RTE_ARCH_X86) + if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) + return ptypes; +#endif return NULL; } @@ -3731,7 +3746,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | @@ -3765,25 +3781,133 @@ 
ixgbevf_dev_info_get(struct rte_eth_dev *dev, dev_info->tx_desc_lim = tx_desc_lim; } +static int +ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + int *link_up, int wait_to_complete) +{ + /** + * for a quick link status checking, wait_to_compelet == 0, + * skip PF link status checking + */ + bool no_pflink_check = wait_to_complete == 0; + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + uint32_t links_reg, in_msg; + int ret_val = 0; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + rte_delay_us(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + if (no_pflink_check) { + if (*speed == IXGBE_LINK_SPEED_UNKNOWN) + mac->get_link_status = true; + else + mac->get_link_status = false; + + goto out; + } + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + ret_val = -1; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + /* return 0 means link status changed, -1 means not changed */ static int -ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int link_up; int diag; u32 speed = 0; + int wait = 1; bool autoneg = false; + memset(&link, 0, sizeof(link)); link.link_status = ETH_LINK_DOWN; link.link_speed = 0; link.link_duplex = ETH_LINK_HALF_DUPLEX; - memset(&old, 0, 
sizeof(old)); - rte_ixgbe_dev_atomic_read_link_status(dev, &old); + link.link_autoneg = ETH_LINK_AUTONEG; hw->mac.get_link_status = true; @@ -3797,26 +3921,24 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) - diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); + wait = 0; + + if (vf) + diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); else - diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); + diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); if (diag != 0) { link.link_speed = ETH_SPEED_NUM_100M; link.link_duplex = ETH_LINK_FULL_DUPLEX; - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } if (link_up == 0) { - rte_ixgbe_dev_atomic_write_link_status(dev, &link); intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; - if (link.link_status == old.link_status) - return -1; - return 0; + return rte_eth_linkstatus_set(dev, &link); } + intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; @@ -3836,16 +3958,32 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) link.link_speed = ETH_SPEED_NUM_1G; break; + case IXGBE_LINK_SPEED_2_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_2_5G; + break; + + case IXGBE_LINK_SPEED_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_5G; + break; + case IXGBE_LINK_SPEED_10GB_FULL: link.link_speed = ETH_SPEED_NUM_10G; break; } - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; + return rte_eth_linkstatus_set(dev, &link); +} - return 0; +static int +ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); +} + +static int +ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); } static void @@ -3905,19 +4043,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) * * @param dev * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable. * * @return * - On success, zero. * - On failure, a negative value. 
*/ static int -ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) +ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); ixgbe_dev_link_status_print(dev); - intr->mask |= IXGBE_EICR_LSC; + if (on) + intr->mask |= IXGBE_EICR_LSC; + else + intr->mask &= ~IXGBE_EICR_LSC; return 0; } @@ -4027,8 +4170,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", (int)(dev->data->port_id), @@ -4063,7 +4206,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; - struct rte_eth_link link; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4080,9 +4222,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, } if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + struct rte_eth_link link; + /* get the link status before link update, for predicting later */ - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); ixgbe_dev_link_update(dev, 0); @@ -4155,7 +4298,8 @@ ixgbe_dev_interrupt_delayed_handler(void *param) ixgbe_dev_link_update(dev, 0); intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; ixgbe_dev_link_status_print(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); } if (intr->flags & IXGBE_FLAG_MACSEC) { @@ -4659,7 +4803,7 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) static bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) { - if (strcmp(dev->data->drv_name, drv->driver.name)) + if (strcmp(dev->device->driver->name, drv->driver.name)) return false; return true; @@ -4679,7 +4823,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) struct ixgbe_hw *hw; struct rte_eth_dev_info dev_info; uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + struct rte_eth_dev_data *dev_data = dev->data; ixgbe_dev_info_get(dev, &dev_info); @@ -4687,13 +4831,15 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) return -EINVAL; - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. 
*/ - if (!rx_conf->enable_scatter && + if (dev_data->dev_started && !dev_data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); return -EINVAL; + } hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); @@ -4795,7 +4941,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - hw->mac.ops.reset_hw(hw); + err = hw->mac.ops.reset_hw(hw); + if (err) { + PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); + return err; + } hw->mac.get_link_status = true; /* negotiate mailbox API version to use with the PF. */ @@ -4817,13 +4967,22 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) /* Set HW strip */ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - ixgbevf_vlan_offload_set(dev, mask); + err = ixgbevf_vlan_offload_set(dev, mask); + if (err) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); + ixgbe_dev_clear_queues(dev); + return err; + } ixgbevf_dev_rxtx_start(dev); /* check and configure queue intr-vector mapping */ - if (dev->data->dev_conf.intr_conf.rxq != 0) { - intr_vector = dev->data->nb_rx_queues; + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + /* According to datasheet, only vector 0/1/2 can be used, + * now only one vector is used for Rx queue + */ + intr_vector = 1; if (rte_intr_efd_enable(intr_handle, intr_vector)) return -1; } @@ -4840,6 +4999,15 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } ixgbevf_configure_msix(dev); + /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt + * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). + * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) + * is not cleared, it will fail when following rte_intr_enable( ) tries + * to map Rx queue interrupt to other VFIO vectors. + * So clear uio/vfio intr/evevnfd first to avoid failure. 
+ */ + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); /* Re-enable interrupt for VF */ @@ -4902,6 +5070,23 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) ixgbevf_remove_mac_addr(dev, 0); } +/* + * Reset VF device + */ +static int +ixgbevf_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = eth_ixgbevf_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_ixgbevf_dev_init(dev); + + return ret; +} + static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4977,7 +5162,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); } -static void +static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct ixgbe_hw *hw = @@ -4992,6 +5177,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) for (i = 0; i < hw->mac.max_rx_queues; i++) ixgbevf_vlan_strip_queue_set(dev, i, on); } + + return 0; } int @@ -5274,13 +5461,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); /* write pool mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), mp_msb); } /* write VLAN mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), mv_msb); @@ -5305,6 +5492,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + memset(&mr_info->mr_conf[rule_id], 0, sizeof(struct rte_eth_mirror_conf)); @@ -5330,9 +5520,12 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) uint32_t mask; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vec = IXGBE_MISC_VEC_ID; mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - mask |= (1 << IXGBE_MISC_VEC_ID); + if (rte_intr_allow_others(intr_handle)) + vec = IXGBE_RX_VEC_START; + mask |= (1 << vec); RTE_SET_USED(queue_id); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); @@ -5347,9 +5540,14 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) uint32_t mask; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = IXGBE_MISC_VEC_ID; mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - mask &= ~(1 << IXGBE_MISC_VEC_ID); + if (rte_intr_allow_others(intr_handle)) + vec = IXGBE_RX_VEC_START; + mask &= ~(1 << vec); RTE_SET_USED(queue_id); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); @@ -5462,7 +5660,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, tmp |= (msix_vector << (8 * (queue & 0x3))); IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); } else if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550)) { if (direction == -1) { /* other causes */ idx = ((queue & 1) * 8); @@ -5490,6 +5689,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t q_idx; uint32_t vector_idx = 
IXGBE_MISC_VEC_ID; + uint32_t base = IXGBE_MISC_VEC_ID; /* Configure VF other cause ivar */ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); @@ -5500,6 +5700,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) if (!rte_intr_dp_is_en(intr_handle)) return; + if (rte_intr_allow_others(intr_handle)) { + base = IXGBE_RX_VEC_START; + vector_idx = IXGBE_RX_VEC_START; + } + /* Configure all RX queues of VF */ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { /* Force all queue use vector 0, @@ -5507,6 +5712,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) */ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); intr_handle->intr_vec[q_idx] = vector_idx; + if (vector_idx < base + intr_handle->nb_efd - 1) + vector_idx++; } } @@ -5570,6 +5777,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: + case ixgbe_mac_X550: ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); break; default: @@ -5587,8 +5795,9 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); } -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate) +int +ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t rf_dec, rf_int; @@ -6131,7 +6340,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, sizeof(struct ixgbe_5tuple_filter), 0); if (filter == NULL) return -ENOMEM; - (void)rte_memcpy(&filter->filter_info, + rte_memcpy(&filter->filter_info, &filter_5tuple, sizeof(struct ixgbe_5tuple_filter_info)); filter->queue = ntuple_filter->queue; @@ -6540,9 +6749,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) uint32_t shift = 0; /* Get current link speed. 
*/ - memset(&link, 0, sizeof(link)); ixgbe_dev_link_update(dev, 1); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { case ETH_SPEED_NUM_100M: @@ -6971,6 +7179,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); struct ixgbe_dcb_tc_config *tc; + struct rte_eth_dcb_tc_queue_mapping *tc_queue; + uint8_t nb_tcs; uint8_t i, j; if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) @@ -6978,19 +7188,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, else dcb_info->nb_tcs = 1; + tc_queue = &dcb_info->tc_queue; + nb_tcs = dcb_info->nb_tcs; + if (dcb_config->vt_mode) { /* vt is enabled*/ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; - for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { - for (j = 0; j < dcb_info->nb_tcs; j++) { - dcb_info->tc_queue.tc_rxq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; - dcb_info->tc_queue.tc_txq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; + if (RTE_ETH_DEV_SRIOV(dev).active > 0) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[0][j].base = j; + tc_queue->tc_rxq[0][j].nb_queue = 1; + tc_queue->tc_txq[0][j].base = j; + tc_queue->tc_txq[0][j].nb_queue = 1; + } + } else { + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_rxq[i][j].nb_queue = 1; + tc_queue->tc_txq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_txq[i][j].nb_queue = 1; + } } } } else { /* vt is disabled*/ @@ -7347,7 +7569,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, if (!node) return -ENOMEM; - (void)rte_memcpy(&node->key, + rte_memcpy(&node->key, &key, sizeof(struct ixgbe_l2_tn_key)); node->pool = l2_tunnel->pool; @@ -7863,12 +8085,17 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); u32 in_msg = 0; - if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) - return; + /* peek the message first */ + in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); /* PF reset VF event */ - if (in_msg == IXGBE_PF_CONTROL_MSG) - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); + if (in_msg == IXGBE_PF_CONTROL_MSG) { + /* dummy mbx read to ack pf */ + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + } } static int @@ -8036,6 +8263,18 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) } } +/* restore rss filter */ +static inline void +ixgbe_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->rss_info.num) + ixgbe_config_rss_filter(dev, + &filter_info->rss_info, TRUE); +} + static int ixgbe_filter_restore(struct rte_eth_dev *dev) { @@ -8044,6 +8283,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev) ixgbe_syn_filter_restore(dev); ixgbe_fdir_filter_restore(dev); ixgbe_l2_tn_filter_restore(dev); + ixgbe_rss_filter_restore(dev); return 0; } @@ -8141,3 +8381,15 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci"); RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, 
pci_id_ixgbevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(ixgbe_init_log);
+static void
+ixgbe_init_log(void)
+{
+	ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+	if (ixgbe_logtype_init >= 0)
+		rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+	ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+	if (ixgbe_logtype_driver >= 0)
+		rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+}
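
The hunk above registers two dynamic log types, "pmd.net.ixgbe.init" and "pmd.net.ixgbe.driver", in a constructor and defaults both to NOTICE. As a minimal sketch outside the patch itself, an application could raise these levels at runtime using the same rte_log_register()/rte_log_set_level() calls seen in the diff; the helper name enable_ixgbe_debug_logs() is hypothetical, not part of the driver:

#include <rte_log.h>

/* Hypothetical helper: raise the ixgbe PMD log types registered above to
 * DEBUG. rte_log_register() returns the id of an already-registered name,
 * so this works whether or not the PMD constructor has run first.
 */
static void
enable_ixgbe_debug_logs(void)
{
	int lt;

	lt = rte_log_register("pmd.net.ixgbe.init");
	if (lt >= 0)
		rte_log_set_level(lt, RTE_LOG_DEBUG);

	lt = rte_log_register("pmd.net.ixgbe.driver");
	if (lt >= 0)
		rte_log_set_level(lt, RTE_LOG_DEBUG);
}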