X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=dbce7a80e96b5694dbeea66b88e2ec7150e56f5d;hb=a5d7c23ec5ca756a091867044fd10a4a9e46b823;hp=910b7dbbd244b9243b38669ae82c9a0492fcbba0;hpb=72dec9e37a846204f14b0fa079c4051881edb3c1;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 910b7dbbd2..dbce7a80e9 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include @@ -40,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -48,18 +20,22 @@ #include #include #include -#include +#include #include #include -#include +#include #include #include #include -#include -#include +#include +#include #include #include #include +#include +#ifdef RTE_LIBRTE_SECURITY +#include +#endif #include "ixgbe_logs.h" #include "base/ixgbe_api.h" @@ -85,12 +61,12 @@ */ #define IXGBE_FC_LO 0x40 -/* Default minimum inter-interrupt interval for EITR configuration */ -#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E - /* Timer value included in XOFF frames. */ #define IXGBE_FC_PAUSE 0x680 +/*Default value of Max Rx Queue*/ +#define IXGBE_MAX_RX_QUEUE_NUM 128 + #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ #define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ #define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */ @@ -123,8 +99,6 @@ #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0])) -#define IXGBE_HKEY_MAX_INDEX 10 - /* Additional timesync values. 
*/ #define NSEC_PER_SEC 1000000000L #define IXGBE_INCVAL_10GB 0x66666666 @@ -140,7 +114,6 @@ #define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000 #define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000 -#define DEFAULT_ETAG_ETYPE 0x893f #define IXGBE_ETAG_ETYPE 0x00005084 #define IXGBE_ETAG_ETYPE_MASK 0x0000ffff #define IXGBE_ETAG_ETYPE_VALID 0x80000000 @@ -150,43 +123,70 @@ #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_QDE_STRIP_TAG 0x00000004 +#define IXGBE_VTEICR_MASK 0x07 -enum ixgbevf_xcast_modes { - IXGBEVF_XCAST_MODE_NONE = 0, - IXGBEVF_XCAST_MODE_MULTI, - IXGBEVF_XCAST_MODE_ALLMULTI, +#define IXGBE_EXVET_VET_EXT_SHIFT 16 +#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 + +#define IXGBEVF_DEVARG_PFLINK_FULLCHK "pflink_fullchk" + +static const char * const ixgbevf_valid_arguments[] = { + IXGBEVF_DEVARG_PFLINK_FULLCHK, + NULL }; -static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); +static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev); +static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev); +static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev); +static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev); static int ixgbe_dev_configure(struct rte_eth_dev *dev); static int ixgbe_dev_start(struct rte_eth_dev *dev); static void ixgbe_dev_stop(struct rte_eth_dev *dev); static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev); static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev); static void ixgbe_dev_close(struct rte_eth_dev *dev); -static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); -static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); -static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); -static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int ixgbe_dev_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); static int ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); -static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, +static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); -static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); -static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); + struct rte_eth_xstat *xstats, unsigned n); +static int +ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n); +static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int size); +static int ixgbevf_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit); +static 
int ixgbe_dev_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit); static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx); -static void ixgbe_dev_info_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); -static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info); +static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size); +static int ixgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); +static int ixgbevf_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, @@ -198,7 +198,10 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on); static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); -static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, + int mask); +static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask); +static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue); static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev); @@ -219,37 +222,46 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); -static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on); +static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); -static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, - void *param); +static void ixgbe_dev_interrupt_handler(void *param); static void ixgbe_dev_interrupt_delayed_handler(void *param); -static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - uint32_t index, uint32_t pool); +static void ixgbe_dev_setup_link_alarm_handler(void *param); + +static int ixgbe_add_rar(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); -static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr); -static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config); +static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); +static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); +static bool is_device_supported(struct rte_eth_dev *dev, + struct rte_pci_driver *drv); /* For Virtual Function support */ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev); static int 
ixgbevf_dev_configure(struct rte_eth_dev *dev); static int ixgbevf_dev_start(struct rte_eth_dev *dev); +static int ixgbevf_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); static void ixgbevf_dev_stop(struct rte_eth_dev *dev); static void ixgbevf_dev_close(struct rte_eth_dev *dev); -static void ixgbevf_intr_disable(struct ixgbe_hw *hw); -static void ixgbevf_intr_enable(struct ixgbe_hw *hw); -static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, +static int ixgbevf_dev_reset(struct rte_eth_dev *dev); +static void ixgbevf_intr_disable(struct rte_eth_dev *dev); +static void ixgbevf_intr_enable(struct rte_eth_dev *dev); +static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); -static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); +static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on); -static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); +static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask); +static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on); static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id); @@ -258,19 +270,15 @@ static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); static void ixgbevf_configure_msix(struct rte_eth_dev *dev); -static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); -static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev); +static int ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev); +static int ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static int ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); /* For Eth VMDQ APIs support */ static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct - ether_addr* mac_addr,uint8_t on); -static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on); -static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, - uint16_t rx_mask, uint8_t on); -static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); -static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); -static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask,uint8_t vlan_on); + rte_ether_addr * mac_addr, uint8_t on); +static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on); @@ -284,20 +292,12 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); static void ixgbe_configure_msix(struct rte_eth_dev *dev); -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate); -static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, - uint16_t tx_rate, uint64_t q_msk); - -static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr, - uint32_t index, uint32_t pool); +static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev, + struct 
rte_ether_addr *mac_addr, + uint32_t index, uint32_t pool); static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index); -static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *mac_addr); -static int ixgbe_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, - bool add); +static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr); static int ixgbe_syn_filter_get(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter); static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev, @@ -307,17 +307,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, struct ixgbe_5tuple_filter *filter); static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, struct ixgbe_5tuple_filter *filter); -static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *filter, - bool add); static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *filter); -static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, - bool add); static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg); @@ -330,7 +324,7 @@ static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu); static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, - struct ether_addr *mc_addr_set, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr); static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info); @@ -344,6 +338,11 @@ static int ixgbe_get_eeprom(struct rte_eth_dev *dev, static int ixgbe_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom); +static int ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo); +static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info); + static int ixgbevf_get_reg_length(struct rte_eth_dev *dev); static int ixgbevf_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs); @@ -360,6 +359,7 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *timestamp); static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *timestamp); +static void ixgbevf_dev_interrupt_handler(void *param); static int ixgbe_dev_l2_tunnel_eth_type_conf (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); @@ -376,6 +376,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct rte_eth_udp_tunnel *udp_tunnel); +static int ixgbe_filter_restore(struct rte_eth_dev *dev); +static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev); /* * Define VF Stats MACRO for Non "cleared on read" register @@ -396,45 +398,110 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, last = latest; \ } -#define IXGBE_SET_HWSTRIP(h, q) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_SET_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] |= 1 << bit;\ } while (0) -#define 
IXGBE_CLEAR_HWSTRIP(h, q) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_CLEAR_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] &= ~(1 << bit);\ } while (0) -#define IXGBE_GET_HWSTRIP(h, q, r) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_GET_HWSTRIP(h, q, r) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (r) = (h)->bitmap[idx] >> bit & 1;\ } while (0) +int ixgbe_logtype_init; +int ixgbe_logtype_driver; + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX +int ixgbe_logtype_rx; +#endif +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX +int ixgbe_logtype_tx; +#endif +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE +int ixgbe_logtype_tx_free; +#endif + /* * The set of PCI devices this driver supports */ static const struct rte_pci_id pci_id_ixgbe_map[] = { - -#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" - -{ .vendor_id = 0, /* sentinel */ }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) 
}, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_XFI) }, +#ifdef RTE_LIBRTE_IXGBE_BYPASS + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, +#endif + { .vendor_id = 0, /* sentinel */ }, }; - /* * The set of PCI devices this driver supports (for 82599 VF) */ static const struct rte_pci_id pci_id_ixgbevf_map[] = { - -#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" -{ .vendor_id = 0, /* sentinel */ }, - + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, + { .vendor_id = 0, /* sentinel */ }, }; static const struct rte_eth_desc_lim rx_desc_lim = { @@ -447,6 +514,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = { .nb_max = IXGBE_MAX_RING_DESC, .nb_min = IXGBE_MIN_RING_DESC, .nb_align = IXGBE_TXD_ALIGN, + .nb_seg_max = IXGBE_TX_MAX_SEG, + .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, }; static const struct eth_dev_ops ixgbe_eth_dev_ops = { @@ -456,6 +525,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .dev_set_link_up = ixgbe_dev_set_link_up, .dev_set_link_down = ixgbe_dev_set_link_down, .dev_close = ixgbe_dev_close, + .dev_reset = ixgbe_dev_reset, .promiscuous_enable = ixgbe_dev_promiscuous_enable, .promiscuous_disable = ixgbe_dev_promiscuous_disable, .allmulticast_enable = ixgbe_dev_allmulticast_enable, @@ -463,10 +533,15 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .link_update = ixgbe_dev_link_update, .stats_get = ixgbe_dev_stats_get, .xstats_get = ixgbe_dev_xstats_get, + .xstats_get_by_id = ixgbe_dev_xstats_get_by_id, .stats_reset = ixgbe_dev_stats_reset, .xstats_reset = ixgbe_dev_xstats_reset, + .xstats_get_names = ixgbe_dev_xstats_get_names, + .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id, .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, + 
.fw_version_get = ixgbe_fw_version_get, .dev_infos_get = ixgbe_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, .mtu_set = ixgbe_dev_mtu_set, .vlan_filter_set = ixgbe_vlan_filter_set, .vlan_tpid_set = ixgbe_vlan_tpid_set, @@ -482,6 +557,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .rx_queue_release = ixgbe_dev_rx_queue_release, .rx_queue_count = ixgbe_dev_rx_queue_count, .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, + .rx_descriptor_status = ixgbe_dev_rx_descriptor_status, + .tx_descriptor_status = ixgbe_dev_tx_descriptor_status, .tx_queue_setup = ixgbe_dev_tx_queue_setup, .tx_queue_release = ixgbe_dev_tx_queue_release, .dev_led_on = ixgbe_dev_led_on, @@ -496,25 +573,9 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, .mirror_rule_set = ixgbe_mirror_rule_set, .mirror_rule_reset = ixgbe_mirror_rule_reset, - .set_vf_rx_mode = ixgbe_set_pool_rx_mode, - .set_vf_rx = ixgbe_set_pool_rx, - .set_vf_tx = ixgbe_set_pool_tx, - .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter, .set_queue_rate_limit = ixgbe_set_queue_rate_limit, - .set_vf_rate_limit = ixgbe_set_vf_rate_limit, .reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, -#ifdef RTE_NIC_BYPASS - .bypass_init = ixgbe_bypass_init, - .bypass_state_set = ixgbe_bypass_state_store, - .bypass_state_show = ixgbe_bypass_state_show, - .bypass_event_set = ixgbe_bypass_event_store, - .bypass_event_show = ixgbe_bypass_event_show, - .bypass_wd_timeout_set = ixgbe_bypass_wd_timeout_store, - .bypass_wd_timeout_show = ixgbe_bypass_wd_timeout_show, - .bypass_ver_show = ixgbe_bypass_ver_show, - .bypass_wd_reset = ixgbe_bypass_wd_reset, -#endif /* RTE_NIC_BYPASS */ .rss_hash_update = ixgbe_dev_rss_hash_update, .rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get, .filter_ctrl = ixgbe_dev_filter_ctrl, @@ -525,11 +586,12 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .timesync_disable = ixgbe_timesync_disable, .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, - .get_reg_length = ixgbe_get_reg_length, .get_reg = ixgbe_get_regs, .get_eeprom_length = ixgbe_get_eeprom_length, .get_eeprom = ixgbe_get_eeprom, .set_eeprom = ixgbe_set_eeprom, + .get_module_info = ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, .get_dcb_info = ixgbe_dev_get_dcb_info, .timesync_adjust_time = ixgbe_timesync_adjust_time, .timesync_read_time = ixgbe_timesync_read_time, @@ -538,6 +600,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .l2_tunnel_offload_set = ixgbe_dev_l2_tunnel_offload_set, .udp_tunnel_port_add = ixgbe_dev_udp_tunnel_port_add, .udp_tunnel_port_del = ixgbe_dev_udp_tunnel_port_del, + .tm_ops_get = ixgbe_tm_ops_get, }; /* @@ -548,15 +611,20 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .dev_configure = ixgbevf_dev_configure, .dev_start = ixgbevf_dev_start, .dev_stop = ixgbevf_dev_stop, - .link_update = ixgbe_dev_link_update, + .link_update = ixgbevf_dev_link_update, .stats_get = ixgbevf_dev_stats_get, .xstats_get = ixgbevf_dev_xstats_get, .stats_reset = ixgbevf_dev_stats_reset, .xstats_reset = ixgbevf_dev_stats_reset, + .xstats_get_names = ixgbevf_dev_xstats_get_names, .dev_close = ixgbevf_dev_close, + .dev_reset = ixgbevf_dev_reset, + .promiscuous_enable = ixgbevf_dev_promiscuous_enable, + .promiscuous_disable = ixgbevf_dev_promiscuous_disable, .allmulticast_enable = ixgbevf_dev_allmulticast_enable, .allmulticast_disable 
= ixgbevf_dev_allmulticast_disable, .dev_infos_get = ixgbevf_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, .mtu_set = ixgbevf_dev_set_mtu, .vlan_filter_set = ixgbevf_vlan_filter_set, .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, @@ -564,6 +632,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .rx_queue_setup = ixgbe_dev_rx_queue_setup, .rx_queue_release = ixgbe_dev_rx_queue_release, .rx_descriptor_done = ixgbe_dev_rx_descriptor_done, + .rx_descriptor_status = ixgbe_dev_rx_descriptor_status, + .tx_descriptor_status = ixgbe_dev_tx_descriptor_status, .tx_queue_setup = ixgbe_dev_tx_queue_setup, .tx_queue_release = ixgbe_dev_tx_queue_release, .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable, @@ -574,7 +644,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .rxq_info_get = ixgbe_rxq_info_get, .txq_info_get = ixgbe_txq_info_get, .mac_addr_set = ixgbevf_set_default_mac_addr, - .get_reg_length = ixgbevf_get_reg_length, .get_reg = ixgbevf_get_regs, .reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, @@ -672,6 +741,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ sizeof(rte_ixgbe_stats_strings[0])) +/* MACsec statistics */ +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { + {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, + out_pkts_untagged)}, + {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, + out_pkts_encrypted)}, + {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, + out_pkts_protected)}, + {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, + out_octets_encrypted)}, + {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, + out_octets_protected)}, + {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, + in_pkts_untagged)}, + {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, + in_pkts_badtag)}, + {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, + in_pkts_nosci)}, + {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, + in_pkts_unknownsci)}, + {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, + in_octets_decrypted)}, + {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, + in_octets_validated)}, + {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, + in_pkts_unchecked)}, + {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, + in_pkts_delayed)}, + {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, + in_pkts_late)}, + {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, + in_pkts_ok)}, + {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, + in_pkts_invalid)}, + {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, + in_pkts_notvalid)}, + {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, + in_pkts_unusedsa)}, + {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, + in_pkts_notusingsa)}, +}; + +#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ + sizeof(rte_ixgbe_macsec_strings[0])) + /* Per-queue statistics */ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, @@ -682,6 +796,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ sizeof(rte_ixgbe_rxq_strings[0])) +#define IXGBE_NB_RXQ_PRIO_VALUES 8 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { 
{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, @@ -692,6 +807,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ sizeof(rte_ixgbe_txq_strings[0])) +#define IXGBE_NB_TXQ_PRIO_VALUES 8 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, @@ -700,58 +816,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \ sizeof(rte_ixgbevf_stats_strings[0])) -/** - * Atomically reads the link status information from global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = link; - struct rte_eth_link *src = &(dev->data->dev_link); - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - -/** - * Atomically writes the link status information into global - * structure rte_eth_dev. - * - * @param dev - * - Pointer to the structure rte_eth_dev to read from. - * - Pointer to the buffer to be saved with the link status. - * - * @return - * - On success, zero. - * - On failure, negative value. - */ -static inline int -rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, - struct rte_eth_link *link) -{ - struct rte_eth_link *dst = &(dev->data->dev_link); - struct rte_eth_link *src = link; - - if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, - *(uint64_t *)src) == 0) - return -1; - - return 0; -} - /* * This function is the same as ixgbe_is_sfp() in base/ixgbe.h. 
*/ @@ -785,6 +849,8 @@ ixgbe_pf_reset_hw(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); + if (status == IXGBE_ERR_SFP_NOT_PRESENT) + status = IXGBE_SUCCESS; return status; } @@ -898,8 +964,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", stat_mappings->rqsmr[n], n); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); - } - else { + } else { PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", stat_mappings->tqsm[n], n); IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); @@ -908,7 +973,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, } static void -ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) +ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) { struct ixgbe_stat_mapping_registers *stat_mappings = IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); @@ -926,7 +991,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) } static void -ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) +ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { uint8_t i; struct ixgbe_dcb_tc_config *tc; @@ -949,7 +1014,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) tc = &dcb_config->tc_config[0]; tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) { + for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; } @@ -1008,12 +1073,13 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) * It returns 0 on success. */ static int -eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) +eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused) { - struct rte_pci_device *pci_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); @@ -1021,6 +1087,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private); struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct ixgbe_bw_conf *bw_conf = + IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private); uint32_t ctrl_ext; uint16_t csum; int diag, i; @@ -1030,30 +1098,31 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &ixgbe_eth_dev_ops; eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; /* * For secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different * RX and TX function. 
*/ - if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { struct ixgbe_tx_queue *txq; /* TX queue function in primary, set by last queue initialized - * Tx queue may not initialized by primary process */ + * Tx queue may not initialized by primary process + */ if (eth_dev->data->tx_queues) { txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; ixgbe_set_tx_function(eth_dev, txq); } else { /* Use default TX function if we get here */ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " - "Using default TX function."); + "Using default TX function."); } ixgbe_set_rx_function(eth_dev); return 0; } - pci_dev = eth_dev->pci_dev; rte_eth_copy_pci_info(eth_dev, pci_dev); @@ -1064,26 +1133,40 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) hw->allow_unsupported_sfp = 1; /* Initialize the shared code (base driver) */ -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS diag = ixgbe_bypass_init_shared_code(hw); #else diag = ixgbe_init_shared_code(hw); -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ if (diag != IXGBE_SUCCESS) { PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); return -EIO; } + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + PMD_INIT_LOG(ERR, "\nERROR: " + "Firmware recovery mode detected. Limiting functionality.\n" + "Refer to the Intel(R) Ethernet Adapters and Devices " + "User Guide for details on firmware recovery mode."); + return -EIO; + } + /* pick up the PCI bus settings for reporting later */ ixgbe_get_bus_info(hw); /* Unlock any pending hardware semaphore */ ixgbe_swfw_lock_reset(hw); +#ifdef RTE_LIBRTE_SECURITY + /* Initialize security_ctx only for primary process*/ + if (ixgbe_ipsec_ctx_create(eth_dev)) + return -ENOMEM; +#endif + /* Initialize DCB configuration*/ memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); - ixgbe_dcb_init(hw,dcb_config); + ixgbe_dcb_init(hw, dcb_config); /* Get Hardware Flow Control setting */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; @@ -1101,11 +1184,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) return -EIO; } -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS diag = ixgbe_bypass_init_hw(hw); #else diag = ixgbe_init_hw(hw); -#endif /* RTE_NIC_BYPASS */ +#endif /* RTE_LIBRTE_IXGBE_BYPASS */ /* * Devices with copper phys will fail to initialise if ixgbe_init_hw() @@ -1122,13 +1205,16 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) diag = ixgbe_init_hw(hw); } + if (diag == IXGBE_ERR_SFP_NOT_PRESENT) + diag = IXGBE_SUCCESS; + if (diag == IXGBE_ERR_EEPROM_VERSION) { PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" - "LOM. Please be aware there may be issues associated " - "with your hardware."); + "LOM. 
Please be aware there may be issues associated " + "with your hardware."); PMD_INIT_LOG(ERR, "If you are experiencing problems " - "please contact your Intel or hardware representative " - "who provided you with this hardware."); + "please contact your Intel or hardware representative " + "who provided you with this hardware."); } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); if (diag) { @@ -1146,29 +1232,34 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) ixgbe_reset_qstat_mappings(hw); /* Allocate memory for storing MAC addresses */ - eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - hw->mac.num_rar_entries, 0); + eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", RTE_ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %u bytes needed to store " - "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } /* Copy the permanent MAC address */ - ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, + rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.perm_addr, ð_dev->data->mac_addrs[0]); /* Allocate memory for storing hash filter MAC addresses */ - eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - IXGBE_VMDQ_NUM_UC_MAC, 0); + eth_dev->data->hash_mac_addrs = rte_zmalloc( + "ixgbe", RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC, 0); if (eth_dev->data->hash_mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + "Failed to allocate %d bytes needed to store MAC addresses", + RTE_ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); return -ENOMEM; } + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. 
+ */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + /* initialize the vfta */ memset(shadow_vfta, 0, sizeof(*shadow_vfta)); @@ -1195,23 +1286,39 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) (int) hw->mac.type, (int) hw->phy.type); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", - eth_dev->data->port_id, pci_dev->id.vendor_id, - pci_dev->id.device_id); + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); - rte_intr_callback_register(&pci_dev->intr_handle, - ixgbe_dev_interrupt_handler, - (void *)eth_dev); + rte_intr_callback_register(intr_handle, + ixgbe_dev_interrupt_handler, eth_dev); /* enable uio/vfio intr/eventfd mapping */ - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_enable(intr_handle); /* enable support intr */ ixgbe_enable_intr(eth_dev); + /* initialize filter info */ + memset(filter_info, 0, + sizeof(struct ixgbe_filter_info)); + /* initialize 5tuple filter list */ TAILQ_INIT(&filter_info->fivetuple_list); - memset(filter_info->fivetuple_mask, 0, - sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + + /* initialize flow director filter list & hash */ + ixgbe_fdir_filter_init(eth_dev); + + /* initialize l2 tunnel filter list & hash */ + ixgbe_l2_tn_filter_init(eth_dev); + + /* initialize flow filter lists */ + ixgbe_filterlist_init(); + + /* initialize bandwidth configuration info */ + memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf)); + + /* initialize Traffic Manager configuration */ + ixgbe_tm_conf_init(eth_dev); return 0; } @@ -1219,44 +1326,149 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; - struct ixgbe_hw *hw; - PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + return 0; - hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - pci_dev = eth_dev->pci_dev; + ixgbe_dev_close(eth_dev); - if (hw->adapter_stopped == 0) - ixgbe_dev_close(eth_dev); + return 0; +} - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; +static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private); + struct ixgbe_5tuple_filter *p_5tuple; - /* Unlock any pending hardware semaphore */ - ixgbe_swfw_lock_reset(hw); + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) { + TAILQ_REMOVE(&filter_info->fivetuple_list, + p_5tuple, + entries); + rte_free(p_5tuple); + } + memset(filter_info->fivetuple_mask, 0, + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); - /* disable uio intr before callback unregister */ - rte_intr_disable(&(pci_dev->intr_handle)); - rte_intr_callback_unregister(&(pci_dev->intr_handle), - ixgbe_dev_interrupt_handler, (void *)eth_dev); + return 0; +} - /* uninitialize PF if max_vfs not zero */ - ixgbe_pf_host_uninit(eth_dev); +static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); + struct ixgbe_fdir_filter *fdir_filter; + + if (fdir_info->hash_map) + rte_free(fdir_info->hash_map); + if (fdir_info->hash_handle) + rte_hash_free(fdir_info->hash_handle); + + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) { + TAILQ_REMOVE(&fdir_info->fdir_list, + fdir_filter, + entries); + rte_free(fdir_filter); + } + + return 0; +} + +static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_l2_tn_info 
*l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); + struct ixgbe_l2_tn_filter *l2_tn_filter; + + if (l2_tn_info->hash_map) + rte_free(l2_tn_info->hash_map); + if (l2_tn_info->hash_handle) + rte_hash_free(l2_tn_info->hash_handle); + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, + l2_tn_filter, + entries); + rte_free(l2_tn_filter); + } - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; + return 0; +} + +static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_hw_fdir_info *fdir_info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private); + char fdir_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters fdir_hash_params = { + .name = fdir_hash_name, + .entries = IXGBE_MAX_FDIR_FILTER_NUM, + .key_len = sizeof(union ixgbe_atr_input), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; - rte_free(eth_dev->data->hash_mac_addrs); - eth_dev->data->hash_mac_addrs = NULL; + TAILQ_INIT(&fdir_info->fdir_list); + snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, + "fdir_%s", eth_dev->device->name); + fdir_info->hash_handle = rte_hash_create(&fdir_hash_params); + if (!fdir_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create fdir hash table!"); + return -EINVAL; + } + fdir_info->hash_map = rte_zmalloc("ixgbe", + sizeof(struct ixgbe_fdir_filter *) * + IXGBE_MAX_FDIR_FILTER_NUM, + 0); + if (!fdir_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for fdir hash map!"); + return -ENOMEM; + } + fdir_info->mask_added = FALSE; return 0; } +static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private); + char l2_tn_hash_name[RTE_HASH_NAMESIZE]; + struct rte_hash_parameters l2_tn_hash_params = { + .name = l2_tn_hash_name, + .entries = IXGBE_MAX_L2_TN_FILTER_NUM, + .key_len = sizeof(struct ixgbe_l2_tn_key), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), + }; + + TAILQ_INIT(&l2_tn_info->l2_tn_list); + snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE, + "l2_tn_%s", eth_dev->device->name); + l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params); + if (!l2_tn_info->hash_handle) { + PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!"); + return -EINVAL; + } + l2_tn_info->hash_map = rte_zmalloc("ixgbe", + sizeof(struct ixgbe_l2_tn_filter *) * + IXGBE_MAX_L2_TN_FILTER_NUM, + 0); + if (!l2_tn_info->hash_map) { + PMD_INIT_LOG(ERR, + "Failed to allocate memory for L2 TN hash map!"); + return -ENOMEM; + } + l2_tn_info->e_tag_en = FALSE; + l2_tn_info->e_tag_fwd_en = FALSE; + l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG; + + return 0; +} /* * Negotiate mailbox API version with the PF. * After reset API version is always set to the basic one (ixgbe_mbox_api_10). 
@@ -1271,6 +1483,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw) /* start with highest supported, proceed down */ static const enum ixgbe_pfvf_api_rev sup_ver[] = { + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@ -1284,7 +1497,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw) } static void -generate_random_mac_addr(struct ether_addr *mac_addr) +generate_random_mac_addr(struct rte_ether_addr *mac_addr) { uint64_t random; @@ -1293,12 +1506,51 @@ generate_random_mac_addr(struct ether_addr *mac_addr) mac_addr->addr_bytes[1] = 0x09; mac_addr->addr_bytes[2] = 0xC0; /* Force indication of locally assigned MAC address. */ - mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR; + mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; /* Generate the last 3 bytes of the MAC address with a random number. */ random = rte_rand(); memcpy(&mac_addr->addr_bytes[3], &random, 3); } +static int +devarg_handle_int(__rte_unused const char *key, const char *value, + void *extra_args) +{ + uint16_t *n = extra_args; + + if (value == NULL || extra_args == NULL) + return -EINVAL; + + *n = (uint16_t)strtoul(value, NULL, 0); + if (*n == USHRT_MAX && errno == ERANGE) + return -1; + + return 0; +} + +static void +ixgbevf_parse_devargs(struct ixgbe_adapter *adapter, + struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + uint16_t pflink_fullchk; + + if (devargs == NULL) + return; + + kvlist = rte_kvargs_parse(devargs->args, ixgbevf_valid_arguments); + if (kvlist == NULL) + return; + + if (rte_kvargs_count(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK) == 1 && + rte_kvargs_process(kvlist, IXGBEVF_DEVARG_PFLINK_FULLCHK, + devarg_handle_int, &pflink_fullchk) == 0 && + pflink_fullchk == 1) + adapter->pflink_fullchk = 1; + + rte_kvargs_free(kvlist); +} + /* * Virtual Function device init */ @@ -1307,14 +1559,16 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) { int diag; uint32_t tc, tcs; - struct rte_pci_device *pci_dev; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); - struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; PMD_INIT_FUNC_TRACE(); @@ -1324,14 +1578,29 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* for secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different - * RX function */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY){ - if (eth_dev->data->scattered_rx) - eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; + * RX function + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + struct ixgbe_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process + */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; + ixgbe_set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(NOTICE, + "No TX queues configured yet. 
Using default TX function."); + } + + ixgbe_set_rx_function(eth_dev); + return 0; } - pci_dev = eth_dev->pci_dev; + ixgbevf_parse_devargs(eth_dev->data->dev_private, + pci_dev->device.devargs); rte_eth_copy_pci_info(eth_dev, pci_dev); @@ -1359,7 +1628,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) ixgbevf_dev_stats_reset(eth_dev); /* Disable the interrupts for VF */ - ixgbevf_intr_disable(hw); + ixgbevf_intr_disable(eth_dev); hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */ diag = hw->mac.ops.reset_hw(hw); @@ -1371,7 +1640,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) */ if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) { PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); - return diag; + /* + * This error code will be propagated to the app by + * rte_eth_dev_reset, so use a public error code rather than + * the internal-only IXGBE_ERR_RESET_FAILED + */ + return -EAGAIN; } /* negotiate mailbox API version to use with the PF. */ @@ -1381,18 +1655,23 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) ixgbevf_get_queues(hw, &tcs, &tc); /* Allocate memory for storing MAC addresses */ - eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * - hw->mac.num_rar_entries, 0); + eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", RTE_ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %u bytes needed to store " - "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + "Failed to allocate %u bytes needed to store " + "MAC addresses", + RTE_ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } + /* Pass the information to the rte_eth_dev_close() that it should also + * release the private port resources. + */ + eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; + /* Generate a random MAC address, if none was assigned by PF. 
*/ - if (is_zero_ether_addr(perm_addr)) { + if (rte_is_zero_ether_addr(perm_addr)) { generate_random_mac_addr(perm_addr); diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1); if (diag) { @@ -1412,19 +1691,24 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) } /* Copy the permanent MAC address */ - ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); + rte_ether_addr_copy(perm_addr, ð_dev->data->mac_addrs[0]); /* reset the hardware with the new settings */ diag = hw->mac.ops.start_hw(hw); switch (diag) { - case 0: - break; + case 0: + break; - default: - PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); - return -EIO; + default: + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return -EIO; } + rte_intr_callback_register(intr_handle, + ixgbevf_dev_interrupt_handler, eth_dev); + rte_intr_enable(intr_handle); + ixgbevf_intr_enable(eth_dev); + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, "ixgbe_mac_82599_vf"); @@ -1437,104 +1721,128 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) { - struct ixgbe_hw *hw; - unsigned i; - PMD_INIT_FUNC_TRACE(); if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return -EPERM; + return 0; + + ixgbevf_dev_close(eth_dev); - hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + return 0; +} + +static int +eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_eth_dev *pf_ethdev; + struct rte_eth_devargs eth_da; + int i, retval; + + if (pci_dev->device.devargs) { + retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, + ð_da); + if (retval) + return retval; + } else + memset(ð_da, 0, sizeof(eth_da)); - if (hw->adapter_stopped == 0) - ixgbevf_dev_close(eth_dev); + retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, + sizeof(struct ixgbe_adapter), + eth_dev_pci_specific_init, pci_dev, + eth_ixgbe_dev_init, NULL); - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; + if (retval || eth_da.nb_representor_ports < 1) + return retval; - /* Disable the interrupts for VF */ - ixgbevf_intr_disable(hw); + pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (pf_ethdev == NULL) + return -ENODEV; - for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { - ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]); - eth_dev->data->rx_queues[i] = NULL; - } - eth_dev->data->nb_rx_queues = 0; + /* probe VF representor ports */ + for (i = 0; i < eth_da.nb_representor_ports; i++) { + struct ixgbe_vf_info *vfinfo; + struct ixgbe_vf_representor representor; - for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]); - eth_dev->data->tx_queues[i] = NULL; - } - eth_dev->data->nb_tx_queues = 0; + vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA( + pf_ethdev->data->dev_private); + if (vfinfo == NULL) { + PMD_DRV_LOG(ERR, + "no virtual functions supported by PF"); + break; + } + + representor.vf_id = eth_da.representor_ports[i]; + representor.switch_domain_id = vfinfo->switch_domain_id; + representor.pf_ethdev = pf_ethdev; - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; + /* representor port net_bdf_port */ + snprintf(name, sizeof(name), "net_%s_representor_%d", + pci_dev->device.name, + eth_da.representor_ports[i]); + + retval = rte_eth_dev_create(&pci_dev->device, 
name, + sizeof(struct ixgbe_vf_representor), NULL, NULL, + ixgbe_vf_representor_init, &representor); + + if (retval) + PMD_DRV_LOG(ERR, "failed to create ixgbe vf " + "representor %s.", name); + } return 0; } -static struct eth_driver rte_ixgbe_pmd = { - .pci_drv = { - .name = "rte_ixgbe_pmd", - .id_table = pci_id_ixgbe_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_DETACHABLE, - }, - .eth_dev_init = eth_ixgbe_dev_init, - .eth_dev_uninit = eth_ixgbe_dev_uninit, - .dev_private_size = sizeof(struct ixgbe_adapter), -}; +static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev) +{ + struct rte_eth_dev *ethdev; -/* - * virtual function driver struct - */ -static struct eth_driver rte_ixgbevf_pmd = { - .pci_drv = { - .name = "rte_ixgbevf_pmd", - .id_table = pci_id_ixgbevf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, - }, - .eth_dev_init = eth_ixgbevf_dev_init, - .eth_dev_uninit = eth_ixgbevf_dev_uninit, - .dev_private_size = sizeof(struct ixgbe_adapter), + ethdev = rte_eth_dev_allocated(pci_dev->device.name); + if (!ethdev) + return -ENODEV; + + if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) + return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit); + else + return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit); +} + +static struct rte_pci_driver rte_ixgbe_pmd = { + .id_table = pci_id_ixgbe_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = eth_ixgbe_pci_probe, + .remove = eth_ixgbe_pci_remove, }; -/* - * Driver initialization routine. - * Invoked once at EAL init time. - * Register itself as the [Poll Mode] Driver of PCI IXGBE devices. - */ -static int -rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused) +static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) { - PMD_INIT_FUNC_TRACE(); + return rte_eth_dev_pci_generic_probe(pci_dev, + sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init); +} - rte_eth_driver_register(&rte_ixgbe_pmd); - return 0; +static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev) +{ + return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit); } /* - * VF Driver initialization routine. - * Invoked one at EAL init time. - * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices. 
+ * virtual function driver struct */ -static int -rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - - rte_eth_driver_register(&rte_ixgbevf_pmd); - return 0; -} +static struct rte_pci_driver rte_ixgbevf_pmd = { + .id_table = pci_id_ixgbevf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = eth_ixgbevf_pci_probe, + .remove = eth_ixgbevf_pci_remove, +}; static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vfta; uint32_t vid_idx; @@ -1572,15 +1880,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret = 0; + uint32_t reg; + uint32_t qinq; + + qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + qinq &= IXGBE_DMATXCTL_GDV; switch (vlan_type) { case ETH_VLAN_TYPE_INNER: - /* Only the high 16-bits is valid */ - IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16); + if (qinq) { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } else { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Inner type is not supported" + " by single VLAN"); + } + break; + case ETH_VLAN_TYPE_OUTER: + if (qinq) { + /* Only the high 16-bits is valid */ + IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << + IXGBE_EXVET_VET_EXT_SHIFT); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } + break; default: ret = -EINVAL; - PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type); + PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); break; } @@ -1608,7 +1948,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vlnctrl; uint16_t i; @@ -1632,6 +1972,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) { struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; if (queue >= IXGBE_MAX_RX_QUEUE_NUM) return; @@ -1640,6 +1981,19 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) IXGBE_SET_HWSTRIP(hwstrip, queue); else IXGBE_CLEAR_HWSTRIP(hwstrip, queue); + + if (queue >= dev->data->nb_rx_queues) + return; + + rxq = dev->data->rx_queues[queue]; + + if (on) { + rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { + rxq->vlan_flags = PKT_RX_VLAN; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } } static void @@ -1656,12 +2010,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in 
RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); } @@ -1680,74 +2034,18 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); } -void -ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) -{ - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ctrl; - uint16_t i; - - PMD_INIT_FUNC_TRACE(); - - if (hw->mac.type == ixgbe_mac_82598EB) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - ctrl &= ~IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); - - /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); - } - } -} - -void -ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) -{ - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t ctrl; - uint16_t i; - - PMD_INIT_FUNC_TRACE(); - - if (hw->mac.type == ixgbe_mac_82598EB) { - ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - ctrl |= IXGBE_VLNCTRL_VME; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); - - /* record those setting for HW strip per queue */ - ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); - } - } -} - -static void -ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) +static void +ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1788,7 +2086,8 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) /* Clear pooling mode of PFVTCTL. It's required by X550. 
*/ if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) { + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); @@ -1800,29 +2099,109 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) */ } +void +ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + uint32_t ctrl; + uint16_t i; + struct ixgbe_rx_queue *rxq; + bool on; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } else { + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + ctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + } + } else { + /* + * Other 10G NIC, the VLAN strip can be setup + * per queue in RXDCTL + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); + if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) { + ctrl |= IXGBE_RXDCTL_VME; + on = TRUE; + } else { + ctrl &= ~IXGBE_RXDCTL_VME; + on = FALSE; + } + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); + + /* record those setting for HW strip per queue */ + ixgbe_vlan_hw_strip_bitmap_set(dev, i, on); + } + } +} + static void -ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask) { + uint16_t i; + struct rte_eth_rxmode *rxmode; + struct ixgbe_rx_queue *rxq; + if (mask & ETH_VLAN_STRIP_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_strip) - ixgbe_vlan_hw_strip_enable_all(dev); + rxmode = &dev->data->dev_conf.rxmode; + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else - ixgbe_vlan_hw_strip_disable_all(dev); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + } +} + +static int +ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask) +{ + struct rte_eth_rxmode *rxmode; + rxmode = &dev->data->dev_conf.rxmode; + + if (mask & ETH_VLAN_STRIP_MASK) { + ixgbe_vlan_hw_strip_config(dev); } if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ixgbe_vlan_hw_filter_enable(dev); else ixgbe_vlan_hw_filter_disable(dev); } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) ixgbe_vlan_hw_extend_enable(dev); else ixgbe_vlan_hw_extend_disable(dev); } + + return 0; +} + +static int +ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbe_vlan_offload_config(dev, mask); + + return 0; } static void @@ -1832,6 +2211,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); } 
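The VLAN offload helpers in the hunk above are driven entirely by the offload flags an application places in rxmode.offloads before the port is configured: DEV_RX_OFFLOAD_VLAN_STRIP leads ixgbe_vlan_offload_config() to set IXGBE_RXDCTL_VME per queue, and DEV_RX_OFFLOAD_VLAN_FILTER enables the VFTA-based filtering path. The following application-side sketch is illustrative only and is not part of this patch; the port id and queue counts are placeholders.

	/*
	 * Minimal sketch, assuming a single Rx/Tx queue and a valid port id:
	 * request the VLAN offloads that the ixgbe PMD code above acts on.
	 */
	#include <string.h>
	#include <rte_ethdev.h>

	static int
	enable_vlan_offloads(uint16_t port_id)
	{
		struct rte_eth_conf conf;

		memset(&conf, 0, sizeof(conf));
		/* VLAN strip is later programmed per queue via IXGBE_RXDCTL_VME;
		 * VLAN filter turns on the VFTA lookup (IXGBE_VLNCTRL_VFE).
		 */
		conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
				       DEV_RX_OFFLOAD_VLAN_FILTER;

		/* one Rx and one Tx queue assumed for brevity */
		return rte_eth_dev_configure(port_id, 1, 1, &conf);
	}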
@@ -1839,6 +2219,8 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) static int ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + switch (nb_rx_q) { case 1: case 2: @@ -1851,9 +2233,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) return -EINVAL; } - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; - RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q; - + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = + IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = + pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; return 0; } @@ -1861,13 +2244,16 @@ static int ixgbe_check_mq_mode(struct rte_eth_dev *dev) { struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t nb_rx_q = dev->data->nb_rx_queues; - uint16_t nb_tx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; if (RTE_ETH_DEV_SRIOV(dev).active != 0) { /* check multi-queue mode */ switch (dev_conf->rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); + break; case ETH_MQ_RX_VMDQ_DCB_RSS: /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ PMD_INIT_LOG(ERR, "SRIOV active," @@ -1890,8 +2276,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) case ETH_MQ_RX_NONE: /* if nothing mq mode configure, use default scheme */ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; - if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) - RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; break; default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ /* SRIOV only works in VMDq enable mode */ @@ -1903,11 +2287,9 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) switch (dev_conf->txmode.mq_mode) { case ETH_MQ_TX_VMDQ_DCB: - /* DCB VMDQ in SRIOV mode, not implement yet */ - PMD_INIT_LOG(ERR, "SRIOV is active," - " unsupported VMDQ mq_mode tx %d.", - dev_conf->txmode.mq_mode); - return -EINVAL; + PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; + break; default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; break; @@ -1917,11 +2299,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { PMD_INIT_LOG(ERR, "SRIOV is active," - " queue number must less equal to %d.", + " nb_rx_q=%d nb_tx_q=%d queue number" + " must be less than or equal to %d.", + nb_rx_q, nb_tx_q, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); return -EINVAL; } } else { + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" + " not supported."); + return -EINVAL; + } /* check configuration for vmdb+dcb mode */ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { const struct rte_eth_vmdq_dcb_conf *conf; @@ -1963,11 +2352,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { const struct rte_eth_dcb_rx_conf *conf; - if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->rx_adv_conf.dcb_rx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -1981,11 +2365,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->txmode.mq_mode 
== ETH_MQ_TX_DCB) { const struct rte_eth_dcb_tx_conf *conf; - if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->tx_adv_conf.dcb_tx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -1995,6 +2374,21 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } + + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) { + if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { + PMD_INIT_LOG(ERR, + "Neither VT nor DCB are enabled, " + "nb_tx_q > %d.", + IXGBE_NONE_MODE_TX_NB_QUEUES); + return -EINVAL; + } + } } return 0; } @@ -2004,8 +2398,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; int ret; PMD_INIT_FUNC_TRACE(); @@ -2030,6 +2423,102 @@ ixgbe_dev_configure(struct rte_eth_dev *dev) return 0; } +static void +ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t gpie; + + /* only set up it on X550EM_X */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_SDP0_GPIEN_X550EM_x; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + if (hw->phy.type == ixgbe_phy_x550em_ext_t) + intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; + } +} + +int +ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + struct rte_eth_link link; + uint8_t nb_q_per_pool; + uint32_t queue_stride; + uint32_t queue_idx, idx = 0, vf_idx; + uint32_t queue_end; + uint16_t total_rate = 0; + struct rte_pci_device *pci_dev; + int ret; + + pci_dev = RTE_ETH_DEV_TO_PCI(dev); + ret = rte_eth_link_get_nowait(dev->data->port_id, &link); + if (ret < 0) + return ret; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (tx_rate > link.link_speed) + return -EINVAL; + + if (q_msk == 0) + return 0; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + queue_idx = vf * queue_stride; + queue_end = queue_idx + nb_q_per_pool - 1; + if (queue_end >= hw->mac.max_tx_queues) + return -EINVAL; + + if (vfinfo) { + for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { + if (vf_idx == vf) + continue; + for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); + idx++) + total_rate += vfinfo[vf_idx].tx_rate[idx]; + } + } else { + return -EINVAL; + } + + /* Store tx_rate for this vf. */ + for (idx = 0; idx < nb_q_per_pool; idx++) { + if (((uint64_t)0x1 << idx) & q_msk) { + if (vfinfo[vf].tx_rate[idx] != tx_rate) + vfinfo[vf].tx_rate[idx] = tx_rate; + total_rate += tx_rate; + } + } + + if (total_rate > dev->data->dev_link.link_speed) { + /* Reset stored TX rate of the VF if it causes exceed + * link speed. 
+ */ + memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); + return -EINVAL; + } + + /* Set RTTBCNRC of each queue/pool for vf X */ + for (; queue_idx <= queue_end; queue_idx++) { + if (0x1 & q_msk) + ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); + q_msk = q_msk >> 1; + } + + return 0; +} + /* * Configure device link speed and setup link. * It returns 0 on success. @@ -2041,25 +2530,35 @@ ixgbe_dev_start(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; int err, link_up = 0, negotiate = 0; uint32_t speed = 0; + uint32_t allowed_speeds = 0; int mask = 0; int status; uint16_t vf, idx; + uint32_t *link_speeds; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); PMD_INIT_FUNC_TRACE(); - /* IXGBE devices don't support half duplex */ - if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) && - (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) { - PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu", - dev->data->dev_conf.link_duplex, - dev->data->port_id); + /* IXGBE devices don't support: + * - half duplex (checked afterwards for valid speeds) + * - fixed speed: TODO implement + */ + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(ERR, + "Invalid link_speeds for port %u, fix speed not supported", + dev->data->port_id); return -EINVAL; } + /* Stop the link setup handler before resetting the HW. */ + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + /* disable uio/vfio intr/eventfd mapping */ rte_intr_disable(intr_handle); @@ -2068,7 +2567,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) ixgbe_stop_adapter(hw); /* reinitialize adapter - * this calls reset and start */ + * this calls reset and start + */ status = ixgbe_pf_reset_hw(hw); if (status != 0) return -1; @@ -2078,11 +2578,18 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* configure PF module if SRIOV enabled */ ixgbe_pf_host_configure(dev); + ixgbe_dev_phy_intr_setup(dev); + /* check and configure queue intr-vector mapping */ if ((rte_intr_cap_multiple(intr_handle) || !RTE_ETH_DEV_SRIOV(dev).active) && dev->data->dev_conf.intr_conf.rxq != 0) { intr_vector = dev->data->nb_rx_queues; + if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { + PMD_INIT_LOG(ERR, "At most %d intr queues supported", + IXGBE_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } if (rte_intr_efd_enable(intr_handle, intr_vector)) return -1; } @@ -2093,7 +2600,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev) dev->data->nb_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec\n", dev->data->nb_rx_queues); + " intr_vec", dev->data->nb_rx_queues); return -ENOMEM; } } @@ -2111,16 +2618,57 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto error; } + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + err = ixgbe_vlan_offload_config(dev, mask); + if (err) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload"); + goto error; + } + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable vlan filtering for VMDq */ + ixgbe_vmdq_vlan_hw_filter_enable(dev); + } + + /* Configure DCB hw */ + ixgbe_configure_dcb(dev); + + if 
(dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = ixgbe_fdir_configure(dev); + if (err) + goto error; + } + + /* Restore vf rate limit */ + if (vfinfo != NULL) { + for (vf = 0; vf < pci_dev->max_vfs; vf++) + for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) + if (vfinfo[vf].tx_rate[idx] != 0) + ixgbe_set_vf_rate_limit( + dev, vf, + vfinfo[vf].tx_rate[idx], + 1 << idx); + } + + ixgbe_restore_statistics_mapping(dev); + err = ixgbe_dev_rxtx_start(dev); if (err < 0) { PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); goto error; } - /* Skip link setup if loopback mode is enabled for 82599. */ - if (hw->mac.type == ixgbe_mac_82599EB && - dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX) - goto skip_link_setup; + /* Skip link setup if loopback mode is enabled. */ + if (dev->data->dev_conf.lpbk_mode != 0) { + err = ixgbe_check_supported_loopback_mode(dev); + if (err < 0) { + PMD_INIT_LOG(ERR, "Unsupported loopback mode"); + goto error; + } else { + goto skip_link_setup; + } + } if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { err = hw->mac.ops.setup_sfp(hw); @@ -2145,32 +2693,62 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; - switch(dev->data->dev_conf.link_speed) { - case ETH_LINK_SPEED_AUTONEG: - speed = (hw->mac.type != ixgbe_mac_82598EB) ? - IXGBE_LINK_SPEED_82599_AUTONEG : - IXGBE_LINK_SPEED_82598_AUTONEG; - break; - case ETH_LINK_SPEED_100: - /* - * Invalid for 82598 but error will be detected by - * ixgbe_setup_link() - */ - speed = IXGBE_LINK_SPEED_100_FULL; - break; - case ETH_LINK_SPEED_1000: - speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case ETH_LINK_SPEED_10000: - speed = IXGBE_LINK_SPEED_10GB_FULL; + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G | + ETH_LINK_SPEED_10G; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + allowed_speeds = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; break; default: - PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu", - dev->data->dev_conf.link_speed, - dev->data->port_id); + allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G; + } + + link_speeds = &dev->data->dev_conf.link_speeds; + if (*link_speeds & ~allowed_speeds) { + PMD_INIT_LOG(ERR, "Invalid link setting"); goto error; } + speed = 0x0; + if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + speed = IXGBE_LINK_SPEED_82598_AUTONEG; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + break; + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + speed = IXGBE_LINK_SPEED_X550_AUTONEG; + break; + default: + speed = IXGBE_LINK_SPEED_82599_AUTONEG; + } + } else { + if (*link_speeds & ETH_LINK_SPEED_10G) + speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_5G) + speed |= IXGBE_LINK_SPEED_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_2_5G) + speed |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_1G) + speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_100M) + speed |= IXGBE_LINK_SPEED_100_FULL; + if (*link_speeds & ETH_LINK_SPEED_10M) + speed |= IXGBE_LINK_SPEED_10_FULL; + } + err = ixgbe_setup_link(hw, speed, link_up); if (err) goto error; @@ -2180,14 +2758,16 @@ skip_link_setup: if 
(rte_intr_allow_others(intr_handle)) { /* check if lsc interrupt is enabled */ if (dev->data->dev_conf.intr_conf.lsc != 0) - ixgbe_dev_lsc_interrupt_setup(dev); + ixgbe_dev_lsc_interrupt_setup(dev, TRUE); + else + ixgbe_dev_lsc_interrupt_setup(dev, FALSE); + ixgbe_dev_macsec_interrupt_setup(dev); } else { rte_intr_callback_unregister(intr_handle, - ixgbe_dev_interrupt_handler, - (void *)dev); + ixgbe_dev_interrupt_handler, dev); if (dev->data->dev_conf.intr_conf.lsc != 0) PMD_INIT_LOG(INFO, "lsc won't enable because of" - " no intr multiplex\n"); + " no intr multiplex"); } /* check if rxq interrupt is enabled */ @@ -2200,36 +2780,19 @@ skip_link_setup: /* resume enabled intr since hw reset */ ixgbe_enable_intr(dev); + ixgbe_l2_tunnel_conf(dev); + ixgbe_filter_restore(dev); - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ - ETH_VLAN_EXTEND_MASK; - ixgbe_vlan_offload_set(dev, mask); - - if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { - /* Enable vlan filtering for VMDq */ - ixgbe_vmdq_vlan_hw_filter_enable(dev); - } - - /* Configure DCB hw */ - ixgbe_configure_dcb(dev); - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { - err = ixgbe_fdir_configure(dev); - if (err) - goto error; - } - - /* Restore vf rate limit */ - if (vfinfo != NULL) { - for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) - for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) - if (vfinfo[vf].tx_rate[idx] != 0) - ixgbe_set_vf_rate_limit(dev, vf, - vfinfo[vf].tx_rate[idx], - 1 << idx); - } + if (tm_conf->root && !tm_conf->committed) + PMD_DRV_LOG(WARNING, + "please call hierarchy_commit() " + "before starting the port"); - ixgbe_restore_statistics_mapping(dev); + /* + * Update link status right before return, because it may + * start link configuration process in a separate thread. 
+ */ + ixgbe_dev_link_update(dev, 0); return 0; @@ -2246,18 +2809,24 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev) { struct rte_eth_link link; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; + struct ixgbe_tm_conf *tm_conf = + IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private); + + if (hw->adapter_stopped) + return; PMD_INIT_FUNC_TRACE(); + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + /* disable interrupts */ ixgbe_disable_intr(hw); @@ -2268,8 +2837,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* stop adapter */ ixgbe_stop_adapter(hw); - for (vf = 0; vfinfo != NULL && - vf < dev->pci_dev->max_vfs; vf++) + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { @@ -2288,18 +2856,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* Clear recorded link status */ memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - - /* Remove all ntuple filters of the device */ - for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); - p_5tuple != NULL; p_5tuple = p_5tuple_next) { - p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); - TAILQ_REMOVE(&filter_info->fivetuple_list, - p_5tuple, entries); - rte_free(p_5tuple); - } - memset(filter_info->fivetuple_mask, 0, - sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + rte_eth_linkstatus_set(dev, &link); if (!rte_intr_allow_others(intr_handle)) /* resume to the default handler */ @@ -2313,6 +2870,13 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + /* reset hierarchy commit */ + tm_conf->committed = false; + + adapter->rss_reta_updated = 0; + + hw->adapter_stopped = true; } /* @@ -2324,7 +2888,7 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type == ixgbe_mac_82599EB) { -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { /* Not suported in bypass mode */ PMD_INIT_LOG(ERR, "Set link up is not supported " @@ -2354,7 +2918,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type == ixgbe_mac_82599EB) { -#ifdef RTE_NIC_BYPASS +#ifdef RTE_LIBRTE_IXGBE_BYPASS if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { /* Not suported in bypass mode */ PMD_INIT_LOG(ERR, "Set link down is not supported " @@ -2376,20 +2940,23 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) } /* - * Reest and stop device. + * Reset and stop device. 
*/ static void ixgbe_dev_close(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int retries = 0; + int ret; PMD_INIT_FUNC_TRACE(); ixgbe_pf_reset_hw(hw); ixgbe_dev_stop(dev); - hw->adapter_stopped = 1; ixgbe_dev_free_queues(dev); @@ -2397,11 +2964,87 @@ ixgbe_dev_close(struct rte_eth_dev *dev) /* reprogram the RAR[0] in case user changed it. */ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* Unlock any pending hardware semaphore */ + ixgbe_swfw_lock_reset(hw); + + /* disable uio intr before callback unregister */ + rte_intr_disable(intr_handle); + + do { + ret = rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + } + rte_delay_ms(100); + } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); + + /* cancel the delay handler before remove dev */ + rte_eal_alarm_cancel(ixgbe_dev_interrupt_delayed_handler, dev); + + /* uninitialize PF if max_vfs not zero */ + ixgbe_pf_host_uninit(dev); + + /* remove all the fdir filters & hash */ + ixgbe_fdir_filter_uninit(dev); + + /* remove all the L2 tunnel filters & hash */ + ixgbe_l2_tn_filter_uninit(dev); + + /* Remove all ntuple filters of the device */ + ixgbe_ntuple_filter_uninit(dev); + + /* clear all the filters list */ + ixgbe_filterlist_flush(); + + /* Remove all Traffic Manager configuration */ + ixgbe_tm_conf_uninit(dev); + +#ifdef RTE_LIBRTE_SECURITY + rte_free(dev->security_ctx); +#endif + +} + +/* + * Reset PF device. + */ +static int +ixgbe_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + /* When a DPDK PMD PF begin to reset PF port, it should notify all + * its VF to make them align with it. The detailed notification + * mechanism is PMD specific. As to ixgbe PF, it is rather complex. + * To avoid unexpected behavior in VF, currently reset of PF with + * SR-IOV activation is not supported. It might be supported later. + */ + if (dev->data->sriov.active) + return -ENOTSUP; + + ret = eth_ixgbe_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_ixgbe_dev_init(dev, NULL); + + return ret; } static void ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats *hw_stats, + struct ixgbe_macsec_stats *macsec_stats, uint64_t *total_missed_rx, uint64_t *total_qbrc, uint64_t *total_qprc, uint64_t *total_qprdc) { @@ -2409,9 +3052,9 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, uint32_t delta_gprc = 0; unsigned i; /* Workaround for RX byte count not including CRC bytes when CRC -+ * strip is enabled. CRC bytes are removed from counters when crc_strip + * strip is enabled. CRC bytes are removed from counters when crc_strip * is disabled. 
-+ */ + */ int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & IXGBE_HLREG0_RXCRCSTRP); @@ -2421,8 +3064,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); for (i = 0; i < 8; i++) { - uint32_t mp; - mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* global total per queue */ hw_stats->mpc[i] += mp; /* Running comprehensive total for stats display */ @@ -2461,7 +3104,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->qbrc[i] += ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); if (crc_strip == 0) - hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; + hw_stats->qbrc[i] -= delta_qprc * RTE_ETHER_CRC_LEN; hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); hw_stats->qbtc[i] += @@ -2506,12 +3149,12 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); if (crc_strip == 0) - hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; + hw_stats->gorc -= delta_gprc * RTE_ETHER_CRC_LEN; uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); hw_stats->gptc += delta_gptc; - hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; - hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; + hw_stats->gotc -= delta_gptc * RTE_ETHER_CRC_LEN; + hw_stats->tor -= (hw_stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN; /* * Workaround: mprc hardware is incorrectly counting @@ -2541,7 +3184,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->gptc -= total; hw_stats->mptc -= total; hw_stats->ptc64 -= total; - hw_stats->gotc -= total * ETHER_MIN_LEN; + hw_stats->gotc -= total * RTE_ETHER_MIN_LEN; hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); @@ -2569,20 +3212,66 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, } /* Flow Director Stats registers */ - hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + hw_stats->fdirustat_add += IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) & 0xFFFF; + hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw, + IXGBE_FDIRUSTAT) >> 16) & 0xFFFF; + hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) & 0xFFFF; + hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw, + IXGBE_FDIRFSTAT) >> 16) & 0xFFFF; + } + /* MACsec Stats registers */ + macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); + macsec_stats->out_pkts_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); + macsec_stats->out_pkts_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); + macsec_stats->out_octets_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); + macsec_stats->out_octets_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); + macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); + macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); + macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); + macsec_stats->in_pkts_unknownsci += + IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); + macsec_stats->in_octets_decrypted += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); + macsec_stats->in_octets_validated += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); + macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); + macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); + macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, 
IXGBE_LSECRXLATE); + for (i = 0; i < 2; i++) { + macsec_stats->in_pkts_ok += + IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); + macsec_stats->in_pkts_invalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); + macsec_stats->in_pkts_notvalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); + } + macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); + macsec_stats->in_pkts_notusingsa += + IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); } /* * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c */ -static void +static int ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_stats *hw_stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; unsigned i; @@ -2591,11 +3280,11 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) total_qprc = 0; total_qprdc = 0; - ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, - &total_qprc, &total_qprdc); + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); if (stats == NULL) - return; + return -EINVAL; /* Fill out the rte_eth_stats statistics structure */ stats->ipackets = total_qprc; @@ -2614,22 +3303,22 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Rx Errors */ stats->imissed = total_missed_rx; stats->ierrors = hw_stats->crcerrs + - hw_stats->mspdc + - hw_stats->rlec + - hw_stats->ruc + - hw_stats->roc + - total_missed_rx + - hw_stats->illerrc + - hw_stats->errbc + - hw_stats->rfc + - hw_stats->fccrc + - hw_stats->fclast; + hw_stats->mspdc + + hw_stats->rlec + + hw_stats->ruc + + hw_stats->roc + + hw_stats->illerrc + + hw_stats->errbc + + hw_stats->rfc + + hw_stats->fccrc + + hw_stats->fclast; /* Tx Errors */ stats->oerrors = 0; + return 0; } -static void +static int ixgbe_dev_stats_reset(struct rte_eth_dev *dev) { struct ixgbe_hw_stats *stats = @@ -2640,23 +3329,175 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev) /* Reset software totals */ memset(stats, 0, sizeof(*stats)); + + return 0; } /* This function calculates the number of xstats based on the current config */ static unsigned ixgbe_xstats_calc_num(void) { - return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) + - (IXGBE_NB_TXQ_PRIO_STATS * 8); + return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + + (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + + (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); +} + +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size) +{ + const unsigned cnt_stats = ixgbe_xstats_calc_num(); + unsigned stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_macsec_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i 
= 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; +} + +static int ixgbe_dev_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit) +{ + if (!ids) { + const unsigned int cnt_stats = ixgbe_xstats_calc_num(); + unsigned int stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_stats_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + strlcpy(xstats_names[count].name, + rte_ixgbe_macsec_strings[i].name, + sizeof(xstats_names[count].name)); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; + } + + uint16_t i; + uint16_t size = ixgbe_xstats_calc_num(); + struct rte_eth_xstat_name xstats_names_copy[size]; + + ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL, + size); + + for (i = 0; i < limit; i++) { + if (ids[i] >= size) { + PMD_INIT_LOG(ERR, "id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + return limit; +} + +static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit) +{ + unsigned i; + + if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) + return -ENOMEM; + + if (xstats_names != NULL) + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) + strlcpy(xstats_names[i].name, + rte_ixgbevf_stats_strings[i].name, + sizeof(xstats_names[i].name)); + return IXGBEVF_NB_XSTATS; } static int -ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_stats *hw_stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; unsigned i, stat, count = 0; @@ -2670,8 +3511,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, total_qprc = 0; total_qprdc = 0; - ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, - &total_qprc, &total_qprdc); + 
ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); /* If this is a reset xstats is NULL, and we have cleared the * registers by reading them. @@ -2682,47 +3523,143 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats from ixgbe_hw_stats */ count = 0; for (i = 0; i < IXGBE_NB_HW_STATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), "%s", - rte_ixgbe_stats_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + xstats[count].id = count; count++; } - /* RX Priority Stats */ - for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_priority%u_%s", i, - rte_ixgbe_rxq_strings[stat].name); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_rxq_strings[stat].offset + + (sizeof(uint64_t) * i)); + xstats[count].id = count; + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_txq_strings[stat].offset + + (sizeof(uint64_t) * i)); + xstats[count].id = count; + count++; + } + } + return count; +} + +static int +ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + if (!ids) { + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS( + dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned int i, stat, count = 0; + + count = ixgbe_xstats_calc_num(); + + if (!ids && n < count) + return count; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, + &total_missed_rx, &total_qbrc, &total_qprc, + &total_qprdc); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. 
+ */ + if (!ids && !values) + return 0; + + /* Extended stats from ixgbe_hw_stats */ + count = 0; + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + values[count] = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_stats_strings[i].offset); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + values[count] = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + values[count] = + *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_rxq_strings[stat].offset + (sizeof(uint64_t) * i)); - count++; + count++; + } } - } - /* TX Priority Stats */ - for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_priority%u_%s", i, - rte_ixgbe_txq_strings[stat].name); - xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + values[count] = + *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_txq_strings[stat].offset + (sizeof(uint64_t) * i)); - count++; + count++; + } } + return count; } - return count; + uint16_t i; + uint16_t size = ixgbe_xstats_calc_num(); + uint64_t values_copy[size]; + + ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size); + + for (i = 0; i < n; i++) { + if (ids[i] >= size) { + PMD_INIT_LOG(ERR, "id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; } -static void +static int ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) { struct ixgbe_hw_stats *stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); unsigned count = ixgbe_xstats_calc_num(); @@ -2731,13 +3668,16 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) /* Reset software totals */ memset(stats, 0, sizeof(*stats)); + memset(macsec_stats, 0, sizeof(*macsec_stats)); + + return 0; } static void ixgbevf_update_stats(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Good Rx packet, include VF loopback */ @@ -2762,7 +3702,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev) } static int -ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) @@ -2779,8 +3719,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats */ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), - "%s", rte_ixgbevf_stats_strings[i].name); + xstats[i].id = i; xstats[i].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbevf_stats_strings[i].offset); } @@ -2788,7 +3727,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, return IXGBEVF_NB_XSTATS; } -static void +static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) @@ -2797,20 +3736,19 @@ ixgbevf_dev_stats_get(struct 
rte_eth_dev *dev, struct rte_eth_stats *stats) ixgbevf_update_stats(dev); if (stats == NULL) - return; + return -EINVAL; stats->ipackets = hw_stats->vfgprc; stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; stats->obytes = hw_stats->vfgotc; - stats->imcasts = hw_stats->vfmprc; - /* stats->imcasts should be removed as imcasts is deprecated */ + return 0; } -static void +static int ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) { - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Sync HW register to the last stats */ @@ -2821,57 +3759,66 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) hw_stats->vfgorc = 0; hw_stats->vfgptc = 0; hw_stats->vfgotc = 0; - hw_stats->vfmprc = 0; + return 0; } -static void +static int +ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u16 eeprom_verh, eeprom_verl; + u32 etrack_id; + int ret; + + ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); + ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); + + etrack_id = (eeprom_verh << 16) | eeprom_verl; + ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +static int ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) + dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; + } dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; dev_info->vmdq_queue_num = dev_info->max_rx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - - /* - * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV - * mode. 
- */ - if ((hw->mac.type == ixgbe_mac_82599EB || - hw->mac.type == ixgbe_mac_X540) && - !RTE_ETH_DEV_SRIOV(dev).active) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) - dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; - - if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) - dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2881,6 +3828,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2891,8 +3839,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; @@ -2901,35 +3848,101 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + dev_info->speed_capa = ETH_LINK_SPEED_10M | + ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G; + + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550_vf) { + dev_info->speed_capa |= ETH_LINK_SPEED_100M; + } + if (hw->mac.type == ixgbe_mac_X550) { + dev_info->speed_capa |= ETH_LINK_SPEED_2_5G; + dev_info->speed_capa |= ETH_LINK_SPEED_5G; + } + + /* Driver-preferred Rx/Tx parameters */ + dev_info->default_rxportconf.burst_size = 32; + dev_info->default_txportconf.burst_size = 32; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_rxportconf.ring_size = 256; + dev_info->default_txportconf.ring_size = 256; + + return 0; } -static void +static const uint32_t * +ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* For non-vec functions, + * refers to ixgbe_rxd_pkt_info_to_pkt_type(); + * for vec functions, + * refers to _recv_raw_pkts_vec(). 
+ */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == ixgbe_recv_pkts || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) + return ptypes; + +#if defined(RTE_ARCH_X86) + if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec || + dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec) + return ptypes; +#endif + return NULL; +} + +static int ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ - dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ + dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ + dev_info->max_mtu = dev_info->max_rx_pktlen - IXGBE_ETH_OVERHEAD; dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else dev_info->max_vmdq_pools = ETH_64_POOLS; - dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev); + dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) | + dev_info->rx_queue_offload_capa); + dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev); + dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev); + dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); + dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2939,6 +3952,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH, .rx_drop_en = 0, + .offloads = 0, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -2949,84 +3963,237 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, }, .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH, .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .offloads = 0, }; dev_info->rx_desc_lim = rx_desc_lim; dev_info->tx_desc_lim = tx_desc_lim; + + return 0; } -/* return 0 means link status changed, -1 means not changed */ static int -ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + int *link_up, int wait_to_complete) +{ + struct ixgbe_adapter *adapter = container_of(hw, + struct ixgbe_adapter, hw); + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = 
&hw->mac; + uint32_t links_reg, in_msg; + int ret_val = 0; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) { + int i; + + for (i = 0; i < 5; i++) { + rte_delay_us(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + if (hw->mac.type >= ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_2_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (hw->mac.type == ixgbe_mac_X550) { + if (links_reg & IXGBE_LINKS_SPEED_NON_STD) + *speed = IXGBE_LINK_SPEED_5GB_FULL; + } + break; + case IXGBE_LINKS_SPEED_10_X550EM_A: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + /* Since Reserved in older MAC's */ + if (hw->mac.type >= ixgbe_mac_X550) + *speed = IXGBE_LINK_SPEED_10_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + } + + if (wait_to_complete == 0 && adapter->pflink_fullchk == 0) { + if (*speed == IXGBE_LINK_SPEED_UNKNOWN) + mac->get_link_status = true; + else + mac->get_link_status = false; + + goto out; + } + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error + */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS and is NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + mac->get_link_status = false; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + +static void +ixgbe_dev_setup_link_alarm_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + u32 speed; + bool autoneg = false; + + speed = hw->phy.autoneg_advertised; + if (!speed) + ixgbe_get_link_capabilities(hw, &speed, &autoneg); + + ixgbe_setup_link(hw, speed, true); + + intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; +} + +/* return 0 means link status changed, -1 means not changed */ +int +ixgbe_dev_link_update_share(struct rte_eth_dev *dev, + int wait_to_complete, int vf) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_eth_link link, old; + struct rte_eth_link link; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int link_up; int diag; + int wait = 1; - link.link_status = 0; - link.link_speed = 0; - link.link_duplex = 0; - memset(&old, 0, 
sizeof(old)); - rte_ixgbe_dev_atomic_read_link_status(dev, &old); + memset(&link, 0, sizeof(link)); + link.link_status = ETH_LINK_DOWN; + link.link_speed = ETH_SPEED_NUM_NONE; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_autoneg = ETH_LINK_AUTONEG; hw->mac.get_link_status = true; + if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) + return rte_eth_linkstatus_set(dev, &link); + /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) - diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); + wait = 0; + + if (vf) + diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait); else - diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); + diag = ixgbe_check_link(hw, &link_speed, &link_up, wait); if (diag != 0) { - link.link_speed = ETH_LINK_SPEED_100; - link.link_duplex = ETH_LINK_HALF_DUPLEX; - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; - return 0; + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + return rte_eth_linkstatus_set(dev, &link); } if (link_up == 0) { - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; - return 0; + if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) { + intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + rte_eal_alarm_set(10, + ixgbe_dev_setup_link_alarm_handler, dev); + } + return rte_eth_linkstatus_set(dev, &link); } - link.link_status = 1; + + link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; switch (link_speed) { default: case IXGBE_LINK_SPEED_UNKNOWN: - link.link_duplex = ETH_LINK_HALF_DUPLEX; - link.link_speed = ETH_LINK_SPEED_100; + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || + hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) + link.link_speed = ETH_SPEED_NUM_10M; + else + link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_100_FULL: - link.link_speed = ETH_LINK_SPEED_100; + link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_1GB_FULL: - link.link_speed = ETH_LINK_SPEED_1000; + link.link_speed = ETH_SPEED_NUM_1G; + break; + + case IXGBE_LINK_SPEED_2_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_2_5G; + break; + + case IXGBE_LINK_SPEED_5GB_FULL: + link.link_speed = ETH_SPEED_NUM_5G; break; case IXGBE_LINK_SPEED_10GB_FULL: - link.link_speed = ETH_LINK_SPEED_10000; + link.link_speed = ETH_SPEED_NUM_10G; break; } - rte_ixgbe_dev_atomic_write_link_status(dev, &link); - if (link.link_status == old.link_status) - return -1; + return rte_eth_linkstatus_set(dev, &link); +} - return 0; +static int +ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 0); } -static void +static int +ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + return ixgbe_dev_link_update_share(dev, wait_to_complete, 1); +} + +static int ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3035,9 +4202,11 @@ ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; } -static void +static int ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3050,9 +4219,11 @@ ixgbe_dev_promiscuous_disable(struct 
rte_eth_dev *dev) else fctrl &= (~IXGBE_FCTRL_MPE); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; } -static void +static int ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3061,20 +4232,24 @@ ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl |= IXGBE_FCTRL_MPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; } -static void +static int ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t fctrl; if (dev->data->promiscuous == 1) - return; /* must remain in all_multicast mode */ + return 0; /* must remain in all_multicast mode */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl &= (~IXGBE_FCTRL_MPE); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + return 0; } /** @@ -3083,19 +4258,24 @@ ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) * * @param dev * Pointer to struct rte_eth_dev. + * @param on + * Enable or Disable. * * @return * - On success, zero. * - On failure, a negative value. */ static int -ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) +ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); ixgbe_dev_link_status_print(dev); - intr->mask |= IXGBE_EICR_LSC; + if (on) + intr->mask |= IXGBE_EICR_LSC; + else + intr->mask &= ~IXGBE_EICR_LSC; return 0; } @@ -3122,6 +4302,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) return 0; } +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_LINKSEC; + + return 0; +} + /* * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 
* @@ -3156,6 +4358,14 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) if (eicr & IXGBE_EICR_MAILBOX) intr->flags |= IXGBE_FLAG_MAILBOX; + if (eicr & IXGBE_EICR_LINKSEC) + intr->flags |= IXGBE_FLAG_MACSEC; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) + intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; + return 0; } @@ -3172,10 +4382,11 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_eth_link link; - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); + if (link.link_status) { PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", (int)(dev->data->port_id), @@ -3186,11 +4397,11 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) PMD_INIT_LOG(INFO, " Port %d: Link Down", (int)(dev->data->port_id)); } - PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); } /* @@ -3209,8 +4420,8 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; - struct rte_eth_link link; - int intr_enable_delay = false; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); @@ -3219,10 +4430,16 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) intr->flags &= ~IXGBE_FLAG_MAILBOX; } + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + struct rte_eth_link link; + /* get the link status before link update, for predicting later */ - memset(&link, 0, sizeof(link)); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); ixgbe_dev_link_update(dev, 0); @@ -3236,20 +4453,19 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; ixgbe_dev_link_status_print(dev); - - intr_enable_delay = true; - } - - if (intr_enable_delay) { if (rte_eal_alarm_set(timeout * 1000, - ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0) + ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) PMD_DRV_LOG(ERR, "Error setting alarm"); - } else { - PMD_DRV_LOG(DEBUG, "enable intr immediately"); - ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + else { + /* remember original mask */ + intr->mask_original = intr->mask; + /* only disable lsc interrupt */ + intr->mask &= ~IXGBE_EIMS_LSC; + } } + PMD_DRV_LOG(DEBUG, "enable intr immediately"); + ixgbe_enable_intr(dev); return 0; } @@ -3272,26 +4488,46 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t eicr; + ixgbe_disable_intr(hw); + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); if (eicr & 
IXGBE_EICR_MAILBOX) ixgbe_pf_mbx_process(dev); + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { ixgbe_dev_link_update(dev, 0); intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; ixgbe_dev_link_status_print(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } + + if (intr->flags & IXGBE_FLAG_MACSEC) { + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, + NULL); + intr->flags &= ~IXGBE_FLAG_MACSEC; } + /* restore original mask */ + intr->mask = intr->mask_original; + intr->mask_original = 0; + PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_ack(intr_handle); } /** @@ -3307,8 +4543,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param) * void */ static void -ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, - void *param) +ixgbe_dev_interrupt_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; @@ -3408,7 +4643,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * At least reserve one Ethernet frame for watermark * high_water/low_water in kilo bytes for ixgbe */ - max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + max_high_water = (rx_buf_size - + RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((fc_conf->high_water > max_high_water) || (fc_conf->high_water < fc_conf->low_water)) { PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); @@ -3456,7 +4692,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Enable flow control according to the current settings. */ static int -ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) { int ret_val = 0; uint32_t mflcn_reg, fccfg_reg; @@ -3503,13 +4739,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_rx_pause: /* @@ -3526,20 +4762,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ @@ -3550,7 +4786,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); ret_val = IXGBE_ERR_CONFIG; goto out; - break; } /* Set 802.3x based flow control settings. 
*/ @@ -3589,13 +4824,13 @@ out: } static int -ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int32_t ret_val = IXGBE_NOT_IMPLEMENTED; if (hw->mac.type != ixgbe_mac_82598EB) { - ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num); + ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); } return ret_val; } @@ -3609,9 +4844,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p uint8_t tc_num; uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, @@ -3630,7 +4865,8 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p * At least reserve one Ethernet frame for watermark * high_water/low_water in kilo bytes for ixgbe */ - max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + max_high_water = (rx_buf_size - + RTE_ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; if ((pfc_conf->fc.high_water > max_high_water) || (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); @@ -3644,7 +4880,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; - err = ixgbe_dcb_pfc_enable(dev,tc_num); + err = ixgbe_dcb_pfc_enable(dev, tc_num); /* Not negotiated is not an error case */ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) @@ -3659,11 +4895,12 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - uint8_t i, j, mask; + uint16_t i, sp_reta_size; + uint8_t j, mask; uint32_t reta, r; uint16_t idx, shift; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint16_t sp_reta_size; uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); @@ -3678,7 +4915,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, if (reta_size != sp_reta_size) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, sp_reta_size); + "(%d)", reta_size, sp_reta_size); return -EINVAL; } @@ -3704,6 +4941,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, } IXGBE_WRITE_REG(hw, reta_reg, reta); } + adapter->rss_reta_updated = 1; return 0; } @@ -3713,11 +4951,11 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - uint8_t i, j, mask; + uint16_t i, sp_reta_size; + uint8_t j, mask; uint32_t reta; uint16_t idx, shift; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint16_t sp_reta_size; uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); @@ -3725,7 +4963,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, if (reta_size != sp_reta_size) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, sp_reta_size); + "(%d)", reta_size, sp_reta_size); return -EINVAL; } @@ -3750,14 +4988,15 @@ 
ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, return 0; } -static void -ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, +static int +ixgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t enable_addr = 1; - ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr); + return ixgbe_set_rar(hw, index, mac_addr->addr_bytes, + pool, enable_addr); } static void @@ -3765,15 +5004,33 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - ixgbe_clear_rar(hw, index); + ixgbe_clear_rar(hw, index); +} + +static int +ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr) +{ + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + + ixgbe_remove_rar(dev, 0); + ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs); + + return 0; +} + +static bool +is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv) +{ + if (strcmp(dev->device->driver->name, drv->driver.name)) + return false; + + return true; } -static void -ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +bool +is_ixgbe_supported(struct rte_eth_dev *dev) { - ixgbe_remove_rar(dev, 0); - - ixgbe_add_rar(dev, addr, 0, 0); + return is_device_supported(dev, &rte_ixgbe_pmd); } static int @@ -3783,30 +5040,39 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) uint32_t maxfrs; struct ixgbe_hw *hw; struct rte_eth_dev_info dev_info; - uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD; + struct rte_eth_dev_data *dev_data = dev->data; + int ret; - ixgbe_dev_info_get(dev, &dev_info); + ret = ixgbe_dev_info_get(dev, &dev_info); + if (ret != 0) + return ret; /* check that mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) return -EINVAL; - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ - if (!dev->data->scattered_rx && + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. 
+ */ + if (dev_data->dev_started && !dev_data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); return -EINVAL; + } hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); /* switch to jumbo mode if needed */ - if (frame_size > ETHER_MAX_LEN) { - dev->data->dev_conf.rxmode.jumbo_frame = 1; + if (frame_size > RTE_ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 |= IXGBE_HLREG0_JUMBOEN; } else { - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; } IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -3826,19 +5092,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * Virtual Function operations */ static void -ixgbevf_intr_disable(struct ixgbe_hw *hw) +ixgbevf_intr_disable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* Clear interrupt mask to stop from interrupts being generated */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Clear mask value. */ + intr->mask = 0; } static void -ixgbevf_intr_enable(struct ixgbe_hw *hw) +ixgbevf_intr_enable(struct rte_eth_dev *dev) { + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + PMD_INIT_FUNC_TRACE(); /* VF enable interrupt autoclean */ @@ -3847,14 +5126,16 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); IXGBE_WRITE_FLUSH(hw); + + /* Save IXGBE_VTEIMS value to mask. */ + intr->mask = IXGBE_VF_IRQ_ENABLE_MASK; } static int ixgbevf_dev_configure(struct rte_eth_dev *dev) { - struct rte_eth_conf* conf = &dev->data->dev_conf; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct ixgbe_adapter *adapter = dev->data->dev_private; PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", dev->data->port_id); @@ -3864,14 +5145,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) * Keep the persistent behavior the same as Host PF */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC - if (!conf->rxmode.hw_strip_crc) { + if (conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) { PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 1; + conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC; } #else - if (conf->rxmode.hw_strip_crc) { + if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) { PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); - conf->rxmode.hw_strip_crc = 0; + conf->rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC; } #endif @@ -3891,13 +5172,21 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t intr_vector = 0; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int err, mask = 0; PMD_INIT_FUNC_TRACE(); - hw->mac.ops.reset_hw(hw); + /* Stop the link setup handler before resetting the HW. 
*/ + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + + err = hw->mac.ops.reset_hw(hw); + if (err) { + PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err); + return err; + } hw->mac.get_link_status = true; /* negotiate mailbox API version to use with the PF. */ @@ -3914,18 +5203,27 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } /* Set vfta */ - ixgbevf_set_vfta_all(dev,1); + ixgbevf_set_vfta_all(dev, 1); /* Set HW strip */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; - ixgbevf_vlan_offload_set(dev, mask); + err = ixgbevf_vlan_offload_config(dev, mask); + if (err) { + PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err); + ixgbe_dev_clear_queues(dev); + return err; + } ixgbevf_dev_rxtx_start(dev); /* check and configure queue intr-vector mapping */ - if (dev->data->dev_conf.intr_conf.rxq != 0) { - intr_vector = dev->data->nb_rx_queues; + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { + /* According to datasheet, only vector 0/1/2 can be used, + * now only one vector is used for Rx queue + */ + intr_vector = 1; if (rte_intr_efd_enable(intr_handle, intr_vector)) return -1; } @@ -3936,16 +5234,33 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) dev->data->nb_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec\n", dev->data->nb_rx_queues); + " intr_vec", dev->data->nb_rx_queues); return -ENOMEM; } } ixgbevf_configure_msix(dev); + /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt + * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ). + * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( ) + * is not cleared, it will fail when following rte_intr_enable( ) tries + * to map Rx queue interrupt to other VFIO vectors. + * So clear uio/vfio intr/evevnfd first to avoid failure. + */ + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); /* Re-enable interrupt for VF */ - ixgbevf_intr_enable(hw); + ixgbevf_intr_enable(dev); + + /* + * Update link status right before return, because it may + * start link configuration process in a separate thread. 
+ */ + ixgbevf_dev_link_update(dev, 0); + + hw->adapter_stopped = false; return 0; } @@ -3954,10 +5269,19 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct ixgbe_adapter *adapter = dev->data->dev_private; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + + if (hw->adapter_stopped) + return; PMD_INIT_FUNC_TRACE(); + rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev); + + ixgbevf_intr_disable(dev); + hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); @@ -3965,7 +5289,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) * Clear what we set, but we still keep shadow_vfta to * restore after device starts */ - ixgbevf_set_vfta_all(dev,0); + ixgbevf_set_vfta_all(dev, 0); /* Clear stored conf */ dev->data->scattered_rx = 0; @@ -3978,12 +5302,16 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) rte_free(intr_handle->intr_vec); intr_handle->intr_vec = NULL; } + + adapter->rss_reta_updated = 0; } static void ixgbevf_dev_close(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); @@ -3993,25 +5321,55 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) ixgbe_dev_free_queues(dev); - /* reprogram the RAR[0] in case user changed it. */ - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + /** + * Remove the VF MAC address ro ensure + * that the VF traffic goes to the PF + * after stop, close and detach of the VF + **/ + ixgbevf_remove_mac_addr(dev, 0); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ixgbevf_dev_interrupt_handler, dev); +} + +/* + * Reset VF device + */ +static int +ixgbevf_dev_reset(struct rte_eth_dev *dev) +{ + int ret; + + ret = eth_ixgbevf_dev_uninit(dev); + if (ret) + return ret; + + ret = eth_ixgbevf_dev_init(dev); + + return ret; } static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); int i = 0, j = 0, vfta = 0, mask = 1; - for (i = 0; i < IXGBE_VFTA_SIZE; i++){ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) { vfta = shadow_vfta->vfta[i]; if (vfta) { mask = 1; - for (j = 0; j < 32; j++){ + for (j = 0; j < 32; j++) { if (vfta & mask) - ixgbe_set_vfta(hw, (i<<5)+j, 0, on); - mask<<=1; + ixgbe_set_vfta(hw, (i<<5)+j, 0, + on, false); + mask <<= 1; } } } @@ -4023,7 +5381,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vid_idx = 0; uint32_t vid_bit = 0; @@ -4032,7 +5390,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ - ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on); + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); if (ret) { PMD_INIT_LOG(ERR, "Unable to set VF vlan"); return ret; @@ -4068,35 +5426,47 @@ 
ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on); + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); } -static void -ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +static int +ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask) { - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; uint16_t i; int on = 0; /* VF function only support hw strip feature, others are not support */ if (mask & ETH_VLAN_STRIP_MASK) { - on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); - - for (i = 0; i < hw->mac.max_rx_queues; i++) - ixgbevf_vlan_strip_queue_set(dev,i,on); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); + ixgbevf_vlan_strip_queue_set(dev, i, on); + } } + + return 0; } static int -ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) +ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + ixgbe_config_vlan_strip_on_all_queues(dev, mask); + + ixgbevf_vlan_offload_config(dev, mask); + + return 0; +} + +int +ixgbe_vt_check(struct ixgbe_hw *hw) { uint32_t reg_val; - /* we only need to do this if VMDq is enabled */ + /* if Virtualization Technology is enabled */ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); + PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); return -1; } @@ -4104,9 +5474,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) } static uint32_t -ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) +ixgbe_uta_vector(struct ixgbe_hw *hw, struct rte_ether_addr *uc_addr) { uint32_t vector = 0; + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((uc_addr->addr_bytes[4] >> 4) | @@ -4134,8 +5505,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) } static int -ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, - uint8_t on) +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, + struct rte_ether_addr *mac_addr, uint8_t on) { uint32_t vector; uint32_t uta_idx; @@ -4156,7 +5527,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, if (hw->mac.type < ixgbe_mac_82599EB) return -ENOTSUP; - vector = ixgbe_uta_vector(hw,mac_addr); + vector = ixgbe_uta_vector(hw, mac_addr); uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; uta_shift = vector & ixgbe_uta_bit_mask; @@ -4181,7 +5552,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); else - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } @@ -4233,111 +5604,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) return new_val; } -static int -ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, - uint16_t rx_mask, uint8_t on) -{ - int val = 0; - - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); - - if (hw->mac.type == ixgbe_mac_82598EB) { - PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" - " on 82599 hardware and newer"); - return -ENOTSUP; - } - if (ixgbe_vmdq_mode_check(hw) < 0) - return 
-ENOTSUP; - - val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); - - if (on) - vmolr |= val; - else - vmolr &= ~val; - - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); - - return 0; -} - -static int -ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) -{ - uint32_t reg,addr; - uint32_t val; - const uint8_t bit1 = 0x1; - - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - if (ixgbe_vmdq_mode_check(hw) < 0) - return -ENOTSUP; - - addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); - reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; - - if (on) - reg |= val; - else - reg &= ~val; - - IXGBE_WRITE_REG(hw, addr,reg); - - return 0; -} - -static int -ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) -{ - uint32_t reg,addr; - uint32_t val; - const uint8_t bit1 = 0x1; - - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - if (ixgbe_vmdq_mode_check(hw) < 0) - return -ENOTSUP; - - addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2); - reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; - - if (on) - reg |= val; - else - reg &= ~val; - - IXGBE_WRITE_REG(hw, addr,reg); - - return 0; -} - -static int -ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask, uint8_t vlan_on) -{ - int ret = 0; - uint16_t pool_idx; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - if (ixgbe_vmdq_mode_check(hw) < 0) - return -ENOTSUP; - for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { - if (pool_mask & ((uint64_t)(1ULL << pool_idx))) - ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on); - if (ret < 0) - return ret; - } - - return ret; -} - #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ #define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ #define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. 
*/ @@ -4348,10 +5614,10 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, - struct rte_eth_mirror_conf *mirror_conf, - uint8_t rule_id, uint8_t on) + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on) { - uint32_t mr_ctl,vlvf; + uint32_t mr_ctl, vlvf; uint32_t mp_lsb = 0; uint32_t mv_msb = 0; uint32_t mv_lsb = 0; @@ -4364,7 +5630,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, const uint8_t vlan_mask_offset = 32; const uint8_t dst_pool_offset = 8; const uint8_t rule_mr_offset = 4; - const uint8_t mirror_rule_mask= 0x0F; + const uint8_t mirror_rule_mask = 0x0F; struct ixgbe_mirror_info *mr_info = (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); @@ -4372,7 +5638,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint8_t mirror_type = 0; - if (ixgbe_vmdq_mode_check(hw) < 0) + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; if (rule_id >= IXGBE_MAX_MIRROR_RULES) @@ -4380,21 +5646,28 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", - mirror_conf->rule_type); + mirror_conf->rule_type); return -EINVAL; } if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { mirror_type |= IXGBE_MRCTL_VLME; - /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ - for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) { + /* Check if vlan id is valid and find conresponding VLAN ID + * index in VLVF + */ + for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { - /* search vlan id related pool vlan filter index */ - reg_index = ixgbe_find_vlvf_slot(hw, - mirror_conf->vlan.vlan_id[i]); + /* search vlan id related pool vlan filter + * index + */ + reg_index = ixgbe_find_vlvf_slot( + hw, + mirror_conf->vlan.vlan_id[i], + false); if (reg_index < 0) return -EINVAL; - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); + vlvf = IXGBE_READ_REG(hw, + IXGBE_VLVF(reg_index)); if ((vlvf & IXGBE_VLVF_VIEN) && ((vlvf & IXGBE_VLVF_VLANID_MASK) == mirror_conf->vlan.vlan_id[i])) @@ -4424,7 +5697,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, } } - /* + /** * if enable pool mirror, write related pool mask register,if disable * pool mirror, clear PFMRVM register */ @@ -4454,8 +5727,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, mr_ctl |= mirror_type; mr_ctl &= mirror_rule_mask; mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; - } else + } else { mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); + } mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; @@ -4464,13 +5738,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); /* write pool mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), mp_msb); } /* write VLAN mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), mv_msb); @@ -4492,11 +5766,14 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) struct ixgbe_mirror_info *mr_info = 
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); - if (ixgbe_vmdq_mode_check(hw) < 0) + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + memset(&mr_info->mr_conf[rule_id], 0, - sizeof(struct rte_eth_mirror_conf)); + sizeof(struct rte_eth_mirror_conf)); /* clear PFVMCTL register */ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); @@ -4515,16 +5792,21 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - uint32_t mask; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - mask |= (1 << IXGBE_MISC_VEC_ID); + if (rte_intr_allow_others(intr_handle)) + vec = IXGBE_RX_VEC_START; + intr->mask |= (1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_ack(intr_handle); return 0; } @@ -4532,14 +5814,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - uint32_t mask; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t vec = IXGBE_MISC_VEC_ID; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - mask &= ~(1 << IXGBE_MISC_VEC_ID); + if (rte_intr_allow_others(intr_handle)) + vec = IXGBE_RX_VEC_START; + intr->mask &= ~(1 << vec); RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask); return 0; } @@ -4547,6 +5834,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t mask; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4566,7 +5855,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) mask &= (1 << (queue_id - 32)); IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); } - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_ack(intr_handle); return 0; } @@ -4648,7 +5937,9 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, tmp |= (msix_vector << (8 * (queue & 0x3))); IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); } else if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { + (hw->mac.type == ixgbe_mac_X540) || + (hw->mac.type == ixgbe_mac_X550) || + (hw->mac.type == ixgbe_mac_X550EM_x)) { if (direction == -1) { /* other causes */ idx = ((queue & 1) * 8); @@ -4670,11 +5961,16 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, static void ixgbevf_configure_msix(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = 
&pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t q_idx; uint32_t vector_idx = IXGBE_MISC_VEC_ID; + uint32_t base = IXGBE_MISC_VEC_ID; + + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); /* won't configure msix register if no mapping is done * between intr vector and event fd. @@ -4682,6 +5978,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) if (!rte_intr_dp_is_en(intr_handle)) return; + if (rte_intr_allow_others(intr_handle)) { + base = IXGBE_RX_VEC_START; + vector_idx = IXGBE_RX_VEC_START; + } + /* Configure all RX queues of VF */ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { /* Force all queue use vector 0, @@ -4689,10 +5990,16 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) */ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); intr_handle->intr_vec[q_idx] = vector_idx; + if (vector_idx < base + intr_handle->nb_efd - 1) + vector_idx++; } - /* Configure VF other cause ivar */ - ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); + /* As RX queue setting above show, all queues use the vector 0. + * Set only the ITR value of IXGBE_MISC_VEC_ID. + */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID), + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); } /** @@ -4703,7 +6010,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) static void ixgbe_configure_msix(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t queue_id, base = IXGBE_MISC_VEC_ID; @@ -4713,8 +6021,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* won't configure msix register if no mapping is done * between intr vector and event fd + * but if misx has been enabled already, need to configure + * auto clean, auto mask and throttling. */ - if (!rte_intr_dp_is_en(intr_handle)) + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (!rte_intr_dp_is_en(intr_handle) && + !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) return; if (rte_intr_allow_others(intr_handle)) @@ -4738,29 +6050,35 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* Populate the IVAR table and set the ITR values to the * corresponding register. 
*/ - for (queue_id = 0; queue_id < dev->data->nb_rx_queues; - queue_id++) { - /* by default, 1:1 mapping */ - ixgbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + if (rte_intr_dp_is_en(intr_handle)) { + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - IXGBE_MISC_VEC_ID); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); - break; - default: - break; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, + IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } } IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), - IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT) + | IXGBE_EITR_CNT_WDIS); /* set up to autoclear timer, and the vectors */ mask = IXGBE_EIMS_ENABLE_MASK; @@ -4771,110 +6089,57 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); } -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate) +int +ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_rxmode *rxmode; uint32_t rf_dec, rf_int; uint32_t bcnrc_val; uint16_t link_speed = dev->data->dev_link.link_speed; - - if (queue_idx >= hw->mac.max_tx_queues) - return -EINVAL; - - if (tx_rate != 0) { - /* Calculate the rate factor values to set */ - rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; - rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; - rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; - - bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & - IXGBE_RTTBCNRC_RF_INT_MASK_M); - bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); - } else { - bcnrc_val = 0; - } - - /* - * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM - * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise - * set as 0x4. 
- */ - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len >= - IXGBE_MAX_JUMBO_FRAME_SIZE)) - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, - IXGBE_MMW_SIZE_JUMBO_FRAME); - else - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, - IXGBE_MMW_SIZE_DEFAULT); - - /* Set RTTBCNRC of queue X */ - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, - uint16_t tx_rate, uint64_t q_msk) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vf_info *vfinfo = - *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); - uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; - uint32_t queue_stride = - IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; - uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; - uint32_t queue_end = queue_idx + nb_q_per_pool - 1; - uint16_t total_rate = 0; - - if (queue_end >= hw->mac.max_tx_queues) - return -EINVAL; - - if (vfinfo != NULL) { - for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { - if (vf_idx == vf) - continue; - for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); - idx++) - total_rate += vfinfo[vf_idx].tx_rate[idx]; - } - } else - return -EINVAL; - - /* Store tx_rate for this vf. */ - for (idx = 0; idx < nb_q_per_pool; idx++) { - if (((uint64_t)0x1 << idx) & q_msk) { - if (vfinfo[vf].tx_rate[idx] != tx_rate) - vfinfo[vf].tx_rate[idx] = tx_rate; - total_rate += tx_rate; - } - } - - if (total_rate > dev->data->dev_link.link_speed) { - /* - * Reset stored TX rate of the VF if it causes exceed - * link speed. - */ - memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); + + if (queue_idx >= hw->mac.max_tx_queues) return -EINVAL; - } - /* Set RTTBCNRC of each queue/pool for vf X */ - for (; queue_idx <= queue_end; queue_idx++) { - if (0x1 & q_msk) - ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); - q_msk = q_msk >> 1; + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; + rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; + rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; + + bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & + IXGBE_RTTBCNRC_RF_INT_MASK_M); + bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; } + rxmode = &dev->data->dev_conf.rxmode; + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise + * set as 0x4. + */ + if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) && + (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE)) + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_JUMBO_FRAME); + else + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_DEFAULT); + + /* Set RTTBCNRC of queue X */ + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + IXGBE_WRITE_FLUSH(hw); + return 0; } -static void -ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, +static int +ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr, __attribute__((unused)) uint32_t index, __attribute__((unused)) uint32_t pool) { @@ -4886,20 +6151,30 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, * operation. 
Trap this case to avoid exhausting the [very limited] * set of PF resources used to store VF MAC addresses. */ - if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) - return; + if (memcmp(hw->mac.perm_addr, mac_addr, + sizeof(struct rte_ether_addr)) == 0) + return -1; diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); - if (diag == 0) - return; - PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); + if (diag != 0) + PMD_DRV_LOG(ERR, "Unable to add MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5], + diag); + return diag; } static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; - struct ether_addr *mac_addr; + struct rte_ether_addr *perm_addr = + (struct rte_ether_addr *)hw->mac.perm_addr; + struct rte_ether_addr *mac_addr; uint32_t i; int diag; @@ -4921,10 +6196,11 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) if (i == index) continue; /* Skip NULL MAC addresses */ - if (is_zero_ether_addr(mac_addr)) + if (rte_is_zero_ether_addr(mac_addr)) continue; /* Skip the permanent MAC address */ - if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + if (memcmp(perm_addr, mac_addr, + sizeof(struct rte_ether_addr)) == 0) continue; diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); if (diag != 0) @@ -4942,35 +6218,35 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) } } -static void -ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +static int +ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, + struct rte_ether_addr *addr) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); -} -#define MAC_TYPE_FILTER_SUP(type) do {\ - if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ - (type) != ixgbe_mac_X550)\ - return -ENOTSUP;\ -} while (0) + return 0; +} -static int +int ixgbe_syn_filter_set(struct rte_eth_dev *dev, struct rte_eth_syn_filter *filter, bool add) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t syn_info; uint32_t synqf; if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; - synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + syn_info = filter_info->syn_info; if (add) { - if (synqf & IXGBE_SYN_FILTER_ENABLE) + if (syn_info & IXGBE_SYN_FILTER_ENABLE) return -EINVAL; synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); @@ -4980,10 +6256,13 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev, else synqf &= ~IXGBE_SYN_FILTER_SYNQFP; } else { - if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) + synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) return -ENOENT; synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); } + + filter_info->syn_info = synqf; IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); IXGBE_WRITE_FLUSH(hw); return 0; @@ -5039,7 +6318,7 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev, (struct rte_eth_syn_filter *)arg); break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + PMD_DRV_LOG(ERR, 
"unsupported operation %u", filter_op); ret = -EINVAL; break; } @@ -5061,6 +6340,52 @@ convert_protocol_type(uint8_t protocol_value) return IXGBE_FILTER_PROTOCOL_NONE; } +/* inject a 5-tuple filter to HW */ +static inline void +ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint8_t mask = 0xff; + + i = filter->index; + + sdpqf = (uint32_t)(filter->filter_info.dst_port << + IXGBE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + + ftqf = (uint32_t)(filter->filter_info.proto & + IXGBE_FTQF_PROTOCOL_MASK); + ftqf |= (uint32_t)((filter->filter_info.priority & + IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ + mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; + if (filter->filter_info.dst_ip_mask == 0) + mask &= IXGBE_FTQF_DEST_ADDR_MASK; + if (filter->filter_info.src_port_mask == 0) + mask &= IXGBE_FTQF_SOURCE_PORT_MASK; + if (filter->filter_info.dst_port_mask == 0) + mask &= IXGBE_FTQF_DEST_PORT_MASK; + if (filter->filter_info.proto_mask == 0) + mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; + ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= IXGBE_FTQF_POOL_MASK_EN; + ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + + l34timir |= IXGBE_L34T_IMIR_RESERVE; + l34timir |= (uint32_t)(filter->queue << + IXGBE_L34T_IMIR_QUEUE_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); +} + /* * add a 5tuple filter * @@ -5078,13 +6403,9 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, struct ixgbe_5tuple_filter *filter) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); int i, idx, shift; - uint32_t ftqf, sdpqf; - uint32_t l34timir = 0; - uint8_t mask = 0xff; /* * look for an unused 5tuple filter index, @@ -5107,37 +6428,8 @@ ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, return -ENOSYS; } - sdpqf = (uint32_t)(filter->filter_info.dst_port << - IXGBE_SDPQF_DSTPORT_SHIFT); - sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); - - ftqf = (uint32_t)(filter->filter_info.proto & - IXGBE_FTQF_PROTOCOL_MASK); - ftqf |= (uint32_t)((filter->filter_info.priority & - IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); - if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ - mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; - if (filter->filter_info.dst_ip_mask == 0) - mask &= IXGBE_FTQF_DEST_ADDR_MASK; - if (filter->filter_info.src_port_mask == 0) - mask &= IXGBE_FTQF_SOURCE_PORT_MASK; - if (filter->filter_info.dst_port_mask == 0) - mask &= IXGBE_FTQF_DEST_PORT_MASK; - if (filter->filter_info.proto_mask == 0) - mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; - ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; - ftqf |= IXGBE_FTQF_POOL_MASK_EN; - ftqf |= IXGBE_FTQF_QUEUE_ENABLE; - - IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + ixgbe_inject_5tuple_filter(dev, filter); - l34timir |= IXGBE_L34T_IMIR_RESERVE; - l34timir |= (uint32_t)(filter->queue << - IXGBE_L34T_IMIR_QUEUE_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); return 0; } @@ -5173,19 +6465,24 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct ixgbe_hw *hw; - uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t max_frame = mtu + IXGBE_ETH_OVERHEAD; + struct rte_eth_dev_data *dev_data = dev->data; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + if (mtu < RTE_ETHER_MIN_MTU || + max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN) return -EINVAL; - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ - if (!dev->data->scattered_rx && + /* If device is started, refuse mtu that requires the support of + * scattered packets when this feature has not been enabled before. + */ + if (dev_data->dev_started && !dev_data->scattered_rx && (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) { + PMD_INIT_LOG(ERR, "Stop port first."); return -EINVAL; + } /* * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU @@ -5203,11 +6500,6 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) return 0; } -#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ - if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ - return -ENOTSUP;\ -} while (0) - static inline struct ixgbe_5tuple_filter * ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, struct ixgbe_5tuple_filter_info *key) @@ -5315,7 +6607,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, * - On success, zero. * - On failure, a negative value. 
*/ -static int +int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, struct rte_eth_ntuple_filter *ntuple_filter, bool add) @@ -5352,7 +6644,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, sizeof(struct ixgbe_5tuple_filter), 0); if (filter == NULL) return -ENOMEM; - (void)rte_memcpy(&filter->filter_info, + rte_memcpy(&filter->filter_info, &filter_5tuple, sizeof(struct ixgbe_5tuple_filter_info)); filter->queue = ntuple_filter->queue; @@ -5460,48 +6752,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, return ret; } -static inline int -ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, - uint16_t ethertype) -{ - int i; - - for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { - if (filter_info->ethertype_filters[i] == ethertype && - (filter_info->ethertype_mask & (1 << i))) - return i; - } - return -1; -} - -static inline int -ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, - uint16_t ethertype) -{ - int i; - - for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { - if (!(filter_info->ethertype_mask & (1 << i))) { - filter_info->ethertype_mask |= 1 << i; - filter_info->ethertype_filters[i] = ethertype; - return i; - } - } - return -1; -} - -static inline int -ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, - uint8_t idx) -{ - if (idx >= IXGBE_MAX_ETQF_FILTERS) - return -1; - filter_info->ethertype_mask &= ~(1 << idx); - filter_info->ethertype_filters[idx] = 0; - return idx; -} - -static int +int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, struct rte_eth_ethertype_filter *filter, bool add) @@ -5512,12 +6763,13 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, uint32_t etqf = 0; uint32_t etqs = 0; int ret; + struct ixgbe_ethertype_filter ethertype_filter; if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6) { + if (filter->ether_type == RTE_ETHER_TYPE_IPV4 || + filter->ether_type == RTE_ETHER_TYPE_IPV6) { PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" " ethertype filter.", filter->ether_type); return -EINVAL; @@ -5545,18 +6797,23 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, } if (add) { - ret = ixgbe_ethertype_filter_insert(filter_info, - filter->ether_type); - if (ret < 0) { - PMD_DRV_LOG(ERR, "ethertype filters are full."); - return -ENOSYS; - } etqf = IXGBE_ETQF_FILTER_EN; etqf |= (uint32_t)filter->ether_type; etqs |= (uint32_t)((filter->queue << IXGBE_ETQS_RX_QUEUE_SHIFT) & IXGBE_ETQS_RX_QUEUE); etqs |= IXGBE_ETQS_QUEUE_EN; + + ethertype_filter.ethertype = filter->ether_type; + ethertype_filter.etqf = etqf; + ethertype_filter.etqs = etqs; + ethertype_filter.conf = FALSE; + ret = ixgbe_ethertype_filter_insert(filter_info, + ðertype_filter); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSPC; + } } else { ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); if (ret < 0) @@ -5652,7 +6909,7 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = -EINVAL; + int ret = 0; switch (filter_type) { case RTE_ETH_FILTER_NTUPLE: @@ -5670,9 +6927,15 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, case RTE_ETH_FILTER_L2_TUNNEL: ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ixgbe_flow_ops; + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); + 
ret = -EINVAL; break; } @@ -5687,13 +6950,13 @@ ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, *vmdq = 0; mc_addr = *mc_addr_ptr; - *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); + *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr)); return mc_addr; } static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, - struct ether_addr *mc_addr_set, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) { struct ixgbe_hw *hw; @@ -5713,6 +6976,8 @@ ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) @@ -5735,6 +7000,8 @@ ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* RXSTMPL stores ns and RXSTMPH stores seconds. */ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) @@ -5758,6 +7025,8 @@ ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* TXSTMPL stores ns and TXSTMPH stores seconds. */ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) @@ -5777,27 +7046,25 @@ static void ixgbe_start_timecounters(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; struct rte_eth_link link; uint32_t incval = 0; uint32_t shift = 0; /* Get current link speed. */ - memset(&link, 0, sizeof(link)); ixgbe_dev_link_update(dev, 1); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + rte_eth_linkstatus_get(dev, &link); switch (link.link_speed) { - case ETH_LINK_SPEED_100: + case ETH_SPEED_NUM_100M: incval = IXGBE_INCVAL_100; shift = IXGBE_INCVAL_SHIFT_100; break; - case ETH_LINK_SPEED_1000: + case ETH_SPEED_NUM_1G: incval = IXGBE_INCVAL_1GB; shift = IXGBE_INCVAL_SHIFT_1GB; break; - case ETH_LINK_SPEED_10000: + case ETH_SPEED_NUM_10G: default: incval = IXGBE_INCVAL_10GB; shift = IXGBE_INCVAL_SHIFT_10GB; @@ -5806,6 +7073,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* Independent of link speed. */ incval = 1; /* Cycles read will be interpreted as ns. */ @@ -5845,8 +7114,7 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; adapter->systime_tc.nsec += delta; adapter->rx_tstamp_tc.nsec += delta; @@ -5859,8 +7127,7 @@ static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { uint64_t ns; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; ns = rte_timespec_to_ns(ts); /* Set the timecounters to a new value. 
*/ @@ -5875,8 +7142,7 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) { uint64_t ns, systime_cycles; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; systime_cycles = ixgbe_read_systime_cyclecounter(dev); ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); @@ -5907,7 +7173,7 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev) /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), - (ETHER_TYPE_1588 | + (RTE_ETHER_TYPE_1588 | IXGBE_ETQF_FILTER_EN | IXGBE_ETQF_1588)); @@ -5957,8 +7223,7 @@ ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, uint32_t flags __rte_unused) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint32_t tsync_rxctl; uint64_t rx_tstamp_cycles; uint64_t ns; @@ -5979,8 +7244,7 @@ ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, struct timespec *timestamp) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct ixgbe_adapter *adapter = dev->data->dev_private; uint32_t tsync_txctl; uint64_t tx_tstamp_cycles; uint64_t ns; @@ -6037,6 +7301,12 @@ ixgbe_get_regs(struct rte_eth_dev *dev, const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? ixgbe_regs_mac_82598EB : ixgbe_regs_others; + if (data == NULL) { + regs->length = ixgbe_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { @@ -6061,6 +7331,12 @@ ixgbevf_get_regs(struct rte_eth_dev *dev, int count = 0; const struct reg_info *reg_group; + if (data == NULL) { + regs->length = ixgbevf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { @@ -6124,6 +7400,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev, return eeprom->ops.write_buffer(hw, first, length, data); } +static int +ixgbe_get_module_info(struct rte_eth_dev *dev, + struct rte_eth_dev_module_info *modinfo) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status; + uint8_t sff8472_rev, addr_mode; + bool page_swap = false; + + /* Check whether we support SFF-8472 or not */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_COMP, + &sff8472_rev); + if (status != 0) + return -EIO; + + /* addressing mode is not supported */ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_SFF_8472_SWAP, + &addr_mode); + if (status != 0) + return -EIO; + + if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) { + PMD_DRV_LOG(ERR, + "Address change required to access page 0xA2, " + "but not supported. Please report the module " + "type to the driver maintainers."); + page_swap = true; + } + + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) { + /* We have a SFP, but it does not support SFF-8472 */ + modinfo->type = RTE_ETH_MODULE_SFF_8079; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN; + } else { + /* We have a SFP which supports a revision of SFF-8472. 
*/ + modinfo->type = RTE_ETH_MODULE_SFF_8472; + modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN; + } + + return 0; +} + +static int +ixgbe_get_module_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID; + uint8_t databyte = 0xFF; + uint8_t *data = info->data; + uint32_t i = 0; + + if (info->length == 0) + return -EINVAL; + + for (i = info->offset; i < info->offset + info->length; i++) { + if (i < RTE_ETH_MODULE_SFF_8079_LEN) + status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte); + else + status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte); + + if (status != 0) + return -EIO; + + data[i - info->offset] = databyte; + } + + return 0; +} + uint16_t ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { switch (mac_type) { @@ -6135,6 +7483,9 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { case ixgbe_mac_X550EM_x_vf: case ixgbe_mac_X550EM_a_vf: return ETH_RSS_RETA_SIZE_64; + case ixgbe_mac_X540_vf: + case ixgbe_mac_82599_vf: + return 0; default: return ETH_RSS_RETA_SIZE_128; } @@ -6201,6 +7552,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); struct ixgbe_dcb_tc_config *tc; + struct rte_eth_dcb_tc_queue_mapping *tc_queue; + uint8_t nb_tcs; uint8_t i, j; if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) @@ -6208,19 +7561,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, else dcb_info->nb_tcs = 1; + tc_queue = &dcb_info->tc_queue; + nb_tcs = dcb_info->nb_tcs; + if (dcb_config->vt_mode) { /* vt is enabled*/ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; - for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { - for (j = 0; j < dcb_info->nb_tcs; j++) { - dcb_info->tc_queue.tc_rxq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; - dcb_info->tc_queue.tc_txq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; + if (RTE_ETH_DEV_SRIOV(dev).active > 0) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[0][j].base = j; + tc_queue->tc_rxq[0][j].nb_queue = 1; + tc_queue->tc_txq[0][j].base = j; + tc_queue->tc_txq[0][j].nb_queue = 1; + } + } else { + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < nb_tcs; j++) { + tc_queue->tc_rxq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_rxq[i][j].nb_queue = 1; + tc_queue->tc_txq[i][j].base = + i * nb_tcs + j; + tc_queue->tc_txq[i][j].nb_queue = 1; + } } } } else { /* vt is disabled*/ @@ -6279,7 +7644,8 @@ ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, uint32_t etag_etype; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6299,12 +7665,15 @@ ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); if (l2_tunnel == NULL) return -EINVAL; switch (l2_tunnel->l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); break; default: @@ -6323,7 +7692,8 @@ 
ixgbe_e_tag_enable(struct ixgbe_hw *hw) uint32_t etag_etype; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6342,9 +7712,12 @@ ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); switch (l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = TRUE; ret = ixgbe_e_tag_enable(hw); break; default: @@ -6363,7 +7736,8 @@ ixgbe_e_tag_disable(struct ixgbe_hw *hw) uint32_t etag_etype; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6382,9 +7756,12 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, { int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); switch (l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = FALSE; ret = ixgbe_e_tag_disable(hw); break; default: @@ -6406,7 +7783,8 @@ ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, uint32_t rar_low, rar_high; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6441,7 +7819,8 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, uint32_t rar_low, rar_high; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6459,24 +7838,120 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; rar_low = l2_tunnel->tunnel_id; - IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + + return ret; + } + } + + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." + " Please remove a rule before adding a new one."); + return -EINVAL; +} + +static inline struct ixgbe_l2_tn_filter * +ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) +{ + int ret; + + ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); + if (ret < 0) + return NULL; + + return l2_tn_info->hash_map[ret]; +} + +static inline int +ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_filter *l2_tn_filter) +{ + int ret; + + ret = rte_hash_add_key(l2_tn_info->hash_handle, + &l2_tn_filter->key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert L2 tunnel filter" + " to hash table %d!", + ret); + return ret; + } + + l2_tn_info->hash_map[ret] = l2_tn_filter; + + TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + + return 0; +} + +static inline int +ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) +{ + int ret; + struct ixgbe_l2_tn_filter *l2_tn_filter; - return ret; - } + ret = rte_hash_del_key(l2_tn_info->hash_handle, key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "No such L2 tunnel filter to delete %d!", + ret); + return ret; } - PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
- " Please remove a rule before adding a new one."); - return -EINVAL; + l2_tn_filter = l2_tn_info->hash_map[ret]; + l2_tn_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + rte_free(l2_tn_filter); + + return 0; } /* Add l2 tunnel filter */ -static int +int ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool restore) { - int ret = 0; + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + struct ixgbe_l2_tn_filter *node; + + if (!restore) { + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + + node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); + + if (node) { + PMD_DRV_LOG(ERR, + "The L2 tunnel filter already exists!"); + return -EINVAL; + } + + node = rte_zmalloc("ixgbe_l2_tn", + sizeof(struct ixgbe_l2_tn_filter), + 0); + if (!node) + return -ENOMEM; + + rte_memcpy(&node->key, + &key, + sizeof(struct ixgbe_l2_tn_key)); + node->pool = l2_tunnel->pool; + ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); + if (ret < 0) { + rte_free(node); + return ret; + } + } switch (l2_tunnel->l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: @@ -6488,15 +7963,27 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, break; } + if ((!restore) && (ret < 0)) + (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + return ret; } /* Delete l2 tunnel filter */ -static int +int ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel) { - int ret = 0; + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + if (ret < 0) + return ret; switch (l2_tunnel->l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: @@ -6522,7 +8009,7 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op, void *arg) { - int ret = 0; + int ret; if (filter_op == RTE_ETH_FILTER_NOP) return 0; @@ -6537,7 +8024,8 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, case RTE_ETH_FILTER_ADD: ret = ixgbe_dev_l2_tunnel_filter_add (dev, - (struct rte_eth_l2_tunnel_conf *)arg); + (struct rte_eth_l2_tunnel_conf *)arg, + FALSE); break; case RTE_ETH_FILTER_DELETE: ret = ixgbe_dev_l2_tunnel_filter_del @@ -6560,7 +8048,8 @@ ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6579,10 +8068,13 @@ ixgbe_dev_l2_tunnel_forwarding_enable (struct rte_eth_dev *dev, enum rte_eth_tunnel_type l2_tunnel_type) { + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); int ret = 0; switch (l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = TRUE; ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); break; default: @@ -6600,10 +8092,13 @@ ixgbe_dev_l2_tunnel_forwarding_disable (struct rte_eth_dev *dev, enum rte_eth_tunnel_type l2_tunnel_type) { + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); int ret = 0; switch (l2_tunnel_type) { case 
RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = FALSE; ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); break; default: @@ -6620,20 +8115,22 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel, bool en) { + struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); int ret = 0; uint32_t vmtir, vmvir; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { + if (l2_tunnel->vf_id >= pci_dev->max_vfs) { PMD_DRV_LOG(ERR, "VF id %u should be less than %u", l2_tunnel->vf_id, - dev->pci_dev->max_vfs); + pci_dev->max_vfs); return -EINVAL; } if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6703,7 +8200,8 @@ ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6876,7 +8374,8 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6912,7 +8411,8 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6937,70 +8437,417 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, return ret; } -/* ixgbevf_update_xcast_mode - Update Multicast mode - * @hw: pointer to the HW structure - * @netdev: pointer to net device structure - * @xcast_mode: new multicast mode - * - * Updates the Multicast Mode of VF. 
- */ -static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, - int xcast_mode) +static int +ixgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev) { - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[2]; - s32 err; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; - switch (hw->api_version) { - case ixgbe_mbox_api_12: + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_PROMISC)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; break; default: - return -EOPNOTSUPP; + ret = -EAGAIN; + break; } - msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; - msgbuf[1] = xcast_mode; + return ret; +} - err = mbx->ops.write_posted(hw, msgbuf, 2, 0); - if (err) - return err; +static int +ixgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; - err = mbx->ops.read_posted(hw, msgbuf, 2, 0); - if (err) - return err; + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static int +ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + int mode = IXGBEVF_XCAST_MODE_ALLMULTI; + + switch (hw->mac.ops.update_xcast_mode(hw, mode)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static int +ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + switch (hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI)) { + case IXGBE_SUCCESS: + ret = 0; + break; + case IXGBE_ERR_FEATURE_NOT_SUPPORTED: + ret = -ENOTSUP; + break; + default: + ret = -EAGAIN; + break; + } + + return ret; +} + +static void ixgbevf_mbx_process(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 in_msg = 0; + + /* peek the message first */ + in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM); + + /* PF reset VF event */ + if (in_msg == IXGBE_PF_CONTROL_MSG) { + /* dummy mbx read to ack pf */ + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, + NULL); + } +} + +static int +ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + ixgbevf_intr_disable(dev); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); + intr->flags = 0; + + /* only one misc vector supported - mailbox */ + eicr &= IXGBE_VTEICR_MASK; + if (eicr == IXGBE_MISC_VEC_ID) + intr->flags |= IXGBE_FLAG_MAILBOX; + + return 0; +} + +static int +ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbevf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; + } - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) - return -EPERM; + ixgbevf_intr_enable(dev); return 0; } static void 
-ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +ixgbevf_dev_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbevf_dev_interrupt_get_status(dev); + ixgbevf_dev_interrupt_action(dev); +} + +/** + * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the Tx security block + **/ +int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECTX_POLL 40 + + int i; + int sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg |= IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) + break; + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECTX_POLL) + PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " + "path fully disabled. Continuing with init."); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path + * @hw: pointer to hardware structure + * + * Enables the transmit data path. + **/ +int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ + uint32_t sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/* restore n-tuple filter */ +static inline void +ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter *node; + + TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) { + ixgbe_inject_5tuple_filter(dev, node); + } +} + +/* restore ethernet type filter */ +static inline void +ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; + + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i)) { + IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), + filter_info->ethertype_filters[i].etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), + filter_info->ethertype_filters[i].etqs); + IXGBE_WRITE_FLUSH(hw); + } + } +} + +/* restore SYN filter */ +static inline void +ixgbe_syn_filter_restore(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t synqf; + + synqf = filter_info->syn_info; + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + } +} + +/* restore L2 tunnel filter */ +static inline void +ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_filter *node; + struct rte_eth_l2_tunnel_conf l2_tn_conf; + + TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) { + l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type; + l2_tn_conf.tunnel_id = node->key.tn_id; + l2_tn_conf.pool = node->pool; + (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE); + } +} 
+ +/* restore rss filter */ +static inline void +ixgbe_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->rss_info.conf.queue_num) + ixgbe_config_rss_filter(dev, + &filter_info->rss_info, TRUE); +} + +static int +ixgbe_filter_restore(struct rte_eth_dev *dev) +{ + ixgbe_ntuple_filter_restore(dev); + ixgbe_ethertype_filter_restore(dev); + ixgbe_syn_filter_restore(dev); + ixgbe_fdir_filter_restore(dev); + ixgbe_l2_tn_filter_restore(dev); + ixgbe_rss_filter_restore(dev); - ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); + return 0; } static void -ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev) { + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); + if (l2_tn_info->e_tag_en) + (void)ixgbe_e_tag_enable(hw); + + if (l2_tn_info->e_tag_fwd_en) + (void)ixgbe_e_tag_forwarding_en_dis(dev, 1); + + (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type); } -static struct rte_driver rte_ixgbe_driver = { - .type = PMD_PDEV, - .init = rte_ixgbe_pmd_init, -}; +/* remove all the n-tuple filters */ +void +ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter *p_5tuple; -static struct rte_driver rte_ixgbevf_driver = { - .type = PMD_PDEV, - .init = rte_ixgbevf_pmd_init, -}; + while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) + ixgbe_remove_5tuple_filter(dev, p_5tuple); +} + +/* remove all the ether type filters */ +void +ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i; -PMD_REGISTER_DRIVER(rte_ixgbe_driver); -PMD_REGISTER_DRIVER(rte_ixgbevf_driver); + for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { + if (filter_info->ethertype_mask & (1 << i) && + !filter_info->ethertype_filters[i].conf) { + (void)ixgbe_ethertype_filter_remove(filter_info, + (uint8_t)i); + IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0); + IXGBE_WRITE_FLUSH(hw); + } + } +} + +/* remove the SYN filter */ +void +ixgbe_clear_syn_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) { + filter_info->syn_info = 0; + + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0); + IXGBE_WRITE_FLUSH(hw); + } +} + +/* remove all the L2 tunnel filters */ +int +ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_filter *l2_tn_filter; + struct rte_eth_l2_tunnel_conf l2_tn_conf; + int ret = 0; + + while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) { + l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type; + l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id; + l2_tn_conf.pool = l2_tn_filter->pool; + ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf); + if (ret < 0) + return ret; + } + + return 0; +} 
+
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_ixgbe_vf,
+        IXGBEVF_DEVARG_PFLINK_FULLCHK "=<0|1>");
+
+RTE_INIT(ixgbe_init_log)
+{
+        ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
+        if (ixgbe_logtype_init >= 0)
+                rte_log_set_level(ixgbe_logtype_init, RTE_LOG_NOTICE);
+        ixgbe_logtype_driver = rte_log_register("pmd.net.ixgbe.driver");
+        if (ixgbe_logtype_driver >= 0)
+                rte_log_set_level(ixgbe_logtype_driver, RTE_LOG_NOTICE);
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+        ixgbe_logtype_rx = rte_log_register("pmd.net.ixgbe.rx");
+        if (ixgbe_logtype_rx >= 0)
+                rte_log_set_level(ixgbe_logtype_rx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+        ixgbe_logtype_tx = rte_log_register("pmd.net.ixgbe.tx");
+        if (ixgbe_logtype_tx >= 0)
+                rte_log_set_level(ixgbe_logtype_tx, RTE_LOG_DEBUG);
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+        ixgbe_logtype_tx_free = rte_log_register("pmd.net.ixgbe.tx_free");
+        if (ixgbe_logtype_tx_free >= 0)
+                rte_log_set_level(ixgbe_logtype_tx_free, RTE_LOG_DEBUG);
+#endif
+}
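
The rate-limit hunk near the top of this section derives an integer/fractional rate factor (rf_int/rf_dec) from the current link speed and the requested tx_rate before programming RTTBCNRC. A minimal, self-contained sketch of that arithmetic follows; the numeric values assigned to the RF_*/RS_ENA constants and the rate_factor() helper are illustrative assumptions standing in for the driver's IXGBE_RTTBCNRC_* macros, not taken from this patch.

/*
 * Illustrative sketch of the RTTBCNRC rate-factor math used in the patch.
 * The constant values below are assumed placeholders for the driver's
 * IXGBE_RTTBCNRC_* definitions; check base/ixgbe_type.h for the real ones.
 */
#include <stdint.h>
#include <stdio.h>

#define RF_INT_SHIFT 14          /* assumed IXGBE_RTTBCNRC_RF_INT_SHIFT */
#define RF_INT_MASK  0x00FFC000  /* assumed IXGBE_RTTBCNRC_RF_INT_MASK_M */
#define RF_DEC_MASK  0x00003FFF  /* assumed IXGBE_RTTBCNRC_RF_DEC_MASK */
#define RS_ENA       0x80000000  /* assumed IXGBE_RTTBCNRC_RS_ENA */

static uint32_t rate_factor(uint32_t link_speed, uint32_t tx_rate)
{
        uint32_t rf_int, rf_dec;

        if (tx_rate == 0)
                return 0;  /* writing 0 leaves the queue unlimited */

        rf_int = link_speed / tx_rate;                 /* integer part */
        rf_dec = link_speed % tx_rate;
        rf_dec = (rf_dec << RF_INT_SHIFT) / tx_rate;   /* fractional part */

        return RS_ENA |
               ((rf_int << RF_INT_SHIFT) & RF_INT_MASK) |
               (rf_dec & RF_DEC_MASK);
}

int main(void)
{
        /* e.g. a 10G link capped at 3000 Mb/s gives a factor of ~3.33 */
        printf("bcnrc=0x%08x\n", rate_factor(10000, 3000));
        return 0;
}

For 10000/3000 this yields rf_int = 3 and rf_dec = 5461, i.e. roughly 3.33 when the fraction is read in 2^-RF_INT_SHIFT units.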
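
The new get_module_info/get_module_eeprom callbacks added above let an application identify an SFP module (SFF-8079 vs. SFF-8472) and read its EEPROM. The sketch below shows how such a dump could look from the application side; it assumes the rte_eth_dev_get_module_info()/rte_eth_dev_get_module_eeprom() ethdev wrappers of the same DPDK generation, and dump_module_eeprom() is a hypothetical helper, not part of this patch.

/*
 * Hedged usage sketch: dump the SFP module EEPROM of a port through the
 * module-info dev_ops wired up in this patch.  Error handling is trimmed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <rte_ethdev.h>

static int dump_module_eeprom(uint16_t port_id)
{
        struct rte_eth_dev_module_info modinfo;
        struct rte_dev_eeprom_info eeprom;
        uint8_t *buf;
        uint32_t i;
        int ret;

        /* Ask the PMD which SFF layout and length the module exposes */
        ret = rte_eth_dev_get_module_info(port_id, &modinfo);
        if (ret != 0)
                return ret;  /* -ENOTSUP, -EIO, ... */

        buf = calloc(1, modinfo.eeprom_len);
        if (buf == NULL)
                return -ENOMEM;

        memset(&eeprom, 0, sizeof(eeprom));
        eeprom.data = buf;
        eeprom.offset = 0;
        eeprom.length = modinfo.eeprom_len;  /* full dump */

        ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
        if (ret == 0)
                for (i = 0; i < eeprom.length; i++)
                        printf("%02x%s", buf[i], (i % 16 == 15) ? "\n" : " ");

        free(buf);
        return ret;
}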