X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=278c925e92347481f392224a390c8da13308e1b6;hb=a14de8b498d10e23e82d9e6983490b3ffd24c25a;hp=ca65103ec1e923fd8f05aa296697a536a7584dc7;hpb=6c52c126f27a80a32dc43ba11fee9aa0fa0f29e6;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ca65103ec1..7efe0052bb 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
 #include
 #include
 #include
+#include

 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -68,6 +69,11 @@
 #include "ixgbe_ethdev.h"
 #include "ixgbe_bypass.h"
 #include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "ixgbe_regs.h"
+
+#include "rte_pmd_ixgbe.h"

 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
@@ -82,6 +88,9 @@
  */
 #define IXGBE_FC_LO    0x40

+/* Default minimum inter-interrupt interval for EITR configuration */
+#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
+
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680

@@ -91,6 +100,7 @@

 #define IXGBE_MMW_SIZE_DEFAULT        0x4
 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
+#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */

 /*
  *  Default values for RX/TX configuration
@@ -116,7 +126,51 @@
 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

+#define IXGBE_HKEY_MAX_INDEX 10
+
+/* Additional timesync values. */
+#define NSEC_PER_SEC             1000000000L
+#define IXGBE_INCVAL_10GB        0x66666666
+#define IXGBE_INCVAL_1GB         0x40000000
+#define IXGBE_INCVAL_100         0x50000000
+#define IXGBE_INCVAL_SHIFT_10GB  28
+#define IXGBE_INCVAL_SHIFT_1GB   24
+#define IXGBE_INCVAL_SHIFT_100   21
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+
+#define IXGBE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
+#define IXGBE_VT_CTL_POOLING_MODE_MASK  0x00030000
+#define IXGBE_VT_CTL_POOLING_MODE_ETAG  0x00010000
+#define DEFAULT_ETAG_ETYPE              0x893f
+#define IXGBE_ETAG_ETYPE                0x00005084
+#define IXGBE_ETAG_ETYPE_MASK           0x0000ffff
+#define IXGBE_ETAG_ETYPE_VALID          0x80000000
+#define IXGBE_RAH_ADTYPE                0x40000000
+#define IXGBE_RAL_ETAG_FILTER_MASK      0x00003fff
+#define IXGBE_VMVIR_TAGA_MASK           0x18000000
+#define IXGBE_VMVIR_TAGA_ETAG_INSERT    0x08000000
+#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_QDE_STRIP_TAG             0x00000004
+#define IXGBE_VTEICR_MASK               0x07
+
+enum ixgbevf_xcast_modes {
+	IXGBEVF_XCAST_MODE_NONE = 0,
+	IXGBEVF_XCAST_MODE_MULTI,
+	IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
+#define IXGBE_EXVET_VET_EXT_SHIFT  16
+#define IXGBE_DMATXCTL_VT_MASK     0xFFFF0000
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -131,20 +185,34 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *stats);
+static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
+				struct rte_eth_xstat *xstats, unsigned n);
+static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+				struct rte_eth_xstat *xstats, unsigned n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 		uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
+static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+				size_t fw_size);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
 			       struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 				 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
-static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+			       enum rte_vlan_type vlan_type,
+			       uint16_t tpid_id);
 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
 		int on);
@@ -171,23 +239,31 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+				      struct rte_intr_handle *handle);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
 		void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 		uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+				       struct ether_addr *mac_addr);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static int is_ixgbe_pmd(const char *driver_name);

 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
 		struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -197,35 +273,42 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 		uint16_t queue, int on);
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+					    uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+					     uint16_t queue_id);
+static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+				 uint8_t queue, uint8_t msix_vector);
+static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);

 /* For Eth VMDQ APIs support */
-static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-		ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
-static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
-		uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
-		uint64_t pool_mask,uint8_t vlan_on);
+static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
+		ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-		struct rte_eth_vmdq_mirror_conf *mirror_conf,
+		struct rte_eth_mirror_conf *mirror_conf,
 		uint8_t rule_id, uint8_t on);
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
 		uint8_t rule_id);
+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+					  uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+					   uint16_t queue_id);
+static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+			       uint8_t queue, uint8_t msix_vector);
+static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 		uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
-		uint16_t tx_rate, uint64_t q_msk);
 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
 				 struct ether_addr *mac_addr,
 				 uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter,
-			bool add);
+static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+					 struct ether_addr *mac_addr);
 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
 			struct rte_eth_syn_filter *filter);
 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op,
 			void *arg);
@@ -235,17 +318,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
 			struct ixgbe_5tuple_filter *filter);
 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
 			struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *filter,
-			bool add);
 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op,
 			void *arg);
 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
 			struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter,
-			bool add);
 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
 			enum rte_filter_op filter_op,
 			void *arg);
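For readers unfamiliar with the filter bookkeeping that the new init/uninit prototypes above refer to, the driver keeps each filter family on a BSD `sys/queue.h` TAILQ that is drained on uninit. A minimal, self-contained sketch of that pattern (illustrative only; all names here are hypothetical stand-ins, not part of the patch):

#include <stdlib.h>
#include <sys/queue.h>

struct demo_filter {
	int match_id;                      /* stand-in for real filter keys */
	TAILQ_ENTRY(demo_filter) entries;  /* linkage used by the TAILQ_* macros */
};

TAILQ_HEAD(demo_filter_list, demo_filter);

static void demo_filter_list_uninit(struct demo_filter_list *list)
{
	struct demo_filter *f;

	/* same drain loop the *_filter_uninit() helpers in this patch use */
	while ((f = TAILQ_FIRST(list)) != NULL) {
		TAILQ_REMOVE(list, f, entries);
		free(f);
	}
}

The head is prepared once with TAILQ_INIT() at device init and drained exactly like this at device uninit, so no filter object can outlive its port.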
@@ -257,13 +334,65 @@ static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
 		void *arg);
 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+				      struct ether_addr *mc_addr_set,
+				      uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+				  struct rte_eth_dcb_info *dcb_info);
+
+static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbe_get_regs(struct rte_eth_dev *dev,
+			  struct rte_dev_reg_info *regs);
+static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
+static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
+			    struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
+			    struct rte_dev_eeprom_info *eeprom);
+
+static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbevf_get_regs(struct rte_eth_dev *dev,
+			    struct rte_dev_reg_info *regs);
+
+static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					    struct timespec *timestamp,
+					    uint32_t flags);
+static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					    struct timespec *timestamp);
+static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
+				    struct timespec *timestamp);
+static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
+				     const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+					  void *param);
+
+static int ixgbe_dev_l2_tunnel_eth_type_conf
+	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
+static int ixgbe_dev_l2_tunnel_offload_set
+	(struct rte_eth_dev *dev,
+	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
+	 uint32_t mask,
+	 uint8_t en);
+static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+					     enum rte_filter_op filter_op,
+					     void *arg);
+
+static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+					 struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+					 struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
-#define UPDATE_VF_STAT(reg, last, cur)	        \
+#define UPDATE_VF_STAT(reg, last, cur)		\
 {                                               \
-	u32 latest = IXGBE_READ_REG(hw, reg);   \
-	cur += latest - last;                   \
+	uint32_t latest = IXGBE_READ_REG(hw, reg); \
+	cur += (latest - last) & UINT_MAX;      \
 	last = latest;                          \
 }

@@ -276,45 +405,116 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
 	last = latest;                           \
 }

-#define IXGBE_SET_HWSTRIP(h, q) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 		(h)->bitmap[idx] |= 1 << bit;\
-	}while(0)
+	} while (0)

-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 		(h)->bitmap[idx] &= ~(1 << bit);\
-	}while(0)
+	} while (0)

-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
-		uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-		uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+		uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+		uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
 		(r) = (h)->bitmap[idx] >> bit & 1;\
-	}while(0)
+	} while (0)

 /*
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+	{ .vendor_id = 0, /* sentinel */ },
 };
-
 /*
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+	{ .vendor_id = 0, /* sentinel */ },
+};

-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = IXGBE_MAX_RING_DESC,
+	.nb_min = IXGBE_MIN_RING_DESC,
+	.nb_align = IXGBE_RXD_ALIGN,
+};

+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = IXGBE_MAX_RING_DESC,
+	.nb_min = IXGBE_MIN_RING_DESC,
+	.nb_align = IXGBE_TXD_ALIGN,
+	.nb_seg_max = IXGBE_TX_MAX_SEG,
+	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
 };

 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -330,9 +530,14 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.allmulticast_disable = ixgbe_dev_allmulticast_disable,
 	.link_update          = ixgbe_dev_link_update,
 	.stats_get            = ixgbe_dev_stats_get,
+	.xstats_get           = ixgbe_dev_xstats_get,
 	.stats_reset          = ixgbe_dev_stats_reset,
+	.xstats_reset         = ixgbe_dev_xstats_reset,
+	.xstats_get_names     = ixgbe_dev_xstats_get_names,
 	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+	.fw_version_get       = ixgbe_fw_version_get,
 	.dev_infos_get        = ixgbe_dev_info_get,
+	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
 	.mtu_set              = ixgbe_dev_mtu_set,
 	.vlan_filter_set      = ixgbe_vlan_filter_set,
 	.vlan_tpid_set        = ixgbe_vlan_tpid_set,
@@ -343,6 +548,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.tx_queue_start       = ixgbe_dev_tx_queue_start,
 	.tx_queue_stop        = ixgbe_dev_tx_queue_stop,
 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+	.rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
 	.rx_queue_count       = ixgbe_dev_rx_queue_count,
 	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
@@ -355,16 +562,12 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
 	.mac_addr_add         = ixgbe_add_rar,
 	.mac_addr_remove      = ixgbe_remove_rar,
+	.mac_addr_set         = ixgbe_set_default_mac_addr,
 	.uc_hash_table_set    = ixgbe_uc_hash_table_set,
 	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
 	.mirror_rule_set      = ixgbe_mirror_rule_set,
 	.mirror_rule_reset    = ixgbe_mirror_rule_reset,
-	.set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
-	.set_vf_rx            = ixgbe_set_pool_rx,
-	.set_vf_tx            = ixgbe_set_pool_tx,
-	.set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
 	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
-	.set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
 	.reta_update          = ixgbe_dev_rss_reta_update,
 	.reta_query           = ixgbe_dev_rss_reta_query,
 #ifdef RTE_NIC_BYPASS
@@ -381,6 +584,25 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.rss_hash_update      = ixgbe_dev_rss_hash_update,
 	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
 	.filter_ctrl          = ixgbe_dev_filter_ctrl,
+	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+	.rxq_info_get         = ixgbe_rxq_info_get,
+	.txq_info_get         = ixgbe_txq_info_get,
+	.timesync_enable      = ixgbe_timesync_enable,
+	.timesync_disable     = ixgbe_timesync_disable,
+	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
+	.get_reg              = ixgbe_get_regs,
+	.get_eeprom_length    = ixgbe_get_eeprom_length,
+	.get_eeprom           = ixgbe_get_eeprom,
+	.set_eeprom           = ixgbe_set_eeprom,
+	.get_dcb_info         = ixgbe_dev_get_dcb_info,
+	.timesync_adjust_time = ixgbe_timesync_adjust_time,
+	.timesync_read_time   = ixgbe_timesync_read_time,
+	.timesync_write_time  = ixgbe_timesync_write_time,
+	.l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
+	.l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
+	.udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
 };
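Every entry in an eth_dev_ops table is reached through the generic ethdev layer, which checks the function pointer before dispatching and reports -ENOTSUP for ops a PMD leaves NULL. A minimal sketch of that dispatch pattern (illustrative only; the stand-in `demo_*` types and wrapper are hypothetical, only the op name `fw_version_get` comes from the table above):

#include <errno.h>
#include <stddef.h>

struct demo_eth_dev; /* stand-in for struct rte_eth_dev */

struct demo_eth_dev_ops {
	int (*fw_version_get)(struct demo_eth_dev *dev, char *ver, size_t len);
};

struct demo_eth_dev {
	const struct demo_eth_dev_ops *dev_ops;
};

static int demo_fw_version_get(struct demo_eth_dev *dev, char *ver, size_t len)
{
	/* the ethdev layer returns -ENOTSUP when an op is not provided */
	if (dev->dev_ops->fw_version_get == NULL)
		return -ENOTSUP;
	return dev->dev_ops->fw_version_get(dev, ver, len);
}

This is why the patch can add ops such as fw_version_get or the timesync family incrementally: ports whose driver lacks an op simply get -ENOTSUP instead of a crash.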
 /*
@@ -393,21 +615,204 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.dev_configure        = ixgbevf_dev_configure,
 	.dev_start            = ixgbevf_dev_start,
 	.dev_stop             = ixgbevf_dev_stop,
 	.link_update          = ixgbe_dev_link_update,
 	.stats_get            = ixgbevf_dev_stats_get,
+	.xstats_get           = ixgbevf_dev_xstats_get,
 	.stats_reset          = ixgbevf_dev_stats_reset,
+	.xstats_reset         = ixgbevf_dev_stats_reset,
+	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
 	.dev_close            = ixgbevf_dev_close,
+	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
+	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
 	.dev_infos_get        = ixgbevf_dev_info_get,
+	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
 	.mtu_set              = ixgbevf_dev_set_mtu,
 	.vlan_filter_set      = ixgbevf_vlan_filter_set,
 	.vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
 	.vlan_offload_set     = ixgbevf_vlan_offload_set,
 	.rx_queue_setup       = ixgbe_dev_rx_queue_setup,
 	.rx_queue_release     = ixgbe_dev_rx_queue_release,
+	.rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
 	.tx_queue_setup       = ixgbe_dev_tx_queue_setup,
 	.tx_queue_release     = ixgbe_dev_tx_queue_release,
+	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+	.rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
 	.mac_addr_add         = ixgbevf_add_mac_addr,
 	.mac_addr_remove      = ixgbevf_remove_mac_addr,
+	.set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+	.rxq_info_get         = ixgbe_rxq_info_get,
+	.txq_info_get         = ixgbe_txq_info_get,
+	.mac_addr_set         = ixgbevf_set_default_mac_addr,
+	.get_reg              = ixgbevf_get_regs,
+	.reta_update          = ixgbe_dev_rss_reta_update,
+	.reta_query           = ixgbe_dev_rss_reta_query,
+	.rss_hash_update      = ixgbe_dev_rss_hash_update,
+	.rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+};
+
+/* store statistics names and its offset in stats structure */
+struct rte_ixgbe_xstats_name_off {
+	char name[RTE_ETH_XSTATS_NAME_SIZE];
+	unsigned offset;
+};
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
+	{"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
+	{"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
+	{"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
+	{"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
+	{"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
+	{"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
+	{"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
+	{"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
+	{"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
+	{"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+	{"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
+	{"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
+	{"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
+	{"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
+	{"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+		prc1023)},
+	{"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+		prc1522)},
+	{"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
+	{"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
+	{"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
+	{"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
+	{"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
+	{"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
+	{"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
+	{"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
+	{"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
+	{"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
+	{"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
+	{"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
+	{"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
+	{"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
+	{"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
+	{"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
+	{"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+		ptc1023)},
+	{"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+		ptc1522)},
+	{"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
+	{"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
+	{"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
+	{"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
+
+	{"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
+		fdirustat_add)},
+	{"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
+		fdirustat_remove)},
+	{"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
+		fdirfstat_fadd)},
+	{"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
+		fdirfstat_fremove)},
+	{"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
+		fdirmatch)},
+	{"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
+		fdirmiss)},
+
+	{"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
+	{"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
+	{"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
+		fclast)},
+	{"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
+	{"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
+	{"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
+	{"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
+	{"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
+		fcoe_noddp)},
+	{"rx_fcoe_no_direct_data_placement_ext_buff",
+		offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
+
+	{"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+		lxontxc)},
+	{"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+		lxonrxc)},
+	{"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+		lxofftxc)},
+	{"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+		lxoffrxc)},
+	{"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
+};
+
+#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
+			   sizeof(rte_ixgbe_stats_strings[0]))
+
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			       sizeof(rte_ixgbe_macsec_strings[0]))
+
+/* Per-queue statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
+	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
+	{"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
+	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
+	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
+};
+
+#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
+				 sizeof(rte_ixgbe_rxq_strings[0]))
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
+	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
+	{"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
+	{"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
+		pxon2offc)},
 };

+#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
+				 sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
+	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
+};
+
+#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
+			   sizeof(rte_ixgbevf_stats_strings[0]))
+
 /**
  * Atomically reads the link status information from global
  * structure rte_eth_dev.
@@ -537,7 +942,7 @@ ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
 {
 	uint32_t i;

-	for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+	for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
 	}
@@ -565,10 +970,11 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	if ((hw->mac.type != ixgbe_mac_82599EB) &&
 	    (hw->mac.type != ixgbe_mac_X540) &&
 	    (hw->mac.type != ixgbe_mac_X550) &&
-	    (hw->mac.type != ixgbe_mac_X550EM_x))
+	    (hw->mac.type != ixgbe_mac_X550EM_x) &&
+	    (hw->mac.type != ixgbe_mac_X550EM_a))
 		return -ENOSYS;

-	PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
 		     queue_id, stat_idx);

@@ -594,20 +1000,19 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 	else
 		stat_mappings->rqsmr[n] |= qsmr_mask;

-	PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
 		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
 		     queue_id, stat_idx);
-	PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
 		     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);

 	/* Now write the mapping in the appropriate register */
 	if (is_rx) {
-		PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
 			     stat_mappings->rqsmr[n], n);
 		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
-	}
-	else {
-		PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+	} else {
+		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
 			     stat_mappings->tqsm[n], n);
 		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
 	}
@@ -615,7 +1020,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 }

 static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
 {
 	struct ixgbe_stat_mapping_registers *stat_mappings =
 		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@@ -633,7 +1038,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
 }

 static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 {
 	uint8_t i;
 	struct ixgbe_dcb_tc_config *tc;
@@ -656,7 +1061,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 	tc = &dcb_config->tc_config[0];
 	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
 	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-	for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
 		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
 		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
 	}
@@ -670,7 +1075,8 @@ ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 	/*we only support 4 Tcs for X540, X550 */
 	if (hw->mac.type == ixgbe_mac_X540 ||
 	    hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x) {
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a) {
 		dcb_config->num_tcs.pg_tcs = 4;
 		dcb_config->num_tcs.pfc_tcs = 4;
 	}
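The queue-stats mapping hunks above pack several per-queue map fields into each 32-bit RQSMR/TQSM register, which is why the code derives a register index `n` and a shifted `qsmr_mask` from the queue id. A sketch of that index math (illustrative only; the 4-fields-of-8-bits layout is an assumption about the register format, and `demo_qmap` is a hypothetical helper, not driver code):

#include <stdint.h>

static void demo_qmap(uint8_t queue_id, uint8_t stat_idx, uint32_t regs[])
{
	uint32_t n = queue_id / 4;           /* which mapping register */
	uint32_t shift = (queue_id % 4) * 8; /* which byte field inside it */

	regs[n] &= ~(0xffu << shift);            /* clear the old mapping */
	regs[n] |= ((uint32_t)stat_idx & 0xff) << shift;
}

The driver additionally mirrors the computed values in stat_mappings->rqsmr[]/tqsm[] so ixgbe_restore_statistics_mapping() can replay them after a reset.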
@@ -716,10 +1122,11 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
 static int
 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 	struct ixgbe_hwstrip *hwstrip =
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -736,30 +1143,34 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
 	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

 	/*
 	 * For secondary processes, we don't initialise any further as primary
 	 * has already done this work. Only check we don't need a different
 	 * RX and TX function.
 	 */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		struct ixgbe_tx_queue *txq;
 		/* TX queue function in primary, set by last queue initialized
-		 * Tx queue may not initialized by primary process */
+		 * Tx queue may not initialized by primary process
+		 */
 		if (eth_dev->data->tx_queues) {
 			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
 			ixgbe_set_tx_function(eth_dev, txq);
 		} else {
 			/* Use default TX function if we get here */
-			PMD_INIT_LOG(INFO, "No TX queues configured yet. "
-				     "Using default TX function.");
+			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+				     "Using default TX function.");
 		}

 		ixgbe_set_rx_function(eth_dev);

 		return 0;
 	}
-	pci_dev = eth_dev->pci_dev;
+
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
@@ -787,7 +1198,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

 	/* Initialize DCB configuration*/
 	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
-	ixgbe_dcb_init(hw,dcb_config);
+	ixgbe_dcb_init(hw, dcb_config);
 	/* Get Hardware Flow Control setting */
 	hw->fc.requested_mode = ixgbe_fc_full;
 	hw->fc.current_mode = ixgbe_fc_full;
@@ -828,11 +1239,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

 	if (diag == IXGBE_ERR_EEPROM_VERSION) {
 		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
-			"LOM. Please be aware there may be issues associated "
-			"with your hardware.");
+			     "LOM. Please be aware there may be issues associated "
+			     "with your hardware.");
 		PMD_INIT_LOG(ERR, "If you are experiencing problems "
-			"please contact your Intel or hardware representative "
-			"who provided you with this hardware.");
+			     "please contact your Intel or hardware representative "
+			     "who provided you with this hardware.");
 	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
 		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
 	if (diag) {
@@ -840,6 +1251,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return -EIO;
 	}

+	/* Reset the hw statistics */
+	ixgbe_dev_stats_reset(eth_dev);
+
 	/* disable interrupt */
 	ixgbe_disable_intr(hw);

@@ -848,12 +1262,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-			hw->mac.num_rar_entries, 0);
+					       hw->mac.num_rar_entries, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
-			"Failed to allocate %u bytes needed to store "
-			"MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+			     "Failed to allocate %u bytes needed to store "
+			     "MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
 		return -ENOMEM;
 	}
 	/* Copy the permanent MAC address */
@@ -862,11 +1276,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

 	/* Allocate memory for storing hash filter MAC addresses */
 	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-			IXGBE_VMDQ_NUM_UC_MAC, 0);
+						    IXGBE_VMDQ_NUM_UC_MAC, 0);
 	if (eth_dev->data->hash_mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
-			"Failed to allocate %d bytes needed to store MAC addresses",
-			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+			     "Failed to allocate %d bytes needed to store MAC addresses",
+			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
 		return -ENOMEM;
 	}

@@ -896,27 +1310,224 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		     (int) hw->mac.type, (int) hw->phy.type);

 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);

-	rte_intr_callback_register(&(pci_dev->intr_handle),
-				   ixgbe_dev_interrupt_handler, (void *)eth_dev);
+	rte_intr_callback_register(intr_handle,
+				   ixgbe_dev_interrupt_handler, eth_dev);

-	/* enable uio intr after callback register */
-	rte_intr_enable(&(pci_dev->intr_handle));
+	/* enable uio/vfio intr/eventfd mapping */
+	rte_intr_enable(intr_handle);

 	/* enable support intr */
 	ixgbe_enable_intr(eth_dev);

+	/* initialize filter info */
+	memset(filter_info, 0,
+	       sizeof(struct ixgbe_filter_info));
+
 	/* initialize 5tuple filter list */
 	TAILQ_INIT(&filter_info->fivetuple_list);
+
+	/* initialize flow director filter list & hash */
+	ixgbe_fdir_filter_init(eth_dev);
+
+	/* initialize l2 tunnel filter list & hash */
+	ixgbe_l2_tn_filter_init(eth_dev);
+
+	TAILQ_INIT(&filter_ntuple_list);
+	TAILQ_INIT(&filter_ethertype_list);
+	TAILQ_INIT(&filter_syn_list);
+	TAILQ_INIT(&filter_fdir_list);
+	TAILQ_INIT(&filter_l2_tunnel_list);
+	TAILQ_INIT(&ixgbe_flow_list);
+
+	return 0;
+}
+
+static int
+eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ixgbe_hw *hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+	if (hw->adapter_stopped == 0)
+		ixgbe_dev_close(eth_dev);
+
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+
+	/* Unlock any pending hardware semaphore */
+	ixgbe_swfw_lock_reset(hw);
+
+	/* disable uio intr before callback unregister */
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     ixgbe_dev_interrupt_handler, eth_dev);
+
+	/* uninitialize PF if max_vfs not zero */
+	ixgbe_pf_host_uninit(eth_dev);
+
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+	rte_free(eth_dev->data->hash_mac_addrs);
+	eth_dev->data->hash_mac_addrs = NULL;
+
+	/* remove all the fdir filters & hash */
+	ixgbe_fdir_filter_uninit(eth_dev);
+
+	/* remove all the L2 tunnel filters & hash */
+	ixgbe_l2_tn_filter_uninit(eth_dev);
+
+	/* Remove all ntuple filters of the device */
+	ixgbe_ntuple_filter_uninit(eth_dev);
+
+	return 0;
+}
+
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	struct ixgbe_5tuple_filter *p_5tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+		TAILQ_REMOVE(&filter_info->fivetuple_list,
+			     p_5tuple,
+			     entries);
+		rte_free(p_5tuple);
+	}
 	memset(filter_info->fivetuple_mask, 0,
-		sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+	return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	struct ixgbe_fdir_filter *fdir_filter;
+
+	if (fdir_info->hash_map)
+		rte_free(fdir_info->hash_map);
+	if (fdir_info->hash_handle)
+		rte_hash_free(fdir_info->hash_handle);
+
+	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+		TAILQ_REMOVE(&fdir_info->fdir_list,
+			     fdir_filter,
+			     entries);
+		rte_free(fdir_filter);
+	}
+
+	return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+	if (l2_tn_info->hash_map)
+		rte_free(l2_tn_info->hash_map);
+	if (l2_tn_info->hash_handle)
+		rte_hash_free(l2_tn_info->hash_handle);
+
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+			     l2_tn_filter,
+			     entries);
+		rte_free(l2_tn_filter);
+	}
+
+	return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters fdir_hash_params = {
+		.name = fdir_hash_name,
+		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
+		.key_len = sizeof(union ixgbe_atr_input),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	TAILQ_INIT(&fdir_info->fdir_list);
+	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+		 "fdir_%s", eth_dev->data->name);
+	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+	if (!fdir_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+		return -EINVAL;
+	}
+	fdir_info->hash_map = rte_zmalloc("ixgbe",
+					  sizeof(struct ixgbe_fdir_filter *) *
+					  IXGBE_MAX_FDIR_FILTER_NUM,
+					  0);
+	if (!fdir_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir hash map!");
+		return -ENOMEM;
+	}
+	fdir_info->mask_added = FALSE;
+
+	return 0;
+}
+
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters l2_tn_hash_params = {
+		.name = l2_tn_hash_name,
+		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+		.key_len = sizeof(struct ixgbe_l2_tn_key),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	TAILQ_INIT(&l2_tn_info->l2_tn_list);
+	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+		 "l2_tn_%s", eth_dev->data->name);
+	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+	if (!l2_tn_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+		return -EINVAL;
+	}
+	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+					   sizeof(struct ixgbe_l2_tn_filter *) *
+					   IXGBE_MAX_L2_TN_FILTER_NUM,
+					   0);
+	if (!l2_tn_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for L2 TN hash map!");
+		return -ENOMEM;
+	}
+	l2_tn_info->e_tag_en = FALSE;
+	l2_tn_info->e_tag_fwd_en = FALSE;
+	l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;

 	return 0;
 }

 /*
  * Negotiate mailbox API version with the PF.
  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
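The fdir and L2-tunnel hash tables created above follow the stock librte_hash pattern: build a table keyed on the filter's lookup tuple, then mirror each inserted key in a flat hash_map[] array so the filter object can be recovered from the position librte_hash returns. A trimmed sketch of that pairing (illustrative only; `demo_key`, the sizes, and the helper names are hypothetical, while the rte_hash calls are the real library API):

#include <rte_hash.h>
#include <rte_hash_crc.h>

struct demo_key { uint32_t a, b; };

static struct rte_hash *demo_table_init(void)
{
	struct rte_hash_parameters params = {
		.name = "demo_filters",
		.entries = 1024,
		.key_len = sizeof(struct demo_key),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	return rte_hash_create(&params); /* NULL on failure */
}

static int demo_table_insert(struct rte_hash *h, const struct demo_key *key,
			     void *filter, void **map)
{
	int32_t pos = rte_hash_add_key(h, key);

	if (pos < 0)
		return pos;    /* negative errno from librte_hash */
	map[pos] = filter;     /* position indexes the companion array */
	return 0;
}

Lookup works the same way in reverse: rte_hash_lookup() yields the position, and hash_map[pos] yields the filter, which is why the uninit paths above free both the map and the table.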
@@ -931,6 +1542,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)

 	/* start with highest supported, proceed down */
 	static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+		ixgbe_mbox_api_12,
 		ixgbe_mbox_api_11,
 		ixgbe_mbox_api_10,
 	};
@@ -966,10 +1578,11 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 {
 	int diag;
 	uint32_t tc, tcs;
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
 		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
 	struct ixgbe_hwstrip *hwstrip =
 		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -983,14 +1596,29 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)

 	/* for secondary processes, we don't initialise any further as primary
 	 * has already done this work. Only check we don't need a different
-	 * RX function */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-		if (eth_dev->data->scattered_rx)
-			eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+	 * RX function
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		struct ixgbe_tx_queue *txq;
+		/* TX queue function in primary, set by last queue initialized
+		 * Tx queue may not initialized by primary process
+		 */
+		if (eth_dev->data->tx_queues) {
+			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+			ixgbe_set_tx_function(eth_dev, txq);
+		} else {
+			/* Use default TX function if we get here */
+			PMD_INIT_LOG(NOTICE,
+				     "No TX queues configured yet. Using default TX function.");
+		}
+
+		ixgbe_set_rx_function(eth_dev);
+
 		return 0;
 	}

-	pci_dev = eth_dev->pci_dev;
+	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
@@ -1012,6 +1640,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	/* init_mailbox_params */
 	hw->mbx.ops.init_params(hw);

+	/* Reset the hw statistics */
+	ixgbevf_dev_stats_reset(eth_dev);
+
 	/* Disable the interrupts for VF */
 	ixgbevf_intr_disable(hw);

@@ -1025,7 +1656,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	 */
 	if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
 		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-		return (diag);
+		return diag;
 	}

 	/* negotiate mailbox API version to use with the PF.
 	 */
@@ -1036,12 +1667,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)

 	/* Allocate memory for storing MAC addresses */
 	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
-			hw->mac.num_rar_entries, 0);
+					       hw->mac.num_rar_entries, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		PMD_INIT_LOG(ERR,
-			"Failed to allocate %u bytes needed to store "
-			"MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+			     "Failed to allocate %u bytes needed to store "
+			     "MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
 		return -ENOMEM;
 	}

@@ -1071,14 +1702,19 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	/* reset the hardware with the new settings */
 	diag = hw->mac.ops.start_hw(hw);
 	switch (diag) {
-		case  0:
-			break;
+	case  0:
+		break;

-		default:
-			PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-			return (-EIO);
+	default:
+		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+		return -EIO;
 	}

+	rte_intr_callback_register(intr_handle,
+				   ixgbevf_dev_interrupt_handler, eth_dev);
+	rte_intr_enable(intr_handle);
+	ixgbevf_intr_enable(hw);
+
 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
 		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1086,13 +1722,51 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	return 0;
 }

+/* Virtual Function device uninit */
+
+static int
+eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ixgbe_hw *hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+	if (hw->adapter_stopped == 0)
+		ixgbevf_dev_close(eth_dev);
+
+	eth_dev->dev_ops = NULL;
+	eth_dev->rx_pkt_burst = NULL;
+	eth_dev->tx_pkt_burst = NULL;
+
+	/* Disable the interrupts for VF */
+	ixgbevf_intr_disable(hw);
+
+	rte_free(eth_dev->data->mac_addrs);
+	eth_dev->data->mac_addrs = NULL;
+
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     ixgbevf_dev_interrupt_handler, eth_dev);
+
+	return 0;
+}
+
 static struct eth_driver rte_ixgbe_pmd = {
 	.pci_drv = {
-		.name = "rte_ixgbe_pmd",
 		.id_table = pci_id_ixgbe_map,
 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
 	},
 	.eth_dev_init = eth_ixgbe_dev_init,
+	.eth_dev_uninit = eth_ixgbe_dev_uninit,
 	.dev_private_size = sizeof(struct ixgbe_adapter),
 };
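The sup_ver[] table added earlier lets the VF negotiate downward: it offers the newest mailbox revision (api_12) first and falls back one step at a time until the PF acknowledges one, with api_10 as the guaranteed baseline. A sketch of that loop (illustrative only; it assumes the base-code helper ixgbevf_negotiate_api_version() returning 0 on success, and the wrapper name is hypothetical):

static void demo_negotiate(struct ixgbe_hw *hw,
			   const enum ixgbe_pfvf_api_rev *sup_ver, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* keep the first revision both sides support */
		if (ixgbevf_negotiate_api_version(hw, sup_ver[i]) == 0)
			break;
	}
}

Because a device reset silently drops the PF back to api_10, the driver re-runs this negotiation during VF init rather than caching an earlier result.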
- */ -static int -rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - - rte_eth_driver_register(&rte_ixgbevf_pmd); - return (0); -} - static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vfta; uint32_t vid_idx; @@ -1172,14 +1820,59 @@ ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ixgbe_vlan_hw_strip_disable(dev, queue); } -static void -ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid) +static int +ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret = 0; + uint32_t reg; + uint32_t qinq; + + qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + qinq &= IXGBE_DMATXCTL_GDV; + + switch (vlan_type) { + case ETH_VLAN_TYPE_INNER: + if (qinq) { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } else { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Inner type is not supported" + " by single VLAN"); + } + break; + case ETH_VLAN_TYPE_OUTER: + if (qinq) { + /* Only the high 16-bits is valid */ + IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << + IXGBE_EXVET_VET_EXT_SHIFT); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } + + break; + default: + ret = -EINVAL; + PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); + break; + } - /* Only the high 16-bits is valid */ - IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16); + return ret; } void @@ -1203,7 +1896,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vlnctrl; uint16_t i; @@ -1227,14 +1920,25 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) { struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; - if(queue >= IXGBE_MAX_RX_QUEUE_NUM) + if (queue >= IXGBE_MAX_RX_QUEUE_NUM) return; if (on) IXGBE_SET_HWSTRIP(hwstrip, queue); else IXGBE_CLEAR_HWSTRIP(hwstrip, queue); + + if (queue >= dev->data->nb_rx_queues) + return; + + rxq = dev->data->rx_queues[queue]; + + if (on) + rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; + else + rxq->vlan_flags = PKT_RX_VLAN_PKT; } static void @@ -1248,15 +1952,15 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) if (hw->mac.type == ixgbe_mac_82598EB) { /* No queue level support */ - PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip"); + PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL 
*/ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); } @@ -1272,15 +1976,15 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) if (hw->mac.type == ixgbe_mac_82598EB) { /* No queue level supported */ - PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip"); + PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); } @@ -1292,6 +1996,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t ctrl; uint16_t i; + struct ixgbe_rx_queue *rxq; PMD_INIT_FUNC_TRACE(); @@ -1299,13 +2004,13 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ctrl &= ~IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { + } else { /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ for (i = 0; i < dev->data->nb_rx_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); @@ -1320,6 +2025,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t ctrl; uint16_t i; + struct ixgbe_rx_queue *rxq; PMD_INIT_FUNC_TRACE(); @@ -1327,13 +2033,13 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ctrl |= IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { + } else { /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ for (i = 0; i < dev->data->nb_rx_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); @@ -1381,6 +2087,15 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) ctrl |= IXGBE_EXTENDED_VLAN; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl); + /* Clear pooling mode of PFVTCTL. It's required by X550. 
*/ + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); + } + /* * VET EXT field in the EXVET register = 0x8100 by default * So no need to change. Same to VT field of DMATXCTL register @@ -1390,21 +2105,21 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { - if(mask & ETH_VLAN_STRIP_MASK){ + if (mask & ETH_VLAN_STRIP_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_strip) ixgbe_vlan_hw_strip_enable_all(dev); else ixgbe_vlan_hw_strip_disable_all(dev); } - if(mask & ETH_VLAN_FILTER_MASK){ + if (mask & ETH_VLAN_FILTER_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_filter) ixgbe_vlan_hw_filter_enable(dev); else ixgbe_vlan_hw_filter_disable(dev); } - if(mask & ETH_VLAN_EXTEND_MASK){ + if (mask & ETH_VLAN_EXTEND_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_extend) ixgbe_vlan_hw_extend_enable(dev); else @@ -1419,67 +2134,292 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */ + + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); } static int -ixgbe_dev_configure(struct rte_eth_dev *dev) +ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) { - struct ixgbe_interrupt *intr = - IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); - PMD_INIT_FUNC_TRACE(); - - /* set flag to update link status after init */ - intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + switch (nb_rx_q) { + case 1: + case 2: + RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS; + break; + case 4: + RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS; + break; + default: + return -EINVAL; + } - /* - * Initialize to TRUE. If any of Rx queues doesn't meet the bulk - * allocation or vector Rx preconditions we will reset it. - */ - adapter->rx_bulk_alloc_allowed = true; - adapter->rx_vec_allowed = true; + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q; return 0; } -/* - * Configure device link speed and setup link. - * It returns 0 on success. 
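 * (Worked example for ixgbe_check_vf_rss_rxq_num() above, with
 *  hypothetical values max_vfs = 31 and nb_rx_q = 2: the device is put
 *  in ETH_64_POOLS mode, nb_q_per_pool becomes 2, and the PF default
 *  pool starts at queue index 31 * 2 = 62. Only 1, 2 or 4 queues per
 *  VF map onto the hardware pooling modes, which is why any other
 *  count fails with -EINVAL.)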
- */ static int -ixgbe_dev_start(struct rte_eth_dev *dev) +ixgbe_check_mq_mode(struct rte_eth_dev *dev) { - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vf_info *vfinfo = - *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - int err, link_up = 0, negotiate = 0; - uint32_t speed = 0; - int mask = 0; - int status; - uint16_t vf, idx; - - PMD_INIT_FUNC_TRACE(); - - /* IXGBE devices don't support half duplex */ - if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) && - (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) { - PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu", - dev->data->dev_conf.link_duplex, + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t nb_rx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; + + if (RTE_ETH_DEV_SRIOV(dev).active != 0) { + /* check multi-queue mode */ + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); + break; + case ETH_MQ_RX_VMDQ_DCB_RSS: + /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ + PMD_INIT_LOG(ERR, "SRIOV active," + " unsupported mq_mode rx %d.", + dev_conf->rxmode.mq_mode); + return -EINVAL; + case ETH_MQ_RX_RSS: + case ETH_MQ_RX_VMDQ_RSS: + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS; + if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) + if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " invalid queue number" + " for VMDQ RSS, allowed" + " value are 1, 2 or 4."); + return -EINVAL; + } + break; + case ETH_MQ_RX_VMDQ_ONLY: + case ETH_MQ_RX_NONE: + /* if nothing mq mode configure, use default scheme */ + dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY; + if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1) + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1; + break; + default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/ + /* SRIOV only works in VMDq enable mode */ + PMD_INIT_LOG(ERR, "SRIOV is active," + " wrong mq_mode rx %d.", + dev_conf->rxmode.mq_mode); + return -EINVAL; + } + + switch (dev_conf->txmode.mq_mode) { + case ETH_MQ_TX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; + break; + default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; + break; + } + + /* check valid queue number */ + if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || + (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { + PMD_INIT_LOG(ERR, "SRIOV is active," + " nb_rx_q=%d nb_tx_q=%d queue number" + " must be less than or equal to %d.", + nb_rx_q, nb_tx_q, + RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); + return -EINVAL; + } + } else { + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" + " not supported."); + return -EINVAL; + } + /* check configuration for vmdb+dcb mode */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_conf *conf; + + if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.", + IXGBE_VMDQ_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf; + if (!(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_INIT_LOG(ERR, "VMDQ+DCB selected," + " nb_queue_pools must be %d or %d.", + 
ETH_16_POOLS, ETH_32_POOLS); + return -EINVAL; + } + } + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_tx_conf *conf; + + if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d", + IXGBE_VMDQ_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf; + if (!(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_INIT_LOG(ERR, "VMDQ+DCB selected," + " nb_queue_pools != %d and" + " nb_queue_pools != %d.", + ETH_16_POOLS, ETH_32_POOLS); + return -EINVAL; + } + } + + /* For DCB mode check our configuration before we go further */ + if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { + const struct rte_eth_dcb_rx_conf *conf; + + if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", + IXGBE_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->rx_adv_conf.dcb_rx_conf; + if (!(conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" + " and nb_tcs != %d.", + ETH_4_TCS, ETH_8_TCS); + return -EINVAL; + } + } + + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { + const struct rte_eth_dcb_tx_conf *conf; + + if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { + PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", + IXGBE_DCB_NB_QUEUES); + return -EINVAL; + } + conf = &dev_conf->tx_adv_conf.dcb_tx_conf; + if (!(conf->nb_tcs == ETH_4_TCS || + conf->nb_tcs == ETH_8_TCS)) { + PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d" + " and nb_tcs != %d.", + ETH_4_TCS, ETH_8_TCS); + return -EINVAL; + } + } + + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) { + if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { + PMD_INIT_LOG(ERR, + "Neither VT nor DCB are enabled, " + "nb_tx_q > %d.", + IXGBE_NONE_MODE_TX_NB_QUEUES); + return -EINVAL; + } + } + } + return 0; +} + +static int +ixgbe_dev_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + int ret; + + PMD_INIT_FUNC_TRACE(); + /* multipe queue mode checking */ + ret = ixgbe_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.", + ret); + return ret; + } + + /* set flag to update link status after init */ + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + + /* + * Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. + */ + adapter->rx_bulk_alloc_allowed = true; + adapter->rx_vec_allowed = true; + + return 0; +} + +static void +ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t gpie; + + /* only set up it on X550EM_X */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_SDP0_GPIEN_X550EM_x; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + if (hw->phy.type == ixgbe_phy_x550em_ext_t) + intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; + } +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. 
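 * With this rework the requested speed comes from the bitmask
 * dev_conf.link_speeds instead of the removed link_speed/link_duplex
 * pair: 0 (ETH_LINK_SPEED_AUTONEG) advertises everything the MAC
 * supports, while an OR of ETH_LINK_SPEED_100M / _1G / _10G restricts
 * the advertised set; ETH_LINK_SPEED_FIXED is rejected as not yet
 * implemented. Hypothetical application-side configuration:
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *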
+ */ +static int +ixgbe_dev_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vf_info *vfinfo = + *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t intr_vector = 0; + int err, link_up = 0, negotiate = 0; + uint32_t speed = 0; + int mask = 0; + int status; + uint16_t vf, idx; + uint32_t *link_speeds; + + PMD_INIT_FUNC_TRACE(); + + /* IXGBE devices don't support: + * - half duplex (checked afterwards for valid speeds) + * - fixed speed: TODO implement + */ + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported", dev->data->port_id); return -EINVAL; } + /* disable uio/vfio intr/eventfd mapping */ + rte_intr_disable(intr_handle); + /* stop adapter */ - hw->adapter_stopped = FALSE; + hw->adapter_stopped = 0; ixgbe_stop_adapter(hw); /* reinitialize adapter - * this calls reset and start */ + * this calls reset and start + */ status = ixgbe_pf_reset_hw(hw); if (status != 0) return -1; @@ -1489,6 +2429,36 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* configure PF module if SRIOV enabled */ ixgbe_pf_host_configure(dev); + ixgbe_dev_phy_intr_setup(dev); + + /* check and configure queue intr-vector mapping */ + if ((rte_intr_cap_multiple(intr_handle) || + !RTE_ETH_DEV_SRIOV(dev).active) && + dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { + PMD_INIT_LOG(ERR, "At most %d intr queues supported", + IXGBE_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + /* confiugre msix for sleep until rx interrupt */ + ixgbe_configure_msix(dev); + /* initialize transmission unit */ ixgbe_dev_tx_init(dev); @@ -1499,6 +2469,37 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto error; } + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ixgbe_vlan_offload_set(dev, mask); + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable vlan filtering for VMDq */ + ixgbe_vmdq_vlan_hw_filter_enable(dev); + } + + /* Configure DCB hw */ + ixgbe_configure_dcb(dev); + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = ixgbe_fdir_configure(dev); + if (err) + goto error; + } + + /* Restore vf rate limit */ + if (vfinfo != NULL) { + for (vf = 0; vf < pci_dev->max_vfs; vf++) + for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) + if (vfinfo[vf].tx_rate[idx] != 0) + rte_pmd_ixgbe_set_vf_rate_limit( + dev->data->port_id, vf, + vfinfo[vf].tx_rate[idx], + 1 << idx); + } + + ixgbe_restore_statistics_mapping(dev); + err = ixgbe_dev_rxtx_start(dev); if (err < 0) { PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); @@ -1516,8 +2517,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto error; } - /* Turn on the laser */ - ixgbe_enable_tx_laser(hw); + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn on the copper */ + ixgbe_set_phy_power(hw, true); + } else { + /* Turn on the laser */ + 
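/* (Two notes on the reworked start sequence, for context: media
 * handling now splits on get_media_type(), powering copper PHYs via
 * ixgbe_set_phy_power() and gating the Tx laser only on fiber parts;
 * and the queue/vector wiring performed earlier lets an application
 * sleep on Rx traffic instead of polling. A minimal hypothetical
 * sketch of the application side:
 *
 *     dev_conf.intr_conf.rxq = 1;        set before configuring
 *     ...
 *     rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *     struct rte_epoll_event ev;
 *     rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *     ... on wakeup, drain the queue with rte_eth_rx_burst() ...
 * ) */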
ixgbe_enable_tx_laser(hw); + } err = ixgbe_check_link(hw, &speed, &link_up, 0); if (err) @@ -1528,30 +2534,25 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; - switch(dev->data->dev_conf.link_speed) { - case ETH_LINK_SPEED_AUTONEG: + link_speeds = &dev->data->dev_conf.link_speeds; + if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G)) { + PMD_INIT_LOG(ERR, "Invalid link setting"); + goto error; + } + + speed = 0x0; + if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { speed = (hw->mac.type != ixgbe_mac_82598EB) ? IXGBE_LINK_SPEED_82599_AUTONEG : IXGBE_LINK_SPEED_82598_AUTONEG; - break; - case ETH_LINK_SPEED_100: - /* - * Invalid for 82598 but error will be detected by - * ixgbe_setup_link() - */ - speed = IXGBE_LINK_SPEED_100_FULL; - break; - case ETH_LINK_SPEED_1000: - speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case ETH_LINK_SPEED_10000: - speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - default: - PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu", - dev->data->dev_conf.link_speed, - dev->data->port_id); - goto error; + } else { + if (*link_speeds & ETH_LINK_SPEED_10G) + speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_1G) + speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_100M) + speed |= IXGBE_LINK_SPEED_100_FULL; } err = ixgbe_setup_link(hw, speed, link_up); @@ -1560,44 +2561,33 @@ ixgbe_dev_start(struct rte_eth_dev *dev) skip_link_setup: - /* check if lsc interrupt is enabled */ - if (dev->data->dev_conf.intr_conf.lsc != 0) - ixgbe_dev_lsc_interrupt_setup(dev); - - /* resume enabled intr since hw reset */ - ixgbe_enable_intr(dev); - - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ - ETH_VLAN_EXTEND_MASK; - ixgbe_vlan_offload_set(dev, mask); - - if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { - /* Enable vlan filtering for VMDq */ - ixgbe_vmdq_vlan_hw_filter_enable(dev); + if (rte_intr_allow_others(intr_handle)) { + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + ixgbe_dev_lsc_interrupt_setup(dev); + ixgbe_dev_macsec_interrupt_setup(dev); + } else { + rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, dev); + if (dev->data->dev_conf.intr_conf.lsc != 0) + PMD_INIT_LOG(INFO, "lsc won't enable because of" + " no intr multiplex\n"); } - /* Configure DCB hw */ - ixgbe_configure_dcb(dev); - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { - err = ixgbe_fdir_configure(dev); - if (err) - goto error; - } + /* check if rxq interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.rxq != 0 && + rte_intr_dp_is_en(intr_handle)) + ixgbe_dev_rxq_interrupt_setup(dev); - /* Restore vf rate limit */ - if (vfinfo != NULL) { - for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) - for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) - if (vfinfo[vf].tx_rate[idx] != 0) - ixgbe_set_vf_rate_limit(dev, vf, - vfinfo[vf].tx_rate[idx], - 1 << idx); - } + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); - ixgbe_restore_statistics_mapping(dev); + /* resume enabled intr since hw reset */ + ixgbe_enable_intr(dev); + ixgbe_l2_tunnel_conf(dev); + ixgbe_filter_restore(dev); - return (0); + return 0; error: PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); @@ -1616,9 +2606,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct ixgbe_filter_info 
*filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; PMD_INIT_FUNC_TRACE(); @@ -1628,17 +2617,21 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* reset the NIC */ ixgbe_pf_reset_hw(hw); - hw->adapter_stopped = FALSE; + hw->adapter_stopped = 0; /* stop adapter */ ixgbe_stop_adapter(hw); - for (vf = 0; vfinfo != NULL && - vf < dev->pci_dev->max_vfs; vf++) + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; - /* Turn off the laser */ - ixgbe_disable_tx_laser(hw); + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn off the copper */ + ixgbe_set_phy_power(hw, false); + } else { + /* Turn off the laser */ + ixgbe_disable_tx_laser(hw); + } ixgbe_dev_clear_queues(dev); @@ -1650,21 +2643,22 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) memset(&link, 0, sizeof(link)); rte_ixgbe_dev_atomic_write_link_status(dev, &link); - /* Remove all ntuple filters of the device */ - for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list); - p_5tuple != NULL; p_5tuple = p_5tuple_next) { - p_5tuple_next = TAILQ_NEXT(p_5tuple, entries); - TAILQ_REMOVE(&filter_info->fivetuple_list, - p_5tuple, entries); - rte_free(p_5tuple); - } - memset(filter_info->fivetuple_mask, 0, - sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + if (!rte_intr_allow_others(intr_handle)) + /* resume to the default handler */ + rte_intr_callback_register(intr_handle, + ixgbe_dev_interrupt_handler, + (void *)dev); + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } } /* - * Set device link up: enable tx laser. + * Set device link up: enable tx. */ static int ixgbe_dev_set_link_up(struct rte_eth_dev *dev) @@ -1680,18 +2674,21 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev) return -ENOTSUP; } #endif + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn on the copper */ + ixgbe_set_phy_power(hw, true); + } else { /* Turn on the laser */ ixgbe_enable_tx_laser(hw); - return 0; } - PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x", - hw->device_id); - return -ENOTSUP; + return 0; } /* - * Set device link down: disable tx laser. + * Set device link down: disable tx. */ static int ixgbe_dev_set_link_down(struct rte_eth_dev *dev) @@ -1707,14 +2704,17 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev) return -ENOTSUP; } #endif + } + + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { + /* Turn off the copper */ + ixgbe_set_phy_power(hw, false); + } else { /* Turn off the laser */ ixgbe_disable_tx_laser(hw); - return 0; } - PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x", - hw->device_id); - return -ENOTSUP; + return 0; } /* @@ -1733,29 +2733,30 @@ ixgbe_dev_close(struct rte_eth_dev *dev) ixgbe_dev_stop(dev); hw->adapter_stopped = 1; + ixgbe_dev_free_queues(dev); + ixgbe_disable_pcie_master(hw); /* reprogram the RAR[0] in case user changed it. 
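 * (RAR[0] is the receive-address register slot holding the port's
 *  default MAC address, and IXGBE_RAH_AV marks the entry valid. If the
 *  application changed the address at runtime, for example through the
 *  generic call serviced by the new ixgbe_set_default_mac_addr()
 *  callback,
 *
 *      rte_eth_dev_default_mac_addr_set(port_id, &new_addr);
 *
 *  this rewrite keeps the hardware entry consistent on close.)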
*/ ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } -/* - * This function is based on ixgbe_update_stats_counters() in base/ixgbe.c - */ static void -ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +ixgbe_read_stats_registers(struct ixgbe_hw *hw, + struct ixgbe_hw_stats *hw_stats, + struct ixgbe_macsec_stats *macsec_stats, + uint64_t *total_missed_rx, uint64_t *total_qbrc, + uint64_t *total_qprc, uint64_t *total_qprdc) { - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_hw_stats *hw_stats = - IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); uint32_t bprc, lxon, lxoff, total; - uint64_t total_missed_rx, total_qbrc, total_qprc; + uint32_t delta_gprc = 0; unsigned i; - - total_missed_rx = 0; - total_qbrc = 0; - total_qprc = 0; + /* Workaround for RX byte count not including CRC bytes when CRC + * strip is enabled. CRC bytes are removed from counters when crc_strip + * is disabled. + */ + int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & + IXGBE_HLREG0_RXCRCSTRP); hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); @@ -1763,46 +2764,67 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); for (i = 0; i < 8; i++) { - uint32_t mp; - mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* global total per queue */ hw_stats->mpc[i] += mp; /* Running comprehensive total for stats display */ - total_missed_rx += hw_stats->mpc[i]; - if (hw->mac.type == ixgbe_mac_82598EB) + *total_missed_rx += hw_stats->mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) { hw_stats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } else { + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + hw_stats->pxon2offc[i] += + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + } hw_stats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - hw_stats->pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); hw_stats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - hw_stats->pxoffrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); - hw_stats->pxon2offc[i] += - IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); } for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { - hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + + delta_gprc += delta_qprc; + + hw_stats->qprc[i] += delta_qprc; + hw_stats->qptc[i] += delta_qptc; + hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); hw_stats->qbrc[i] += ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); + if (crc_strip == 0) + hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN; + hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); hw_stats->qbtc[i] += ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); - hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); - total_qprc += hw_stats->qprc[i]; - total_qbrc += hw_stats->qbrc[i]; + hw_stats->qprdc[i] += delta_qprdc; + *total_qprdc += hw_stats->qprdc[i]; + + *total_qprc += hw_stats->qprc[i]; + *total_qbrc += hw_stats->qbrc[i]; } hw_stats->mlfc 
+= IXGBE_READ_REG(hw, IXGBE_MLFC); hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); - /* Note that gprc counts missed packets */ - hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + /* + * An errata states that gprc actually counts good + missed packets: + * Workaround to set gprc to summated queue packet receives + */ + hw_stats->gprc = *total_qprc; if (hw->mac.type != ixgbe_mac_82598EB) { hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); @@ -1821,6 +2843,18 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); } + uint64_t old_tpr = hw_stats->tpr; + + hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); + + if (crc_strip == 0) + hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN; + + uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC); + hw_stats->gptc += delta_gptc; + hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN; + hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN; /* * Workaround: mprc hardware is incorrectly counting @@ -1845,7 +2879,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) hw_stats->lxofftxc += lxoff; total = lxon + lxoff; - hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); hw_stats->gptc -= total; @@ -1860,8 +2893,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); - hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); - hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); @@ -1880,47 +2911,101 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); } - if (stats == NULL) - return; + /* Flow Director Stats registers */ + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); - /* Fill out the rte_eth_stats statistics structure */ - stats->ipackets = total_qprc; - stats->ibytes = total_qbrc; - stats->opackets = hw_stats->gptc; - stats->obytes = hw_stats->gotc; - stats->imcasts = hw_stats->mprc; + /* MACsec Stats registers */ + macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); + macsec_stats->out_pkts_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); + macsec_stats->out_pkts_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); + macsec_stats->out_octets_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); + macsec_stats->out_octets_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); + macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); + macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); + macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); + macsec_stats->in_pkts_unknownsci += + IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); + macsec_stats->in_octets_decrypted += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); + macsec_stats->in_octets_validated += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); + macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); + 
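/* (Like the other counters in this block, the LinkSec registers are
 * clear-on-read, so each poll adds the hardware delta into the
 * software accumulator; the two-entry loops below presumably cover the
 * two receive secure associations exposed per port. These totals feed
 * the new MACsec xstats rather than the basic rte_eth_stats.) */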
macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); + macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); + for (i = 0; i < 2; i++) { + macsec_stats->in_pkts_ok += + IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); + macsec_stats->in_pkts_invalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); + macsec_stats->in_pkts_notvalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); + } + macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); + macsec_stats->in_pkts_notusingsa += + IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); +} - for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { - stats->q_ipackets[i] = hw_stats->qprc[i]; - stats->q_opackets[i] = hw_stats->qptc[i]; - stats->q_ibytes[i] = hw_stats->qbrc[i]; - stats->q_obytes[i] = hw_stats->qbtc[i]; - stats->q_errors[i] = hw_stats->qprdc[i]; +/* + * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c + */ +static void +ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned i; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); + + if (stats == NULL) + return; + + /* Fill out the rte_eth_stats statistics structure */ + stats->ipackets = total_qprc; + stats->ibytes = total_qbrc; + stats->opackets = hw_stats->gptc; + stats->obytes = hw_stats->gotc; + + for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + stats->q_ipackets[i] = hw_stats->qprc[i]; + stats->q_opackets[i] = hw_stats->qptc[i]; + stats->q_ibytes[i] = hw_stats->qbrc[i]; + stats->q_obytes[i] = hw_stats->qbtc[i]; + stats->q_errors[i] = hw_stats->qprdc[i]; } /* Rx Errors */ - stats->ibadcrc = hw_stats->crcerrs; - stats->ibadlen = hw_stats->rlec + hw_stats->ruc + hw_stats->roc; stats->imissed = total_missed_rx; - stats->ierrors = stats->ibadcrc + - stats->ibadlen + - stats->imissed + - hw_stats->illerrc + hw_stats->errbc; + stats->ierrors = hw_stats->crcerrs + + hw_stats->mspdc + + hw_stats->rlec + + hw_stats->ruc + + hw_stats->roc + + hw_stats->illerrc + + hw_stats->errbc + + hw_stats->rfc + + hw_stats->fccrc + + hw_stats->fclast; /* Tx Errors */ stats->oerrors = 0; - - /* XON/XOFF pause frames */ - stats->tx_pause_xon = hw_stats->lxontxc; - stats->rx_pause_xon = hw_stats->lxonrxc; - stats->tx_pause_xoff = hw_stats->lxofftxc; - stats->rx_pause_xoff = hw_stats->lxoffrxc; - - /* Flow Director Stats registers */ - hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); - stats->fdirmatch = hw_stats->fdirmatch; - stats->fdirmiss = hw_stats->fdirmiss; } static void @@ -1936,11 +3021,184 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev) memset(stats, 0, sizeof(*stats)); } +/* This function calculates the number of xstats based on the current config */ +static unsigned +ixgbe_xstats_calc_num(void) { + return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + + (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + + (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); +} + +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name 
*xstats_names, __rte_unused unsigned limit) +{ + const unsigned cnt_stats = ixgbe_xstats_calc_num(); + unsigned stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + rte_ixgbe_stats_strings[i].name); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + rte_ixgbe_macsec_strings[i].name); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; +} + +static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit) +{ + unsigned i; + + if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) + return -ENOMEM; + + if (xstats_names != NULL) + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ixgbevf_stats_strings[i].name); + return IXGBEVF_NB_XSTATS; +} + +static int +ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; + unsigned i, stat, count = 0; + + count = ixgbe_xstats_calc_num(); + + if (n < count) + return count; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + total_qprdc = 0; + + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); + + /* If this is a reset xstats is NULL, and we have cleared the + * registers by reading them. 
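 * For non-reset use, callers follow the standard two-pass xstats
 * contract that this return convention supports: a first call sizes
 * the table, a second fills it. Hypothetical application-side sketch:
 *
 *     int n = rte_eth_xstats_get(port_id, NULL, 0);
 *     struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *     rte_eth_xstats_get(port_id, xs, n);
 *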
+ */ + if (!xstats) + return 0; + + /* Extended stats from ixgbe_hw_stats */ + count = 0; + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_rxq_strings[stat].offset + + (sizeof(uint64_t) * i)); + xstats[count].id = count; + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbe_txq_strings[stat].offset + + (sizeof(uint64_t) * i)); + xstats[count].id = count; + count++; + } + } + return count; +} + +static void +ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_stats *stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); + + unsigned count = ixgbe_xstats_calc_num(); + + /* HW registers are cleared on read */ + ixgbe_dev_xstats_get(dev, NULL, count); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); + memset(macsec_stats, 0, sizeof(*macsec_stats)); +} + static void -ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +ixgbevf_update_stats(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Good Rx packet, include VF loopback */ @@ -1962,6 +3220,40 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Rx Multicst Packet */ UPDATE_VF_STAT(IXGBE_VFMPRC, hw_stats->last_vfmprc, hw_stats->vfmprc); +} + +static int +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i; + + if (n < IXGBEVF_NB_XSTATS) + return IXGBEVF_NB_XSTATS; + + ixgbevf_update_stats(dev); + + if (!xstats) + return 0; + + /* Extended stats */ + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { + xstats[i].value = *(uint64_t *)(((char *)hw_stats) + + rte_ixgbevf_stats_strings[i].offset); + } + + return IXGBEVF_NB_XSTATS; +} + +static void +ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + ixgbevf_update_stats(dev); if (stats == NULL) return; @@ -1970,13 +3262,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; stats->obytes = hw_stats->vfgotc; - stats->imcasts = hw_stats->vfmprc; } static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) { - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) 
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Sync HW register to the last stats */ @@ -1987,22 +3278,53 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) hw_stats->vfgorc = 0; hw_stats->vfgptc = 0; hw_stats->vfgotc = 0; - hw_stats->vfmprc = 0; +} +static int +ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u16 eeprom_verh, eeprom_verl; + u32 etrack_id; + int ret; + + ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); + ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); + + etrack_id = (eeprom_verh << 16) | eeprom_verl; + ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; } static void ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) + dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; + } dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -2023,6 +3345,15 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) !RTE_ETH_DEV_SRIOV(dev).active) dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | @@ -2031,6 +3362,15 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO; + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; + + if (hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { .pthresh = IXGBE_DEFAULT_RX_PTHRESH, @@ -2052,23 +3392,71 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS, }; - dev_info->reta_size = ETH_RSS_RETA_SIZE_128; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; + + dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); + dev_info->reta_size = 
ixgbe_reta_size_get(hw->mac.type); dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550_vf) { + dev_info->speed_capa |= ETH_LINK_SPEED_100M; + } +} + +static const uint32_t * +ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* For non-vec functions, + * refers to ixgbe_rxd_pkt_info_to_pkt_type(); + * for vec functions, + * refers to _recv_raw_pkts_vec(). + */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == ixgbe_recv_pkts || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) + return ptypes; + return NULL; } static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -2081,7 +3469,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM; + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2104,6 +3493,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev, .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS, }; + + dev_info->rx_desc_lim = rx_desc_lim; + dev_info->tx_desc_lim = tx_desc_lim; } /* return 0 means link status changed, -1 means not changed */ @@ -2116,58 +3508,55 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) int link_up; int diag; - link.link_status = 0; + link.link_status = ETH_LINK_DOWN; link.link_speed = 0; - link.link_duplex = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; memset(&old, 0, sizeof(old)); rte_ixgbe_dev_atomic_read_link_status(dev, &old); + hw->mac.get_link_status = true; + /* check if it needs to wait to complete, if lsc interrupt is enabled */ if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) diag = ixgbe_check_link(hw, &link_speed, &link_up, 0); else diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); + if (diag != 0) { - link.link_speed = ETH_LINK_SPEED_100; - link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; rte_ixgbe_dev_atomic_write_link_status(dev, &link); if (link.link_status == old.link_status) return -1; return 0; } - if (link_speed == 
IXGBE_LINK_SPEED_UNKNOWN && - !hw->mac.get_link_status) { - memcpy(&link, &old, sizeof(link)); - return -1; - } - if (link_up == 0) { rte_ixgbe_dev_atomic_write_link_status(dev, &link); if (link.link_status == old.link_status) return -1; return 0; } - link.link_status = 1; + link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; switch (link_speed) { default: case IXGBE_LINK_SPEED_UNKNOWN: - link.link_duplex = ETH_LINK_HALF_DUPLEX; - link.link_speed = ETH_LINK_SPEED_100; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_100_FULL: - link.link_speed = ETH_LINK_SPEED_100; + link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_1GB_FULL: - link.link_speed = ETH_LINK_SPEED_1000; + link.link_speed = ETH_SPEED_NUM_1G; break; case IXGBE_LINK_SPEED_10GB_FULL: - link.link_speed = ETH_LINK_SPEED_10000; + link.link_speed = ETH_SPEED_NUM_10G; break; } rte_ixgbe_dev_atomic_write_link_status(dev, &link); @@ -2252,6 +3641,50 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev) return 0; } +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_RTX_QUEUE; + + return 0; +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_LINKSEC; + + return 0; +} + /* * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. 
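 * EICR is read-on-clear, so the causes must be latched into
 * intr->flags here and handled later in ixgbe_dev_interrupt_action();
 * this patch widens the latched set with IXGBE_FLAG_MACSEC (LinkSec
 * interrupt) and IXGBE_FLAG_PHY_INTERRUPT (external PHY on X550EM_x,
 * signalled via SDP0).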
* @@ -2275,17 +3708,25 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) /* read-on-clear nic registers here */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); - PMD_DRV_LOG(INFO, "eicr %x", eicr); + PMD_DRV_LOG(DEBUG, "eicr %x", eicr); intr->flags = 0; - if (eicr & IXGBE_EICR_LSC) { - /* set flag for async link update */ + + /* set flag for async link update */ + if (eicr & IXGBE_EICR_LSC) intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - } if (eicr & IXGBE_EICR_MAILBOX) intr->flags |= IXGBE_FLAG_MAILBOX; + if (eicr & IXGBE_EICR_LINKSEC) + intr->flags |= IXGBE_FLAG_MACSEC; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) + intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; + return 0; } @@ -2302,6 +3743,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct rte_eth_link link; memset(&link, 0, sizeof(link)); @@ -2316,11 +3758,11 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) PMD_INIT_LOG(INFO, " Port %d: Link Down", (int)(dev->data->port_id)); } - PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); } /* @@ -2334,13 +3776,16 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) * - On failure, a negative value. */ static int -ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; struct rte_eth_link link; int intr_enable_delay = false; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); @@ -2349,6 +3794,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) intr->flags &= ~IXGBE_FLAG_MAILBOX; } + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { /* get the link status before link update, for predicting later */ memset(&link, 0, sizeof(link)); @@ -2372,12 +3822,12 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) if (intr_enable_delay) { if (rte_eal_alarm_set(timeout * 1000, - ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0) + ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) PMD_DRV_LOG(ERR, "Error setting alarm"); } else { PMD_DRV_LOG(DEBUG, "enable intr immediately"); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(intr_handle); } @@ -2402,6 +3852,8 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = @@ -2412,16 +3864,27 @@ ixgbe_dev_interrupt_delayed_handler(void *param) if (eicr & IXGBE_EICR_MAILBOX) ixgbe_pf_mbx_process(dev); + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + if (intr->flags & 
IXGBE_FLAG_NEED_LINK_UPDATE) { ixgbe_dev_link_update(dev, 0); intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; ixgbe_dev_link_status_print(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } + + if (intr->flags & IXGBE_FLAG_MACSEC) { + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, + NULL); + intr->flags &= ~IXGBE_FLAG_MACSEC; } PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(intr_handle); } /** @@ -2437,12 +3900,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param) * void */ static void -ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, - void *param) +ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, + void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + ixgbe_dev_interrupt_get_status(dev); - ixgbe_dev_interrupt_action(dev); + ixgbe_dev_interrupt_action(dev, handle); } static int @@ -2451,7 +3915,7 @@ ixgbe_dev_led_on(struct rte_eth_dev *dev) struct ixgbe_hw *hw; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP); + return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; } static int @@ -2460,7 +3924,7 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev) struct ixgbe_hw *hw; hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP); + return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP; } static int @@ -2530,8 +3994,6 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg) - return -ENOTSUP; rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size); @@ -2544,7 +4006,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) (fc_conf->high_water < fc_conf->low_water)) { PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); - return (-EINVAL); + return -EINVAL; } hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; @@ -2552,6 +4014,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) hw->fc.high_water[0] = fc_conf->high_water; hw->fc.low_water[0] = fc_conf->low_water; hw->fc.send_xon = fc_conf->send_xon; + hw->fc.disable_fc_autoneg = !fc_conf->autoneg; err = ixgbe_fc_enable(hw); @@ -2586,7 +4049,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Enable flow control according to the current settings. 
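 * (Related: the ixgbe_flow_ctrl_set() hunk above now honours the
 *  caller's autoneg request through hw->fc.disable_fc_autoneg instead
 *  of rejecting mismatches with -ENOTSUP. Hypothetical use of the
 *  generic API, values for illustration only:
 *
 *      struct rte_eth_fc_conf fc = {
 *          .high_water = 0x80,
 *          .low_water  = 0x40,
 *          .pause_time = 0x680,
 *          .send_xon   = 1,
 *          .mode       = RTE_FC_FULL,
 *          .autoneg    = 1,
 *      };
 *      rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *
 *  subject to the high/low water validation above.)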
*/ static int -ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) { int ret_val = 0; uint32_t mflcn_reg, fccfg_reg; @@ -2604,13 +4067,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) /* Low water mark of zero causes XOFF floods */ if (hw->fc.current_mode & ixgbe_fc_tx_pause) { /* High/Low water can not be 0 */ - if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) { + if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } - if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { + if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; @@ -2633,13 +4096,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_rx_pause: /* @@ -2656,20 +4119,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ @@ -2680,7 +4143,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); ret_val = IXGBE_ERR_CONFIG; goto out; - break; } /* Set 802.3x based flow control settings. 
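 * In the Tx-pause modes handled below, the controller emits XOFF when
 * packet-buffer fill crosses high_water and XON again once it falls
 * below low_water, so the validation requires 0 < low_water <
 * high_water per traffic class; a zero low mark would flood the link
 * with XOFF frames.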
*/ @@ -2719,13 +4181,13 @@ out: } static int -ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int32_t ret_val = IXGBE_NOT_IMPLEMENTED; - if(hw->mac.type != ixgbe_mac_82598EB) { - ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num); + if (hw->mac.type != ixgbe_mac_82598EB) { + ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); } return ret_val; } @@ -2739,9 +4201,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p uint8_t tc_num; uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, @@ -2765,7 +4227,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) { PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB"); PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water); - return (-EINVAL); + return -EINVAL; } hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode]; @@ -2774,7 +4236,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; - err = ixgbe_dcb_pfc_enable(dev,tc_num); + err = ixgbe_dcb_pfc_enable(dev, tc_num); /* Not negotiated is not an error case */ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) @@ -2789,16 +4251,26 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - uint8_t i, j, mask; + uint16_t i, sp_reta_size; + uint8_t j, mask; uint32_t reta, r; uint16_t idx, shift; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); - if (reta_size != ETH_RSS_RETA_SIZE_128) { + + if (!ixgbe_rss_update_sp(hw->mac.type)) { + PMD_DRV_LOG(ERR, "RSS reta update is not supported on this " + "NIC."); + return -ENOTSUP; + } + + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + if (reta_size != sp_reta_size) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + "(%d)\n", reta_size, sp_reta_size); return -EINVAL; } @@ -2809,10 +4281,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, IXGBE_4_BIT_MASK); if (!mask) continue; + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); if (mask == IXGBE_4_BIT_MASK) r = 0; else - r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2)); + r = IXGBE_READ_REG(hw, reta_reg); for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) { if (mask & (0x1 << j)) reta |= reta_conf[idx].reta[shift + j] << @@ -2821,7 +4294,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, reta |= r & (IXGBE_8_BIT_MASK << (CHAR_BIT * j)); } - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + IXGBE_WRITE_REG(hw, reta_reg, reta); } return 0; @@ -2832,20 +4305,23 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - uint8_t i, j, mask; + uint16_t i, sp_reta_size; + uint8_t j, mask; uint32_t reta; uint16_t idx, shift; struct ixgbe_hw 
*hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); - if (reta_size != ETH_RSS_RETA_SIZE_128) { + sp_reta_size = ixgbe_reta_size_get(hw->mac.type); + if (reta_size != sp_reta_size) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128); + "(%d)\n", reta_size, sp_reta_size); return -EINVAL; } - for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) { + for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) { idx = i / RTE_RETA_GROUP_SIZE; shift = i % RTE_RETA_GROUP_SIZE; mask = (uint8_t)((reta_conf[idx].mask >> shift) & @@ -2853,7 +4329,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, if (!mask) continue; - reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2)); + reta_reg = ixgbe_reta_reg_get(hw->mac.type, i); + reta = IXGBE_READ_REG(hw, reta_reg); for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) { if (mask & (0x1 << j)) reta_conf[idx].reta[shift + j] = @@ -2883,6 +4360,61 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) ixgbe_clear_rar(hw, index); } +static void +ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + ixgbe_remove_rar(dev, 0); + + ixgbe_add_rar(dev, addr, 0, 0); +} + +static int +is_ixgbe_pmd(const char *driver_name) +{ + if (!strstr(driver_name, "ixgbe")) + return -ENOTSUP; + + if (strstr(driver_name, "ixgbe_vf")) + return -ENOTSUP; + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, + struct ether_addr *mac_addr) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + int rar_entry; + uint8_t *new_mac = (uint8_t *)(mac_addr); + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + rar_entry = hw->mac.num_rar_entries - (vf + 1); + + if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) { + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, + ETHER_ADDR_LEN); + return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, + IXGBE_RAH_AV); + } + return -EINVAL; +} + static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { @@ -2899,7 +4431,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ + * feature has not been enabled before. 
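
The RETA update/query paths above pack four one-byte table entries into each 32-bit RETA register; the 4-bit group mask selects which bytes may change while the remaining bytes are re-read from hardware. A standalone sketch of that merge (illustrative, not driver code):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reta_merge(uint32_t old_reg, uint8_t group_mask,
			   const uint8_t new_entries[4])
{
	uint32_t reg = 0;
	int j;

	for (j = 0; j < 4; j++) {
		if (group_mask & (1 << j))
			reg |= (uint32_t)new_entries[j] << (CHAR_BIT * j);
		else
			reg |= old_reg & (0xFFu << (CHAR_BIT * j));
	}
	return reg;
}

int main(void)
{
	const uint8_t entries[4] = { 1, 2, 3, 4 };

	/* update bytes 0 and 2 only, keep bytes 1 and 3 of the old value */
	printf("0x%08x\n", (unsigned)reta_merge(0xAABBCCDDu, 0x5, entries));
	return 0;	/* prints 0xaa03cc01 */
}
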
+ */ if (!dev->data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) @@ -2943,12 +4476,27 @@ ixgbevf_intr_disable(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); } -static int -ixgbevf_dev_configure(struct rte_eth_dev *dev) +static void +ixgbevf_intr_enable(struct ixgbe_hw *hw) { - struct rte_eth_conf* conf = &dev->data->dev_conf; + PMD_INIT_FUNC_TRACE(); - PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", + /* VF enable interrupt autoclean */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK); + + IXGBE_WRITE_FLUSH(hw); +} + +static int +ixgbevf_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d", dev->data->port_id); /* @@ -2957,16 +4505,23 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev) */ #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC if (!conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip"); + PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip"); conf->rxmode.hw_strip_crc = 1; } #else if (conf->rxmode.hw_strip_crc) { - PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip"); + PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip"); conf->rxmode.hw_strip_crc = 0; } #endif + /* + * Initialize to TRUE. If any of Rx queues doesn't meet the bulk + * allocation or vector Rx preconditions we will reset it. + */ + adapter->rx_bulk_alloc_allowed = true; + adapter->rx_vec_allowed = true; + return 0; } @@ -2975,6 +4530,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t intr_vector = 0; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + int err, mask = 0; PMD_INIT_FUNC_TRACE(); @@ -2996,15 +4555,39 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } /* Set vfta */ - ixgbevf_set_vfta_all(dev,1); + ixgbevf_set_vfta_all(dev, 1); /* Set HW strip */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; ixgbevf_vlan_offload_set(dev, mask); ixgbevf_dev_rxtx_start(dev); + /* check and configure queue intr-vector mapping */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + if (rte_intr_efd_enable(intr_handle, intr_vector)) + return -1; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + ixgbevf_configure_msix(dev); + + rte_intr_enable(intr_handle); + + /* Re-enable interrupt for VF */ + ixgbevf_intr_enable(hw); + return 0; } @@ -3012,22 +4595,33 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); - hw->adapter_stopped = TRUE; + ixgbevf_intr_disable(hw); + + hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); /* * Clear what we set, but 
we still keep shadow_vfta to * restore after device starts */ - ixgbevf_set_vfta_all(dev,0); + ixgbevf_set_vfta_all(dev, 0); /* Clear stored conf */ dev->data->scattered_rx = 0; ixgbe_dev_clear_queues(dev); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec != NULL) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } } static void @@ -3041,25 +4635,32 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) ixgbevf_dev_stop(dev); - /* reprogram the RAR[0] in case user changed it. */ - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + ixgbe_dev_free_queues(dev); + + /** + * Remove the VF MAC address to ensure + * that the VF traffic goes to the PF + * after stop, close and detach of the VF + **/ + ixgbevf_remove_mac_addr(dev, 0); } static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); int i = 0, j = 0, vfta = 0, mask = 1; - for (i = 0; i < IXGBE_VFTA_SIZE; i++){ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) { vfta = shadow_vfta->vfta[i]; - if(vfta){ + if (vfta) { mask = 1; - for (j = 0; j < 32; j++){ - if(vfta & mask) - ixgbe_set_vfta(hw, (i<<5)+j, 0, on); - mask<<=1; + for (j = 0; j < 32; j++) { + if (vfta & mask) + ixgbe_set_vfta(hw, (i<<5)+j, 0, + on, false); + mask <<= 1; } } } @@ -3071,7 +4672,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vid_idx = 0; uint32_t vid_bit = 0; @@ -3080,8 +4681,8 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ - ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on); - if(ret){ + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); + if (ret) { PMD_INIT_LOG(ERR, "Unable to set VF vlan"); return ret; } @@ -3106,17 +4707,17 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) PMD_INIT_FUNC_TRACE(); - if(queue >= hw->mac.max_rx_queues) + if (queue >= hw->mac.max_rx_queues) return; ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - if(on) + if (on) ctrl |= IXGBE_RXDCTL_VME; else ctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on); + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); } static void @@ -3128,33 +4729,34 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) int on = 0; /* VF function only support hw strip feature, others are not support */ - if(mask & ETH_VLAN_STRIP_MASK){ + if (mask & ETH_VLAN_STRIP_MASK) { on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); - for(i=0; i < hw->mac.max_rx_queues; i++) - ixgbevf_vlan_strip_queue_set(dev,i,on); + for (i = 0; i < hw->mac.max_rx_queues; i++) + ixgbevf_vlan_strip_queue_set(dev, i, on); } } static int -ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) +ixgbe_vt_check(struct ixgbe_hw *hw) { uint32_t reg_val; - /* we only need to do this if VMDq is enabled */ + /* if Virtualization Technology is enabled */ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); - return (-1); + PMD_INIT_LOG(ERR, "VT
must be enabled for this setting"); + return -1; } return 0; } static uint32_t -ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) +ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) { uint32_t vector = 0; + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((uc_addr->addr_bytes[4] >> 4) | @@ -3182,8 +4784,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) } static int -ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, - uint8_t on) +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint8_t on) { uint32_t vector; uint32_t uta_idx; @@ -3202,14 +4804,14 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, /* The UTA table only exists on 82599 hardware and newer */ if (hw->mac.type < ixgbe_mac_82599EB) - return (-ENOTSUP); + return -ENOTSUP; - vector = ixgbe_uta_vector(hw,mac_addr); + vector = ixgbe_uta_vector(hw, mac_addr); uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; uta_shift = vector & ixgbe_uta_bit_mask; rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); - if(rc == on) + if (rc == on) return 0; reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); @@ -3229,7 +4831,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); else - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } @@ -3245,9 +4847,9 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) /* The UTA table only exists on 82599 hardware and newer */ if (hw->mac.type < ixgbe_mac_82599EB) - return (-ENOTSUP); + return -ENOTSUP; - if(on) { + if (on) { for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { uta_info->uta_shadow[i] = ~0; IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); @@ -3281,340 +4883,479 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) return new_val; } -static int -ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, - uint16_t rx_mask, uint8_t on) + +int +rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) { - int val = 0; + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (hw->mac.type == ixgbe_mac_82598EB) { - PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" - " on 82599 hardware and newer"); - return (-ENOTSUP); - } - if (ixgbe_vmdq_mode_check(hw) < 0) - return (-ENOTSUP); + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); - val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; - if (on) - vmolr |= val; - else - vmolr &= ~val; + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); + mac->ops.set_vlan_anti_spoofing(hw, on, vf); return 0; } -static int -ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +int +rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) { - uint32_t reg,addr; - uint32_t val; - const uint8_t bit1 = 0x1; + 
struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (ixgbe_vmdq_mode_check(hw) < 0) - return (-ENOTSUP); + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); - addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); - reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; - if (on) - reg |= val; - else - reg &= ~val; + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; - IXGBE_WRITE_REG(hw, addr,reg); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + mac->ops.set_mac_anti_spoofing(hw, on, vf); return 0; } -static int -ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +int +rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id) { - uint32_t reg,addr; - uint32_t val; - const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (ixgbe_vmdq_mode_check(hw) < 0) - return (-ENOTSUP); + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); - addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2); - reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; - if (on) - reg |= val; - else - reg &= ~val; + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (vlan_id > ETHER_MAX_VLAN_ID) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf)); + if (vlan_id) { + ctrl = vlan_id; + ctrl |= IXGBE_VMVIR_VLANA_DEFAULT; + } else { + ctrl = 0; + } - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl); return 0; } -static int -ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask, uint8_t vlan_on) +int +rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on) { - int ret = 0; - uint16_t pool_idx; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; - if (ixgbe_vmdq_mode_check(hw) < 0) - return (-ENOTSUP); - for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { - if (pool_mask & ((uint64_t)(1ULL << pool_idx))) - ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on); - if (ret < 0) - return ret; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - return ret; + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + /* enable or disable VMDQ loopback */ + if (on) + ctrl |= IXGBE_PFDTXGSWC_VT_LBEN; + else + ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN; + + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl); + + return 0; } -static int -ixgbe_mirror_rule_set(struct rte_eth_dev *dev, - struct rte_eth_vmdq_mirror_conf *mirror_conf, - uint8_t rule_id, uint8_t on) +int +rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on) { - uint32_t mr_ctl,vlvf; - uint32_t mp_lsb = 0; - uint32_t mv_msb = 0; - uint32_t mv_lsb = 0; - uint32_t mp_msb = 0; - 
uint8_t i = 0; - int reg_index = 0; - uint64_t vlan_mask = 0; + struct ixgbe_hw *hw; + uint32_t reg_value; + int i; + int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT); + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; - const uint8_t pool_mask_offset = 32; - const uint8_t vlan_mask_offset = 32; - const uint8_t dst_pool_offset = 8; - const uint8_t rule_mr_offset = 4; - const uint8_t mirror_rule_mask= 0x0F; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - struct ixgbe_mirror_info *mr_info = - (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); - if (ixgbe_vmdq_mode_check(hw) < 0) - return (-ENOTSUP); + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; - /* Check if vlan mask is valid */ - if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) { - if (mirror_conf->vlan.vlan_mask == 0) - return (-EINVAL); - } + if (on > 1) + return -EINVAL; - /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ - if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) { - for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) { - if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { - /* search vlan id related pool vlan filter index */ - reg_index = ixgbe_find_vlvf_slot(hw, - mirror_conf->vlan.vlan_id[i]); - if(reg_index < 0) - return (-EINVAL); - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); - if ((vlvf & IXGBE_VLVF_VIEN) && - ((vlvf & IXGBE_VLVF_VLANID_MASK) - == mirror_conf->vlan.vlan_id[i])) - vlan_mask |= (1ULL << reg_index); - else - return (-EINVAL); - } - } + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + for (i = 0; i <= num_queues; i++) { + reg_value = IXGBE_QDE_WRITE | + (i << IXGBE_QDE_IDX_SHIFT) | + (on & IXGBE_QDE_ENABLE); + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value); + } - if (on) { - mv_lsb = vlan_mask & 0xFFFFFFFF; - mv_msb = vlan_mask >> vlan_mask_offset; + return 0; +} - mr_info->mr_conf[rule_id].vlan.vlan_mask = - mirror_conf->vlan.vlan_mask; - for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { - if(mirror_conf->vlan.vlan_mask & (1ULL << i)) - mr_info->mr_conf[rule_id].vlan.vlan_id[i] = - mirror_conf->vlan.vlan_id[i]; - } - } else { - mv_lsb = 0; - mv_msb = 0; - mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; - for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) - mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; - } - } +int +rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t reg_value; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; - /* - * if enable pool mirror, write related pool mask register,if disable - * pool mirror, clear PFMRVM register - */ - if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) { - if (on) { - mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; - mp_msb = mirror_conf->pool_mask >> pool_mask_offset; - mr_info->mr_conf[rule_id].pool_mask = - mirror_conf->pool_mask; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - } else { - mp_lsb = 0; - mp_msb = 0; - mr_info->mr_conf[rule_id].pool_mask = 0; - } - } + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); - /* read mirror control register and recalculate it */ - mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id)); + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; - if (on) { - mr_ctl |= mirror_conf->rule_type_mask; - mr_ctl &= mirror_rule_mask; - mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; - } 
else - mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask); + /* only support VF's 0 to 63 */ + if ((vf >= dev_info.max_vfs) || (vf > 63)) + return -EINVAL; - mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask); - mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; + if (on > 1) + return -EINVAL; - /* write mirrror control register */ - IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf)); + if (on) + reg_value |= IXGBE_SRRCTL_DROP_EN; + else + reg_value &= ~IXGBE_SRRCTL_DROP_EN; - /* write pool mirrror control register */ - if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) { - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), - mp_msb); - } - /* write VLAN mirrror control register */ - if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) { - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), - mv_msb); - } + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value); return 0; } -static int -ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) +int +rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on) { - int mr_ctl = 0; - uint32_t lsb_val = 0; - uint32_t msb_val = 0; - const uint8_t rule_mr_offset = 4; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint16_t queues_per_pool; + uint32_t q; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_mirror_info *mr_info = - (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (ixgbe_vmdq_mode_check(hw) < 0) - return (-ENOTSUP); + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); - memset(&mr_info->mr_conf[rule_id], 0, - sizeof(struct rte_eth_vmdq_mirror_conf)); + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; - /* clear PFVMCTL register */ - IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + if (vf >= dev_info.max_vfs) + return -EINVAL; - /* clear pool mask register */ - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); + if (on > 1) + return -EINVAL; - /* clear vlan mask register */ - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); + + /* The PF has 128 queue pairs and in SRIOV configuration + * those queues will be assigned to VF's, so RXDCTL + * registers will be dealing with queues which will be + * assigned to VF's. + * Let's say we have SRIOV configured with 31 VF's then the + * first 124 queues 0-123 will be allocated to VF's and only + * the last 4 queues 124-127 will be assigned to the PF.
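
The queue arithmetic described in this comment can be checked in isolation: with the 82599's 128 queue pairs split across 32 pools, each pool owns vmdq_queue_num / max_vmdq_pools queues, and queue q of VF v is absolute queue v * queues_per_pool + q. A standalone sketch (values assumed from that layout):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t vmdq_queue_num = 128;	/* 82599 PF queue pairs */
	const uint16_t max_vmdq_pools = 32;	/* e.g. SR-IOV: 31 VFs + PF */
	const uint16_t queues_per_pool = vmdq_queue_num / max_vmdq_pools;
	unsigned int vf = 5, q;

	for (q = 0; q < queues_per_pool; q++)
		printf("VF %u queue %u -> absolute queue %u\n",
		       vf, q, q + vf * queues_per_pool);
	return 0;	/* VF 5 owns absolute queues 20..23 */
}
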
+ */ + + queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools; + for (q = 0; q < queues_per_pool; q++) + (*dev->dev_ops->vlan_strip_queue_set)(dev, + q + vf * queues_per_pool, on); return 0; } -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate) +int +rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t rf_dec, rf_int; - uint32_t bcnrc_val; - uint16_t link_speed = dev->data->dev_link.link_speed; + int val = 0; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct ixgbe_hw *hw; + uint32_t vmolr; - if (queue_idx >= hw->mac.max_tx_queues) + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) return -EINVAL; - if (tx_rate != 0) { - /* Calculate the rate factor values to set */ - rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; - rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; - rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; + if (on > 1) + return -EINVAL; - bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & - IXGBE_RTTBCNRC_RF_INT_MASK_M); - bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); + + if (hw->mac.type == ixgbe_mac_82598EB) { + PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" + " on 82599 hardware and newer"); + return -ENOTSUP; + } + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); + + if (on) + vmolr |= val; + else + vmolr &= ~val; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint32_t reg, addr; + uint32_t val; + const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ + if (vf >= 32) { + addr = IXGBE_VFRE(1); + val = bit1 << (vf - 32); } else { - bcnrc_val = 0; + addr = IXGBE_VFRE(0); + val = bit1 << vf; } - /* - * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM - * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise - * set as 0x4. 
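
A usage sketch for the new rte_pmd_ixgbe_set_vf_rxmode() API above, assuming port 0 is bound to the ixgbe PF PMD and the ETH_VMDQ_ACCEPT_* flags come from this release's rte_ethdev.h; the error codes mirror the checks in the function body:

#include <rte_ethdev.h>
#include "rte_pmd_ixgbe.h"

/* Let VF 3 on port 0 receive broadcast and unhashed multicast frames. */
static int vf3_accept_bcast_mcast(void)
{
	int ret;

	ret = rte_pmd_ixgbe_set_vf_rxmode(0, 3, ETH_VMDQ_ACCEPT_BROADCAST, 1);
	if (ret != 0)
		return ret;	/* -ENOTSUP on non-ixgbe or 82598 ports */
	return rte_pmd_ixgbe_set_vf_rxmode(0, 3, ETH_VMDQ_ACCEPT_MULTICAST, 1);
}
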
- */ - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len >= - IXGBE_MAX_JUMBO_FRAME_SIZE)) - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, - IXGBE_MMW_SIZE_JUMBO_FRAME); + reg = IXGBE_READ_REG(hw, addr); + + if (on) + reg |= val; else - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, - IXGBE_MMW_SIZE_DEFAULT); + reg &= ~val; - /* Set RTTBCNRC of queue X */ - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); - IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, addr, reg); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint32_t reg, addr; + uint32_t val; + const uint8_t bit1 = 0x1; + + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ + if (vf >= 32) { + addr = IXGBE_VFTE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFTE(0); + val = bit1 << vf; + } + + reg = IXGBE_READ_REG(hw, addr); + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr, reg); return 0; } -static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, +int +rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + int ret = 0; + uint16_t vf_idx; + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0)) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + for (vf_idx = 0; vf_idx < 64; vf_idx++) { + if (vf_mask & ((uint64_t)(1ULL << vf_idx))) { + ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx, + vlan_on, false); + if (ret < 0) + return ret; + } + } + + return ret; +} + +int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vf_info *vfinfo = - *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); - uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; - uint32_t queue_stride = - IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; - uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; - uint32_t queue_end = queue_idx + nb_q_per_pool - 1; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + struct rte_eth_link link; + uint8_t nb_q_per_pool; + uint32_t queue_stride; + uint32_t queue_idx, idx = 0, vf_idx; + uint32_t queue_end; uint16_t total_rate = 0; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + rte_eth_link_get_nowait(port, &link); + + if (is_ixgbe_pmd(dev_info.driver_name) != 0) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + if (tx_rate > 
link.link_speed) + return -EINVAL; + + if (q_msk == 0) + return 0; + + pci_dev = IXGBE_DEV_TO_PCI(dev); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + queue_idx = vf * queue_stride; + queue_end = queue_idx + nb_q_per_pool - 1; if (queue_end >= hw->mac.max_tx_queues) return -EINVAL; - if (vfinfo != NULL) { - for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { + if (vfinfo) { + for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { if (vf_idx == vf) continue; for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); idx++) total_rate += vfinfo[vf_idx].tx_rate[idx]; } - } else + } else { return -EINVAL; + } /* Store tx_rate for this vf. */ for (idx = 0; idx < nb_q_per_pool; idx++) { @@ -3626,8 +5367,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, } if (total_rate > dev->data->dev_link.link_speed) { - /* - * Reset stored TX rate of the VF if it causes exceed + /* Reset stored TX rate of the VF if it causes exceed * link speed. */ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); @@ -3644,165 +5384,2697 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, return 0; } -static void -ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - __attribute__((unused)) uint32_t index, - __attribute__((unused)) uint32_t pool) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int diag; - - /* - * On a 82599 VF, adding again the same MAC addr is not an idempotent - * operation. Trap this case to avoid exhausting the [very limited] - * set of PF resources used to store VF MAC addresses. - */ - if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) - return; - diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); - if (diag == 0) - return; - PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); -} +#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ +#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ +#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ +#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */ +#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ + ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ + ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) -static void -ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) +static int +ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; - struct ether_addr *mac_addr; - uint32_t i; - int diag; + uint32_t mr_ctl, vlvf; + uint32_t mp_lsb = 0; + uint32_t mv_msb = 0; + uint32_t mv_lsb = 0; + uint32_t mp_msb = 0; + uint8_t i = 0; + int reg_index = 0; + uint64_t vlan_mask = 0; - /* - * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does - * not support the deletion of a given MAC address. - * Instead, it imposes to delete all MAC addresses, then to add again - * all MAC addresses with the exception of the one to be deleted. 
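
A usage sketch for rte_pmd_ixgbe_set_vf_rate_limit() as declared above, assuming port 0 is an ixgbe PF with SR-IOV active; q_msk selects which of the VF's queues receive the cap:

#include <stdint.h>
#include "rte_pmd_ixgbe.h"

/* Cap VF 1 to 1000 Mb/s on its first four queues (bits 0-3 of q_msk). */
static int cap_vf1_tx(void)
{
	uint64_t q_msk = 0xfULL;

	/* -EINVAL if the VF id is bad or tx_rate exceeds the link speed */
	return rte_pmd_ixgbe_set_vf_rate_limit(0, 1, 1000, q_msk);
}
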
- */ - (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); + const uint8_t pool_mask_offset = 32; + const uint8_t vlan_mask_offset = 32; + const uint8_t dst_pool_offset = 8; + const uint8_t rule_mr_offset = 4; + const uint8_t mirror_rule_mask = 0x0F; - /* - * Add again all MAC addresses, with the exception of the deleted one - * and of the permanent MAC address. - */ - for (i = 0, mac_addr = dev->data->mac_addrs; - i < hw->mac.num_rar_entries; i++, mac_addr++) { - /* Skip the deleted MAC address */ - if (i == index) - continue; - /* Skip NULL MAC addresses */ - if (is_zero_ether_addr(mac_addr)) - continue; - /* Skip the permanent MAC address */ - if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) - continue; - diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); - if (diag != 0) - PMD_DRV_LOG(ERR, - "Adding again MAC address " - "%02x:%02x:%02x:%02x:%02x:%02x failed " - "diag=%d", - mac_addr->addr_bytes[0], - mac_addr->addr_bytes[1], - mac_addr->addr_bytes[2], - mac_addr->addr_bytes[3], - mac_addr->addr_bytes[4], - mac_addr->addr_bytes[5], - diag); - } -} + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t mirror_type = 0; -#define MAC_TYPE_FILTER_SUP(type) do {\ - if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ - (type) != ixgbe_mac_X550)\ - return -ENOTSUP;\ -} while (0) + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; -static int -ixgbe_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, - bool add) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf; + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { + PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", + mirror_conf->rule_type); return -EINVAL; + } - synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + mirror_type |= IXGBE_MRCTL_VLME; + /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ + for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { + /* search vlan id related pool vlan filter index */ + reg_index = ixgbe_find_vlvf_slot(hw, + mirror_conf->vlan.vlan_id[i], + false); + if (reg_index < 0) + return -EINVAL; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); + if ((vlvf & IXGBE_VLVF_VIEN) && + ((vlvf & IXGBE_VLVF_VLANID_MASK) == + mirror_conf->vlan.vlan_id[i])) + vlan_mask |= (1ULL << reg_index); + else + return -EINVAL; + } + } - if (add) { - if (synqf & IXGBE_SYN_FILTER_ENABLE) - return -EINVAL; - synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & - IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + if (on) { + mv_lsb = vlan_mask & 0xFFFFFFFF; + mv_msb = vlan_mask >> vlan_mask_offset; + + mr_info->mr_conf[rule_id].vlan.vlan_mask = + mirror_conf->vlan.vlan_mask; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = + mirror_conf->vlan.vlan_id[i]; + } + } else { + mv_lsb = 0; + mv_msb = 0; + mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; + } + } + + /* + * if enable pool mirror, write related pool mask register,if disable + * pool mirror, clear PFMRVM register + */ + if 
(mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + mirror_type |= IXGBE_MRCTL_VPME; + if (on) { + mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; + mp_msb = mirror_conf->pool_mask >> pool_mask_offset; + mr_info->mr_conf[rule_id].pool_mask = + mirror_conf->pool_mask; + + } else { + mp_lsb = 0; + mp_msb = 0; + mr_info->mr_conf[rule_id].pool_mask = 0; + } + } + if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) + mirror_type |= IXGBE_MRCTL_UPME; + if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) + mirror_type |= IXGBE_MRCTL_DPME; + + /* read mirror control register and recalculate it */ + mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); + + if (on) { + mr_ctl |= mirror_type; + mr_ctl &= mirror_rule_mask; + mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; + } else + mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); + + mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; + mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; + + /* write mirrror control register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* write pool mirrror control register */ + if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), + mp_msb); + } + /* write VLAN mirrror control register */ + if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), + mv_msb); + } + + return 0; +} + +static int +ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) +{ + int mr_ctl = 0; + uint32_t lsb_val = 0; + uint32_t msb_val = 0; + const uint8_t rule_mr_offset = 4; + + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + memset(&mr_info->mr_conf[rule_id], 0, + sizeof(struct rte_eth_mirror_conf)); + + /* clear PFVMCTL register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* clear pool mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); + + /* clear vlan mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); + + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + mask |= (1 << IXGBE_MISC_VEC_ID); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + rte_intr_enable(intr_handle); + + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + mask &= ~(1 << IXGBE_MISC_VEC_ID); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + 
uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask |= (1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= (1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= (1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + rte_intr_enable(intr_handle); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask &= ~(1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= ~(1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= ~(1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + + return 0; +} + +static void +ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + tmp &= ~0xFF; + tmp |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); + } else { + /* rx or tx cause */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); + } +} + +/** + * set the IVAR registers, mapping interrupt causes to vectors + * @param hw + * pointer to ixgbe_hw struct + * @direction + * 0 for Rx, 1 for Tx, -1 for other causes + * @queue + * queue to map the corresponding interrupt to + * @msix_vector + * the vector to map to the corresponding queue + */ +static void +ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (hw->mac.type == ixgbe_mac_82598EB) { + if (direction == -1) + direction = 0; + idx = (((direction * 64) + queue) >> 2) & 0x1F; + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); + tmp &= ~(0xFF << (8 * (queue & 0x3))); + tmp |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); + } else if ((hw->mac.type == ixgbe_mac_82599EB) || + (hw->mac.type == ixgbe_mac_X540)) { + if (direction == -1) { + /* other causes */ + idx = ((queue & 1) * 8); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); + } else { + /* rx or tx causes */ + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); + } + } +} + +static void +ixgbevf_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; 
+ struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t q_idx; + uint32_t vector_idx = IXGBE_MISC_VEC_ID; + + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); + + /* won't configure msix register if no mapping is done + * between intr vector and event fd. + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + /* Configure all RX queues of VF */ + for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { + /* Force all queue use vector 0, + * as IXGBE_VF_MAXMSIVECOTR = 1 + */ + ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); + intr_handle->intr_vec[q_idx] = vector_idx; + } +} + +/** + * Sets up the hardware to properly generate MSI-X interrupts + * @hw + * board private structure + */ +static void +ixgbe_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t queue_id, base = IXGBE_MISC_VEC_ID; + uint32_t vec = IXGBE_MISC_VEC_ID; + uint32_t mask; + uint32_t gpie; + + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + if (rte_intr_allow_others(intr_handle)) + vec = base = IXGBE_RX_VEC_START; + + /* setup GPIE for MSI-x mode */ + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; + /* auto clearing and auto setting corresponding bits in EIMS + * when MSI-X interrupt is triggered + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + } + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. 
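
On 82599/X540, each 32-bit IVAR register carries four 8-bit vector slots covering two queues (one Rx byte and one Tx byte each), which is what the idx arithmetic in ixgbe_set_ivar_map() above computes. A standalone sketch of that addressing:

#include <stdint.h>
#include <stdio.h>

/* Compute (register index, bit offset) of a queue's IVAR entry,
 * mirroring idx = (16 * (queue & 1)) + (8 * direction) above. */
static void ivar_slot(uint8_t queue, int direction /* 0=Rx, 1=Tx */,
		      unsigned int *reg, unsigned int *offset)
{
	*reg = queue >> 1;			/* two queues per register */
	*offset = (16 * (queue & 1)) + (8 * direction);
}

int main(void)
{
	unsigned int reg, off;

	ivar_slot(5, 0, &reg, &off);	/* Rx cause of queue 5 */
	printf("IVAR[%u], bits [%u:%u]\n", reg, off + 7, off);
	return 0;	/* prints IVAR[2], bits [23:16] */
}
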
+ */ + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), + IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + + IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); +} + +static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rf_dec, rf_int; + uint32_t bcnrc_val; + uint16_t link_speed = dev->data->dev_link.link_speed; + + if (queue_idx >= hw->mac.max_tx_queues) + return -EINVAL; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; + rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; + rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; + + bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & + IXGBE_RTTBCNRC_RF_INT_MASK_M); + bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise + * set as 0x4. + */ + if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len >= + IXGBE_MAX_JUMBO_FRAME_SIZE)) + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_JUMBO_FRAME); + else + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_DEFAULT); + + /* Set RTTBCNRC of queue X */ + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static void +ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + __attribute__((unused)) uint32_t index, + __attribute__((unused)) uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int diag; + + /* + * On a 82599 VF, adding again the same MAC addr is not an idempotent + * operation. Trap this case to avoid exhausting the [very limited] + * set of PF resources used to store VF MAC addresses. + */ + if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + return; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag == 0) + return; + PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); +} + +static void +ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; + struct ether_addr *mac_addr; + uint32_t i; + int diag; + + /* + * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does + * not support the deletion of a given MAC address. + * Instead, it imposes to delete all MAC addresses, then to add again + * all MAC addresses with the exception of the one to be deleted. 
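
The rate-factor computation in ixgbe_set_queue_rate_limit() above is plain fixed-point division: rf_int is the integer part of link_speed / tx_rate and rf_dec is its fraction scaled by 2^14. A standalone worked example; the shift and mask values are assumed from the base driver's ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

#define RF_INT_SHIFT	14		/* assumed IXGBE_RTTBCNRC_RF_INT_SHIFT */
#define RF_DEC_MASK	0x00003FFFu	/* assumed IXGBE_RTTBCNRC_RF_DEC_MASK */

int main(void)
{
	uint32_t link_speed = 10000;	/* Mb/s */
	uint32_t tx_rate = 3000;	/* Mb/s */
	uint32_t rf_int = link_speed / tx_rate;
	uint32_t rf_dec = ((link_speed % tx_rate) << RF_INT_SHIFT) / tx_rate;

	/* factor = 3 + 5461/16384, i.e. ~3.333 = 10000/3000 */
	printf("rf_int=%u rf_dec=%u field=0x%05x\n", (unsigned)rf_int,
	       (unsigned)(rf_dec & RF_DEC_MASK),
	       (unsigned)((rf_int << RF_INT_SHIFT) | rf_dec));
	return 0;
}
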
+ */ + (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); + + /* + * Add again all MAC addresses, with the exception of the deleted one + * and of the permanent MAC address. + */ + for (i = 0, mac_addr = dev->data->mac_addrs; + i < hw->mac.num_rar_entries; i++, mac_addr++) { + /* Skip the deleted MAC address */ + if (i == index) + continue; + /* Skip NULL MAC addresses */ + if (is_zero_ether_addr(mac_addr)) + continue; + /* Skip the permanent MAC address */ + if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + continue; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag != 0) + PMD_DRV_LOG(ERR, + "Adding again MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x failed " + "diag=%d", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5], + diag); + } +} + +static void +ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); +} + +#define MAC_TYPE_FILTER_SUP(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ + (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ + (type) != ixgbe_mac_X550EM_a)\ + return -ENOTSUP;\ +} while (0) + +int +ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t syn_info; + uint32_t synqf; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + syn_info = filter_info->syn_info; + + if (add) { + if (syn_info & IXGBE_SYN_FILTER_ENABLE) + return -EINVAL; + synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & + IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + + if (filter->hig_pri) + synqf |= IXGBE_SYN_FILTER_SYNQFP; + else + synqf &= ~IXGBE_SYN_FILTER_SYNQFP; + } else { + synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + } + + filter_info->syn_info = synqf; + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + return 0; +} + +static int +ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
1 : 0; + filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); + return 0; + } + return -ENOENT; +} + +static int +ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + + +static inline enum ixgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) +{ + if (protocol_value == IPPROTO_TCP) + return IXGBE_FILTER_PROTOCOL_TCP; + else if (protocol_value == IPPROTO_UDP) + return IXGBE_FILTER_PROTOCOL_UDP; + else if (protocol_value == IPPROTO_SCTP) + return IXGBE_FILTER_PROTOCOL_SCTP; + else + return IXGBE_FILTER_PROTOCOL_NONE; +} + +/* inject a 5-tuple filter to HW */ +static inline void +ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint8_t mask = 0xff; + + i = filter->index; + + sdpqf = (uint32_t)(filter->filter_info.dst_port << + IXGBE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + + ftqf = (uint32_t)(filter->filter_info.proto & + IXGBE_FTQF_PROTOCOL_MASK); + ftqf |= (uint32_t)((filter->filter_info.priority & + IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ + mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; + if (filter->filter_info.dst_ip_mask == 0) + mask &= IXGBE_FTQF_DEST_ADDR_MASK; + if (filter->filter_info.src_port_mask == 0) + mask &= IXGBE_FTQF_SOURCE_PORT_MASK; + if (filter->filter_info.dst_port_mask == 0) + mask &= IXGBE_FTQF_DEST_PORT_MASK; + if (filter->filter_info.proto_mask == 0) + mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; + ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= IXGBE_FTQF_POOL_MASK_EN; + ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + + l34timir |= IXGBE_L34T_IMIR_RESERVE; + l34timir |= (uint32_t)(filter->queue << + IXGBE_L34T_IMIR_QUEUE_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); +} + +/* + * add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: ponter to the filter that will be added. + * rx_queue: the queue id the filter assigned to. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
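
ixgbe_add_5tuple_filter() below claims a filter slot by scanning a bitmap one 32-bit word at a time: index i maps to word i/32, bit i%32. A standalone sketch of that allocation (table size assumed to match IXGBE_MAX_FTQF_FILTERS):

#include <stdint.h>
#include <stdio.h>

#define MAX_FILTERS	128	/* assumed IXGBE_MAX_FTQF_FILTERS */
#define BITS_PER_WORD	32

static int alloc_filter_slot(uint32_t mask[MAX_FILTERS / BITS_PER_WORD])
{
	int i, idx, shift;

	for (i = 0; i < MAX_FILTERS; i++) {
		idx = i / BITS_PER_WORD;
		shift = i % BITS_PER_WORD;
		if (!(mask[idx] & (1u << shift))) {
			mask[idx] |= 1u << shift;	/* claim the slot */
			return i;
		}
	}
	return -1;	/* table full */
}

int main(void)
{
	uint32_t mask[MAX_FILTERS / BITS_PER_WORD] = { 0xFFFFFFFFu, 0x7u };

	printf("first free slot: %d\n", alloc_filter_slot(mask));	/* 35 */
	return 0;
}
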
+ */ +static int +ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, idx, shift; + + /* + * look for an unused 5tuple filter index, + * and insert the filter to list. + */ + for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= IXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + return -ENOSYS; + } + + ixgbe_inject_5tuple_filter(dev, filter); + + return 0; +} + +/* + * remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: the pointer of the filter will be removed. + */ +static void +ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint16_t index = filter->index; + + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); +} + +static int +ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ixgbe_hw *hw; + uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. + */ + if (!dev->data->scattered_rx && + (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + return -EINVAL; + + /* + * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU + * request of the version 2.0 of the mailbox API. + * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 + * of the mailbox API. 
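
The bounds enforced by ixgbevf_dev_set_mtu() above reduce to simple frame arithmetic: the MTU plus 18 bytes of Ethernet header and CRC must fit within the 9728-byte jumbo limit. A standalone sketch, with the constants assumed from rte_ether.h:

#include <stdint.h>
#include <stdio.h>

#define ETHER_HDR_LEN			14
#define ETHER_CRC_LEN			4
#define ETHER_MIN_MTU			68
#define ETHER_MAX_JUMBO_FRAME_LEN	9728

static int vf_mtu_valid(uint16_t mtu)
{
	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	return mtu >= ETHER_MIN_MTU && max_frame <= ETHER_MAX_JUMBO_FRAME_LEN;
}

int main(void)
{
	printf("%d %d %d\n", vf_mtu_valid(1500), vf_mtu_valid(9710),
	       vf_mtu_valid(9711));	/* 1 1 0 */
	return 0;
}
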
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; + return 0; +} + +#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ + if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ + return -ENOTSUP;\ +} while (0) + +static inline struct ixgbe_5tuple_filter * +ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, + struct ixgbe_5tuple_filter_info *key) +{ + struct ixgbe_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct ixgbe_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct ixgbe_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || + filter->priority > IXGBE_5TUPLE_MAX_PRI || + filter->priority < IXGBE_5TUPLE_MIN_PRI) + return -EINVAL; + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = + convert_protocol_type(filter->proto); + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +/* + * add or delete a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. 
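+ *
+ * Illustrative usage (editor's sketch, not part of this patch; all
+ * field values are hypothetical and follow the big-endian convention
+ * of struct rte_eth_ntuple_filter):
+ *
+ *   struct rte_eth_ntuple_filter nf = {
+ *           .flags = RTE_5TUPLE_FLAGS,
+ *           .dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1)),
+ *           .dst_ip_mask = UINT32_MAX,   /* UINT32_MAX: compare */
+ *           .proto = IPPROTO_TCP,
+ *           .proto_mask = UINT8_MAX,
+ *           .priority = 1,
+ *           .queue = 1,
+ *   };
+ *
+ *   ret = ixgbe_add_del_ntuple_filter(dev, &nf, TRUE);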
+ */ +int +ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("ixgbe_5tuple_filter", + sizeof(struct ixgbe_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + (void)rte_memcpy(&filter->filter_info, + &filter_5tuple, + sizeof(struct ixgbe_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + ret = ixgbe_add_5tuple_filter(dev, filter); + if (ret < 0) { + rte_free(filter); + return ret; + } + } else + ixgbe_remove_5tuple_filter(dev, filter); + + return 0; +} + +/* + * get a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = filter->queue; + return 0; +} + +/* + * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op: operation to be taken. + * @arg: a pointer to the specific structure corresponding to the filter_op + * + * @return + * - On success, zero. + * - On failure, a negative value.
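+ *
+ * Editor's note: this handler backs the RTE_ETH_FILTER_NTUPLE case of
+ * ixgbe_dev_filter_ctrl() below and is normally reached through the
+ * legacy filter API, e.g. (sketch):
+ *
+ *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
+ *                           RTE_ETH_FILTER_ADD, &nf);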
+ */ +static int +ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +int +ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + uint32_t etqs = 0; + int ret; + struct ixgbe_ethertype_filter ethertype_filter; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == ETHER_TYPE_IPv4 || + filter->ether_type == ETHER_TYPE_IPv6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + etqf = IXGBE_ETQF_FILTER_EN; + etqf |= (uint32_t)filter->ether_type; + etqs |= (uint32_t)((filter->queue << + IXGBE_ETQS_RX_QUEUE_SHIFT) & + IXGBE_ETQS_RX_QUEUE); + etqs |= IXGBE_ETQS_QUEUE_EN; + + ethertype_filter.ethertype = filter->ether_type; + ethertype_filter.etqf = etqf; + ethertype_filter.etqs = etqs; + ethertype_filter.conf = FALSE; + ret = ixgbe_ethertype_filter_insert(filter_info, + &ethertype_filter); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSPC; + } + } else { + ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf, etqs; + int ret; + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); + if (etqf &
IXGBE_ETQF_FILTER_EN) { + etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); + filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> + IXGBE_ETQS_RX_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +/* + * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = ixgbe_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_L2_TUNNEL: + ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ixgbe_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static u8 * +ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + u8 *mc_addr; + + *vmdq = 0; + mc_addr = *mc_addr_ptr; + *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); + return mc_addr; +} + +static int +ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct ixgbe_hw *hw; + u8 *mc_addr_list; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mc_addr_list = (u8 *)mc_addr_set; + return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, + ixgbe_dev_addr_list_itr, TRUE); +} + +static uint64_t +ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systime_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* SYSTIMEL stores ns and SYSTIMEH stores seconds. 
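+ * The 64-bit value is therefore SYSTIMH * NSEC_PER_SEC + SYSTIML on
+ * the X550 family, while the default branch below reads
+ * SYSTIMH:SYSTIML as one raw 64-bit cycle count (editor's note).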
*/ + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + * NSEC_PER_SEC; + break; + default: + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + << 32; + } + + return systime_cycles; +} + +static uint64_t +ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + << 32; + } + + return rx_tstamp_cycles; +} + +static uint64_t +ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + << 32; + } + + return tx_tstamp_cycles; +} + +static void +ixgbe_start_timecounters(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + struct rte_eth_link link; + uint32_t incval = 0; + uint32_t shift = 0; + + /* Get current link speed. */ + memset(&link, 0, sizeof(link)); + ixgbe_dev_link_update(dev, 1); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + + switch (link.link_speed) { + case ETH_SPEED_NUM_100M: + incval = IXGBE_INCVAL_100; + shift = IXGBE_INCVAL_SHIFT_100; + break; + case ETH_SPEED_NUM_1G: + incval = IXGBE_INCVAL_1GB; + shift = IXGBE_INCVAL_SHIFT_1GB; + break; + case ETH_SPEED_NUM_10G: + default: + incval = IXGBE_INCVAL_10GB; + shift = IXGBE_INCVAL_SHIFT_10GB; + break; + } + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* Independent of link speed. */ + incval = 1; + /* Cycles read will be interpreted as ns. */ + shift = 0; + /* Fall-through */ + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + incval >>= IXGBE_INCVAL_SHIFT_82599; + shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | incval); + break; + default: + /* Not supported. 
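+ * (Editor's note: only the 82599, X540 and X550 families provide the
+ * TIMINCA-based timesync used here; on any other MAC type the
+ * timecounters are left unconfigured.)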
*/ + return; + } + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = shift; + adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = shift; + adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = shift; + adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; +} + +static int +ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +{ + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; + + return 0; +} + +static int +ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) +{ + uint64_t ns; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + ns = rte_timespec_to_ns(ts); + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; + + return 0; +} + +static int +ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + + systime_cycles = ixgbe_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_timesync_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + uint32_t tsauxc; + + /* Stop the timesync system time. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); + /* Reset the timesync system time value. */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); + + /* Enable system time for platforms where it isn't on by default. */ + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + ixgbe_start_timecounters(dev); + + /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), + (ETHER_TYPE_1588 | + IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_1588)); + + /* Enable timestamping of received PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Enable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_timesync_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + /* Disable timestamping of received PTP packets. 
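+ * (Editor's note: teardown runs in roughly the reverse order of
+ * ixgbe_timesync_enable(): Tx timestamping, Rx timestamping, the
+ * 1588 L2 filter, and finally the SYSTIME increment.)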
*/ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + + /* Stop incrementing the System Time registers. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); + + return 0; +} + +static int +ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + uint32_t tsync_rxctl; + uint64_t rx_tstamp_cycles; + uint64_t ns; + + tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) + return -EINVAL; + + rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + uint32_t tsync_txctl; + uint64_t tx_tstamp_cycles; + uint64_t ns; + + tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) + return -EINVAL; + + tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; +} + +static int +ixgbe_get_reg_length(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? + ixgbe_regs_mac_82598EB : ixgbe_regs_others; + + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; +} + +static int +ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; +} + +static int +ixgbe_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others; + + if (data == NULL) { + regs->length = ixgbe_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +ixgbevf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + + if (data == NULL) { + regs->length = ixgbevf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; + } + + return -ENOTSUP; +} + +static int +ixgbe_get_eeprom_length(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Return unit is byte count */ + return hw->eeprom.word_size * 2; +} + +static int +ixgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.read_buffer(hw, first, length, data); +} + +static int +ixgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; + + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.write_buffer(hw, first, length, data); +} + +uint16_t +ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + return ETH_RSS_RETA_SIZE_512; + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return ETH_RSS_RETA_SIZE_64; + default: + return ETH_RSS_RETA_SIZE_128; + } +} + +uint32_t +ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if (reta_idx < ETH_RSS_RETA_SIZE_128) + return IXGBE_RETA(reta_idx >> 2); + else + return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRETA(reta_idx >> 2); + default: + return IXGBE_RETA(reta_idx >> 2); + } +} + +uint32_t +ixgbe_mrqc_reg_get(enum 
ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFMRQC; + default: + return IXGBE_MRQC; + } +} + +uint32_t +ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRSSRK(i); + default: + return IXGBE_RSSRK(i); + } +} + +bool +ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + return 0; + default: + return 1; + } +} + +static int +ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info) +{ + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; + + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; + else + dcb_info->nb_tcs = 1; + + if (dcb_config->vt_mode) { /* vt is enabled*/ + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < dcb_info->nb_tcs; j++) { + dcb_info->tc_queue.tc_rxq[i][j].base = + i * dcb_info->nb_tcs + j; + dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; + dcb_info->tc_queue.tc_txq[i][j].base = + i * dcb_info->nb_tcs + j; + dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; + } + } + } else { /* vt is disabled*/ + struct rte_eth_dcb_rx_conf *rx_conf = + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; + if (dcb_info->nb_tcs == ETH_4_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 64; + dcb_info->tc_queue.tc_txq[0][2].base = 96; + dcb_info->tc_queue.tc_txq[0][3].base = 112; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + } else if (dcb_info->nb_tcs == ETH_8_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 32; + dcb_info->tc_queue.tc_txq[0][2].base = 64; + dcb_info->tc_queue.tc_txq[0][3].base = 80; + dcb_info->tc_queue.tc_txq[0][4].base = 96; + dcb_info->tc_queue.tc_txq[0][5].base = 104; + dcb_info->tc_queue.tc_txq[0][6].base = 112; + dcb_info->tc_queue.tc_txq[0][7].base = 120; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; + } + } + for (i = 0; i < dcb_info->nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; + } + return 0; +} + +/* Update e-tag ether type */ +static int 
+ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, + uint16_t ether_type) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; + etag_etype |= ether_type; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Config l2 tunnel ether type */ +static int +ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + + if (l2_tunnel == NULL) + return -EINVAL; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; + ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Enable e-tag tunnel */ +static int +ixgbe_e_tag_enable(struct ixgbe_hw *hw) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype |= IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Enable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = TRUE; + ret = ixgbe_e_tag_enable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable e-tag tunnel */ +static int +ixgbe_e_tag_disable(struct ixgbe_hw *hw) +{ + uint32_t etag_etype; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/* Disable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = FALSE; + ret = ixgbe_e_tag_disable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + 
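+		/* Editor's note: E-tag filters are stored in RAR (receive
+		 * address register) entries, which only the X550 family
+		 * can repurpose this way; hence the MAC-type guard.
+		 */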
return -ENOTSUP; + } + + rar_entries = ixgbe_get_num_rx_addrs(hw); + + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + if ((rar_high & IXGBE_RAH_AV) && + (rar_high & IXGBE_RAH_ADTYPE) && + ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == + l2_tunnel->tunnel_id)) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + + ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); + + return ret; + } + } + + return ret; +} + +static int +ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + /* One entry for one tunnel. Try to remove potential existing entry. */ + ixgbe_e_tag_filter_del(dev, l2_tunnel); + + rar_entries = ixgbe_get_num_rx_addrs(hw); + + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + if (rar_high & IXGBE_RAH_AV) { + continue; + } else { + ixgbe_set_vmdq(hw, i, l2_tunnel->pool); + rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; + rar_low = l2_tunnel->tunnel_id; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + + return ret; + } + } + + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." + " Please remove a rule before adding a new one."); + return -EINVAL; +} + +static inline struct ixgbe_l2_tn_filter * +ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) +{ + int ret; + + ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); + if (ret < 0) + return NULL; + + return l2_tn_info->hash_map[ret]; +} + +static inline int +ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_filter *l2_tn_filter) +{ + int ret; + + ret = rte_hash_add_key(l2_tn_info->hash_handle, + &l2_tn_filter->key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert L2 tunnel filter" + " to hash table %d!", + ret); + return ret; + } + + l2_tn_info->hash_map[ret] = l2_tn_filter; + + TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + + return 0; +} + +static inline int +ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) +{ + int ret; + struct ixgbe_l2_tn_filter *l2_tn_filter; + + ret = rte_hash_del_key(l2_tn_info->hash_handle, key); + + if (ret < 0) { + PMD_DRV_LOG(ERR, + "No such L2 tunnel filter to delete %d!", + ret); + return ret; + } + + l2_tn_filter = l2_tn_info->hash_map[ret]; + l2_tn_info->hash_map[ret] = NULL; + + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + rte_free(l2_tn_filter); + + return 0; +} + +/* Add l2 tunnel filter */ +int +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool restore) +{ + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + struct ixgbe_l2_tn_filter *node; + + if (!restore) { + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + + node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); + + if (node) { + PMD_DRV_LOG(ERR, + "The L2 tunnel filter already exists!"); + return -EINVAL; + } + + node = rte_zmalloc("ixgbe_l2_tn", + sizeof(struct 
ixgbe_l2_tn_filter), + 0); + if (!node) + return -ENOMEM; + + (void)rte_memcpy(&node->key, + &key, + sizeof(struct ixgbe_l2_tn_key)); + node->pool = l2_tunnel->pool; + ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); + if (ret < 0) { + rte_free(node); + return ret; + } + } + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + if ((!restore) && (ret < 0)) + (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + + return ret; +} + +/* Delete l2 tunnel filter */ +int +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + if (ret < 0) + return ret; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/** + * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + int ret; + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_dev_l2_tunnel_filter_add + (dev, + (struct rte_eth_l2_tunnel_conf *)arg, + FALSE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_dev_l2_tunnel_filter_del + (dev, + (struct rte_eth_l2_tunnel_conf *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) +{ + int ret = 0; + uint32_t ctrl; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + if (en) + ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); + + return ret; +} + +/* Enable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = TRUE; + ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + 
IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = FALSE; + ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool en) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + int ret = 0; + uint32_t vmtir, vmvir; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (l2_tunnel->vf_id >= pci_dev->max_vfs) { + PMD_DRV_LOG(ERR, + "VF id %u should be less than %u", + l2_tunnel->vf_id, + pci_dev->max_vfs); + return -EINVAL; + } + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (en) + vmtir = l2_tunnel->tunnel_id; + else + vmtir = 0; + + IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); + + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); + vmvir &= ~IXGBE_VMVIR_TAGA_MASK; + if (en) + vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); + + return ret; +} + +/* Enable l2 tunnel tag insertion */ +static int +ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel tag insertion */ +static int +ixgbe_dev_l2_tunnel_insertion_disable + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) +{ + int ret = 0; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +static int +ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, + bool en) +{ + int ret = 0; + uint32_t qde; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (filter->hig_pri) - synqf |= IXGBE_SYN_FILTER_SYNQFP; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + qde = IXGBE_READ_REG(hw, IXGBE_QDE); + if (en) + qde |= IXGBE_QDE_STRIP_TAG; + else + qde &= ~IXGBE_QDE_STRIP_TAG; + qde &= ~IXGBE_QDE_READ; + qde |= IXGBE_QDE_WRITE; + IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); + + return ret; +} + +/* Enable l2 tunnel tag stripping */ +static int +ixgbe_dev_l2_tunnel_stripping_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_stripping_en_dis(dev, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Disable l2 tunnel tag stripping */ +static int +ixgbe_dev_l2_tunnel_stripping_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + int ret = 0; + + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_stripping_en_dis(dev, 0); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return 
ret; +} + +/* Enable/disable l2 tunnel offload functions */ +static int +ixgbe_dev_l2_tunnel_offload_set + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + uint32_t mask, + uint8_t en) +{ + int ret = 0; + + if (l2_tunnel == NULL) + return -EINVAL; + + ret = -EINVAL; + if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_enable( + dev, + l2_tunnel->l2_tunnel_type); else - synqf &= ~IXGBE_SYN_FILTER_SYNQFP; - } else { - if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) - return -ENOENT; - synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + ret = ixgbe_dev_l2_tunnel_disable( + dev, + l2_tunnel->l2_tunnel_type); } - IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + + if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_insertion_enable( + dev, + l2_tunnel); + else + ret = ixgbe_dev_l2_tunnel_insertion_disable( + dev, + l2_tunnel); + } + + if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_stripping_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_stripping_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_forwarding_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_forwarding_disable( + dev, + l2_tunnel->l2_tunnel_type); + } + + return ret; +} + +static int +ixgbe_update_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); IXGBE_WRITE_FLUSH(hw); + return 0; } +/* There's only one register for the VxLAN UDP port, so we cannot + * add several ports; adding a port simply updates (overwrites) that + * register. + */ static int -ixgbe_syn_filter_get(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter) +ixgbe_add_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + if (port == 0) { + PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); + return -EINVAL; + } - if (synqf & IXGBE_SYN_FILTER_ENABLE) { - filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0; - filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); - return 0; + return ixgbe_update_vxlan_port(hw, port); +} + +/* We cannot truly delete a VxLAN port: the register for the VxLAN + * UDP port must always hold some value, so deletion resets it to + * the original value, 0.
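+ *
+ * Illustrative usage through the generic ethdev API (editor's sketch,
+ * not part of this patch; the port id and UDP port are hypothetical):
+ *
+ *   struct rte_eth_udp_tunnel ut = {
+ *           .udp_port = 4789,            /* IANA VxLAN port */
+ *           .prot_type = RTE_TUNNEL_TYPE_VXLAN,
+ *   };
+ *
+ *   rte_eth_dev_udp_tunnel_port_add(port_id, &ut);
+ *   rte_eth_dev_udp_tunnel_port_delete(port_id, &ut);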
+ */ +static int +ixgbe_del_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + uint16_t cur_port; + + cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); + + if (cur_port != port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", port); + return -EINVAL; } - return -ENOENT; + + return ixgbe_update_vxlan_port(hw, 0); } +/* Add UDP tunneling port */ static int -ixgbe_syn_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) +ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { + int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret; - MAC_TYPE_FILTER_SUP(hw->mac.type); - - if (filter_op == RTE_ETH_FILTER_NOP) - return 0; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", - filter_op); + if (udp_tunnel == NULL) return -EINVAL; - } - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = ixgbe_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, - TRUE); + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); break; - case RTE_ETH_FILTER_DELETE: - ret = ixgbe_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, - FALSE); + + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; break; - case RTE_ETH_FILTER_GET: - ret = ixgbe_syn_filter_get(dev, - (struct rte_eth_syn_filter *)arg); + + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + return ret; +} + +/* Remove UDP tunneling port */ +static int +ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) +{ + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } + + if (udp_tunnel == NULL) + return -EINVAL; + + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); + break; + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + PMD_DRV_LOG(ERR, "Invalid tunnel type"); ret = -EINVAL; break; } @@ -3810,644 +8082,576 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev, return ret; } +static void +ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); -static inline enum ixgbe_5tuple_protocol -convert_protocol_type(uint8_t protocol_value) + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); +} + +static void +ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) { - if (protocol_value == IPPROTO_TCP) - return IXGBE_FILTER_PROTOCOL_TCP; - else if (protocol_value == IPPROTO_UDP) - return IXGBE_FILTER_PROTOCOL_UDP; - else if (protocol_value == IPPROTO_SCTP) - return IXGBE_FILTER_PROTOCOL_SCTP; - else - return IXGBE_FILTER_PROTOCOL_NONE; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); +} + +static void ixgbevf_mbx_process(struct rte_eth_dev *dev) +{ + struct ixgbe_hw 
*hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 in_msg = 0; + + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; + + /* PF reset VF event */ + if (in_msg == IXGBE_PF_CONTROL_MSG) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); } -/* - * add a 5tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, - struct ixgbe_5tuple_filter *filter) +ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) { + uint32_t eicr; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - int i, idx, shift; - uint32_t ftqf, sdpqf; - uint32_t l34timir = 0; - uint8_t mask = 0xff; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + ixgbevf_intr_disable(hw); - /* - * look for an unused 5tuple filter index, - * and insert the filter to list. - */ - for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { - idx = i / (sizeof(uint32_t) * NBBY); - shift = i % (sizeof(uint32_t) * NBBY); - if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { - filter_info->fivetuple_mask[idx] |= 1 << shift; - filter->index = i; - TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, - filter, - entries); - break; - } - } - if (i >= IXGBE_MAX_FTQF_FILTERS) { - PMD_DRV_LOG(ERR, "5tuple filters are full."); - return -ENOSYS; + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); + intr->flags = 0; + + /* only one misc vector supported - mailbox */ + eicr &= IXGBE_VTEICR_MASK; + if (eicr == IXGBE_MISC_VEC_ID) + intr->flags |= IXGBE_FLAG_MAILBOX; + + return 0; +} + +static int +ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbevf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; } - sdpqf = (uint32_t)(filter->filter_info.dst_port << - IXGBE_SDPQF_DSTPORT_SHIFT); - sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + ixgbevf_intr_enable(hw); - ftqf = (uint32_t)(filter->filter_info.proto & - IXGBE_FTQF_PROTOCOL_MASK); - ftqf |= (uint32_t)((filter->filter_info.priority & - IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); - if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ - mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; - if (filter->filter_info.dst_ip_mask == 0) - mask &= IXGBE_FTQF_DEST_ADDR_MASK; - if (filter->filter_info.src_port_mask == 0) - mask &= IXGBE_FTQF_SOURCE_PORT_MASK; - if (filter->filter_info.dst_port_mask == 0) - mask &= IXGBE_FTQF_DEST_PORT_MASK; - if (filter->filter_info.proto_mask == 0) - mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; - ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; - ftqf |= IXGBE_FTQF_POOL_MASK_EN; - ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + return 0; +} + +static void +ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbevf_dev_interrupt_get_status(dev); + ixgbevf_dev_interrupt_action(dev); +} + +/** + * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the Tx security block + **/ +int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECTX_POLL 40 - IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + int i; + int sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg |= IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) + break; + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } - l34timir |= IXGBE_L34T_IMIR_RESERVE; - l34timir |= (uint32_t)(filter->queue << - IXGBE_L34T_IMIR_QUEUE_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); - return 0; + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECTX_POLL) + PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return IXGBE_SUCCESS; } -/* - * remove a 5tuple filter +/** + * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path + * @hw: pointer to hardware structure * - * @param - * dev: Pointer to struct rte_eth_dev. - * filter: the pointer of the filter will be removed. - */ -static void -ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, - struct ixgbe_5tuple_filter *filter) + * Enables the transmit data path. 
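+ * (Editor's note: counterpart of ixgbe_disable_sec_tx_path_generic()
+ * above; both helpers are hand-written here because the shared base
+ * code only implements the equivalent Rx-path functions.)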
+ **/ +int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - uint16_t index = filter->index; + uint32_t sectxreg; - filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= - ~(1 << (index % (sizeof(uint32_t) * NBBY))); - TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); - rte_free(filter); + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + IXGBE_WRITE_FLUSH(hw); - IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); + return IXGBE_SUCCESS; } -static int -ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +int +rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp) { struct ixgbe_hw *hw; - uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + struct rte_eth_dev *dev; + uint32_t ctrl; - hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) - return -EINVAL; + dev = &rte_eth_devices[port]; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ - if (!dev->data->scattered_rx && - (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) - return -EINVAL; + /* Stop the data paths */ + if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS) + return -ENOTSUP; + /* + * Workaround: + * As no ixgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * ixgbe_disable_sec_rx_path(). + */ + ixgbe_disable_sec_tx_path_generic(hw); + + /* Enable Ethernet CRC (required by MACsec offload) */ + ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); + ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); + + /* Enable the TX and RX crypto engines */ + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; + ctrl |= 0x3; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); + + /* Enable SA lookup */ + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); + ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; + ctrl |= en ? 
 
-static int
-ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
 {
 	struct ixgbe_hw *hw;
-	uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
 
-	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
-		return -EINVAL;
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	/* refuse mtu that requires the support of scattered packets when this
-	 * feature has not been enabled before. */
-	if (!dev->data->scattered_rx &&
-	    (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
-	     dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
-		return -EINVAL;
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * The base code implements no Tx equivalent of
+	 * ixgbe_disable_sec_rx_path, and the base code must not be
+	 * modified in DPDK, so call the hand-written helper directly
+	 * for now. Hardware support has already been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Set a minimum inter-frame gap for the Tx security block */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
 	/*
-	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
-	 * request of the version 2.0 of the mailbox API.
-	 * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
-	 * of the mailbox API.
-	 * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers
-	 * prior to 3.11.33 which contains the following change:
-	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+	 * Workaround:
+	 * The base code implements no Tx equivalent of
+	 * ixgbe_enable_sec_rx_path, and the base code must not be
+	 * modified in DPDK, so call the hand-written helper directly
+	 * for now.
 	 */
-	ixgbevf_rlpml_set_vf(hw, max_frame);
+	ixgbe_enable_sec_tx_path_generic(hw);
 
-	/* update max frame size */
-	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
 	return 0;
 }
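
From the application's point of view this is a one-call switch: en selects
authentication plus encryption (1) versus authentication only (0), and rp
turns replay protection on. A hypothetical fragment for port 0, keyed to the
two error codes the function can actually return:

	int ret = rte_pmd_ixgbe_macsec_enable(0, 1, 1);

	if (ret == -ENODEV)
		; /* no such port */
	else if (ret == -ENOTSUP)
		; /* Rx security path could not be disabled on this device */
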
 
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
-	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
-		return -ENOTSUP;\
-} while (0)
-
-static inline struct ixgbe_5tuple_filter *
-ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
-			   struct ixgbe_5tuple_filter_info *key)
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
 {
-	struct ixgbe_5tuple_filter *it;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
 
-	TAILQ_FOREACH(it, filter_list, entries) {
-		if (memcmp(key, &it->filter_info,
-			   sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
-			return it;
-		}
-	}
-	return NULL;
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * The base code implements no Tx equivalent of
+	 * ixgbe_disable_sec_rx_path, and the base code must not be
+	 * modified in DPDK, so call the hand-written helper directly
+	 * for now. Hardware support has already been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * The base code implements no Tx equivalent of
+	 * ixgbe_enable_sec_rx_path, and the base code must not be
+	 * modified in DPDK, so call the hand-written helper directly
+	 * for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
 }
 
-/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/
-static inline int
-ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
-			struct ixgbe_5tuple_filter_info *filter_info)
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
 {
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
-	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
-	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
-		return -EINVAL;
-
-	switch (filter->dst_ip_mask) {
-	case UINT32_MAX:
-		filter_info->dst_ip_mask = 0;
-		filter_info->dst_ip = filter->dst_ip;
-		break;
-	case 0:
-		filter_info->dst_ip_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
-		return -EINVAL;
-	}
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
 
-	switch (filter->src_ip_mask) {
-	case UINT32_MAX:
-		filter_info->src_ip_mask = 0;
-		filter_info->src_ip = filter->src_ip;
-		break;
-	case 0:
-		filter_info->src_ip_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid src_ip mask.");
-		return -EINVAL;
-	}
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	switch (filter->dst_port_mask) {
-	case UINT16_MAX:
-		filter_info->dst_port_mask = 0;
-		filter_info->dst_port = filter->dst_port;
-		break;
-	case 0:
-		filter_info->dst_port_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid dst_port mask.");
-		return -EINVAL;
-	}
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	switch (filter->src_port_mask) {
-	case UINT16_MAX:
-		filter_info->src_port_mask = 0;
-		filter_info->src_port = filter->src_port;
-		break;
-	case 0:
-		filter_info->src_port_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid src_port mask.");
-		return -EINVAL;
-	}
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
 
-	switch (filter->proto_mask) {
-	case UINT8_MAX:
-		filter_info->proto_mask = 0;
-		filter_info->proto =
-			convert_protocol_type(filter->proto);
-		break;
-	case 0:
-		filter_info->proto_mask = 1;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "invalid protocol mask.");
-		return -EINVAL;
-	}
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
 
-	filter_info->priority = (uint8_t)filter->priority;
 	return 0;
 }
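
config_txsc packs the six MAC bytes of the local secure channel into two
little-endian register words: bytes 0-3 into LSECTXSCL and bytes 4-5 into
the low half of LSECTXSCH. Worked through with an invented address:

	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x7e, 0x58, 0xf2 };
	uint32_t scl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
	uint32_t sch = mac[4] | (mac[5] << 8);
	/* scl == 0x7e211b00, sch == 0x0000f258 */
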
 
-/*
- * add or delete a ntuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
- * add: if true, add filter, if false, remove filter
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
-			    struct rte_eth_ntuple_filter *ntuple_filter,
-			    bool add)
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
 {
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter_info filter_5tuple;
-	struct ixgbe_5tuple_filter *filter;
-	int ret;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
 
-	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
-		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
-		return -EINVAL;
-	}
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
-	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
-	if (ret < 0)
-		return ret;
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
-					    &filter_5tuple);
-	if (filter != NULL && add) {
-		PMD_DRV_LOG(ERR, "filter exists.");
-		return -EEXIST;
-	}
-	if (filter == NULL && !add) {
-		PMD_DRV_LOG(ERR, "filter doesn't exist.");
-		return -ENOENT;
-	}
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
 
-	if (add) {
-		filter = rte_zmalloc("ixgbe_5tuple_filter",
-				     sizeof(struct ixgbe_5tuple_filter), 0);
-		if (filter == NULL)
-			return -ENOMEM;
-		(void)rte_memcpy(&filter->filter_info,
-				 &filter_5tuple,
-				 sizeof(struct ixgbe_5tuple_filter_info));
-		filter->queue = ntuple_filter->queue;
-		ret = ixgbe_add_5tuple_filter(dev, filter);
-		if (ret < 0) {
-			rte_free(filter);
-			return ret;
-		}
-	} else
-		ixgbe_remove_5tuple_filter(dev, filter);
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
 
 	return 0;
 }
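
On the receive side the peer's secure channel identifier also carries a port
identifier; it is byte-swapped to big-endian to match the on-wire SCI layout
before being merged above mac[4] and mac[5]. Worked through for pi = 0x0001
on a little-endian CPU (illustrative):

	uint16_t pi = 0x0001;

	pi = rte_cpu_to_be_16(pi);	/* pi == 0x0100 */
	/* (pi << 16) == 0x01000000, the upper half of IXGBE_LSECRXSCH */
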
 
-/*
- * get a ntuple filter
- *
- * @param
- * dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *ntuple_filter)
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
 {
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter_info filter_5tuple;
-	struct ixgbe_5tuple_filter *filter;
-	int ret;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
 
-	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
-		PMD_DRV_LOG(ERR, "only 5tuple is supported.");
-		return -EINVAL;
-	}
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
-	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
-	if (ret < 0)
-		return ret;
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
 
-	filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
-					    &filter_5tuple);
-	if (filter == NULL) {
-		PMD_DRV_LOG(ERR, "filter doesn't exist.");
-		return -ENOENT;
-	}
-	ntuple_filter->queue = filter->queue;
+	/* Set AN and select the SA */
+	ctrl = (an << (idx * 2)) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
 	return 0;
 }
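
The 16-byte SA key is loaded as four little-endian 32-bit words, exactly
like the MAC packing above. A worked sketch for the first word (key bytes
invented; real keys come from the key-agreement layer, e.g. MKA):

	uint8_t key[16] = { 0x00, 0x11, 0x22, 0x33 };	/* rest zero-filled */
	uint32_t w0 = key[0] | (key[1] << 8) | (key[2] << 16) | (key[3] << 24);
	/* w0 == 0x33221100; words 1-3 are packed the same way */
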
 
-/*
- * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
- * @dev: pointer to rte_eth_dev structure
- * @filter_op:operation will be taken.
- * @arg: a pointer to specific structure corresponding to the filter_op
- *
- * @return
- *    - On success, zero.
- *    - On failure, a negative value.
- */
-static int
-ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
-			   enum rte_filter_op filter_op,
-			   void *arg)
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
 {
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
 
-	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
-			    filter_op);
-		return -EINVAL;
-	}
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
 
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = ixgbe_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
-			TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = ixgbe_add_del_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg,
-			FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = ixgbe_get_ntuple_filter(dev,
-			(struct rte_eth_ntuple_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
-		ret = -EINVAL;
-		break;
-	}
-	return ret;
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
 }
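
Taken together, the five rte_pmd_ixgbe_macsec_* calls above form the whole
offload bring-up: enable the engines, program both secure channels, then
install one SA per direction. A hypothetical end-to-end sketch (port number,
addresses, PI and keys are all placeholders):

	#include "rte_pmd_ixgbe.h"

	static int
	macsec_setup(uint8_t port)
	{
		uint8_t local[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
		uint8_t peer[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
		uint8_t txkey[16] = { 0 };	/* dummy keys for illustration */
		uint8_t rxkey[16] = { 0 };
		int ret;

		ret = rte_pmd_ixgbe_macsec_enable(port, 1, 1);
		if (ret == 0)
			ret = rte_pmd_ixgbe_macsec_config_txsc(port, local);
		if (ret == 0)
			ret = rte_pmd_ixgbe_macsec_config_rxsc(port, peer, 1);
		if (ret == 0)
			ret = rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, txkey);
		if (ret == 0)
			ret = rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, rxkey);
		return ret;
	}
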
 
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
-			      uint16_t ethertype)
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
 {
-	int i;
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter *node;
 
-	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (filter_info->ethertype_filters[i] == ethertype &&
-		    (filter_info->ethertype_mask & (1 << i)))
-			return i;
+	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+		ixgbe_inject_5tuple_filter(dev, node);
 	}
-	return -1;
 }
 
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
-			      uint16_t ethertype)
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
 {
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 	int i;
 
 	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
-		if (!(filter_info->ethertype_mask & (1 << i))) {
-			filter_info->ethertype_mask |= 1 << i;
-			filter_info->ethertype_filters[i] = ethertype;
-			return i;
+		if (filter_info->ethertype_mask & (1 << i)) {
+			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+					filter_info->ethertype_filters[i].etqf);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+					filter_info->ethertype_filters[i].etqs);
+			IXGBE_WRITE_FLUSH(hw);
 		}
 	}
-	return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
-			      uint8_t idx)
-{
-	if (idx >= IXGBE_MAX_ETQF_FILTERS)
-		return -1;
-	filter_info->ethertype_mask &= ~(1 << idx);
-	filter_info->ethertype_filters[idx] = 0;
-	return idx;
 }
 
-static int
-ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			       struct rte_eth_ethertype_filter *filter,
-			       bool add)
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_filter_info *filter_info =
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	uint32_t etqf = 0;
-	uint32_t etqs = 0;
-	int ret;
+	uint32_t synqf;
 
-	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
-		return -EINVAL;
+	synqf = filter_info->syn_info;
 
-	if (filter->ether_type == ETHER_TYPE_IPv4 ||
-	    filter->ether_type == ETHER_TYPE_IPv6) {
-		PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
-			    " ethertype filter.", filter->ether_type);
-		return -EINVAL;
+	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+		IXGBE_WRITE_FLUSH(hw);
 	}
+}
 
-	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
-		PMD_DRV_LOG(ERR, "mac compare is unsupported.");
-		return -EINVAL;
-	}
-	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
-		PMD_DRV_LOG(ERR, "drop option is unsupported.");
-		return -EINVAL;
-	}
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *node;
+	struct rte_eth_l2_tunnel_conf l2_tn_conf;
 
-	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
-	if (ret >= 0 && add) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
-			    filter->ether_type);
-		return -EEXIST;
-	}
-	if (ret < 0 && !add) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
-			    filter->ether_type);
-		return -ENOENT;
+	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+		l2_tn_conf.tunnel_id = node->key.tn_id;
+		l2_tn_conf.pool = node->pool;
+		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
 	}
+}
 
-	if (add) {
-		ret = ixgbe_ethertype_filter_insert(filter_info,
-						    filter->ether_type);
-		if (ret < 0) {
-			PMD_DRV_LOG(ERR, "ethertype filters are full.");
-			return -ENOSYS;
-		}
-		etqf = IXGBE_ETQF_FILTER_EN;
-		etqf |= (uint32_t)filter->ether_type;
-		etqs |= (uint32_t)((filter->queue <<
-				    IXGBE_ETQS_RX_QUEUE_SHIFT) &
-				   IXGBE_ETQS_RX_QUEUE);
-		etqs |= IXGBE_ETQS_QUEUE_EN;
-	} else {
-		ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
-		if (ret < 0)
-			return -ENOSYS;
-	}
-	IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
-	IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
-	IXGBE_WRITE_FLUSH(hw);
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+	ixgbe_ntuple_filter_restore(dev);
+	ixgbe_ethertype_filter_restore(dev);
+	ixgbe_syn_filter_restore(dev);
+	ixgbe_fdir_filter_restore(dev);
+	ixgbe_l2_tn_filter_restore(dev);
 
 	return 0;
 }
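
ixgbe_filter_restore() re-injects every software-cached filter (n-tuple,
ethertype, SYN, flow director, L2 tunnel) into hardware. The intended call
site is not visible in this hunk; presumably it sits at the tail of
ixgbe_dev_start(), together with the ixgbe_l2_tunnel_conf() helper defined
just below, so filters survive a stop/start cycle. Roughly (assumed):

	/* at the end of ixgbe_dev_start(), once queues are up: */
	ixgbe_l2_tunnel_conf(dev);
	ixgbe_filter_restore(dev);
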
 
-static int
-ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
-			   struct rte_eth_ethertype_filter *filter)
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
 {
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (l2_tn_info->e_tag_en)
+		(void)ixgbe_e_tag_enable(hw);
+
+	if (l2_tn_info->e_tag_fwd_en)
+		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
 	struct ixgbe_filter_info *filter_info =
 		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	uint32_t etqf, etqs;
-	int ret;
+	struct ixgbe_5tuple_filter *p_5tuple;
 
-	ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
-	if (ret < 0) {
-		PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
-			    filter->ether_type);
-		return -ENOENT;
-	}
-
-	etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
-	if (etqf & IXGBE_ETQF_FILTER_EN) {
-		etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
-		filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
-		filter->flags = 0;
-		filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
-				IXGBE_ETQS_RX_QUEUE_SHIFT;
-		return 0;
-	}
-	return -ENOENT;
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+		ixgbe_remove_5tuple_filter(dev, p_5tuple);
 }
 
-/*
- * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
- * @dev: pointer to rte_eth_dev structure
- * @filter_op:operation will be taken.
- * @arg: a pointer to specific structure corresponding to the filter_op
- */
-static int
-ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
-			      enum rte_filter_op filter_op,
-			      void *arg)
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	int ret;
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i;
 
-	MAC_TYPE_FILTER_SUP(hw->mac.type);
-
-	if (filter_op == RTE_ETH_FILTER_NOP)
-		return 0;
-
-	if (arg == NULL) {
-		PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
-			    filter_op);
-		return -EINVAL;
-	}
-
-	switch (filter_op) {
-	case RTE_ETH_FILTER_ADD:
-		ret = ixgbe_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
-			TRUE);
-		break;
-	case RTE_ETH_FILTER_DELETE:
-		ret = ixgbe_add_del_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg,
-			FALSE);
-		break;
-	case RTE_ETH_FILTER_GET:
-		ret = ixgbe_get_ethertype_filter(dev,
-			(struct rte_eth_ethertype_filter *)arg);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
-		ret = -EINVAL;
-		break;
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_mask & (1 << i) &&
+		    !filter_info->ethertype_filters[i].conf) {
+			(void)ixgbe_ethertype_filter_remove(filter_info,
+							    (uint8_t)i);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+			IXGBE_WRITE_FLUSH(hw);
+		}
 	}
-	return ret;
 }
 
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+		filter_info->syn_info = 0;
+
+		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
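
The clear_* helpers are the teardown mirror of the restore path: they drain
the software lists and zero the matching registers. A consumer outside this
hunk (e.g. a flow-flush or device-uninit path, assumed here) would call them
in sequence:

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);
	ret = ixgbe_clear_all_l2_tn_filter(dev);	/* defined below */
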
 
-static int
-ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
-		      enum rte_filter_type filter_type,
-		      enum rte_filter_op filter_op,
-		      void *arg)
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
 {
-	int ret = -EINVAL;
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_conf;
+	int ret = 0;
 
-	switch (filter_type) {
-	case RTE_ETH_FILTER_NTUPLE:
-		ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_ETHERTYPE:
-		ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_SYN:
-		ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
-		break;
-	case RTE_ETH_FILTER_FDIR:
-		ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
-		break;
-	default:
-		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-			    filter_type);
-		break;
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+		l2_tn_conf.pool = l2_tn_filter->pool;
+		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+		if (ret < 0)
+			return ret;
 	}
-	return ret;
+
+	return 0;
 }
 
-static struct rte_driver rte_ixgbe_driver = {
-	.type = PMD_PDEV,
-	.init = rte_ixgbe_pmd_init,
-};
-
-static struct rte_driver rte_ixgbevf_driver = {
-	.type = PMD_PDEV,
-	.init = rte_ixgbevf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");