X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=594cc4a8f0b6ef226be8b8ef138043735e254df3;hb=0bc9f022a144f6522b9f7d3f191531fef392805d;hp=8431755c61a04d20a600f7944e0a54557bd1ca32;hpb=d87c3058bb2c9271ae1e8a2273821f0f0808ff98;p=dpdk.git diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 8431755c61..594cc4a8f0 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -72,6 +72,8 @@ #include "base/ixgbe_phy.h" #include "ixgbe_regs.h" +#include "rte_pmd_ixgbe.h" + /* * High threshold controlling when to start sending XOFF frames. Must be at * least 8 bytes less than receive packet buffer size. This value is in units @@ -150,6 +152,16 @@ #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_QDE_STRIP_TAG 0x00000004 +#define IXGBE_VTEICR_MASK 0x07 + +enum ixgbevf_xcast_modes { + IXGBEVF_XCAST_MODE_NONE = 0, + IXGBEVF_XCAST_MODE_MULTI, + IXGBEVF_XCAST_MODE_ALLMULTI, +}; + +#define IXGBE_EXVET_VET_EXT_SHIFT 16 +#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); @@ -168,17 +180,24 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev, static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); +static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx); +static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, + size_t fw_size); static void ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); +static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev); static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); @@ -214,9 +233,11 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, uint16_t reta_size); static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); -static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_action(struct rte_eth_dev 
*dev, + struct rte_intr_handle *handle); static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param); static void ixgbe_dev_interrupt_delayed_handler(void *param); @@ -225,7 +246,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); -static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config); +static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); /* For Virtual Function support */ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); @@ -252,17 +273,13 @@ static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, uint8_t queue, uint8_t msix_vector); static void ixgbevf_configure_msix(struct rte_eth_dev *dev); +static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); /* For Eth VMDQ APIs support */ static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct - ether_addr* mac_addr,uint8_t on); -static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on); -static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, - uint16_t rx_mask, uint8_t on); -static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); -static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); -static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask,uint8_t vlan_on); + ether_addr * mac_addr, uint8_t on); +static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on); @@ -278,8 +295,6 @@ static void ixgbe_configure_msix(struct rte_eth_dev *dev); static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); -static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, - uint16_t tx_rate, uint64_t q_msk); static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, @@ -352,6 +367,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *timestamp); static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *timestamp); +static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle, + void *param); static int ixgbe_dev_l2_tunnel_eth_type_conf (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); @@ -388,45 +405,102 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, last = latest; \ } -#define IXGBE_SET_HWSTRIP(h, q) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_SET_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] |= 1 << bit;\ - }while(0) + } while (0) -#define IXGBE_CLEAR_HWSTRIP(h, q) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_CLEAR_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); 
\ (h)->bitmap[idx] &= ~(1 << bit);\ - }while(0) + } while (0) -#define IXGBE_GET_HWSTRIP(h, q, r) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_GET_HWSTRIP(h, q, r) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (r) = (h)->bitmap[idx] >> bit & 1;\ - }while(0) + } while (0) /* * The set of PCI devices this driver supports */ static const struct rte_pci_id pci_id_ixgbe_map[] = { - -#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" - -{ .vendor_id = 0, /* sentinel */ }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) }, + { 
RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) }, +#ifdef RTE_NIC_BYPASS + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) }, +#endif + { .vendor_id = 0, /* sentinel */ }, }; - /* * The set of PCI devices this driver supports (for 82599 VF) */ static const struct rte_pci_id pci_id_ixgbevf_map[] = { - -#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" -{ .vendor_id = 0, /* sentinel */ }, - + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) }, + { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) }, + { .vendor_id = 0, /* sentinel */ }, }; static const struct rte_eth_desc_lim rx_desc_lim = { @@ -439,6 +513,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = { .nb_max = IXGBE_MAX_RING_DESC, .nb_min = IXGBE_MIN_RING_DESC, .nb_align = IXGBE_TXD_ALIGN, + .nb_seg_max = IXGBE_TX_MAX_SEG, + .nb_mtu_seg_max = IXGBE_TX_MAX_SEG, }; static const struct eth_dev_ops ixgbe_eth_dev_ops = { @@ -457,8 +533,11 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .xstats_get = ixgbe_dev_xstats_get, .stats_reset = ixgbe_dev_stats_reset, .xstats_reset = ixgbe_dev_xstats_reset, + .xstats_get_names = ixgbe_dev_xstats_get_names, .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, + .fw_version_get = ixgbe_fw_version_get, .dev_infos_get = ixgbe_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, .mtu_set = ixgbe_dev_mtu_set, .vlan_filter_set = ixgbe_vlan_filter_set, .vlan_tpid_set = ixgbe_vlan_tpid_set, @@ -488,12 +567,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .uc_all_hash_table_set = ixgbe_uc_all_hash_table_set, .mirror_rule_set = ixgbe_mirror_rule_set, .mirror_rule_reset = ixgbe_mirror_rule_reset, - .set_vf_rx_mode = ixgbe_set_pool_rx_mode, - .set_vf_rx = ixgbe_set_pool_rx, - .set_vf_tx = ixgbe_set_pool_tx, - .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter, .set_queue_rate_limit = ixgbe_set_queue_rate_limit, - .set_vf_rate_limit = ixgbe_set_vf_rate_limit, 
.reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, #ifdef RTE_NIC_BYPASS @@ -517,7 +591,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .timesync_disable = ixgbe_timesync_disable, .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, - .get_reg_length = ixgbe_get_reg_length, .get_reg = ixgbe_get_regs, .get_eeprom_length = ixgbe_get_eeprom_length, .get_eeprom = ixgbe_get_eeprom, @@ -545,8 +618,12 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .xstats_get = ixgbevf_dev_xstats_get, .stats_reset = ixgbevf_dev_stats_reset, .xstats_reset = ixgbevf_dev_stats_reset, + .xstats_get_names = ixgbevf_dev_xstats_get_names, .dev_close = ixgbevf_dev_close, + .allmulticast_enable = ixgbevf_dev_allmulticast_enable, + .allmulticast_disable = ixgbevf_dev_allmulticast_disable, .dev_infos_get = ixgbevf_dev_info_get, + .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, .mtu_set = ixgbevf_dev_set_mtu, .vlan_filter_set = ixgbevf_vlan_filter_set, .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set, @@ -564,7 +641,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .rxq_info_get = ixgbe_rxq_info_get, .txq_info_get = ixgbe_txq_info_get, .mac_addr_set = ixgbevf_set_default_mac_addr, - .get_reg_length = ixgbevf_get_reg_length, .get_reg = ixgbevf_get_regs, .reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, @@ -662,6 +738,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = { #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \ sizeof(rte_ixgbe_stats_strings[0])) +/* MACsec statistics */ +static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = { + {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats, + out_pkts_untagged)}, + {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats, + out_pkts_encrypted)}, + {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats, + out_pkts_protected)}, + {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats, + out_octets_encrypted)}, + {"out_octets_protected", offsetof(struct ixgbe_macsec_stats, + out_octets_protected)}, + {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats, + in_pkts_untagged)}, + {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats, + in_pkts_badtag)}, + {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats, + in_pkts_nosci)}, + {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats, + in_pkts_unknownsci)}, + {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats, + in_octets_decrypted)}, + {"in_octets_validated", offsetof(struct ixgbe_macsec_stats, + in_octets_validated)}, + {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats, + in_pkts_unchecked)}, + {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats, + in_pkts_delayed)}, + {"in_pkts_late", offsetof(struct ixgbe_macsec_stats, + in_pkts_late)}, + {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats, + in_pkts_ok)}, + {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats, + in_pkts_invalid)}, + {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats, + in_pkts_notvalid)}, + {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats, + in_pkts_unusedsa)}, + {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats, + in_pkts_notusingsa)}, +}; + +#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \ + sizeof(rte_ixgbe_macsec_strings[0])) + /* Per-queue statistics */ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { 
{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)}, @@ -672,6 +793,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ sizeof(rte_ixgbe_rxq_strings[0])) +#define IXGBE_NB_RXQ_PRIO_VALUES 8 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, @@ -682,6 +804,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ sizeof(rte_ixgbe_txq_strings[0])) +#define IXGBE_NB_TXQ_PRIO_VALUES 8 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = { {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, @@ -819,7 +942,7 @@ ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) { uint32_t i; - for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { + for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) { IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); } @@ -847,7 +970,8 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540) && (hw->mac.type != ixgbe_mac_X550) && - (hw->mac.type != ixgbe_mac_X550EM_x)) + (hw->mac.type != ixgbe_mac_X550EM_x) && + (hw->mac.type != ixgbe_mac_X550EM_a)) return -ENOSYS; PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d", @@ -887,8 +1011,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", stat_mappings->rqsmr[n], n); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); - } - else { + } else { PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", stat_mappings->tqsm[n], n); IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); @@ -897,7 +1020,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, } static void -ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) +ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) { struct ixgbe_stat_mapping_registers *stat_mappings = IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); @@ -915,7 +1038,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) } static void -ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) +ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { uint8_t i; struct ixgbe_dcb_tc_config *tc; @@ -938,7 +1061,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) tc = &dcb_config->tc_config[0]; tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) { + for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; } @@ -952,7 +1075,8 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) /*we only support 4 Tcs for X540, X550 */ if (hw->mac.type == ixgbe_mac_X540 || hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) { + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { dcb_config->num_tcs.pg_tcs = 4; dcb_config->num_tcs.pfc_tcs = 4; } @@ -998,10 +1122,11 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw) static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; + struct rte_pci_device 
*pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); @@ -1018,32 +1143,34 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &ixgbe_eth_dev_ops; eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts; /* * For secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different * RX and TX function. */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { struct ixgbe_tx_queue *txq; /* TX queue function in primary, set by last queue initialized - * Tx queue may not initialized by primary process */ + * Tx queue may not initialized by primary process + */ if (eth_dev->data->tx_queues) { txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; ixgbe_set_tx_function(eth_dev, txq); } else { /* Use default TX function if we get here */ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " - "Using default TX function."); + "Using default TX function."); } ixgbe_set_rx_function(eth_dev); return 0; } - pci_dev = eth_dev->pci_dev; rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; /* Vendor and Device ID need to be set before init of shared code */ hw->device_id = pci_dev->id.device_id; @@ -1071,7 +1198,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Initialize DCB configuration*/ memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); - ixgbe_dcb_init(hw,dcb_config); + ixgbe_dcb_init(hw, dcb_config); /* Get Hardware Flow Control setting */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; @@ -1112,11 +1239,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) if (diag == IXGBE_ERR_EEPROM_VERSION) { PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" - "LOM. Please be aware there may be issues associated " - "with your hardware."); + "LOM. 
Please be aware there may be issues associated " + "with your hardware."); PMD_INIT_LOG(ERR, "If you are experiencing problems " - "please contact your Intel or hardware representative " - "who provided you with this hardware."); + "please contact your Intel or hardware representative " + "who provided you with this hardware."); } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); if (diag) { @@ -1135,12 +1262,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing MAC addresses */ eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - hw->mac.num_rar_entries, 0); + hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %u bytes needed to store " - "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + "Failed to allocate %u bytes needed to store " + "MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } /* Copy the permanent MAC address */ @@ -1149,11 +1276,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing hash filter MAC addresses */ eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - IXGBE_VMDQ_NUM_UC_MAC, 0); + IXGBE_VMDQ_NUM_UC_MAC, 0); if (eth_dev->data->hash_mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); return -ENOMEM; } @@ -1183,15 +1310,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) (int) hw->mac.type, (int) hw->phy.type); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", - eth_dev->data->port_id, pci_dev->id.vendor_id, - pci_dev->id.device_id); + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); - rte_intr_callback_register(&pci_dev->intr_handle, - ixgbe_dev_interrupt_handler, - (void *)eth_dev); + rte_intr_callback_register(intr_handle, + ixgbe_dev_interrupt_handler, eth_dev); /* enable uio/vfio intr/eventfd mapping */ - rte_intr_enable(&pci_dev->intr_handle); + rte_intr_enable(intr_handle); /* enable support intr */ ixgbe_enable_intr(eth_dev); @@ -1199,7 +1325,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* initialize 5tuple filter list */ TAILQ_INIT(&filter_info->fivetuple_list); memset(filter_info->fivetuple_mask, 0, - sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); return 0; } @@ -1207,7 +1333,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) { - struct rte_pci_device *pci_dev; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw; PMD_INIT_FUNC_TRACE(); @@ -1216,7 +1343,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) return -EPERM; hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - pci_dev = eth_dev->pci_dev; if (hw->adapter_stopped == 0) ixgbe_dev_close(eth_dev); @@ -1229,9 +1355,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) ixgbe_swfw_lock_reset(hw); /* disable uio intr before callback unregister */ - rte_intr_disable(&(pci_dev->intr_handle)); - rte_intr_callback_unregister(&(pci_dev->intr_handle), - ixgbe_dev_interrupt_handler, (void *)eth_dev); + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ixgbe_dev_interrupt_handler, eth_dev); /* uninitialize PF if 
max_vfs not zero */ ixgbe_pf_host_uninit(eth_dev); @@ -1259,6 +1385,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw) /* start with highest supported, proceed down */ static const enum ixgbe_pfvf_api_rev sup_ver[] = { + ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, }; @@ -1294,10 +1421,11 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) { int diag; uint32_t tc, tcs; - struct rte_pci_device *pci_dev; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); @@ -1311,16 +1439,29 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* for secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different - * RX function */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY){ - if (eth_dev->data->scattered_rx) - eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc; + * RX function + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + struct ixgbe_tx_queue *txq; + /* TX queue function in primary, set by last queue initialized + * Tx queue may not initialized by primary process + */ + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1]; + ixgbe_set_tx_function(eth_dev, txq); + } else { + /* Use default TX function if we get here */ + PMD_INIT_LOG(NOTICE, + "No TX queues configured yet. Using default TX function."); + } + + ixgbe_set_rx_function(eth_dev); + return 0; } - pci_dev = eth_dev->pci_dev; - rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; @@ -1369,12 +1510,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing MAC addresses */ eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * - hw->mac.num_rar_entries, 0); + hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %u bytes needed to store " - "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + "Failed to allocate %u bytes needed to store " + "MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } @@ -1404,14 +1545,19 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* reset the hardware with the new settings */ diag = hw->mac.ops.start_hw(hw); switch (diag) { - case 0: - break; + case 0: + break; - default: - PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); - return -EIO; + default: + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return -EIO; } + rte_intr_callback_register(intr_handle, + ixgbevf_dev_interrupt_handler, eth_dev); + rte_intr_enable(intr_handle); + ixgbevf_intr_enable(hw); + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, "ixgbe_mac_82599_vf"); @@ -1424,8 +1570,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw; - unsigned i; PMD_INIT_FUNC_TRACE(); @@ 
-1444,30 +1591,22 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) /* Disable the interrupts for VF */ ixgbevf_intr_disable(hw); - for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { - ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]); - eth_dev->data->rx_queues[i] = NULL; - } - eth_dev->data->nb_rx_queues = 0; - - for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { - ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]); - eth_dev->data->tx_queues[i] = NULL; - } - eth_dev->data->nb_tx_queues = 0; - rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; + rte_intr_disable(intr_handle); + rte_intr_callback_unregister(intr_handle, + ixgbevf_dev_interrupt_handler, eth_dev); + return 0; } static struct eth_driver rte_ixgbe_pmd = { .pci_drv = { - .name = "rte_ixgbe_pmd", .id_table = pci_id_ixgbe_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | - RTE_PCI_DRV_DETACHABLE, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .probe = rte_eth_dev_pci_probe, + .remove = rte_eth_dev_pci_remove, }, .eth_dev_init = eth_ixgbe_dev_init, .eth_dev_uninit = eth_ixgbe_dev_uninit, @@ -1479,49 +1618,22 @@ static struct eth_driver rte_ixgbe_pmd = { */ static struct eth_driver rte_ixgbevf_pmd = { .pci_drv = { - .name = "rte_ixgbevf_pmd", .id_table = pci_id_ixgbevf_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = rte_eth_dev_pci_probe, + .remove = rte_eth_dev_pci_remove, }, .eth_dev_init = eth_ixgbevf_dev_init, .eth_dev_uninit = eth_ixgbevf_dev_uninit, .dev_private_size = sizeof(struct ixgbe_adapter), }; -/* - * Driver initialization routine. - * Invoked once at EAL init time. - * Register itself as the [Poll Mode] Driver of PCI IXGBE devices. - */ -static int -rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - - rte_eth_driver_register(&rte_ixgbe_pmd); - return 0; -} - -/* - * VF Driver initialization routine. - * Invoked one at EAL init time. - * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices. 
- */ -static int -rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - - rte_eth_driver_register(&rte_ixgbevf_pmd); - return 0; -} - static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vfta; uint32_t vid_idx; @@ -1559,15 +1671,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret = 0; + uint32_t reg; + uint32_t qinq; + + qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + qinq &= IXGBE_DMATXCTL_GDV; switch (vlan_type) { case ETH_VLAN_TYPE_INNER: - /* Only the high 16-bits is valid */ - IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16); + if (qinq) { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } else { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Inner type is not supported" + " by single VLAN"); + } + break; + case ETH_VLAN_TYPE_OUTER: + if (qinq) { + /* Only the high 16-bits is valid */ + IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << + IXGBE_EXVET_VET_EXT_SHIFT); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } + break; default: ret = -EINVAL; - PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type); + PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); break; } @@ -1595,7 +1739,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vlnctrl; uint16_t i; @@ -1619,14 +1763,25 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) { struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; - if(queue >= IXGBE_MAX_RX_QUEUE_NUM) + if (queue >= IXGBE_MAX_RX_QUEUE_NUM) return; if (on) IXGBE_SET_HWSTRIP(hwstrip, queue); else IXGBE_CLEAR_HWSTRIP(hwstrip, queue); + + if (queue >= dev->data->nb_rx_queues) + return; + + rxq = dev->data->rx_queues[queue]; + + if (on) + rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; + else + rxq->vlan_flags = PKT_RX_VLAN_PKT; } static void @@ -1643,12 +1798,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip 
per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); } @@ -1667,12 +1822,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); } @@ -1684,6 +1839,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t ctrl; uint16_t i; + struct ixgbe_rx_queue *rxq; PMD_INIT_FUNC_TRACE(); @@ -1691,13 +1847,13 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ctrl &= ~IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { + } else { /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ for (i = 0; i < dev->data->nb_rx_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0); @@ -1712,6 +1868,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t ctrl; uint16_t i; + struct ixgbe_rx_queue *rxq; PMD_INIT_FUNC_TRACE(); @@ -1719,13 +1876,13 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ctrl |= IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { + } else { /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ for (i = 0; i < dev->data->nb_rx_queues; i++) { - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxq = dev->data->rx_queues[i]; + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl); /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1); @@ -1775,7 +1932,8 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) /* Clear pooling mode of PFVTCTL. It's required by X550. 
*/ if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) { + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) { ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); @@ -1790,21 +1948,21 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev) static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask) { - if(mask & ETH_VLAN_STRIP_MASK){ + if (mask & ETH_VLAN_STRIP_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_strip) ixgbe_vlan_hw_strip_enable_all(dev); else ixgbe_vlan_hw_strip_disable_all(dev); } - if(mask & ETH_VLAN_FILTER_MASK){ + if (mask & ETH_VLAN_FILTER_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_filter) ixgbe_vlan_hw_filter_enable(dev); else ixgbe_vlan_hw_filter_disable(dev); } - if(mask & ETH_VLAN_EXTEND_MASK){ + if (mask & ETH_VLAN_EXTEND_MASK) { if (dev->data->dev_conf.rxmode.hw_vlan_extend) ixgbe_vlan_hw_extend_enable(dev); else @@ -1819,13 +1977,16 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */ + + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); } static int ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + switch (nb_rx_q) { case 1: case 2: @@ -1839,7 +2000,7 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q) } RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q; - RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q; + RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q; return 0; } @@ -1848,13 +2009,16 @@ static int ixgbe_check_mq_mode(struct rte_eth_dev *dev) { struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t nb_rx_q = dev->data->nb_rx_queues; - uint16_t nb_tx_q = dev->data->nb_rx_queues; + uint16_t nb_tx_q = dev->data->nb_tx_queues; if (RTE_ETH_DEV_SRIOV(dev).active != 0) { /* check multi-queue mode */ switch (dev_conf->rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_DCB: + PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV"); + break; case ETH_MQ_RX_VMDQ_DCB_RSS: /* DCB/RSS VMDQ in SRIOV mode, not implement yet */ PMD_INIT_LOG(ERR, "SRIOV active," @@ -1890,11 +2054,9 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) switch (dev_conf->txmode.mq_mode) { case ETH_MQ_TX_VMDQ_DCB: - /* DCB VMDQ in SRIOV mode, not implement yet */ - PMD_INIT_LOG(ERR, "SRIOV is active," - " unsupported VMDQ mq_mode tx %d.", - dev_conf->txmode.mq_mode); - return -EINVAL; + PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV"); + dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; + break; default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY; break; @@ -1904,11 +2066,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) || (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) { PMD_INIT_LOG(ERR, "SRIOV is active," - " queue number must less equal to %d.", + " nb_rx_q=%d nb_tx_q=%d queue number" + " must be less than or equal to %d.", + nb_rx_q, nb_tx_q, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool); return -EINVAL; } } else { + if 
(dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) { + PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is" + " not supported."); + return -EINVAL; + } /* check configuration for vmdb+dcb mode */ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) { const struct rte_eth_vmdq_dcb_conf *conf; @@ -1982,6 +2151,21 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) return -EINVAL; } } + + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. + */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) { + if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) { + PMD_INIT_LOG(ERR, + "Neither VT nor DCB are enabled, " + "nb_tx_q > %d.", + IXGBE_NONE_MODE_TX_NB_QUEUES); + return -EINVAL; + } + } } return 0; } @@ -2017,6 +2201,25 @@ ixgbe_dev_configure(struct rte_eth_dev *dev) return 0; } +static void +ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t gpie; + + /* only set up it on X550EM_X */ + if (hw->mac.type == ixgbe_mac_X550EM_x) { + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_SDP0_GPIEN_X550EM_x; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + if (hw->phy.type == ixgbe_phy_x550em_ext_t) + intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x; + } +} + /* * Configure device link speed and setup link. * It returns 0 on success. @@ -2028,21 +2231,24 @@ ixgbe_dev_start(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_vf_info *vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; int err, link_up = 0, negotiate = 0; uint32_t speed = 0; int mask = 0; int status; uint16_t vf, idx; + uint32_t *link_speeds; PMD_INIT_FUNC_TRACE(); - /* IXGBE devices don't support half duplex */ - if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) && - (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) { - PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu", - dev->data->dev_conf.link_duplex, + /* IXGBE devices don't support: + * - half duplex (checked afterwards for valid speeds) + * - fixed speed: TODO implement + */ + if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported", dev->data->port_id); return -EINVAL; } @@ -2055,7 +2261,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) ixgbe_stop_adapter(hw); /* reinitialize adapter - * this calls reset and start */ + * this calls reset and start + */ status = ixgbe_pf_reset_hw(hw); if (status != 0) return -1; @@ -2065,11 +2272,18 @@ ixgbe_dev_start(struct rte_eth_dev *dev) /* configure PF module if SRIOV enabled */ ixgbe_pf_host_configure(dev); + ixgbe_dev_phy_intr_setup(dev); + /* check and configure queue intr-vector mapping */ if ((rte_intr_cap_multiple(intr_handle) || !RTE_ETH_DEV_SRIOV(dev).active) && dev->data->dev_conf.intr_conf.rxq != 0) { intr_vector = dev->data->nb_rx_queues; + if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) { + PMD_INIT_LOG(ERR, "At most %d intr queues supported", + IXGBE_MAX_INTR_QUEUE_NUM); + return -ENOTSUP; + } if (rte_intr_efd_enable(intr_handle, intr_vector)) return -1; } @@ -2098,6 +2312,37 @@ ixgbe_dev_start(struct rte_eth_dev *dev) goto 
error; } + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; + ixgbe_vlan_offload_set(dev, mask); + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { + /* Enable vlan filtering for VMDq */ + ixgbe_vmdq_vlan_hw_filter_enable(dev); + } + + /* Configure DCB hw */ + ixgbe_configure_dcb(dev); + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = ixgbe_fdir_configure(dev); + if (err) + goto error; + } + + /* Restore vf rate limit */ + if (vfinfo != NULL) { + for (vf = 0; vf < pci_dev->max_vfs; vf++) + for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) + if (vfinfo[vf].tx_rate[idx] != 0) + rte_pmd_ixgbe_set_vf_rate_limit( + dev->data->port_id, vf, + vfinfo[vf].tx_rate[idx], + 1 << idx); + } + + ixgbe_restore_statistics_mapping(dev); + err = ixgbe_dev_rxtx_start(dev); if (err < 0) { PMD_INIT_LOG(ERR, "Unable to start rxtx queues"); @@ -2132,30 +2377,25 @@ ixgbe_dev_start(struct rte_eth_dev *dev) if (err) goto error; - switch(dev->data->dev_conf.link_speed) { - case ETH_LINK_SPEED_AUTONEG: + link_speeds = &dev->data->dev_conf.link_speeds; + if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_10G)) { + PMD_INIT_LOG(ERR, "Invalid link setting"); + goto error; + } + + speed = 0x0; + if (*link_speeds == ETH_LINK_SPEED_AUTONEG) { speed = (hw->mac.type != ixgbe_mac_82598EB) ? IXGBE_LINK_SPEED_82599_AUTONEG : IXGBE_LINK_SPEED_82598_AUTONEG; - break; - case ETH_LINK_SPEED_100: - /* - * Invalid for 82598 but error will be detected by - * ixgbe_setup_link() - */ - speed = IXGBE_LINK_SPEED_100_FULL; - break; - case ETH_LINK_SPEED_1000: - speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - case ETH_LINK_SPEED_10000: - speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - default: - PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu", - dev->data->dev_conf.link_speed, - dev->data->port_id); - goto error; + } else { + if (*link_speeds & ETH_LINK_SPEED_10G) + speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_1G) + speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (*link_speeds & ETH_LINK_SPEED_100M) + speed |= IXGBE_LINK_SPEED_100_FULL; } err = ixgbe_setup_link(hw, speed, link_up); @@ -2168,10 +2408,10 @@ skip_link_setup: /* check if lsc interrupt is enabled */ if (dev->data->dev_conf.intr_conf.lsc != 0) ixgbe_dev_lsc_interrupt_setup(dev); + ixgbe_dev_macsec_interrupt_setup(dev); } else { rte_intr_callback_unregister(intr_handle, - ixgbe_dev_interrupt_handler, - (void *)dev); + ixgbe_dev_interrupt_handler, dev); if (dev->data->dev_conf.intr_conf.lsc != 0) PMD_INIT_LOG(INFO, "lsc won't enable because of" " no intr multiplex\n"); @@ -2188,36 +2428,6 @@ skip_link_setup: /* resume enabled intr since hw reset */ ixgbe_enable_intr(dev); - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ - ETH_VLAN_EXTEND_MASK; - ixgbe_vlan_offload_set(dev, mask); - - if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) { - /* Enable vlan filtering for VMDq */ - ixgbe_vmdq_vlan_hw_filter_enable(dev); - } - - /* Configure DCB hw */ - ixgbe_configure_dcb(dev); - - if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { - err = ixgbe_fdir_configure(dev); - if (err) - goto error; - } - - /* Restore vf rate limit */ - if (vfinfo != NULL) { - for (vf = 0; vf < dev->pci_dev->max_vfs; vf++) - for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++) - if (vfinfo[vf].tx_rate[idx] != 0) - ixgbe_set_vf_rate_limit(dev, vf, - vfinfo[vf].tx_rate[idx], - 1 << idx); - } - - ixgbe_restore_statistics_mapping(dev); - return 0; 
error: @@ -2240,7 +2450,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) struct ixgbe_filter_info *filter_info = IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int vf; PMD_INIT_FUNC_TRACE(); @@ -2248,9 +2459,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* disable interrupts */ ixgbe_disable_intr(hw); - /* disable intr eventfd mapping */ - rte_intr_disable(intr_handle); - /* reset the NIC */ ixgbe_pf_reset_hw(hw); hw->adapter_stopped = 0; @@ -2258,8 +2466,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev) /* stop adapter */ ixgbe_stop_adapter(hw); - for (vf = 0; vfinfo != NULL && - vf < dev->pci_dev->max_vfs; vf++) + for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++) vfinfo[vf].clear_to_send = false; if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { @@ -2392,6 +2599,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev) static void ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats *hw_stats, + struct ixgbe_macsec_stats *macsec_stats, uint64_t *total_missed_rx, uint64_t *total_qbrc, uint64_t *total_qprc, uint64_t *total_qprdc) { @@ -2399,9 +2607,9 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, uint32_t delta_gprc = 0; unsigned i; /* Workaround for RX byte count not including CRC bytes when CRC -+ * strip is enabled. CRC bytes are removed from counters when crc_strip + * strip is enabled. CRC bytes are removed from counters when crc_strip * is disabled. -+ */ + */ int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) & IXGBE_HLREG0_RXCRCSTRP); @@ -2411,8 +2619,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); for (i = 0; i < 8; i++) { - uint32_t mp; - mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* global total per queue */ hw_stats->mpc[i] += mp; /* Running comprehensive total for stats display */ @@ -2561,6 +2769,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, /* Flow Director Stats registers */ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + + /* MACsec Stats registers */ + macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT); + macsec_stats->out_pkts_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE); + macsec_stats->out_pkts_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP); + macsec_stats->out_octets_encrypted += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE); + macsec_stats->out_octets_protected += + IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP); + macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT); + macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD); + macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI); + macsec_stats->in_pkts_unknownsci += + IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI); + macsec_stats->in_octets_decrypted += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD); + macsec_stats->in_octets_validated += + IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV); + macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH); + macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY); + macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE); + for (i = 0; i < 2; i++) { + macsec_stats->in_pkts_ok += + IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i)); + macsec_stats->in_pkts_invalid 
+= + IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i)); + macsec_stats->in_pkts_notvalid += + IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i)); + } + macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA); + macsec_stats->in_pkts_notusingsa += + IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA); } /* @@ -2573,6 +2815,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_stats *hw_stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; unsigned i; @@ -2581,8 +2826,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) total_qprc = 0; total_qprdc = 0; - ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, - &total_qprc, &total_qprdc); + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); if (stats == NULL) return; @@ -2604,16 +2849,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Rx Errors */ stats->imissed = total_missed_rx; stats->ierrors = hw_stats->crcerrs + - hw_stats->mspdc + - hw_stats->rlec + - hw_stats->ruc + - hw_stats->roc + - total_missed_rx + - hw_stats->illerrc + - hw_stats->errbc + - hw_stats->rfc + - hw_stats->fccrc + - hw_stats->fclast; + hw_stats->mspdc + + hw_stats->rlec + + hw_stats->ruc + + hw_stats->roc + + hw_stats->illerrc + + hw_stats->errbc + + hw_stats->rfc + + hw_stats->fccrc + + hw_stats->fclast; /* Tx Errors */ stats->oerrors = 0; @@ -2635,18 +2879,94 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev) /* This function calculates the number of xstats based on the current config */ static unsigned ixgbe_xstats_calc_num(void) { - return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) + - (IXGBE_NB_TXQ_PRIO_STATS * 8); + return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS + + (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + + (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); +} + +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) +{ + const unsigned cnt_stats = ixgbe_xstats_calc_num(); + unsigned stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + rte_ixgbe_stats_strings[i].name); + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + rte_ixgbe_macsec_strings[i].name); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; +} + +static int 
ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit) +{ + unsigned i; + + if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) + return -ENOMEM; + + if (xstats_names != NULL) + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ixgbevf_stats_strings[i].name); + return IXGBEVF_NB_XSTATS; } static int -ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_stats *hw_stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc; unsigned i, stat, count = 0; @@ -2660,8 +2980,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, total_qprc = 0; total_qprdc = 0; - ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc, - &total_qprc, &total_qprdc); + ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx, + &total_qbrc, &total_qprc, &total_qprdc); /* If this is a reset xstats is NULL, and we have cleared the * registers by reading them. @@ -2672,39 +2992,41 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats from ixgbe_hw_stats */ count = 0; for (i = 0; i < IXGBE_NB_HW_STATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), "%s", - rte_ixgbe_stats_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + xstats[count].id = count; count++; } /* RX Priority Stats */ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_priority%u_%s", i, - rte_ixgbe_rxq_strings[stat].name); + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_rxq_strings[stat].offset + (sizeof(uint64_t) * i)); + xstats[count].id = count; count++; } } /* TX Priority Stats */ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_priority%u_%s", i, - rte_ixgbe_txq_strings[stat].name); + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_txq_strings[stat].offset + (sizeof(uint64_t) * i)); + xstats[count].id = count; count++; } } - return count; } @@ -2713,6 +3035,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) { struct ixgbe_hw_stats *stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); unsigned count = ixgbe_xstats_calc_num(); @@ -2721,13 +3046,14 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) /* Reset software totals */ memset(stats, 0, sizeof(*stats)); + memset(macsec_stats, 0, sizeof(*macsec_stats)); } static void ixgbevf_update_stats(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Good Rx packet, include VF loopback */ @@ -2752,7 +3078,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev) } static int -ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) @@ -2769,8 +3095,6 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats */ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), - "%s", rte_ixgbevf_stats_strings[i].name); xstats[i].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbevf_stats_strings[i].offset); } @@ -2793,14 +3117,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; stats->obytes = hw_stats->vfgotc; - stats->imcasts = hw_stats->vfmprc; - /* stats->imcasts should be removed as imcasts is deprecated */ } static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) { - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Sync HW register to the last stats */ @@ -2811,22 +3133,53 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) hw_stats->vfgorc = 0; hw_stats->vfgptc = 0; hw_stats->vfgotc = 0; - hw_stats->vfmprc = 0; +} + +static int +ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u16 eeprom_verh, eeprom_verl; + u32 etrack_id; + int ret; + + ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); + ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); + etrack_id = (eeprom_verh << 16) | eeprom_verl; + ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; } static void ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; + if (RTE_ETH_DEV_SRIOV(dev).active == 0) { + /* + * When DCB/VT is off, maximum number of queues changes, + * except for 82598EB, which remains constant. 
+ */ + if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE && + hw->mac.type != ixgbe_mac_82598EB) + dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES; + } dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -2847,8 +3200,13 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) !RTE_ETH_DEV_SRIOV(dev).active) dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP; + if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; dev_info->tx_offload_capa = @@ -2859,8 +3217,13 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO; + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; + if (hw->mac.type == ixgbe_mac_X550 || - hw->mac.type == ixgbe_mac_X550EM_x) + hw->mac.type == ixgbe_mac_X550EM_x || + hw->mac.type == ixgbe_mac_X550EM_a) dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; dev_info->default_rxconf = (struct rte_eth_rxconf) { @@ -2891,21 +3254,64 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t); dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type); dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL; -} -static void + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; + if (hw->mac.type == ixgbe_mac_X540 || + hw->mac.type == ixgbe_mac_X540_vf || + hw->mac.type == ixgbe_mac_X550 || + hw->mac.type == ixgbe_mac_X550_vf) { + dev_info->speed_capa |= ETH_LINK_SPEED_100M; + } +} + +static const uint32_t * +ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /* For non-vec functions, + * refers to ixgbe_rxd_pkt_info_to_pkt_type(); + * for vec functions, + * refers to _recv_raw_pkts_vec(). 
+ */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_IP, + RTE_PTYPE_INNER_L3_IPV6, + RTE_PTYPE_INNER_L3_IPV6_EXT, + RTE_PTYPE_INNER_L4_TCP, + RTE_PTYPE_INNER_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == ixgbe_recv_pkts || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc || + dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc) + return ptypes; + return NULL; +} + +static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -2957,9 +3363,9 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) int link_up; int diag; - link.link_status = 0; + link.link_status = ETH_LINK_DOWN; link.link_speed = 0; - link.link_duplex = 0; + link.link_duplex = ETH_LINK_HALF_DUPLEX; memset(&old, 0, sizeof(old)); rte_ixgbe_dev_atomic_read_link_status(dev, &old); @@ -2972,8 +3378,8 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); if (diag != 0) { - link.link_speed = ETH_LINK_SPEED_100; - link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_speed = ETH_SPEED_NUM_100M; + link.link_duplex = ETH_LINK_FULL_DUPLEX; rte_ixgbe_dev_atomic_write_link_status(dev, &link); if (link.link_status == old.link_status) return -1; @@ -2986,26 +3392,26 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) return -1; return 0; } - link.link_status = 1; + link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; switch (link_speed) { default: case IXGBE_LINK_SPEED_UNKNOWN: - link.link_duplex = ETH_LINK_HALF_DUPLEX; - link.link_speed = ETH_LINK_SPEED_100; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_100_FULL: - link.link_speed = ETH_LINK_SPEED_100; + link.link_speed = ETH_SPEED_NUM_100M; break; case IXGBE_LINK_SPEED_1GB_FULL: - link.link_speed = ETH_LINK_SPEED_1000; + link.link_speed = ETH_SPEED_NUM_1G; break; case IXGBE_LINK_SPEED_10GB_FULL: - link.link_speed = ETH_LINK_SPEED_10000; + link.link_speed = ETH_SPEED_NUM_10G; break; } rte_ixgbe_dev_atomic_write_link_status(dev, &link); @@ -3112,6 +3518,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) return 0; } +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_LINKSEC; + + return 0; +} + /* * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. * @@ -3146,6 +3574,14 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) if (eicr & IXGBE_EICR_MAILBOX) intr->flags |= IXGBE_FLAG_MAILBOX; + if (eicr & IXGBE_EICR_LINKSEC) + intr->flags |= IXGBE_FLAG_MACSEC; + + if (hw->mac.type == ixgbe_mac_X550EM_x && + hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) + intr->flags |= IXGBE_FLAG_PHY_INTERRUPT; + return 0; } @@ -3162,6 +3598,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct rte_eth_link link; memset(&link, 0, sizeof(link)); @@ -3176,11 +3613,11 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) PMD_INIT_LOG(INFO, " Port %d: Link Down", (int)(dev->data->port_id)); } - PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); } /* @@ -3194,13 +3631,16 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) * - On failure, a negative value. */ static int -ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; struct rte_eth_link link; int intr_enable_delay = false; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags); @@ -3209,6 +3649,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) intr->flags &= ~IXGBE_FLAG_MAILBOX; } + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { /* get the link status before link update, for predicting later */ memset(&link, 0, sizeof(link)); @@ -3232,12 +3677,12 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) if (intr_enable_delay) { if (rte_eal_alarm_set(timeout * 1000, - ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0) + ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) PMD_DRV_LOG(ERR, "Error setting alarm"); } else { PMD_DRV_LOG(DEBUG, "enable intr immediately"); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(intr_handle); } @@ -3262,6 +3707,8 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = @@ -3272,16 +3719,27 @@ ixgbe_dev_interrupt_delayed_handler(void *param) if (eicr & IXGBE_EICR_MAILBOX) ixgbe_pf_mbx_process(dev); + if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) { + ixgbe_handle_lasi(hw); + intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT; + } + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { ixgbe_dev_link_update(dev, 0); intr->flags &= 
~IXGBE_FLAG_NEED_LINK_UPDATE; ixgbe_dev_link_status_print(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } + + if (intr->flags & IXGBE_FLAG_MACSEC) { + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, + NULL); + intr->flags &= ~IXGBE_FLAG_MACSEC; } PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(intr_handle); } /** @@ -3297,13 +3755,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param) * void */ static void -ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, +ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; ixgbe_dev_interrupt_get_status(dev); - ixgbe_dev_interrupt_action(dev); + ixgbe_dev_interrupt_action(dev, handle); } static int @@ -3446,7 +3904,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Enable flow control according to the current settings. */ static int -ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) { int ret_val = 0; uint32_t mflcn_reg, fccfg_reg; @@ -3464,13 +3922,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) /* Low water mark of zero causes XOFF floods */ if (hw->fc.current_mode & ixgbe_fc_tx_pause) { /* High/Low water can not be 0 */ - if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) { + if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) { PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } - if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { + if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) { PMD_INIT_LOG(ERR, "Invalid water mark configuration"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; @@ -3493,13 +3951,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_rx_pause: /* @@ -3516,20 +3974,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ @@ -3540,7 +3998,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); ret_val = IXGBE_ERR_CONFIG; goto out; - break; } /* Set 802.3x based flow control settings. 
*/ @@ -3579,13 +4036,13 @@ out: } static int -ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int32_t ret_val = IXGBE_NOT_IMPLEMENTED; - if(hw->mac.type != ixgbe_mac_82598EB) { - ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num); + if (hw->mac.type != ixgbe_mac_82598EB) { + ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); } return ret_val; } @@ -3599,9 +4056,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p uint8_t tc_num; uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, @@ -3634,7 +4091,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; - err = ixgbe_dcb_pfc_enable(dev,tc_num); + err = ixgbe_dcb_pfc_enable(dev, tc_num); /* Not negotiated is not an error case */ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) @@ -3649,11 +4106,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - uint8_t i, j, mask; + uint16_t i, sp_reta_size; + uint8_t j, mask; uint32_t reta, r; uint16_t idx, shift; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint16_t sp_reta_size; uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); @@ -3703,11 +4160,11 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) { - uint8_t i, j, mask; + uint16_t i, sp_reta_size; + uint8_t j, mask; uint32_t reta; uint16_t idx, shift; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint16_t sp_reta_size; uint32_t reta_reg; PMD_INIT_FUNC_TRACE(); @@ -3766,6 +4223,38 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) ixgbe_add_rar(dev, addr, 0, 0); } +int +rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, + struct ether_addr *mac_addr) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + int rar_entry; + uint8_t *new_mac = (uint8_t *)(mac_addr); + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + rar_entry = hw->mac.num_rar_entries - (vf + 1); + + if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) { + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, + ETHER_ADDR_LEN); + return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, + IXGBE_RAH_AV); + } + return -EINVAL; +} + static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { @@ -3782,7 +4271,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ + * feature has not been enabled before. 
+ */ if (!dev->data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) @@ -3842,7 +4332,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) static int ixgbevf_dev_configure(struct rte_eth_dev *dev) { - struct rte_eth_conf* conf = &dev->data->dev_conf; + struct rte_eth_conf *conf = &dev->data->dev_conf; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; @@ -3881,7 +4371,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t intr_vector = 0; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int err, mask = 0; @@ -3904,10 +4395,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } /* Set vfta */ - ixgbevf_set_vfta_all(dev,1); + ixgbevf_set_vfta_all(dev, 1); /* Set HW strip */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; ixgbevf_vlan_offload_set(dev, mask); @@ -3944,10 +4435,13 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); + ixgbevf_intr_disable(hw); + hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); @@ -3955,16 +4449,13 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) * Clear what we set, but we still keep shadow_vfta to * restore after device starts */ - ixgbevf_set_vfta_all(dev,0); + ixgbevf_set_vfta_all(dev, 0); /* Clear stored conf */ dev->data->scattered_rx = 0; ixgbe_dev_clear_queues(dev); - /* disable intr eventfd mapping */ - rte_intr_disable(intr_handle); - /* Clean datapath event and queue/vec mapping */ rte_intr_efd_disable(intr_handle); if (intr_handle->intr_vec != NULL) { @@ -3986,25 +4477,30 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) ixgbe_dev_free_queues(dev); - /* reprogram the RAR[0] in case user changed it. 
*/ - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + /** + * Remove the VF MAC address ro ensure + * that the VF traffic goes to the PF + * after stop, close and detach of the VF + **/ + ixgbevf_remove_mac_addr(dev, 0); } static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); int i = 0, j = 0, vfta = 0, mask = 1; - for (i = 0; i < IXGBE_VFTA_SIZE; i++){ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) { vfta = shadow_vfta->vfta[i]; - if(vfta){ + if (vfta) { mask = 1; - for (j = 0; j < 32; j++){ - if(vfta & mask) - ixgbe_set_vfta(hw, (i<<5)+j, 0, on); - mask<<=1; + for (j = 0; j < 32; j++) { + if (vfta & mask) + ixgbe_set_vfta(hw, (i<<5)+j, 0, + on, false); + mask <<= 1; } } } @@ -4016,7 +4512,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vid_idx = 0; uint32_t vid_bit = 0; @@ -4025,8 +4521,8 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ - ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on); - if(ret){ + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); + if (ret) { PMD_INIT_LOG(ERR, "Unable to set VF vlan"); return ret; } @@ -4051,17 +4547,17 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) PMD_INIT_FUNC_TRACE(); - if(queue >= hw->mac.max_rx_queues) + if (queue >= hw->mac.max_rx_queues) return; ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - if(on) + if (on) ctrl |= IXGBE_RXDCTL_VME; else ctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on); + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); } static void @@ -4073,23 +4569,23 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) int on = 0; /* VF function only support hw strip feature, others are not support */ - if(mask & ETH_VLAN_STRIP_MASK){ + if (mask & ETH_VLAN_STRIP_MASK) { on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); - for(i=0; i < hw->mac.max_rx_queues; i++) - ixgbevf_vlan_strip_queue_set(dev,i,on); + for (i = 0; i < hw->mac.max_rx_queues; i++) + ixgbevf_vlan_strip_queue_set(dev, i, on); } } static int -ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) +ixgbe_vt_check(struct ixgbe_hw *hw) { uint32_t reg_val; - /* we only need to do this if VMDq is enabled */ + /* if Virtualization Technology is enabled */ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); + PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); return -1; } @@ -4097,9 +4593,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) } static uint32_t -ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) +ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) { uint32_t vector = 0; + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((uc_addr->addr_bytes[4] >> 4) | @@ -4127,8 +4624,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) } static int -ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, - uint8_t on) 
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint8_t on) { uint32_t vector; uint32_t uta_idx; @@ -4149,12 +4646,12 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, if (hw->mac.type < ixgbe_mac_82599EB) return -ENOTSUP; - vector = ixgbe_uta_vector(hw,mac_addr); + vector = ixgbe_uta_vector(hw, mac_addr); uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; uta_shift = vector & ixgbe_uta_bit_mask; rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0); - if(rc == on) + if (rc == on) return 0; reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx)); @@ -4174,7 +4671,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); else - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } @@ -4192,7 +4689,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on) if (hw->mac.type < ixgbe_mac_82599EB) return -ENOTSUP; - if(on) { + if (on) { for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) { uta_info->uta_shadow[i] = ~0; IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); @@ -4226,22 +4723,249 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) return new_val; } -static int -ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, - uint16_t rx_mask, uint8_t on) + +int +rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + + mac->ops.set_vlan_anti_spoofing(hw, on, vf); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + mac->ops.set_mac_anti_spoofing(hw, on, vf); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id) +{ + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (vlan_id > 4095) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf)); + if (vlan_id) { + ctrl = vlan_id; + ctrl |= IXGBE_VMVIR_VLANA_DEFAULT; + } else { + ctrl = 0; + } + + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (on > 1) + return -EINVAL; + + hw = 
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + /* enable or disable VMDQ loopback */ + if (on) + ctrl |= IXGBE_PFDTXGSWC_VT_LBEN; + else + ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN; + + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl); + + return 0; +} + +int +rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t reg_value; + int i; + int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT); + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + for (i = 0; i <= num_queues; i++) { + reg_value = IXGBE_QDE_WRITE | + (i << IXGBE_QDE_IDX_SHIFT) | + (on & IXGBE_QDE_ENABLE); + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value); + } + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + uint32_t reg_value; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + /* only support VF's 0 to 63 */ + if ((vf >= dev_info.max_vfs) || (vf > 63)) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf)); + if (on) + reg_value |= IXGBE_SRRCTL_DROP_EN; + else + reg_value &= ~IXGBE_SRRCTL_DROP_EN; + + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint16_t queues_per_pool; + uint32_t q; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); + + /* The PF has 128 queue pairs and in SRIOV configuration + * those queues will be assigned to VF's, so RXDCTL + * registers will be dealing with queues which will be + * assigned to VF's. + * Let's say we have SRIOV configured with 31 VF's then the + * first 124 queues 0-123 will be allocated to VF's and only + * the last 4 queues 123-127 will be assigned to the PF. 
+ */ + + queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools; + + for (q = 0; q < queues_per_pool; q++) + (*dev->dev_ops->vlan_strip_queue_set)(dev, + q + vf * queues_per_pool, on); + return 0; +} + +int +rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on) { int val = 0; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct ixgbe_hw *hw; + uint32_t vmolr; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (strstr(dev_info.driver_name, "ixgbe_vf")) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); if (hw->mac.type == ixgbe_mac_82598EB) { PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" " on 82599 hardware and newer"); return -ENOTSUP; } - if (ixgbe_vmdq_mode_check(hw) < 0) + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); @@ -4251,84 +4975,228 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, else vmolr &= ~val; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); return 0; } -static int -ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +int +rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on) { - uint32_t reg,addr; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint32_t reg, addr; uint32_t val; const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (strstr(dev_info.driver_name, "ixgbe_vf")) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (ixgbe_vmdq_mode_check(hw) < 0) + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; - addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); + /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ + if (vf >= 32) { + addr = IXGBE_VFRE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFRE(0); + val = bit1 << vf; + } + reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; if (on) reg |= val; else reg &= ~val; - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, addr, reg); return 0; } -static int -ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +int +rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on) { - uint32_t reg,addr; + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + uint32_t reg, addr; uint32_t val; const uint8_t bit1 = 0x1; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw *hw; - if (ixgbe_vmdq_mode_check(hw) < 0) + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (strstr(dev_info.driver_name, "ixgbe_vf")) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; - addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2); + /* for vf 
>= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ + if (vf >= 32) { + addr = IXGBE_VFTE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFTE(0); + val = bit1 << vf; + } + reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; if (on) reg |= val; else reg &= ~val; - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, addr, reg); return 0; } -static int -ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask, uint8_t vlan_on) +int +rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on) { + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; int ret = 0; - uint16_t pool_idx; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint16_t vf_idx; + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + + if (strstr(dev_info.driver_name, "ixgbe_vf")) + return -ENOTSUP; + + if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0)) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + for (vf_idx = 0; vf_idx < 64; vf_idx++) { + if (vf_mask & ((uint64_t)(1ULL << vf_idx))) { + ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx, + vlan_on, false); + if (ret < 0) + return ret; + } + } + + return ret; +} + +int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + struct rte_eth_link link; + uint8_t nb_q_per_pool; + uint32_t queue_stride; + uint32_t queue_idx, idx = 0, vf_idx; + uint32_t queue_end; + uint16_t total_rate = 0; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + rte_eth_dev_info_get(port, &dev_info); + rte_eth_link_get_nowait(port, &link); + + if (strstr(dev_info.driver_name, "ixgbe_vf")) + return -ENOTSUP; + + if (vf >= dev_info.max_vfs) + return -EINVAL; + + if (tx_rate > link.link_speed) + return -EINVAL; + + if (q_msk == 0) + return 0; + + pci_dev = IXGBE_DEV_TO_PCI(dev); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + queue_idx = vf * queue_stride; + queue_end = queue_idx + nb_q_per_pool - 1; + if (queue_end >= hw->mac.max_tx_queues) + return -EINVAL; + + if (vfinfo) { + for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { + if (vf_idx == vf) + continue; + for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); + idx++) + total_rate += vfinfo[vf_idx].tx_rate[idx]; + } + } else { + return -EINVAL; + } + + /* Store tx_rate for this vf. */ + for (idx = 0; idx < nb_q_per_pool; idx++) { + if (((uint64_t)0x1 << idx) & q_msk) { + if (vfinfo[vf].tx_rate[idx] != tx_rate) + vfinfo[vf].tx_rate[idx] = tx_rate; + total_rate += tx_rate; + } + } - if (ixgbe_vmdq_mode_check(hw) < 0) - return -ENOTSUP; - for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { - if (pool_mask & ((uint64_t)(1ULL << pool_idx))) - ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on); - if (ret < 0) - return ret; + if (total_rate > dev->data->dev_link.link_speed) { + /* Reset stored TX rate of the VF if it causes exceed + * link speed. 
+ */ + memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); + return -EINVAL; } - return ret; + /* Set RTTBCNRC of each queue/pool for vf X */ + for (; queue_idx <= queue_end; queue_idx++) { + if (0x1 & q_msk) + ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); + q_msk = q_msk >> 1; + } + + return 0; } #define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ @@ -4344,7 +5212,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on) { - uint32_t mr_ctl,vlvf; + uint32_t mr_ctl, vlvf; uint32_t mp_lsb = 0; uint32_t mv_msb = 0; uint32_t mv_lsb = 0; @@ -4357,7 +5225,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, const uint8_t vlan_mask_offset = 32; const uint8_t dst_pool_offset = 8; const uint8_t rule_mr_offset = 4; - const uint8_t mirror_rule_mask= 0x0F; + const uint8_t mirror_rule_mask = 0x0F; struct ixgbe_mirror_info *mr_info = (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); @@ -4365,7 +5233,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint8_t mirror_type = 0; - if (ixgbe_vmdq_mode_check(hw) < 0) + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; if (rule_id >= IXGBE_MAX_MIRROR_RULES) @@ -4380,12 +5248,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { mirror_type |= IXGBE_MRCTL_VLME; /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ - for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) { + for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { /* search vlan id related pool vlan filter index */ reg_index = ixgbe_find_vlvf_slot(hw, - mirror_conf->vlan.vlan_id[i]); - if(reg_index < 0) + mirror_conf->vlan.vlan_id[i], + false); + if (reg_index < 0) return -EINVAL; vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); if ((vlvf & IXGBE_VLVF_VIEN) && @@ -4403,8 +5272,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, mr_info->mr_conf[rule_id].vlan.vlan_mask = mirror_conf->vlan.vlan_mask; - for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { - if(mirror_conf->vlan.vlan_mask & (1ULL << i)) + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) mr_info->mr_conf[rule_id].vlan.vlan_id[i] = mirror_conf->vlan.vlan_id[i]; } @@ -4412,7 +5281,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, mv_lsb = 0; mv_msb = 0; mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; - for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; } } @@ -4485,7 +5354,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) struct ixgbe_mirror_info *mr_info = (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); - if (ixgbe_vmdq_mode_check(hw) < 0) + if (ixgbe_vt_check(hw) < 0) return -ENOTSUP; memset(&mr_info->mr_conf[rule_id], 0, @@ -4508,6 +5377,8 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t mask; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4517,7 +5388,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) RTE_SET_USED(queue_id); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(intr_handle); return 0; 
} @@ -4540,6 +5411,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t mask; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -4559,7 +5432,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) mask &= (1 << (queue_id - 32)); IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); } - rte_intr_enable(&dev->pci_dev->intr_handle); + rte_intr_enable(intr_handle); return 0; } @@ -4663,12 +5536,16 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, static void ixgbevf_configure_msix(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t q_idx; uint32_t vector_idx = IXGBE_MISC_VEC_ID; + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); + /* won't configure msix register if no mapping is done * between intr vector and event fd. */ @@ -4683,9 +5560,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); intr_handle->intr_vec[q_idx] = vector_idx; } - - /* Configure VF other cause ivar */ - ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); } /** @@ -4696,7 +5570,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) static void ixgbe_configure_msix(struct rte_eth_dev *dev) { - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t queue_id, base = IXGBE_MISC_VEC_ID; @@ -4811,61 +5686,6 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, return 0; } -static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, - uint16_t tx_rate, uint64_t q_msk) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vf_info *vfinfo = - *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); - uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; - uint32_t queue_stride = - IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; - uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; - uint32_t queue_end = queue_idx + nb_q_per_pool - 1; - uint16_t total_rate = 0; - - if (queue_end >= hw->mac.max_tx_queues) - return -EINVAL; - - if (vfinfo != NULL) { - for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { - if (vf_idx == vf) - continue; - for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); - idx++) - total_rate += vfinfo[vf_idx].tx_rate[idx]; - } - } else - return -EINVAL; - - /* Store tx_rate for this vf. */ - for (idx = 0; idx < nb_q_per_pool; idx++) { - if (((uint64_t)0x1 << idx) & q_msk) { - if (vfinfo[vf].tx_rate[idx] != tx_rate) - vfinfo[vf].tx_rate[idx] = tx_rate; - total_rate += tx_rate; - } - } - - if (total_rate > dev->data->dev_link.link_speed) { - /* - * Reset stored TX rate of the VF if it causes exceed - * link speed. 
- */ - memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); - return -EINVAL; - } - - /* Set RTTBCNRC of each queue/pool for vf X */ - for (; queue_idx <= queue_end; queue_idx++) { - if (0x1 & q_msk) - ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate); - q_msk = q_msk >> 1; - } - - return 0; -} - static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, __attribute__((unused)) uint32_t index, @@ -4945,7 +5765,8 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) #define MAC_TYPE_FILTER_SUP(type) do {\ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ - (type) != ixgbe_mac_X550)\ + (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ + (type) != ixgbe_mac_X550EM_a)\ return -ENOTSUP;\ } while (0) @@ -5174,7 +5995,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ + * feature has not been enabled before. + */ if (!dev->data->scattered_rx && (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) @@ -5706,6 +6528,8 @@ ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) @@ -5728,6 +6552,8 @@ ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* RXSTMPL stores ns and RXSTMPH stores seconds. */ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) @@ -5751,6 +6577,8 @@ ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* TXSTMPL stores ns and TXSTMPH stores seconds. */ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) @@ -5782,15 +6610,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) rte_ixgbe_dev_atomic_read_link_status(dev, &link); switch (link.link_speed) { - case ETH_LINK_SPEED_100: + case ETH_SPEED_NUM_100M: incval = IXGBE_INCVAL_100; shift = IXGBE_INCVAL_SHIFT_100; break; - case ETH_LINK_SPEED_1000: + case ETH_SPEED_NUM_1G: incval = IXGBE_INCVAL_1GB; shift = IXGBE_INCVAL_SHIFT_1GB; break; - case ETH_LINK_SPEED_10000: + case ETH_SPEED_NUM_10G: default: incval = IXGBE_INCVAL_10GB; shift = IXGBE_INCVAL_SHIFT_10GB; @@ -5799,6 +6627,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev) switch (hw->mac.type) { case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: /* Independent of link speed. */ incval = 1; /* Cycles read will be interpreted as ns. */ @@ -6030,6 +6860,12 @@ ixgbe_get_regs(struct rte_eth_dev *dev, const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
ixgbe_regs_mac_82598EB : ixgbe_regs_others; + if (data == NULL) { + regs->length = ixgbe_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { @@ -6054,6 +6890,12 @@ ixgbevf_get_regs(struct rte_eth_dev *dev, int count = 0; const struct reg_info *reg_group; + if (data == NULL) { + regs->length = ixgbevf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { @@ -6122,9 +6964,11 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { switch (mac_type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: return ETH_RSS_RETA_SIZE_512; case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: return ETH_RSS_RETA_SIZE_64; default: return ETH_RSS_RETA_SIZE_128; @@ -6136,12 +6980,14 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { switch (mac_type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: if (reta_idx < ETH_RSS_RETA_SIZE_128) return IXGBE_RETA(reta_idx >> 2); else return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: return IXGBE_VFRETA(reta_idx >> 2); default: return IXGBE_RETA(reta_idx >> 2); @@ -6153,6 +6999,7 @@ ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { switch (mac_type) { case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: return IXGBE_VFMRQC; default: return IXGBE_MRQC; @@ -6164,6 +7011,7 @@ ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { switch (mac_type) { case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: return IXGBE_VFRSSRK(i); default: return IXGBE_RSSRK(i); @@ -6266,7 +7114,8 @@ ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, uint32_t etag_etype; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6310,7 +7159,8 @@ ixgbe_e_tag_enable(struct ixgbe_hw *hw) uint32_t etag_etype; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6350,7 +7200,8 @@ ixgbe_e_tag_disable(struct ixgbe_hw *hw) uint32_t etag_etype; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6393,7 +7244,8 @@ ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, uint32_t rar_low, rar_high; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6428,7 +7280,8 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, uint32_t rar_low, rar_high; if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6547,7 +7400,8 @@ ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != 
ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6607,20 +7461,22 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel, bool en) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); int ret = 0; uint32_t vmtir, vmvir; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { + if (l2_tunnel->vf_id >= pci_dev->max_vfs) { PMD_DRV_LOG(ERR, "VF id %u should be less than %u", l2_tunnel->vf_id, - dev->pci_dev->max_vfs); + pci_dev->max_vfs); return -EINVAL; } if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6690,7 +7546,8 @@ ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6863,7 +7720,8 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6899,7 +7757,8 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x) { + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { return -ENOTSUP; } @@ -6924,15 +7783,410 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, return ret; } -static struct rte_driver rte_ixgbe_driver = { - .type = PMD_PDEV, - .init = rte_ixgbe_pmd_init, -}; +static void +ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); -static struct rte_driver rte_ixgbevf_driver = { - .type = PMD_PDEV, - .init = rte_ixgbevf_pmd_init, -}; + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); +} + +static void +ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); +} + +static void ixgbevf_mbx_process(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 in_msg = 0; + + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; + + /* PF reset VF event */ + if (in_msg == IXGBE_PF_CONTROL_MSG) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); +} + +static int +ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + ixgbevf_intr_disable(hw); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); + intr->flags = 0; + + /* only one misc vector supported - mailbox */ + eicr &= IXGBE_VTEICR_MASK; + if (eicr == IXGBE_MISC_VEC_ID) + intr->flags |= IXGBE_FLAG_MAILBOX; + + return 0; +} + +static int +ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct 
ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbevf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; + } + + ixgbevf_intr_enable(hw); + + return 0; +} + +static void +ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbevf_dev_interrupt_get_status(dev); + ixgbevf_dev_interrupt_action(dev); +} + +/** + * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the Tx security block + **/ +int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECTX_POLL 40 + + int i; + int sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg |= IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) + break; + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECTX_POLL) + PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path + * @hw: pointer to hardware structure + * + * Enables the transmit data path. + **/ +int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) +{ + uint32_t sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +int +rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp) +{ + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Stop the data paths */ + if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS) + return -ENOTSUP; + /* + * Workaround: + * As no ixgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * ixgbe_disable_sec_rx_path(). + */ + ixgbe_disable_sec_tx_path_generic(hw); + + /* Enable Ethernet CRC (required by MACsec offload) */ + ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); + ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); + + /* Enable the TX and RX crypto engines */ + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; + ctrl |= 0x3; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); + + /* Enable SA lookup */ + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); + ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; + ctrl |= en ? 
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Enable Ethernet CRC (required by MACsec offload) */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+	ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+	/* Enable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+	ctrl |= 0x3;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+	/* Enable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+		     IXGBE_LSECTXCTRL_AUTH;
+	ctrl |= IXGBE_LSECTXCTRL_AISCI;
+	ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+	ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+	if (rp)
+		ctrl |= IXGBE_LSECRXCTRL_RP;
+	else
+		ctrl &= ~IXGBE_LSECRXCTRL_RP;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
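In the function above, `en` selects between authenticate-and-encrypt (IXGBE_LSECTXCTRL_AUTH_ENCRYPT) and authentication-only (IXGBE_LSECTXCTRL_AUTH) on Tx, while `rp` toggles Rx replay protection (IXGBE_LSECRXCTRL_RP). A minimal usage sketch, with an example port number and the teardown call defined just below:

#include <rte_pmd_ixgbe.h>

static void
macsec_offload_example(uint8_t port_id)
{
	/* authenticate and encrypt, with replay protection */
	rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);

	/* or: integrity protection only, no replay protection */
	rte_pmd_ixgbe_macsec_enable(port_id, 0, 0);

	/* turn the offload back off */
	rte_pmd_ixgbe_macsec_disable(port_id);
}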
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * As no ixgbe_disable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 * The hardware support has been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * As no ixgbe_enable_sec_rx_path equivalent is
+	 * implemented for tx in the base code, and we are
+	 * not allowed to modify the base code in DPDK, so
+	 * just call the hand-written one directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
+	}
+
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
+{
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
+		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+	}
+
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
+}
 
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
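Taken together, the new rte_pmd_ixgbe_macsec_* calls let an application bring up the offload end to end: enable the crypto engines, program the Tx and Rx secure channels (the SCI is built from a MAC address plus, on Rx, a port identifier), then install a 16-byte key and starting packet number for one of the two secure associations per channel. A hedged bring-up sketch with example addresses, key and PN values:

#include <rte_pmd_ixgbe.h>

static void
macsec_bringup_example(uint8_t port_id)
{
	/* example values only */
	uint8_t local_mac[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	uint8_t peer_mac[6]  = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x02 };
	uint8_t key[16] = {	/* 128-bit key: written as 4 x 32-bit register words */
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};

	/* 1. enable the offload: encrypt + replay protection */
	rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);

	/* 2. program the Tx and Rx secure channels */
	rte_pmd_ixgbe_macsec_config_txsc(port_id, local_mac);
	rte_pmd_ixgbe_macsec_config_rxsc(port_id, peer_mac, 0 /* SCI port identifier */);

	/* 3. install SA index 0, association number 0, starting packet number 1 */
	rte_pmd_ixgbe_macsec_select_txsa(port_id, 0, 0, 1, key);
	rte_pmd_ixgbe_macsec_select_rxsa(port_id, 0, 0, 1, key);
}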