X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=4fd22c5ee39e45815a2620c9282c13631d3d86b3;hb=adce1f86f8d25fc10e9ac32fd59fa0bedce608ad;hp=3f1ebc157ee1af829ac27aae478b22a3e47d2ea3;hpb=82113036e4e54a82ec485a05acce9a43ceb60552;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 3f1ebc157e..4fd22c5ee3 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
 #include
 #include
 #include
+#include

 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -72,6 +73,8 @@
 #include "base/ixgbe_phy.h"
 #include "ixgbe_regs.h"

+#include "rte_pmd_ixgbe.h"
+
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
  * least 8 bytes less than receive packet buffer size. This value is in units
@@ -150,15 +153,18 @@
 #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
 #define IXGBE_QDE_STRIP_TAG 0x00000004
+#define IXGBE_VTEICR_MASK 0x07

-enum ixgbevf_xcast_modes {
-	IXGBEVF_XCAST_MODE_NONE = 0,
-	IXGBEVF_XCAST_MODE_MULTI,
-	IXGBEVF_XCAST_MODE_ALLMULTI,
-};
+#define IXGBE_EXVET_VET_EXT_SHIFT 16
+#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000

 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
 static int ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -174,15 +180,21 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
-				struct rte_eth_xstats *xstats, unsigned n);
+				struct rte_eth_xstat *xstats, unsigned n);
 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
-				  struct rte_eth_xstats *xstats, unsigned n);
+				  struct rte_eth_xstat *xstats, unsigned n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
					     uint16_t queue_id,
					     uint8_t stat_idx,
					     uint8_t is_rx);
+static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+				size_t fw_size);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
 static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -221,9 +233,11 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+				      struct rte_intr_handle *handle);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
		void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
@@ -232,7 +246,9 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
					struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static bool is_device_supported(struct rte_eth_dev *dev,
+				struct eth_driver *drv);

 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -264,14 +280,8 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-		ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
-static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
-		uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
-		uint64_t pool_mask,uint8_t vlan_on);
+		ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
		struct rte_eth_mirror_conf *mirror_conf,
		uint8_t rule_id, uint8_t on);
@@ -287,8 +297,6 @@ static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
		uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
-		uint16_t tx_rate, uint64_t q_msk);

 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
				 struct ether_addr *mac_addr,
@@ -296,9 +304,6 @@ static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
					 struct ether_addr *mac_addr);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
-			struct rte_eth_syn_filter *filter,
-			bool add);
 static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
			struct rte_eth_syn_filter *filter);
 static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -308,17 +313,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
 static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
			struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ntuple_filter *filter,
-			bool add);
 static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
 static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
			struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
-			struct rte_eth_ethertype_filter *filter,
-			bool add);
 static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
				enum rte_filter_op filter_op,
				void *arg);
@@ -361,6 +360,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
				   struct timespec *timestamp);
 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
				   const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+					  void *param);

 static int ixgbe_dev_l2_tunnel_eth_type_conf
	(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -377,6 +378,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
 static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
					 struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);

 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -397,21 +400,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
	last = latest;                                           \
 }

-#define IXGBE_SET_HWSTRIP(h, q) do{\
-	uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-	uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] |= 1 << bit;\
 } while (0)

-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
-	uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-	uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(h)->bitmap[idx] &= ~(1 << bit);\
 } while (0)

-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
-	uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-	uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+	uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+	uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
	(r) = (h)->bitmap[idx] >> bit & 1;\
 } while (0)

@@ -419,23 +422,80 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+	{ .vendor_id = 0, /* sentinel */ },
 };
-
 /*
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
-
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+	{ RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+	{ .vendor_id = 0, /* sentinel */ },
 };

 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -448,6 +508,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IXGBE_MAX_RING_DESC,
	.nb_min = IXGBE_MIN_RING_DESC,
	.nb_align = IXGBE_TXD_ALIGN,
+	.nb_seg_max = IXGBE_TX_MAX_SEG,
+	.nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
 };

 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -466,7 +528,9 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.xstats_get = ixgbe_dev_xstats_get,
	.stats_reset = ixgbe_dev_stats_reset,
	.xstats_reset = ixgbe_dev_xstats_reset,
+	.xstats_get_names = ixgbe_dev_xstats_get_names,
	.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+	.fw_version_get = ixgbe_fw_version_get,
	.dev_infos_get = ixgbe_dev_info_get,
	.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
	.mtu_set = ixgbe_dev_mtu_set,
@@ -484,6 +548,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.rx_queue_count = ixgbe_dev_rx_queue_count,
	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.dev_led_on = ixgbe_dev_led_on,
@@ -498,12 +564,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
	.mirror_rule_set = ixgbe_mirror_rule_set,
	.mirror_rule_reset = ixgbe_mirror_rule_reset,
-	.set_vf_rx_mode = ixgbe_set_pool_rx_mode,
-	.set_vf_rx = ixgbe_set_pool_rx,
-	.set_vf_tx = ixgbe_set_pool_tx,
-	.set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
	.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
-	.set_vf_rate_limit = ixgbe_set_vf_rate_limit,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
 #ifdef RTE_NIC_BYPASS
@@ -527,7 +588,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
	.timesync_disable = ixgbe_timesync_disable,
	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
-	.get_reg_length = ixgbe_get_reg_length,
	.get_reg = ixgbe_get_regs,
	.get_eeprom_length = ixgbe_get_eeprom_length,
	.get_eeprom = ixgbe_get_eeprom,
@@ -555,6 +615,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.xstats_get = ixgbevf_dev_xstats_get,
	.stats_reset = ixgbevf_dev_stats_reset,
	.xstats_reset = ixgbevf_dev_stats_reset,
+	.xstats_get_names = ixgbevf_dev_xstats_get_names,
	.dev_close = ixgbevf_dev_close,
	.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
@@ -567,6 +628,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.rx_queue_setup = ixgbe_dev_rx_queue_setup,
	.rx_queue_release = ixgbe_dev_rx_queue_release,
	.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+	.rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+	.tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
	.tx_queue_setup = ixgbe_dev_tx_queue_setup,
	.tx_queue_release = ixgbe_dev_tx_queue_release,
	.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
@@ -577,7 +640,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
	.rxq_info_get = ixgbe_rxq_info_get,
	.txq_info_get = ixgbe_txq_info_get,
	.mac_addr_set = ixgbevf_set_default_mac_addr,
-	.get_reg_length = ixgbevf_get_reg_length,
	.get_reg = ixgbevf_get_regs,
	.reta_update = ixgbe_dev_rss_reta_update,
	.reta_query = ixgbe_dev_rss_reta_query,
@@ -675,6 +737,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
 #define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
			   sizeof(rte_ixgbe_stats_strings[0]))

+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+	{"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_untagged)},
+	{"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_encrypted)},
+	{"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+		out_pkts_protected)},
+	{"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+		out_octets_encrypted)},
+	{"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+		out_octets_protected)},
+	{"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_untagged)},
+	{"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_badtag)},
+	{"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_nosci)},
+	{"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unknownsci)},
+	{"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+		in_octets_decrypted)},
+	{"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+		in_octets_validated)},
+	{"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unchecked)},
+	{"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_delayed)},
+	{"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_late)},
+	{"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_ok)},
+	{"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_invalid)},
+	{"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notvalid)},
+	{"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_unusedsa)},
+	{"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+		in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+			   sizeof(rte_ixgbe_macsec_strings[0]))
+
 /* Per-queue statistics */
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
	{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -685,6 +792,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
			   sizeof(rte_ixgbe_rxq_strings[0]))
+#define IXGBE_NB_RXQ_PRIO_VALUES 8

 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
	{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
@@ -695,6 +803,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
			   sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8

 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
	{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
@@ -788,6 +897,8 @@ ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

+	if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+		status = IXGBE_SUCCESS;
	return status;
 }

@@ -901,8 +1012,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
		PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
			     stat_mappings->rqsmr[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
-	}
-	else {
+	} else {
		PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
			     stat_mappings->tqsm[n], n);
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
@@ -911,7 +1021,7 @@
 }

 static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
 {
	struct ixgbe_stat_mapping_registers *stat_mappings =
		IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@@ -929,7 +1039,7 @@
 }

 static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 {
	uint8_t i;
	struct ixgbe_dcb_tc_config *tc;
@@ -952,7 +1062,7 @@
	tc = &dcb_config->tc_config[0];
	tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
	tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-	for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+	for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
		dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
		dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
	}
@@ -1013,10 +1123,11 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
 static int
 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1033,32 +1144,34 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
	eth_dev->dev_ops = &ixgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
	eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX and TX function.
	 */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
-		 * Tx queue may not initialized by primary process */
+		 * Tx queue may not initialized by primary process
+		 */
		if (eth_dev->data->tx_queues) {
			txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
			ixgbe_set_tx_function(eth_dev, txq);
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
-					"Using default TX function.");
+				     "Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);

		return 0;
	}

-	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
@@ -1086,7 +1199,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
	/* Initialize DCB configuration*/
	memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
-	ixgbe_dcb_init(hw,dcb_config);
+	ixgbe_dcb_init(hw, dcb_config);
	/* Get Hardware Flow Control setting */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;
@@ -1125,13 +1238,16 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
		diag = ixgbe_init_hw(hw);
	}

+	if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
+		diag = IXGBE_SUCCESS;
+
	if (diag == IXGBE_ERR_EEPROM_VERSION) {
		PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
-		    "LOM. Please be aware there may be issues associated "
-		    "with your hardware.");
+			     "LOM. Please be aware there may be issues associated "
+			     "with your hardware.");
		PMD_INIT_LOG(ERR, "If you are experiencing problems "
-		    "please contact your Intel or hardware representative "
-		    "who provided you with this hardware.");
+			     "please contact your Intel or hardware representative "
+			     "who provided you with this hardware.");
	} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
		PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
	if (diag) {
@@ -1150,12 +1266,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-			hw->mac.num_rar_entries, 0);
+					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
-			"Failed to allocate %u bytes needed to store "
-			"MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+			     "Failed to allocate %u bytes needed to store "
+			     "MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
@@ -1164,11 +1280,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)

	/* Allocate memory for storing hash filter MAC addresses */
	eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-			IXGBE_VMDQ_NUM_UC_MAC, 0);
+						    IXGBE_VMDQ_NUM_UC_MAC, 0);
	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
-			"Failed to allocate %d bytes needed to store MAC addresses",
-			ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+			     "Failed to allocate %d bytes needed to store MAC addresses",
+			     ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
		return -ENOMEM;
	}

@@ -1198,23 +1314,37 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
		     (int) hw->mac.type, (int) hw->phy.type);

	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
-			eth_dev->data->port_id, pci_dev->id.vendor_id,
-			pci_dev->id.device_id);
+		     eth_dev->data->port_id, pci_dev->id.vendor_id,
+		     pci_dev->id.device_id);

-	rte_intr_callback_register(&pci_dev->intr_handle,
-				   ixgbe_dev_interrupt_handler,
-				   (void *)eth_dev);
+	rte_intr_callback_register(intr_handle,
+				   ixgbe_dev_interrupt_handler, eth_dev);

	/* enable uio/vfio intr/eventfd mapping */
-	rte_intr_enable(&pci_dev->intr_handle);
+	rte_intr_enable(intr_handle);

	/* enable support intr */
	ixgbe_enable_intr(eth_dev);

+	/* initialize filter info */
+	memset(filter_info, 0,
+	       sizeof(struct ixgbe_filter_info));
+
	/* initialize 5tuple filter list */
	TAILQ_INIT(&filter_info->fivetuple_list);
-	memset(filter_info->fivetuple_mask, 0,
-	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+	/* initialize flow director filter list & hash */
+	ixgbe_fdir_filter_init(eth_dev);
+
+	/* initialize l2 tunnel filter list & hash */
+	ixgbe_l2_tn_filter_init(eth_dev);
+
+	TAILQ_INIT(&filter_ntuple_list);
+	TAILQ_INIT(&filter_ethertype_list);
+	TAILQ_INIT(&filter_syn_list);
+	TAILQ_INIT(&filter_fdir_list);
+	TAILQ_INIT(&filter_l2_tunnel_list);
+	TAILQ_INIT(&ixgbe_flow_list);

	return 0;
 }
@@ -1222,7 +1352,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;

	PMD_INIT_FUNC_TRACE();
@@ -1231,7 +1362,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
		return -EPERM;

	hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	pci_dev = eth_dev->pci_dev;

	if (hw->adapter_stopped == 0)
		ixgbe_dev_close(eth_dev);
@@ -1244,9 +1374,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
	ixgbe_swfw_lock_reset(hw);

	/* disable uio intr before callback unregister */
-	rte_intr_disable(&(pci_dev->intr_handle));
-	rte_intr_callback_unregister(&(pci_dev->intr_handle),
-				     ixgbe_dev_interrupt_handler, (void *)eth_dev);
+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     ixgbe_dev_interrupt_handler, eth_dev);

	/* uninitialize PF if max_vfs not zero */
	ixgbe_pf_host_uninit(eth_dev);
@@ -1257,9 +1387,154 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
	rte_free(eth_dev->data->hash_mac_addrs);
	eth_dev->data->hash_mac_addrs = NULL;

+	/* remove all the fdir filters & hash */
+	ixgbe_fdir_filter_uninit(eth_dev);
+
+	/* remove all the L2 tunnel filters & hash */
+	ixgbe_l2_tn_filter_uninit(eth_dev);
+
+	/* Remove all ntuple filters of the device */
+	ixgbe_ntuple_filter_uninit(eth_dev);
+
+	/* clear all the filters list */
+	ixgbe_filterlist_flush();
+
+	return 0;
+}
+
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+	struct ixgbe_5tuple_filter *p_5tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+		TAILQ_REMOVE(&filter_info->fivetuple_list,
+			     p_5tuple,
+			     entries);
+		rte_free(p_5tuple);
+	}
+	memset(filter_info->fivetuple_mask, 0,
+	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+	return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	struct ixgbe_fdir_filter *fdir_filter;
+
+	if (fdir_info->hash_map)
+		rte_free(fdir_info->hash_map);
+	if (fdir_info->hash_handle)
+		rte_hash_free(fdir_info->hash_handle);
+
+	while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+		TAILQ_REMOVE(&fdir_info->fdir_list,
+			     fdir_filter,
+			     entries);
+		rte_free(fdir_filter);
+	}
+
+	return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+	if (l2_tn_info->hash_map)
+		rte_free(l2_tn_info->hash_map);
+	if (l2_tn_info->hash_handle)
+		rte_hash_free(l2_tn_info->hash_handle);
+
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+			     l2_tn_filter,
+			     entries);
+		rte_free(l2_tn_filter);
+	}
+
+	return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_hw_fdir_info *fdir_info =
+		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters fdir_hash_params = {
+		.name = fdir_hash_name,
+		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
+		.key_len = sizeof(union ixgbe_atr_input),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	TAILQ_INIT(&fdir_info->fdir_list);
+	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+		 "fdir_%s", eth_dev->data->name);
+	fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+	if (!fdir_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+		return -EINVAL;
+	}
+	fdir_info->hash_map = rte_zmalloc("ixgbe",
+					  sizeof(struct ixgbe_fdir_filter *) *
+					  IXGBE_MAX_FDIR_FILTER_NUM,
+					  0);
+	if (!fdir_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir hash map!");
+		return -ENOMEM;
+	}
+	fdir_info->mask_added = FALSE;
+
	return 0;
 }

+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+	char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters l2_tn_hash_params = {
+		.name = l2_tn_hash_name,
+		.entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+		.key_len = sizeof(struct ixgbe_l2_tn_key),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+		.socket_id = rte_socket_id(),
+	};
+
+	TAILQ_INIT(&l2_tn_info->l2_tn_list);
+	snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+		 "l2_tn_%s", eth_dev->data->name);
+	l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+	if (!l2_tn_info->hash_handle) {
+		PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+		return -EINVAL;
+	}
+	l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+					   sizeof(struct ixgbe_l2_tn_filter *) *
+					   IXGBE_MAX_L2_TN_FILTER_NUM,
+					   0);
+	if (!l2_tn_info->hash_map) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for L2 TN hash map!");
+		return -ENOMEM;
+	}
+	l2_tn_info->e_tag_en = FALSE;
+	l2_tn_info->e_tag_fwd_en = FALSE;
+	l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+
+	return 0;
+}
 /*
  * Negotiate mailbox API version with the PF.
  * After reset API version is always set to the basic one (ixgbe_mbox_api_10).
@@ -1310,10 +1585,11 @@
 {
	int diag;
	uint32_t tc, tcs;
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1327,8 +1603,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
-	 * RX function */
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+	 * RX function
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		struct ixgbe_tx_queue *txq;
		/* TX queue function in primary, set by last queue initialized
		 * Tx queue may not initialized by primary process
@@ -1339,7 +1616,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
		} else {
			/* Use default TX function if we get here */
			PMD_INIT_LOG(NOTICE,
-				"No TX queues configured yet. Using default TX function.");
+				     "No TX queues configured yet. Using default TX function.");
		}

		ixgbe_set_rx_function(eth_dev);
@@ -1347,9 +1624,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
		return 0;
	}

-	pci_dev = eth_dev->pci_dev;
-
	rte_eth_copy_pci_info(eth_dev, pci_dev);
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
@@ -1398,12 +1674,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
-			hw->mac.num_rar_entries, 0);
+					       hw->mac.num_rar_entries, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
-			"Failed to allocate %u bytes needed to store "
-			"MAC addresses",
-			ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+			     "Failed to allocate %u bytes needed to store "
+			     "MAC addresses",
+			     ETHER_ADDR_LEN * hw->mac.num_rar_entries);
		return -ENOMEM;
	}

@@ -1433,14 +1709,19 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
	/* reset the hardware with the new settings */
	diag = hw->mac.ops.start_hw(hw);
	switch (diag) {
-		case  0:
-			break;
+	case  0:
+		break;

-		default:
-			PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-			return -EIO;
+	default:
+		PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+		return -EIO;
	}

+	rte_intr_callback_register(intr_handle,
+				   ixgbevf_dev_interrupt_handler, eth_dev);
+	rte_intr_enable(intr_handle);
+	ixgbevf_intr_enable(hw);
+
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1453,6 +1734,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct ixgbe_hw *hw;

	PMD_INIT_FUNC_TRACE();
@@ -1475,15 +1758,19 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

+	rte_intr_disable(intr_handle);
+	rte_intr_callback_unregister(intr_handle,
+				     ixgbevf_dev_interrupt_handler, eth_dev);
+
	return 0;
 }

 static struct eth_driver rte_ixgbe_pmd = {
	.pci_drv = {
-		.name = "rte_ixgbe_pmd",
		.id_table = pci_id_ixgbe_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-			RTE_PCI_DRV_DETACHABLE,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_ixgbe_dev_init,
	.eth_dev_uninit = eth_ixgbe_dev_uninit,
@@ -1495,49 +1782,22 @@ static struct eth_driver rte_ixgbe_pmd = {
 */
 static struct eth_driver rte_ixgbevf_pmd = {
	.pci_drv = {
-		.name = "rte_ixgbevf_pmd",
		.id_table = pci_id_ixgbevf_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_ixgbevf_dev_init,
	.eth_dev_uninit = eth_ixgbevf_dev_uninit,
	.dev_private_size = sizeof(struct ixgbe_adapter),
 };

-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	rte_eth_driver_register(&rte_ixgbe_pmd);
-	return 0;
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
- */
-static int
-rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
-	PMD_INIT_FUNC_TRACE();
-
-	rte_eth_driver_register(&rte_ixgbevf_pmd);
-	return 0;
-}
-
 static int
 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vfta;
	uint32_t vid_idx;
@@ -1575,15 +1835,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
+	uint32_t reg;
+	uint32_t qinq;
+
+	qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+	qinq &= IXGBE_DMATXCTL_GDV;

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
-		/* Only the high 16-bits is valid */
-		IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+		if (qinq) {
+			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+		} else {
+			ret = -ENOTSUP;
+			PMD_DRV_LOG(ERR, "Inner type is not supported"
+				    " by single VLAN");
+		}
+		break;
+	case ETH_VLAN_TYPE_OUTER:
+		if (qinq) {
+			/* Only the high 16-bits is valid */
+			IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+					IXGBE_EXVET_VET_EXT_SHIFT);
+		} else {
+			reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+			reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+			IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+			reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+			reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+				| ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+			IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+		}
+		break;
	default:
		ret = -EINVAL;
-		PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+		PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
		break;
	}

@@ -1611,7 +1903,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vfta * shadow_vfta =
+	struct ixgbe_vfta *shadow_vfta =
		IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
	uint32_t vlnctrl;
	uint16_t i;
@@ -1635,6 +1927,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 {
	struct ixgbe_hwstrip *hwstrip =
		IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+	struct ixgbe_rx_queue *rxq;

	if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
		return;
@@ -1643,6 +1936,16 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
		IXGBE_SET_HWSTRIP(hwstrip, queue);
	else
		IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+	if (queue >= dev->data->nb_rx_queues)
+		return;
+
+	rxq = dev->data->rx_queues[queue];
+
+	if (on)
+		rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+	else
+		rxq->vlan_flags = PKT_RX_VLAN_PKT;
 }

 static void
@@ -1659,12 +1962,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
		return;
	}
-	else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-		ctrl &= ~IXGBE_RXDCTL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-	}
+
+	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+	ctrl &= ~IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
	/* record those setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
 }
@@ -1683,12 +1986,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
		PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
		return;
	}
-	else {
-		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-		ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-		ctrl |= IXGBE_RXDCTL_VME;
-		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-	}
+
+	/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+	ctrl |= IXGBE_RXDCTL_VME;
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
	/* record those setting for HW strip per queue */
	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
@@ -1700,6 +2003,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;
+	struct ixgbe_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

@@ -1707,13 +2011,13 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl &= ~IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	}
-	else {
+	} else {
		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+			rxq = dev->data->rx_queues[i];
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
			ctrl &= ~IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);

			/* record those setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
@@ -1728,6 +2032,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t ctrl;
	uint16_t i;
+	struct ixgbe_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

@@ -1735,13 +2040,13 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
		ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-	}
-	else {
+	} else {
		/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
-			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+			rxq = dev->data->rx_queues[i];
+			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
			ctrl |= IXGBE_RXDCTL_VME;
-			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);

			/* record those setting for HW strip per queue */
			ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
@@ -1836,6 +2141,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
	uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 }
@@ -1843,6 +2149,8 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 static int
 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 {
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
	switch (nb_rx_q) {
	case 1:
	case 2:
@@ -1856,7 +2164,7 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
	}

	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;

	return 0;
 }
@@ -1873,6 +2181,8 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
	/* check multi-queue mode */
	switch (dev_conf->rxmode.mq_mode) {
	case ETH_MQ_RX_VMDQ_DCB:
+		PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+		break;
	case ETH_MQ_RX_VMDQ_DCB_RSS:
		/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
		PMD_INIT_LOG(ERR, "SRIOV active,"
@@ -1908,11 +2218,9 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)

		switch (dev_conf->txmode.mq_mode) {
		case ETH_MQ_TX_VMDQ_DCB:
-			/* DCB VMDQ in SRIOV mode, not implement yet */
-			PMD_INIT_LOG(ERR, "SRIOV is active,"
-					" unsupported VMDQ mq_mode tx %d.",
-					dev_conf->txmode.mq_mode);
-			return -EINVAL;
+			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+			break;
		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
			break;
@@ -2087,7 +2395,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
	int err, link_up = 0, negotiate = 0;
	uint32_t speed = 0;
@@ -2116,7 +2425,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
	ixgbe_stop_adapter(hw);

	/* reinitialize adapter
-	 * this calls reset and start */
+	 * this calls reset and start
+	 */
	status = ixgbe_pf_reset_hw(hw);
	if (status != 0)
		return -1;
@@ -2148,7 +2458,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
-				     " intr_vec\n", dev->data->nb_rx_queues);
+				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}
@@ -2166,6 +2476,37 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
		goto error;
	}

+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+		ETH_VLAN_EXTEND_MASK;
+	ixgbe_vlan_offload_set(dev, mask);
+
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+		/* Enable vlan filtering for VMDq */
+		ixgbe_vmdq_vlan_hw_filter_enable(dev);
+	}
+
+	/* Configure DCB hw */
+	ixgbe_configure_dcb(dev);
+
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+		err = ixgbe_fdir_configure(dev);
+		if (err)
+			goto error;
+	}
+
+	/* Restore vf rate limit */
+	if (vfinfo != NULL) {
+		for (vf = 0; vf < pci_dev->max_vfs; vf++)
+			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+				if (vfinfo[vf].tx_rate[idx] != 0)
+					rte_pmd_ixgbe_set_vf_rate_limit(
+						dev->data->port_id, vf,
+						vfinfo[vf].tx_rate[idx],
+						1 << idx);
+	}
+
+	ixgbe_restore_statistics_mapping(dev);
+
	err = ixgbe_dev_rxtx_start(dev);
	if (err < 0) {
		PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -2231,13 +2572,13 @@ skip_link_setup:
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			ixgbe_dev_lsc_interrupt_setup(dev);
+		ixgbe_dev_macsec_interrupt_setup(dev);
	} else {
		rte_intr_callback_unregister(intr_handle,
-					     ixgbe_dev_interrupt_handler,
-					     (void *)dev);
+					     ixgbe_dev_interrupt_handler, dev);
		if (dev->data->dev_conf.intr_conf.lsc != 0)
			PMD_INIT_LOG(INFO, "lsc won't enable because of"
-				     " no intr multiplex\n");
+				     " no intr multiplex");
	}

	/* check if rxq interrupt is enabled */
@@ -2250,36 +2591,8 @@ skip_link_setup:

	/* resume enabled intr since hw reset */
	ixgbe_enable_intr(dev);
-
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
-		ETH_VLAN_EXTEND_MASK;
-	ixgbe_vlan_offload_set(dev, mask);
-
-	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-		/* Enable vlan filtering for VMDq */
-		ixgbe_vmdq_vlan_hw_filter_enable(dev);
-	}
-
-	/* Configure DCB hw */
-	ixgbe_configure_dcb(dev);
-
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
-		err = ixgbe_fdir_configure(dev);
-		if (err)
-			goto error;
-	}
-
-	/* Restore vf rate limit */
-	if (vfinfo != NULL) {
-		for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
-			for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
-				if (vfinfo[vf].tx_rate[idx] != 0)
-					ixgbe_set_vf_rate_limit(dev, vf,
-						vfinfo[vf].tx_rate[idx],
-						1 << idx);
-	}
-
-	ixgbe_restore_statistics_mapping(dev);
+	ixgbe_l2_tunnel_conf(dev);
+	ixgbe_filter_restore(dev);

	return 0;

@@ -2300,10 +2613,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-	struct ixgbe_filter_info *filter_info =
-		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
-	struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
-	struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+	struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int vf;

	PMD_INIT_FUNC_TRACE();
@@ -2318,8 +2629,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
	/* stop adapter */
	ixgbe_stop_adapter(hw);

-	for (vf = 0; vfinfo != NULL &&
-		     vf < dev->pci_dev->max_vfs; vf++)
+	for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
		vfinfo[vf].clear_to_send = false;

	if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
@@ -2340,17 +2650,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
	memset(&link, 0, sizeof(link));
	rte_ixgbe_dev_atomic_write_link_status(dev, &link);

-	/* Remove all ntuple filters of the device */
-	for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
-	     p_5tuple != NULL; p_5tuple = p_5tuple_next) {
-		p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
-		TAILQ_REMOVE(&filter_info->fivetuple_list,
-			     p_5tuple, entries);
-		rte_free(p_5tuple);
-	}
-	memset(filter_info->fivetuple_mask, 0,
-	       sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
@@ -2452,6 +2751,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
			   struct ixgbe_hw_stats *hw_stats,
+			   struct ixgbe_macsec_stats *macsec_stats,
			   uint64_t *total_missed_rx, uint64_t *total_qbrc,
			   uint64_t *total_qprc, uint64_t *total_qprdc)
 {
@@ -2459,9 +2759,9 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
	uint32_t delta_gprc = 0;
	unsigned i;
	/* Workaround for RX byte count not including CRC bytes when CRC
-+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+	 * strip is enabled. CRC bytes are removed from counters when crc_strip
	 * is disabled.
-+ */
+	 */
	int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
			IXGBE_HLREG0_RXCRCSTRP);
@@ -2471,8 +2771,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
	hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	for (i = 0; i < 8; i++) {
-		uint32_t mp;
-		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+		uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
		/* global total per queue */
		hw_stats->mpc[i] += mp;
		/* Running comprehensive total for stats display */
@@ -2621,6 +2921,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
	/* Flow Director Stats registers */
	hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+	/* MACsec Stats registers */
+	macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+	macsec_stats->out_pkts_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+	macsec_stats->out_pkts_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+	macsec_stats->out_octets_encrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+	macsec_stats->out_octets_protected +=
+		IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+	macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+	macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+	macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+	macsec_stats->in_pkts_unknownsci +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+	macsec_stats->in_octets_decrypted +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+	macsec_stats->in_octets_validated +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+	macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+	macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+	macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+	for (i = 0; i < 2; i++) {
+		macsec_stats->in_pkts_ok +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+		macsec_stats->in_pkts_invalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+		macsec_stats->in_pkts_notvalid +=
+			IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+	}
+	macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+	macsec_stats->in_pkts_notusingsa +=
+		IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
 }

 /*
@@ -2633,6 +2967,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+		IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
	unsigned i;

@@ -2641,8 +2978,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	total_qprc = 0;
	total_qprdc = 0;

-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);

	if (stats == NULL)
		return;
@@ -2664,15 +3001,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
	/* Rx Errors */
	stats->imissed = total_missed_rx;
	stats->ierrors = hw_stats->crcerrs +
-			hw_stats->mspdc +
-			hw_stats->rlec +
-			hw_stats->ruc +
-			hw_stats->roc +
-			hw_stats->illerrc +
-			hw_stats->errbc +
-			hw_stats->rfc +
-			hw_stats->fccrc +
-			hw_stats->fclast;
+			 hw_stats->mspdc +
+			 hw_stats->rlec +
+			 hw_stats->ruc +
+			 hw_stats->roc +
+			 hw_stats->illerrc +
+			 hw_stats->errbc +
+			 hw_stats->rfc +
+			 hw_stats->fccrc +
+			 hw_stats->fclast;

	/* Tx Errors */
	stats->oerrors = 0;
@@ -2694,18 +3031,94 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-	return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
-		(IXGBE_NB_TXQ_PRIO_STATS * 8);
+	return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
+		(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+		(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+	const unsigned cnt_stats = ixgbe_xstats_calc_num();
+	unsigned stat, i, count;
+
+	if (xstats_names != NULL) {
+		count = 0;
+
+		/* Note: limit >= cnt_stats checked upstream
+		 * in rte_eth_xstats_names()
+		 */
+
+		/* Extended stats from ixgbe_hw_stats */
+		for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				 sizeof(xstats_names[count].name),
+				 "%s",
+				 rte_ixgbe_stats_strings[i].name);
+			count++;
+		}
+
+		/* MACsec Stats */
+		for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+			snprintf(xstats_names[count].name,
+				 sizeof(xstats_names[count].name),
+				 "%s",
+				 rte_ixgbe_macsec_strings[i].name);
+			count++;
+		}
+
+		/* RX Priority Stats */
+		for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+			for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+				snprintf(xstats_names[count].name,
+					 sizeof(xstats_names[count].name),
+					 "rx_priority%u_%s", i,
+					 rte_ixgbe_rxq_strings[stat].name);
+				count++;
+			}
+		}
+
+		/* TX Priority Stats */
+		for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+			for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+				snprintf(xstats_names[count].name,
					 sizeof(xstats_names[count].name),
+					 "tx_priority%u_%s", i,
+					 rte_ixgbe_txq_strings[stat].name);
+				count++;
+			}
+		}
+	}
+	return cnt_stats;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+	struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+	unsigned i;
+
+	if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+		return -ENOMEM;
+
+	if (xstats_names != NULL)
+		for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+			snprintf(xstats_names[i].name,
+				 sizeof(xstats_names[i].name),
+				 "%s", rte_ixgbevf_stats_strings[i].name);
+	return IXGBEVF_NB_XSTATS;
 }

 static int
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned n)
 {
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_hw_stats *hw_stats =
		IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+	struct ixgbe_macsec_stats *macsec_stats =
+		IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+				dev->data->dev_private);
	uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
	unsigned i, stat, count = 0;

@@ -2719,8 +3132,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
	total_qprc = 0;
	total_qprdc = 0;

-	ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
-			&total_qprc, &total_qprdc);
+	ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+			&total_qbrc, &total_qprc, &total_qprdc);

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
@@ -2731,39 +3144,41 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats from ixgbe_hw_stats */ count = 0; for (i = 0; i < IXGBE_NB_HW_STATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), "%s", - rte_ixgbe_stats_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_stats_strings[i].offset); + xstats[count].id = count; + count++; + } + + /* MACsec Stats */ + for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)macsec_stats) + + rte_ixgbe_macsec_strings[i].offset); + xstats[count].id = count; count++; } /* RX Priority Stats */ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_priority%u_%s", i, - rte_ixgbe_rxq_strings[stat].name); + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_rxq_strings[stat].offset + (sizeof(uint64_t) * i)); + xstats[count].id = count; count++; } } /* TX Priority Stats */ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_priority%u_%s", i, - rte_ixgbe_txq_strings[stat].name); + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_txq_strings[stat].offset + (sizeof(uint64_t) * i)); + xstats[count].id = count; count++; } } - return count; } @@ -2772,6 +3187,9 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) { struct ixgbe_hw_stats *stats = IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + struct ixgbe_macsec_stats *macsec_stats = + IXGBE_DEV_PRIVATE_TO_MACSEC_STATS( + dev->data->dev_private); unsigned count = ixgbe_xstats_calc_num(); @@ -2780,13 +3198,14 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev) /* Reset software totals */ memset(stats, 0, sizeof(*stats)); + memset(macsec_stats, 0, sizeof(*macsec_stats)); } static void ixgbevf_update_stats(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Good Rx packet, include VF loopback */ @@ -2811,7 +3230,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev) } static int -ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) @@ -2828,8 +3247,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats */ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), - "%s", rte_ixgbevf_stats_strings[i].name); + xstats[i].id = i; xstats[i].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbevf_stats_strings[i].offset); } @@ -2852,14 +3270,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; stats->obytes = hw_stats->vfgotc; - stats->imcasts = hw_stats->vfmprc; - /* stats->imcasts should be removed as imcasts is deprecated */ } static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) { - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats 
= (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Sync HW register to the last stats */ @@ -2870,16 +3286,37 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) hw_stats->vfgorc = 0; hw_stats->vfgptc = 0; hw_stats->vfgotc = 0; - hw_stats->vfmprc = 0; - } -static void -ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +static int +ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u16 eeprom_verh, eeprom_verl; + u32 etrack_id; + int ret; + + ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh); + ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl); + + etrack_id = (eeprom_verh << 16) | eeprom_verl; + ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (u32)ret) + return ret; + else + return 0; +} + +static void +ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; if (RTE_ETH_DEV_SRIOV(dev).active == 0) { @@ -2895,7 +3332,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; else @@ -2916,6 +3353,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) !RTE_ETH_DEV_SRIOV(dev).active) dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO; + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP; + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X550EM_x || hw->mac.type == ixgbe_mac_X550EM_a) @@ -2929,6 +3370,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO; + if (hw->mac.type == ixgbe_mac_82599EB || + hw->mac.type == ixgbe_mac_X540) + dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT; + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X550EM_x || hw->mac.type == ixgbe_mac_X550EM_a) @@ -3009,15 +3454,17 @@ static void ixgbevf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + dev_info->pci_dev = pci_dev; dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues; dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */ - dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */ + dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */ dev_info->max_mac_addrs = hw->mac.num_rar_entries; dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC; - dev_info->max_vfs = dev->pci_dev->max_vfs; + dev_info->max_vfs = pci_dev->max_vfs; if (hw->mac.type == ixgbe_mac_82598EB) dev_info->max_vmdq_pools = ETH_16_POOLS; 
else @@ -3224,6 +3671,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) return 0; } +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + intr->mask |= IXGBE_EICR_LINKSEC; + + return 0; +} + /* * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. * @@ -3258,6 +3727,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) if (eicr & IXGBE_EICR_MAILBOX) intr->flags |= IXGBE_FLAG_MAILBOX; + if (eicr & IXGBE_EICR_LINKSEC) + intr->flags |= IXGBE_FLAG_MACSEC; + if (hw->mac.type == ixgbe_mac_X550EM_x && hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x)) @@ -3279,6 +3751,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev) { + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); struct rte_eth_link link; memset(&link, 0, sizeof(link)); @@ -3293,11 +3766,11 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) PMD_INIT_LOG(INFO, " Port %d: Link Down", (int)(dev->data->port_id)); } - PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d", - dev->pci_dev->addr.domain, - dev->pci_dev->addr.bus, - dev->pci_dev->addr.devid, - dev->pci_dev->addr.function); + PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT, + pci_dev->addr.domain, + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); } /* @@ -3311,13 +3784,13 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev) * - On failure, a negative value. 
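 * (After this change, only the LSC cause is masked while the delayed
 *  link check armed via rte_eal_alarm_set() is pending; the remaining
 *  causes are re-enabled immediately, and the delayed handler restores
 *  the original interrupt mask.)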
*/ static int -ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev, + struct rte_intr_handle *intr_handle) { struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); int64_t timeout; struct rte_eth_link link; - int intr_enable_delay = false; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -3350,20 +3823,19 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; ixgbe_dev_link_status_print(dev); - - intr_enable_delay = true; - } - - if (intr_enable_delay) { + intr->mask_original = intr->mask; + /* only disable lsc interrupt */ + intr->mask &= ~IXGBE_EIMS_LSC; if (rte_eal_alarm_set(timeout * 1000, - ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0) + ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) PMD_DRV_LOG(ERR, "Error setting alarm"); - } else { - PMD_DRV_LOG(DEBUG, "enable intr immediately"); - ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + else + intr->mask = intr->mask_original; } + PMD_DRV_LOG(DEBUG, "enable intr immediately"); + ixgbe_enable_intr(dev); + rte_intr_enable(intr_handle); return 0; } @@ -3386,12 +3858,16 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct ixgbe_interrupt *intr = IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t eicr; + ixgbe_disable_intr(hw); + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); if (eicr & IXGBE_EICR_MAILBOX) ixgbe_pf_mbx_process(dev); @@ -3405,12 +3881,22 @@ ixgbe_dev_interrupt_delayed_handler(void *param) ixgbe_dev_link_update(dev, 0); intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; ixgbe_dev_link_status_print(dev); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); + } + + if (intr->flags & IXGBE_FLAG_MACSEC) { + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC, + NULL); + intr->flags &= ~IXGBE_FLAG_MACSEC; } + /* restore original mask */ + intr->mask = intr->mask_original; + intr->mask_original = 0; + PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr); ixgbe_enable_intr(dev); - rte_intr_enable(&(dev->pci_dev->intr_handle)); + rte_intr_enable(intr_handle); } /** @@ -3426,13 +3912,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param) * void */ static void -ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, +ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param) { struct rte_eth_dev *dev = (struct rte_eth_dev *)param; ixgbe_dev_interrupt_get_status(dev); - ixgbe_dev_interrupt_action(dev); + ixgbe_dev_interrupt_action(dev, handle); } static int @@ -3575,7 +4061,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Enable flow control according to the current settings. 
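 * (The PFC variant below applies the same modes per traffic class;
 *  note that Tx priority pause is kept enabled whenever more than one
 *  TC still has Rx pause enabled through FCRTH.FCEN.)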
*/ static int -ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) { int ret_val = 0; uint32_t mflcn_reg, fccfg_reg; @@ -3622,13 +4108,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_rx_pause: /* @@ -3645,20 +4131,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ @@ -3669,7 +4155,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); ret_val = IXGBE_ERR_CONFIG; goto out; - break; } /* Set 802.3x based flow control settings. */ @@ -3708,13 +4193,13 @@ out: } static int -ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int32_t ret_val = IXGBE_NOT_IMPLEMENTED; if (hw->mac.type != ixgbe_mac_82598EB) { - ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num); + ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); } return ret_val; } @@ -3728,9 +4213,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p uint8_t tc_num; uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, @@ -3763,7 +4248,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; - err = ixgbe_dcb_pfc_enable(dev,tc_num); + err = ixgbe_dcb_pfc_enable(dev, tc_num); /* Not negotiated is not an error case */ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) @@ -3797,7 +4282,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev, if (reta_size != sp_reta_size) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - "(%d)\n", reta_size, sp_reta_size); + "(%d)", reta_size, sp_reta_size); return -EINVAL; } @@ -3844,7 +4329,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev, if (reta_size != sp_reta_size) { PMD_DRV_LOG(ERR, "The size of hash lookup table configured " "(%d) doesn't match the number hardware can supported " - 
"(%d)\n", reta_size, sp_reta_size); + "(%d)", reta_size, sp_reta_size); return -EINVAL; } @@ -3895,6 +4380,51 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) ixgbe_add_rar(dev, addr, 0, 0); } +static bool +is_device_supported(struct rte_eth_dev *dev, struct eth_driver *drv) +{ + if (strcmp(dev->driver->pci_drv.driver.name, + drv->pci_drv.driver.name)) + return false; + + return true; +} + +int +rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf, + struct ether_addr *mac_addr) +{ + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + int rar_entry; + uint8_t *new_mac = (uint8_t *)(mac_addr); + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); + + if (!is_device_supported(dev, &rte_ixgbe_pmd)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + rar_entry = hw->mac.num_rar_entries - (vf + 1); + + if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) { + rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, + ETHER_ADDR_LEN); + return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, + IXGBE_RAH_AV); + } + return -EINVAL; +} + static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { @@ -3911,7 +4441,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ + * feature has not been enabled before. + */ if (!dev->data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) @@ -3971,7 +4502,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) static int ixgbevf_dev_configure(struct rte_eth_dev *dev) { - struct rte_eth_conf* conf = &dev->data->dev_conf; + struct rte_eth_conf *conf = &dev->data->dev_conf; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; @@ -4010,7 +4541,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t intr_vector = 0; - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; int err, mask = 0; @@ -4033,10 +4565,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } /* Set vfta */ - ixgbevf_set_vfta_all(dev,1); + ixgbevf_set_vfta_all(dev, 1); /* Set HW strip */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; ixgbevf_vlan_offload_set(dev, mask); @@ -4055,7 +4587,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) dev->data->nb_rx_queues * sizeof(int), 0); if (intr_handle->intr_vec == NULL) { PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" - " intr_vec\n", dev->data->nb_rx_queues); + " intr_vec", dev->data->nb_rx_queues); return -ENOMEM; } } @@ -4073,10 +4605,13 @@ static void ixgbevf_dev_stop(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; PMD_INIT_FUNC_TRACE(); + ixgbevf_intr_disable(hw); + hw->adapter_stopped = 1; 
ixgbe_stop_adapter(hw); @@ -4084,7 +4619,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) * Clear what we set, but we still keep shadow_vfta to * restore after device starts */ - ixgbevf_set_vfta_all(dev,0); + ixgbevf_set_vfta_all(dev, 0); /* Clear stored conf */ dev->data->scattered_rx = 0; @@ -4123,18 +4658,19 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); int i = 0, j = 0, vfta = 0, mask = 1; - for (i = 0; i < IXGBE_VFTA_SIZE; i++){ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) { vfta = shadow_vfta->vfta[i]; if (vfta) { mask = 1; - for (j = 0; j < 32; j++){ + for (j = 0; j < 32; j++) { if (vfta & mask) - ixgbe_set_vfta(hw, (i<<5)+j, 0, on); - mask<<=1; + ixgbe_set_vfta(hw, (i<<5)+j, 0, + on, false); + mask <<= 1; } } } @@ -4146,7 +4682,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vid_idx = 0; uint32_t vid_bit = 0; @@ -4155,7 +4691,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ - ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on); + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); if (ret) { PMD_INIT_LOG(ERR, "Unable to set VF vlan"); return ret; @@ -4191,7 +4727,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on); + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); } static void @@ -4207,19 +4743,19 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); for (i = 0; i < hw->mac.max_rx_queues; i++) - ixgbevf_vlan_strip_queue_set(dev,i,on); + ixgbevf_vlan_strip_queue_set(dev, i, on); } } static int -ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) +ixgbe_vt_check(struct ixgbe_hw *hw) { uint32_t reg_val; - /* we only need to do this if VMDq is enabled */ + /* if Virtualization Technology is enabled */ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL); if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) { - PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting"); + PMD_INIT_LOG(ERR, "VT must be enabled for this setting"); return -1; } @@ -4227,9 +4763,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) } static uint32_t -ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) +ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) { uint32_t vector = 0; + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((uc_addr->addr_bytes[4] >> 4) | @@ -4257,8 +4794,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) } static int -ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, - uint8_t on) +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint8_t on) { uint32_t vector; uint32_t uta_idx; @@ -4279,7 +4816,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, if (hw->mac.type < ixgbe_mac_82599EB) return -ENOTSUP; - vector = ixgbe_uta_vector(hw,mac_addr); 
+ vector = ixgbe_uta_vector(hw, mac_addr); uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; uta_shift = vector & ixgbe_uta_bit_mask; @@ -4304,7 +4841,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); else - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } @@ -4356,618 +4893,477 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val) return new_val; } -static int -ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, - uint16_t rx_mask, uint8_t on) + +int +rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) { - int val = 0; + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (hw->mac.type == ixgbe_mac_82598EB) { - PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" - " on 82599 hardware and newer"); + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); + + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - } - if (ixgbe_vmdq_mode_check(hw) < 0) + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + + mac->ops.set_vlan_anti_spoofing(hw, on, vf); + + return 0; +} + +int +rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on) +{ + struct ixgbe_hw *hw; + struct ixgbe_mac_info *mac; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); + + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); + if (vf >= pci_dev->max_vfs) + return -EINVAL; - if (on) - vmolr |= val; - else - vmolr &= ~val; + if (on > 1) + return -EINVAL; - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mac = &hw->mac; + mac->ops.set_mac_anti_spoofing(hw, on, vf); return 0; } -static int -ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +int +rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id) { - uint32_t reg,addr; - uint32_t val; - const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); - if (ixgbe_vmdq_mode_check(hw) < 0) + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); - reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; + if (vf >= pci_dev->max_vfs) + return -EINVAL; - if (on) - reg |= val; - else - reg &= ~val; + if (vlan_id > ETHER_MAX_VLAN_ID) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf)); + if (vlan_id) { + ctrl = vlan_id; + ctrl |= IXGBE_VMVIR_VLANA_DEFAULT; + } else { + ctrl = 0; + } - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl); 
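	/* Usage sketch (port/VF numbers illustrative): insert VLAN 100 into
	 * all traffic sent by VF 1 on port 0, then disable insertion again:
	 *
	 *     rte_pmd_ixgbe_set_vf_vlan_insert(0, 1, 100);
	 *     rte_pmd_ixgbe_set_vf_vlan_insert(0, 1, 0);
	 */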
return 0; } -static int -ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) +int +rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on) { - uint32_t reg,addr; - uint32_t val; - const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; + uint32_t ctrl; + struct rte_eth_dev *dev; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - if (ixgbe_vmdq_mode_check(hw) < 0) + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2); - reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; + if (on > 1) + return -EINVAL; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); + /* enable or disable VMDQ loopback */ if (on) - reg |= val; + ctrl |= IXGBE_PFDTXGSWC_VT_LBEN; else - reg &= ~val; + ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN; - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl); return 0; } -static int -ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask, uint8_t vlan_on) +int +rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on) { - int ret = 0; - uint16_t pool_idx; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw *hw; + uint32_t reg_value; + int i; + int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT); + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; - if (ixgbe_vmdq_mode_check(hw) < 0) + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { - if (pool_mask & ((uint64_t)(1ULL << pool_idx))) { - ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on); - if (ret < 0) - return ret; - } + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + for (i = 0; i <= num_queues; i++) { + reg_value = IXGBE_QDE_WRITE | + (i << IXGBE_QDE_IDX_SHIFT) | + (on & IXGBE_QDE_ENABLE); + IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value); } - return ret; + return 0; } -#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ -#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ -#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ -#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. 
*/ -#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ - ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ - ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) - -static int -ixgbe_mirror_rule_set(struct rte_eth_dev *dev, - struct rte_eth_mirror_conf *mirror_conf, - uint8_t rule_id, uint8_t on) +int +rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on) { - uint32_t mr_ctl,vlvf; - uint32_t mp_lsb = 0; - uint32_t mv_msb = 0; - uint32_t mv_lsb = 0; - uint32_t mp_msb = 0; - uint8_t i = 0; - int reg_index = 0; - uint64_t vlan_mask = 0; + struct ixgbe_hw *hw; + uint32_t reg_value; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; - const uint8_t pool_mask_offset = 32; - const uint8_t vlan_mask_offset = 32; - const uint8_t dst_pool_offset = 8; - const uint8_t rule_mr_offset = 4; - const uint8_t mirror_rule_mask= 0x0F; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - struct ixgbe_mirror_info *mr_info = - (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint8_t mirror_type = 0; + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); - if (ixgbe_vmdq_mode_check(hw) < 0) + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - if (rule_id >= IXGBE_MAX_MIRROR_RULES) + /* only support VF's 0 to 63 */ + if ((vf >= pci_dev->max_vfs) || (vf > 63)) return -EINVAL; - if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { - PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", - mirror_conf->rule_type); + if (on > 1) return -EINVAL; - } - if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { - mirror_type |= IXGBE_MRCTL_VLME; - /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ - for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) { - if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { - /* search vlan id related pool vlan filter index */ - reg_index = ixgbe_find_vlvf_slot(hw, - mirror_conf->vlan.vlan_id[i]); - if (reg_index < 0) - return -EINVAL; - vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); - if ((vlvf & IXGBE_VLVF_VIEN) && - ((vlvf & IXGBE_VLVF_VLANID_MASK) == - mirror_conf->vlan.vlan_id[i])) - vlan_mask |= (1ULL << reg_index); - else - return -EINVAL; - } - } + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf)); + if (on) + reg_value |= IXGBE_SRRCTL_DROP_EN; + else + reg_value &= ~IXGBE_SRRCTL_DROP_EN; - if (on) { - mv_lsb = vlan_mask & 0xFFFFFFFF; - mv_msb = vlan_mask >> vlan_mask_offset; + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value); - mr_info->mr_conf[rule_id].vlan.vlan_mask = - mirror_conf->vlan.vlan_mask; - for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { - if (mirror_conf->vlan.vlan_mask & (1ULL << i)) - mr_info->mr_conf[rule_id].vlan.vlan_id[i] = - mirror_conf->vlan.vlan_id[i]; - } - } else { - mv_lsb = 0; - mv_msb = 0; - mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; - for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) - mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; - } - } + return 0; +} - /* - * if enable pool mirror, write related pool mask register,if disable - * pool mirror, clear PFMRVM register - */ - if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { - mirror_type |= IXGBE_MRCTL_VPME; - if (on) { - mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; - mp_msb = mirror_conf->pool_mask >> pool_mask_offset; - mr_info->mr_conf[rule_id].pool_mask = - mirror_conf->pool_mask; +int +rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on) +{ + struct 
rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw; + uint16_t queues_per_pool; + uint32_t q; - } else { - mp_lsb = 0; - mp_msb = 0; - mr_info->mr_conf[rule_id].pool_mask = 0; - } - } - if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) - mirror_type |= IXGBE_MRCTL_UPME; - if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) - mirror_type |= IXGBE_MRCTL_DPME; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - /* read mirror control register and recalculate it */ - mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (on) { - mr_ctl |= mirror_type; - mr_ctl &= mirror_rule_mask; - mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; - } else - mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); + if (!is_device_supported(dev, &rte_ixgbe_pmd)) + return -ENOTSUP; - mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; - mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; + if (vf >= pci_dev->max_vfs) + return -EINVAL; - /* write mirrror control register */ - IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + if (on > 1) + return -EINVAL; - /* write pool mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), - mp_msb); - } - /* write VLAN mirrror control register */ - if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), - mv_msb); - } + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP); + + /* The PF has 128 queue pairs and in SRIOV configuration + * those queues will be assigned to VF's, so RXDCTL + * registers will be dealing with queues which will be + * assigned to VF's. + * Let's say we have SRIOV configured with 31 VF's then the + * first 124 queues 0-123 will be allocated to VF's and only + * the last 4 queues 123-127 will be assigned to the PF. 
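 * (More precisely, queues 124-127 go to the PF; in general VF n owns
 *  queues n * queues_per_pool through (n + 1) * queues_per_pool - 1,
 *  which is exactly the range the loop below walks.)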
+ */ + if (hw->mac.type == ixgbe_mac_82598EB) + queues_per_pool = (uint16_t)hw->mac.max_rx_queues / + ETH_16_POOLS; + else + queues_per_pool = (uint16_t)hw->mac.max_rx_queues / + ETH_64_POOLS; + for (q = 0; q < queues_per_pool; q++) + (*dev->dev_ops->vlan_strip_queue_set)(dev, + q + vf * queues_per_pool, on); return 0; } -static int -ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) +int +rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on) { - int mr_ctl = 0; - uint32_t lsb_val = 0; - uint32_t msb_val = 0; - const uint8_t rule_mr_offset = 4; + int val = 0; + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw; + uint32_t vmolr; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_mirror_info *mr_info = - (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); - if (ixgbe_vmdq_mode_check(hw) < 0) + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - memset(&mr_info->mr_conf[rule_id], 0, - sizeof(struct rte_eth_mirror_conf)); + if (vf >= pci_dev->max_vfs) + return -EINVAL; - /* clear PFVMCTL register */ - IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + if (on > 1) + return -EINVAL; - /* clear pool mask register */ - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); - IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); - /* clear vlan mask register */ - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); - IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); + if (hw->mac.type == ixgbe_mac_82598EB) { + PMD_INIT_LOG(ERR, "setting VF receive mode set should be done" + " on 82599 hardware and newer"); + return -ENOTSUP; + } + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val); + + if (on) + vmolr |= val; + else + vmolr &= ~val; + + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); return 0; } -static int -ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +int +rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on) { - uint32_t mask; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + uint32_t reg, addr; + uint32_t val; + const uint8_t bit1 = 0x1; + struct ixgbe_hw *hw; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - mask |= (1 << IXGBE_MISC_VEC_ID); - RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - rte_intr_enable(&dev->pci_dev->intr_handle); + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); - return 0; -} + if (!is_device_supported(dev, &rte_ixgbe_pmd)) + return -ENOTSUP; -static int -ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) -{ - uint32_t mask; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (vf >= pci_dev->max_vfs) + return -EINVAL; - mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); - mask &= ~(1 << IXGBE_MISC_VEC_ID); - RTE_SET_USED(queue_id); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + if (on > 1) + return -EINVAL; - return 0; -} + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); -static int -ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) -{ - uint32_t mask; - 
struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_interrupt *intr = - IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; - if (queue_id < 16) { - ixgbe_disable_intr(hw); - intr->mask |= (1 << queue_id); - ixgbe_enable_intr(dev); - } else if (queue_id < 32) { - mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); - mask &= (1 << queue_id); - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); - } else if (queue_id < 64) { - mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); - mask &= (1 << (queue_id - 32)); - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ + if (vf >= 32) { + addr = IXGBE_VFRE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFRE(0); + val = bit1 << vf; } - rte_intr_enable(&dev->pci_dev->intr_handle); + + reg = IXGBE_READ_REG(hw, addr); + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr, reg); return 0; } -static int -ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +int +rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on) { - uint32_t mask; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_interrupt *intr = - IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct rte_eth_dev *dev; + struct rte_pci_device *pci_dev; + uint32_t reg, addr; + uint32_t val; + const uint8_t bit1 = 0x1; - if (queue_id < 16) { - ixgbe_disable_intr(hw); - intr->mask &= ~(1 << queue_id); - ixgbe_enable_intr(dev); - } else if (queue_id < 32) { - mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); - mask &= ~(1 << queue_id); - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); - } else if (queue_id < 64) { - mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); - mask &= ~(1 << (queue_id - 32)); - IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + struct ixgbe_hw *hw; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); + + if (!is_device_supported(dev, &rte_ixgbe_pmd)) + return -ENOTSUP; + + if (vf >= pci_dev->max_vfs) + return -EINVAL; + + if (on > 1) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ + if (vf >= 32) { + addr = IXGBE_VFTE(1); + val = bit1 << (vf - 32); + } else { + addr = IXGBE_VFTE(0); + val = bit1 << vf; } + reg = IXGBE_READ_REG(hw, addr); + + if (on) + reg |= val; + else + reg &= ~val; + + IXGBE_WRITE_REG(hw, addr, reg); + return 0; } -static void -ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, - uint8_t queue, uint8_t msix_vector) +int +rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, + uint64_t vf_mask, uint8_t vlan_on) { - uint32_t tmp, idx; + struct rte_eth_dev *dev; + int ret = 0; + uint16_t vf_idx; + struct ixgbe_hw *hw; - if (direction == -1) { - /* other causes */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); - tmp &= ~0xFF; - tmp |= msix_vector; - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); - } else { - /* rx or tx cause */ - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - idx = ((16 * (queue & 1)) + (8 * direction)); - tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); - tmp &= ~(0xFF << idx); - tmp |= (msix_vector << idx); - IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, 
&rte_ixgbe_pmd)) + return -ENOTSUP; + + if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0)) + return -EINVAL; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; + + for (vf_idx = 0; vf_idx < 64; vf_idx++) { + if (vf_mask & ((uint64_t)(1ULL << vf_idx))) { + ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx, + vlan_on, false); + if (ret < 0) + return ret; + } } + + return ret; } -/** - * set the IVAR registers, mapping interrupt causes to vectors - * @param hw - * pointer to ixgbe_hw struct - * @direction - * 0 for Rx, 1 for Tx, -1 for other causes - * @queue - * queue to map the corresponding interrupt to - * @msix_vector - * the vector to map to the corresponding queue - */ -static void -ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, - uint8_t queue, uint8_t msix_vector) +int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, + uint16_t tx_rate, uint64_t q_msk) { - uint32_t tmp, idx; + struct rte_eth_dev *dev; + struct ixgbe_hw *hw; + struct ixgbe_vf_info *vfinfo; + struct rte_eth_link link; + uint8_t nb_q_per_pool; + uint32_t queue_stride; + uint32_t queue_idx, idx = 0, vf_idx; + uint32_t queue_end; + uint16_t total_rate = 0; + struct rte_pci_device *pci_dev; - msix_vector |= IXGBE_IVAR_ALLOC_VAL; - if (hw->mac.type == ixgbe_mac_82598EB) { - if (direction == -1) - direction = 0; - idx = (((direction * 64) + queue) >> 2) & 0x1F; - tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); - tmp &= ~(0xFF << (8 * (queue & 0x3))); - tmp |= (msix_vector << (8 * (queue & 0x3))); - IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); - } else if ((hw->mac.type == ixgbe_mac_82599EB) || - (hw->mac.type == ixgbe_mac_X540)) { - if (direction == -1) { - /* other causes */ - idx = ((queue & 1) * 8); - tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); - tmp &= ~(0xFF << idx); - tmp |= (msix_vector << idx); - IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); - } else { - /* rx or tx causes */ - idx = ((16 * (queue & 1)) + (8 * direction)); - tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); - tmp &= ~(0xFF << idx); - tmp |= (msix_vector << idx); - IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); - } - } -} - -static void -ixgbevf_configure_msix(struct rte_eth_dev *dev) -{ - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t q_idx; - uint32_t vector_idx = IXGBE_MISC_VEC_ID; - - /* won't configure msix register if no mapping is done - * between intr vector and event fd. 
- */ - if (!rte_intr_dp_is_en(intr_handle)) - return; - - /* Configure all RX queues of VF */ - for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { - /* Force all queue use vector 0, - * as IXGBE_VF_MAXMSIVECOTR = 1 - */ - ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); - intr_handle->intr_vec[q_idx] = vector_idx; - } - - /* Configure VF other cause ivar */ - ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); -} - -/** - * Sets up the hardware to properly generate MSI-X interrupts - * @hw - * board private structure - */ -static void -ixgbe_configure_msix(struct rte_eth_dev *dev) -{ - struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; - struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t queue_id, base = IXGBE_MISC_VEC_ID; - uint32_t vec = IXGBE_MISC_VEC_ID; - uint32_t mask; - uint32_t gpie; - - /* won't configure msix register if no mapping is done - * between intr vector and event fd - */ - if (!rte_intr_dp_is_en(intr_handle)) - return; - - if (rte_intr_allow_others(intr_handle)) - vec = base = IXGBE_RX_VEC_START; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - /* setup GPIE for MSI-x mode */ - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); - gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | - IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; - /* auto clearing and auto setting corresponding bits in EIMS - * when MSI-X interrupt is triggered - */ - if (hw->mac.type == ixgbe_mac_82598EB) { - IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); - } else { - IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); - } - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - - /* Populate the IVAR table and set the ITR values to the - * corresponding register. - */ - for (queue_id = 0; queue_id < dev->data->nb_rx_queues; - queue_id++) { - /* by default, 1:1 mapping */ - ixgbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - IXGBE_MISC_VEC_ID); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); - break; - default: - break; - } - IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), - IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); - - /* set up to autoclear timer, and the vectors */ - mask = IXGBE_EIMS_ENABLE_MASK; - mask &= ~(IXGBE_EIMS_OTHER | - IXGBE_EIMS_MAILBOX | - IXGBE_EIMS_LSC); - - IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); -} + dev = &rte_eth_devices[port]; + pci_dev = IXGBE_DEV_TO_PCI(dev); + rte_eth_link_get_nowait(port, &link); -static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, - uint16_t queue_idx, uint16_t tx_rate) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t rf_dec, rf_int; - uint32_t bcnrc_val; - uint16_t link_speed = dev->data->dev_link.link_speed; + if (!is_device_supported(dev, &rte_ixgbe_pmd)) + return -ENOTSUP; - if (queue_idx >= hw->mac.max_tx_queues) + if (vf >= pci_dev->max_vfs) return -EINVAL; - if (tx_rate != 0) { - /* Calculate the rate factor values to set */ - rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; - rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; - rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; - - bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & - IXGBE_RTTBCNRC_RF_INT_MASK_M); - bcnrc_val |= (rf_dec & 
IXGBE_RTTBCNRC_RF_DEC_MASK); - } else { - bcnrc_val = 0; - } - - /* - * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM - * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise - * set as 0x4. - */ - if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len >= - IXGBE_MAX_JUMBO_FRAME_SIZE)) - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, - IXGBE_MMW_SIZE_JUMBO_FRAME); - else - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, - IXGBE_MMW_SIZE_DEFAULT); - - /* Set RTTBCNRC of queue X */ - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} + if (tx_rate > link.link_speed) + return -EINVAL; -static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, - uint16_t tx_rate, uint64_t q_msk) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vf_info *vfinfo = - *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); - uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; - uint32_t queue_stride = - IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; - uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx; - uint32_t queue_end = queue_idx + nb_q_per_pool - 1; - uint16_t total_rate = 0; + if (q_msk == 0) + return 0; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private)); + nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool; + queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active; + queue_idx = vf * queue_stride; + queue_end = queue_idx + nb_q_per_pool - 1; if (queue_end >= hw->mac.max_tx_queues) return -EINVAL; - if (vfinfo != NULL) { - for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) { + if (vfinfo) { + for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) { if (vf_idx == vf) continue; for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate); idx++) total_rate += vfinfo[vf_idx].tx_rate[idx]; } - } else + } else { return -EINVAL; + } /* Store tx_rate for this vf. */ for (idx = 0; idx < nb_q_per_pool; idx++) { @@ -4979,8 +5375,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, } if (total_rate > dev->data->dev_link.link_speed) { - /* - * Reset stored TX rate of the VF if it causes exceed + /* Reset stored TX rate of the VF if it causes exceed * link speed. */ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate)); @@ -4997,1449 +5392,2436 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf, return 0; } -static void -ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, - __attribute__((unused)) uint32_t index, - __attribute__((unused)) uint32_t pool) +#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */ +#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */ +#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */ +#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. 
*/ +#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \ + ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \ + ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN)) + +static int +ixgbe_mirror_rule_set(struct rte_eth_dev *dev, + struct rte_eth_mirror_conf *mirror_conf, + uint8_t rule_id, uint8_t on) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int diag; + uint32_t mr_ctl, vlvf; + uint32_t mp_lsb = 0; + uint32_t mv_msb = 0; + uint32_t mv_lsb = 0; + uint32_t mp_msb = 0; + uint8_t i = 0; + int reg_index = 0; + uint64_t vlan_mask = 0; - /* - * On a 82599 VF, adding again the same MAC addr is not an idempotent - * operation. Trap this case to avoid exhausting the [very limited] - * set of PF resources used to store VF MAC addresses. - */ - if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) - return; - diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); - if (diag == 0) - return; - PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); -} + const uint8_t pool_mask_offset = 32; + const uint8_t vlan_mask_offset = 32; + const uint8_t dst_pool_offset = 8; + const uint8_t rule_mr_offset = 4; + const uint8_t mirror_rule_mask = 0x0F; -static void -ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; - struct ether_addr *mac_addr; - uint32_t i; - int diag; + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint8_t mirror_type = 0; - /* - * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does - * not support the deletion of a given MAC address. - * Instead, it imposes to delete all MAC addresses, then to add again - * all MAC addresses with the exception of the one to be deleted. - */ - (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; - /* - * Add again all MAC addresses, with the exception of the deleted one - * and of the permanent MAC address. 
- */ - for (i = 0, mac_addr = dev->data->mac_addrs; - i < hw->mac.num_rar_entries; i++, mac_addr++) { - /* Skip the deleted MAC address */ - if (i == index) - continue; - /* Skip NULL MAC addresses */ - if (is_zero_ether_addr(mac_addr)) - continue; - /* Skip the permanent MAC address */ - if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) - continue; - diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); - if (diag != 0) - PMD_DRV_LOG(ERR, - "Adding again MAC address " - "%02x:%02x:%02x:%02x:%02x:%02x failed " - "diag=%d", - mac_addr->addr_bytes[0], - mac_addr->addr_bytes[1], - mac_addr->addr_bytes[2], - mac_addr->addr_bytes[3], - mac_addr->addr_bytes[4], - mac_addr->addr_bytes[5], - diag); + if (rule_id >= IXGBE_MAX_MIRROR_RULES) + return -EINVAL; + + if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) { + PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.", + mirror_conf->rule_type); + return -EINVAL; } -} -static void -ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { + mirror_type |= IXGBE_MRCTL_VLME; + /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ + for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { + /* search vlan id related pool vlan filter index */ + reg_index = ixgbe_find_vlvf_slot(hw, + mirror_conf->vlan.vlan_id[i], + false); + if (reg_index < 0) + return -EINVAL; + vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); + if ((vlvf & IXGBE_VLVF_VIEN) && + ((vlvf & IXGBE_VLVF_VLANID_MASK) == + mirror_conf->vlan.vlan_id[i])) + vlan_mask |= (1ULL << reg_index); + else + return -EINVAL; + } + } - hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); -} + if (on) { + mv_lsb = vlan_mask & 0xFFFFFFFF; + mv_msb = vlan_mask >> vlan_mask_offset; -#define MAC_TYPE_FILTER_SUP(type) do {\ - if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\ - (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\ - (type) != ixgbe_mac_X550EM_a)\ - return -ENOTSUP;\ -} while (0) + mr_info->mr_conf[rule_id].vlan.vlan_mask = + mirror_conf->vlan.vlan_mask; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) { + if (mirror_conf->vlan.vlan_mask & (1ULL << i)) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = + mirror_conf->vlan.vlan_id[i]; + } + } else { + mv_lsb = 0; + mv_msb = 0; + mr_info->mr_conf[rule_id].vlan.vlan_mask = 0; + for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) + mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0; + } + } -static int -ixgbe_syn_filter_set(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter, - bool add) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf; + /* + * if enable pool mirror, write related pool mask register,if disable + * pool mirror, clear PFMRVM register + */ + if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) { + mirror_type |= IXGBE_MRCTL_VPME; + if (on) { + mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF; + mp_msb = mirror_conf->pool_mask >> pool_mask_offset; + mr_info->mr_conf[rule_id].pool_mask = + mirror_conf->pool_mask; - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) - return -EINVAL; + } else { + mp_lsb = 0; + mp_msb = 0; + mr_info->mr_conf[rule_id].pool_mask = 0; + } + } + if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT) + mirror_type |= IXGBE_MRCTL_UPME; + if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT) + mirror_type |= IXGBE_MRCTL_DPME; - synqf = 
IXGBE_READ_REG(hw, IXGBE_SYNQF); + /* read mirror control register and recalculate it */ + mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id)); - if (add) { - if (synqf & IXGBE_SYN_FILTER_ENABLE) - return -EINVAL; - synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & - IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + if (on) { + mr_ctl |= mirror_type; + mr_ctl &= mirror_rule_mask; + mr_ctl |= mirror_conf->dst_pool << dst_pool_offset; + } else + mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask); - if (filter->hig_pri) - synqf |= IXGBE_SYN_FILTER_SYNQFP; - else - synqf &= ~IXGBE_SYN_FILTER_SYNQFP; - } else { - if (!(synqf & IXGBE_SYN_FILTER_ENABLE)) - return -ENOENT; - synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type; + mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool; + + /* write mirrror control register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); + + /* write pool mirrror control register */ + if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), + mp_msb); } - IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); - IXGBE_WRITE_FLUSH(hw); + /* write VLAN mirrror control register */ + if (mirror_conf->rule_type == ETH_MIRROR_VLAN) { + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), + mv_msb); + } + return 0; } static int -ixgbe_syn_filter_get(struct rte_eth_dev *dev, - struct rte_eth_syn_filter *filter) +ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + int mr_ctl = 0; + uint32_t lsb_val = 0; + uint32_t msb_val = 0; + const uint8_t rule_mr_offset = 4; - if (synqf & IXGBE_SYN_FILTER_ENABLE) { - filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
1 : 0; - filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); - return 0; - } - return -ENOENT; -} + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_mirror_info *mr_info = + (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); -static int -ixgbe_syn_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) -{ - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret; + if (ixgbe_vt_check(hw) < 0) + return -ENOTSUP; - MAC_TYPE_FILTER_SUP(hw->mac.type); + memset(&mr_info->mr_conf[rule_id], 0, + sizeof(struct rte_eth_mirror_conf)); - if (filter_op == RTE_ETH_FILTER_NOP) - return 0; + /* clear PFVMCTL register */ + IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl); - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", - filter_op); - return -EINVAL; - } + /* clear pool mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val); - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = ixgbe_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, - TRUE); - break; - case RTE_ETH_FILTER_DELETE: - ret = ixgbe_syn_filter_set(dev, - (struct rte_eth_syn_filter *)arg, - FALSE); - break; - case RTE_ETH_FILTER_GET: - ret = ixgbe_syn_filter_get(dev, - (struct rte_eth_syn_filter *)arg); - break; - default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); - ret = -EINVAL; - break; - } + /* clear vlan mask register */ + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val); + IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val); - return ret; + return 0; } +static int +ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + mask |= (1 << IXGBE_MISC_VEC_ID); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + rte_intr_enable(intr_handle); -static inline enum ixgbe_5tuple_protocol -convert_protocol_type(uint8_t protocol_value) + return 0; +} + +static int +ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) { - if (protocol_value == IPPROTO_TCP) - return IXGBE_FILTER_PROTOCOL_TCP; - else if (protocol_value == IPPROTO_UDP) - return IXGBE_FILTER_PROTOCOL_UDP; - else if (protocol_value == IPPROTO_SCTP) - return IXGBE_FILTER_PROTOCOL_SCTP; - else - return IXGBE_FILTER_PROTOCOL_NONE; + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS); + mask &= ~(1 << IXGBE_MISC_VEC_ID); + RTE_SET_USED(queue_id); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + return 0; } -/* - * add a 5tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * index: the index the filter allocates. - * filter: ponter to the filter that will be added. - * rx_queue: the queue id the filter assigned to. - * - * @return - * - On success, zero. - * - On failure, a negative value. 
- */ static int -ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, - struct ixgbe_5tuple_filter *filter) +ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - int i, idx, shift; - uint32_t ftqf, sdpqf; - uint32_t l34timir = 0; - uint8_t mask = 0xff; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - /* - * look for an unused 5tuple filter index, - * and insert the filter to list. - */ - for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { - idx = i / (sizeof(uint32_t) * NBBY); - shift = i % (sizeof(uint32_t) * NBBY); - if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { - filter_info->fivetuple_mask[idx] |= 1 << shift; - filter->index = i; - TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, - filter, - entries); - break; + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask |= (1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= (1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= (1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + rte_intr_enable(intr_handle); + + return 0; +} + +static int +ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + uint32_t mask; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (queue_id < 16) { + ixgbe_disable_intr(hw); + intr->mask &= ~(1 << queue_id); + ixgbe_enable_intr(dev); + } else if (queue_id < 32) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0)); + mask &= ~(1 << queue_id); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + } else if (queue_id < 64) { + mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1)); + mask &= ~(1 << (queue_id - 32)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } + + return 0; +} + +static void +ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + tmp &= ~0xFF; + tmp |= msix_vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp); + } else { + /* rx or tx cause */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp); + } +} + +/** + * set the IVAR registers, mapping interrupt causes to vectors + * @param hw + * pointer to ixgbe_hw struct + * @direction + * 0 for Rx, 1 for Tx, -1 for other causes + * @queue + * queue to map the corresponding interrupt to + * @msix_vector + * the vector to map to the corresponding queue + */ +static void +ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction, + uint8_t queue, uint8_t msix_vector) +{ + uint32_t tmp, idx; + + msix_vector |= IXGBE_IVAR_ALLOC_VAL; + if (hw->mac.type == ixgbe_mac_82598EB) { + if (direction == 
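/*
 * Editor's note -- usage sketch, not part of this patch. These callbacks
 * back the generic Rx interrupt API; an event-driven application would
 * typically do (port_id/queue_id hypothetical):
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	// ... wait on the interrupt event fd, service the queue, then ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *
 * One hedged observation: for queues 16..63 the enable path writes back
 * "mask &= (1 << queue_id)", which keeps only the target bit of the value
 * written, while the disable path uses the symmetric "mask &= ~bit".
 * If the intent is to newly enable a vector, "mask |= bit" would be the
 * expected form; worth double-checking against the EIMS_EX semantics.
 */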
-1) + direction = 0; + idx = (((direction * 64) + queue) >> 2) & 0x1F; + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx)); + tmp &= ~(0xFF << (8 * (queue & 0x3))); + tmp |= (msix_vector << (8 * (queue & 0x3))); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp); + } else if ((hw->mac.type == ixgbe_mac_82599EB) || + (hw->mac.type == ixgbe_mac_X540)) { + if (direction == -1) { + /* other causes */ + idx = ((queue & 1) * 8); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp); + } else { + /* rx or tx causes */ + idx = ((16 * (queue & 1)) + (8 * direction)); + tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1)); + tmp &= ~(0xFF << idx); + tmp |= (msix_vector << idx); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp); } } - if (i >= IXGBE_MAX_FTQF_FILTERS) { - PMD_DRV_LOG(ERR, "5tuple filters are full."); - return -ENOSYS; +} + +static void +ixgbevf_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t q_idx; + uint32_t vector_idx = IXGBE_MISC_VEC_ID; + + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); + + /* won't configure msix register if no mapping is done + * between intr vector and event fd. + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; + + /* Configure all RX queues of VF */ + for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) { + /* Force all queues to use vector 0, + * as IXGBE_VF_MAXMSIVECTOR = 1 + */ + ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); + intr_handle->intr_vec[q_idx] = vector_idx; + } +} - sdpqf = (uint32_t)(filter->filter_info.dst_port << - IXGBE_SDPQF_DSTPORT_SHIFT); - sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); +/** + * Sets up the hardware to properly generate MSI-X interrupts + * @param dev + * board private structure + */ +static void +ixgbe_configure_msix(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t queue_id, base = IXGBE_MISC_VEC_ID; + uint32_t vec = IXGBE_MISC_VEC_ID; + uint32_t mask; + uint32_t gpie; - ftqf = (uint32_t)(filter->filter_info.proto & - IXGBE_FTQF_PROTOCOL_MASK); - ftqf |= (uint32_t)((filter->filter_info.priority & - IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); - if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. 
*/ - mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; - if (filter->filter_info.dst_ip_mask == 0) - mask &= IXGBE_FTQF_DEST_ADDR_MASK; - if (filter->filter_info.src_port_mask == 0) - mask &= IXGBE_FTQF_SOURCE_PORT_MASK; - if (filter->filter_info.dst_port_mask == 0) - mask &= IXGBE_FTQF_DEST_PORT_MASK; - if (filter->filter_info.proto_mask == 0) - mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; - ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; - ftqf |= IXGBE_FTQF_POOL_MASK_EN; - ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + /* won't configure msix register if no mapping is done + * between intr vector and event fd + */ + if (!rte_intr_dp_is_en(intr_handle)) + return; - IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + if (rte_intr_allow_others(intr_handle)) + vec = base = IXGBE_RX_VEC_START; - l34timir |= IXGBE_L34T_IMIR_RESERVE; - l34timir |= (uint32_t)(filter->queue << - IXGBE_L34T_IMIR_QUEUE_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); - return 0; + /* setup GPIE for MSI-x mode */ + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME; + /* auto clearing and auto setting corresponding bits in EIMS + * when MSI-X interrupt is triggered + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + } + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Populate the IVAR table and set the ITR values to the + * corresponding register. + */ + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } + IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), + IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); + + /* set up to autoclear timer, and the vectors */ + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | + IXGBE_EIMS_MAILBOX | + IXGBE_EIMS_LSC); + + IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); +} + +static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, + uint16_t queue_idx, uint16_t tx_rate) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rf_dec, rf_int; + uint32_t bcnrc_val; + uint16_t link_speed = dev->data->dev_link.link_speed; + + if (queue_idx >= hw->mac.max_tx_queues) + return -EINVAL; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = (uint32_t)link_speed / (uint32_t)tx_rate; + rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate; + rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate; + + bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) & + IXGBE_RTTBCNRC_RF_INT_MASK_M); + bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. 
MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise + * set as 0x4. + */ + if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len >= + IXGBE_MAX_JUMBO_FRAME_SIZE)) + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_JUMBO_FRAME); + else + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, + IXGBE_MMW_SIZE_DEFAULT); + + /* Set RTTBCNRC of queue X */ + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static void +ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + __attribute__((unused)) uint32_t index, + __attribute__((unused)) uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int diag; + + /* + * On an 82599 VF, adding the same MAC addr again is not an idempotent + * operation. Trap this case to avoid exhausting the [very limited] + * set of PF resources used to store VF MAC addresses. + */ + if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + return; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag == 0) + return; + PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag); +} + +static void +ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr; + struct ether_addr *mac_addr; + uint32_t i; + int diag; + + /* + * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does + * not support the deletion of a given MAC address. + * Instead, it requires deleting all MAC addresses, then adding back + * all MAC addresses except the one to be deleted. + */ + (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL); + + /* + * Add back all MAC addresses, except the deleted one + * and the permanent MAC address. 
+ */ + for (i = 0, mac_addr = dev->data->mac_addrs; + i < hw->mac.num_rar_entries; i++, mac_addr++) { + /* Skip the deleted MAC address */ + if (i == index) + continue; + /* Skip NULL MAC addresses */ + if (is_zero_ether_addr(mac_addr)) + continue; + /* Skip the permanent MAC address */ + if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0) + continue; + diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes); + if (diag != 0) + PMD_DRV_LOG(ERR, + "Adding again MAC address " + "%02x:%02x:%02x:%02x:%02x:%02x failed " + "diag=%d", + mac_addr->addr_bytes[0], + mac_addr->addr_bytes[1], + mac_addr->addr_bytes[2], + mac_addr->addr_bytes[3], + mac_addr->addr_bytes[4], + mac_addr->addr_bytes[5], + diag); + } +} + +static void +ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0); +} + +int +ixgbe_syn_filter_set(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t syn_info; + uint32_t synqf; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + syn_info = filter_info->syn_info; + + if (add) { + if (syn_info & IXGBE_SYN_FILTER_ENABLE) + return -EINVAL; + synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) & + IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE); + + if (filter->hig_pri) + synqf |= IXGBE_SYN_FILTER_SYNQFP; + else + synqf &= ~IXGBE_SYN_FILTER_SYNQFP; + } else { + synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + if (!(syn_info & IXGBE_SYN_FILTER_ENABLE)) + return -ENOENT; + synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE); + } + + filter_info->syn_info = synqf; + IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf); + IXGBE_WRITE_FLUSH(hw); + return 0; +} + +static int +ixgbe_syn_filter_get(struct rte_eth_dev *dev, + struct rte_eth_syn_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF); + + if (synqf & IXGBE_SYN_FILTER_ENABLE) { + filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 
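/*
 * Editor's note -- worked example for the RTTBCNRC math in
 * ixgbe_set_queue_rate_limit() above, assuming a 10G link
 * (link_speed = 10000) and tx_rate = 3000 Mbps:
 *
 *	rf_int = 10000 / 3000 = 3
 *	rf_dec = ((10000 % 3000) << 14) / 3000 = (1000 * 16384) / 3000 = 5461
 *
 * The hardware divides the link rate by rf_int + rf_dec/2^14, i.e.
 * 10000 / (3 + 5461/16384) ~= 3000 Mbps for the selected queue.
 */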
1 : 0; + filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1); + return 0; + } + return -ENOENT; +} + +static int +ixgbe_syn_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_syn_filter_set(dev, + (struct rte_eth_syn_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_syn_filter_get(dev, + (struct rte_eth_syn_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); + ret = -EINVAL; + break; + } + + return ret; +} + + +static inline enum ixgbe_5tuple_protocol +convert_protocol_type(uint8_t protocol_value) +{ + if (protocol_value == IPPROTO_TCP) + return IXGBE_FILTER_PROTOCOL_TCP; + else if (protocol_value == IPPROTO_UDP) + return IXGBE_FILTER_PROTOCOL_UDP; + else if (protocol_value == IPPROTO_SCTP) + return IXGBE_FILTER_PROTOCOL_SCTP; + else + return IXGBE_FILTER_PROTOCOL_NONE; +} + +/* inject a 5-tuple filter to HW */ +static inline void +ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int i; + uint32_t ftqf, sdpqf; + uint32_t l34timir = 0; + uint8_t mask = 0xff; + + i = filter->index; + + sdpqf = (uint32_t)(filter->filter_info.dst_port << + IXGBE_SDPQF_DSTPORT_SHIFT); + sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT); + + ftqf = (uint32_t)(filter->filter_info.proto & + IXGBE_FTQF_PROTOCOL_MASK); + ftqf |= (uint32_t)((filter->filter_info.priority & + IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT); + if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */ + mask &= IXGBE_FTQF_SOURCE_ADDR_MASK; + if (filter->filter_info.dst_ip_mask == 0) + mask &= IXGBE_FTQF_DEST_ADDR_MASK; + if (filter->filter_info.src_port_mask == 0) + mask &= IXGBE_FTQF_SOURCE_PORT_MASK; + if (filter->filter_info.dst_port_mask == 0) + mask &= IXGBE_FTQF_DEST_PORT_MASK; + if (filter->filter_info.proto_mask == 0) + mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK; + ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT; + ftqf |= IXGBE_FTQF_POOL_MASK_EN; + ftqf |= IXGBE_FTQF_QUEUE_ENABLE; + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf); + + l34timir |= IXGBE_L34T_IMIR_RESERVE; + l34timir |= (uint32_t)(filter->queue << + IXGBE_L34T_IMIR_QUEUE_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir); +} + +/* + * add a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * index: the index the filter allocates. + * filter: pointer to the filter that will be added. + * rx_queue: the queue id the filter is assigned to. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + int i, idx, shift; + + /* + * look for an unused 5tuple filter index, + * and insert the filter into the list. + */ + for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) { + idx = i / (sizeof(uint32_t) * NBBY); + shift = i % (sizeof(uint32_t) * NBBY); + if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) { + filter_info->fivetuple_mask[idx] |= 1 << shift; + filter->index = i; + TAILQ_INSERT_TAIL(&filter_info->fivetuple_list, + filter, + entries); + break; + } + } + if (i >= IXGBE_MAX_FTQF_FILTERS) { + PMD_DRV_LOG(ERR, "5tuple filters are full."); + return -ENOSYS; + } + + ixgbe_inject_5tuple_filter(dev, filter); + + return 0; +} + +/* + * remove a 5tuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * filter: pointer to the filter that will be removed. + */ +static void +ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, + struct ixgbe_5tuple_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint16_t index = filter->index; + + filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= + ~(1 << (index % (sizeof(uint32_t) * NBBY))); + TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); + rte_free(filter); + + IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); +} + +static int +ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct ixgbe_hw *hw; + uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) + return -EINVAL; + + /* refuse mtu that requires the support of scattered packets when this + * feature has not been enabled before. + */ + if (!dev->data->scattered_rx && + (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > + dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) + return -EINVAL; + + /* + * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU + * request of the version 2.0 of the mailbox API. + * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 + * of the mailbox API. 
+ * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers + * prior to 3.11.33 which contains the following change: + * "ixgbe: Enable jumbo frames support w/ SR-IOV" + */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; + return 0; +} + +static inline struct ixgbe_5tuple_filter * +ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, + struct ixgbe_5tuple_filter_info *key) +{ + struct ixgbe_5tuple_filter *it; + + TAILQ_FOREACH(it, filter_list, entries) { + if (memcmp(key, &it->filter_info, + sizeof(struct ixgbe_5tuple_filter_info)) == 0) { + return it; + } + } + return NULL; +} + +/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */ +static inline int +ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, + struct ixgbe_5tuple_filter_info *filter_info) +{ + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || + filter->priority > IXGBE_5TUPLE_MAX_PRI || + filter->priority < IXGBE_5TUPLE_MIN_PRI) + return -EINVAL; + + switch (filter->dst_ip_mask) { + case UINT32_MAX: + filter_info->dst_ip_mask = 0; + filter_info->dst_ip = filter->dst_ip; + break; + case 0: + filter_info->dst_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_ip mask."); + return -EINVAL; + } + + switch (filter->src_ip_mask) { + case UINT32_MAX: + filter_info->src_ip_mask = 0; + filter_info->src_ip = filter->src_ip; + break; + case 0: + filter_info->src_ip_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_ip mask."); + return -EINVAL; + } + + switch (filter->dst_port_mask) { + case UINT16_MAX: + filter_info->dst_port_mask = 0; + filter_info->dst_port = filter->dst_port; + break; + case 0: + filter_info->dst_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid dst_port mask."); + return -EINVAL; + } + + switch (filter->src_port_mask) { + case UINT16_MAX: + filter_info->src_port_mask = 0; + filter_info->src_port = filter->src_port; + break; + case 0: + filter_info->src_port_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid src_port mask."); + return -EINVAL; + } + + switch (filter->proto_mask) { + case UINT8_MAX: + filter_info->proto_mask = 0; + filter_info->proto = + convert_protocol_type(filter->proto); + break; + case 0: + filter_info->proto_mask = 1; + break; + default: + PMD_DRV_LOG(ERR, "invalid protocol mask."); + return -EINVAL; + } + + filter_info->priority = (uint8_t)filter->priority; + return 0; +} + +/* + * add or delete a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * add: if true, add filter, if false, remove filter + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +int +ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter, + bool add) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter != NULL && add) { + PMD_DRV_LOG(ERR, "filter exists."); + return -EEXIST; + } + if (filter == NULL && !add) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + + if (add) { + filter = rte_zmalloc("ixgbe_5tuple_filter", + sizeof(struct ixgbe_5tuple_filter), 0); + if (filter == NULL) + return -ENOMEM; + (void)rte_memcpy(&filter->filter_info, + &filter_5tuple, + sizeof(struct ixgbe_5tuple_filter_info)); + filter->queue = ntuple_filter->queue; + ret = ixgbe_add_5tuple_filter(dev, filter); + if (ret < 0) { + rte_free(filter); + return ret; + } + } else + ixgbe_remove_5tuple_filter(dev, filter); + + return 0; +} + +/* + * get a ntuple filter + * + * @param + * dev: Pointer to struct rte_eth_dev. + * ntuple_filter: Pointer to struct rte_eth_ntuple_filter + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, + struct rte_eth_ntuple_filter *ntuple_filter) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + struct ixgbe_5tuple_filter_info filter_5tuple; + struct ixgbe_5tuple_filter *filter; + int ret; + + if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { + PMD_DRV_LOG(ERR, "only 5tuple is supported."); + return -EINVAL; + } + + memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); + ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); + if (ret < 0) + return ret; + + filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, + &filter_5tuple); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "filter doesn't exist."); + return -ENOENT; + } + ntuple_filter->queue = filter->queue; + return 0; +} + +/* + * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ntuple_filter(dev, + (struct rte_eth_ntuple_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +int +ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter, + bool add) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf = 0; + uint32_t etqs = 0; + int ret; + struct ixgbe_ethertype_filter ethertype_filter; + + if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) + return -EINVAL; + + if (filter->ether_type == ETHER_TYPE_IPv4 || + filter->ether_type == ETHER_TYPE_IPv6) { + PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" + " ethertype filter.", filter->ether_type); + return -EINVAL; + } + + if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { + PMD_DRV_LOG(ERR, "mac compare is unsupported."); + return -EINVAL; + } + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { + PMD_DRV_LOG(ERR, "drop option is unsupported."); + return -EINVAL; + } + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret >= 0 && add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", + filter->ether_type); + return -EEXIST; + } + if (ret < 0 && !add) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + if (add) { + etqf = IXGBE_ETQF_FILTER_EN; + etqf |= (uint32_t)filter->ether_type; + etqs |= (uint32_t)((filter->queue << + IXGBE_ETQS_RX_QUEUE_SHIFT) & + IXGBE_ETQS_RX_QUEUE); + etqs |= IXGBE_ETQS_QUEUE_EN; + + ethertype_filter.ethertype = filter->ether_type; + ethertype_filter.etqf = etqf; + ethertype_filter.etqs = etqs; + ethertype_filter.conf = FALSE; + ret = ixgbe_ethertype_filter_insert(filter_info, + ðertype_filter); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype filters are full."); + return -ENOSPC; + } + } else { + ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); + if (ret < 0) + return -ENOSYS; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +static int +ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, + struct rte_eth_ethertype_filter *filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + uint32_t etqf, etqs; + int ret; + + ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); + if (ret < 0) { + PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", + filter->ether_type); + return -ENOENT; + } + + etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); + if (etqf & 
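/*
 * Editor's note -- usage sketch, not part of this patch. The syn, ntuple
 * and ethertype handlers above all sit behind rte_eth_dev_filter_ctrl();
 * e.g. steering a hypothetical ether type 0xAAAA to queue 4 (port_id and
 * the values are made up):
 *
 *	struct rte_eth_ethertype_filter f = {
 *		.ether_type = 0xAAAA,	// hypothetical, non-IP ether type
 *		.flags = 0,
 *		.queue = 4,
 *	};
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
 *					  RTE_ETH_FILTER_ADD, &f);
 */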
IXGBE_ETQF_FILTER_EN) { + etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); + filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; + filter->flags = 0; + filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> + IXGBE_ETQS_RX_QUEUE_SHIFT; + return 0; + } + return -ENOENT; +} + +/* + * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ +static int +ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; + + MAC_TYPE_FILTER_SUP(hw->mac.type); + + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + TRUE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_add_del_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg, + FALSE); + break; + case RTE_ETH_FILTER_GET: + ret = ixgbe_get_ethertype_filter(dev, + (struct rte_eth_ethertype_filter *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; +} + +static int +ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + switch (filter_type) { + case RTE_ETH_FILTER_NTUPLE: + ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_ETHERTYPE: + ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_SYN: + ret = ixgbe_syn_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_FDIR: + ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_L2_TUNNEL: + ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); + break; + case RTE_ETH_FILTER_GENERIC: + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + *(const void **)arg = &ixgbe_flow_ops; + break; + default: + PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", + filter_type); + ret = -EINVAL; + break; + } + + return ret; +} + +static u8 * +ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, + u8 **mc_addr_ptr, u32 *vmdq) +{ + u8 *mc_addr; + + *vmdq = 0; + mc_addr = *mc_addr_ptr; + *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); + return mc_addr; +} + +static int +ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, + struct ether_addr *mc_addr_set, + uint32_t nb_mc_addr) +{ + struct ixgbe_hw *hw; + u8 *mc_addr_list; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mc_addr_list = (u8 *)mc_addr_set; + return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, + ixgbe_dev_addr_list_itr, TRUE); +} + +static uint64_t +ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t systime_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* SYSTIMEL stores ns and SYSTIMEH stores seconds. 
*/ + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + * NSEC_PER_SEC; + break; + default: + systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) + << 32; + } + + return systime_cycles; +} + +static uint64_t +ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t rx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* RXSTMPL stores ns and RXSTMPH stores seconds. */ + rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) + << 32; + } + + return rx_tstamp_cycles; +} + +static uint64_t +ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint64_t tx_tstamp_cycles; + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + * NSEC_PER_SEC; + break; + default: + /* TXSTMPL stores ns and TXSTMPH stores seconds. */ + tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) + << 32; + } + + return tx_tstamp_cycles; +} + +static void +ixgbe_start_timecounters(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + struct rte_eth_link link; + uint32_t incval = 0; + uint32_t shift = 0; + + /* Get current link speed. */ + memset(&link, 0, sizeof(link)); + ixgbe_dev_link_update(dev, 1); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + + switch (link.link_speed) { + case ETH_SPEED_NUM_100M: + incval = IXGBE_INCVAL_100; + shift = IXGBE_INCVAL_SHIFT_100; + break; + case ETH_SPEED_NUM_1G: + incval = IXGBE_INCVAL_1GB; + shift = IXGBE_INCVAL_SHIFT_1GB; + break; + case ETH_SPEED_NUM_10G: + default: + incval = IXGBE_INCVAL_10GB; + shift = IXGBE_INCVAL_SHIFT_10GB; + break; + } + + switch (hw->mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* Independent of link speed. */ + incval = 1; + /* Cycles read will be interpreted as ns. */ + shift = 0; + /* Fall-through */ + case ixgbe_mac_X540: + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: + incval >>= IXGBE_INCVAL_SHIFT_82599; + shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | incval); + break; + default: + /* Not supported. 
*/ + return; + } + + memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); + + adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->systime_tc.cc_shift = shift; + adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->rx_tstamp_tc.cc_shift = shift; + adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + + adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; + adapter->tx_tstamp_tc.cc_shift = shift; + adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; } -/* - * remove a 5tuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * filter: the pointer of the filter will be removed. - */ -static void -ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev, - struct ixgbe_5tuple_filter *filter) +static int +ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - uint16_t index = filter->index; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; - filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &= - ~(1 << (index % (sizeof(uint32_t) * NBBY))); - TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries); - rte_free(filter); + adapter->systime_tc.nsec += delta; + adapter->rx_tstamp_tc.nsec += delta; + adapter->tx_tstamp_tc.nsec += delta; - IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0); + return 0; } static int -ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) { - struct ixgbe_hw *hw; - uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint64_t ns; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; - hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + ns = rte_timespec_to_ns(ts); + /* Set the timecounters to a new value. */ + adapter->systime_tc.nsec = ns; + adapter->rx_tstamp_tc.nsec = ns; + adapter->tx_tstamp_tc.nsec = ns; - if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) - return -EINVAL; + return 0; +} - /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ - if (!dev->data->scattered_rx && - (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > - dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) - return -EINVAL; +static int +ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +{ + uint64_t ns, systime_cycles; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; - /* - * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU - * request of the version 2.0 of the mailbox API. - * For now, use the IXGBE_VF_SET_LPE request of the version 1.0 - * of the mailbox API. 
- * This call to IXGBE_SET_LPE action won't work with ixgbe pf drivers - * prior to 3.11.33 which contains the following change: - * "ixgbe: Enable jumbo frames support w/ SR-IOV" - */ - ixgbevf_rlpml_set_vf(hw, max_frame); + systime_cycles = ixgbe_read_systime_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); + *ts = rte_ns_to_timespec(ns); - /* update max frame size */ - dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame; return 0; } -#define MAC_TYPE_FILTER_SUP_EXT(type) do {\ - if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\ - return -ENOTSUP;\ -} while (0) - -static inline struct ixgbe_5tuple_filter * -ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list, - struct ixgbe_5tuple_filter_info *key) +static int +ixgbe_timesync_enable(struct rte_eth_dev *dev) { - struct ixgbe_5tuple_filter *it; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; + uint32_t tsauxc; - TAILQ_FOREACH(it, filter_list, entries) { - if (memcmp(key, &it->filter_info, - sizeof(struct ixgbe_5tuple_filter_info)) == 0) { - return it; - } - } - return NULL; -} + /* Stop the timesync system time. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); + /* Reset the timesync system time value. */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); -/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/ -static inline int -ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter, - struct ixgbe_5tuple_filter_info *filter_info) -{ - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM || - filter->priority > IXGBE_5TUPLE_MAX_PRI || - filter->priority < IXGBE_5TUPLE_MIN_PRI) - return -EINVAL; + /* Enable system time for platforms where it isn't on by default. */ + tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); + tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - switch (filter->dst_ip_mask) { - case UINT32_MAX: - filter_info->dst_ip_mask = 0; - filter_info->dst_ip = filter->dst_ip; - break; - case 0: - filter_info->dst_ip_mask = 1; - break; - default: - PMD_DRV_LOG(ERR, "invalid dst_ip mask."); - return -EINVAL; - } + ixgbe_start_timecounters(dev); - switch (filter->src_ip_mask) { - case UINT32_MAX: - filter_info->src_ip_mask = 0; - filter_info->src_ip = filter->src_ip; - break; - case 0: - filter_info->src_ip_mask = 1; - break; - default: - PMD_DRV_LOG(ERR, "invalid src_ip mask."); - return -EINVAL; - } + /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), + (ETHER_TYPE_1588 | + IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_1588)); - switch (filter->dst_port_mask) { - case UINT16_MAX: - filter_info->dst_port_mask = 0; - filter_info->dst_port = filter->dst_port; - break; - case 0: - filter_info->dst_port_mask = 1; - break; - default: - PMD_DRV_LOG(ERR, "invalid dst_port mask."); - return -EINVAL; - } + /* Enable timestamping of received PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); - switch (filter->src_port_mask) { - case UINT16_MAX: - filter_info->src_port_mask = 0; - filter_info->src_port = filter->src_port; - break; - case 0: - filter_info->src_port_mask = 1; - break; - default: - PMD_DRV_LOG(ERR, "invalid src_port mask."); - return -EINVAL; - } + /* Enable timestamping of transmitted PTP packets. 
*/ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); - switch (filter->proto_mask) { - case UINT8_MAX: - filter_info->proto_mask = 0; - filter_info->proto = - convert_protocol_type(filter->proto); - break; - case 0: - filter_info->proto_mask = 1; - break; - default: - PMD_DRV_LOG(ERR, "invalid protocol mask."); - return -EINVAL; - } + IXGBE_WRITE_FLUSH(hw); - filter_info->priority = (uint8_t)filter->priority; return 0; } -/* - * add or delete a ntuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * ntuple_filter: Pointer to struct rte_eth_ntuple_filter - * add: if true, add filter, if false, remove filter - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter, - bool add) +ixgbe_timesync_disable(struct rte_eth_dev *dev) { - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - struct ixgbe_5tuple_filter_info filter_5tuple; - struct ixgbe_5tuple_filter *filter; - int ret; - - if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { - PMD_DRV_LOG(ERR, "only 5tuple is supported."); - return -EINVAL; - } - - memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); - ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); - if (ret < 0) - return ret; - - filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, - &filter_5tuple); - if (filter != NULL && add) { - PMD_DRV_LOG(ERR, "filter exists."); - return -EEXIST; - } - if (filter == NULL && !add) { - PMD_DRV_LOG(ERR, "filter doesn't exist."); - return -ENOENT; - } + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t tsync_ctl; - if (add) { - filter = rte_zmalloc("ixgbe_5tuple_filter", - sizeof(struct ixgbe_5tuple_filter), 0); - if (filter == NULL) - return -ENOMEM; - (void)rte_memcpy(&filter->filter_info, - &filter_5tuple, - sizeof(struct ixgbe_5tuple_filter_info)); - filter->queue = ntuple_filter->queue; - ret = ixgbe_add_5tuple_filter(dev, filter); - if (ret < 0) { - rte_free(filter); - return ret; - } - } else - ixgbe_remove_5tuple_filter(dev, filter); + /* Disable timestamping of transmitted PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + + /* Disable timestamping of received PTP packets. */ + tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; + IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + + /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + + /* Stop incrementing the System Time registers. */ + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); return 0; } -/* - * get a ntuple filter - * - * @param - * dev: Pointer to struct rte_eth_dev. - * ntuple_filter: Pointer to struct rte_eth_ntuple_filter - * - * @return - * - On success, zero. - * - On failure, a negative value. 
- */ static int -ixgbe_get_ntuple_filter(struct rte_eth_dev *dev, - struct rte_eth_ntuple_filter *ntuple_filter) +ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp, + uint32_t flags __rte_unused) { - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - struct ixgbe_5tuple_filter_info filter_5tuple; - struct ixgbe_5tuple_filter *filter; - int ret; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + uint32_t tsync_rxctl; + uint64_t rx_tstamp_cycles; + uint64_t ns; - if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) { - PMD_DRV_LOG(ERR, "only 5tuple is supported."); + tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) return -EINVAL; - } - memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info)); - ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple); - if (ret < 0) - return ret; + rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); - filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list, - &filter_5tuple); - if (filter == NULL) { - PMD_DRV_LOG(ERR, "filter doesn't exist."); - return -ENOENT; - } - ntuple_filter->queue = filter->queue; - return 0; + return 0; } -/* - * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter. - * @dev: pointer to rte_eth_dev structure - * @filter_op:operation will be taken. - * @arg: a pointer to specific structure corresponding to the filter_op - * - * @return - * - On success, zero. - * - On failure, a negative value. - */ static int -ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) +ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, + struct timespec *timestamp) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret; - - MAC_TYPE_FILTER_SUP_EXT(hw->mac.type); - - if (filter_op == RTE_ETH_FILTER_NOP) - return 0; + struct ixgbe_adapter *adapter = + (struct ixgbe_adapter *)dev->data->dev_private; + uint32_t tsync_txctl; + uint64_t tx_tstamp_cycles; + uint64_t ns; - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", - filter_op); + tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); + if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) return -EINVAL; - } - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = ixgbe_add_del_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg, - TRUE); - break; - case RTE_ETH_FILTER_DELETE: - ret = ixgbe_add_del_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg, - FALSE); - break; - case RTE_ETH_FILTER_GET: - ret = ixgbe_get_ntuple_filter(dev, - (struct rte_eth_ntuple_filter *)arg); - break; - default: - PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); - ret = -EINVAL; - break; - } - return ret; + tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); + ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); + *timestamp = rte_ns_to_timespec(ns); + + return 0; } -static inline int -ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, - uint16_t ethertype) +static int +ixgbe_get_reg_length(struct rte_eth_dev *dev) { - int i; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; + const 
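/*
 * Editor's note -- usage sketch, not part of this patch. After
 * rte_eth_timesync_enable(port_id), an IEEE 1588 application polls the
 * latched timestamps roughly like this (port_id hypothetical, error
 * handling elided):
 *
 *	struct timespec ts;
 *
 *	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("rx stamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *	if (rte_eth_timesync_read_tx_timestamp(port_id, &ts) == 0)
 *		printf("tx stamp: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *
 * -EINVAL from either call above just means no new timestamp has been
 * latched yet (the *_VALID bit is clear).
 */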
struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? + ixgbe_regs_mac_82598EB : ixgbe_regs_others; - for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { - if (filter_info->ethertype_filters[i] == ethertype && - (filter_info->ethertype_mask & (1 << i))) - return i; - } - return -1; + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_regs_group_count(reg_group); + + return count; } -static inline int -ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info, - uint16_t ethertype) +static int +ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) { - int i; + int count = 0; + int g_ind = 0; + const struct reg_info *reg_group; - for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) { - if (!(filter_info->ethertype_mask & (1 << i))) { - filter_info->ethertype_mask |= 1 << i; - filter_info->ethertype_filters[i] = ethertype; - return i; - } - } - return -1; -} + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_regs_group_count(reg_group); -static inline int -ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info, - uint8_t idx) -{ - if (idx >= IXGBE_MAX_ETQF_FILTERS) - return -1; - filter_info->ethertype_mask &= ~(1 << idx); - filter_info->ethertype_filters[idx] = 0; - return idx; + return count; } static int -ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter, - bool add) +ixgbe_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - uint32_t etqf = 0; - uint32_t etqs = 0; - int ret; - - if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) - return -EINVAL; - - if (filter->ether_type == ETHER_TYPE_IPv4 || - filter->ether_type == ETHER_TYPE_IPv6) { - PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" - " ethertype filter.", filter->ether_type); - return -EINVAL; - } - - if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { - PMD_DRV_LOG(ERR, "mac compare is unsupported."); - return -EINVAL; - } - if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) { - PMD_DRV_LOG(ERR, "drop option is unsupported."); - return -EINVAL; - } + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; + const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
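/*
 * Editor's note -- usage sketch, not part of this patch. Like the eeprom
 * helpers further down, the register dump is a two-call API: pass a NULL
 * data pointer to learn the size, then fetch (malloc/return checks
 * elided; port_id hypothetical):
 *
 *	struct rte_dev_reg_info info = { .data = NULL };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = malloc(info.length * info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */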
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others; - ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); - if (ret >= 0 && add) { - PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.", - filter->ether_type); - return -EEXIST; - } - if (ret < 0 && !add) { - PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", - filter->ether_type); - return -ENOENT; + if (data == NULL) { + regs->length = ixgbe_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; } - if (add) { - ret = ixgbe_ethertype_filter_insert(filter_info, - filter->ether_type); - if (ret < 0) { - PMD_DRV_LOG(ERR, "ethertype filters are full."); - return -ENOSYS; - } - etqf = IXGBE_ETQF_FILTER_EN; - etqf |= (uint32_t)filter->ether_type; - etqs |= (uint32_t)((filter->queue << - IXGBE_ETQS_RX_QUEUE_SHIFT) & - IXGBE_ETQS_RX_QUEUE); - etqs |= IXGBE_ETQS_QUEUE_EN; - } else { - ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret); - if (ret < 0) - return -ENOSYS; + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = reg_set[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); + return 0; } - IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs); - IXGBE_WRITE_FLUSH(hw); - return 0; + return -ENOTSUP; } static int -ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, - struct rte_eth_ethertype_filter *filter) +ixgbevf_get_regs(struct rte_eth_dev *dev, + struct rte_dev_reg_info *regs) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_filter_info *filter_info = - IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); - uint32_t etqf, etqs; - int ret; + uint32_t *data = regs->data; + int g_ind = 0; + int count = 0; + const struct reg_info *reg_group; - ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type); - if (ret < 0) { - PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.", - filter->ether_type); - return -ENOENT; + if (data == NULL) { + regs->length = ixgbevf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; } - etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret)); - if (etqf & IXGBE_ETQF_FILTER_EN) { - etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret)); - filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE; - filter->flags = 0; - filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> - IXGBE_ETQS_RX_QUEUE_SHIFT; + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { + regs->version = hw->mac.type << 24 | hw->revision_id << 16 | + hw->device_id; + while ((reg_group = ixgbevf_regs[g_ind++])) + count += ixgbe_read_regs_group(dev, &data[count], + reg_group); return 0; } - return -ENOENT; + + return -ENOTSUP; } -/* - * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter. - * @dev: pointer to rte_eth_dev structure - * @filter_op:operation will be taken. 
- * @arg: a pointer to specific structure corresponding to the filter_op - */ static int -ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) +ixgbe_get_eeprom_length(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret; - MAC_TYPE_FILTER_SUP(hw->mac.type); + /* Return unit is byte count */ + return hw->eeprom.word_size * 2; +} - if (filter_op == RTE_ETH_FILTER_NOP) - return 0; +static int +ixgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", - filter_op); + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) return -EINVAL; - } - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = ixgbe_add_del_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg, - TRUE); - break; - case RTE_ETH_FILTER_DELETE: - ret = ixgbe_add_del_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg, - FALSE); - break; - case RTE_ETH_FILTER_GET: - ret = ixgbe_get_ethertype_filter(dev, - (struct rte_eth_ethertype_filter *)arg); - break; - default: - PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); - ret = -EINVAL; - break; - } - return ret; + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.read_buffer(hw, first, length, data); } static int -ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev, - enum rte_filter_type filter_type, - enum rte_filter_op filter_op, - void *arg) +ixgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *in_eeprom) { - int ret = -EINVAL; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + uint16_t *data = in_eeprom->data; + int first, length; - switch (filter_type) { - case RTE_ETH_FILTER_NTUPLE: - ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_ETHERTYPE: - ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_SYN: - ret = ixgbe_syn_filter_handle(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_FDIR: - ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg); - break; - case RTE_ETH_FILTER_L2_TUNNEL: - ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg); - break; + first = in_eeprom->offset >> 1; + length = in_eeprom->length >> 1; + if ((first > hw->eeprom.word_size) || + ((first + length) > hw->eeprom.word_size)) + return -EINVAL; + + in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + return eeprom->ops.write_buffer(hw, first, length, data); +} + +uint16_t +ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + return ETH_RSS_RETA_SIZE_512; + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return ETH_RSS_RETA_SIZE_64; default: - PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", - filter_type); - break; + return ETH_RSS_RETA_SIZE_128; } +} - return ret; +uint32_t +ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { + switch (mac_type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + if 
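The EEPROM helpers above take byte-based offset and length values from rte_dev_eeprom_info, but the underlying eeprom ops work on 16-bit words, hence the >> 1 conversions and the bounds check against hw->eeprom.word_size. The same rule in isolation (a sketch, not driver code):

#include <stdint.h>

/* byte-addressed request -> word-addressed EEPROM range check */
static int
eeprom_range_valid(uint32_t byte_off, uint32_t byte_len, uint16_t word_size)
{
	uint32_t first = byte_off >> 1;	/* byte offset -> word index */
	uint32_t words = byte_len >> 1;	/* byte count  -> word count */

	/* mirrors the driver check: the range must stay inside the part */
	return first <= word_size && first + words <= word_size;
}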
(reta_idx < ETH_RSS_RETA_SIZE_128) + return IXGBE_RETA(reta_idx >> 2); + else + return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRETA(reta_idx >> 2); + default: + return IXGBE_RETA(reta_idx >> 2); + } } -static u8 * -ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw, - u8 **mc_addr_ptr, u32 *vmdq) -{ - u8 *mc_addr; +uint32_t +ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFMRQC; + default: + return IXGBE_MRQC; + } +} - *vmdq = 0; - mc_addr = *mc_addr_ptr; - *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr)); - return mc_addr; +uint32_t +ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { + switch (mac_type) { + case ixgbe_mac_X550_vf: + case ixgbe_mac_X550EM_x_vf: + case ixgbe_mac_X550EM_a_vf: + return IXGBE_VFRSSRK(i); + default: + return IXGBE_RSSRK(i); + } +} + +bool +ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { + switch (mac_type) { + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + return 0; + default: + return 1; + } } static int -ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev, - struct ether_addr *mc_addr_set, - uint32_t nb_mc_addr) +ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, + struct rte_eth_dcb_info *dcb_info) { - struct ixgbe_hw *hw; - u8 *mc_addr_list; + struct ixgbe_dcb_config *dcb_config = + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + struct ixgbe_dcb_tc_config *tc; + uint8_t i, j; - hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - mc_addr_list = (u8 *)mc_addr_set; - return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr, - ixgbe_dev_addr_list_itr, TRUE); + if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) + dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; + else + dcb_info->nb_tcs = 1; + + if (dcb_config->vt_mode) { /* vt is enabled*/ + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; + for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { + for (j = 0; j < dcb_info->nb_tcs; j++) { + dcb_info->tc_queue.tc_rxq[i][j].base = + i * dcb_info->nb_tcs + j; + dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; + dcb_info->tc_queue.tc_txq[i][j].base = + i * dcb_info->nb_tcs + j; + dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; + } + } + } else { /* vt is disabled*/ + struct rte_eth_dcb_rx_conf *rx_conf = + &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; + if (dcb_info->nb_tcs == ETH_4_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 0; + dcb_info->tc_queue.tc_txq[0][1].base = 64; + dcb_info->tc_queue.tc_txq[0][2].base = 96; + dcb_info->tc_queue.tc_txq[0][3].base = 112; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + } else if (dcb_info->nb_tcs == ETH_8_TCS) { + for (i = 0; i < dcb_info->nb_tcs; i++) { + dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; + dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; + } + dcb_info->tc_queue.tc_txq[0][0].base = 
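ixgbe_reta_reg_get() above maps a redirection-table index to a register: four one-byte entries share each 32-bit RETA register (hence the >> 2), and on X550 the entries past 128 spill into the separate ERETA array. Locating a single entry, as a sketch:

#include <stdint.h>

/* four 8-bit RETA entries are packed into each 32-bit register */
static void
reta_locate(uint16_t reta_idx, uint32_t *reg_idx, uint32_t *bit_shift)
{
	*reg_idx = reta_idx >> 2;	   /* which RETA/ERETA register */
	*bit_shift = (reta_idx & 3) * 8;   /* which byte lane inside it */
}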
0; + dcb_info->tc_queue.tc_txq[0][1].base = 32; + dcb_info->tc_queue.tc_txq[0][2].base = 64; + dcb_info->tc_queue.tc_txq[0][3].base = 80; + dcb_info->tc_queue.tc_txq[0][4].base = 96; + dcb_info->tc_queue.tc_txq[0][5].base = 104; + dcb_info->tc_queue.tc_txq[0][6].base = 112; + dcb_info->tc_queue.tc_txq[0][7].base = 120; + dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; + dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; + dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; + dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; + } + } + for (i = 0; i < dcb_info->nb_tcs; i++) { + tc = &dcb_config->tc_config[i]; + dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; + } + return 0; } -static uint64_t -ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev) +/* Update e-tag ether type */ +static int +ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, + uint16_t ether_type) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t systime_cycles; + uint32_t etag_etype; - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */ - systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) - * NSEC_PER_SEC; - break; - default: - systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) - << 32; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; } - return systime_cycles; + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; + etag_etype |= ether_type; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; } -static uint64_t -ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev) +/* Config l2 tunnel ether type */ +static int +ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) { + int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t rx_tstamp_cycles; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* RXSTMPL stores ns and RXSTMPH stores seconds. */ - rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); - rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) - * NSEC_PER_SEC; + if (l2_tunnel == NULL) + return -EINVAL; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type; + ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); break; default: - /* RXSTMPL stores ns and RXSTMPH stores seconds. 
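ixgbe_dev_get_dcb_info() hard-codes the 82599 queue layout: with 8 TCs the TX queues are carved into unequal blocks, unlike the uniform 16-queue RX split. The same mapping expressed as tables, values copied from the assignments above (illustrative only):

#include <stdint.h>

/* 8-TC TX layout: base queue and queue count per traffic class */
static const uint8_t txq_base_8tc[8]  = {0, 32, 64, 80, 96, 104, 112, 120};
static const uint8_t txq_count_8tc[8] = {32, 32, 16, 16, 8, 8, 8, 8};

/* 4-TC TX layout, from the ETH_4_TCS branch */
static const uint8_t txq_base_4tc[4]  = {0, 64, 96, 112};
static const uint8_t txq_count_4tc[4] = {64, 32, 16, 16};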
*/ - rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); - rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) - << 32; + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; } - return rx_tstamp_cycles; + return ret; } -static uint64_t -ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev) +/* Enable e-tag tunnel */ +static int +ixgbe_e_tag_enable(struct ixgbe_hw *hw) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint64_t tx_tstamp_cycles; + uint32_t etag_etype; - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* TXSTMPL stores ns and TXSTMPH stores seconds. */ - tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); - tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) - * NSEC_PER_SEC; - break; - default: - /* TXSTMPL stores ns and TXSTMPH stores seconds. */ - tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); - tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) - << 32; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; } - return tx_tstamp_cycles; + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype |= IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); + + return 0; } -static void -ixgbe_start_timecounters(struct rte_eth_dev *dev) +/* Enable l2 tunnel */ +static int +ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) { + int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; - struct rte_eth_link link; - uint32_t incval = 0; - uint32_t shift = 0; - - /* Get current link speed. */ - memset(&link, 0, sizeof(link)); - ixgbe_dev_link_update(dev, 1); - rte_ixgbe_dev_atomic_read_link_status(dev, &link); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); - switch (link.link_speed) { - case ETH_SPEED_NUM_100M: - incval = IXGBE_INCVAL_100; - shift = IXGBE_INCVAL_SHIFT_100; - break; - case ETH_SPEED_NUM_1G: - incval = IXGBE_INCVAL_1GB; - shift = IXGBE_INCVAL_SHIFT_1GB; + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = TRUE; + ret = ixgbe_e_tag_enable(hw); break; - case ETH_SPEED_NUM_10G: default: - incval = IXGBE_INCVAL_10GB; - shift = IXGBE_INCVAL_SHIFT_10GB; - break; - } - - switch (hw->mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - /* Independent of link speed. */ - incval = 1; - /* Cycles read will be interpreted as ns. */ - shift = 0; - /* Fall-through */ - case ixgbe_mac_X540: - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); - break; - case ixgbe_mac_82599EB: - incval >>= IXGBE_INCVAL_SHIFT_82599; - shift -= IXGBE_INCVAL_SHIFT_82599; - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | incval); + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; break; - default: - /* Not supported. 
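The cyclecounter readers shuffled above handle two layouts: X550-class MACs keep nanoseconds in the low register and whole seconds in the high one, while older MACs expose one 64-bit cycle count split across two 32-bit reads. Both combinations in isolation (sketch; lo and hi stand for the SYSTIML/SYSTIMH register reads):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* X550 family: hi counts seconds, lo counts nanoseconds */
static uint64_t
systime_x550(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo + (uint64_t)hi * NSEC_PER_SEC;
}

/* 82599/X540: a plain 64-bit cycle counter split across two registers */
static uint64_t
systime_legacy(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}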
*/ - return; } - memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter)); - memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter)); - memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter)); - - adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; - adapter->systime_tc.cc_shift = shift; - adapter->systime_tc.nsec_mask = (1ULL << shift) - 1; - - adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; - adapter->rx_tstamp_tc.cc_shift = shift; - adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; - - adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK; - adapter->tx_tstamp_tc.cc_shift = shift; - adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1; + return ret; } +/* Disable e-tag tunnel */ static int -ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta) +ixgbe_e_tag_disable(struct ixgbe_hw *hw) { - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; - - adapter->systime_tc.nsec += delta; - adapter->rx_tstamp_tc.nsec += delta; - adapter->tx_tstamp_tc.nsec += delta; - - return 0; -} + uint32_t etag_etype; -static int -ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts) -{ - uint64_t ns; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } - ns = rte_timespec_to_ns(ts); - /* Set the timecounters to a new value. */ - adapter->systime_tc.nsec = ns; - adapter->rx_tstamp_tc.nsec = ns; - adapter->tx_tstamp_tc.nsec = ns; + etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); + etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; + IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); + IXGBE_WRITE_FLUSH(hw); return 0; } +/* Disable l2 tunnel */ static int -ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts) +ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) { - uint64_t ns, systime_cycles; - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; + int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); - systime_cycles = ixgbe_read_systime_cyclecounter(dev); - ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles); - *ts = rte_ns_to_timespec(ns); + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_en = FALSE; + ret = ixgbe_e_tag_disable(hw); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } - return 0; + return ret; } static int -ixgbe_timesync_enable(struct rte_eth_dev *dev) +ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) { + int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t tsync_ctl; - uint32_t tsauxc; - - /* Stop the timesync system time. */ - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0); - /* Reset the timesync system time value. */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0); - - /* Enable system time for platforms where it isn't on by default. 
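The timesync block being moved above backs the generic rte_eth_timesync_* hooks: enable starts SYSTIME and the 1588 ethertype filter, and the timecounters can then be read and slewed. A hypothetical application sequence exercising them (port_id assumed valid):

#include <time.h>
#include <rte_ethdev.h>

static void
ptp_demo(uint8_t port_id)
{
	struct timespec ts;

	rte_eth_timesync_enable(port_id);	 /* SYSTIME + 1588 filter on */
	rte_eth_timesync_read_time(port_id, &ts); /* current device clock */
	rte_eth_timesync_adjust_time(port_id, 1000); /* slew by +1000 ns */
	rte_eth_timesync_disable(port_id);
}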
*/ - tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); - tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME; - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; - ixgbe_start_timecounters(dev); + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } - /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), - (ETHER_TYPE_1588 | - IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_1588)); + rar_entries = ixgbe_get_num_rx_addrs(hw); - /* Enable timestamping of received PTP packets. */ - tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED; - IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); + if ((rar_high & IXGBE_RAH_AV) && + (rar_high & IXGBE_RAH_ADTYPE) && + ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == + l2_tunnel->tunnel_id)) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); - /* Enable timestamping of transmitted PTP packets. */ - tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED; - IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); - IXGBE_WRITE_FLUSH(hw); + return ret; + } + } - return 0; + return ret; } static int -ixgbe_timesync_disable(struct rte_eth_dev *dev) +ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) { + int ret = 0; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t tsync_ctl; + uint32_t i, rar_entries; + uint32_t rar_low, rar_high; - /* Disable timestamping of transmitted PTP packets. */ - tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED; - IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl); + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } - /* Disable timestamping of received PTP packets. */ - tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED; - IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl); + /* One entry for one tunnel. Try to remove potential existing entry. */ + ixgbe_e_tag_filter_del(dev, l2_tunnel); - /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); + rar_entries = ixgbe_get_num_rx_addrs(hw); - /* Stop incrementating the System Time registers. */ - IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0); + for (i = 1; i < rar_entries; i++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); + if (rar_high & IXGBE_RAH_AV) { + continue; + } else { + ixgbe_set_vmdq(hw, i, l2_tunnel->pool); + rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; + rar_low = l2_tunnel->tunnel_id; - return 0; + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + + return ret; + } + } + + PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
+ " Please remove a rule before adding a new one."); + return -EINVAL; } -static int -ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev, - struct timespec *timestamp, - uint32_t flags __rte_unused) +static inline struct ixgbe_l2_tn_filter * +ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; - uint32_t tsync_rxctl; - uint64_t rx_tstamp_cycles; - uint64_t ns; - - tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); - if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0) - return -EINVAL; + int ret; - rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev); - ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles); - *timestamp = rte_ns_to_timespec(ns); + ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key); + if (ret < 0) + return NULL; - return 0; + return l2_tn_info->hash_map[ret]; } -static int -ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev, - struct timespec *timestamp) +static inline int +ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_filter *l2_tn_filter) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_adapter *adapter = - (struct ixgbe_adapter *)dev->data->dev_private; - uint32_t tsync_txctl; - uint64_t tx_tstamp_cycles; - uint64_t ns; + int ret; - tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); - if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0) - return -EINVAL; + ret = rte_hash_add_key(l2_tn_info->hash_handle, + &l2_tn_filter->key); - tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev); - ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles); - *timestamp = rte_ns_to_timespec(ns); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "Failed to insert L2 tunnel filter" + " to hash table %d!", + ret); + return ret; + } + + l2_tn_info->hash_map[ret] = l2_tn_filter; + + TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); return 0; } -static int -ixgbe_get_reg_length(struct rte_eth_dev *dev) +static inline int +ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info, + struct ixgbe_l2_tn_key *key) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int count = 0; - int g_ind = 0; - const struct reg_info *reg_group; - const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? 
- ixgbe_regs_mac_82598EB : ixgbe_regs_others; + int ret; + struct ixgbe_l2_tn_filter *l2_tn_filter; - while ((reg_group = reg_set[g_ind++])) - count += ixgbe_regs_group_count(reg_group); + ret = rte_hash_del_key(l2_tn_info->hash_handle, key); - return count; -} + if (ret < 0) { + PMD_DRV_LOG(ERR, + "No such L2 tunnel filter to delete %d!", + ret); + return ret; + } -static int -ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused) -{ - int count = 0; - int g_ind = 0; - const struct reg_info *reg_group; + l2_tn_filter = l2_tn_info->hash_map[ret]; + l2_tn_info->hash_map[ret] = NULL; - while ((reg_group = ixgbevf_regs[g_ind++])) - count += ixgbe_regs_group_count(reg_group); + TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries); + rte_free(l2_tn_filter); - return count; + return 0; } -static int -ixgbe_get_regs(struct rte_eth_dev *dev, - struct rte_dev_reg_info *regs) +/* Add l2 tunnel filter */ +int +ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool restore) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t *data = regs->data; - int g_ind = 0; - int count = 0; - const struct reg_info *reg_group; - const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? - ixgbe_regs_mac_82598EB : ixgbe_regs_others; + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; + struct ixgbe_l2_tn_filter *node; - /* Support only full register dump */ - if ((regs->length == 0) || - (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { - regs->version = hw->mac.type << 24 | hw->revision_id << 16 | - hw->device_id; - while ((reg_group = reg_set[g_ind++])) - count += ixgbe_read_regs_group(dev, &data[count], - reg_group); - return 0; + if (!restore) { + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + + node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key); + + if (node) { + PMD_DRV_LOG(ERR, + "The L2 tunnel filter already exists!"); + return -EINVAL; + } + + node = rte_zmalloc("ixgbe_l2_tn", + sizeof(struct ixgbe_l2_tn_filter), + 0); + if (!node) + return -ENOMEM; + + (void)rte_memcpy(&node->key, + &key, + sizeof(struct ixgbe_l2_tn_key)); + node->pool = l2_tunnel->pool; + ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node); + if (ret < 0) { + rte_free(node); + return ret; + } } - return -ENOTSUP; + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; + } + + if ((!restore) && (ret < 0)) + (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + + return ret; } -static int -ixgbevf_get_regs(struct rte_eth_dev *dev, - struct rte_dev_reg_info *regs) +/* Delete l2 tunnel filter */ +int +ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t *data = regs->data; - int g_ind = 0; - int count = 0; - const struct reg_info *reg_group; + int ret; + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + struct ixgbe_l2_tn_key key; - /* Support only full register dump */ - if ((regs->length == 0) || - (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { - regs->version = hw->mac.type << 24 | hw->revision_id << 16 | - hw->device_id; - while ((reg_group = 
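ixgbe_dev_l2_tunnel_filter_add() orders its work so the software and hardware views stay consistent: the node goes into the hash first, the hardware is programmed second, and on a hardware failure (only when not restoring saved filters) the software entry is rolled back. The shape of that pattern, with hypothetical helper names standing in for the driver functions:

#include <errno.h>

struct node; struct filter_conf;	/* stand-ins, not driver types */

static int  sw_insert(struct node *n)       { (void)n; return 0; }
static int  hw_program(struct filter_conf *f) { (void)f; return -EINVAL; }
static void sw_remove(struct node *n)       { (void)n; }

static int
add_filter(struct node *n, struct filter_conf *f)
{
	int ret = sw_insert(n);

	if (ret < 0)
		return ret;
	ret = hw_program(f);
	if (ret < 0)
		sw_remove(n);	/* roll back so SW matches HW */
	return ret;
}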
ixgbevf_regs[g_ind++])) - count += ixgbe_read_regs_group(dev, &data[count], - reg_group); - return 0; + key.l2_tn_type = l2_tunnel->l2_tunnel_type; + key.tn_id = l2_tunnel->tunnel_id; + ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key); + if (ret < 0) + return ret; + + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; } - return -ENOTSUP; + return ret; } +/** + * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. + * @dev: pointer to rte_eth_dev structure + * @filter_op:operation will be taken. + * @arg: a pointer to specific structure corresponding to the filter_op + */ static int -ixgbe_get_eeprom_length(struct rte_eth_dev *dev) +ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, + enum rte_filter_op filter_op, + void *arg) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int ret; - /* Return unit is byte count */ - return hw->eeprom.word_size * 2; + if (filter_op == RTE_ETH_FILTER_NOP) + return 0; + + if (arg == NULL) { + PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", + filter_op); + return -EINVAL; + } + + switch (filter_op) { + case RTE_ETH_FILTER_ADD: + ret = ixgbe_dev_l2_tunnel_filter_add + (dev, + (struct rte_eth_l2_tunnel_conf *)arg, + FALSE); + break; + case RTE_ETH_FILTER_DELETE: + ret = ixgbe_dev_l2_tunnel_filter_del + (dev, + (struct rte_eth_l2_tunnel_conf *)arg); + break; + default: + PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + ret = -EINVAL; + break; + } + return ret; } static int -ixgbe_get_eeprom(struct rte_eth_dev *dev, - struct rte_dev_eeprom_info *in_eeprom) +ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) { + int ret = 0; + uint32_t ctrl; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - uint16_t *data = in_eeprom->data; - int first, length; - first = in_eeprom->offset >> 1; - length = in_eeprom->length >> 1; - if ((first > hw->eeprom.word_size) || - ((first + length) > hw->eeprom.word_size)) - return -EINVAL; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } - in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); + ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; + if (en) + ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); - return eeprom->ops.read_buffer(hw, first, length, data); + return ret; } +/* Enable l2 tunnel forwarding */ static int -ixgbe_set_eeprom(struct rte_eth_dev *dev, - struct rte_dev_eeprom_info *in_eeprom) +ixgbe_dev_l2_tunnel_forwarding_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) { - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_eeprom_info *eeprom = &hw->eeprom; - uint16_t *data = in_eeprom->data; - int first, length; - - first = in_eeprom->offset >> 1; - length = in_eeprom->length >> 1; - if ((first > hw->eeprom.word_size) || - ((first + length) > hw->eeprom.word_size)) - return -EINVAL; - - in_eeprom->magic = hw->vendor_id | (hw->device_id << 16); - - return eeprom->ops.write_buffer(hw, first, length, data); -} + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + int ret = 0; -uint16_t -ixgbe_reta_size_get(enum ixgbe_mac_type 
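ixgbe_dev_l2_tunnel_filter_handle() is reached through the generic filter control API; note the handler passes restore = FALSE so user adds always create a new software node. A hypothetical add request from an application (port_id and the E-tag values are placeholders):

#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int
add_etag_filter(uint8_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf = {
		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
		.tunnel_id = 1000,	/* placeholder E-tag id */
		.pool = 1,		/* destination pool/VF */
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_L2_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}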
mac_type) { - switch (mac_type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - return ETH_RSS_RETA_SIZE_512; - case ixgbe_mac_X550_vf: - case ixgbe_mac_X550EM_x_vf: - case ixgbe_mac_X550EM_a_vf: - return ETH_RSS_RETA_SIZE_64; + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = TRUE; + ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); + break; default: - return ETH_RSS_RETA_SIZE_128; + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; } -} -uint32_t -ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) { - switch (mac_type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - if (reta_idx < ETH_RSS_RETA_SIZE_128) - return IXGBE_RETA(reta_idx >> 2); - else - return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2); - case ixgbe_mac_X550_vf: - case ixgbe_mac_X550EM_x_vf: - case ixgbe_mac_X550EM_a_vf: - return IXGBE_VFRETA(reta_idx >> 2); - default: - return IXGBE_RETA(reta_idx >> 2); - } + return ret; } -uint32_t -ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) { - switch (mac_type) { - case ixgbe_mac_X550_vf: - case ixgbe_mac_X550EM_x_vf: - case ixgbe_mac_X550EM_a_vf: - return IXGBE_VFMRQC; - default: - return IXGBE_MRQC; - } -} +/* Disable l2 tunnel forwarding */ +static int +ixgbe_dev_l2_tunnel_forwarding_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) +{ + struct ixgbe_l2_tn_info *l2_tn_info = + IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private); + int ret = 0; -uint32_t -ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) { - switch (mac_type) { - case ixgbe_mac_X550_vf: - case ixgbe_mac_X550EM_x_vf: - case ixgbe_mac_X550EM_a_vf: - return IXGBE_VFRSSRK(i); + switch (l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + l2_tn_info->e_tag_fwd_en = FALSE; + ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); + break; default: - return IXGBE_RSSRK(i); + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; } -} -bool -ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) { - switch (mac_type) { - case ixgbe_mac_82599_vf: - case ixgbe_mac_X540_vf: - return 0; - default: - return 1; - } + return ret; } static int -ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev, - struct rte_eth_dcb_info *dcb_info) +ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + bool en) { - struct ixgbe_dcb_config *dcb_config = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); - struct ixgbe_dcb_tc_config *tc; - uint8_t i, j; - - if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) - dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs; - else - dcb_info->nb_tcs = 1; + struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev); + int ret = 0; + uint32_t vmtir, vmvir; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (dcb_config->vt_mode) { /* vt is enabled*/ - struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = - &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) - dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i]; - for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) { - for (j = 0; j < dcb_info->nb_tcs; j++) { - dcb_info->tc_queue.tc_rxq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1; - dcb_info->tc_queue.tc_txq[i][j].base = - i * dcb_info->nb_tcs + j; - dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1; - } - } - } else { /* vt is disabled*/ - struct rte_eth_dcb_rx_conf *rx_conf = - 
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; - for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) - dcb_info->prio_tc[i] = rx_conf->dcb_tc[i]; - if (dcb_info->nb_tcs == ETH_4_TCS) { - for (i = 0; i < dcb_info->nb_tcs; i++) { - dcb_info->tc_queue.tc_rxq[0][i].base = i * 32; - dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; - } - dcb_info->tc_queue.tc_txq[0][0].base = 0; - dcb_info->tc_queue.tc_txq[0][1].base = 64; - dcb_info->tc_queue.tc_txq[0][2].base = 96; - dcb_info->tc_queue.tc_txq[0][3].base = 112; - dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64; - dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; - dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; - dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; - } else if (dcb_info->nb_tcs == ETH_8_TCS) { - for (i = 0; i < dcb_info->nb_tcs; i++) { - dcb_info->tc_queue.tc_rxq[0][i].base = i * 16; - dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16; - } - dcb_info->tc_queue.tc_txq[0][0].base = 0; - dcb_info->tc_queue.tc_txq[0][1].base = 32; - dcb_info->tc_queue.tc_txq[0][2].base = 64; - dcb_info->tc_queue.tc_txq[0][3].base = 80; - dcb_info->tc_queue.tc_txq[0][4].base = 96; - dcb_info->tc_queue.tc_txq[0][5].base = 104; - dcb_info->tc_queue.tc_txq[0][6].base = 112; - dcb_info->tc_queue.tc_txq[0][7].base = 120; - dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32; - dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32; - dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16; - dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16; - dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8; - dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8; - dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8; - dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8; - } + if (l2_tunnel->vf_id >= pci_dev->max_vfs) { + PMD_DRV_LOG(ERR, + "VF id %u should be less than %u", + l2_tunnel->vf_id, + pci_dev->max_vfs); + return -EINVAL; } - for (i = 0; i < dcb_info->nb_tcs; i++) { - tc = &dcb_config->tc_config[i]; - dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent; + + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; } - return 0; + + if (en) + vmtir = l2_tunnel->tunnel_id; + else + vmtir = 0; + + IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); + + vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); + vmvir &= ~IXGBE_VMVIR_TAGA_MASK; + if (en) + vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; + IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); + + return ret; } -/* Update e-tag ether type */ +/* Enable l2 tunnel tag insertion */ static int -ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw, - uint16_t ether_type) +ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) { - uint32_t etag_etype; + int ret = 0; - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_X550EM_a) { - return -ENOTSUP; + switch (l2_tunnel->l2_tunnel_type) { + case RTE_L2_TUNNEL_TYPE_E_TAG: + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); + break; + default: + PMD_DRV_LOG(ERR, "Invalid tunnel type"); + ret = -EINVAL; + break; } - etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); - etag_etype &= ~IXGBE_ETAG_ETYPE_MASK; - etag_etype |= ether_type; - IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); - IXGBE_WRITE_FLUSH(hw); - - return 0; + return ret; } -/* Config l2 tunnel ether type */ +/* Disable l2 tunnel tag insertion */ static int -ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) 
+ixgbe_dev_l2_tunnel_insertion_disable + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel) { int ret = 0; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - - if (l2_tunnel == NULL) - return -EINVAL; switch (l2_tunnel->l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type); + ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); break; default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); @@ -6450,11 +7832,13 @@ ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev, return ret; } -/* Enable e-tag tunnel */ static int -ixgbe_e_tag_enable(struct ixgbe_hw *hw) +ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, + bool en) { - uint32_t etag_etype; + int ret = 0; + uint32_t qde; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && @@ -6462,25 +7846,29 @@ ixgbe_e_tag_enable(struct ixgbe_hw *hw) return -ENOTSUP; } - etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); - etag_etype |= IXGBE_ETAG_ETYPE_VALID; - IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); - IXGBE_WRITE_FLUSH(hw); + qde = IXGBE_READ_REG(hw, IXGBE_QDE); + if (en) + qde |= IXGBE_QDE_STRIP_TAG; + else + qde &= ~IXGBE_QDE_STRIP_TAG; + qde &= ~IXGBE_QDE_READ; + qde |= IXGBE_QDE_WRITE; + IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); - return 0; + return ret; } -/* Enable l2 tunnel */ +/* Enable l2 tunnel tag stripping */ static int -ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, - enum rte_eth_tunnel_type l2_tunnel_type) +ixgbe_dev_l2_tunnel_stripping_enable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) { int ret = 0; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); switch (l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_enable(hw); + ret = ixgbe_e_tag_stripping_en_dis(dev, 1); break; default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); @@ -6491,37 +7879,17 @@ ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev, return ret; } -/* Disable e-tag tunnel */ -static int -ixgbe_e_tag_disable(struct ixgbe_hw *hw) -{ - uint32_t etag_etype; - - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_X550EM_a) { - return -ENOTSUP; - } - - etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE); - etag_etype &= ~IXGBE_ETAG_ETYPE_VALID; - IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/* Disable l2 tunnel */ +/* Disable l2 tunnel tag stripping */ static int -ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, - enum rte_eth_tunnel_type l2_tunnel_type) +ixgbe_dev_l2_tunnel_stripping_disable + (struct rte_eth_dev *dev, + enum rte_eth_tunnel_type l2_tunnel_type) { int ret = 0; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); switch (l2_tunnel_type) { case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_disable(hw); + ret = ixgbe_e_tag_stripping_en_dis(dev, 0); break; default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); @@ -6532,114 +7900,140 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev, return ret; } +/* Enable/disable l2 tunnel offload functions */ static int -ixgbe_e_tag_filter_del(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) +ixgbe_dev_l2_tunnel_offload_set + (struct rte_eth_dev *dev, + struct rte_eth_l2_tunnel_conf *l2_tunnel, + uint32_t mask, + uint8_t en) { int ret = 0; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); 
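ixgbe_e_tag_stripping_en_dis(), reworked in the hunk above, goes through IXGBE_QDE, which is accessed via read/write strobe bits: the new STRIP_TAG setting only takes effect when the WRITE bit is set, and the READ bit must be cleared so the access is not treated as a read. The access pattern in isolation (a sketch of the driver sequence, not additional driver code):

/* commit a new STRIP_TAG setting through the QDE strobe bits */
qde = IXGBE_READ_REG(hw, IXGBE_QDE);
if (en)
	qde |= IXGBE_QDE_STRIP_TAG;
else
	qde &= ~IXGBE_QDE_STRIP_TAG;
qde &= ~IXGBE_QDE_READ;		/* must not be flagged as a read access */
qde |= IXGBE_QDE_WRITE;		/* WRITE strobe commits the value */
IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);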
- uint32_t i, rar_entries; - uint32_t rar_low, rar_high; - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_X550EM_a) { - return -ENOTSUP; - } + if (l2_tunnel == NULL) + return -EINVAL; - rar_entries = ixgbe_get_num_rx_addrs(hw); + ret = -EINVAL; + if (mask & ETH_L2_TUNNEL_ENABLE_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_disable( + dev, + l2_tunnel->l2_tunnel_type); + } - for (i = 1; i < rar_entries; i++) { - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); - rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i)); - if ((rar_high & IXGBE_RAH_AV) && - (rar_high & IXGBE_RAH_ADTYPE) && - ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) == - l2_tunnel->tunnel_id)) { - IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + if (mask & ETH_L2_TUNNEL_INSERTION_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_insertion_enable( + dev, + l2_tunnel); + else + ret = ixgbe_dev_l2_tunnel_insertion_disable( + dev, + l2_tunnel); + } - ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL); + if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_stripping_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_stripping_disable( + dev, + l2_tunnel->l2_tunnel_type); + } - return ret; - } + if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) { + if (en) + ret = ixgbe_dev_l2_tunnel_forwarding_enable( + dev, + l2_tunnel->l2_tunnel_type); + else + ret = ixgbe_dev_l2_tunnel_forwarding_disable( + dev, + l2_tunnel->l2_tunnel_type); } return ret; } static int -ixgbe_e_tag_filter_add(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) +ixgbe_update_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) { - int ret = 0; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t i, rar_entries; - uint32_t rar_low, rar_high; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port); + IXGBE_WRITE_FLUSH(hw); - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_X550EM_a) { - return -ENOTSUP; - } + return 0; +} - /* One entry for one tunnel. Try to remove potential existing entry. */ - ixgbe_e_tag_filter_del(dev, l2_tunnel); +/* There's only one register for VxLAN UDP port. + * So, we cannot add several ports. Will update it. + */ +static int +ixgbe_add_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + if (port == 0) { + PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed."); + return -EINVAL; + } - rar_entries = ixgbe_get_num_rx_addrs(hw); + return ixgbe_update_vxlan_port(hw, port); +} - for (i = 1; i < rar_entries; i++) { - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i)); - if (rar_high & IXGBE_RAH_AV) { - continue; - } else { - ixgbe_set_vmdq(hw, i, l2_tunnel->pool); - rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE; - rar_low = l2_tunnel->tunnel_id; +/* We cannot delete the VxLAN port. For there's a register for VxLAN + * UDP port, it must have a value. + * So, will reset it to the original value 0. + */ +static int +ixgbe_del_vxlan_port(struct ixgbe_hw *hw, + uint16_t port) +{ + uint16_t cur_port; - IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high); + cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL); - return ret; - } + if (cur_port != port) { + PMD_DRV_LOG(ERR, "Port %u does not exist.", port); + return -EINVAL; } - PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full." 
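ixgbe_dev_l2_tunnel_offload_set() fans a mask of offload bits out to the enable/disable helpers; since ret is overwritten in each branch, the status returned for a multi-bit mask is that of the last branch taken. A hypothetical caller enabling E-tag support and forwarding in one shot:

#include <rte_ethdev.h>

static int
enable_etag(uint8_t port_id)
{
	struct rte_eth_l2_tunnel_conf conf = {
		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
	};
	uint32_t mask = ETH_L2_TUNNEL_ENABLE_MASK |
			ETH_L2_TUNNEL_FORWARDING_MASK;

	return rte_eth_dev_l2_tunnel_offload_set(port_id, &conf, mask, 1);
}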
- " Please remove a rule before adding a new one."); - return -EINVAL; + return ixgbe_update_vxlan_port(hw, 0); } -/* Add l2 tunnel filter */ +/* Add UDP tunneling port */ static int -ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) +ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - switch (l2_tunnel->l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_filter_add(dev, l2_tunnel); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; } - return ret; -} + if (udp_tunnel == NULL) + return -EINVAL; -/* Delete l2 tunnel filter */ -static int -ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) -{ - int ret = 0; + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port); + break; - switch (l2_tunnel->l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_filter_del(dev, l2_tunnel); + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; break; + default: PMD_DRV_LOG(ERR, "Invalid tunnel type"); ret = -EINVAL; @@ -6649,501 +8043,635 @@ ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev, return ret; } -/** - * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter. - * @dev: pointer to rte_eth_dev structure - * @filter_op:operation will be taken. - * @arg: a pointer to specific structure corresponding to the filter_op - */ +/* Remove UDP tunneling port */ static int -ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg) +ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (filter_op == RTE_ETH_FILTER_NOP) - return 0; + if (hw->mac.type != ixgbe_mac_X550 && + hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_X550EM_a) { + return -ENOTSUP; + } - if (arg == NULL) { - PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.", - filter_op); + if (udp_tunnel == NULL) return -EINVAL; - } - switch (filter_op) { - case RTE_ETH_FILTER_ADD: - ret = ixgbe_dev_l2_tunnel_filter_add - (dev, - (struct rte_eth_l2_tunnel_conf *)arg); + switch (udp_tunnel->prot_type) { + case RTE_TUNNEL_TYPE_VXLAN: + ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port); break; - case RTE_ETH_FILTER_DELETE: - ret = ixgbe_dev_l2_tunnel_filter_del - (dev, - (struct rte_eth_l2_tunnel_conf *)arg); + case RTE_TUNNEL_TYPE_GENEVE: + case RTE_TUNNEL_TYPE_TEREDO: + PMD_DRV_LOG(ERR, "Tunnel type is not supported now."); + ret = -EINVAL; break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op); + PMD_DRV_LOG(ERR, "Invalid tunnel type"); ret = -EINVAL; break; } + return ret; } -static int -ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en) +static void +ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev) { - int ret = 0; - uint32_t ctrl; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != 
ixgbe_mac_X550EM_a) { - return -ENOTSUP; - } + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI); +} - ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK; - if (en) - ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl); +static void +ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - return ret; + hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); } -/* Enable l2 tunnel forwarding */ -static int -ixgbe_dev_l2_tunnel_forwarding_enable - (struct rte_eth_dev *dev, - enum rte_eth_tunnel_type l2_tunnel_type) +static void ixgbevf_mbx_process(struct rte_eth_dev *dev) { - int ret = 0; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 in_msg = 0; - switch (l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_forwarding_en_dis(dev, 1); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; - } + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; - return ret; + /* PF reset VF event */ + if (in_msg == IXGBE_PF_CONTROL_MSG) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); } -/* Disable l2 tunnel forwarding */ static int -ixgbe_dev_l2_tunnel_forwarding_disable - (struct rte_eth_dev *dev, - enum rte_eth_tunnel_type l2_tunnel_type) +ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) { - int ret = 0; + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + ixgbevf_intr_disable(hw); - switch (l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_forwarding_en_dis(dev, 0); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; - } + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); + intr->flags = 0; - return ret; + /* only one misc vector supported - mailbox */ + eicr &= IXGBE_VTEICR_MASK; + if (eicr == IXGBE_MISC_VEC_ID) + intr->flags |= IXGBE_FLAG_MAILBOX; + + return 0; } static int -ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel, - bool en) +ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) { - int ret = 0; - uint32_t vmtir, vmvir; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); - if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) { - PMD_DRV_LOG(ERR, - "VF id %u should be less than %u", - l2_tunnel->vf_id, - dev->pci_dev->max_vfs); - return -EINVAL; - } - - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_X550EM_a) { - return -ENOTSUP; + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbevf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; } - if (en) - vmtir = l2_tunnel->tunnel_id; - else - vmtir = 0; + ixgbevf_intr_enable(hw); - IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir); + return 0; +} - vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id)); - vmvir &= ~IXGBE_VMVIR_TAGA_MASK; - if (en) - vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT; - IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir); +static void +ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; - return ret; + 
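ixgbevf_mbx_process() above turns a PF control message into an RTE_ETH_EVENT_INTR_RESET event, so a VF application can learn that the PF reset it. A sketch of the receiving side, using the ethdev callback signature of this era:

#include <stdio.h>
#include <rte_ethdev.h>

static void
vf_reset_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
	(void)cb_arg;
	if (event == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: PF requested a VF reset\n", port_id);
}

static void
watch_for_reset(uint8_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_cb, NULL);
}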
ixgbevf_dev_interrupt_get_status(dev); + ixgbevf_dev_interrupt_action(dev); } -/* Enable l2 tunnel tag insertion */ -static int -ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) +/** + * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path + * @hw: pointer to hardware structure + * + * Stops the transmit data path and waits for the HW to internally empty + * the Tx security block + **/ +int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw) { - int ret = 0; +#define IXGBE_MAX_SECTX_POLL 40 - switch (l2_tunnel->l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; + int i; + int sectxreg; + + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg |= IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) { + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT); + if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY) + break; + /* Use interrupt-safe sleep just in case */ + usec_delay(1000); } - return ret; + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECTX_POLL) + PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security " + "path fully disabled. Continuing with init."); + + return IXGBE_SUCCESS; } -/* Disable l2 tunnel tag insertion */ -static int -ixgbe_dev_l2_tunnel_insertion_disable - (struct rte_eth_dev *dev, - struct rte_eth_l2_tunnel_conf *l2_tunnel) +/** + * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path + * @hw: pointer to hardware structure + * + * Enables the transmit data path. + **/ +int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw) { - int ret = 0; + uint32_t sectxreg; - switch (l2_tunnel->l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; - } + sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg); + IXGBE_WRITE_FLUSH(hw); - return ret; + return IXGBE_SUCCESS; } -static int -ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev, - bool en) +int +rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp) { - int ret = 0; - uint32_t qde; - struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl; - if (hw->mac.type != ixgbe_mac_X550 && - hw->mac.type != ixgbe_mac_X550EM_x && - hw->mac.type != ixgbe_mac_X550EM_a) { + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_ixgbe_pmd)) return -ENOTSUP; - } - qde = IXGBE_READ_REG(hw, IXGBE_QDE); - if (en) - qde |= IXGBE_QDE_STRIP_TAG; + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Stop the data paths */ + if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS) + return -ENOTSUP; + /* + * Workaround: + * As no ixgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * ixgbe_disable_sec_rx_path(). 
+ */ + ixgbe_disable_sec_tx_path_generic(hw); + + /* Enable Ethernet CRC (required by MACsec offload) */ + ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0); + ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl); + + /* Enable the TX and RX crypto engines */ + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL); + ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK; + ctrl |= 0x3; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl); + + /* Enable SA lookup */ + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL); + ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK; + ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT : + IXGBE_LSECTXCTRL_AUTH; + ctrl |= IXGBE_LSECTXCTRL_AISCI; + ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK; + ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK; + IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl); + + ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL); + ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK; + ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT; + ctrl &= ~IXGBE_LSECRXCTRL_PLSH; + if (rp) + ctrl |= IXGBE_LSECRXCTRL_RP; else - qde &= ~IXGBE_QDE_STRIP_TAG; - qde &= ~IXGBE_QDE_READ; - qde |= IXGBE_QDE_WRITE; - IXGBE_WRITE_REG(hw, IXGBE_QDE, qde); + ctrl &= ~IXGBE_LSECRXCTRL_RP; + IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl); - return ret; + /* Start the data paths */ + ixgbe_enable_sec_rx_path(hw); + /* + * Workaround: + * As no ixgbe_enable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + */ + ixgbe_enable_sec_tx_path_generic(hw); + + return 0; } -/* Enable l2 tunnel tag stripping */ -static int -ixgbe_dev_l2_tunnel_stripping_enable - (struct rte_eth_dev *dev, - enum rte_eth_tunnel_type l2_tunnel_type) +int +rte_pmd_ixgbe_macsec_disable(uint8_t port) { - int ret = 0; + struct ixgbe_hw *hw; + struct rte_eth_dev *dev; + uint32_t ctrl; - switch (l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_stripping_en_dis(dev, 1); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; - } + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); - return ret; -} + dev = &rte_eth_devices[port]; -/* Disable l2 tunnel tag stripping */ -static int -ixgbe_dev_l2_tunnel_stripping_disable - (struct rte_eth_dev *dev, - enum rte_eth_tunnel_type l2_tunnel_type) -{ - int ret = 0; + if (!is_device_supported(dev, &rte_ixgbe_pmd)) + return -ENOTSUP; - switch (l2_tunnel_type) { - case RTE_L2_TUNNEL_TYPE_E_TAG: - ret = ixgbe_e_tag_stripping_en_dis(dev, 0); - break; - default: - PMD_DRV_LOG(ERR, "Invalid tunnel type"); - ret = -EINVAL; - break; - } + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Stop the data paths */ + if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS) + return -ENOTSUP; + /* + * Workaround: + * As no ixgbe_disable_sec_rx_path equivalent is + * implemented for tx in the base code, and we are + * not allowed to modify the base code in DPDK, so + * just call the hand-written one directly for now. + * The hardware support has been checked by + * ixgbe_disable_sec_rx_path(). 
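rte_pmd_ixgbe_macsec_enable() above quiesces both security data paths, forces CRC offload on (MACsec requires it), un-gates the crypto engines, and programs SA lookup before restarting traffic. Typical usage of this new ixgbe-specific API, with placeholder port and secure-channel address:

#include <rte_pmd_ixgbe.h>

static void
macsec_demo(uint8_t port_id)
{
	/* placeholder station address for the TX secure channel */
	uint8_t txsc_mac[6] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};

	/* en = 1: encrypt (not just authenticate); rp = 1: replay protect */
	rte_pmd_ixgbe_macsec_enable(port_id, 1, 1);
	rte_pmd_ixgbe_macsec_config_txsc(port_id, txsc_mac);
}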
-/* Enable l2 tunnel tag stripping */
-static int
-ixgbe_dev_l2_tunnel_stripping_enable
-	(struct rte_eth_dev *dev,
-	 enum rte_eth_tunnel_type l2_tunnel_type)
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
 {
-	int ret = 0;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
 
-	switch (l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
-		ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "Invalid tunnel type");
-		ret = -EINVAL;
-		break;
-	}
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	return ret;
-}
+	dev = &rte_eth_devices[port];
 
-/* Disable l2 tunnel tag stripping */
-static int
-ixgbe_dev_l2_tunnel_stripping_disable
-	(struct rte_eth_dev *dev,
-	 enum rte_eth_tunnel_type l2_tunnel_type)
-{
-	int ret = 0;
+	if (!is_device_supported(dev, &rte_ixgbe_pmd))
+		return -ENOTSUP;
 
-	switch (l2_tunnel_type) {
-	case RTE_L2_TUNNEL_TYPE_E_TAG:
-		ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "Invalid tunnel type");
-		ret = -EINVAL;
-		break;
-	}
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Stop the data paths */
+	if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+		return -ENOTSUP;
+	/*
+	 * Workaround:
+	 * No Tx equivalent of ixgbe_disable_sec_rx_path is implemented
+	 * in the base code, and the base code must not be modified in
+	 * DPDK, so call the hand-written replacement directly for now.
+	 * Hardware support has already been checked by
+	 * ixgbe_disable_sec_rx_path().
+	 */
+	ixgbe_disable_sec_tx_path_generic(hw);
+
+	/* Disable the TX and RX crypto engines */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+	ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+	ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+	/* Disable SA lookup */
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+	ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+	ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+	ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+	/* Start the data paths */
+	ixgbe_enable_sec_rx_path(hw);
+	/*
+	 * Workaround:
+	 * No Tx equivalent of ixgbe_enable_sec_rx_path is implemented
+	 * in the base code, and the base code must not be modified in
+	 * DPDK, so call the hand-written replacement directly for now.
+	 */
+	ixgbe_enable_sec_tx_path_generic(hw);
 
-	return ret;
+	return 0;
 }
 
-/* Enable/disable l2 tunnel offload functions */
-static int
-ixgbe_dev_l2_tunnel_offload_set
-	(struct rte_eth_dev *dev,
-	 struct rte_eth_l2_tunnel_conf *l2_tunnel,
-	 uint32_t mask,
-	 uint8_t en)
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
 {
-	int ret = 0;
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
 
-	if (l2_tunnel == NULL)
-		return -EINVAL;
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-	ret = -EINVAL;
-	if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
-		if (en)
-			ret = ixgbe_dev_l2_tunnel_enable(
-				dev,
-				l2_tunnel->l2_tunnel_type);
-		else
-			ret = ixgbe_dev_l2_tunnel_disable(
-				dev,
-				l2_tunnel->l2_tunnel_type);
-	}
+	dev = &rte_eth_devices[port];
 
-	if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
-		if (en)
-			ret = ixgbe_dev_l2_tunnel_insertion_enable(
-				dev,
-				l2_tunnel);
-		else
-			ret = ixgbe_dev_l2_tunnel_insertion_disable(
-				dev,
-				l2_tunnel);
-	}
+	if (!is_device_supported(dev, &rte_ixgbe_pmd))
+		return -ENOTSUP;
 
-	if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
-		if (en)
-			ret = ixgbe_dev_l2_tunnel_stripping_enable(
-				dev,
-				l2_tunnel->l2_tunnel_type);
-		else
-			ret = ixgbe_dev_l2_tunnel_stripping_disable(
-				dev,
-				l2_tunnel->l2_tunnel_type);
-	}
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
-		if (en)
-			ret = ixgbe_dev_l2_tunnel_forwarding_enable(
-				dev,
-				l2_tunnel->l2_tunnel_type);
-		else
-			ret = ixgbe_dev_l2_tunnel_forwarding_disable(
-				dev,
-				l2_tunnel->l2_tunnel_type);
-	}
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
 
-	return ret;
+	ctrl = mac[4] | (mac[5] << 8);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+	return 0;
 }
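/*
 * Usage sketch (editor's addition, not part of the patch): programming the
 * TX secure channel with a sample station address. As the function above
 * shows, the low four MAC bytes land in LSECTXSCL and the last two in
 * LSECTXSCH. The MAC value and helper name are illustrative.
 */
static int
example_txsc(uint8_t port)
{
	uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	return rte_pmd_ixgbe_macsec_config_txsc(port, mac);
}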
-static int
-ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
-			uint16_t port)
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
 {
-	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
-	IXGBE_WRITE_FLUSH(hw);
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_ixgbe_pmd))
+		return -ENOTSUP;
+
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+	pi = rte_cpu_to_be_16(pi);
+	ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
 
 	return 0;
 }
 
-/* There's only one register for VxLAN UDP port.
- * So, we cannot add several ports. Will update it.
- */
-static int
-ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
-		     uint16_t port)
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
 {
-	if (port == 0) {
-		PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
-		return -EINVAL;
-	}
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
 
-	return ixgbe_update_vxlan_port(hw, port);
-}
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
-/* We cannot delete the VxLAN port. For there's a register for VxLAN
- * UDP port, it must have a value.
- * So, will reset it to the original value 0.
- */
-static int
-ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
-		     uint16_t port)
-{
-	uint16_t cur_port;
+	dev = &rte_eth_devices[port];
 
-	cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
+	if (!is_device_supported(dev, &rte_ixgbe_pmd))
+		return -ENOTSUP;
 
-	if (cur_port != port) {
-		PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
 		return -EINVAL;
+
+	if (an >= 4)
+		return -EINVAL;
+
+	/* Set the PN and key */
+	pn = rte_cpu_to_be_32(pn);
+	if (idx == 0) {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+		}
+	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+		for (i = 0; i < 4; i++) {
+			ctrl = (key[i * 4 + 0] <<  0) |
+			       (key[i * 4 + 1] <<  8) |
+			       (key[i * 4 + 2] << 16) |
+			       (key[i * 4 + 3] << 24);
+			IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+		}
 	}
 
-	return ixgbe_update_vxlan_port(hw, 0);
+	/* Set AN and select the SA */
+	ctrl = (an << idx * 2) | (idx << 4);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+	return 0;
 }
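/*
 * Usage sketch (editor's addition, not part of the patch): installing a
 * 128-bit SAK on TX SA 0. idx must be 0 or 1 and an < 4, matching the
 * checks above; the key value and helper name are placeholders.
 */
static int
example_txsa(uint8_t port)
{
	uint8_t key[16] = { 0 };	/* fill with the real association key */

	/* SA index 0, association number 0, starting packet number 1 */
	return rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, key);
}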
-/* Add UDP tunneling port */
-static int
-ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
-			      struct rte_eth_udp_tunnel *udp_tunnel)
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+				 uint32_t pn, uint8_t *key)
 {
-	int ret = 0;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_hw *hw;
+	struct rte_eth_dev *dev;
+	uint32_t ctrl, i;
 
-	if (hw->mac.type != ixgbe_mac_X550 &&
-	    hw->mac.type != ixgbe_mac_X550EM_x &&
-	    hw->mac.type != ixgbe_mac_X550EM_a) {
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+	dev = &rte_eth_devices[port];
+
+	if (!is_device_supported(dev, &rte_ixgbe_pmd))
 		return -ENOTSUP;
-	}
 
-	if (udp_tunnel == NULL)
+	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (idx != 0 && idx != 1)
 		return -EINVAL;
 
-	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-		ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
-		break;
+	if (an >= 4)
+		return -EINVAL;
 
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
-		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
-		ret = -EINVAL;
-		break;
+	/* Set the PN */
+	pn = rte_cpu_to_be_32(pn);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
 
-	default:
-		PMD_DRV_LOG(ERR, "Invalid tunnel type");
-		ret = -EINVAL;
-		break;
+	/* Set the key */
+	for (i = 0; i < 4; i++) {
+		ctrl = (key[i * 4 + 0] <<  0) |
+		       (key[i * 4 + 1] <<  8) |
+		       (key[i * 4 + 2] << 16) |
+		       (key[i * 4 + 3] << 24);
+		IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
 	}
 
-	return ret;
+	/* Set the AN and validate the SA */
+	ctrl = an | (1 << 2);
+	IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+	return 0;
 }
 
-/* Remove UDP tunneling port */
-static int
-ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
-			      struct rte_eth_udp_tunnel *udp_tunnel)
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
 {
-	int ret = 0;
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter *node;
 
-	if (hw->mac.type != ixgbe_mac_X550 &&
-	    hw->mac.type != ixgbe_mac_X550EM_x &&
-	    hw->mac.type != ixgbe_mac_X550EM_a) {
-		return -ENOTSUP;
+	TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+		ixgbe_inject_5tuple_filter(dev, node);
 	}
+}
 
-	if (udp_tunnel == NULL)
-		return -EINVAL;
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i;
 
-	switch (udp_tunnel->prot_type) {
-	case RTE_TUNNEL_TYPE_VXLAN:
-		ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
-		break;
-	case RTE_TUNNEL_TYPE_GENEVE:
-	case RTE_TUNNEL_TYPE_TEREDO:
-		PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
-		ret = -EINVAL;
-		break;
-	default:
-		PMD_DRV_LOG(ERR, "Invalid tunnel type");
-		ret = -EINVAL;
-		break;
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_mask & (1 << i)) {
+			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+				filter_info->ethertype_filters[i].etqf);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+				filter_info->ethertype_filters[i].etqs);
+			IXGBE_WRITE_FLUSH(hw);
+		}
 	}
-
-	return ret;
 }
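/*
 * Pattern sketch (editor's addition, not part of the patch): the restore
 * helpers above all walk a software shadow list kept in dev_private and
 * re-inject each saved entry into hardware. "my_filter"/"my_list" are
 * illustrative stand-ins for ixgbe_5tuple_filter/fivetuple_list.
 */
#include <sys/queue.h>

struct my_filter {
	TAILQ_ENTRY(my_filter) entries;
	/* saved hardware programming parameters would live here */
};
TAILQ_HEAD(my_list, my_filter);

static void
example_restore(struct my_list *list)
{
	struct my_filter *node;

	TAILQ_FOREACH(node, list, entries) {
		/* re-program one hardware filter from its saved state */
	}
}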
-/* ixgbevf_update_xcast_mode - Update Multicast mode
- * @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
- * @xcast_mode: new multicast mode
- *
- * Updates the Multicast Mode of VF.
- */
-static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
-				     int xcast_mode)
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
 {
-	struct ixgbe_mbx_info *mbx = &hw->mbx;
-	u32 msgbuf[2];
-	s32 err;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	uint32_t synqf;
 
-	switch (hw->api_version) {
-	case ixgbe_mbox_api_12:
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
+	synqf = filter_info->syn_info;
 
-	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
-	msgbuf[1] = xcast_mode;
+	if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
 
-	err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
-	if (err)
-		return err;
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *node;
+	struct rte_eth_l2_tunnel_conf l2_tn_conf;
 
-	err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
-	if (err)
-		return err;
+	TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+		l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+		l2_tn_conf.tunnel_id = node->key.tn_id;
+		l2_tn_conf.pool = node->pool;
+		(void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+	}
+}
 
-	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
-		return -EPERM;
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+	ixgbe_ntuple_filter_restore(dev);
+	ixgbe_ethertype_filter_restore(dev);
+	ixgbe_syn_filter_restore(dev);
+	ixgbe_fdir_filter_restore(dev);
+	ixgbe_l2_tn_filter_restore(dev);
 
 	return 0;
 }
 
 static void
-ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
 {
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+	if (l2_tn_info->e_tag_en)
+		(void)ixgbe_e_tag_enable(hw);
+
+	if (l2_tn_info->e_tag_fwd_en)
+		(void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+	(void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
 }
 
-static void
-ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	struct ixgbe_5tuple_filter *p_5tuple;
+
+	while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+		ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+	int i;
 
-	ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+	for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+		if (filter_info->ethertype_mask & (1 << i) &&
+		    !filter_info->ethertype_filters[i].conf) {
+			(void)ixgbe_ethertype_filter_remove(filter_info,
+							    (uint8_t)i);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+			IXGBE_WRITE_FLUSH(hw);
+		}
+	}
 }
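/*
 * Sketch (editor's addition; the actual call sites are outside this hunk):
 * the aggregate ixgbe_filter_restore()/ixgbe_l2_tunnel_conf() pair is
 * presumably meant to run when the port is (re)started, so filter and
 * E-tag state saved in dev_private survives a stop/start cycle. The
 * helper name below is illustrative.
 */
static void
example_dev_start_tail(struct rte_eth_dev *dev)
{
	ixgbe_l2_tunnel_conf(dev);	/* replay saved E-tag configuration */
	ixgbe_filter_restore(dev);	/* re-inject all saved filters */
}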
-static struct rte_driver rte_ixgbe_driver = {
-	.type = PMD_PDEV,
-	.init = rte_ixgbe_pmd_init,
-};
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_filter_info *filter_info =
+		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 
-static struct rte_driver rte_ixgbevf_driver = {
-	.type = PMD_PDEV,
-	.init = rte_ixgbevf_pmd_init,
-};
+	if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+		filter_info->syn_info = 0;
+
+		IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+}
+
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+	struct ixgbe_l2_tn_info *l2_tn_info =
+		IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+	struct ixgbe_l2_tn_filter *l2_tn_filter;
+	struct rte_eth_l2_tunnel_conf l2_tn_conf;
+	int ret = 0;
+
+	while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+		l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+		l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+		l2_tn_conf.pool = l2_tn_filter->pool;
+		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
 
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
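/*
 * Usage sketch (editor's addition, not part of the patch): with the
 * registration macros above, devices matched by pci_id_ixgbe_map and bound
 * to igb_uio/uio_pci_generic/vfio are probed during rte_eal_init() and
 * appear as ethdev ports under the "net_ixgbe" driver name.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	printf("probed %d port(s)\n", (int)rte_eth_dev_count());
	return 0;
}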