X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_ethdev.h;h=899f446cde5f18de339fc101827cae726e9ada27;hb=cb71192486c34eada5b65c6c46d32afd05cc091b;hp=28c3878bb628e8bb70aedf1d7da5c6c4b448bdee;hpb=c68a52b8b38c31639377755e7cc4d40c23b3f815;p=dpdk.git diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 28c3878bb6..899f446cde 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -11,6 +11,7 @@ #include "base/ice_common.h" #include "base/ice_adminq_cmd.h" +#include "base/ice_flow.h" #define ICE_VLAN_TAG_SIZE 4 @@ -42,6 +43,15 @@ #define ICE_MAX_PKT_TYPE 1024 +/* DDP package search path */ +#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg" +#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg" +#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/" +#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/" +#define ICE_MAX_PKG_FILENAME_SIZE 256 + +#define MAX_ACL_ENTRIES 512 + /** * vlan_id is a 12 bit number. * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. @@ -105,11 +115,13 @@ ICE_FLAG_VF_MAC_BY_PF) #define ICE_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ ETH_RSS_FRAG_IPV4 | \ ETH_RSS_NONFRAG_IPV4_TCP | \ ETH_RSS_NONFRAG_IPV4_UDP | \ ETH_RSS_NONFRAG_IPV4_SCTP | \ ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_IPV6 | \ ETH_RSS_FRAG_IPV6 | \ ETH_RSS_NONFRAG_IPV6_TCP | \ ETH_RSS_NONFRAG_IPV6_UDP | \ @@ -117,13 +129,33 @@ ETH_RSS_NONFRAG_IPV6_OTHER | \ ETH_RSS_L2_PAYLOAD) +/** + * The overhead from MTU to max frame size. + * Considering QinQ packet, the VLAN tag needs to be counted twice. + */ +#define ICE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2) + +#define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK) +#define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK) + +/* Max number of flexible descriptor rxdid */ +#define ICE_FLEX_DESC_RXDID_MAX_NUM 64 + +/* DDP package type */ +enum ice_pkg_type { + ICE_PKG_TYPE_UNKNOWN, + ICE_PKG_TYPE_OS_DEFAULT, + ICE_PKG_TYPE_COMMS, +}; + struct ice_adapter; /** * MAC filter structure */ struct ice_mac_filter_info { - struct ether_addr mac_addr; + struct rte_ether_addr mac_addr; }; TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter); @@ -225,6 +257,149 @@ struct ice_vsi { struct ice_eth_stats eth_stats_offset; struct ice_eth_stats eth_stats; bool offset_loaded; + uint64_t old_rx_bytes; + uint64_t old_tx_bytes; +}; + +enum proto_xtr_type { + PROTO_XTR_NONE, + PROTO_XTR_VLAN, + PROTO_XTR_IPV4, + PROTO_XTR_IPV6, + PROTO_XTR_IPV6_FLOW, + PROTO_XTR_TCP, + PROTO_XTR_IP_OFFSET, + PROTO_XTR_MAX /* The last one */ +}; + +enum ice_fdir_tunnel_type { + ICE_FDIR_TUNNEL_TYPE_NONE = 0, + ICE_FDIR_TUNNEL_TYPE_VXLAN, + ICE_FDIR_TUNNEL_TYPE_GTPU, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH, +}; + +struct rte_flow; +TAILQ_HEAD(ice_flow_list, rte_flow); + +struct ice_flow_parser_node; +TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); + +struct ice_fdir_filter_conf { + struct ice_fdir_fltr input; + enum ice_fdir_tunnel_type tunnel_type; + + struct ice_fdir_counter *counter; /* flow specific counter context */ + struct rte_flow_action_count act_count; + + uint64_t input_set; + uint64_t outer_input_set; /* only for tunnel packets outer fields */ + uint32_t mark_flag; +}; + +#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16) + +struct ice_fdir_fltr_pattern { + enum ice_fltr_ptype flow_type; + + union { + struct ice_fdir_v4 v4; + struct ice_fdir_v6 v6; + } ip, mask; + + struct 
ice_fdir_udp_gtp gtpu_data; + struct ice_fdir_udp_gtp gtpu_mask; + + struct ice_fdir_extra ext_data; + struct ice_fdir_extra ext_mask; + + enum ice_fdir_tunnel_type tunnel_type; +}; + +#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE 1 +#define ICE_FDIR_COUNTER_MAX_POOL_SIZE 32 +#define ICE_FDIR_COUNTERS_PER_BLOCK 256 +#define ICE_FDIR_COUNTER_INDEX(base_idx) \ + ((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK) +struct ice_fdir_counter_pool; + +struct ice_fdir_counter { + TAILQ_ENTRY(ice_fdir_counter) next; + struct ice_fdir_counter_pool *pool; + uint8_t shared; + uint32_t ref_cnt; + uint32_t id; + uint64_t hits; + uint64_t bytes; + uint32_t hw_index; +}; + +TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter); + +struct ice_fdir_counter_pool { + TAILQ_ENTRY(ice_fdir_counter_pool) next; + struct ice_fdir_counter_list counter_list; + struct ice_fdir_counter counters[0]; +}; + +TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool); + +struct ice_fdir_counter_pool_container { + struct ice_fdir_counter_pool_list pool_list; + struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE]; + uint8_t index_free; +}; + +/** + * A structure used to define fields of a FDIR related info. + */ +struct ice_fdir_info { + struct ice_vsi *fdir_vsi; /* pointer to fdir VSI structure */ + struct ice_tx_queue *txq; + struct ice_rx_queue *rxq; + void *prg_pkt; /* memory for fdir program packet */ + uint64_t dma_addr; /* physic address of packet memory*/ + const struct rte_memzone *mz; + struct ice_fdir_filter_conf conf; + + struct ice_fdir_filter_conf **hash_map; + struct rte_hash *hash_table; + + struct ice_fdir_counter_pool_container counter; +}; + +#define ICE_HASH_GTPU_CTX_EH_IP 0 +#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1 +#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2 +#define ICE_HASH_GTPU_CTX_UP_IP 3 +#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4 +#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5 +#define ICE_HASH_GTPU_CTX_DW_IP 6 +#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7 +#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8 +#define ICE_HASH_GTPU_CTX_MAX 9 + +struct ice_hash_gtpu_ctx { + struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX]; +}; + +struct ice_hash_ctx { + struct ice_hash_gtpu_ctx gtpu4; + struct ice_hash_gtpu_ctx gtpu6; +}; + +struct ice_acl_conf { + struct ice_fdir_fltr input; + uint64_t input_set; +}; + +/** + * A structure used to define fields of ACL related info. 
+ */ +struct ice_acl_info { + struct ice_acl_conf conf; + struct rte_bitmap *slots; + uint64_t hw_entry_id[MAX_ACL_ENTRIES]; }; struct ice_pf { @@ -240,11 +415,20 @@ struct ice_pf { struct ice_res_pool_info qp_pool; /*Queue pair pool */ struct ice_res_pool_info msix_pool; /* MSIX interrupt pool */ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ - struct ether_addr dev_addr; /* PF device mac address */ + struct rte_ether_addr dev_addr; /* PF device mac address */ uint64_t flags; /* PF feature flags */ uint16_t hash_lut_size; /* The size of hash lookup table */ uint16_t lan_nb_qp_max; uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ + uint16_t base_queue; /* The base queue pairs index in the device */ + uint8_t *proto_xtr; /* Protocol extraction type for all queues */ + uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ + uint16_t fdir_qp_offset; + struct ice_fdir_info fdir; /* flow director info */ + struct ice_acl_info acl; /* ACL info */ + struct ice_hash_ctx hash_ctx; + uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; struct ice_hw_port_stats stats; /* internal packet statistics, it should be excluded from the total */ @@ -252,6 +436,28 @@ struct ice_pf { struct ice_eth_stats internal_stats; bool offset_loaded; bool adapter_stopped; + struct ice_flow_list flow_list; + rte_spinlock_t flow_ops_lock; + struct ice_parser_list rss_parser_list; + struct ice_parser_list perm_parser_list; + struct ice_parser_list dist_parser_list; + bool init_link_up; + uint64_t old_rx_bytes; + uint64_t old_tx_bytes; + uint64_t supported_rxdid; /* bitmap for supported RXDID */ + uint64_t rss_hf; +}; + +#define ICE_MAX_QUEUE_NUM 2048 + +/** + * Cache devargs parse result. 
+ */ +struct ice_devargs { + int safe_mode_support; + uint8_t proto_xtr_dflt; + int pipe_mode_support; + uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; }; /** @@ -263,9 +469,15 @@ struct ice_adapter { struct rte_eth_dev *eth_dev; struct ice_pf pf; bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + bool tx_vec_allowed; bool tx_simple_allowed; /* ptype mapping table */ uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned; + bool is_safe_mode; + struct ice_devargs devargs; + enum ice_pkg_type active_pkg_type; /* loaded ddp package type */ + uint16_t fdir_ref_cnt; }; struct ice_vsi_vlan_pvid_info { @@ -310,6 +522,19 @@ struct ice_vsi_vlan_pvid_info { #define ICE_PF_TO_ETH_DEV(pf) \ (((struct ice_pf *)pf)->adapter->eth_dev) +enum ice_pkg_type ice_load_pkg_type(struct ice_hw *hw); +struct ice_vsi * +ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type); +int +ice_release_vsi(struct ice_vsi *vsi); +void ice_vsi_enable_queues_intr(struct ice_vsi *vsi); +void ice_vsi_disable_queues_intr(struct ice_vsi *vsi); +void ice_vsi_queues_bind_intr(struct ice_vsi *vsi); +int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + struct ice_rss_hash_cfg *cfg); +int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + struct ice_rss_hash_cfg *cfg); + static inline int ice_align_floor(int n) { @@ -317,4 +542,44 @@ ice_align_floor(int n) return 0; return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); } + +#define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1)) + +#define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR)) + +#define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2)) + #endif /* _ICE_ETHDEV_H_ */
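
The ICE_ETH_OVERHEAD macro introduced above pins down the gap between an MTU and the corresponding maximum frame size: Ethernet header (14 bytes) + CRC (4 bytes) + two VLAN tags (2 * 4 bytes) = 26 bytes, so that a QinQ frame is fully accounted for. A minimal standalone sketch of that arithmetic follows; the RTE_ETHER_* constants are redefined here only so the example compiles on its own (in DPDK they come from rte_ether.h), and the program itself is illustrative rather than driver code.

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the DPDK constants, redefined so this sketch is standalone. */
#define RTE_ETHER_HDR_LEN	14	/* from rte_ether.h */
#define RTE_ETHER_CRC_LEN	4	/* from rte_ether.h */
#define ICE_VLAN_TAG_SIZE	4	/* from ice_ethdev.h */

/* Same definition as the macro added by this patch. */
#define ICE_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)

int main(void)
{
	uint16_t mtu = 1500;	/* standard Ethernet MTU */

	/* A QinQ frame carrying a 1500-byte payload may be up to
	 * 1500 + 26 = 1526 bytes on the wire.
	 */
	printf("max frame size = %u\n", (unsigned int)(mtu + ICE_ETH_OVERHEAD));
	return 0;
}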
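
The ICE_RXTX_BYTES_HIGH()/ICE_RXTX_BYTES_LOW() macros, together with the new old_rx_bytes/old_tx_bytes fields in ice_vsi and ice_pf, point at software extension of the hardware's 40-bit byte counters. This hunk does not show ICE_40_BIT_MASK, so the sketch below assumes it is ((1ULL << 40) - 1); extend_rxtx_bytes() is a hypothetical helper written for illustration, not a function from the driver.

#include <stdint.h>

#define ICE_40_BIT_MASK			((1ULL << 40) - 1)	/* assumed value */
#define ICE_RXTX_BYTES_HIGH(bytes)	((bytes) & ~ICE_40_BIT_MASK)
#define ICE_RXTX_BYTES_LOW(bytes)	((bytes) & ICE_40_BIT_MASK)

/* Fold a fresh 40-bit hardware reading into a 64-bit running total.
 * *old_bytes holds the total from the previous poll.
 */
static uint64_t
extend_rxtx_bytes(uint64_t *old_bytes, uint64_t hw_bytes_40bit)
{
	/* Keep the software-maintained bits above bit 39. */
	uint64_t total = ICE_RXTX_BYTES_HIGH(*old_bytes) + hw_bytes_40bit;

	/* If the 40-bit hardware counter wrapped since the last poll,
	 * credit one full wrap to the total.
	 */
	if (hw_bytes_40bit < ICE_RXTX_BYTES_LOW(*old_bytes))
		total += 1ULL << 40;

	*old_bytes = total;
	return total;
}

int main(void)
{
	uint64_t running_total = (1ULL << 40) - 100;	/* just below a wrap */

	/* The hardware counter has since wrapped and now reads 50 bytes,
	 * so the extended total should be 2^40 + 50.
	 */
	return extend_rxtx_bytes(&running_total, 50) == (1ULL << 40) + 50 ? 0 : 1;
}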