X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_ethdev.h;h=393dfeab1c39868d5251a123e747dde27717f298;hb=185fe122f4899f48569d0086c9dcacc431ef0967;hp=1516cf6cb1a7105cc1f282fd53d89196f1f89b56;hpb=37f4ac5f5946655fddedb8a78c079f2b190fa70e;p=dpdk.git diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 1516cf6cb1..393dfeab1c 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -42,6 +42,13 @@ #define ICE_MAX_PKT_TYPE 1024 +/* DDP package search path */ +#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg" +#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg" +#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/" +#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/" +#define ICE_MAX_PKG_FILENAME_SIZE 256 + /** * vlan_id is a 12 bit number. * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. @@ -105,11 +112,13 @@ ICE_FLAG_VF_MAC_BY_PF) #define ICE_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IPV4 | \ ETH_RSS_FRAG_IPV4 | \ ETH_RSS_NONFRAG_IPV4_TCP | \ ETH_RSS_NONFRAG_IPV4_UDP | \ ETH_RSS_NONFRAG_IPV4_SCTP | \ ETH_RSS_NONFRAG_IPV4_OTHER | \ + ETH_RSS_IPV6 | \ ETH_RSS_FRAG_IPV6 | \ ETH_RSS_NONFRAG_IPV6_TCP | \ ETH_RSS_NONFRAG_IPV6_UDP | \ @@ -117,13 +126,30 @@ ETH_RSS_NONFRAG_IPV6_OTHER | \ ETH_RSS_L2_PAYLOAD) +/** + * The overhead from MTU to max frame size. + * Considering QinQ packet, the VLAN tag needs to be counted twice. + */ +#define ICE_ETH_OVERHEAD \ + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2) + +#define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK) +#define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK) + +/* DDP package type */ +enum ice_pkg_type { + ICE_PKG_TYPE_UNKNOWN, + ICE_PKG_TYPE_OS_DEFAULT, + ICE_PKG_TYPE_COMMS, +}; + struct ice_adapter; /** * MAC filter structure */ struct ice_mac_filter_info { - struct ether_addr mac_addr; + struct rte_ether_addr mac_addr; }; TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter); @@ -225,6 +251,144 @@ struct ice_vsi { struct ice_eth_stats eth_stats_offset; struct ice_eth_stats eth_stats; bool offset_loaded; + uint64_t old_rx_bytes; + uint64_t old_tx_bytes; +}; + +enum proto_xtr_type { + PROTO_XTR_NONE, + PROTO_XTR_VLAN, + PROTO_XTR_IPV4, + PROTO_XTR_IPV6, + PROTO_XTR_IPV6_FLOW, + PROTO_XTR_TCP, +}; + +enum ice_fdir_tunnel_type { + ICE_FDIR_TUNNEL_TYPE_NONE = 0, + ICE_FDIR_TUNNEL_TYPE_VXLAN, + ICE_FDIR_TUNNEL_TYPE_GTPU, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH, +}; + +struct rte_flow; +TAILQ_HEAD(ice_flow_list, rte_flow); + +struct ice_flow_parser_node; +TAILQ_HEAD(ice_parser_list, ice_flow_parser_node); + +struct ice_fdir_filter_conf { + struct ice_fdir_fltr input; + enum ice_fdir_tunnel_type tunnel_type; + + struct ice_fdir_counter *counter; /* flow specific counter context */ + struct rte_flow_action_count act_count; + + uint64_t input_set; +}; + +#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16) + +struct ice_fdir_fltr_pattern { + enum ice_fltr_ptype flow_type; + + union { + struct ice_fdir_v4 v4; + struct ice_fdir_v6 v6; + } ip, mask; + + struct ice_fdir_udp_gtp gtpu_data; + struct ice_fdir_udp_gtp gtpu_mask; + + struct ice_fdir_extra ext_data; + struct ice_fdir_extra ext_mask; + + enum ice_fdir_tunnel_type tunnel_type; +}; + +#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE 1 +#define ICE_FDIR_COUNTER_MAX_POOL_SIZE 32 +#define ICE_FDIR_COUNTERS_PER_BLOCK 256 +#define ICE_FDIR_COUNTER_INDEX(base_idx) \ + ((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK) +struct 
ice_fdir_counter_pool; + +struct ice_fdir_counter { + TAILQ_ENTRY(ice_fdir_counter) next; + struct ice_fdir_counter_pool *pool; + uint8_t shared; + uint32_t ref_cnt; + uint32_t id; + uint64_t hits; + uint64_t bytes; + uint32_t hw_index; +}; + +TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter); + +struct ice_fdir_counter_pool { + TAILQ_ENTRY(ice_fdir_counter_pool) next; + struct ice_fdir_counter_list counter_list; + struct ice_fdir_counter counters[0]; +}; + +TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool); + +struct ice_fdir_counter_pool_container { + struct ice_fdir_counter_pool_list pool_list; + struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE]; + uint8_t index_free; +}; + +/** + * A structure used to define fields of a FDIR related info. + */ +struct ice_fdir_info { + struct ice_vsi *fdir_vsi; /* pointer to fdir VSI structure */ + struct ice_tx_queue *txq; + struct ice_rx_queue *rxq; + void *prg_pkt; /* memory for fdir program packet */ + uint64_t dma_addr; /* physic address of packet memory*/ + const struct rte_memzone *mz; + struct ice_fdir_filter_conf conf; + + struct ice_fdir_filter_conf **hash_map; + struct rte_hash *hash_table; + + struct ice_fdir_counter_pool_container counter; +}; + +#define ICE_HASH_CFG_VALID(p) \ + ((p)->hash_fld != 0 && (p)->pkt_hdr != 0) + +#define ICE_HASH_CFG_RESET(p) do { \ + (p)->hash_fld = 0; \ + (p)->pkt_hdr = 0; \ +} while (0) + +#define ICE_HASH_CFG_IS_ROTATING(p) \ + ((p)->rotate == true) + +#define ICE_HASH_CFG_ROTATE_START(p) \ + ((p)->rotate = true) + +#define ICE_HASH_CFG_ROTATE_STOP(p) \ + ((p)->rotate = false) + +struct ice_hash_cfg { + uint32_t pkt_hdr; + uint64_t hash_fld; + bool rotate; /* rotate l3 rule after l4 rule. */ + bool symm; +}; + +struct ice_hash_gtpu_ctx { + struct ice_hash_cfg ipv4; + struct ice_hash_cfg ipv6; + struct ice_hash_cfg ipv4_udp; + struct ice_hash_cfg ipv6_udp; + struct ice_hash_cfg ipv4_tcp; + struct ice_hash_cfg ipv6_tcp; }; struct ice_pf { @@ -240,11 +404,19 @@ struct ice_pf { struct ice_res_pool_info qp_pool; /*Queue pair pool */ struct ice_res_pool_info msix_pool; /* MSIX interrupt pool */ struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ - struct ether_addr dev_addr; /* PF device mac address */ + struct rte_ether_addr dev_addr; /* PF device mac address */ uint64_t flags; /* PF feature flags */ uint16_t hash_lut_size; /* The size of hash lookup table */ uint16_t lan_nb_qp_max; uint16_t lan_nb_qps; /* The number of queue pairs of LAN */ + uint16_t base_queue; /* The base queue pairs index in the device */ + uint8_t *proto_xtr; /* Protocol extraction type for all queues */ + uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ + uint16_t fdir_qp_offset; + struct ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_ctx gtpu_hash_ctx; + uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; struct ice_hw_port_stats stats; /* internal packet statistics, it should be excluded from the total */ @@ -252,6 +424,27 @@ struct ice_pf { struct ice_eth_stats internal_stats; bool offset_loaded; bool adapter_stopped; + struct ice_flow_list flow_list; + rte_spinlock_t flow_ops_lock; + struct ice_parser_list rss_parser_list; + struct ice_parser_list perm_parser_list; + struct ice_parser_list dist_parser_list; + bool init_link_up; + uint64_t old_rx_bytes; + uint64_t old_tx_bytes; +}; + +#define ICE_MAX_QUEUE_NUM 2048 + +/** + * Cache 
devargs parse result. + */ +struct ice_devargs { + int safe_mode_support; + uint8_t proto_xtr_dflt; + int pipe_mode_support; + int flow_mark_support; + uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; }; /** @@ -263,10 +456,14 @@ struct ice_adapter { struct rte_eth_dev *eth_dev; struct ice_pf pf; bool rx_bulk_alloc_allowed; + bool rx_vec_allowed; + bool tx_vec_allowed; bool tx_simple_allowed; /* ptype mapping table */ uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned; bool is_safe_mode; + struct ice_devargs devargs; + enum ice_pkg_type active_pkg_type; /* loaded ddp package type */ }; struct ice_vsi_vlan_pvid_info { @@ -311,6 +508,19 @@ struct ice_vsi_vlan_pvid_info { #define ICE_PF_TO_ETH_DEV(pf) \ (((struct ice_pf *)pf)->adapter->eth_dev) +enum ice_pkg_type ice_load_pkg_type(struct ice_hw *hw); +struct ice_vsi * +ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type); +int +ice_release_vsi(struct ice_vsi *vsi); +void ice_vsi_enable_queues_intr(struct ice_vsi *vsi); +void ice_vsi_disable_queues_intr(struct ice_vsi *vsi); +void ice_vsi_queues_bind_intr(struct ice_vsi *vsi); +int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr, bool symm); +int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr); + static inline int ice_align_floor(int n) { @@ -318,4 +528,44 @@ ice_align_floor(int n) return 0; return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n)); } + +#define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1)) + +#define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \ + ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR)) + +#define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \ + (((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \ + ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2)) + #endif /* _ICE_ETHDEV_H_ */
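
The ICE_ETH_OVERHEAD macro added above is what turns a configured MTU into the on-wire maximum frame size; the VLAN tag size is counted twice to cover QinQ frames. Below is a minimal standalone sketch of that arithmetic. The numeric values are redefined locally so the sketch compiles without the DPDK headers and are assumed to match DPDK's definitions (14-byte Ethernet header, 4-byte CRC, 4-byte VLAN tag).

/* Standalone illustration of how ICE_ETH_OVERHEAD relates MTU to the
 * maximum frame size.  The constants below mirror the assumed DPDK
 * values; only the macro itself is taken from the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define RTE_ETHER_HDR_LEN	14	/* dst MAC + src MAC + ethertype */
#define RTE_ETHER_CRC_LEN	4	/* trailing FCS */
#define ICE_VLAN_TAG_SIZE	4	/* one 802.1Q tag */

/* QinQ carries two VLAN tags, so the tag size is counted twice. */
#define ICE_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)

int main(void)
{
	uint16_t mtu = 1500;
	uint32_t max_frame_size = mtu + ICE_ETH_OVERHEAD;

	/* 1500 + 14 + 4 + 8 = 1526 bytes on the wire for a QinQ frame. */
	printf("MTU %u -> max frame size %u\n", mtu, max_frame_size);
	return 0;
}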
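
The new old_rx_bytes/old_tx_bytes fields and the ICE_RXTX_BYTES_HIGH()/ICE_RXTX_BYTES_LOW() helpers exist because the hardware byte counters are only 40 bits wide and wrap around. The sketch below shows the general extension technique, not the driver's actual statistics code: the value of ICE_40_BIT_MASK is an assumption (low 40 bits set), and ice_extend_40bit_counter() is a hypothetical helper name.

/* Minimal sketch of extending a wrapping 40-bit hardware byte counter
 * to a monotonically increasing 64-bit value, using the HIGH/LOW
 * helpers from the patch.  ICE_40_BIT_MASK is assumed here.
 */
#include <stdint.h>

#define ICE_40_BIT_WIDTH	40
#define ICE_40_BIT_MASK		((1ULL << ICE_40_BIT_WIDTH) - 1)	/* assumed */
#define ICE_RXTX_BYTES_HIGH(bytes)	((bytes) & ~ICE_40_BIT_MASK)
#define ICE_RXTX_BYTES_LOW(bytes)	((bytes) & ICE_40_BIT_MASK)

/* old_bytes: previous 64-bit extended value (e.g. vsi->old_rx_bytes);
 * hw_bytes:  current raw 40-bit reading from hardware.
 * Returns the new extended value to store back into old_bytes.
 */
uint64_t
ice_extend_40bit_counter(uint64_t old_bytes, uint64_t hw_bytes)
{
	/* Keep the high bits accumulated so far and splice in the new
	 * low 40 bits reported by hardware.
	 */
	uint64_t bytes = ICE_RXTX_BYTES_HIGH(old_bytes) |
			 ICE_RXTX_BYTES_LOW(hw_bytes);

	/* If the low part went backwards, the 40-bit counter wrapped,
	 * so account for one full 2^40 rollover.
	 */
	if (ICE_RXTX_BYTES_LOW(old_bytes) > ICE_RXTX_BYTES_LOW(hw_bytes))
		bytes += 1ULL << ICE_40_BIT_WIDTH;

	return bytes;
}

In the driver the extended values feed the per-VSI and per-PF eth_stats; the sketch only shows the wrap handling itself.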
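
The ICE_PKG_FILE_* macros describe where a DDP package may live on the filesystem. The sketch below shows one plausible lookup order (a device-specific file under the updates/ and default search paths, then the generic ice.pkg files); both the order and the ice_find_ddp_pkg() helper are assumptions for illustration, not the driver's confirmed logic.

/* Sketch of a DDP package lookup built on the search-path macros from
 * the patch.  The preference order is an assumption; ice_find_ddp_pkg()
 * is a hypothetical helper.
 */
#include <stdio.h>
#include <unistd.h>

#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE 256

/* Fill 'path' with the first readable package file; return 0 on success. */
int ice_find_ddp_pkg(const char *dev_specific_name, char *path, size_t len)
{
	const char *dirs[] = {
		ICE_PKG_FILE_SEARCH_PATH_UPDATES,
		ICE_PKG_FILE_SEARCH_PATH_DEFAULT,
	};
	const char *fallbacks[] = {
		ICE_PKG_FILE_UPDATES,
		ICE_PKG_FILE_DEFAULT,
	};
	size_t i;

	/* 1. Look for a device-specific package, e.g. "ice-<serial>.pkg". */
	for (i = 0; i < sizeof(dirs) / sizeof(dirs[0]); i++) {
		snprintf(path, len, "%s%s", dirs[i], dev_specific_name);
		if (access(path, R_OK) == 0)
			return 0;
	}

	/* 2. Fall back to the generic ice.pkg locations. */
	for (i = 0; i < sizeof(fallbacks) / sizeof(fallbacks[0]); i++) {
		snprintf(path, len, "%s", fallbacks[i]);
		if (access(path, R_OK) == 0)
			return 0;
	}

	return -1;	/* no package found */
}

A caller would size the path buffer with ICE_MAX_PKG_FILENAME_SIZE; failing to find any package is the situation the is_safe_mode flag in struct ice_adapter and the safe_mode_support devarg appear to deal with.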