X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_ethdev.h;h=2e3e45f3d7695a457f2ff4097f0540d124a874ec;hb=b53d106d34b5c638f5a2cbdfee0da5bd42d4383f;hp=37b956e2f3596343cfc9bb60455a811688e11129;hpb=62451c94addc54475ef4580eec2799b8815f696a;p=dpdk.git

diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 37b956e2f3..2e3e45f3d7 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -6,17 +6,18 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 
 #include "base/ice_common.h"
 #include "base/ice_adminq_cmd.h"
-
-#define ICE_VLAN_TAG_SIZE 4
+#include "base/ice_flow.h"
 
 #define ICE_ADMINQ_LEN 32
 #define ICE_SBIOQ_LEN 32
 #define ICE_MAILBOXQ_LEN 32
+#define ICE_SBQ_LEN 64
 #define ICE_ADMINQ_BUF_SZ 4096
 #define ICE_SBIOQ_BUF_SZ 4096
 #define ICE_MAILBOXQ_BUF_SZ 4096
@@ -49,6 +50,8 @@
 #define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
 #define ICE_MAX_PKG_FILENAME_SIZE 256
 
+#define MAX_ACL_NORMAL_ENTRIES 256
+
 /**
  * vlan_id is a 12 bit number.
  * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
@@ -112,26 +115,27 @@
 			       ICE_FLAG_VF_MAC_BY_PF)
 
 #define ICE_RSS_OFFLOAD_ALL ( \
-	ETH_RSS_IPV4 | \
-	ETH_RSS_FRAG_IPV4 | \
-	ETH_RSS_NONFRAG_IPV4_TCP | \
-	ETH_RSS_NONFRAG_IPV4_UDP | \
-	ETH_RSS_NONFRAG_IPV4_SCTP | \
-	ETH_RSS_NONFRAG_IPV4_OTHER | \
-	ETH_RSS_IPV6 | \
-	ETH_RSS_FRAG_IPV6 | \
-	ETH_RSS_NONFRAG_IPV6_TCP | \
-	ETH_RSS_NONFRAG_IPV6_UDP | \
-	ETH_RSS_NONFRAG_IPV6_SCTP | \
-	ETH_RSS_NONFRAG_IPV6_OTHER | \
-	ETH_RSS_L2_PAYLOAD)
+	RTE_ETH_RSS_IPV4 | \
+	RTE_ETH_RSS_FRAG_IPV4 | \
+	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+	RTE_ETH_RSS_IPV6 | \
+	RTE_ETH_RSS_FRAG_IPV6 | \
+	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+	RTE_ETH_RSS_L2_PAYLOAD)
 
 /**
  * The overhead from MTU to max frame size.
  * Considering QinQ packet, the VLAN tag needs to be counted twice.
  */
 #define ICE_ETH_OVERHEAD \
-	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
+	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
+#define ICE_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_ETH_OVERHEAD)
 
 #define ICE_RXTX_BYTES_HIGH(bytes) ((bytes) & ~ICE_40_BIT_MASK)
 #define ICE_RXTX_BYTES_LOW(bytes) ((bytes) & ICE_40_BIT_MASK)
@@ -139,6 +143,12 @@
 /* Max number of flexible descriptor rxdid */
 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64
 
+/* Per-channel register definitions */
+#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
+#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8))
+#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16))
+#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16))
+
 /* DDP package type */
 enum ice_pkg_type {
 	ICE_PKG_TYPE_UNKNOWN,
@@ -146,6 +156,12 @@ enum ice_pkg_type {
 	ICE_PKG_TYPE_COMMS,
 };
 
+enum pps_type {
+	PPS_NONE,
+	PPS_PIN,
+	PPS_MAX,
+};
+
 struct ice_adapter;
 
 /**
@@ -163,11 +179,19 @@ struct ice_mac_filter {
 	struct ice_mac_filter_info mac_info;
 };
 
+struct ice_vlan {
+	uint16_t tpid;
+	uint16_t vid;
+};
+
+#define ICE_VLAN(tpid, vid) \
+	((struct ice_vlan){ tpid, vid })
+
 /**
  * VLAN filter structure
  */
 struct ice_vlan_filter_info {
-	uint16_t vlan_id;
+	struct ice_vlan vlan;
 };
 
 TAILQ_HEAD(ice_vlan_filter_list, ice_vlan_filter);
@@ -218,7 +242,7 @@ struct ice_vsi {
 	 * needs to add, HW needs to know the layout that VSIs are organized.
 	 * Besides that, VSI isan element and can't switch packets, which needs
	 * to add new component VEB to perform switching. So, a new VSI needs
-	 * to specify the the uplink VSI (Parent VSI) before created. The
+	 * to specify the uplink VSI (Parent VSI) before created. The
 	 * uplink VSI will check whether it had a VEB to switch packets. If no,
 	 * it will try to create one. Then, uplink VSI will move the new VSI
 	 * into its' sib_vsi_list to manage all the downlink VSI.
@@ -289,9 +313,14 @@ struct ice_fdir_filter_conf {
 	struct ice_fdir_counter *counter; /* flow specific counter context */
 	struct rte_flow_action_count act_count;
 
-	uint64_t input_set;
-	uint64_t outer_input_set; /* only for tunnel packets outer fields */
+	uint64_t input_set_o; /* used for non-tunnel or tunnel outer fields */
+	uint64_t input_set_i; /* only for tunnel inner fields */
 	uint32_t mark_flag;
+
+	struct ice_parser_profile *prof;
+	bool parser_ena;
+	u8 *pkt_buf;
+	u8 pkt_len;
 };
 
 #define ICE_MAX_FDIR_FILTER_NUM (1024 * 16)
@@ -365,37 +394,38 @@ struct ice_fdir_info {
 	struct ice_fdir_counter_pool_container counter;
 };
 
-#define ICE_HASH_CFG_VALID(p) \
-	((p)->hash_fld != 0 && (p)->pkt_hdr != 0)
-
-#define ICE_HASH_CFG_RESET(p) do { \
-	(p)->hash_fld = 0; \
-	(p)->pkt_hdr = 0; \
-} while (0)
-
-#define ICE_HASH_CFG_IS_ROTATING(p) \
-	((p)->rotate == true)
+#define ICE_HASH_GTPU_CTX_EH_IP 0
+#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1
+#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2
+#define ICE_HASH_GTPU_CTX_UP_IP 3
+#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4
+#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5
+#define ICE_HASH_GTPU_CTX_DW_IP 6
+#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7
+#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8
+#define ICE_HASH_GTPU_CTX_MAX 9
 
-#define ICE_HASH_CFG_ROTATE_START(p) \
-	((p)->rotate = true)
+struct ice_hash_gtpu_ctx {
+	struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
+};
 
-#define ICE_HASH_CFG_ROTATE_STOP(p) \
-	((p)->rotate = false)
+struct ice_hash_ctx {
+	struct ice_hash_gtpu_ctx gtpu4;
+	struct ice_hash_gtpu_ctx gtpu6;
+};
 
-struct ice_hash_cfg {
-	uint32_t pkt_hdr;
-	uint64_t hash_fld;
-	bool rotate; /* rotate l3 rule after l4 rule. */
-	bool symm;
+struct ice_acl_conf {
+	struct ice_fdir_fltr input;
+	uint64_t input_set;
 };
 
-struct ice_hash_gtpu_ctx {
-	struct ice_hash_cfg ipv4;
-	struct ice_hash_cfg ipv6;
-	struct ice_hash_cfg ipv4_udp;
-	struct ice_hash_cfg ipv6_udp;
-	struct ice_hash_cfg ipv4_tcp;
-	struct ice_hash_cfg ipv6_tcp;
+/**
+ * A structure used to define fields of ACL related info.
+ */
+struct ice_acl_info {
+	struct ice_acl_conf conf;
+	struct rte_bitmap *slots;
+	uint64_t hw_entry_id[MAX_ACL_NORMAL_ENTRIES];
 };
 
 struct ice_pf {
@@ -421,7 +451,8 @@ struct ice_pf {
 	uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
 	uint16_t fdir_qp_offset;
 	struct ice_fdir_info fdir; /* flow director info */
-	struct ice_hash_gtpu_ctx gtpu_hash_ctx;
+	struct ice_acl_info acl; /* ACL info */
+	struct ice_hash_ctx hash_ctx;
 	uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	struct ice_hw_port_stats stats_offset;
@@ -440,18 +471,39 @@ struct ice_pf {
 	uint64_t old_rx_bytes;
 	uint64_t old_tx_bytes;
 	uint64_t supported_rxdid; /* bitmap for supported RXDID */
+	uint64_t rss_hf;
 };
 
 #define ICE_MAX_QUEUE_NUM 2048
+#define ICE_MAX_PIN_NUM 4
 
 /**
  * Cache devargs parse result.
  */
 struct ice_devargs {
+	int rx_low_latency;
 	int safe_mode_support;
 	uint8_t proto_xtr_dflt;
 	int pipe_mode_support;
 	uint8_t proto_xtr[ICE_MAX_QUEUE_NUM];
+	uint8_t pin_idx;
+	uint8_t pps_out_ena;
+};
+
+/**
+ * Structure to store fdir fv entry.
+ */
+struct ice_fdir_prof_info {
+	struct ice_parser_profile prof;
+	u64 fdir_actived_cnt;
+};
+
+/**
+ * Structure to store rss fv entry.
+ */
+struct ice_rss_prof_info {
+	struct ice_parser_profile prof;
+	bool symm;
 };
 
 /**
@@ -460,7 +512,6 @@ struct ice_devargs {
 struct ice_adapter {
 	/* Common for both PF and VF */
 	struct ice_hw hw;
-	struct rte_eth_dev *eth_dev;
 	struct ice_pf pf;
 	bool rx_bulk_alloc_allowed;
 	bool rx_vec_allowed;
@@ -472,6 +523,21 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
+	bool ptp_ena;
+	uint64_t time_hw;
+	struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
+	struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
+#ifdef RTE_ARCH_X86
+	bool rx_use_avx2;
+	bool rx_use_avx512;
+	bool tx_use_avx2;
+	bool tx_use_avx512;
+	bool rx_vec_offload_support;
+#endif
 };
 
 struct ice_vsi_vlan_pvid_info {
@@ -505,8 +571,6 @@ struct ice_vsi_vlan_pvid_info {
 	(&(((struct ice_vsi *)vsi)->adapter->hw))
 #define ICE_VSI_TO_PF(vsi) \
 	(&(((struct ice_vsi *)vsi)->adapter->pf))
-#define ICE_VSI_TO_ETH_DEV(vsi) \
-	(((struct ice_vsi *)vsi)->adapter->eth_dev)
 
 /* ICE_PF_TO */
 #define ICE_PF_TO_HW(pf) \
@@ -516,7 +580,8 @@ struct ice_vsi_vlan_pvid_info {
 #define ICE_PF_TO_ETH_DEV(pf) \
 	(((struct ice_pf *)pf)->adapter->eth_dev)
 
-enum ice_pkg_type ice_load_pkg_type(struct ice_hw *hw);
+int
+ice_load_pkg(struct ice_adapter *adapter, bool use_dsn, uint64_t dsn);
 struct ice_vsi *
 ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
 int
@@ -525,9 +590,9 @@ void ice_vsi_enable_queues_intr(struct ice_vsi *vsi);
 void ice_vsi_disable_queues_intr(struct ice_vsi *vsi);
 void ice_vsi_queues_bind_intr(struct ice_vsi *vsi);
 int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
-			 uint64_t hash_fld, uint32_t pkt_hdr, bool symm);
+			 struct ice_rss_hash_cfg *cfg);
 int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
-			 uint64_t hash_fld, uint32_t pkt_hdr);
+			 struct ice_rss_hash_cfg *cfg);
 
 static inline int
 ice_align_floor(int n)
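
Usage sketch (not part of the patch): the snippet below illustrates how the reworked frame-size macros and the new struct ice_vlan / ICE_VLAN() helper introduced above are meant to be used. It re-declares the RTE_ETHER_* and RTE_VLAN_HLEN constants locally with their assumed rte_ether.h values so it builds without the DPDK tree, and the TPID/VID values are arbitrary examples; treat it as an illustration rather than driver code. Carrying a (tpid, vid) pair instead of a bare vlan_id is presumably what lets the VLAN filter distinguish an 802.1Q tag from an 802.1ad outer tag.

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalents of the rte_ether.h constants: 14-byte Ethernet
 * header, 4-byte CRC, 4-byte 802.1Q tag, 1500-byte default MTU.
 */
#define RTE_ETHER_HDR_LEN 14
#define RTE_ETHER_CRC_LEN 4
#define RTE_VLAN_HLEN     4
#define RTE_ETHER_MTU     1500

/* Same shape as the macros and struct added by the patch above. */
#define ICE_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN * 2)
#define ICE_ETH_MAX_LEN (RTE_ETHER_MTU + ICE_ETH_OVERHEAD)

struct ice_vlan {
	uint16_t tpid;
	uint16_t vid;
};

#define ICE_VLAN(tpid, vid) \
	((struct ice_vlan){ tpid, vid })

int main(void)
{
	/* QinQ-aware overhead: 14 + 4 + 2 * 4 = 26 bytes, so a 1500-byte
	 * MTU corresponds to a 1526-byte maximum frame.
	 */
	printf("overhead=%d max_frame=%d\n", ICE_ETH_OVERHEAD, ICE_ETH_MAX_LEN);

	/* Compound-literal initializer; 0x88a8 (802.1ad) and VID 100 are
	 * arbitrary example values.
	 */
	struct ice_vlan outer = ICE_VLAN(0x88a8, 100);
	printf("tpid=0x%04x vid=%u\n",
	       (unsigned int)outer.tpid, (unsigned int)outer.vid);
	return 0;
}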