diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 77269e9b73..112567eecc 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2020
+ * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #ifndef _TXGBE_ETHDEV_H_
@@ -9,12 +10,18 @@
 
 #include "base/txgbe.h"
 #include "txgbe_ptypes.h"
+#ifdef RTE_LIB_SECURITY
+#include "txgbe_ipsec.h"
+#endif
 #include <rte_flow.h>
+#include <rte_flow_driver.h>
 #include <rte_time.h>
 #include <rte_ethdev.h>
 #include <rte_ethdev_core.h>
 #include <rte_hash.h>
 #include <rte_hash_crc.h>
+#include <rte_tm_driver.h>
+#include <ethdev_driver.h>
 
 /* need update link, bit flag */
 #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -22,6 +29,7 @@
 #define TXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
 #define TXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
 #define TXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
+#define TXGBE_FLAG_NEED_AN_CONFIG (uint32_t)(1 << 5)
 
 /*
  * Defines that were not part of txgbe_type.h as they are not used by the
@@ -92,6 +100,18 @@ struct txgbe_fdir_filter {
 /* list of fdir filters */
 TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);
 
+struct txgbe_fdir_rule {
+	struct txgbe_hw_fdir_mask mask;
+	struct txgbe_atr_input input; /* key of fdir filter */
+	bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
+	bool b_mask; /* If TRUE, mask has meaning. */
+	enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+	uint32_t fdirflags; /* drop or forward */
+	uint32_t soft_id; /* a unique value for this rule */
+	uint8_t queue; /* assigned rx queue */
+	uint8_t flex_bytes_offset;
+};
+
 struct txgbe_hw_fdir_info {
 	struct txgbe_hw_fdir_mask mask;
 	uint8_t flex_bytes_offset;
@@ -110,13 +130,19 @@ struct txgbe_hw_fdir_info {
 	bool mask_added; /* If already got mask from consistent filter */
 };
 
+struct txgbe_rte_flow_rss_conf {
+	struct rte_flow_action_rss conf; /**< RSS parameters. */
+	uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+	uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queue indices to use. */
+};
+
 /* structure for interrupt relative data */
 struct txgbe_interrupt {
 	uint32_t flags;
 	uint32_t mask_misc;
-	/* to save original mask during delayed handler */
-	uint32_t mask_misc_orig;
-	uint32_t mask[2];
+	uint32_t mask_misc_orig; /* save mask during delayed handler */
+	uint64_t mask;
+	uint64_t mask_orig; /* save mask during delayed handler */
 };
 
 #define TXGBE_NB_STAT_MAPPING 32
@@ -147,13 +173,6 @@ struct txgbe_uta_info {
 	uint32_t uta_shadow[TXGBE_MAX_UTA];
 };
 
-#define TXGBE_MAX_MIRROR_RULES 4 /* Maximum nb. of mirror rules. */
-
-struct txgbe_mirror_info {
-	struct rte_eth_mirror_conf mr_conf[TXGBE_MAX_MIRROR_RULES];
-	/* store PF mirror rules configuration */
-};
-
 struct txgbe_vf_info {
 	uint8_t vf_mac_addresses[RTE_ETHER_ADDR_LEN];
 	uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
@@ -220,6 +239,8 @@ struct txgbe_filter_info {
 	struct txgbe_5tuple_filter_list fivetuple_list;
 	/* store the SYN filter info */
 	uint32_t syn_info;
+	/* store the rss filter info */
+	struct txgbe_rte_flow_rss_conf rss_info;
 };
 
 struct txgbe_l2_tn_key {
@@ -244,11 +265,78 @@ struct txgbe_l2_tn_info {
 	uint16_t e_tag_ether_type; /* ether type for e-tag */
 };
 
+struct rte_flow {
+	enum rte_filter_type filter_type;
+	void *rule;
+};
+
 /* The configuration of bandwidth */
 struct txgbe_bw_conf {
 	uint8_t tc_num; /* Number of TCs. */
 };
 
+/* Struct to store Traffic Manager shaper profile. */
+struct txgbe_tm_shaper_profile {
+	TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
+	uint32_t shaper_profile_id;
+	uint32_t reference_count;
+	struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
+
+/* node type of Traffic Manager */
+enum txgbe_tm_node_type {
+	TXGBE_TM_NODE_TYPE_PORT,
+	TXGBE_TM_NODE_TYPE_TC,
+	TXGBE_TM_NODE_TYPE_QUEUE,
+	TXGBE_TM_NODE_TYPE_MAX,
+};
+
+/* Struct to store Traffic Manager node configuration. */
+struct txgbe_tm_node {
+	TAILQ_ENTRY(txgbe_tm_node) node;
+	uint32_t id;
+	uint32_t priority;
+	uint32_t weight;
+	uint32_t reference_count;
+	uint16_t no;
+	struct txgbe_tm_node *parent;
+	struct txgbe_tm_shaper_profile *shaper_profile;
+	struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct txgbe_tm_conf {
+	struct txgbe_shaper_profile_list shaper_profile_list;
+	struct txgbe_tm_node *root; /* root node - port */
+	struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
+	struct txgbe_tm_node_list queue_list; /* node list for all the queues */
+	/**
+	 * The number of added TC nodes.
+	 * It should be no more than the TC number of this port.
+	 */
+	uint32_t nb_tc_node;
+	/**
+	 * The number of added queue nodes.
+	 * It should be no more than the queue number of this port.
+	 */
+	uint32_t nb_queue_node;
+	/**
+	 * This flag is used to check if the APP can change the TM node
+	 * configuration.
+	 * When it's true, the configuration has been applied to HW and
+	 * the APP should not change it.
+	 * As we don't support on-the-fly configuration, when starting
+	 * the port, the APP should call the hierarchy_commit API to set
+	 * this flag to true. When stopping the port, this flag should be
+	 * set to false.
+	 */
+	bool committed;
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
 */
@@ -261,16 +349,19 @@ struct txgbe_adapter {
 	struct txgbe_vfta shadow_vfta;
 	struct txgbe_hwstrip hwstrip;
 	struct txgbe_dcb_config dcb_config;
-	struct txgbe_mirror_info mr_data;
 	struct txgbe_vf_info *vfdata;
 	struct txgbe_uta_info uta_info;
 	struct txgbe_filter_info filter;
 	struct txgbe_l2_tn_info l2_tn;
 	struct txgbe_bw_conf bw_conf;
+#ifdef RTE_LIB_SECURITY
+	struct txgbe_ipsec ipsec;
+#endif
 	bool rx_bulk_alloc_allowed;
 	struct rte_timecounter systime_tc;
 	struct rte_timecounter rx_tstamp_tc;
 	struct rte_timecounter tx_tstamp_tc;
+	struct txgbe_tm_conf tm_conf;
 
 	/* For RSS reta table update */
 	uint8_t rss_reta_updated;
@@ -321,6 +412,11 @@ struct txgbe_adapter {
 #define TXGBE_DEV_BW_CONF(dev) \
 	(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)
 
+#define TXGBE_DEV_TM_CONF(dev) \
+	(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)
+
+#define TXGBE_DEV_IPSEC(dev) \
+	(&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
 
 /*
  * RX/TX function prototypes
@@ -329,9 +425,9 @@ void txgbe_dev_clear_queues(struct rte_eth_dev *dev);
 
 void txgbe_dev_free_queues(struct rte_eth_dev *dev);
 
-void txgbe_dev_rx_queue_release(void *rxq);
+void txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
-void txgbe_dev_tx_queue_release(void *txq);
+void txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
@@ -373,6 +469,12 @@ void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo);
 
+int txgbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void txgbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void txgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
 uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
@@ -429,9 +531,23 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 int
 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 			       struct txgbe_l2_tunnel_conf *l2_tunnel);
 
+void txgbe_filterlist_init(void);
+void txgbe_filterlist_flush(void);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
 			uint8_t queue, uint8_t msix_vector);
 
+/*
+ * Flow director function prototypes
+ */
+int txgbe_fdir_configure(struct rte_eth_dev *dev);
+int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+				    uint16_t offset);
+int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+			      struct txgbe_fdir_rule *rule,
+			      bool del, bool update);
+
 void txgbe_configure_pb(struct rte_eth_dev *dev);
 void txgbe_configure_port(struct rte_eth_dev *dev);
 void txgbe_configure_dcb(struct rte_eth_dev *dev);
@@ -449,12 +565,30 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
 
 uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
+void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
 extern const struct rte_flow_ops txgbe_flow_ops;
 
+void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 			    uint16_t tx_rate, uint64_t q_msk);
+int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
+void txgbe_tm_conf_init(struct rte_eth_dev *dev);
+void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
 int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
 			       uint16_t tx_rate);
+int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+			const struct rte_flow_action_rss *in);
+int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+			  const struct rte_flow_action_rss *with);
+int txgbe_config_rss_filter(struct rte_eth_dev *dev,
+			    struct txgbe_rte_flow_rss_conf *conf, bool add);
+
 static inline int
 txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
 			      uint16_t ethertype)
@@ -507,6 +641,10 @@ txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
 	return idx;
 }
 
+#ifdef RTE_LIB_SECURITY
+int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+#endif
+
 /* High threshold controlling when to start sending XOFF frames. */
 #define TXGBE_FC_XOFF_HITH 128 /*KB*/
 /* Low threshold controlling when to start sending XON frames. */
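
The `committed` flag in the new struct txgbe_tm_conf documents the expected Traffic Manager workflow: the application builds a port/TC/queue hierarchy, then applies it with a hierarchy commit before starting the port. Below is a minimal application-side sketch of that workflow through the generic rte_tm API. It is not part of the patch: `app_setup_tm`, the APP_* IDs, and the 1 Gbps/4 KB shaper values are illustrative assumptions, and the literal level ids 0/1/2 simply mirror the TXGBE_TM_NODE_TYPE_PORT/TC/QUEUE enum added above.

#include <stdint.h>
#include <string.h>

#include <rte_tm.h>

/* Illustrative IDs; any unique values work. */
#define APP_SHAPER_PROFILE_ID 1
#define APP_NODE_PORT         100
#define APP_NODE_TC           200
#define APP_NODE_QUEUE        300

static int
app_setup_tm(uint16_t port_id)
{
	struct rte_tm_error err;
	struct rte_tm_shaper_params shaper;
	struct rte_tm_node_params np;
	int ret;

	/* 1 Gbps peak rate (in bytes/s) with a 4 KB burst; illustrative. */
	memset(&shaper, 0, sizeof(shaper));
	shaper.peak.rate = 125000000;
	shaper.peak.size = 4096;
	ret = rte_tm_shaper_profile_add(port_id, APP_SHAPER_PROFILE_ID,
					&shaper, &err);
	if (ret != 0)
		return ret;

	/* Root node: the port (level 0, nonleaf). */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	np.nonleaf.n_sp_priorities = 1;
	ret = rte_tm_node_add(port_id, APP_NODE_PORT, RTE_TM_NODE_ID_NULL,
			      0, 1, 0 /* level: port */, &np, &err);
	if (ret != 0)
		return ret;

	/* One TC under the port (level 1), rate-limited by the shaper. */
	np.shaper_profile_id = APP_SHAPER_PROFILE_ID;
	ret = rte_tm_node_add(port_id, APP_NODE_TC, APP_NODE_PORT,
			      0, 1, 1 /* level: TC */, &np, &err);
	if (ret != 0)
		return ret;

	/* One Tx queue under the TC (level 2, leaf). */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	ret = rte_tm_node_add(port_id, APP_NODE_QUEUE, APP_NODE_TC,
			      0, 1, 2 /* level: queue */, &np, &err);
	if (ret != 0)
		return ret;

	/* Apply the hierarchy to HW; this is the step that sets
	 * tm_conf.committed, and it must happen before
	 * rte_eth_dev_start() since on-the-fly changes are unsupported.
	 */
	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
}

Per the comment on `committed`, stopping the port clears the flag, so the hierarchy has to be rebuilt and recommitted before the next start.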
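
The new txgbe_rte_flow_rss_conf and txgbe_config_rss_filter() back the rte_flow RSS action, whose accepted configuration ends up cached in filter.rss_info. The sketch below shows the kind of application request involved; it is a hedged example, not driver code: `app_create_rss_flow`, the two-queue list, and the bare ETH/END pattern are illustrative assumptions, and the RTE_ETH_RSS_* names assume a DPDK 21.11-era tree.

#include <stdint.h>

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: request IP/TCP RSS spread over two Rx queues. */
static struct rte_flow *
app_create_rss_flow(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
		.key_len = 0, /* 0 with a NULL key keeps the current hash key */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;

	return rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
}

On success the returned handle is an instance of the driver-private struct rte_flow added in this patch (a filter_type tag plus a pointer to the stored rule).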