X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.h;h=cd6deabd60b38ea4441f830036949b60dc5f8edc;hb=046f1161956777e3afb13504acbe8df2ec3a383c;hp=5b84da23516ce3ffb3c3b1b935d7f6f258483f64;hpb=a286ebeb0714b0c766d175561538e331ec997bd6;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5b84da2351..cd6deabd60 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1,45 +1,23 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #ifndef _I40E_ETHDEV_H_
 #define _I40E_ETHDEV_H_
 
-#include <rte_eth_ctrl.h>
+#include <stdint.h>
+
 #include <rte_time.h>
 #include <rte_kvargs.h>
 #include <rte_hash.h>
+#include <rte_flow.h>
 #include <rte_flow_driver.h>
 #include <rte_tm_driver.h>
+#include "rte_pmd_i40e.h"
+
+#include "base/i40e_register.h"
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
 
 #define I40E_VLAN_TAG_SIZE 4
 
@@ -51,6 +29,7 @@
 #define I40E_NUM_DESC_ALIGN 32
 #define I40E_BUF_SIZE_MIN 1024
 #define I40E_FRAME_SIZE_MAX 9728
+#define I40E_TSO_FRAME_SIZE_MAX 262144
 #define I40E_QUEUE_BASE_ADDR_UNIT 128
 /* number of VSIs and queue default setting */
 #define I40E_MAX_QP_NUM_PER_VF 16
@@ -61,7 +40,8 @@
 #define I40E_NUM_MACADDR_MAX 64
 /* Maximum number of VFs */
 #define I40E_MAX_VF 128
-
+/*flag of no loopback*/
+#define I40E_AQ_LB_MODE_NONE 0x0
 /*
  * vlan_id is a 12 bit number.
  * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
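
As a minimal sketch of the bitmap layout the hunk context above describes (illustrative only, not part of the patch; the helper name is hypothetical): a 12-bit vlan_id selects one of the 128 32-bit VFTA words with its upper seven bits and a bit inside that word with its lower five bits.

/* illustrative only: mark a 12-bit vlan_id in the 4096-bit VFTA bitmap */
static inline void
vfta_set(uint32_t vfta[4096 / 32], uint16_t vlan_id)
{
	uint32_t idx = (vlan_id >> 5) & 0x7F;  /* word index, vlan_id bits 11..5 (0..127) */
	uint32_t bit = 1u << (vlan_id & 0x1F); /* bit inside the word, vlan_id bits 4..0 */

	vfta[idx] |= bit;
}
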
@@ -106,6 +86,25 @@ (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \ ((vf)->version_minor == 1)) +#define I40E_WRITE_GLB_REG(hw, reg, value) \ + do { \ + uint32_t ori_val; \ + struct rte_eth_dev *dev; \ + struct rte_eth_dev_data *dev_data; \ + ori_val = I40E_READ_REG((hw), (reg)); \ + dev_data = ((struct i40e_adapter *)hw->back)->pf.dev_data; \ + dev = &rte_eth_devices[dev_data->port_id]; \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \ + (reg)), (value)); \ + if (ori_val != value) \ + PMD_DRV_LOG(WARNING, \ + "i40e device %s changed global " \ + "register [0x%08x]. original: 0x%08x, " \ + "new: 0x%08x ", \ + (dev->device->name), (reg), \ + (ori_val), (value)); \ + } while (0) + /* index flex payload per layer */ enum i40e_flxpld_layer_idx { I40E_FLXPLD_L2_IDX = 0, @@ -129,7 +128,6 @@ enum i40e_flxpld_layer_idx { #define I40E_FLAG_FDIR (1ULL << 6) #define I40E_FLAG_VXLAN (1ULL << 7) #define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8) -#define I40E_FLAG_VF_MAC_BY_PF (1ULL << 9) #define I40E_FLAG_ALL (I40E_FLAG_RSS | \ I40E_FLAG_DCB | \ I40E_FLAG_VMDQ | \ @@ -138,8 +136,7 @@ enum i40e_flxpld_layer_idx { I40E_FLAG_HEADER_SPLIT_ENABLED | \ I40E_FLAG_FDIR | \ I40E_FLAG_VXLAN | \ - I40E_FLAG_RSS_AQ_CAPABLE | \ - I40E_FLAG_VF_MAC_BY_PF) + I40E_FLAG_RSS_AQ_CAPABLE) #define I40E_RSS_OFFLOAD_ALL ( \ ETH_RSS_FRAG_IPV4 | \ @@ -189,6 +186,7 @@ enum i40e_flxpld_layer_idx { #define I40E_ITR_INDEX_NONE 3 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ +#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ /* Special FW support this floating VEB feature */ #define FLOATING_VEB_SUPPORTED_FW_MAJ 5 #define FLOATING_VEB_SUPPORTED_FW_MIN 0 @@ -198,6 +196,9 @@ enum i40e_flxpld_layer_idx { #define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) +#define I40E_RSS_TYPE_NONE 0ULL +#define I40E_RSS_TYPE_INVALID 1ULL + #define I40E_INSET_NONE 0x00000000000000000ULL /* bit0 ~ bit 7 */ @@ -260,22 +261,55 @@ enum i40e_flxpld_layer_idx { #define I40E_QOS_BW_WEIGHT_MIN 1 /* The max bandwidth weight is 127. */ #define I40E_QOS_BW_WEIGHT_MAX 127 +/* The max queue region index is 7. */ +#define I40E_REGION_MAX_INDEX 7 + +#define I40E_MAX_PERCENT 100 +#define I40E_DEFAULT_DCB_APP_NUM 1 +#define I40E_DEFAULT_DCB_APP_PRIO 3 + +#define I40E_FDIR_PRG_PKT_CNT 128 + +/* + * Struct to store flow created. + */ +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + enum rte_filter_type filter_type; + void *rule; +}; /** * The overhead from MTU to max frame size. * Considering QinQ packet, the VLAN tag needs to be counted twice. */ #define I40E_ETH_OVERHEAD \ - (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2) + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2) +#define I40E_ETH_MAX_LEN (RTE_ETHER_MTU + I40E_ETH_OVERHEAD) + +#define I40E_RXTX_BYTES_H_16_BIT(bytes) ((bytes) & ~I40E_48_BIT_MASK) +#define I40E_RXTX_BYTES_L_48_BIT(bytes) ((bytes) & I40E_48_BIT_MASK) struct i40e_adapter; +struct rte_pci_driver; + +/** + * MAC filter type + */ +enum i40e_mac_filter_type { + I40E_MAC_PERFECT_MATCH = 1, /**< exact match of MAC addr. */ + I40E_MACVLAN_PERFECT_MATCH, /**< exact match of MAC addr and VLAN ID. */ + I40E_MAC_HASH_MATCH, /**< hash match of MAC addr. */ + /** hash match of MAC addr and exact match of VLAN ID. 
*/ + I40E_MACVLAN_HASH_MATCH, +}; /** * MAC filter structure */ struct i40e_mac_filter_info { - enum rte_mac_filter_type filter_type; - struct ether_addr mac_addr; + enum i40e_mac_filter_type filter_type; + struct rte_ether_addr mac_addr; }; TAILQ_HEAD(i40e_mac_filter_list, i40e_mac_filter); @@ -328,8 +362,8 @@ struct i40e_veb { /* i40e MACVLAN filter structure */ struct i40e_macvlan_filter { - struct ether_addr macaddr; - enum rte_mac_filter_type filter_type; + struct rte_ether_addr macaddr; + enum i40e_mac_filter_type filter_type; uint16_t vlan_id; }; @@ -347,7 +381,7 @@ struct i40e_vsi { * needs to add, HW needs to know the layout that VSIs are organized. * Besides that, VSI isan element and can't switch packets, which needs * to add new component VEB to perform switching. So, a new VSI needs - * to specify the the uplink VSI (Parent VSI) before created. The + * to specify the uplink VSI (Parent VSI) before created. The * uplink VSI will check whether it had a VEB to switch packets. If no, * it will try to create one. Then, uplink VSI will move the new VSI * into its' sib_vsi_list to manage all the downlink VSI. @@ -384,6 +418,8 @@ struct i40e_vsi { uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */ uint8_t vlan_filter_on; /* The VLAN filter enabled */ struct i40e_bw_info bw_info; /* VSI bandwidth information */ + uint64_t prev_rx_bytes; + uint64_t prev_tx_bytes; }; struct pool_entry { @@ -419,7 +455,27 @@ struct i40e_pf_vf { uint16_t vf_idx; /* VF index in pf->vfs */ uint16_t lan_nb_qps; /* Actual queues allocated */ uint16_t reset_cnt; /* Total vf reset times */ - struct ether_addr mac_addr; /* Default MAC address */ + struct rte_ether_addr mac_addr; /* Default MAC address */ + /* version of the virtchnl from VF */ + struct virtchnl_version_info version; + uint32_t request_caps; /* offload caps requested from VF */ + uint64_t num_mdd_events; /* num of mdd events detected */ + + /* + * Variables for store the arrival timestamp of VF messages. + * If the timestamp of latest message stored at + * `msg_timestamps[index % max]` then the timestamp of + * earliest message stored at `msg_time[(index + 1) % max]`. + * When a new message come, the timestamp of this message + * will be stored at `msg_timestamps[(index + 1) % max]` and the + * earliest message timestamp is at + * `msg_timestamps[(index + 2) % max]` now... + */ + uint32_t msg_index; + uint64_t *msg_timestamps; + + /* cycle of stop ignoring VF message */ + uint64_t ignore_end_cycle; }; /* @@ -460,13 +516,181 @@ struct i40e_vmdq_info { #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off)) #define I40E_FDIR_IPv6_TC_OFFSET 20 +/* A structure used to define the input for GTP flow */ +struct i40e_gtp_flow { + struct rte_eth_udpv4_flow udp; /* IPv4 UDP fields to match. */ + uint8_t msg_type; /* Message type. */ + uint32_t teid; /* TEID in big endian. */ +}; + +/* A structure used to define the input for GTP IPV4 flow */ +struct i40e_gtp_ipv4_flow { + struct i40e_gtp_flow gtp; + struct rte_eth_ipv4_flow ip4; +}; + +/* A structure used to define the input for GTP IPV6 flow */ +struct i40e_gtp_ipv6_flow { + struct i40e_gtp_flow gtp; + struct rte_eth_ipv6_flow ip6; +}; + +/* A structure used to define the input for ESP IPV4 flow */ +struct i40e_esp_ipv4_flow { + struct rte_eth_ipv4_flow ipv4; + uint32_t spi; /* SPI in big endian. */ +}; + +/* A structure used to define the input for ESP IPV6 flow */ +struct i40e_esp_ipv6_flow { + struct rte_eth_ipv6_flow ipv6; + uint32_t spi; /* SPI in big endian. 
*/ +}; +/* A structure used to define the input for ESP IPV4 UDP flow */ +struct i40e_esp_ipv4_udp_flow { + struct rte_eth_udpv4_flow udp; + uint32_t spi; /* SPI in big endian. */ +}; + +/* A structure used to define the input for ESP IPV6 UDP flow */ +struct i40e_esp_ipv6_udp_flow { + struct rte_eth_udpv6_flow udp; + uint32_t spi; /* SPI in big endian. */ +}; + +/* A structure used to define the input for raw type flow */ +struct i40e_raw_flow { + uint16_t pctype; + void *packet; + uint32_t length; +}; + +/* A structure used to define the input for L2TPv3 over IPv4 flow */ +struct i40e_ipv4_l2tpv3oip_flow { + struct rte_eth_ipv4_flow ip4; + uint32_t session_id; /* Session ID in big endian. */ +}; + +/* A structure used to define the input for L2TPv3 over IPv6 flow */ +struct i40e_ipv6_l2tpv3oip_flow { + struct rte_eth_ipv6_flow ip6; + uint32_t session_id; /* Session ID in big endian. */ +}; + +/* A structure used to define the input for l2 dst type flow */ +struct i40e_l2_flow { + struct rte_ether_addr dst; + struct rte_ether_addr src; + uint16_t ether_type; /**< Ether type in big endian */ +}; + /* + * A union contains the inputs for all types of flow + * items in flows need to be in big endian + */ +union i40e_fdir_flow { + struct i40e_l2_flow l2_flow; + struct rte_eth_udpv4_flow udp4_flow; + struct rte_eth_tcpv4_flow tcp4_flow; + struct rte_eth_sctpv4_flow sctp4_flow; + struct rte_eth_ipv4_flow ip4_flow; + struct rte_eth_udpv6_flow udp6_flow; + struct rte_eth_tcpv6_flow tcp6_flow; + struct rte_eth_sctpv6_flow sctp6_flow; + struct rte_eth_ipv6_flow ipv6_flow; + struct i40e_gtp_flow gtp_flow; + struct i40e_gtp_ipv4_flow gtp_ipv4_flow; + struct i40e_gtp_ipv6_flow gtp_ipv6_flow; + struct i40e_raw_flow raw_flow; + struct i40e_ipv4_l2tpv3oip_flow ip4_l2tpv3oip_flow; + struct i40e_ipv6_l2tpv3oip_flow ip6_l2tpv3oip_flow; + struct i40e_esp_ipv4_flow esp_ipv4_flow; + struct i40e_esp_ipv6_flow esp_ipv6_flow; + struct i40e_esp_ipv4_udp_flow esp_ipv4_udp_flow; + struct i40e_esp_ipv6_udp_flow esp_ipv6_udp_flow; +}; + +enum i40e_fdir_ip_type { + I40E_FDIR_IPTYPE_IPV4, + I40E_FDIR_IPTYPE_IPV6, +}; + +/** * Structure to store flex pit for flow diretor. */ struct i40e_fdir_flex_pit { - uint8_t src_offset; /* offset in words from the beginning of payload */ - uint8_t size; /* size in words */ - uint8_t dst_offset; /* offset in words of flexible payload */ + uint8_t src_offset; /* offset in words from the beginning of payload */ + uint8_t size; /* size in words */ + uint8_t dst_offset; /* offset in words of flexible payload */ +}; + +/* A structure used to contain extend input of flow */ +struct i40e_fdir_flow_ext { + uint16_t vlan_tci; + uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN]; + /* It is filled by the flexible payload to match. 
*/ + uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN]; + uint8_t raw_id; + uint8_t is_vf; /* 1 for VF, 0 for port dev */ + uint16_t dst_id; /* VF ID, available when is_vf is 1*/ + uint64_t input_set; + bool inner_ip; /* If there is inner ip */ + enum i40e_fdir_ip_type iip_type; /* ip type for inner ip */ + enum i40e_fdir_ip_type oip_type; /* ip type for outer ip */ + bool customized_pctype; /* If customized pctype is used */ + bool pkt_template; /* If raw packet template is used */ + bool is_udp; /* ipv4|ipv6 udp flow */ + enum i40e_flxpld_layer_idx layer_idx; + struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED]; + bool is_flex_flow; +}; + +/* A structure used to define the input for a flow director filter entry */ +struct i40e_fdir_input { + enum i40e_filter_pctype pctype; + union i40e_fdir_flow flow; + /* Flow fields to match, dependent on flow_type */ + struct i40e_fdir_flow_ext flow_ext; + /* Additional fields to match */ +}; + +/* Behavior will be taken if FDIR match */ +enum i40e_fdir_behavior { + I40E_FDIR_ACCEPT = 0, + I40E_FDIR_REJECT, + I40E_FDIR_PASSTHRU, +}; + +/* Flow director report status + * It defines what will be reported if FDIR entry is matched. + */ +enum i40e_fdir_status { + I40E_FDIR_NO_REPORT_STATUS = 0, /* Report nothing. */ + I40E_FDIR_REPORT_ID, /* Only report FD ID. */ + I40E_FDIR_REPORT_ID_FLEX_4, /* Report FD ID and 4 flex bytes. */ + I40E_FDIR_REPORT_FLEX_8, /* Report 8 flex bytes. */ +}; + +/* A structure used to define an action when match FDIR packet filter. */ +struct i40e_fdir_action { + uint16_t rx_queue; /* Queue assigned to if FDIR match. */ + enum i40e_fdir_behavior behavior; /* Behavior will be taken */ + enum i40e_fdir_status report_status; /* Status report option */ + /* If report_status is I40E_FDIR_REPORT_ID_FLEX_4 or + * I40E_FDIR_REPORT_FLEX_8, flex_off specifies where the reported + * flex bytes start from in flexible payload. + */ + uint8_t flex_off; +}; + +/* A structure used to define the flow director filter entry by filter_ctrl API + * It supports RTE_ETH_FILTER_FDIR data representation. + */ +struct i40e_fdir_filter_conf { + uint32_t soft_id; + /* ID, an unique value is required when deal with FDIR entry */ + struct i40e_fdir_input input; /* Input set */ + struct i40e_fdir_action action; /* Action taken when match */ }; struct i40e_fdir_flex_mask { @@ -484,9 +708,26 @@ struct i40e_fdir_flex_mask { struct i40e_fdir_filter { TAILQ_ENTRY(i40e_fdir_filter) rules; - struct rte_eth_fdir_filter fdir; + struct i40e_fdir_filter_conf fdir; +}; + +/* fdir memory pool entry */ +struct i40e_fdir_entry { + struct rte_flow flow; + uint32_t idx; +}; + +/* pre-allocated fdir memory pool */ +struct i40e_fdir_flow_pool { + /* a bitmap to manage the fdir pool */ + struct rte_bitmap *bitmap; + /* the size the pool is pf->fdir->fdir_space_size */ + struct i40e_fdir_entry *pool; }; +#define FLOW_TO_FLOW_BITMAP(f) \ + container_of((f), struct i40e_fdir_entry, flow) + TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter); /* * A structure used to define fields of a FDIR related info. 
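
The pre-allocated pool just above pairs every rte_flow with its slot index, and the bitmap records which slots are still free; FLOW_TO_FLOW_BITMAP() recovers the owning entry from a flow pointer when the slot is released. Below is a simplified sketch of how such a pool can be cycled, assuming a set bit marks a free slot and using a plain linear scan purely for illustration (the driver's real helpers are i40e_fdir_entry_pool_get() and i40e_fdir_entry_pool_put(), declared later in this header; pool_size corresponds to fdir_space_size):

#include <rte_bitmap.h>

/* illustrative only: hand out and return entries of the fdir flow pool */
static struct rte_flow *
fdir_entry_get_sketch(struct i40e_fdir_flow_pool *fp, uint32_t pool_size)
{
	uint32_t i;

	for (i = 0; i < pool_size; i++) {
		if (rte_bitmap_get(fp->bitmap, i)) {     /* set bit: slot is free */
			rte_bitmap_clear(fp->bitmap, i); /* mark it as in use */
			return &fp->pool[i].flow;
		}
	}
	return NULL;                                     /* pool exhausted */
}

static void
fdir_entry_put_sketch(struct i40e_fdir_flow_pool *fp, struct rte_flow *flow)
{
	struct i40e_fdir_entry *entry = FLOW_TO_FLOW_BITMAP(flow);

	rte_bitmap_set(fp->bitmap, entry->idx);          /* slot is free again */
}
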
@@ -496,8 +737,14 @@ struct i40e_fdir_info { uint16_t match_counter_index; /* Statistic counter index used for fdir*/ struct i40e_tx_queue *txq; struct i40e_rx_queue *rxq; - void *prg_pkt; /* memory for fdir program packet */ - uint64_t dma_addr; /* physic address of packet memory*/ + void *prg_pkt[I40E_FDIR_PRG_PKT_CNT]; /* memory for fdir program packet */ + uint64_t dma_addr[I40E_FDIR_PRG_PKT_CNT]; /* physic address of packet memory*/ + /* + * txq available buffer counter, indicates how many available buffers + * for fdir programming, initialized as I40E_FDIR_PRG_PKT_CNT + */ + int txq_available_buf_count; + /* input set bits for each pctype */ uint64_t input_set[I40E_FILTER_PCTYPE_MAX]; /* @@ -510,12 +757,42 @@ struct i40e_fdir_info { struct i40e_fdir_filter_list fdir_list; struct i40e_fdir_filter **hash_map; struct rte_hash *hash_table; + /* An array to store the inserted rules input */ + struct i40e_fdir_filter *fdir_filter_array; + + /* + * Priority ordering at filter invalidation(destroying a flow) between + * "best effort" space and "guaranteed" space. + * + * 0 = At filter invalidation, the hardware first tries to increment the + * "best effort" space. The "guaranteed" space is incremented only when + * the global "best effort" space is at it max value or the "best effort" + * space of the PF is at its max value. + * 1 = At filter invalidation, the hardware first tries to increment its + * "guaranteed" space. The "best effort" space is incremented only when + * it is already at its max value. + */ + uint32_t fdir_invalprio; + /* the total size of the fdir, this number is the sum of the guaranteed + + * shared space + */ + uint32_t fdir_space_size; + /* the actual number of the fdir rules in hardware, initialized as 0 */ + uint32_t fdir_actual_cnt; + /* the free guaranteed space of the fdir */ + uint32_t fdir_guarantee_free_space; + /* the fdir total guaranteed space */ + uint32_t fdir_guarantee_total_space; + /* the pre-allocated pool of the rte_flow */ + struct i40e_fdir_flow_pool fdir_flow_pool; /* Mark if flex pit and mask is set */ bool flex_pit_flag[I40E_MAX_FLXPLD_LAYER]; bool flex_mask_flag[I40E_FILTER_PCTYPE_MAX]; - bool inset_flag[I40E_FILTER_PCTYPE_MAX]; /* Mark if input set is set */ + uint32_t flow_count[I40E_FILTER_PCTYPE_MAX]; + + uint32_t flex_flow_count[I40E_MAX_FLXPLD_LAYER]; }; /* Ethertype filter number HW supports */ @@ -523,7 +800,7 @@ struct i40e_fdir_info { /* Ethertype filter struct */ struct i40e_ethertype_filter_input { - struct ether_addr mac_addr; /* Mac address to match */ + struct rte_ether_addr mac_addr; /* Mac address to match */ uint16_t ether_type; /* Ether type to match */ }; @@ -542,17 +819,57 @@ struct i40e_ethertype_rule { struct rte_hash *hash_table; }; +/* queue region info */ +struct i40e_queue_region_info { + /* the region id for this configuration */ + uint8_t region_id; + /* the start queue index for this region */ + uint8_t queue_start_index; + /* the total queue number of this queue region */ + uint8_t queue_num; + /* the total number of user priority for this region */ + uint8_t user_priority_num; + /* the packet's user priority for this region */ + uint8_t user_priority[I40E_MAX_USER_PRIORITY]; + /* the total number of flowtype for this region */ + uint8_t flowtype_num; + /** + * the pctype or hardware flowtype of packet, + * the specific index for each type has been defined + * in file i40e_type.h as enum i40e_filter_pctype. 
+ */ + uint8_t hw_flowtype[I40E_FILTER_PCTYPE_MAX]; +}; + +struct i40e_queue_regions { + /* the total number of queue region for this port */ + uint16_t queue_region_number; + struct i40e_queue_region_info region[I40E_REGION_MAX_INDEX + 1]; +}; + +struct i40e_rss_pattern_info { + uint8_t action_flag; + uint64_t types; +}; + /* Tunnel filter number HW supports */ #define I40E_MAX_TUNNEL_FILTER_NUM 400 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44 #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9 -#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10 -#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11 -#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12 -#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT 29 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT 30 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP 8 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE 9 +#define I40E_AQC_ADD_CLOUD_FILTER_0X10 0x10 +#define I40E_AQC_ADD_CLOUD_FILTER_0X11 0x11 +#define I40E_AQC_ADD_CLOUD_FILTER_0X12 0x12 +#define I40E_AQC_ADD_L1_FILTER_0X10 0x10 +#define I40E_AQC_ADD_L1_FILTER_0X11 0x11 +#define I40E_AQC_ADD_L1_FILTER_0X12 0x12 +#define I40E_AQC_ADD_L1_FILTER_0X13 0x13 +#define I40E_AQC_NEW_TR_21 21 +#define I40E_AQC_NEW_TR_22 22 enum i40e_tunnel_iptype { I40E_TUNNEL_IPTYPE_IPV4, @@ -600,15 +917,30 @@ enum i40e_tunnel_type { I40E_TUNNEL_TYPE_MPLSoUDP, I40E_TUNNEL_TYPE_MPLSoGRE, I40E_TUNNEL_TYPE_QINQ, + I40E_TUNNEL_TYPE_GTPC, + I40E_TUNNEL_TYPE_GTPU, + I40E_TUNNEL_TYPE_ESPoUDP, + I40E_TUNNEL_TYPE_ESPoIP, + I40E_CLOUD_TYPE_UDP, + I40E_CLOUD_TYPE_TCP, + I40E_CLOUD_TYPE_SCTP, I40E_TUNNEL_TYPE_MAX, }; +/** + * L4 port type. + */ +enum i40e_l4_port_type { + I40E_L4_PORT_TYPE_SRC = 0, + I40E_L4_PORT_TYPE_DST, +}; + /** * Tunneling Packet filter configuration. */ struct i40e_tunnel_filter_conf { - struct ether_addr outer_mac; /**< Outer MAC address to match. */ - struct ether_addr inner_mac; /**< Inner MAC address to match. */ + struct rte_ether_addr outer_mac; /**< Outer MAC address to match. */ + struct rte_ether_addr inner_mac; /**< Inner MAC address to match. */ uint16_t inner_vlan; /**< Inner VLAN to match. */ uint32_t outer_vlan; /**< Outer VLAN to match */ enum i40e_tunnel_iptype ip_type; /**< IP address type. */ @@ -624,6 +956,7 @@ struct i40e_tunnel_filter_conf { /** Flags from ETH_TUNNEL_FILTER_XX - see above. */ uint16_t filter_type; enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */ + enum i40e_l4_port_type l4_port_type; /**< L4 Port Type. */ uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */ uint16_t queue_id; /**< Queue assigned to if match. */ uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */ @@ -651,15 +984,6 @@ struct i40e_mirror_rule { TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule); -/* - * Struct to store flow created. - */ -struct rte_flow { - TAILQ_ENTRY(rte_flow) node; - enum rte_filter_type filter_type; - void *rule; -}; - TAILQ_HEAD(i40e_flow_list, rte_flow); /* Struct to store Traffic Manager shaper profile. 
*/ @@ -723,6 +1047,79 @@ struct i40e_tm_conf { bool committed; }; +enum i40e_new_pctype { + I40E_CUSTOMIZED_GTPC = 0, + I40E_CUSTOMIZED_GTPU_IPV4, + I40E_CUSTOMIZED_GTPU_IPV6, + I40E_CUSTOMIZED_GTPU, + I40E_CUSTOMIZED_IPV4_L2TPV3, + I40E_CUSTOMIZED_IPV6_L2TPV3, + I40E_CUSTOMIZED_ESP_IPV4, + I40E_CUSTOMIZED_ESP_IPV6, + I40E_CUSTOMIZED_ESP_IPV4_UDP, + I40E_CUSTOMIZED_ESP_IPV6_UDP, + I40E_CUSTOMIZED_AH_IPV4, + I40E_CUSTOMIZED_AH_IPV6, + I40E_CUSTOMIZED_MAX, +}; + +#define I40E_FILTER_PCTYPE_INVALID 0 +struct i40e_customized_pctype { + enum i40e_new_pctype index; /* Indicate which customized pctype */ + uint8_t pctype; /* New pctype value */ + bool valid; /* Check if it's valid */ +}; + +struct i40e_rte_flow_rss_conf { + struct rte_flow_action_rss conf; /**< RSS parameters. */ + + uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ? + I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t)]; /**< Hash key. */ + uint16_t queue[ETH_RSS_RETA_SIZE_512]; /**< Queues indices to use. */ + + bool symmetric_enable; /**< true, if enable symmetric */ + uint64_t config_pctypes; /**< All PCTYPES with the flow */ + uint64_t inset; /**< input sets */ + + uint8_t region_priority; /**< queue region priority */ + uint8_t region_queue_num; /**< region queue number */ + uint16_t region_queue_start; /**< region queue start */ + + uint32_t misc_reset_flags; +#define I40E_HASH_FLOW_RESET_FLAG_FUNC 0x01UL +#define I40E_HASH_FLOW_RESET_FLAG_KEY 0x02UL +#define I40E_HASH_FLOW_RESET_FLAG_QUEUE 0x04UL +#define I40E_HASH_FLOW_RESET_FLAG_REGION 0x08UL + + /**< All PCTYPES that reset with the flow */ + uint64_t reset_config_pctypes; + /**< Symmetric function should reset on PCTYPES */ + uint64_t reset_symmetric_pctypes; +}; + +/* RSS filter list structure */ +struct i40e_rss_filter { + TAILQ_ENTRY(i40e_rss_filter) next; + struct i40e_rte_flow_rss_conf rss_filter_info; +}; + +TAILQ_HEAD(i40e_rss_conf_list, i40e_rss_filter); + +struct i40e_vf_msg_cfg { + /* maximal VF message during a statistic period */ + uint32_t max_msg; + + /* statistic period, in second */ + uint32_t period; + /* + * If message statistics from a VF exceed the maximal limitation, + * the PF will ignore any new message from that VF for + * 'ignor_second' time. + */ + uint32_t ignore_second; +}; + /* * Structure to store private data specific for PF instance. 
*/ @@ -743,7 +1140,7 @@ struct i40e_pf { bool offset_loaded; struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ - struct ether_addr dev_addr; /* PF device mac address */ + struct rte_ether_addr dev_addr; /* PF device mac address */ uint64_t flags; /* PF feature flags */ /* All kinds of queue pair setting for different VSIs */ struct i40e_pf_vf *vfs; @@ -763,6 +1160,8 @@ struct i40e_pf { uint16_t fdir_qp_offset; uint16_t hash_lut_size; /* The size of hash lookup table */ + bool hash_filter_enabled; + uint64_t hash_enabled_queues; /* input set bits for each pctype */ uint64_t hash_input_set[I40E_FILTER_PCTYPE_MAX]; /* store VXLAN UDP ports */ @@ -777,6 +1176,8 @@ struct i40e_pf { struct i40e_fdir_info fdir; /* flow director info */ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */ + struct i40e_rss_conf_list rss_config_list; /* RSS rule list */ + struct i40e_queue_regions queue_region; /* queue region info */ struct i40e_fc_conf fc_conf; /* Flow control conf */ struct i40e_mirror_rule_list mirror_list; uint16_t nb_mirror_rule; /* The number of mirror rules */ @@ -785,8 +1186,27 @@ struct i40e_pf { bool floating_veb_list[I40E_MAX_VF]; struct i40e_flow_list flow_list; bool mpls_replace_flag; /* 1 - MPLS filter replace is done */ + bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */ bool qinq_replace_flag; /* QINQ filter replace is done */ + /* l4 port flag */ + bool sport_replace_flag; /* Source port replace is done */ + bool dport_replace_flag; /* Destination port replace is done */ struct i40e_tm_conf tm_conf; + bool support_multi_driver; /* 1 - support multiple driver */ + + /* Dynamic Device Personalization */ + bool gtp_support; /* 1 - support GTP-C and GTP-U */ + bool esp_support; /* 1 - support ESP SPI */ + /* customer customized pctype */ + struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX]; + /* Switch Domain Id */ + uint16_t switch_domain_id; + + struct i40e_vf_msg_cfg vf_msg_cfg; + uint64_t prev_rx_bytes; + uint64_t prev_tx_bytes; + uint64_t internal_prev_rx_bytes; + uint64_t internal_prev_tx_bytes; }; enum pending_msg { @@ -831,11 +1251,16 @@ struct i40e_vf { bool promisc_unicast_enabled; bool promisc_multicast_enabled; + rte_spinlock_t cmd_send_lock; uint32_t version_major; /* Major version number */ uint32_t version_minor; /* Minor version number */ uint16_t promisc_flags; /* Promiscuous setting */ uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */ + /* Multicast addrs */ + struct rte_ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; + uint16_t mc_addrs_num; /* Multicast mac addresses number */ + /* Event from pf */ bool dev_closed; bool link_up; @@ -862,7 +1287,6 @@ struct i40e_vf { struct i40e_adapter { /* Common for both PF and VF */ struct i40e_hw hw; - struct rte_eth_dev *eth_dev; /* Specific for PF or VF */ union { @@ -887,15 +1311,39 @@ struct i40e_adapter { uint64_t pctypes_tbl[I40E_FLOW_TYPE_MAX] __rte_cache_min_aligned; uint64_t flow_types_mask; uint64_t pctypes_mask; + + /* For RSS reta table update */ + uint8_t rss_reta_updated; +#ifdef RTE_ARCH_X86 + bool rx_use_avx2; + bool rx_use_avx512; + bool tx_use_avx2; + bool tx_use_avx512; +#endif +}; + +/** + * Strucute to store private data for each VF representor instance + */ +struct i40e_vf_representor { + uint16_t switch_domain_id; + /**< Virtual Function ID */ + uint16_t vf_id; + /**< Virtual Function ID */ + struct i40e_adapter *adapter; + /**< Private data store of assocaiated physical function */ + struct 
i40e_eth_stats stats_offset; + /**< Zero-point of VF statistics*/ }; extern const struct rte_flow_ops i40e_flow_ops; union i40e_filter_t { struct rte_eth_ethertype_filter ethertype_filter; - struct rte_eth_fdir_filter fdir_filter; + struct i40e_fdir_filter_conf fdir_filter; struct rte_eth_tunnel_filter_conf tunnel_filter; struct i40e_tunnel_filter_conf consistent_tunnel_filter; + struct i40e_rte_flow_rss_conf rss_conf; }; typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -920,13 +1368,14 @@ int i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, uint16_t vlan); int i40e_vsi_delete_vlan(struct i40e_vsi *vsi, uint16_t vlan); int i40e_vsi_add_mac(struct i40e_vsi *vsi, struct i40e_mac_filter_info *filter); -int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr); +int i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct rte_ether_addr *addr); void i40e_update_vsi_stats(struct i40e_vsi *vsi); void i40e_pf_disable_irq0(struct i40e_hw *hw); void i40e_pf_enable_irq0(struct i40e_hw *hw); int i40e_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); -void i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx); +int i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx); void i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi); +void i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi); int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, struct i40e_vsi_vlan_pvid_info *info); int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on); @@ -936,27 +1385,29 @@ uint64_t i40e_parse_hena(const struct i40e_adapter *adapter, uint64_t flags); enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf); enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf); int i40e_fdir_setup(struct i40e_pf *pf); +void i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi); const struct rte_memzone *i40e_memzone_reserve(const char *name, uint32_t len, int socket_id); int i40e_fdir_configure(struct rte_eth_dev *dev); +void i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on); void i40e_fdir_teardown(struct i40e_pf *pf); enum i40e_filter_pctype i40e_flowtype_to_pctype(const struct i40e_adapter *adapter, uint16_t flow_type); uint16_t i40e_pctype_to_flowtype(const struct i40e_adapter *adapter, enum i40e_filter_pctype pctype); -int i40e_fdir_ctrl_func(struct rte_eth_dev *dev, - enum rte_filter_op filter_op, - void *arg); +int i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len); +void i40e_fdir_info_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_info *fdir); +void i40e_fdir_stats_get(struct rte_eth_dev *dev, + struct rte_eth_fdir_stats *stat); int i40e_select_filter_input_set(struct i40e_hw *hw, struct rte_eth_input_set_conf *conf, enum rte_filter_type filter); void i40e_fdir_filter_restore(struct i40e_pf *pf); -int i40e_hash_filter_inset_select(struct i40e_hw *hw, - struct rte_eth_input_set_conf *conf); -int i40e_fdir_filter_inset_select(struct i40e_pf *pf, - struct rte_eth_input_set_conf *conf); +int i40e_set_hash_inset(struct i40e_hw *hw, uint64_t input_set, + uint32_t pctype, bool add); int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode, uint32_t retval, uint8_t *msg, uint16_t msglen); @@ -964,13 +1415,17 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_rxq_info *qinfo); void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo); +int i40e_rx_burst_mode_get(struct rte_eth_dev *dev, 
uint16_t queue_id, + struct rte_eth_burst_mode *mode); +int i40e_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_burst_mode *mode); struct i40e_ethertype_filter * i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule, const struct i40e_ethertype_filter_input *input); int i40e_sw_ethertype_filter_del(struct i40e_pf *pf, struct i40e_ethertype_filter_input *input); int i40e_sw_fdir_filter_del(struct i40e_pf *pf, - struct rte_eth_fdir_input *input); + struct i40e_fdir_input *input); struct i40e_tunnel_filter * i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule, const struct i40e_tunnel_filter_input *input); @@ -980,9 +1435,13 @@ uint64_t i40e_get_default_input_set(uint16_t pctype); int i40e_ethertype_filter_set(struct i40e_pf *pf, struct rte_eth_ethertype_filter *filter, bool add); -int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, - const struct rte_eth_fdir_filter *filter, - bool add); +struct rte_flow * +i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info); +void i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info, + struct rte_flow *flow); +int i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, + const struct i40e_fdir_filter_conf *filter, + bool add); int i40e_dev_tunnel_filter_set(struct i40e_pf *pf, struct rte_eth_tunnel_filter_conf *tunnel_filter, uint8_t add); @@ -992,7 +1451,7 @@ int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, int i40e_fdir_flush(struct rte_eth_dev *dev); int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi, struct i40e_macvlan_filter *mv_f, - int num, struct ether_addr *addr); + int num, struct rte_ether_addr *addr); int i40e_remove_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total); @@ -1000,18 +1459,42 @@ void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on); int i40e_add_macvlan_filters(struct i40e_vsi *vsi, struct i40e_macvlan_filter *filter, int total); +bool is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv); bool is_i40e_supported(struct rte_eth_dev *dev); - +bool is_i40evf_supported(struct rte_eth_dev *dev); +void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, + uint8_t enable); int i40e_validate_input_set(enum i40e_filter_pctype pctype, enum rte_filter_type filter, uint64_t inset); -int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, - uint8_t nb_elem); +int i40e_generate_inset_mask_reg(struct i40e_hw *hw, uint64_t inset, + uint32_t *mask, uint8_t nb_elem); uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input); void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val); +void i40e_check_write_global_reg(struct i40e_hw *hw, + uint32_t addr, uint32_t val); int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops); void i40e_tm_conf_init(struct rte_eth_dev *dev); void i40e_tm_conf_uninit(struct rte_eth_dev *dev); +struct i40e_customized_pctype* +i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index); +void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg, + uint32_t pkg_size, + enum rte_pmd_i40e_package_op op); +int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); +int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, + struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); +void i40e_init_queue_region_conf(struct rte_eth_dev *dev); +void i40e_flex_payload_reg_set_default(struct i40e_hw *hw); +void i40e_pf_disable_rss(struct i40e_pf *pf); +int 
i40e_pf_calc_configured_queues_num(struct i40e_pf *pf); +int i40e_pf_reset_rss_reta(struct i40e_pf *pf); +int i40e_pf_reset_rss_key(struct i40e_pf *pf); +int i40e_pf_config_rss(struct i40e_pf *pf); +int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len); +int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size); +int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params); +int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev); #define I40E_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) @@ -1058,7 +1541,7 @@ i40e_get_vsi_from_adapter(struct i40e_adapter *adapter) #define I40E_VSI_TO_DEV_DATA(vsi) \ (((struct i40e_vsi *)vsi)->adapter->pf.dev_data) #define I40E_VSI_TO_ETH_DEV(vsi) \ - (((struct i40e_vsi *)vsi)->adapter->eth_dev) + (&rte_eth_devices[((struct i40e_vsi *)vsi)->adapter->pf.dev_data->port_id]) /* I40E_PF_TO */ #define I40E_PF_TO_HW(pf) \ @@ -1088,10 +1571,18 @@ i40e_align_floor(int n) } static inline uint16_t -i40e_calc_itr_interval(int16_t interval) +i40e_calc_itr_interval(bool is_pf, bool is_multi_drv) { - if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) - interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + uint16_t interval = 0; + + if (is_multi_drv) { + interval = I40E_QUEUE_ITR_INTERVAL_MAX; + } else { + if (is_pf) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + else + interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT; + } /* Convert to hardware count, as writing each 1 represents 2 us */ return interval / 2; @@ -1154,6 +1645,8 @@ i40e_calc_itr_interval(int16_t interval) (((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \ - ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR)) + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_AOC) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_ACC)) #endif /* _I40E_ETHDEV_H_ */
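
As a small usage sketch of the ITR helper defined above (illustrative only, not part of the header): the returned value is a hardware count in 2 us units, so the 32 us PF and VF defaults program 16, and the multi-driver path programs the 8160 us maximum as 4080.

/* illustrative only: convert the ITR defaults shown above to register units */
static inline void
i40e_itr_example(void)
{
	uint16_t pf_itr = i40e_calc_itr_interval(true, false);   /* 32 us   -> 16 */
	uint16_t vf_itr = i40e_calc_itr_interval(false, false);  /* 32 us   -> 16 */
	uint16_t multi_itr = i40e_calc_itr_interval(true, true); /* 8160 us -> 4080 */

	(void)pf_itr;
	(void)vf_itr;
	(void)multi_itr;
}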