X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcommon%2Fiavf%2Fvirtchnl.h;h=b931da61e5bac666b77ceeac801fbb067f01794f;hb=5b38d8cd4663;hp=14fd96228802790b0221ac5f36368c07835a89bd;hpb=72390691c80c1ed20ef47cf1222d71d9f19347fa;p=dpdk.git diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h index 14fd962288..b931da61e5 100644 --- a/drivers/common/iavf/virtchnl.h +++ b/drivers/common/iavf/virtchnl.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2001-2020 + * Copyright(c) 2001-2020 Intel Corporation */ #ifndef _VIRTCHNL_H_ @@ -129,6 +129,22 @@ enum virtchnl_ops { VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, /* opcodes 34, 35, 36, 37 and 38 are reserved */ + VIRTCHNL_OP_DCF_CMD_DESC = 39, + VIRTCHNL_OP_DCF_CMD_BUFF = 40, + VIRTCHNL_OP_DCF_DISABLE = 41, + VIRTCHNL_OP_DCF_GET_VSI_MAP = 42, + VIRTCHNL_OP_DCF_GET_PKG_INFO = 43, + VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44, + VIRTCHNL_OP_ADD_RSS_CFG = 45, + VIRTCHNL_OP_DEL_RSS_CFG = 46, + VIRTCHNL_OP_ADD_FDIR_FILTER = 47, + VIRTCHNL_OP_DEL_FDIR_FILTER = 48, + VIRTCHNL_OP_QUERY_FDIR_FILTER = 49, + VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50, + VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, + VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, + VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111, + VIRTCHNL_OP_MAX, }; /* These macros are used to generate compilation errors if a structure/union @@ -233,6 +249,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 #define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 + /* 0X00000100 is reserved */ +#define VIRTCHNL_VF_LARGE_NUM_QPAIRS 0x00000200 #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 @@ -243,6 +261,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 #define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000 #define VIRTCHNL_VF_OFFLOAD_USO 0X02000000 +#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC 0X04000000 +#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000 +#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000 + /* 0X20000000 is reserved */ +#define VIRTCHNL_VF_CAP_DCF 0X40000000 /* 0X80000000 is reserved */ /* Define below the capability flags that are not offloads */ @@ -306,7 +329,9 @@ struct virtchnl_rxq_info { u32 databuffer_size; u32 max_pkt_size; u8 crc_disable; - u8 pad1[3]; + /* only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported */ + u8 rxdid; + u8 pad1[2]; u64 dma_ring_addr; enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ u32 pad2; @@ -406,6 +431,35 @@ struct virtchnl_queue_select { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); +/* VIRTCHNL_OP_GET_MAX_RSS_QREGION + * + * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * then this op must be supported. + * + * VF sends this message in order to query the max RSS queue region + * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled. + * This information should be used when configuring the RSS LUT and/or + * configuring queue region based filters. + * + * The maximum RSS queue region is 2^qregion_width. So, a qregion_width + * of 6 would inform the VF that the PF supports a maximum RSS queue region + * of 64. + * + * A queue region represents a range of queues that can be used to configure + * a RSS LUT. For example, if a VF is given 64 queues, but only a max queue + * region size of 16 (i.e. 
2^qregion_width = 16) then it will only be able + * to configure the RSS LUT with queue indices from 0 to 15. However, other + * filters can be used to direct packets to queues >15 via specifying a queue + * base/offset and queue region width. + */ +struct virtchnl_max_rss_qregion { + u16 vport_id; + u16 qregion_width; + u8 pad[4]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion); + /* VIRTCHNL_OP_ADD_ETH_ADDR * VF sends this message in order to add one or more unicast or multicast * address filters for the specified VSI. @@ -418,9 +472,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); * PF removes the filters and returns status. */ +/* VIRTCHNL_ETHER_ADDR_LEGACY + * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad + * bytes. Moving forward all VF drivers should not set type to + * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy + * behavior. The control plane function (i.e. PF) can use a best effort method + * of tracking the primary/device unicast in this case, but there is no + * guarantee and functionality depends on the implementation of the PF. + */ + +/* VIRTCHNL_ETHER_ADDR_PRIMARY + * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the + * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and + * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane + * function (i.e. PF) to accurately track and use this MAC address for + * displaying on the host and for VM/function reset. + */ + +/* VIRTCHNL_ETHER_ADDR_EXTRA + * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra + * unicast and/or multicast filters that are being added/deleted via + * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively. + */ struct virtchnl_ether_addr { u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; - u8 pad[2]; + u8 type; +#define VIRTCHNL_ETHER_ADDR_LEGACY 0 +#define VIRTCHNL_ETHER_ADDR_PRIMARY 1 +#define VIRTCHNL_ETHER_ADDR_EXTRA 2 +#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */ + u8 pad; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); @@ -531,6 +612,14 @@ struct virtchnl_rss_hena { VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); +/* Type of RSS algorithm */ +enum virtchnl_rss_algorithm { + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, + VIRTCHNL_RSS_ALG_XOR_ASYMMETRIC = 1, + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, + VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3, +}; + /* This is used by PF driver to enforce how many channels can be supported. 
* When ADQ_V2 capability is negotiated, it will allow 16 channels otherwise * PF driver will allow only max 4 channels @@ -571,8 +660,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info); */ struct virtchnl_l4_spec { - u8 src_mac[ETH_ALEN]; - u8 dst_mac[ETH_ALEN]; + u8 src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; + u8 dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; /* vlan_prio is part of this 16 bit field even from OS perspective * vlan_id:12 is actual vlan_id, then vlanid:bit14..12 is vlan_prio * in future, when decided to offload vlan_prio, pass that information @@ -599,6 +688,11 @@ enum virtchnl_action { /* action types */ VIRTCHNL_ACTION_DROP = 0, VIRTCHNL_ACTION_TC_REDIRECT, + VIRTCHNL_ACTION_PASSTHRU, + VIRTCHNL_ACTION_QUEUE, + VIRTCHNL_ACTION_Q_REGION, + VIRTCHNL_ACTION_MARK, + VIRTCHNL_ACTION_COUNT, }; enum virtchnl_flow_type { @@ -620,6 +714,52 @@ struct virtchnl_filter { VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); +/* VIRTCHNL_OP_DCF_GET_VSI_MAP + * VF sends this message to get VSI mapping table. + * PF responds with an indirect message containing VF's + * HW VSI IDs. + * The index of vf_vsi array is the logical VF ID, the + * value of vf_vsi array is the VF's HW VSI ID with its + * valid configuration. + */ +struct virtchnl_dcf_vsi_map { + u16 pf_vsi; /* PF's HW VSI ID */ + u16 num_vfs; /* The actual number of VFs allocated */ +#define VIRTCHNL_DCF_VF_VSI_ID_S 0 +#define VIRTCHNL_DCF_VF_VSI_ID_M (0xFFF << VIRTCHNL_DCF_VF_VSI_ID_S) +#define VIRTCHNL_DCF_VF_VSI_VALID BIT(15) + u16 vf_vsi[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_dcf_vsi_map); + +#define PKG_NAME_SIZE 32 +#define DSN_SIZE 8 + +struct pkg_version { + u8 major; + u8 minor; + u8 update; + u8 draft; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, pkg_version); + +struct virtchnl_pkg_info { + struct pkg_version pkg_ver; + u32 track_id; + char pkg_name[PKG_NAME_SIZE]; + u8 dsn[DSN_SIZE]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_pkg_info); + +struct virtchnl_supported_rxdids { + u64 supported_rxdids; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_supported_rxdids); + /* VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other @@ -630,6 +770,7 @@ enum virtchnl_event_codes { VIRTCHNL_EVENT_LINK_CHANGE, VIRTCHNL_EVENT_RESET_IMPENDING, VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE, }; #define PF_EVENT_SEVERITY_INFO 0 @@ -657,6 +798,10 @@ struct virtchnl_pf_event { u32 link_speed; u8 link_status; } link_event_adv; + struct { + u16 vf_id; + u16 vsi_id; + } vf_vsi_map; } event_data; int severity; @@ -665,6 +810,449 @@ struct virtchnl_pf_event { VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. 
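+ *
+ * A minimal polling sketch (hedged illustration only; the rd32()
+ * accessor and the 0x3 state mask are assumptions based on the text
+ * above, not definitions in this header):
+ *
+ *	u32 rstat = rd32(hw, VFGEN_RSTAT) & 0x3; // 0xDEADBEEF & 0x3 == 3
+ *
+ *	if (rstat == VIRTCHNL_VFR_COMPLETED ||
+ *	    rstat == VIRTCHNL_VFR_VFACTIVE)
+ *		... the reset has finished; reinitialize the VF ...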
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define PROTO_HDR_SHIFT 5
+#define PROTO_HDR_FIELD_START(proto_hdr_type) \
+ (proto_hdr_type << PROTO_HDR_SHIFT)
+#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
+
+/* The VF uses these macros to configure each protocol header.
+ * They specify which protocol headers and protocol header fields to use,
+ * based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
+ * @param hdr: a pointer to a struct virtchnl_proto_hdr
+ * @param hdr_type: ETH/IPV4/TCP, etc.
+ * @param field: SRC/DST/TEID/SPI, etc.
+ */
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
+ ((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
+ ((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
+#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
+
+#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
+ (VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
+ VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
+
+#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
+ ((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
+#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
+ (((hdr)->type) >> PROTO_HDR_SHIFT)
+#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
+ ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
+ (VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
+ VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl_proto_hdr_type {
+ VIRTCHNL_PROTO_HDR_NONE,
+ VIRTCHNL_PROTO_HDR_ETH,
+ VIRTCHNL_PROTO_HDR_S_VLAN,
+ VIRTCHNL_PROTO_HDR_C_VLAN,
+ VIRTCHNL_PROTO_HDR_IPV4,
+ VIRTCHNL_PROTO_HDR_IPV6,
+ VIRTCHNL_PROTO_HDR_TCP,
+ VIRTCHNL_PROTO_HDR_UDP,
+ VIRTCHNL_PROTO_HDR_SCTP,
+ VIRTCHNL_PROTO_HDR_GTPU_IP,
+ VIRTCHNL_PROTO_HDR_GTPU_EH,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ VIRTCHNL_PROTO_HDR_PPPOE,
+ VIRTCHNL_PROTO_HDR_L2TPV3,
+ VIRTCHNL_PROTO_HDR_ESP,
+ VIRTCHNL_PROTO_HDR_AH,
+ VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_GTPC,
+};
+
+/* Protocol header field within a protocol header. 
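+ * For example, the following hedged sketch (assuming 'hdr' points at a
+ * zeroed struct virtchnl_proto_hdr) selects the IPv4 source and
+ * destination address fields using the macros above:
+ *
+ *	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ *	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ *
+ * Since VIRTCHNL_PROTO_HDR_IPV4_SRC and ..._DST map to bits 0 and 1 of
+ * the IPV4 field space, this leaves hdr->field_selector == 0x3.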
*/ +enum virtchnl_proto_hdr_field { + /* ETHER */ + VIRTCHNL_PROTO_HDR_ETH_SRC = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH), + VIRTCHNL_PROTO_HDR_ETH_DST, + VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, + /* S-VLAN */ + VIRTCHNL_PROTO_HDR_S_VLAN_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN), + /* C-VLAN */ + VIRTCHNL_PROTO_HDR_C_VLAN_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN), + /* IPV4 */ + VIRTCHNL_PROTO_HDR_IPV4_SRC = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4), + VIRTCHNL_PROTO_HDR_IPV4_DST, + VIRTCHNL_PROTO_HDR_IPV4_DSCP, + VIRTCHNL_PROTO_HDR_IPV4_TTL, + VIRTCHNL_PROTO_HDR_IPV4_PROT, + /* IPV6 */ + VIRTCHNL_PROTO_HDR_IPV6_SRC = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6), + VIRTCHNL_PROTO_HDR_IPV6_DST, + VIRTCHNL_PROTO_HDR_IPV6_TC, + VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, + VIRTCHNL_PROTO_HDR_IPV6_PROT, + /* IPV6 Prefix */ + VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC, + VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST, + /* TCP */ + VIRTCHNL_PROTO_HDR_TCP_SRC_PORT = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP), + VIRTCHNL_PROTO_HDR_TCP_DST_PORT, + /* UDP */ + VIRTCHNL_PROTO_HDR_UDP_SRC_PORT = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP), + VIRTCHNL_PROTO_HDR_UDP_DST_PORT, + /* SCTP */ + VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP), + VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, + /* GTPU_IP */ + VIRTCHNL_PROTO_HDR_GTPU_IP_TEID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP), + /* GTPU_EH */ + VIRTCHNL_PROTO_HDR_GTPU_EH_PDU = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH), + VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, + /* PPPOE */ + VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE), + /* L2TPV3 */ + VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3), + /* ESP */ + VIRTCHNL_PROTO_HDR_ESP_SPI = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP), + /* AH */ + VIRTCHNL_PROTO_HDR_AH_SPI = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH), + /* PFCP */ + VIRTCHNL_PROTO_HDR_PFCP_S_FIELD = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP), + VIRTCHNL_PROTO_HDR_PFCP_SEID, + /* GTPC */ + VIRTCHNL_PROTO_HDR_GTPC_TEID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC), +}; + +struct virtchnl_proto_hdr { + enum virtchnl_proto_hdr_type type; + u32 field_selector; /* a bit mask to select field for header type */ + u8 buffer[64]; + /** + * binary buffer in network order for specific header type. + * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4 + * header is expected to be copied into the buffer. + */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr); + +struct virtchnl_proto_hdrs { + u8 tunnel_level; + /** + * specify where protocol header start from. + * 0 - from the outer layer + * 1 - from the first inner layer + * 2 - from the second inner layer + * .... 
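+ * For example (illustrative only): for a GTP-U packet laid out as
+ * ETH / IPV4 / UDP / GTPU_IP / inner IPV4 / TCP, tunnel_level 0 means
+ * proto_hdr[0] describes the outer ETH header, while tunnel_level 1
+ * means proto_hdr[0] describes the inner IPV4 header.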
+ **/
+ int count; /* the number of proto layers must be less than VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
+
+struct virtchnl_rss_cfg {
+ struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
+ enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
+ u8 reserved[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
+
+/* action configuration for FDIR */
+struct virtchnl_filter_action {
+ enum virtchnl_action type;
+ union {
+ /* used for queue and qgroup action */
+ struct {
+ u16 index;
+ u8 region;
+ } queue;
+ /* used for count action */
+ struct {
+ /* share counter ID with other flow rules */
+ u8 shared;
+ u32 id; /* counter ID */
+ } count;
+ /* used for mark action */
+ u32 mark_id;
+ u8 reserve[32];
+ } act_conf;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
+
+#define VIRTCHNL_MAX_NUM_ACTIONS 8
+
+struct virtchnl_filter_action_set {
+ /* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
+ int count;
+ struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
+
+/* pattern and action for FDIR rule */
+struct virtchnl_fdir_rule {
+ struct virtchnl_proto_hdrs proto_hdrs;
+ struct virtchnl_filter_action_set action_set;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
+
+/* Query information used to retrieve FDIR rule counters.
+ * The PF fills out this structure in response to a query request.
+ */
+struct virtchnl_fdir_query_info {
+ u32 match_packets_valid:1;
+ u32 match_bytes_valid:1;
+ u32 reserved:30; /* Reserved, must be zero. */
+ u32 pad;
+ u64 matched_packets; /* Number of packets for this rule. */
+ u64 matched_bytes; /* Number of bytes through this rule. */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_fdir_query_info);
+
+/* Status codes returned to the VF after the VF requests FDIR commands
+ *
+ * VIRTCHNL_FDIR_SUCCESS
+ * The VF's FDIR-related request was completed successfully by the PF.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
+ * OP_ADD_FDIR_FILTER request failed because no hardware resource
+ * is available.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
+ * OP_ADD_FDIR_FILTER request failed because the rule already exists.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
+ * OP_ADD_FDIR_FILTER request failed because it conflicts with an
+ * existing rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
+ * OP_DEL_FDIR_FILTER request failed because the rule does not exist.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
+ * OP_ADD_FDIR_FILTER request failed because parameter validation
+ * failed or the hardware does not support the rule.
+ *
+ * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
+ * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
+ * timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed because parameter validation
+ * failed, for example, the VF queried the counter of a rule that has
+ * no counter action.
+ */
+enum virtchnl_fdir_prgm_status {
+ VIRTCHNL_FDIR_SUCCESS = 0,
+ VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
+ VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
+ VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
+ VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
+ VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
+};
+
+/* VIRTCHNL_OP_ADD_FDIR_FILTER
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only and rule_cfg. 
PF will return flow_id + * if the request is successfully done and return add_status to VF. + */ +struct virtchnl_fdir_add { + u16 vsi_id; /* INPUT */ + /* + * 1 for validating a fdir rule, 0 for creating a fdir rule. + * Validate and create share one ops: VIRTCHNL_OP_ADD_FDIR_FILTER. + */ + u16 validate_only; /* INPUT */ + u32 flow_id; /* OUTPUT */ + struct virtchnl_fdir_rule rule_cfg; /* INPUT */ + enum virtchnl_fdir_prgm_status status; /* OUTPUT */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add); + +/* VIRTCHNL_OP_DEL_FDIR_FILTER + * VF sends this request to PF by filling out vsi_id + * and flow_id. PF will return del_status to VF. + */ +struct virtchnl_fdir_del { + u16 vsi_id; /* INPUT */ + u16 pad; + u32 flow_id; /* INPUT */ + enum virtchnl_fdir_prgm_status status; /* OUTPUT */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); + +/* VIRTCHNL_OP_QUERY_FDIR_FILTER + * VF sends this request to PF by filling out vsi_id, + * flow_id and reset_counter. PF will return query_info + * and query_status to VF. + */ +struct virtchnl_fdir_query { + u16 vsi_id; /* INPUT */ + u16 pad1[3]; + u32 flow_id; /* INPUT */ + u32 reset_counter:1; /* INPUT */ + struct virtchnl_fdir_query_info query_info; /* OUTPUT */ + enum virtchnl_fdir_prgm_status status; /* OUTPUT */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_fdir_query); + +/* TX and RX queue types are valid in legacy as well as split queue models. + * With Split Queue model, 2 additional types are introduced - TX_COMPLETION + * and RX_BUFFER. In split queue model, RX corresponds to the queue where HW + * posts completions. + */ +enum virtchnl_queue_type { + VIRTCHNL_QUEUE_TYPE_TX = 0, + VIRTCHNL_QUEUE_TYPE_RX = 1, + VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2, + VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3, + VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4, + VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5 +}; + + +/* structure to specify a chunk of contiguous queues */ +struct virtchnl_queue_chunk { + enum virtchnl_queue_type type; + u16 start_queue_id; + u16 num_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); + +/* structure to specify several chunks of contiguous queues */ +struct virtchnl_queue_chunks { + u16 num_chunks; + u16 rsvd; + struct virtchnl_queue_chunk chunks[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks); + + +/* VIRTCHNL_OP_ENABLE_QUEUES_V2 + * VIRTCHNL_OP_DISABLE_QUEUES_V2 + * VIRTCHNL_OP_DEL_QUEUES + * + * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * then all of these ops are available. + * + * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are + * available. + * + * PF sends these messages to enable, disable or delete queues specified in + * chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues + * to be enabled/disabled/deleted. Also applicable to single queue RX or + * TX. CP performs requested action and returns status. 
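+ *
+ * A hedged example: to enable 16 Tx and 16 Rx queues starting at queue
+ * id 0, the sender could build one message with chunks.num_chunks = 2,
+ *
+ *	chunks.chunks[0] = { VIRTCHNL_QUEUE_TYPE_TX, 0, 16 };
+ *	chunks.chunks[1] = { VIRTCHNL_QUEUE_TYPE_RX, 0, 16 };
+ *
+ * sized as sizeof(struct virtchnl_del_ena_dis_queues) +
+ * (num_chunks - 1) * sizeof(struct virtchnl_queue_chunk), matching the
+ * validation logic in virtchnl_vc_validate_vf_msg() below.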
+ */ +struct virtchnl_del_ena_dis_queues { + u16 vport_id; + u16 pad; + struct virtchnl_queue_chunks chunks; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues); + +/* Virtchannel interrupt throttling rate index */ +enum virtchnl_itr_idx { + VIRTCHNL_ITR_IDX_0 = 0, + VIRTCHNL_ITR_IDX_1 = 1, + VIRTCHNL_ITR_IDX_NO_ITR = 3, +}; + +/* Queue to vector mapping */ +struct virtchnl_queue_vector { + u16 queue_id; + u16 vector_id; + u8 pad[4]; + enum virtchnl_itr_idx itr_idx; + enum virtchnl_queue_type queue_type; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector); + +/* VIRTCHNL_OP_MAP_QUEUE_VECTOR + * VIRTCHNL_OP_UNMAP_QUEUE_VECTOR + * + * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * then all of these ops are available. + * + * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available. + * + * PF sends this message to map or unmap queues to vectors and ITR index + * registers. External data buffer contains virtchnl_queue_vector_maps structure + * that contains num_qv_maps of virtchnl_queue_vector structures. + * CP maps the requested queue vector maps after validating the queue and vector + * ids and returns a status code. + */ +struct virtchnl_queue_vector_maps { + u16 vport_id; + u16 num_qv_maps; + u8 pad[4]; + struct virtchnl_queue_vector qv_maps[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps); + + /* Since VF messages are limited by u16 size, precalculate the maximum possible * values of nested elements in virtchnl structures that virtual channel can * possibly handle in a single message. @@ -690,23 +1278,14 @@ enum virtchnl_vector_limits { VIRTCHNL_OP_ENABLE_CHANNELS_MAX = ((u16)(~0) - sizeof(struct virtchnl_tc_info)) / sizeof(struct virtchnl_channel_info), -}; -/* VF reset states - these are written into the RSTAT register: - * VFGEN_RSTAT on the VF - * When the PF initiates a reset, it writes 0 - * When the reset is complete, it writes 1 - * When the PF detects that the VF has recovered, it writes 2 - * VF checks this register periodically to determine if a reset has occurred, - * then polls it to know when the reset is complete. - * If either the PF or VF reads the register while the hardware - * is in a reset state, it will return DEADBEEF, which, when masked - * will result in 3. - */ -enum virtchnl_vfr_states { - VIRTCHNL_VFR_INPROGRESS = 0, - VIRTCHNL_VFR_COMPLETED, - VIRTCHNL_VFR_VFACTIVE, + VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX = + ((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) / + sizeof(struct virtchnl_queue_chunk), + + VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX = + ((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) / + sizeof(struct virtchnl_queue_vector), }; /** @@ -723,7 +1302,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, u8 *msg, u16 msglen) { bool err_msg_format = false; - int valid_len = 0; + u32 valid_len = 0; /* Validate message length. 
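 * For fixed-size opcodes, valid_len is simply the size of the
 * associated structure; for variable-size opcodes (e.g. the V2 queue
 * ops below) the element count carried in the message is checked
 * against the virtchnl_vector_limits values above and then folded
 * into valid_len before the final msglen comparison.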
*/ switch (v_opcode) { @@ -779,6 +1358,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DISABLE_QUEUES: valid_len = sizeof(struct virtchnl_queue_select); break; + case VIRTCHNL_OP_GET_MAX_RSS_QREGION: + break; case VIRTCHNL_OP_ADD_ETH_ADDR: case VIRTCHNL_OP_DEL_ETH_ADDR: valid_len = sizeof(struct virtchnl_ether_addr_list); @@ -879,6 +1460,61 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DEL_CLOUD_FILTER: valid_len = sizeof(struct virtchnl_filter); break; + case VIRTCHNL_OP_DCF_CMD_DESC: + case VIRTCHNL_OP_DCF_CMD_BUFF: + /* These two opcodes are specific to handle the AdminQ command, + * so the validation needs to be done in PF's context. + */ + valid_len = msglen; + break; + case VIRTCHNL_OP_DCF_DISABLE: + case VIRTCHNL_OP_DCF_GET_VSI_MAP: + case VIRTCHNL_OP_DCF_GET_PKG_INFO: + break; + case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: + break; + case VIRTCHNL_OP_ADD_RSS_CFG: + case VIRTCHNL_OP_DEL_RSS_CFG: + valid_len = sizeof(struct virtchnl_rss_cfg); + break; + case VIRTCHNL_OP_ADD_FDIR_FILTER: + valid_len = sizeof(struct virtchnl_fdir_add); + break; + case VIRTCHNL_OP_DEL_FDIR_FILTER: + valid_len = sizeof(struct virtchnl_fdir_del); + break; + case VIRTCHNL_OP_QUERY_FDIR_FILTER: + valid_len = sizeof(struct virtchnl_fdir_query); + break; + case VIRTCHNL_OP_ENABLE_QUEUES_V2: + case VIRTCHNL_OP_DISABLE_QUEUES_V2: + valid_len = sizeof(struct virtchnl_del_ena_dis_queues); + if (msglen >= valid_len) { + struct virtchnl_del_ena_dis_queues *qs = + (struct virtchnl_del_ena_dis_queues *)msg; + if (qs->chunks.num_chunks == 0 || + qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) { + err_msg_format = true; + break; + } + valid_len += (qs->chunks.num_chunks - 1) * + sizeof(struct virtchnl_queue_chunk); + } + break; + case VIRTCHNL_OP_MAP_QUEUE_VECTOR: + valid_len = sizeof(struct virtchnl_queue_vector_maps); + if (msglen >= valid_len) { + struct virtchnl_queue_vector_maps *v_qp = + (struct virtchnl_queue_vector_maps *)msg; + if (v_qp->num_qv_maps == 0 || + v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) { + err_msg_format = true; + break; + } + valid_len += (v_qp->num_qv_maps - 1) * + sizeof(struct virtchnl_queue_vector); + } + break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: