/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
* Copyright(c) 2006-2019 Solarflare Communications Inc.
*/
EFX_FAMILY_NTYPES
} efx_family_t;
+/* Type of a PCI BAR: memory-mapped or I/O-port. */
+typedef enum efx_bar_type_e {
+	EFX_BAR_TYPE_MEM,	/* Memory-mapped BAR */
+	EFX_BAR_TYPE_IO		/* I/O-port BAR */
+} efx_bar_type_t;
+
+/*
+ * A region within a PCI BAR: the BAR it lives in (type and index) and
+ * the byte window inside that BAR.
+ */
+typedef struct efx_bar_region_s {
+	efx_bar_type_t ebr_type;	/* Memory or I/O BAR */
+	int ebr_index;			/* BAR index on the PCI device */
+	efsys_dma_addr_t ebr_offset;	/* Region start offset within the BAR */
+	efsys_dma_addr_t ebr_length;	/* Region length in bytes */
+} efx_bar_region_t;
+
+/* The function is deprecated. It is used only if Riverhead is not supported. */
LIBEFX_API
extern __checkReturn efx_rc_t
efx_family(
__out efx_family_t *efp,
__out unsigned int *membarp);
+#if EFSYS_OPT_PCI
+
+typedef struct efx_pci_ops_s {
+ /*
+ * Function for reading PCIe configuration space.
+ *
+ * espcp System-specific PCIe device handle;
+ * offset Offset inside PCIe configuration space to start reading
+ * from;
+ * edp EFX DWORD structure that should be populated by function
+ * in little-endian order;
+ *
+ * Returns status code, 0 on success, any other value on error.
+ */
+ efx_rc_t (*epo_config_readd)(efsys_pci_config_t *espcp,
+ uint32_t offset, efx_dword_t *edp);
+ /*
+ * Function for finding PCIe memory bar handle by its index from a PCIe
+ * device handle. The found memory bar is available in read-only mode.
+ *
+ * configp System-specific PCIe device handle;
+ * index Memory bar index;
+ * memp Pointer to the found memory bar handle;
+ *
+ * Returns status code, 0 on success, any other value on error.
+ */
+ efx_rc_t (*epo_find_mem_bar)(efsys_pci_config_t *configp,
+ int index, efsys_bar_t *memp);
+} efx_pci_ops_t;
+
+/*
+ * Determine the EFX family and perform lookup of the function control window.
+ *
+ * The function requires a PCI config handle from which all memory BARs can
+ * be accessed.
+ * A user of the API must be aware of memory BAR indexes (not available
+ * on Windows).
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_family_probe_bar(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __in efsys_pci_config_t *espcp,
+ __in const efx_pci_ops_t *epop,
+ __out efx_family_t *efp,
+ __out efx_bar_region_t *ebrp);
+
+#endif /* EFSYS_OPT_PCI */
+
#define EFX_PCI_VENID_SFC 0x1924
#define EFX_PCI_VENID_XILINX 0x10EE
__in efx_family_t family,
__in efsys_identifier_t *esip,
__in efsys_bar_t *esbp,
+ __in uint32_t fcw_offset,
__in efsys_lock_t *eslp,
__deref_out efx_nic_t **enpp);
efx_phy_verify(
__in efx_nic_t *enp);
-#if EFSYS_OPT_PHY_LED_CONTROL
-
typedef enum efx_phy_led_mode_e {
EFX_PHY_LED_DEFAULT = 0,
EFX_PHY_LED_OFF,
EFX_PHY_LED_NMODES
} efx_phy_led_mode_t;
+#if EFSYS_OPT_PHY_LED_CONTROL
+
LIBEFX_API
extern __checkReturn efx_rc_t
efx_phy_led_set(
uint32_t enc_evq_timer_quantum_ns;
uint32_t enc_evq_timer_max_us;
uint32_t enc_clk_mult;
+ uint32_t enc_ev_ew_desc_size;
uint32_t enc_ev_desc_size;
uint32_t enc_rx_desc_size;
uint32_t enc_tx_desc_size;
+ /* Maximum Rx prefix size if many Rx prefixes are supported */
uint32_t enc_rx_prefix_size;
uint32_t enc_rx_buf_align_start;
uint32_t enc_rx_buf_align_end;
/* Datapath firmware vport reconfigure support */
boolean_t enc_vport_reconfigure_supported;
boolean_t enc_rx_disable_scatter_supported;
+ /* Maximum number of Rx scatter segments supported by HW */
+ uint32_t enc_rx_scatter_max;
boolean_t enc_allow_set_mac_with_installed_filters;
boolean_t enc_enhanced_set_mac_supported;
boolean_t enc_init_evq_v2_supported;
+ boolean_t enc_init_evq_extended_width_supported;
boolean_t enc_no_cont_ev_mode_supported;
boolean_t enc_init_rxq_with_buffer_size;
boolean_t enc_rx_packed_stream_supported;
uint32_t enc_mac_stats_nstats;
boolean_t enc_fec_counters;
boolean_t enc_hlb_counters;
+ /* NIC support for Match-Action Engine (MAE). */
+ boolean_t enc_mae_supported;
/* Firmware support for "FLAG" and "MARK" filter actions */
boolean_t enc_filter_action_flag_supported;
boolean_t enc_filter_action_mark_supported;
uint32_t enc_assigned_port;
} efx_nic_cfg_t;
+#define EFX_PCI_VF_INVALID 0xffff
+
#define EFX_VPORT_PCI_FUNCTION_IS_PF(configp) \
- ((configp)->evc_function == 0xffff)
+ ((configp)->evc_function == EFX_PCI_VF_INVALID)
-#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
-#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff)
+#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == EFX_PCI_VF_INVALID)
+#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != EFX_PCI_VF_INVALID)
#define EFX_PCI_FUNCTION(_encp) \
(EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf)
__in efx_nic_t *enp,
__out efx_nic_fw_info_t *enfip);
+/* Buffer lengths (including the NUL) of the efx_nic_board_info_t strings */
+#define EFX_NIC_BOARD_INFO_SERIAL_LEN (64)
+#define EFX_NIC_BOARD_INFO_NAME_LEN (16)
+
+/* NIC board identification: serial number, board name and revision */
+typedef struct efx_nic_board_info_s {
+	/* The following two fields are NUL-terminated ASCII strings. */
+	char enbi_serial[EFX_NIC_BOARD_INFO_SERIAL_LEN];
+	char enbi_name[EFX_NIC_BOARD_INFO_NAME_LEN];
+	uint32_t enbi_revision;		/* Board revision number */
+} efx_nic_board_info_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_nic_get_board_info(
+ __in efx_nic_t *enp,
+ __out efx_nic_board_info_t *board_infop);
+
/* Driver resource limits (minimum required/maximum usable). */
typedef struct efx_drv_limits_s {
uint32_t edl_min_evq_count;
extern __checkReturn size_t
efx_evq_size(
__in const efx_nic_t *enp,
- __in unsigned int ndescs);
+ __in unsigned int ndescs,
+ __in uint32_t flags);
LIBEFX_API
extern __checkReturn unsigned int
efx_evq_nbufs(
__in const efx_nic_t *enp,
- __in unsigned int ndescs);
+ __in unsigned int ndescs,
+ __in uint32_t flags);
#define EFX_EVQ_FLAGS_TYPE_MASK (0x3)
#define EFX_EVQ_FLAGS_TYPE_AUTO (0x0)
*/
#define EFX_EVQ_FLAGS_NO_CONT_EV (0x10)
+/* Configure EVQ for extended width events (EF100 only) */
+#define EFX_EVQ_FLAGS_EXTENDED_WIDTH (0x20)
+
+
LIBEFX_API
extern __checkReturn efx_rc_t
efx_ev_qcreate(
#endif /* EFSYS_OPT_MAC_STATS */
+#if EFSYS_OPT_DESC_PROXY
+
+/*
+ * NOTE: This callback returns the raw descriptor data, which has not been
+ * converted to host endian. The callback must use the EFX_OWORD macros
+ * to extract the descriptor fields as host endian values.
+ */
+typedef __checkReturn boolean_t
+(*efx_desc_proxy_txq_desc_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t vi_id,
+ __in efx_oword_t txq_desc);
+
+/*
+ * NOTE: This callback returns the raw descriptor data, which has not been
+ * converted to host endian. The callback must use the EFX_OWORD macros
+ * to extract the descriptor fields as host endian values.
+ */
+typedef __checkReturn boolean_t
+(*efx_desc_proxy_virtq_desc_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t vi_id,
+ __in uint16_t avail,
+ __in efx_oword_t virtq_desc);
+
+#endif /* EFSYS_OPT_DESC_PROXY */
+
typedef struct efx_ev_callbacks_s {
efx_initialized_ev_t eec_initialized;
efx_rx_ev_t eec_rx;
#if EFSYS_OPT_MAC_STATS
efx_mac_stats_ev_t eec_mac_stats;
#endif /* EFSYS_OPT_MAC_STATS */
+#if EFSYS_OPT_DESC_PROXY
+ efx_desc_proxy_txq_desc_ev_t eec_desc_proxy_txq_desc;
+ efx_desc_proxy_virtq_desc_ev_t eec_desc_proxy_virtq_desc;
+#endif /* EFSYS_OPT_DESC_PROXY */
+
} efx_ev_callbacks_t;
LIBEFX_API
#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+/*
+ * libefx representation of the Rx prefix layout information.
+ *
+ * The information may be used inside libefx to implement Rx prefix fields
+ * accessors and by drivers which process Rx prefix itself.
+ */
+
+/*
+ * All known Rx prefix fields.
+ *
+ * An Rx prefix may have a subset of these fields.
+ */
+typedef enum efx_rx_prefix_field_e {
+ EFX_RX_PREFIX_FIELD_LENGTH = 0,
+ EFX_RX_PREFIX_FIELD_ORIG_LENGTH,
+ EFX_RX_PREFIX_FIELD_CLASS,
+ EFX_RX_PREFIX_FIELD_RSS_HASH,
+ EFX_RX_PREFIX_FIELD_RSS_HASH_VALID,
+ EFX_RX_PREFIX_FIELD_PARTIAL_TSTAMP,
+ EFX_RX_PREFIX_FIELD_VLAN_STRIP_TCI,
+ EFX_RX_PREFIX_FIELD_INNER_VLAN_STRIP_TCI,
+ EFX_RX_PREFIX_FIELD_USER_FLAG,
+ EFX_RX_PREFIX_FIELD_USER_MARK,
+ EFX_RX_PREFIX_FIELD_USER_MARK_VALID,
+ EFX_RX_PREFIX_FIELD_CSUM_FRAME,
+ EFX_RX_PREFIX_FIELD_INGRESS_VPORT,
+ EFX_RX_PREFIX_NFIELDS
+} efx_rx_prefix_field_t;
+
+/*
+ * Location and endianness of a field in Rx prefix.
+ *
+ * If width is zero, the field is not present.
+ */
+typedef struct efx_rx_prefix_field_info_s {
+ uint16_t erpfi_offset_bits;
+ uint8_t erpfi_width_bits;
+ boolean_t erpfi_big_endian;
+} efx_rx_prefix_field_info_t;
+
+/* Helper macro to define Rx prefix fields */
+#define EFX_RX_PREFIX_FIELD(_efx, _field, _big_endian) \
+ [EFX_RX_PREFIX_FIELD_ ## _efx] = { \
+ .erpfi_offset_bits = EFX_LOW_BIT(_field), \
+ .erpfi_width_bits = EFX_WIDTH(_field), \
+ .erpfi_big_endian = (_big_endian), \
+ }
+
+typedef struct efx_rx_prefix_layout_s {
+ uint32_t erpl_id;
+ uint8_t erpl_length;
+ efx_rx_prefix_field_info_t erpl_fields[EFX_RX_PREFIX_NFIELDS];
+} efx_rx_prefix_layout_t;
+
+/*
+ * Helper function to find out a bit mask of wanted but not available
+ * Rx prefix fields.
+ *
+ * A field is considered not available if any of its parameters mismatch.
+ */
+LIBEFX_API
+extern __checkReturn uint32_t
+efx_rx_prefix_layout_check(
+ __in const efx_rx_prefix_layout_t *available,
+ __in const efx_rx_prefix_layout_t *wanted);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_rx_prefix_get_layout(
+ __in const efx_rxq_t *erp,
+ __out efx_rx_prefix_layout_t *erplp);
+
typedef enum efx_rxq_type_e {
EFX_RXQ_TYPE_DEFAULT,
EFX_RXQ_TYPE_PACKED_STREAM,
* Rx checksum offload results.
*/
#define EFX_RXQ_FLAG_INNER_CLASSES 0x2
+/*
+ * Request delivery of the RSS hash calculated by HW to be used by
+ * the driver.
+ */
+#define EFX_RXQ_FLAG_RSS_HASH 0x4
LIBEFX_API
extern __checkReturn efx_rc_t
__in uint16_t port /* host/cpu-endian */,
__in efx_tunnel_protocol_t protocol);
+/*
+ * Returns EBUSY if reconfiguration of the port is in progress in
+ * another thread.
+ */
LIBEFX_API
extern __checkReturn efx_rc_t
efx_tunnel_config_udp_remove(
__in uint16_t port /* host/cpu-endian */,
__in efx_tunnel_protocol_t protocol);
+/*
+ * Returns EBUSY if reconfiguration of any of the tunnel entries
+ * is in progress in another thread.
+ */
LIBEFX_API
-extern void
+extern __checkReturn efx_rc_t
efx_tunnel_config_clear(
__in efx_nic_t *enp);
#define EFX_VPORT_ID_INVALID 0
typedef struct efx_vport_config_s {
- /* Either VF index or 0xffff for PF */
+ /* Either VF index or EFX_PCI_VF_INVALID for PF */
uint16_t evc_function;
/* VLAN ID of the associated function */
uint16_t evc_vid;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
+#if EFSYS_OPT_MAE
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_init(
+ __in efx_nic_t *enp);
+
+LIBEFX_API
+extern void
+efx_mae_fini(
+ __in efx_nic_t *enp);
+
+typedef struct efx_mae_limits_s {
+ uint32_t eml_max_n_action_prios;
+ uint32_t eml_max_n_outer_prios;
+ uint32_t eml_encap_types_supported;
+ uint32_t eml_encap_header_size_limit;
+} efx_mae_limits_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_get_limits(
+ __in efx_nic_t *enp,
+ __out efx_mae_limits_t *emlp);
+
+typedef enum efx_mae_rule_type_e {
+ EFX_MAE_RULE_ACTION = 0,
+ EFX_MAE_RULE_OUTER,
+
+ EFX_MAE_RULE_NTYPES
+} efx_mae_rule_type_t;
+
+typedef struct efx_mae_match_spec_s efx_mae_match_spec_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_match_spec_init(
+ __in efx_nic_t *enp,
+ __in efx_mae_rule_type_t type,
+ __in uint32_t prio,
+ __out efx_mae_match_spec_t **specp);
+
+LIBEFX_API
+extern void
+efx_mae_match_spec_fini(
+ __in efx_nic_t *enp,
+ __in efx_mae_match_spec_t *spec);
+
+typedef enum efx_mae_field_id_e {
+ /*
+ * Fields which can be set by efx_mae_match_spec_field_set()
+ * or by using dedicated field-specific helper APIs.
+ */
+ EFX_MAE_FIELD_INGRESS_MPORT_SELECTOR = 0,
+ EFX_MAE_FIELD_ETHER_TYPE_BE,
+ EFX_MAE_FIELD_ETH_SADDR_BE,
+ EFX_MAE_FIELD_ETH_DADDR_BE,
+ EFX_MAE_FIELD_VLAN0_TCI_BE,
+ EFX_MAE_FIELD_VLAN0_PROTO_BE,
+ EFX_MAE_FIELD_VLAN1_TCI_BE,
+ EFX_MAE_FIELD_VLAN1_PROTO_BE,
+ EFX_MAE_FIELD_SRC_IP4_BE,
+ EFX_MAE_FIELD_DST_IP4_BE,
+ EFX_MAE_FIELD_IP_PROTO,
+ EFX_MAE_FIELD_IP_TOS,
+ EFX_MAE_FIELD_IP_TTL,
+ EFX_MAE_FIELD_SRC_IP6_BE,
+ EFX_MAE_FIELD_DST_IP6_BE,
+ EFX_MAE_FIELD_L4_SPORT_BE,
+ EFX_MAE_FIELD_L4_DPORT_BE,
+ EFX_MAE_FIELD_TCP_FLAGS_BE,
+ EFX_MAE_FIELD_ENC_ETHER_TYPE_BE,
+ EFX_MAE_FIELD_ENC_ETH_SADDR_BE,
+ EFX_MAE_FIELD_ENC_ETH_DADDR_BE,
+ EFX_MAE_FIELD_ENC_VLAN0_TCI_BE,
+ EFX_MAE_FIELD_ENC_VLAN0_PROTO_BE,
+ EFX_MAE_FIELD_ENC_VLAN1_TCI_BE,
+ EFX_MAE_FIELD_ENC_VLAN1_PROTO_BE,
+ EFX_MAE_FIELD_ENC_SRC_IP4_BE,
+ EFX_MAE_FIELD_ENC_DST_IP4_BE,
+ EFX_MAE_FIELD_ENC_IP_PROTO,
+ EFX_MAE_FIELD_ENC_IP_TOS,
+ EFX_MAE_FIELD_ENC_IP_TTL,
+ EFX_MAE_FIELD_ENC_SRC_IP6_BE,
+ EFX_MAE_FIELD_ENC_DST_IP6_BE,
+ EFX_MAE_FIELD_ENC_L4_SPORT_BE,
+ EFX_MAE_FIELD_ENC_L4_DPORT_BE,
+ EFX_MAE_FIELD_ENC_VNET_ID_BE,
+ EFX_MAE_FIELD_OUTER_RULE_ID,
+
+ /* Single bits which can be set by efx_mae_match_spec_bit_set(). */
+ EFX_MAE_FIELD_HAS_OVLAN,
+ EFX_MAE_FIELD_HAS_IVLAN,
+ EFX_MAE_FIELD_ENC_HAS_OVLAN,
+ EFX_MAE_FIELD_ENC_HAS_IVLAN,
+
+ EFX_MAE_FIELD_NIDS
+} efx_mae_field_id_t;
+
+/* MPORT selector. Used to refer to MPORTs in match/action rules. */
+typedef struct efx_mport_sel_s {
+	/* Opaque to the caller; filled in by efx_mae_mport_by_*() helpers. */
+	uint32_t sel;
+} efx_mport_sel_t;
+
+/* Null MPORT selector value */
+#define EFX_MPORT_NULL (0U)
+
+/*
+ * Get MPORT selector of a physical port.
+ *
+ * The resulting MPORT selector is opaque to the caller and can be
+ * passed as an argument to efx_mae_match_spec_mport_set()
+ * and efx_mae_action_set_populate_deliver().
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_mport_by_phy_port(
+ __in uint32_t phy_port,
+ __out efx_mport_sel_t *mportp);
+
+/*
+ * Get MPORT selector of a PCIe function.
+ *
+ * The resulting MPORT selector is opaque to the caller and can be
+ * passed as an argument to efx_mae_match_spec_mport_set()
+ * and efx_mae_action_set_populate_deliver().
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_mport_by_pcie_function(
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out efx_mport_sel_t *mportp);
+
+/*
+ * Fields which have BE postfix in their named constants are expected
+ * to be passed by callers in big-endian byte order. They will appear
+ * in the MCDI buffer, which is a part of the match specification, in
+ * the very same byte order, that is, no conversion will be performed.
+ *
+ * Fields which don't have BE postfix in their named constants are in
+ * host byte order. MCDI expects them to be little-endian, so the API
+ * will take care to carry out conversion to little-endian byte order.
+ * At the moment, the only field in host byte order is MPORT selector.
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_match_spec_field_set(
+ __in efx_mae_match_spec_t *spec,
+ __in efx_mae_field_id_t field_id,
+ __in size_t value_size,
+ __in_bcount(value_size) const uint8_t *value,
+ __in size_t mask_size,
+ __in_bcount(mask_size) const uint8_t *mask);
+
+/* The corresponding mask will be set to B_TRUE. */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_match_spec_bit_set(
+ __in efx_mae_match_spec_t *spec,
+ __in efx_mae_field_id_t field_id,
+ __in boolean_t value);
+
+/* If the mask argument is NULL, the API will use full mask by default. */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_match_spec_mport_set(
+ __in efx_mae_match_spec_t *spec,
+ __in const efx_mport_sel_t *valuep,
+ __in_opt const efx_mport_sel_t *maskp);
+
+LIBEFX_API
+extern __checkReturn boolean_t
+efx_mae_match_specs_equal(
+ __in const efx_mae_match_spec_t *left,
+ __in const efx_mae_match_spec_t *right);
+
+/*
+ * Make sure that match fields known by EFX have proper masks set
+ * in the match specification as per requirements of SF-122526-TC.
+ *
+ * In the case efx_mae_field_id_t lacks named identifiers for any
+ * fields which the FW maintains with support status MATCH_ALWAYS,
+ * the validation result may not be accurate.
+ */
+LIBEFX_API
+extern __checkReturn boolean_t
+efx_mae_match_spec_is_valid(
+ __in efx_nic_t *enp,
+ __in const efx_mae_match_spec_t *spec);
+
+typedef struct efx_mae_actions_s efx_mae_actions_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_spec_init(
+ __in efx_nic_t *enp,
+ __out efx_mae_actions_t **specp);
+
+LIBEFX_API
+extern void
+efx_mae_action_set_spec_fini(
+ __in efx_nic_t *enp,
+ __in efx_mae_actions_t *spec);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_decap(
+ __in efx_mae_actions_t *spec);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_vlan_pop(
+ __in efx_mae_actions_t *spec);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_vlan_push(
+ __in efx_mae_actions_t *spec,
+ __in uint16_t tpid_be,
+ __in uint16_t tci_be);
+
+/*
+ * Use efx_mae_action_set_fill_in_eh_id() to set ID of the allocated
+ * encap. header in the specification prior to action set allocation.
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_encap(
+ __in efx_mae_actions_t *spec);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_flag(
+ __in efx_mae_actions_t *spec);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_mark(
+ __in efx_mae_actions_t *spec,
+ __in uint32_t mark_value);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_deliver(
+ __in efx_mae_actions_t *spec,
+ __in const efx_mport_sel_t *mportp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_populate_drop(
+ __in efx_mae_actions_t *spec);
+
+LIBEFX_API
+extern __checkReturn boolean_t
+efx_mae_action_set_specs_equal(
+ __in const efx_mae_actions_t *left,
+ __in const efx_mae_actions_t *right);
+
+/*
+ * Conduct a comparison to check whether two match specifications
+ * of equal rule type (action / outer) and priority would map to
+ * the very same rule class from the firmware's standpoint.
+ *
+ * For match specification fields that are not supported by firmware,
+ * the rule class only matches if the mask/value pairs for that field
+ * are equal. Clients should use efx_mae_match_spec_is_valid() before
+ * calling this API to detect usage of unsupported fields.
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_match_specs_class_cmp(
+ __in efx_nic_t *enp,
+ __in const efx_mae_match_spec_t *left,
+ __in const efx_mae_match_spec_t *right,
+ __out boolean_t *have_same_classp);
+
+#define EFX_MAE_RSRC_ID_INVALID UINT32_MAX
+
+/* Rule ID */
+typedef struct efx_mae_rule_id_s {
+ uint32_t id;
+} efx_mae_rule_id_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_outer_rule_insert(
+ __in efx_nic_t *enp,
+ __in const efx_mae_match_spec_t *spec,
+ __in efx_tunnel_protocol_t encap_type,
+ __out efx_mae_rule_id_t *or_idp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_outer_rule_remove(
+ __in efx_nic_t *enp,
+ __in const efx_mae_rule_id_t *or_idp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_match_spec_outer_rule_id_set(
+ __in efx_mae_match_spec_t *spec,
+ __in const efx_mae_rule_id_t *or_idp);
+
+/* Encap. header ID */
+typedef struct efx_mae_eh_id_s {
+ uint32_t id;
+} efx_mae_eh_id_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_encap_header_alloc(
+ __in efx_nic_t *enp,
+ __in efx_tunnel_protocol_t encap_type,
+ __in_bcount(header_size) uint8_t *header_data,
+ __in size_t header_size,
+ __out efx_mae_eh_id_t *eh_idp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_encap_header_free(
+ __in efx_nic_t *enp,
+ __in const efx_mae_eh_id_t *eh_idp);
+
+/* See description before efx_mae_action_set_populate_encap(). */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_fill_in_eh_id(
+ __in efx_mae_actions_t *spec,
+ __in const efx_mae_eh_id_t *eh_idp);
+
+/* Action set ID */
+typedef struct efx_mae_aset_id_s {
+ uint32_t id;
+} efx_mae_aset_id_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_alloc(
+ __in efx_nic_t *enp,
+ __in const efx_mae_actions_t *spec,
+ __out efx_mae_aset_id_t *aset_idp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_set_free(
+ __in efx_nic_t *enp,
+ __in const efx_mae_aset_id_t *aset_idp);
+
+/* Action set list ID */
+typedef struct efx_mae_aset_list_id_s {
+ uint32_t id;
+} efx_mae_aset_list_id_t;
+
+/*
+ * Either action set list ID or action set ID must be passed to this API,
+ * but not both.
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_rule_insert(
+ __in efx_nic_t *enp,
+ __in const efx_mae_match_spec_t *spec,
+ __in const efx_mae_aset_list_id_t *asl_idp,
+ __in const efx_mae_aset_id_t *as_idp,
+ __out efx_mae_rule_id_t *ar_idp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_mae_action_rule_remove(
+ __in efx_nic_t *enp,
+ __in const efx_mae_rule_id_t *ar_idp);
+
+#endif /* EFSYS_OPT_MAE */
+
+#if EFSYS_OPT_VIRTIO
+
+/*
+ * A virtio net device can have one or more pairs of Rx/Tx virtqueues
+ * while a virtio block device has a single virtqueue.
+ * For further details, refer to section 4.2.3 of SF-120734.
+ */
+typedef enum efx_virtio_vq_type_e {
+ EFX_VIRTIO_VQ_TYPE_NET_RXQ,
+ EFX_VIRTIO_VQ_TYPE_NET_TXQ,
+ EFX_VIRTIO_VQ_TYPE_BLOCK,
+ EFX_VIRTIO_VQ_NTYPES
+} efx_virtio_vq_type_t;
+
+/* Initial producer/consumer index values for a virtqueue being (re)started */
+typedef struct efx_virtio_vq_dyncfg_s {
+	/*
+	 * If this queue is being created to be migrated then this
+	 * should be the FINAL_PIDX value returned by MC_CMD_VIRTIO_FINI_QUEUE
+	 * of the queue being migrated from. Otherwise, it should be zero.
+	 */
+	uint32_t evvd_vq_pidx;
+	/*
+	 * If this queue is being created to be migrated then this
+	 * should be the FINAL_CIDX value returned by MC_CMD_VIRTIO_FINI_QUEUE
+	 * of the queue being migrated from. Otherwise, it should be zero.
+	 */
+	uint32_t evvd_vq_cidx;
+} efx_virtio_vq_dyncfg_t;
+
+/*
+ * Virtqueue size must be a power of 2, maximum size is 32768
+ * (see VIRTIO v1.1 section 2.6)
+ */
+#define EFX_VIRTIO_MAX_VQ_SIZE 0x8000
+
+/* Static virtqueue configuration, supplied to efx_virtio_qstart() */
+typedef struct efx_virtio_vq_cfg_s {
+	unsigned int evvc_vq_num;	/* Virtqueue number */
+	efx_virtio_vq_type_t evvc_type;	/* Net Rx/Tx or block queue */
+	/*
+	 * vDPA as VF : It is target VF number if queue is being created on VF.
+	 * vDPA as PF : If queue to be created on PF then it should be
+	 * EFX_PCI_VF_INVALID.
+	 */
+	uint16_t evvc_target_vf;
+	/*
+	 * Maximum virtqueue size is EFX_VIRTIO_MAX_VQ_SIZE and
+	 * virtqueue size 0 means the queue is unavailable.
+	 */
+	uint32_t evvc_vq_size;
+	/* DMA addresses of the three virtqueue rings */
+	efsys_dma_addr_t evvc_desc_tbl_addr;
+	efsys_dma_addr_t evvc_avail_ring_addr;
+	efsys_dma_addr_t evvc_used_ring_addr;
+	/* MSIX vector number for the virtqueue or 0xFFFF if MSIX is not used */
+	uint16_t evvc_msix_vector;
+	/*
+	 * evvc_pas_id contains a PCIe address space identifier if the queue
+	 * uses PASID.
+	 */
+	boolean_t evvc_use_pasid;
+	uint32_t evvc_pas_id;
+	/* Negotiated virtio features to be applied to this virtqueue */
+	/*
+	 * NOTE(review): field prefix "evcc_" breaks the "evvc_" convention
+	 * of every sibling field in this struct — looks like a typo, but
+	 * renaming would change the API, so confirm with users first.
+	 */
+	uint64_t evcc_features;
+} efx_virtio_vq_cfg_t;
+
+typedef struct efx_virtio_vq_s efx_virtio_vq_t;
+
+typedef enum efx_virtio_device_type_e {
+ EFX_VIRTIO_DEVICE_TYPE_RESERVED,
+ EFX_VIRTIO_DEVICE_TYPE_NET,
+ EFX_VIRTIO_DEVICE_TYPE_BLOCK,
+ EFX_VIRTIO_DEVICE_NTYPES
+} efx_virtio_device_type_t;
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_init(
+ __in efx_nic_t *enp);
+
+LIBEFX_API
+extern void
+efx_virtio_fini(
+ __in efx_nic_t *enp);
+
+/*
+ * When the virtio net driver in the guest sets the
+ * VIRTIO_CONFIG_STATUS_DRIVER_OK bit, the hypervisor starts configuring all
+ * the virtqueues in the device. When the vhost_user has received
+ * VHOST_USER_SET_VRING_ENABLE for all the virtqueues, it invokes the vDPA
+ * driver callback dev_conf. The qcreate and qstart APIs are invoked from the
+ * dev_conf callback to create the virtqueues. For further details, refer to
+ * SF-122427.
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_qcreate(
+ __in efx_nic_t *enp,
+ __deref_out efx_virtio_vq_t **evvpp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_qstart(
+ __in efx_virtio_vq_t *evvp,
+ __in efx_virtio_vq_cfg_t *evvcp,
+ __in_opt efx_virtio_vq_dyncfg_t *evvdp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_qstop(
+ __in efx_virtio_vq_t *evvp,
+ __out_opt efx_virtio_vq_dyncfg_t *evvdp);
+
+LIBEFX_API
+extern void
+efx_virtio_qdestroy(
+ __in efx_virtio_vq_t *evvp);
+
+/*
+ * Get the offset in the BAR of the doorbells for a VI.
+ * net device : doorbell offset of the Rx and Tx queues
+ * block device : request doorbell offset in the BAR
+ * For further details, refer to section 4 of SF-119689.
+ */
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_get_doorbell_offset(
+ __in efx_virtio_vq_t *evvp,
+ __out uint32_t *offsetp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_get_features(
+ __in efx_nic_t *enp,
+ __in efx_virtio_device_type_t type,
+ __out uint64_t *featuresp);
+
+LIBEFX_API
+extern __checkReturn efx_rc_t
+efx_virtio_verify_features(
+ __in efx_nic_t *enp,
+ __in efx_virtio_device_type_t type,
+ __in uint64_t features);
+
+#endif /* EFSYS_OPT_VIRTIO */
+
#ifdef __cplusplus
}
#endif