/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
* Copyright(c) 2012-2019 Solarflare Communications Inc.
*/
#include "mcdi_mon.h"
#endif
-#if EFX_OPTS_EF10()
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
#include "ef10_tlv_layout.h"
MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
efx_rc_t rc;
- EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
+ EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
req.emr_in_buf = payload;
MC_CMD_GET_PORT_MODES_OUT_LEN);
efx_rc_t rc;
- EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
+ EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
req.emr_cmd = MC_CMD_GET_PORT_MODES;
req.emr_in_buf = payload;
return (rc);
}
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
+
+#if EFX_OPTS_EF10()
+
__checkReturn efx_rc_t
efx_mcdi_vadaptor_alloc(
__in efx_nic_t *enp,
return (rc);
}
+#endif /* EFX_OPTS_EF10() */
+
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
+
__checkReturn efx_rc_t
efx_mcdi_get_mac_address_pf(
__in efx_nic_t *enp,
MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
efx_rc_t rc;
- EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
+ EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
req.emr_in_buf = payload;
MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
efx_rc_t rc;
- EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
+ EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
req.emr_in_buf = payload;
MC_CMD_GET_CLOCK_OUT_LEN);
efx_rc_t rc;
- EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
+ EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));
req.emr_cmd = MC_CMD_GET_CLOCK;
req.emr_in_buf = payload;
req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
efx_mcdi_execute(enp, &req);
+
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
+ if (req.emr_out_length_used < MC_CMD_GET_RXDP_CONFIG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
/* RX DMA end padding is disabled */
break;
default:
rc = ENOTSUP;
- goto fail2;
+ goto fail3;
}
}
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
return (rc);
}
-static __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
efx_mcdi_alloc_vis(
__in efx_nic_t *enp,
__in uint32_t min_vi_count,
}
-static __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
efx_mcdi_free_vis(
__in efx_nic_t *enp)
{
return (rc);
}
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
+
+#if EFX_OPTS_EF10()
static __checkReturn efx_rc_t
efx_mcdi_alloc_piobuf(
return (efx_mcdi_unlink_piobuf(enp, vi_index));
}
+#endif /* EFX_OPTS_EF10() */
+
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
+
static __checkReturn efx_rc_t
ef10_mcdi_get_pf_count(
__in efx_nic_t *enp,
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
- MC_CMD_GET_CAPABILITIES_V5_OUT_LEN);
+ MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
efx_rc_t rc;
req.emr_cmd = MC_CMD_GET_CAPABILITIES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V7_OUT_LEN;
efx_mcdi_execute_quiet(enp, &req);
(MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
(1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
+#define CAP_FLAGS3(_req, _flag) \
+ (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) && \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V7_OUT_FLAGS3) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V7_OUT_ ## _flag ## _LBN))))
+
/* Check if RXDP firmware inserts 14 byte prefix */
if (CAP_FLAGS1(req, RX_PREFIX_LEN_14))
encp->enc_rx_prefix_size = 14;
else
encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
+ /* Check if TSOv3 is supported */
+ if (CAP_FLAGS2(req, TX_TSO_V3))
+ encp->enc_tso_v3_enabled = B_TRUE;
+ else
+ encp->enc_tso_v3_enabled = B_FALSE;
+
/* Check if the firmware has vadapter/vport/vswitch support */
if (CAP_FLAGS1(req, EVB))
encp->enc_datapath_cap_evb = B_TRUE;
else
encp->enc_rx_disable_scatter_supported = B_FALSE;
+ /* No limit on maximum number of Rx scatter elements per packet. */
+ encp->enc_rx_scatter_max = -1;
+
/* Check if the firmware supports packed stream mode */
if (CAP_FLAGS1(req, RX_PACKED_STREAM))
encp->enc_rx_packed_stream_supported = B_TRUE;
else
encp->enc_init_evq_v2_supported = B_FALSE;
+ /*
+ * Check if firmware supports extended width event queues, which have
+ * a different event descriptor layout.
+ */
+ if (CAP_FLAGS3(req, EXTENDED_WIDTH_EVQS_SUPPORTED))
+ encp->enc_init_evq_extended_width_supported = B_TRUE;
+ else
+ encp->enc_init_evq_extended_width_supported = B_FALSE;
+
/*
* Check if the NO_CONT_EV mode for RX events is supported.
*/
else
encp->enc_filter_action_mark_max = 0;
+#if EFSYS_OPT_MAE
+ /*
+ * Check support for EF100 Match Action Engine (MAE).
+ * MAE hardware is present on Riverhead boards (from R2),
+ * and on Keystone, and requires support in firmware.
+ *
+ * MAE control operations require MAE control privilege,
+ * which is not available for VFs.
+ *
+ * Privileges can change dynamically at runtime: we assume
+ * MAE support requires the privilege is granted initially,
+ * and ignore later dynamic changes.
+ */
+ if (CAP_FLAGS3(req, MAE_SUPPORTED) &&
+ EFX_MCDI_HAVE_PRIVILEGE(encp->enc_privilege_mask, MAE))
+ encp->enc_mae_supported = B_TRUE;
+ else
+ encp->enc_mae_supported = B_FALSE;
+#else
+ encp->enc_mae_supported = B_FALSE;
+#endif /* EFSYS_OPT_MAE */
+
#undef CAP_FLAGS1
#undef CAP_FLAGS2
+#undef CAP_FLAGS3
return (0);
(1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
},
+ /*
+ * Modes that on Riverhead allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ */
+ {
+ EFX_FAMILY_RIVERHEAD,
+ (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
+ (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
+ (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
+ { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
+ },
};
static __checkReturn efx_rc_t
return (rc);
}
-static __checkReturn efx_rc_t
+ __checkReturn efx_rc_t
efx_mcdi_nic_board_cfg(
__in efx_nic_t *enp)
{
EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so for most cases the value is informational only.
+ * If the privilege being discovered can't be granted dynamically,
+ * it's fine to rely on the value. In all other cases, DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail6;
+ encp->enc_privilege_mask = mask;
+
/* Board configuration (legacy) */
rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
if (rc != 0) {
if (rc == EACCES)
board_type = 0;
else
- goto fail6;
+ goto fail7;
}
encp->enc_board_type = board_type;
/* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
- goto fail7;
+ goto fail8;
/*
* Firmware with support for *_FEC capability bits does not
/* Obtain the default PHY advertised capabilities */
if ((rc = ef10_phy_get_link(enp, &els)) != 0)
- goto fail8;
+ goto fail9;
epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
/* Check capabilities of running datapath firmware */
if ((rc = ef10_get_datapath_caps(enp)) != 0)
- goto fail9;
+ goto fail10;
/* Get interrupt vector limits */
if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
if (EFX_PCI_FUNCTION_IS_PF(encp))
- goto fail10;
+ goto fail11;
/* Ignore error (cannot query vector limits from a VF). */
base = 0;
encp->enc_intr_vec_base = base;
encp->enc_intr_limit = nvec;
- /*
- * Get the current privilege mask. Note that this may be modified
- * dynamically, so this value is informational only. DO NOT use
- * the privilege mask to check for sufficient privileges, as that
- * can result in time-of-check/time-of-use bugs.
- */
- if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
- goto fail11;
- encp->enc_privilege_mask = mask;
-
return (0);
fail11:
return (rc);
}
+/*
+ * Issue MC_CMD_ENTITY_RESET with the FUNCTION_RESOURCE_RESET flag to
+ * ask the MC to release and reinitialise this function's resources
+ * (queues, interrupts, descriptor caches). Factored out so that it can
+ * be shared by EF10 and Riverhead reset paths (see ef10_nic_reset()).
+ *
+ * Returns 0 on success, or the MCDI error code reported by the MC.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_entity_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN);
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ /* Reset function resources only; do not reset the whole entity. */
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
+
+#if EFX_OPTS_EF10()
+
static __checkReturn efx_rc_t
ef10_set_workaround_bug26807(
__in efx_nic_t *enp)
*/
encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+ /* EF10 TSO engine demands that packet header be contiguous. */
+ encp->enc_tx_tso_max_header_ndescs = 1;
+
+ /* The overall TSO header length is not limited. */
+ encp->enc_tx_tso_max_header_length = UINT32_MAX;
+
+ /*
+ * There are no specific limitations on the number of
+ * TSO payload descriptors.
+ */
+ encp->enc_tx_tso_max_payload_ndescs = UINT32_MAX;
+
+ /* TSO superframe payload length is not limited. */
+ encp->enc_tx_tso_max_payload_length = UINT32_MAX;
+
+ /*
+ * Limitation on the maximum number of outgoing packets per
+ * TSO transaction described in SF-108452-SW.
+ */
+ encp->enc_tx_tso_max_nframes = 32767;
+
/*
* Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
* MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
ef10_nic_reset(
__in efx_nic_t *enp)
{
- efx_mcdi_req_t req;
- EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
- MC_CMD_ENTITY_RESET_OUT_LEN);
efx_rc_t rc;
/* ef10_nic_reset() is called to recover from BADASSERT failures. */
if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
goto fail2;
- req.emr_cmd = MC_CMD_ENTITY_RESET;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
-
- MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
- ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
+ if ((rc = efx_mcdi_entity_reset(enp)) != 0)
goto fail3;
- }
/* Clear RX/TX DMA queue errors */
enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
return (rc);
}
-static __checkReturn efx_rc_t
+#endif /* EFX_OPTS_EF10() */
+
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
+
+ __checkReturn efx_rc_t
ef10_upstream_port_vadaptor_alloc(
__in efx_nic_t *enp)
{
return (rc);
}
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
+
+#if EFX_OPTS_EF10()
+
__checkReturn efx_rc_t
ef10_nic_init(
__in efx_nic_t *enp)