1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
15 #include "ef10_tlv_layout.h"
/*
 * Query the MC-assigned port number for this PCI function via
 * MC_CMD_GET_PORT_ASSIGNMENT and return it through *portp.
 * NOTE(review): this excerpt elides interior lines (locals, fail-path
 * gotos, returns); only comments were added here.
 */
17 __checkReturn efx_rc_t
18 efx_mcdi_get_port_assignment(
20 __out uint32_t *portp)
23 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
/* Only EF10-family NICs use this MCDI request path. */
27 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
29 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
30 req.emr_in_buf = payload;
31 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
32 req.emr_out_buf = payload;
33 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
35 efx_mcdi_execute(enp, &req);
/* MCDI failure is reported via emr_rc (fail path elided). */
37 if (req.emr_rc != 0) {
/* Reject short responses before decoding the payload. */
42 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
47 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
54 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query supported/current/default port modes via MC_CMD_GET_PORT_MODES.
 * modesp receives the bitmask of available modes; current_modep and
 * default_modep are optional and may be NULL.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
59 __checkReturn efx_rc_t
60 efx_mcdi_get_port_modes(
62 __out uint32_t *modesp,
63 __out_opt uint32_t *current_modep,
64 __out_opt uint32_t *default_modep)
67 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
68 MC_CMD_GET_PORT_MODES_OUT_LEN);
71 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
73 req.emr_cmd = MC_CMD_GET_PORT_MODES;
74 req.emr_in_buf = payload;
75 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
76 req.emr_out_buf = payload;
77 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
79 efx_mcdi_execute(enp, &req);
81 if (req.emr_rc != 0) {
87 * Require only Modes and DefaultMode fields, unless the current mode
88 * was requested (CurrentMode field was added for Medford).
90 if (req.emr_out_length_used <
91 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
/* Stricter length check only when the caller wants CurrentMode. */
95 if ((current_modep != NULL) && (req.emr_out_length_used <
96 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
101 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
103 if (current_modep != NULL) {
104 *current_modep = MCDI_OUT_DWORD(req,
105 GET_PORT_MODES_OUT_CURRENT_MODE);
108 if (default_modep != NULL) {
109 *default_modep = MCDI_OUT_DWORD(req,
110 GET_PORT_MODES_OUT_DEFAULT_MODE);
120 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Compute the total bandwidth (Mbps) implied by the NIC's current TLV
 * port mode, returned via *bandwidth_mbpsp.  Per-lane bandwidth is
 * chosen from the PHY capability mask (25G/50G/100G checks below);
 * the assignments of single_lane/dual_lane/quad_lane are in lines
 * elided from this excerpt — TODO confirm against the full source.
 * NOTE(review): break statements and fail paths are elided here; only
 * comments were added.
 */
125 __checkReturn efx_rc_t
126 ef10_nic_get_port_mode_bandwidth(
128 __out uint32_t *bandwidth_mbpsp)
131 uint32_t current_mode;
132 efx_port_t *epp = &(enp->en_port);
134 uint32_t single_lane;
/* Current port mode is required; fail if it cannot be queried. */
140 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
141 &current_mode, NULL)) != 0) {
142 /* No port mode info available. */
146 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
151 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
156 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
/* Sum lane bandwidths according to the lane layout of each mode. */
161 switch (current_mode) {
162 case TLV_PORT_MODE_1x1_NA: /* mode 0 */
163 bandwidth = single_lane;
165 case TLV_PORT_MODE_1x2_NA: /* mode 10 */
166 case TLV_PORT_MODE_NA_1x2: /* mode 11 */
167 bandwidth = dual_lane;
169 case TLV_PORT_MODE_1x1_1x1: /* mode 2 */
170 bandwidth = single_lane + single_lane;
172 case TLV_PORT_MODE_4x1_NA: /* mode 4 */
173 case TLV_PORT_MODE_NA_4x1: /* mode 8 */
174 bandwidth = 4 * single_lane;
176 case TLV_PORT_MODE_2x1_2x1: /* mode 5 */
177 bandwidth = (2 * single_lane) + (2 * single_lane);
179 case TLV_PORT_MODE_1x2_1x2: /* mode 12 */
180 bandwidth = dual_lane + dual_lane;
182 case TLV_PORT_MODE_1x2_2x1: /* mode 17 */
183 case TLV_PORT_MODE_2x1_1x2: /* mode 18 */
184 bandwidth = dual_lane + (2 * single_lane);
186 /* Legacy Medford-only mode. Do not use (see bug63270) */
187 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */
188 bandwidth = 4 * single_lane;
190 case TLV_PORT_MODE_1x4_NA: /* mode 1 */
191 case TLV_PORT_MODE_NA_1x4: /* mode 22 */
192 bandwidth = quad_lane;
194 case TLV_PORT_MODE_2x2_NA: /* mode 13 */
195 case TLV_PORT_MODE_NA_2x2: /* mode 14 */
196 bandwidth = 2 * dual_lane;
198 case TLV_PORT_MODE_1x4_2x1: /* mode 6 */
199 case TLV_PORT_MODE_2x1_1x4: /* mode 7 */
200 bandwidth = quad_lane + (2 * single_lane);
202 case TLV_PORT_MODE_1x4_1x2: /* mode 15 */
203 case TLV_PORT_MODE_1x2_1x4: /* mode 16 */
204 bandwidth = quad_lane + dual_lane;
206 case TLV_PORT_MODE_1x4_1x4: /* mode 3 */
207 bandwidth = quad_lane + quad_lane;
214 *bandwidth_mbpsp = bandwidth;
221 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate a vadaptor on the given upstream EVB port via
 * MC_CMD_VADAPTOR_ALLOC.  The PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED
 * flag mirrors the capability discovered in ef10_get_datapath_caps().
 * NOTE(review): interior lines (fail paths, return) are elided in this
 * excerpt; only comments were added.
 */
226 static __checkReturn efx_rc_t
227 efx_mcdi_vadaptor_alloc(
229 __in uint32_t port_id)
232 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
233 MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
/* A vadaptor must not already be allocated for this NIC. */
236 EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
238 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
239 req.emr_in_buf = payload;
240 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
241 req.emr_out_buf = payload;
242 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
244 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
245 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
246 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
247 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
249 efx_mcdi_execute(enp, &req);
251 if (req.emr_rc != 0) {
259 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free the vadaptor bound to the given upstream EVB port via
 * MC_CMD_VADAPTOR_FREE.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
264 static __checkReturn efx_rc_t
265 efx_mcdi_vadaptor_free(
267 __in uint32_t port_id)
270 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
271 MC_CMD_VADAPTOR_FREE_OUT_LEN);
274 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
275 req.emr_in_buf = payload;
276 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
277 req.emr_out_buf = payload;
278 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
280 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
282 efx_mcdi_execute(enp, &req);
284 if (req.emr_rc != 0) {
292 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fetch the PF's permanent MAC address via MC_CMD_GET_MAC_ADDRESSES.
 * mac_addrp is optional; when non-NULL it receives the first (base)
 * address from the response.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
297 __checkReturn efx_rc_t
298 efx_mcdi_get_mac_address_pf(
300 __out_ecount_opt(6) uint8_t mac_addrp[6])
303 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
304 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
307 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
309 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
310 req.emr_in_buf = payload;
311 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
312 req.emr_out_buf = payload;
313 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
315 efx_mcdi_execute(enp, &req);
317 if (req.emr_rc != 0) {
322 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
/* At least one MAC address must be reported by firmware. */
327 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
332 if (mac_addrp != NULL) {
335 addrp = MCDI_OUT2(req, uint8_t,
336 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
338 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
348 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fetch a VF's MAC address via MC_CMD_VPORT_GET_MAC_ADDRESSES on the
 * assigned EVB port.  mac_addrp is optional; when non-NULL it receives
 * the first address from the response.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
353 __checkReturn efx_rc_t
354 efx_mcdi_get_mac_address_vf(
356 __out_ecount_opt(6) uint8_t mac_addrp[6])
359 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
360 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
363 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
365 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
366 req.emr_in_buf = payload;
367 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
368 req.emr_out_buf = payload;
369 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
/* VFs query the vport assigned to them by the PF/firmware. */
371 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
372 EVB_PORT_ID_ASSIGNED);
374 efx_mcdi_execute(enp, &req);
376 if (req.emr_rc != 0) {
381 if (req.emr_out_length_used <
382 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
387 if (MCDI_OUT_DWORD(req,
388 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
393 if (mac_addrp != NULL) {
396 addrp = MCDI_OUT2(req, uint8_t,
397 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
399 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
409 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query system and DPCPU clock frequencies via MC_CMD_GET_CLOCK.
 * Both outputs are validated as non-zero before returning.
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably efx_mcdi_get_clock — confirm against the full source);
 * interior lines are elided too.  Only comments were added.
 */
414 __checkReturn efx_rc_t
417 __out uint32_t *sys_freqp,
418 __out uint32_t *dpcpu_freqp)
421 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
422 MC_CMD_GET_CLOCK_OUT_LEN);
425 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
427 req.emr_cmd = MC_CMD_GET_CLOCK;
428 req.emr_in_buf = payload;
429 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
430 req.emr_out_buf = payload;
431 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
433 efx_mcdi_execute(enp, &req);
435 if (req.emr_rc != 0) {
440 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
/* A zero frequency indicates a bogus firmware response. */
445 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
446 if (*sys_freqp == 0) {
450 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
451 if (*dpcpu_freqp == 0) {
465 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the RX datapath end-padding configuration via
 * MC_CMD_GET_RXDP_CONFIG, returning the padding in bytes via
 * *end_paddingp.  The elided branches map the PAD_HOST_LEN enum
 * (64/128/256) to the corresponding byte count.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
470 __checkReturn efx_rc_t
471 efx_mcdi_get_rxdp_config(
473 __out uint32_t *end_paddingp)
476 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
477 MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
478 uint32_t end_padding;
481 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
482 req.emr_in_buf = payload;
483 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
484 req.emr_out_buf = payload;
485 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
487 efx_mcdi_execute(enp, &req);
488 if (req.emr_rc != 0) {
493 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
494 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
495 /* RX DMA end padding is disabled */
498 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
499 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
500 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
503 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
506 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
515 *end_paddingp = end_padding;
522 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query interrupt vector configuration via MC_CMD_GET_VECTOR_CFG:
 * base vector, vectors per PF and vectors per VF.  All three outputs
 * are optional (NULL skips them).
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
527 __checkReturn efx_rc_t
528 efx_mcdi_get_vector_cfg(
530 __out_opt uint32_t *vec_basep,
531 __out_opt uint32_t *pf_nvecp,
532 __out_opt uint32_t *vf_nvecp)
535 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
536 MC_CMD_GET_VECTOR_CFG_OUT_LEN);
539 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
540 req.emr_in_buf = payload;
541 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
542 req.emr_out_buf = payload;
543 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
545 efx_mcdi_execute(enp, &req);
547 if (req.emr_rc != 0) {
552 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
557 if (vec_basep != NULL)
558 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
559 if (pf_nvecp != NULL)
560 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
561 if (vf_nvecp != NULL)
562 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
569 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate between min_vi_count and max_vi_count virtual interfaces
 * via MC_CMD_ALLOC_VIS, returning base, count and (when the extended
 * response is present) the VI shift.
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably efx_mcdi_alloc_vis — confirm against the full source);
 * interior lines are elided too.  Only comments were added.
 */
574 static __checkReturn efx_rc_t
577 __in uint32_t min_vi_count,
578 __in uint32_t max_vi_count,
579 __out uint32_t *vi_basep,
580 __out uint32_t *vi_countp,
581 __out uint32_t *vi_shiftp)
584 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
585 MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
/* vi_countp is mandatory; elided code returns an error here. */
588 if (vi_countp == NULL) {
593 req.emr_cmd = MC_CMD_ALLOC_VIS;
594 req.emr_in_buf = payload;
595 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
596 req.emr_out_buf = payload;
597 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
599 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
600 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
602 efx_mcdi_execute(enp, &req);
604 if (req.emr_rc != 0) {
/* Only the non-extended part of the response is mandatory. */
609 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
614 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
615 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
617 /* Report VI_SHIFT if available (always zero for Huntington) */
618 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
621 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
630 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free all VIs allocated to this function via MC_CMD_FREE_VIS.
 * EALREADY from firmware (no VIs allocated) is treated as success.
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably efx_mcdi_free_vis — confirm against the full source);
 * interior lines are elided too.  Only comments were added.
 */
636 static __checkReturn efx_rc_t
/* The command carries no payload in either direction. */
643 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
644 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
646 req.emr_cmd = MC_CMD_FREE_VIS;
647 req.emr_in_buf = NULL;
648 req.emr_in_length = 0;
649 req.emr_out_buf = NULL;
650 req.emr_out_length = 0;
652 efx_mcdi_execute_quiet(enp, &req);
654 /* Ignore ELREADY (no allocated VIs, so nothing to free) */
655 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
663 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate one programmed-I/O buffer via MC_CMD_ALLOC_PIOBUF and
 * return its handle via *handlep.  Uses the quiet execute variant
 * since callers probe until allocation fails.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
669 static __checkReturn efx_rc_t
670 efx_mcdi_alloc_piobuf(
672 __out efx_piobuf_handle_t *handlep)
675 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
676 MC_CMD_ALLOC_PIOBUF_OUT_LEN);
679 if (handlep == NULL) {
684 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
685 req.emr_in_buf = payload;
686 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
687 req.emr_out_buf = payload;
688 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
690 efx_mcdi_execute_quiet(enp, &req);
692 if (req.emr_rc != 0) {
697 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
702 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
711 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free a programmed-I/O buffer previously obtained from
 * efx_mcdi_alloc_piobuf(), via MC_CMD_FREE_PIOBUF.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
716 static __checkReturn efx_rc_t
717 efx_mcdi_free_piobuf(
719 __in efx_piobuf_handle_t handle)
722 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
723 MC_CMD_FREE_PIOBUF_OUT_LEN);
726 req.emr_cmd = MC_CMD_FREE_PIOBUF;
727 req.emr_in_buf = payload;
728 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
729 req.emr_out_buf = payload;
730 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
732 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
734 efx_mcdi_execute_quiet(enp, &req);
736 if (req.emr_rc != 0) {
744 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Link a PIO buffer to a TXQ instance (VI index) via
 * MC_CMD_LINK_PIOBUF, enabling PIO transmit on that queue.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
749 static __checkReturn efx_rc_t
750 efx_mcdi_link_piobuf(
752 __in uint32_t vi_index,
753 __in efx_piobuf_handle_t handle)
756 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
757 MC_CMD_LINK_PIOBUF_OUT_LEN);
760 req.emr_cmd = MC_CMD_LINK_PIOBUF;
761 req.emr_in_buf = payload;
762 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
763 req.emr_out_buf = payload;
764 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
766 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
767 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
769 efx_mcdi_execute(enp, &req);
771 if (req.emr_rc != 0) {
779 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Unlink any PIO buffer from a TXQ instance (VI index) via
 * MC_CMD_UNLINK_PIOBUF.  Quiet execution: callers may unlink
 * speculatively.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
784 static __checkReturn efx_rc_t
785 efx_mcdi_unlink_piobuf(
787 __in uint32_t vi_index)
790 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
791 MC_CMD_UNLINK_PIOBUF_OUT_LEN);
794 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
795 req.emr_in_buf = payload;
796 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
797 req.emr_out_buf = payload;
798 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
800 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
802 efx_mcdi_execute_quiet(enp, &req);
804 if (req.emr_rc != 0) {
812 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Best-effort allocation of up to max_piobuf_count PIO buffers,
 * recording each handle and clearing its sub-allocation bitmap.
 * On a later failure path (elided), all successfully allocated
 * buffers are freed again and the count reset to zero.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
818 ef10_nic_alloc_piobufs(
820 __in uint32_t max_piobuf_count)
822 efx_piobuf_handle_t *handlep;
825 EFSYS_ASSERT3U(max_piobuf_count, <=,
826 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
828 enp->en_arch.ef10.ena_piobuf_count = 0;
/* Stop at the first allocation failure; earlier buffers are kept. */
830 for (i = 0; i < max_piobuf_count; i++) {
831 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
833 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
836 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
837 enp->en_arch.ef10.ena_piobuf_count++;
/* Unwind path: release everything allocated so far. */
843 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
844 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
846 (void) efx_mcdi_free_piobuf(enp, *handlep);
847 *handlep = EFX_PIOBUF_HANDLE_INVALID;
849 enp->en_arch.ef10.ena_piobuf_count = 0;
/*
 * Free every allocated PIO buffer, invalidate the stored handles and
 * reset the buffer count to zero.  Free errors are ignored.
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
854 ef10_nic_free_piobufs(
857 efx_piobuf_handle_t *handlep;
860 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
861 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
863 (void) efx_mcdi_free_piobuf(enp, *handlep);
864 *handlep = EFX_PIOBUF_HANDLE_INVALID;
866 enp->en_arch.ef10.ena_piobuf_count = 0;
869 /* Sub-allocate a block from a piobuf */
/*
 * Scan the per-piobuf allocation bitmaps for a free block of
 * edc_pio_alloc_size bytes; on success return the buffer index,
 * handle, block number, byte offset and size through the out
 * parameters.  Fails if no PIO buffers were allocated or the
 * configured sub-allocation size is zero.
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably ef10_nic_pio_alloc — confirm against the full source);
 * interior lines (bit-set on success, fail paths) are elided too.
 * Only comments were added.
 */
870 __checkReturn efx_rc_t
872 __inout efx_nic_t *enp,
873 __out uint32_t *bufnump,
874 __out efx_piobuf_handle_t *handlep,
875 __out uint32_t *blknump,
876 __out uint32_t *offsetp,
879 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
880 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
881 uint32_t blk_per_buf;
885 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
886 EFSYS_ASSERT(bufnump);
887 EFSYS_ASSERT(handlep);
888 EFSYS_ASSERT(blknump);
889 EFSYS_ASSERT(offsetp);
892 if ((edcp->edc_pio_alloc_size == 0) ||
893 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
897 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
/* First-fit search across buffers, then blocks within a buffer. */
899 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
900 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
/* One bit per block: the map word must cover all blocks. */
905 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
906 for (blk = 0; blk < blk_per_buf; blk++) {
907 if ((*map & (1u << blk)) == 0) {
917 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
920 *sizep = edcp->edc_pio_alloc_size;
921 *offsetp = blk * (*sizep);
928 EFSYS_PROBE1(fail1, efx_rc_t, rc);
933 /* Free a piobuf sub-allocated block */
/*
 * Validate bufnum/blknum, then clear the corresponding bit in the
 * buffer's allocation bitmap.  Freeing an unallocated block fails.
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably ef10_nic_pio_free — confirm against the full source);
 * interior lines are elided too.  Only comments were added.
 */
934 __checkReturn efx_rc_t
936 __inout efx_nic_t *enp,
937 __in uint32_t bufnum,
938 __in uint32_t blknum)
943 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
944 (blknum >= (8 * sizeof (*map)))) {
949 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
/* The block must currently be marked allocated. */
950 if ((*map & (1u << blknum)) == 0) {
954 *map &= ~(1u << blknum);
961 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Thin wrapper forwarding to efx_mcdi_link_piobuf().
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably ef10_nic_pio_link — confirm against the full source).
 * Only comments were added.
 */
966 __checkReturn efx_rc_t
968 __inout efx_nic_t *enp,
969 __in uint32_t vi_index,
970 __in efx_piobuf_handle_t handle)
972 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
/*
 * Thin wrapper forwarding to efx_mcdi_unlink_piobuf().
 * NOTE(review): the function name line is elided from this excerpt
 * (presumably ef10_nic_pio_unlink — confirm against the full source).
 * Only comments were added.
 */
975 __checkReturn efx_rc_t
977 __inout efx_nic_t *enp,
978 __in uint32_t vi_index)
980 return (efx_mcdi_unlink_piobuf(enp, vi_index));
/*
 * Query the number of PFs on the controller via MC_CMD_GET_PF_COUNT,
 * returned via *pf_countp (asserted non-zero).
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
983 static __checkReturn efx_rc_t
984 ef10_mcdi_get_pf_count(
986 __out uint32_t *pf_countp)
989 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
990 MC_CMD_GET_PF_COUNT_OUT_LEN);
993 req.emr_cmd = MC_CMD_GET_PF_COUNT;
994 req.emr_in_buf = payload;
995 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
996 req.emr_out_buf = payload;
997 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
999 efx_mcdi_execute(enp, &req);
1001 if (req.emr_rc != 0) {
1006 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
/* The PF count is a single byte in the response. */
1011 *pf_countp = *MCDI_OUT(req, uint8_t,
1012 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
1014 EFSYS_ASSERT(*pf_countp != 0);
1021 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Populate the NIC configuration (encp) with datapath capabilities
 * discovered via MC_CMD_GET_CAPABILITIES.  The request asks for the
 * largest (V5) response; each capability below is decoded from FLAGS1
 * (always present) or FLAGS2/extended fields (guarded by
 * emr_out_length_used checks so older firmware responses degrade to
 * sensible defaults).  Also fills enc_hw_pf_count first via
 * ef10_mcdi_get_pf_count().
 * NOTE(review): this excerpt elides interior lines (locals, else
 * keywords, break statements, fail paths); only comments were added.
 */
1026 static __checkReturn efx_rc_t
1027 ef10_get_datapath_caps(
1028 __in efx_nic_t *enp)
1030 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1032 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1033 MC_CMD_GET_CAPABILITIES_V5_OUT_LEN);
1036 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1040 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1041 req.emr_in_buf = payload;
1042 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1043 req.emr_out_buf = payload;
1044 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
1046 efx_mcdi_execute_quiet(enp, &req);
1048 if (req.emr_rc != 0) {
1053 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
/* Test a capability bit in the always-present FLAGS1 word. */
1058 #define CAP_FLAGS1(_req, _flag) \
1059 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
1060 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
/* Test a FLAGS2 bit, guarding for responses too short to carry it. */
1062 #define CAP_FLAGS2(_req, _flag) \
1063 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1064 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
1065 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1068 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
1069 * We only support the 14 byte prefix here.
1071 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
1075 encp->enc_rx_prefix_size = 14;
1077 #if EFSYS_OPT_RX_SCALE
1078 /* Check if the firmware supports additional RSS modes */
1079 if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1080 encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1082 encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1083 #endif /* EFSYS_OPT_RX_SCALE */
1085 /* Check if the firmware supports TSO */
1086 if (CAP_FLAGS1(req, TX_TSO))
1087 encp->enc_fw_assisted_tso_enabled = B_TRUE;
1089 encp->enc_fw_assisted_tso_enabled = B_FALSE;
1091 /* Check if the firmware supports FATSOv2 */
1092 if (CAP_FLAGS2(req, TX_TSO_V2)) {
1093 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1094 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1095 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1097 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1098 encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1101 /* Check if the firmware supports FATSOv2 encap */
1102 if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1103 encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1105 encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1107 /* Check if the firmware has vadapter/vport/vswitch support */
1108 if (CAP_FLAGS1(req, EVB))
1109 encp->enc_datapath_cap_evb = B_TRUE;
1111 encp->enc_datapath_cap_evb = B_FALSE;
1113 /* Check if the firmware supports VLAN insertion */
1114 if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1115 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1117 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1119 /* Check if the firmware supports RX event batching */
1120 if (CAP_FLAGS1(req, RX_BATCHING))
1121 encp->enc_rx_batching_enabled = B_TRUE;
1123 encp->enc_rx_batching_enabled = B_FALSE;
1126 * Even if batching isn't reported as supported, we may still get
1127 * batched events (see bug61153).
1129 encp->enc_rx_batch_max = 16;
1131 /* Check if the firmware supports disabling scatter on RXQs */
1132 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1133 encp->enc_rx_disable_scatter_supported = B_TRUE;
1135 encp->enc_rx_disable_scatter_supported = B_FALSE;
1137 /* Check if the firmware supports packed stream mode */
1138 if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1139 encp->enc_rx_packed_stream_supported = B_TRUE;
1141 encp->enc_rx_packed_stream_supported = B_FALSE;
1144 * Check if the firmware supports configurable buffer sizes
1145 * for packed stream mode (otherwise buffer size is 1Mbyte)
1147 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1148 encp->enc_rx_var_packed_stream_supported = B_TRUE;
1150 encp->enc_rx_var_packed_stream_supported = B_FALSE;
1152 /* Check if the firmware supports equal stride super-buffer mode */
1153 if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1154 encp->enc_rx_es_super_buffer_supported = B_TRUE;
1156 encp->enc_rx_es_super_buffer_supported = B_FALSE;
1158 /* Check if the firmware supports FW subvariant w/o Tx checksumming */
1159 if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1160 encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1162 encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1164 /* Check if the firmware supports set mac with running filters */
1165 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1166 encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1168 encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1171 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1172 * specifying which parameters to configure.
1174 if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1175 encp->enc_enhanced_set_mac_supported = B_TRUE;
1177 encp->enc_enhanced_set_mac_supported = B_FALSE;
1180 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1181 * us to let the firmware choose the settings to use on an EVQ.
1183 if (CAP_FLAGS2(req, INIT_EVQ_V2))
1184 encp->enc_init_evq_v2_supported = B_TRUE;
1186 encp->enc_init_evq_v2_supported = B_FALSE;
1189 * Check if the NO_CONT_EV mode for RX events is supported.
1191 if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV))
1192 encp->enc_no_cont_ev_mode_supported = B_TRUE;
1194 encp->enc_no_cont_ev_mode_supported = B_FALSE;
1197 * Check if buffer size may and must be specified on INIT_RXQ.
1198 * It may be always specified to efx_rx_qcreate(), but will be
1199 * just kept libefx internal if MCDI does not support it.
1201 if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE))
1202 encp->enc_init_rxq_with_buffer_size = B_TRUE;
1204 encp->enc_init_rxq_with_buffer_size = B_FALSE;
1207 * Check if firmware-verified NVRAM updates must be used.
1209 * The firmware trusted installer requires all NVRAM updates to use
1210 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1211 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1212 * partition and report the result).
1214 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1215 encp->enc_nvram_update_verify_result_supported = B_TRUE;
1217 encp->enc_nvram_update_verify_result_supported = B_FALSE;
1220 * Check if firmware update via the BUNDLE partition is supported
1222 if (CAP_FLAGS2(req, BUNDLE_UPDATE))
1223 encp->enc_nvram_bundle_update_supported = B_TRUE;
1225 encp->enc_nvram_bundle_update_supported = B_FALSE;
1228 * Check if firmware provides packet memory and Rx datapath
1231 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1232 encp->enc_pm_and_rxdp_counters = B_TRUE;
1234 encp->enc_pm_and_rxdp_counters = B_FALSE;
1237 * Check if the 40G MAC hardware is capable of reporting
1238 * statistics for Tx size bins.
1240 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1241 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1243 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1246 * Check if firmware supports VXLAN and NVGRE tunnels.
1247 * The capability indicates Geneve protocol support as well.
1249 if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1250 encp->enc_tunnel_encapsulations_supported =
1251 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1252 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1253 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1255 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1256 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1257 encp->enc_tunnel_config_udp_entries_max =
1258 EFX_TUNNEL_MAXNENTRIES;
1260 encp->enc_tunnel_config_udp_entries_max = 0;
1264 * Check if firmware reports the VI window mode.
1265 * Medford2 has a variable VI window size (8K, 16K or 64K).
1266 * Medford and Huntington have a fixed 8K VI window size.
1268 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1270 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1273 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1274 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1276 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1277 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1279 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1280 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1283 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1286 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1287 (enp->en_family == EFX_FAMILY_MEDFORD)) {
1288 /* Huntington and Medford have fixed 8K window size */
1289 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1291 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1294 /* Check if firmware supports extended MAC stats. */
1295 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1296 /* Extended stats buffer supported */
1297 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1298 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1300 /* Use Siena-compatible legacy MAC stats */
1301 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
1304 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1305 encp->enc_fec_counters = B_TRUE;
1307 encp->enc_fec_counters = B_FALSE;
1309 /* Check if the firmware provides head-of-line blocking counters */
1310 if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1311 encp->enc_hlb_counters = B_TRUE;
1313 encp->enc_hlb_counters = B_FALSE;
1315 #if EFSYS_OPT_RX_SCALE
1316 if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1317 /* Only one exclusive RSS context is available per port. */
1318 encp->enc_rx_scale_max_exclusive_contexts = 1;
1320 switch (enp->en_family) {
1321 case EFX_FAMILY_MEDFORD2:
1322 encp->enc_rx_scale_hash_alg_mask =
1323 (1U << EFX_RX_HASHALG_TOEPLITZ);
1326 case EFX_FAMILY_MEDFORD:
1327 case EFX_FAMILY_HUNTINGTON:
1329 * Packed stream firmware variant maintains a
1330 * non-standard algorithm for hash computation.
1331 * It implies explicit XORing together
1332 * source + destination IP addresses (or last
1333 * four bytes in the case of IPv6) and using the
1334 * resulting value as the input to a Toeplitz hash.
1336 encp->enc_rx_scale_hash_alg_mask =
1337 (1U << EFX_RX_HASHALG_PACKED_STREAM);
1345 /* Port numbers cannot contribute to the hash value */
1346 encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1349 * Maximum number of exclusive RSS contexts.
1350 * EF10 hardware supports 64 in total, but 6 are reserved
1351 * for shared contexts. They are a global resource so
1352 * not all may be available.
1354 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1356 encp->enc_rx_scale_hash_alg_mask =
1357 (1U << EFX_RX_HASHALG_TOEPLITZ);
1360 * It is possible to use port numbers as
1361 * the input data for hash computation.
1363 encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1365 #endif /* EFSYS_OPT_RX_SCALE */
1367 /* Check if the firmware supports "FLAG" and "MARK" filter actions */
1368 if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1369 encp->enc_filter_action_flag_supported = B_TRUE;
1371 encp->enc_filter_action_flag_supported = B_FALSE;
1373 if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1374 encp->enc_filter_action_mark_supported = B_TRUE;
1376 encp->enc_filter_action_mark_supported = B_FALSE;
1378 /* Get maximum supported value for "MARK" filter action */
1379 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1380 encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1381 GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1383 encp->enc_filter_action_mark_max = 0;
1390 #if EFSYS_OPT_RX_SCALE
1393 #endif /* EFSYS_OPT_RX_SCALE */
1401 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fallback privilege masks used when firmware does not implement
 * MC_CMD_PRIVILEGE_MASK: a legacy PF is assumed to hold every
 * privilege group listed below; a legacy VF holds none.
 */
1407 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1408 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1409 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1410 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1411 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1412 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1413 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1414 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1415 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1416 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1417 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1418 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1420 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
/*
 * Obtain this function's MCDI privilege mask via
 * efx_mcdi_privilege_mask(); on firmware that lacks the command,
 * fall back to the legacy static masks (full privileges for a PF,
 * none for a VF).
 * NOTE(review): interior lines are elided in this excerpt; only
 * comments were added.
 */
1423 __checkReturn efx_rc_t
1424 ef10_get_privilege_mask(
1425 __in efx_nic_t *enp,
1426 __out uint32_t *maskp)
1428 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1432 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1437 /* Fallback for old firmware without privilege mask support */
1438 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1439 /* Assume PF has admin privilege */
1440 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1442 /* VF is always unprivileged by default */
1443 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1452 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Constants for the port-number -> external-port mapping table below:
 * at most EFX_EXT_PORT_MAX entries per mapping; EFX_EXT_PORT_NA marks
 * an unused slot.
 */
1458 #define EFX_EXT_PORT_MAX 4
1459 #define EFX_EXT_PORT_NA 0xFF
1462 * Table of mapping schemes from port number to external number.
1464 * Each port number ultimately corresponds to a connector: either as part of
1465 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1466 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1467 * "Salina"). In general:
1469 * Port number (0-based)
1471 * port mapping (n:1)
1474 * External port number (1-based)
1476 * fixed (1:1) or cable assembly (1:m)
1481 * The external numbering refers to the cages or magjacks on the board,
1482 * as visibly annotated on the board or back panel. This table describes
1483 * how to determine which external cage/magjack corresponds to the port
1484 * numbers used by the driver.
1486 * The count of consecutive port numbers that map to each external number,
1487 * is determined by the chip family and the current port mode.
1489 * For the Huntington family, the current port mode cannot be discovered,
1490 * but a single mapping is used by all modes for a given chip variant,
1491 * so the mapping used is instead the last match in the table to the full
1492 * set of port modes to which the NIC can be configured. Therefore the
1493 * ordering of entries in the mapping table is significant.
1495 static struct ef10_external_port_map_s {
1496 efx_family_t family;
1497 uint32_t modes_mask;
1498 uint8_t base_port[EFX_EXT_PORT_MAX];
1499 } __ef10_external_port_mappings[] = {
1501 * Modes used by Huntington family controllers where each port
1502 * number maps to a separate cage.
1503 * SFN7x22F (Torino):
1513 EFX_FAMILY_HUNTINGTON,
1514 (1U << TLV_PORT_MODE_10G) | /* mode 0 */
1515 (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
1516 (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
1520 * Modes which for Huntington identify a chip variant where 2
1521 * adjacent port numbers map to each cage.
1529 EFX_FAMILY_HUNTINGTON,
1530 (1U << TLV_PORT_MODE_40G) | /* mode 1 */
1531 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
1532 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
1533 (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
1534 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1537 * Modes that on Medford allocate each port number to a separate
1546 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1547 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1548 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
1552 * Modes that on Medford allocate 2 adjacent port numbers to each
1561 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1562 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */
1563 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
1564 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1565 /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1566 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
1567 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1570 * Modes that on Medford allocate 4 adjacent port numbers to
1579 /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1580 (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */
1581 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1584 * Modes that on Medford allocate 4 adjacent port numbers to
1593 (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */
1594 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1597 * Modes that on Medford2 allocate each port number to a separate
1605 EFX_FAMILY_MEDFORD2,
1606 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1607 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1608 (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
1609 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1610 (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
1611 (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
1612 (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
1613 (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
1617 * Modes that on Medford2 allocate 1 port to cage 1 and the rest
1624 EFX_FAMILY_MEDFORD2,
1625 (1U << TLV_PORT_MODE_1x2_2x1) | /* mode 17 */
1626 (1U << TLV_PORT_MODE_1x4_2x1), /* mode 6 */
1627 { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1630 * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1
1631 * and the rest to cage 2.
1638 EFX_FAMILY_MEDFORD2,
1639 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
1640 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1641 (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
1642 (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
1643 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1646 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1654 EFX_FAMILY_MEDFORD2,
1655 (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
1656 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
1659 * Modes that on Medford2 allocate up to 4 adjacent port numbers
1667 EFX_FAMILY_MEDFORD2,
1668 (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
1669 (1U << TLV_PORT_MODE_NA_1x2) | /* mode 11 */
1670 (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
1671 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
/*
 * Map a (0-based) MCDI port number to the (1-based) external connector
 * number using __ef10_external_port_mappings above.
 *
 * Strategy: query the set of supported port modes (and the current mode,
 * where firmware reports it) via MC_CMD_GET_PORT_MODES, then pick the
 * table entry whose mode mask covers them; on Huntington (no current-mode
 * reporting) the *last* matching entry wins.  Falls back to the identity
 * mapping (ext_index = port) when no mode information is available.
 */
1675 static __checkReturn efx_rc_t
1676 ef10_external_port_mapping(
1677 __in efx_nic_t *enp,
1679 __out uint8_t *external_portp)
1683 uint32_t port_modes;
1686 struct ef10_external_port_map_s *mapp = NULL;
1687 int ext_index = port; /* Default 1-1 mapping */
/* NOTE(review): "¤t" below is a mojibake of "&current" (the variable
 * is used as "1 << current" further down) — restore before compiling.
 */
1689 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, ¤t,
1692 * No current port mode information (i.e. Huntington)
1693 * - infer mapping from available modes
1695 if ((rc = efx_mcdi_get_port_modes(enp,
1696 &port_modes, NULL, NULL)) != 0) {
1698 * No port mode information available
1699 * - use default mapping
1704 /* Only need to scan the current mode */
1705 port_modes = 1 << current;
1709 * Infer the internal port -> external number mapping from
1710 * the possible port modes for this NIC.
1712 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1713 struct ef10_external_port_map_s *eepmp =
1714 &__ef10_external_port_mappings[i];
1715 if (eepmp->family != enp->en_family)
1717 matches = (eepmp->modes_mask & port_modes);
1720 * Some modes match. For some Huntington boards
1721 * there will be multiple matches. The mapping on the
1722 * last match is used.
1725 port_modes &= ~matches;
1729 if (port_modes != 0) {
1730 /* Some advertised modes are not supported */
1738 * External ports are assigned a sequence of consecutive
1739 * port numbers, so find the one with the closest base_port.
1741 uint32_t delta = EFX_EXT_PORT_NA;
1743 for (i = 0; i < EFX_EXT_PORT_MAX; i++) {
1744 uint32_t base = mapp->base_port[i];
/* Only consider connectors whose base port does not exceed this port. */
1745 if ((base != EFX_EXT_PORT_NA) && (base <= port)) {
1746 if ((port - base) < delta) {
1747 delta = (port - base);
/* Convert from the 0-based table index to the 1-based external number. */
1753 *external_portp = (uint8_t)(ext_index + 1);
1758 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Attempt to enable the bug26807 firmware workaround (chained multicast
 * filters) via MC_CMD_WORKAROUND, and record the outcome in
 * encp->enc_bug26807_workaround.  EACCES / ENOTSUP / ENOENT from firmware
 * are treated as "workaround unavailable", not as errors.
 */
1763 static __checkReturn efx_rc_t
1764 ef10_set_workaround_bug26807(
1765 __in efx_nic_t *enp)
1767 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1772 * If the bug26807 workaround is enabled, then firmware has enabled
1773 * support for chained multicast filters. Firmware will reset (FLR)
1774 * functions which have filters in the hardware filter table when the
1775 * workaround is enabled/disabled.
1777 * We must recheck if the workaround is enabled after inserting the
1778 * first hardware filter, in case it has been changed since this check.
1780 rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
1783 encp->enc_bug26807_workaround = B_TRUE;
/* Firmware indicates it FLR'd other functions when toggling the
 * workaround ("flags" is set by the elided efx_mcdi_set_workaround
 * out-parameter — confirm against full source).
 */
1784 if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
1786 * Other functions had installed filters before the
1787 * workaround was enabled, and they have been reset
1790 EFSYS_PROBE(bug26807_workaround_flr_done);
1791 /* FIXME: bump MC warm boot count ? */
1793 } else if (rc == EACCES) {
1795 * Unprivileged functions cannot enable the workaround in older
1798 encp->enc_bug26807_workaround = B_FALSE;
1799 } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
/* Older firmware without the workaround mechanism: not enabled. */
1800 encp->enc_bug26807_workaround = B_FALSE;
1808 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Gather board/function configuration from firmware into en_nic_cfg and
 * en_port: MCDI port number, external port mapping, PCIe PF/VF identity,
 * MAC address, legacy board type, PHY capabilities, datapath caps,
 * resource limits, interrupt vector limits and the privilege mask, then
 * run the bug26807 workaround check and the controller-specific
 * eno_board_cfg hook.
 *
 * NOTE(review): the function-name line (orig. line 1814) is elided from
 * this excerpt — confirm the exact name against the full source.
 */
1813 static __checkReturn efx_rc_t
1815 __in efx_nic_t *enp)
1817 const efx_nic_ops_t *enop = enp->en_enop;
1818 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1819 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1820 ef10_link_state_t els;
1821 efx_port_t *epp = &(enp->en_port);
1822 uint32_t board_type = 0;
1823 uint32_t base, nvec;
1828 uint8_t mac_addr[6] = { 0 };
1831 /* Get the (zero-based) MCDI port number */
1832 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
1835 /* EFX MCDI interface uses one-based port numbers */
1836 emip->emi_port = port + 1;
1838 if ((rc = ef10_external_port_mapping(enp, port,
1839 &encp->enc_external_port)) != 0)
1843 * Get PCIe function number from firmware (used for
1844 * per-function privilege and dynamic config info).
1845 * - PCIe PF: pf = PF number, vf = 0xffff.
1846 * - PCIe VF: pf = parent PF, vf = VF number.
1848 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
1854 /* MAC address for this function */
1855 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1856 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
1857 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
1859 * Disable static config checking, ONLY for manufacturing test
1860 * and setup at the factory, to allow the static config to be
1863 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
/* mac_addr[0] bit 1 set => locally administered address. */
1864 if ((rc == 0) && (mac_addr[0] & 0x02)) {
1866 * If the static config does not include a global MAC
1867 * address pool then the board may return a locally
1868 * administered MAC address (this should only happen on
1869 * incorrectly programmed boards).
1873 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1875 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
1880 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
1882 /* Board configuration (legacy) */
1883 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
1885 /* Unprivileged functions may not be able to read board cfg */
1892 encp->enc_board_type = board_type;
1893 encp->enc_clk_mult = 1; /* not used for EF10 */
1895 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
1896 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
1900 * Firmware with support for *_FEC capability bits does not
1901 * report that the corresponding *_FEC_REQUESTED bits are supported.
1902 * Add them here so that drivers understand that they are supported.
1904 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
1905 epp->ep_phy_cap_mask |=
1906 (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
1907 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
1908 epp->ep_phy_cap_mask |=
1909 (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
1910 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
1911 epp->ep_phy_cap_mask |=
1912 (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
1914 /* Obtain the default PHY advertised capabilities */
1915 if ((rc = ef10_phy_get_link(enp, &els)) != 0)
1917 epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask;
1918 epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask;
1920 /* Check capabilities of running datapath firmware */
1921 if ((rc = ef10_get_datapath_caps(enp)) != 0)
1924 /* Alignment for WPTR updates */
1925 encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
/* NOTE(review): TX descriptor size limit derived from an RX field
 * (ESF_DZ_RX_KER_BYTE_CNT) — looks intentional in this driver, but
 * worth confirming against the register definitions.
 */
1927 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
1928 /* No boundary crossing limits */
1929 encp->enc_tx_dma_desc_boundary = 0;
1932 * Maximum number of bytes into the frame the TCP header can start for
1933 * firmware assisted TSO to work.
1935 encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
1938 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
1939 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
1940 * resources (allocated to this PCIe function), which is zero until
1941 * after we have allocated VIs.
1943 encp->enc_evq_limit = 1024;
1944 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
1945 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
1947 encp->enc_buftbl_limit = UINT32_MAX;
1949 /* Get interrupt vector limits */
1950 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
1951 if (EFX_PCI_FUNCTION_IS_PF(encp))
1954 /* Ignore error (cannot query vector limits from a VF). */
1958 encp->enc_intr_vec_base = base;
1959 encp->enc_intr_limit = nvec;
1962 * Get the current privilege mask. Note that this may be modified
1963 * dynamically, so this value is informational only. DO NOT use
1964 * the privilege mask to check for sufficient privileges, as that
1965 * can result in time-of-check/time-of-use bugs.
1967 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
1969 encp->enc_privilege_mask = mask;
1971 if ((rc = ef10_set_workaround_bug26807(enp)) != 0)
1974 /* Get remaining controller-specific board config */
1975 if ((rc = enop->eno_board_cfg(enp)) != 0)
1982 EFSYS_PROBE(fail12);
1984 EFSYS_PROBE(fail11);
1986 EFSYS_PROBE(fail10);
2004 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC probe stage: recover from any firmware assertion, attach the driver
 * (efx_mcdi_drv_attach), read board configuration, establish default
 * driver VI limits and optional-feature state (MAC stats, loopback modes,
 * monitor stats).
 *
 * NOTE(review): the function-name line (orig. line 2010) is elided from
 * this excerpt — confirm the exact name against the full source.
 */
2009 __checkReturn efx_rc_t
2011 __in efx_nic_t *enp)
2013 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2014 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2017 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2019 /* Read and clear any assertion state */
2020 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2023 /* Exit the assertion handler */
2024 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2028 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
2031 if ((rc = ef10_nic_board_cfg(enp)) != 0)
2035 * Set default driver config limits (based on board config).
2037 * FIXME: For now allocate a fixed number of VIs which is likely to be
2038 * sufficient and small enough to allow multiple functions on the same
/* min == max: a single fixed VI count, capped at 128. */
2041 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
2042 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
2044 /* The client driver must configure and enable PIO buffer support */
2045 edcp->edc_max_piobuf_count = 0;
2046 edcp->edc_pio_alloc_size = 0;
2048 #if EFSYS_OPT_MAC_STATS
2049 /* Wipe the MAC statistics */
2050 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
2054 #if EFSYS_OPT_LOOPBACK
2055 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
2059 #if EFSYS_OPT_MON_STATS
2060 if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
2061 /* Unprivileged functions do not have access to sensors */
2067 encp->enc_features = enp->en_features;
2071 #if EFSYS_OPT_MON_STATS
2075 #if EFSYS_OPT_LOOPBACK
2079 #if EFSYS_OPT_MAC_STATS
2090 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Apply client-driver resource limits (edlp) to the driver config (edcp),
 * clamped to the board limits discovered at probe time (encp).
 *
 * VI limits: min is the largest of the per-queue-type minimums; max is
 * the largest of the per-queue-type maximums (each first clamped to the
 * board limit).  PIO limits are optional — invalid PIO parameters simply
 * disable PIO rather than failing the call.
 */
2095 __checkReturn efx_rc_t
2096 ef10_nic_set_drv_limits(
2097 __inout efx_nic_t *enp,
2098 __in efx_drv_limits_t *edlp)
2100 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2101 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2102 uint32_t min_evq_count, max_evq_count;
2103 uint32_t min_rxq_count, max_rxq_count;
2104 uint32_t min_txq_count, max_txq_count;
2112 /* Get minimum required and maximum usable VI limits */
2113 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
2114 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
2115 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
2117 edcp->edc_min_vi_count =
2118 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
2120 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
2121 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
2122 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
2124 edcp->edc_max_vi_count =
2125 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
2128 * Check limits for sub-allocated piobuf blocks.
2129 * PIO is optional, so don't fail if the limits are incorrect.
2131 if ((encp->enc_piobuf_size == 0) ||
2132 (encp->enc_piobuf_limit == 0) ||
2133 (edlp->edl_min_pio_alloc_size == 0) ||
2134 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
/* PIO disabled: zero counts mean no piobufs will be allocated. */
2136 edcp->edc_max_piobuf_count = 0;
2137 edcp->edc_pio_alloc_size = 0;
2139 uint32_t blk_size, blk_count, blks_per_piobuf;
/* Block size: the larger of the driver's request and the hardware
 * minimum allocation size.
 */
2142 MAX(edlp->edl_min_pio_alloc_size,
2143 encp->enc_piobuf_min_alloc_size);
2145 blks_per_piobuf = encp->enc_piobuf_size / blk_size;
2146 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
2148 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
2150 /* A zero max pio alloc count means unlimited */
2151 if ((edlp->edl_max_pio_alloc_count > 0) &&
2152 (edlp->edl_max_pio_alloc_count < blk_count)) {
2153 blk_count = edlp->edl_max_pio_alloc_count;
2156 edcp->edc_pio_alloc_size = blk_size;
/* Round up: enough whole piobufs to cover blk_count blocks. */
2157 edcp->edc_max_piobuf_count =
2158 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
2164 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Reset this PCIe function's NIC resources: clear any firmware assertion
 * state, then issue MC_CMD_ENTITY_RESET with FUNCTION_RESOURCE_RESET set,
 * and clear the recorded RX/TX DMA queue error flags.
 *
 * NOTE(review): the function-name line is elided from this excerpt; the
 * comment at orig. line 2179 identifies it as ef10_nic_reset().
 */
2170 __checkReturn efx_rc_t
2172 __in efx_nic_t *enp)
2175 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
2176 MC_CMD_ENTITY_RESET_OUT_LEN);
2179 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
2180 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2182 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2185 req.emr_cmd = MC_CMD_ENTITY_RESET;
2186 req.emr_in_buf = payload;
2187 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
2188 req.emr_out_buf = payload;
2189 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
/* Request a reset of this function's resources only. */
2191 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
2192 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
2194 efx_mcdi_execute(enp, &req);
2196 if (req.emr_rc != 0) {
2201 /* Clear RX/TX DMA queue errors */
2202 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
2211 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC init stage: allocate optional PIO buffers, reserve VI resources
 * (EVQ+RXQ+TXQ) from firmware, compute the UC/WC BAR memory-map layout,
 * link piobufs to the extra WC-mapped VIs, and allocate a vAdaptor on the
 * assigned EVB port (with a retry loop for VFs that start before the PF).
 *
 * NOTE(review): the function-name line (orig. line 2217) is elided from
 * this excerpt — confirm the exact name against the full source.
 */
2216 __checkReturn efx_rc_t
2218 __in efx_nic_t *enp)
2220 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2221 uint32_t min_vi_count, max_vi_count;
2222 uint32_t vi_count, vi_base, vi_shift;
2226 uint32_t vi_window_size;
2229 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2231 /* Enable reporting of some events (e.g. link change) */
2232 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
2235 /* Allocate (optional) on-chip PIO buffers */
2236 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
2239 * For best performance, PIO writes should use a write-combined
2240 * (WC) memory mapping. Using a separate WC mapping for the PIO
2241 * aperture of each VI would be a burden to drivers (and not
2242 * possible if the host page size is >4Kbyte).
2244 * To avoid this we use a single uncached (UC) mapping for VI
2245 * register access, and a single WC mapping for extra VIs used
2248 * Each piobuf must be linked to a VI in the WC mapping, and to
2249 * each VI that is using a sub-allocated block from the piobuf.
2251 min_vi_count = edcp->edc_min_vi_count;
/* Request extra VIs beyond the driver maximum, one per piobuf. */
2253 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
2255 /* Ensure that the previously attached driver's VIs are freed */
2256 if ((rc = efx_mcdi_free_vis(enp)) != 0)
2260 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
2261 * fails then retrying the request for fewer VI resources may succeed.
2264 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
2265 &vi_base, &vi_count, &vi_shift)) != 0)
2268 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
2270 if (vi_count < min_vi_count) {
2275 enp->en_arch.ef10.ena_vi_base = vi_base;
2276 enp->en_arch.ef10.ena_vi_count = vi_count;
2277 enp->en_arch.ef10.ena_vi_shift = vi_shift;
2279 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
2280 /* Not enough extra VIs to map piobufs */
2281 ef10_nic_free_piobufs(enp);
/* The last ena_piobuf_count VIs are dedicated to PIO writes. */
2284 enp->en_arch.ef10.ena_pio_write_vi_base =
2285 vi_count - enp->en_arch.ef10.ena_piobuf_count;
2287 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
2288 EFX_VI_WINDOW_SHIFT_INVALID);
2289 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
2290 EFX_VI_WINDOW_SHIFT_64K);
2291 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
2293 /* Save UC memory mapping details */
2294 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
2295 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
/* UC mapping covers only the VIs below the PIO-write VIs. */
2296 enp->en_arch.ef10.ena_uc_mem_map_size =
2298 enp->en_arch.ef10.ena_pio_write_vi_base);
/* No piobufs: UC mapping covers all allocated VIs. */
2300 enp->en_arch.ef10.ena_uc_mem_map_size =
2302 enp->en_arch.ef10.ena_vi_count);
2305 /* Save WC memory mapping details */
2306 enp->en_arch.ef10.ena_wc_mem_map_offset =
2307 enp->en_arch.ef10.ena_uc_mem_map_offset +
2308 enp->en_arch.ef10.ena_uc_mem_map_size;
2310 enp->en_arch.ef10.ena_wc_mem_map_size =
2312 enp->en_arch.ef10.ena_piobuf_count);
2314 /* Link piobufs to extra VIs in WC mapping */
2315 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2316 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2317 rc = efx_mcdi_link_piobuf(enp,
2318 enp->en_arch.ef10.ena_pio_write_vi_base + i,
2319 enp->en_arch.ef10.ena_piobuf_handle[i]);
2326 * Allocate a vAdaptor attached to our upstream vPort/pPort.
2328 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2329 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2330 * retry the request several times after waiting a while. The wait time
2331 * between retries starts small (10ms) and exponentially increases.
2332 * Total wait time is a little over two seconds. Retry logic in the
2333 * client driver may mean this whole loop is repeated if it continues to
2338 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2339 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2342 * Do not retry alloc for PF, or for other errors on
2348 /* VF startup before PF is ready. Retry allocation. */
2350 /* Too many attempts */
2354 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2355 EFSYS_SLEEP(delay_us);
/* Exponential backoff, capped at 500ms between retries. */
2357 if (delay_us < 500000)
2361 enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
/* MCDI v2 commands can be used once init has succeeded this far. */
2362 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
2377 ef10_nic_free_piobufs(enp);
2380 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report the number of VIs usable by the client driver.  The PIO-write
 * VIs occupy the top of the allocation, so ena_pio_write_vi_base is
 * exactly the count of client-usable VIs below them.
 */
2385 __checkReturn efx_rc_t
2386 ef10_nic_get_vi_pool(
2387 __in efx_nic_t *enp,
2388 __out uint32_t *vi_countp)
2390 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2393 * Report VIs that the client driver can use.
2394 * Do not include VIs used for PIO buffer writes.
2396 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
/*
 * Return the BAR offset and size of the requested memory region:
 * the UC-mapped VI register region, or the WC-mapped PIO-write region,
 * as computed during NIC init.
 */
2401 __checkReturn efx_rc_t
2402 ef10_nic_get_bar_region(
2403 __in efx_nic_t *enp,
2404 __in efx_nic_region_t region,
2405 __out uint32_t *offsetp,
2406 __out size_t *sizep)
2410 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2413 * TODO: Specify host memory mapping alignment and granularity
2414 * in efx_drv_limits_t so that they can be taken into account
2415 * when allocating extra VIs for PIO writes.
2419 /* UC mapped memory BAR region for VI registers */
2420 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2421 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2424 case EFX_REGION_PIO_WRITE_VI:
2425 /* WC mapped memory BAR region for piobuf writes */
2426 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2427 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2438 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Check whether the hardware has gone away: either the unavailable flag
 * is already latched in en_reset_flags, or a read of the MC soft status
 * register returns all-ones (typical for a surprise-removed PCIe device),
 * in which case the unavailable state is latched via
 * ef10_nic_set_hw_unavailable().
 */
2443 __checkReturn boolean_t
2444 ef10_nic_hw_unavailable(
2445 __in efx_nic_t *enp)
2449 if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
2452 EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
/* All-ones read => device no longer responding on the bus. */
2453 if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
2459 ef10_nic_set_hw_unavailable(enp);
/* Latch the hardware-unavailable state in en_reset_flags (one-way:
 * nothing in this file clears EFX_RESET_HW_UNAVAIL).
 */
2465 ef10_nic_set_hw_unavailable(
2466 __in efx_nic_t *enp)
2468 EFSYS_PROBE(hw_unavail);
2469 enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
/*
 * NIC fini stage: undo the init stage — free the vAdaptor, unlink the
 * piobufs from their WC-mapped VIs, free the piobufs and release all VIs.
 * Errors are deliberately ignored ((void) casts): teardown is best-effort.
 *
 * NOTE(review): the signature lines above orig. line 2475 are elided from
 * this excerpt — confirm the exact name against the full source.
 */
2475 __in efx_nic_t *enp)
2480 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2481 enp->en_vport_id = 0;
2483 /* Unlink piobufs from extra VIs in WC mapping */
2484 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2485 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2486 rc = efx_mcdi_unlink_piobuf(enp,
2487 enp->en_arch.ef10.ena_pio_write_vi_base + i);
2493 ef10_nic_free_piobufs(enp);
2495 (void) efx_mcdi_free_vis(enp);
2496 enp->en_arch.ef10.ena_vi_count = 0;
/*
 * NIC unprobe stage: free monitor-stats config (if built) and detach the
 * driver from firmware (efx_mcdi_drv_attach with B_FALSE); the detach
 * result is deliberately ignored.
 *
 * NOTE(review): the signature lines above orig. line 2501 are elided from
 * this excerpt — confirm the exact name against the full source.
 */
2501 __in efx_nic_t *enp)
2503 #if EFSYS_OPT_MON_STATS
2504 mcdi_mon_cfg_free(enp);
2505 #endif /* EFSYS_OPT_MON_STATS */
2506 (void) efx_mcdi_drv_attach(enp, B_FALSE);
/*
 * Diagnostic register test hook.  The visible body does nothing with the
 * NIC (the argument is marked unused via _NOTE(ARGUNUSED)); the elided
 * lines presumably just return success — confirm against full source.
 */
2511 __checkReturn efx_rc_t
2512 ef10_nic_register_test(
2513 __in efx_nic_t *enp)
2518 _NOTE(ARGUNUSED(enp))
2519 _NOTE(CONSTANTCONDITION)
2529 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2534 #endif /* EFSYS_OPT_DIAG */
2536 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
/*
 * Read a NIC-global firmware variable: issue MC_CMD_GET_NIC_GLOBAL for
 * the given key and return the 32-bit value via *valuep.  Fails if the
 * MCDI response is not exactly the expected length.
 */
2538 __checkReturn efx_rc_t
2539 efx_mcdi_get_nic_global(
2540 __in efx_nic_t *enp,
2542 __out uint32_t *valuep)
2545 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
2546 MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
2549 req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
2550 req.emr_in_buf = payload;
2551 req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
2552 req.emr_out_buf = payload;
2553 req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
2555 MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
2557 efx_mcdi_execute(enp, &req);
2559 if (req.emr_rc != 0) {
/* Exact-length check (stricter than the >= checks used elsewhere). */
2564 if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
2569 *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
2576 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Write a NIC-global firmware variable: issue MC_CMD_SET_NIC_GLOBAL with
 * the given key/value pair.  The command has no response payload, so the
 * output buffer is NULL/zero-length.
 */
2581 __checkReturn efx_rc_t
2582 efx_mcdi_set_nic_global(
2583 __in efx_nic_t *enp,
2585 __in uint32_t value)
2588 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
2591 req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
2592 req.emr_in_buf = payload;
2593 req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
2594 req.emr_out_buf = NULL;
2595 req.emr_out_length = 0;
2597 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
2598 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
2600 efx_mcdi_execute(enp, &req);
2602 if (req.emr_rc != 0) {
2610 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2615 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
2617 #endif /* EFX_OPTS_EF10() */