1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
13 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
15 #include "ef10_tlv_layout.h"
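/*
 * Note: the MCDI helpers in this file share a common pattern. A combined
 * request/response buffer is declared with EFX_MCDI_DECLARE_BUF(), the
 * efx_mcdi_req_t fields (command, input/output buffers and lengths) are
 * filled in, the request is issued with efx_mcdi_execute() (or the _quiet
 * variant where failures are expected and should not be logged), and then
 * emr_rc and emr_out_length_used are checked before the response is
 * decoded with the MCDI_OUT_*() accessors.
 */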
17 __checkReturn efx_rc_t
18 efx_mcdi_get_port_assignment(
20 __out uint32_t *portp)
23 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
27 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
28 enp->en_family == EFX_FAMILY_MEDFORD ||
29 enp->en_family == EFX_FAMILY_MEDFORD2);
31 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
32 req.emr_in_buf = payload;
33 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
34 req.emr_out_buf = payload;
35 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
37 efx_mcdi_execute(enp, &req);
39 if (req.emr_rc != 0) {
44 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
49 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
56 EFSYS_PROBE1(fail1, efx_rc_t, rc);
61 __checkReturn efx_rc_t
62 efx_mcdi_get_port_modes(
64 __out uint32_t *modesp,
65 __out_opt uint32_t *current_modep,
66 __out_opt uint32_t *default_modep)
69 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
70 MC_CMD_GET_PORT_MODES_OUT_LEN);
73 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
74 enp->en_family == EFX_FAMILY_MEDFORD ||
75 enp->en_family == EFX_FAMILY_MEDFORD2);
77 req.emr_cmd = MC_CMD_GET_PORT_MODES;
78 req.emr_in_buf = payload;
79 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
80 req.emr_out_buf = payload;
81 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
83 efx_mcdi_execute(enp, &req);
85 if (req.emr_rc != 0) {
91 * Require only Modes and DefaultMode fields, unless the current mode
92 * was requested (CurrentMode field was added for Medford).
94 if (req.emr_out_length_used <
95 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
99 if ((current_modep != NULL) && (req.emr_out_length_used <
100 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
105 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
107 if (current_modep != NULL) {
108 *current_modep = MCDI_OUT_DWORD(req,
109 GET_PORT_MODES_OUT_CURRENT_MODE);
112 if (default_modep != NULL) {
113 *default_modep = MCDI_OUT_DWORD(req,
114 GET_PORT_MODES_OUT_DEFAULT_MODE);
124 EFSYS_PROBE1(fail1, efx_rc_t, rc);
129 __checkReturn efx_rc_t
130 ef10_nic_get_port_mode_bandwidth(
132 __out uint32_t *bandwidth_mbpsp)
135 uint32_t current_mode;
136 uint32_t single_lane = 10000;
137 uint32_t dual_lane = 50000;
138 uint32_t quad_lane = 40000;
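/*
 * Lane bandwidths are in Mbit/s, matching *bandwidth_mbpsp: a single lane
 * carries 10G, a dual lane 50G and a quad lane 40G.
 */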
142 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
143 &current_mode, NULL)) != 0) {
144 /* No port mode info available. */
148 switch (current_mode) {
149 case TLV_PORT_MODE_1x1_NA: /* mode 0 */
150 bandwidth = single_lane;
152 case TLV_PORT_MODE_1x2_NA: /* mode 10 */
153 case TLV_PORT_MODE_NA_1x2: /* mode 11 */
154 bandwidth = dual_lane;
156 case TLV_PORT_MODE_1x1_1x1: /* mode 2 */
157 bandwidth = single_lane + single_lane;
159 case TLV_PORT_MODE_4x1_NA: /* mode 4 */
160 case TLV_PORT_MODE_NA_4x1: /* mode 8 */
161 bandwidth = 4 * single_lane;
163 case TLV_PORT_MODE_2x1_2x1: /* mode 5 */
164 bandwidth = (2 * single_lane) + (2 * single_lane);
166 case TLV_PORT_MODE_1x2_1x2: /* mode 12 */
167 bandwidth = dual_lane + dual_lane;
169 case TLV_PORT_MODE_1x2_2x1: /* mode 17 */
170 case TLV_PORT_MODE_2x1_1x2: /* mode 18 */
171 bandwidth = dual_lane + (2 * single_lane);
173 /* Legacy Medford-only mode. Do not use (see bug63270) */
174 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */
175 bandwidth = 4 * single_lane;
177 case TLV_PORT_MODE_1x4_NA: /* mode 1 */
178 case TLV_PORT_MODE_NA_1x4: /* mode 22 */
179 bandwidth = quad_lane;
181 case TLV_PORT_MODE_2x2_NA: /* mode 13 */
182 case TLV_PORT_MODE_NA_2x2: /* mode 14 */
183 bandwidth = 2 * dual_lane;
185 case TLV_PORT_MODE_1x4_2x1: /* mode 6 */
186 case TLV_PORT_MODE_2x1_1x4: /* mode 7 */
187 bandwidth = quad_lane + (2 * single_lane);
189 case TLV_PORT_MODE_1x4_1x2: /* mode 15 */
190 case TLV_PORT_MODE_1x2_1x4: /* mode 16 */
191 bandwidth = quad_lane + dual_lane;
193 case TLV_PORT_MODE_1x4_1x4: /* mode 3 */
194 bandwidth = quad_lane + quad_lane;
201 *bandwidth_mbpsp = bandwidth;
208 EFSYS_PROBE1(fail1, efx_rc_t, rc);
213 static __checkReturn efx_rc_t
214 efx_mcdi_vadaptor_alloc(
216 __in uint32_t port_id)
219 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
220 MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
223 EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
225 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
226 req.emr_in_buf = payload;
227 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
228 req.emr_out_buf = payload;
229 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
231 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
232 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
233 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
234 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
236 efx_mcdi_execute(enp, &req);
238 if (req.emr_rc != 0) {
246 EFSYS_PROBE1(fail1, efx_rc_t, rc);
251 static __checkReturn efx_rc_t
252 efx_mcdi_vadaptor_free(
254 __in uint32_t port_id)
257 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
258 MC_CMD_VADAPTOR_FREE_OUT_LEN);
261 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
262 req.emr_in_buf = payload;
263 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
264 req.emr_out_buf = payload;
265 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
267 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
269 efx_mcdi_execute(enp, &req);
271 if (req.emr_rc != 0) {
279 EFSYS_PROBE1(fail1, efx_rc_t, rc);
284 __checkReturn efx_rc_t
285 efx_mcdi_get_mac_address_pf(
287 __out_ecount_opt(6) uint8_t mac_addrp[6])
290 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
291 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
294 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
295 enp->en_family == EFX_FAMILY_MEDFORD ||
296 enp->en_family == EFX_FAMILY_MEDFORD2);
298 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
299 req.emr_in_buf = payload;
300 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
301 req.emr_out_buf = payload;
302 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
304 efx_mcdi_execute(enp, &req);
306 if (req.emr_rc != 0) {
311 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
316 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
321 if (mac_addrp != NULL) {
324 addrp = MCDI_OUT2(req, uint8_t,
325 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
327 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
337 EFSYS_PROBE1(fail1, efx_rc_t, rc);
342 __checkReturn efx_rc_t
343 efx_mcdi_get_mac_address_vf(
345 __out_ecount_opt(6) uint8_t mac_addrp[6])
348 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
349 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
352 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
353 enp->en_family == EFX_FAMILY_MEDFORD ||
354 enp->en_family == EFX_FAMILY_MEDFORD2);
356 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
357 req.emr_in_buf = payload;
358 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
359 req.emr_out_buf = payload;
360 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
362 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
363 EVB_PORT_ID_ASSIGNED);
365 efx_mcdi_execute(enp, &req);
367 if (req.emr_rc != 0) {
372 if (req.emr_out_length_used <
373 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
378 if (MCDI_OUT_DWORD(req,
379 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
384 if (mac_addrp != NULL) {
387 addrp = MCDI_OUT2(req, uint8_t,
388 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
390 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
400 EFSYS_PROBE1(fail1, efx_rc_t, rc);
405 __checkReturn efx_rc_t
408 __out uint32_t *sys_freqp,
409 __out uint32_t *dpcpu_freqp)
412 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
413 MC_CMD_GET_CLOCK_OUT_LEN);
416 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
417 enp->en_family == EFX_FAMILY_MEDFORD ||
418 enp->en_family == EFX_FAMILY_MEDFORD2);
420 req.emr_cmd = MC_CMD_GET_CLOCK;
421 req.emr_in_buf = payload;
422 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
423 req.emr_out_buf = payload;
424 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
426 efx_mcdi_execute(enp, &req);
428 if (req.emr_rc != 0) {
433 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
438 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
439 if (*sys_freqp == 0) {
443 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
444 if (*dpcpu_freqp == 0) {
458 EFSYS_PROBE1(fail1, efx_rc_t, rc);
463 __checkReturn efx_rc_t
464 efx_mcdi_get_rxdp_config(
466 __out uint32_t *end_paddingp)
469 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
470 MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
471 uint32_t end_padding;
474 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
475 req.emr_in_buf = payload;
476 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
477 req.emr_out_buf = payload;
478 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
480 efx_mcdi_execute(enp, &req);
481 if (req.emr_rc != 0) {
486 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
487 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
488 /* RX DMA end padding is disabled */
491 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
492 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
493 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
496 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
499 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
508 *end_paddingp = end_padding;
515 EFSYS_PROBE1(fail1, efx_rc_t, rc);
520 __checkReturn efx_rc_t
521 efx_mcdi_get_vector_cfg(
523 __out_opt uint32_t *vec_basep,
524 __out_opt uint32_t *pf_nvecp,
525 __out_opt uint32_t *vf_nvecp)
528 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
529 MC_CMD_GET_VECTOR_CFG_OUT_LEN);
532 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
533 req.emr_in_buf = payload;
534 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
535 req.emr_out_buf = payload;
536 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
538 efx_mcdi_execute(enp, &req);
540 if (req.emr_rc != 0) {
545 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
550 if (vec_basep != NULL)
551 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
552 if (pf_nvecp != NULL)
553 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
554 if (vf_nvecp != NULL)
555 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
562 EFSYS_PROBE1(fail1, efx_rc_t, rc);
567 static __checkReturn efx_rc_t
570 __in uint32_t min_vi_count,
571 __in uint32_t max_vi_count,
572 __out uint32_t *vi_basep,
573 __out uint32_t *vi_countp,
574 __out uint32_t *vi_shiftp)
577 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
578 MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
581 if (vi_countp == NULL) {
586 req.emr_cmd = MC_CMD_ALLOC_VIS;
587 req.emr_in_buf = payload;
588 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
589 req.emr_out_buf = payload;
590 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
592 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
593 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
595 efx_mcdi_execute(enp, &req);
597 if (req.emr_rc != 0) {
602 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
607 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
608 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
610 /* Report VI_SHIFT if available (always zero for Huntington) */
611 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
614 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
623 EFSYS_PROBE1(fail1, efx_rc_t, rc);
629 static __checkReturn efx_rc_t
636 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
637 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
639 req.emr_cmd = MC_CMD_FREE_VIS;
640 req.emr_in_buf = NULL;
641 req.emr_in_length = 0;
642 req.emr_out_buf = NULL;
643 req.emr_out_length = 0;
645 efx_mcdi_execute_quiet(enp, &req);
647 /* Ignore EALREADY (no allocated VIs, so nothing to free) */
648 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
656 EFSYS_PROBE1(fail1, efx_rc_t, rc);
662 static __checkReturn efx_rc_t
663 efx_mcdi_alloc_piobuf(
665 __out efx_piobuf_handle_t *handlep)
668 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
669 MC_CMD_ALLOC_PIOBUF_OUT_LEN);
672 if (handlep == NULL) {
677 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
678 req.emr_in_buf = payload;
679 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
680 req.emr_out_buf = payload;
681 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
683 efx_mcdi_execute_quiet(enp, &req);
685 if (req.emr_rc != 0) {
690 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
695 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
704 EFSYS_PROBE1(fail1, efx_rc_t, rc);
709 static __checkReturn efx_rc_t
710 efx_mcdi_free_piobuf(
712 __in efx_piobuf_handle_t handle)
715 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
716 MC_CMD_FREE_PIOBUF_OUT_LEN);
719 req.emr_cmd = MC_CMD_FREE_PIOBUF;
720 req.emr_in_buf = payload;
721 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
722 req.emr_out_buf = payload;
723 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
725 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
727 efx_mcdi_execute_quiet(enp, &req);
729 if (req.emr_rc != 0) {
737 EFSYS_PROBE1(fail1, efx_rc_t, rc);
742 static __checkReturn efx_rc_t
743 efx_mcdi_link_piobuf(
745 __in uint32_t vi_index,
746 __in efx_piobuf_handle_t handle)
749 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
750 MC_CMD_LINK_PIOBUF_OUT_LEN);
753 req.emr_cmd = MC_CMD_LINK_PIOBUF;
754 req.emr_in_buf = payload;
755 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
756 req.emr_out_buf = payload;
757 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
759 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
760 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
762 efx_mcdi_execute(enp, &req);
764 if (req.emr_rc != 0) {
772 EFSYS_PROBE1(fail1, efx_rc_t, rc);
777 static __checkReturn efx_rc_t
778 efx_mcdi_unlink_piobuf(
780 __in uint32_t vi_index)
783 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
784 MC_CMD_UNLINK_PIOBUF_OUT_LEN);
787 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
788 req.emr_in_buf = payload;
789 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
790 req.emr_out_buf = payload;
791 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
793 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
795 efx_mcdi_execute_quiet(enp, &req);
797 if (req.emr_rc != 0) {
805 EFSYS_PROBE1(fail1, efx_rc_t, rc);
811 ef10_nic_alloc_piobufs(
813 __in uint32_t max_piobuf_count)
815 efx_piobuf_handle_t *handlep;
818 EFSYS_ASSERT3U(max_piobuf_count, <=,
819 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
821 enp->en_arch.ef10.ena_piobuf_count = 0;
823 for (i = 0; i < max_piobuf_count; i++) {
824 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
826 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
829 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
830 enp->en_arch.ef10.ena_piobuf_count++;
836 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
837 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
839 (void) efx_mcdi_free_piobuf(enp, *handlep);
840 *handlep = EFX_PIOBUF_HANDLE_INVALID;
842 enp->en_arch.ef10.ena_piobuf_count = 0;
847 ef10_nic_free_piobufs(
850 efx_piobuf_handle_t *handlep;
853 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
854 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
856 (void) efx_mcdi_free_piobuf(enp, *handlep);
857 *handlep = EFX_PIOBUF_HANDLE_INVALID;
859 enp->en_arch.ef10.ena_piobuf_count = 0;
862 /* Sub-allocate a block from a piobuf */
863 __checkReturn efx_rc_t
865 __inout efx_nic_t *enp,
866 __out uint32_t *bufnump,
867 __out efx_piobuf_handle_t *handlep,
868 __out uint32_t *blknump,
869 __out uint32_t *offsetp,
872 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
873 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
874 uint32_t blk_per_buf;
878 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
879 enp->en_family == EFX_FAMILY_MEDFORD ||
880 enp->en_family == EFX_FAMILY_MEDFORD2);
881 EFSYS_ASSERT(bufnump);
882 EFSYS_ASSERT(handlep);
883 EFSYS_ASSERT(blknump);
884 EFSYS_ASSERT(offsetp);
887 if ((edcp->edc_pio_alloc_size == 0) ||
888 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
892 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
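/*
 * ena_pio_alloc_map[] holds one bitmap word per piobuf; each set bit marks
 * a sub-allocated block of edc_pio_alloc_size bytes, so a piobuf can hold
 * at most 8 * sizeof (*map) blocks (see the assertion below).
 */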
894 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
895 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
900 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
901 for (blk = 0; blk < blk_per_buf; blk++) {
902 if ((*map & (1u << blk)) == 0) {
912 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
915 *sizep = edcp->edc_pio_alloc_size;
916 *offsetp = blk * (*sizep);
923 EFSYS_PROBE1(fail1, efx_rc_t, rc);
928 /* Free a piobuf sub-allocated block */
929 __checkReturn efx_rc_t
931 __inout efx_nic_t *enp,
932 __in uint32_t bufnum,
933 __in uint32_t blknum)
938 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
939 (blknum >= (8 * sizeof (*map)))) {
944 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
945 if ((*map & (1u << blknum)) == 0) {
949 *map &= ~(1u << blknum);
956 EFSYS_PROBE1(fail1, efx_rc_t, rc);
961 __checkReturn efx_rc_t
963 __inout efx_nic_t *enp,
964 __in uint32_t vi_index,
965 __in efx_piobuf_handle_t handle)
967 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
970 __checkReturn efx_rc_t
972 __inout efx_nic_t *enp,
973 __in uint32_t vi_index)
975 return (efx_mcdi_unlink_piobuf(enp, vi_index));
978 static __checkReturn efx_rc_t
979 ef10_mcdi_get_pf_count(
981 __out uint32_t *pf_countp)
984 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
985 MC_CMD_GET_PF_COUNT_OUT_LEN);
988 req.emr_cmd = MC_CMD_GET_PF_COUNT;
989 req.emr_in_buf = payload;
990 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
991 req.emr_out_buf = payload;
992 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
994 efx_mcdi_execute(enp, &req);
996 if (req.emr_rc != 0) {
1001 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
1006 *pf_countp = *MCDI_OUT(req, uint8_t,
1007 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
1009 EFSYS_ASSERT(*pf_countp != 0);
1016 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1021 static __checkReturn efx_rc_t
1022 ef10_get_datapath_caps(
1023 __in efx_nic_t *enp)
1025 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1027 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1028 MC_CMD_GET_CAPABILITIES_V5_OUT_LEN);
1031 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1035 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1036 req.emr_in_buf = payload;
1037 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1038 req.emr_out_buf = payload;
1039 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
1041 efx_mcdi_execute_quiet(enp, &req);
1043 if (req.emr_rc != 0) {
1048 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1053 #define CAP_FLAGS1(_req, _flag) \
1054 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
1055 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
1057 #define CAP_FLAGS2(_req, _flag) \
1058 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1059 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
1060 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
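/*
 * FLAGS2 was added in the V2 response, so CAP_FLAGS2() checks that the
 * response is long enough to contain it before testing the flag bit.
 */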
1063 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
1064 * We only support the 14 byte prefix here.
1066 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
1070 encp->enc_rx_prefix_size = 14;
1072 /* Check if the firmware supports additional RSS modes */
1073 if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1074 encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1076 encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1078 /* Check if the firmware supports TSO */
1079 if (CAP_FLAGS1(req, TX_TSO))
1080 encp->enc_fw_assisted_tso_enabled = B_TRUE;
1082 encp->enc_fw_assisted_tso_enabled = B_FALSE;
1084 /* Check if the firmware supports FATSOv2 */
1085 if (CAP_FLAGS2(req, TX_TSO_V2)) {
1086 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1087 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1088 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1090 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1091 encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1094 /* Check if the firmware supports FATSOv2 encap */
1095 if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1096 encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1098 encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1100 /* Check if the firmware has vadapter/vport/vswitch support */
1101 if (CAP_FLAGS1(req, EVB))
1102 encp->enc_datapath_cap_evb = B_TRUE;
1104 encp->enc_datapath_cap_evb = B_FALSE;
1106 /* Check if the firmware supports VLAN insertion */
1107 if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1108 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1110 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1112 /* Check if the firmware supports RX event batching */
1113 if (CAP_FLAGS1(req, RX_BATCHING))
1114 encp->enc_rx_batching_enabled = B_TRUE;
1116 encp->enc_rx_batching_enabled = B_FALSE;
1119 * Even if batching isn't reported as supported, we may still get
1120 * batched events (see bug61153).
1122 encp->enc_rx_batch_max = 16;
1124 /* Check if the firmware supports disabling scatter on RXQs */
1125 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1126 encp->enc_rx_disable_scatter_supported = B_TRUE;
1128 encp->enc_rx_disable_scatter_supported = B_FALSE;
1130 /* Check if the firmware supports packed stream mode */
1131 if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1132 encp->enc_rx_packed_stream_supported = B_TRUE;
1134 encp->enc_rx_packed_stream_supported = B_FALSE;
1137 * Check if the firmware supports configurable buffer sizes
1138 * for packed stream mode (otherwise buffer size is 1Mbyte)
1140 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1141 encp->enc_rx_var_packed_stream_supported = B_TRUE;
1143 encp->enc_rx_var_packed_stream_supported = B_FALSE;
1145 /* Check if the firmware supports equal stride super-buffer mode */
1146 if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1147 encp->enc_rx_es_super_buffer_supported = B_TRUE;
1149 encp->enc_rx_es_super_buffer_supported = B_FALSE;
1151 /* Check if the firmware supports FW subvariant w/o Tx checksumming */
1152 if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1153 encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1155 encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1157 /* Check if the firmware supports set mac with running filters */
1158 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1159 encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1161 encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1164 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1165 * specifying which parameters to configure.
1167 if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1168 encp->enc_enhanced_set_mac_supported = B_TRUE;
1170 encp->enc_enhanced_set_mac_supported = B_FALSE;
1173 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1174 * us to let the firmware choose the settings to use on an EVQ.
1176 if (CAP_FLAGS2(req, INIT_EVQ_V2))
1177 encp->enc_init_evq_v2_supported = B_TRUE;
1179 encp->enc_init_evq_v2_supported = B_FALSE;
1182 * Check if firmware-verified NVRAM updates must be used.
1184 * The firmware trusted installer requires all NVRAM updates to use
1185 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1186 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1187 * partition and report the result).
1189 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1190 encp->enc_nvram_update_verify_result_supported = B_TRUE;
1192 encp->enc_nvram_update_verify_result_supported = B_FALSE;
1195 * Check if firmware provides packet memory and Rx datapath counters.
1198 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1199 encp->enc_pm_and_rxdp_counters = B_TRUE;
1201 encp->enc_pm_and_rxdp_counters = B_FALSE;
1204 * Check if the 40G MAC hardware is capable of reporting
1205 * statistics for Tx size bins.
1207 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1208 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1210 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1213 * Check if firmware supports VXLAN and NVGRE tunnels.
1214 * The capability indicates Geneve protocol support as well.
1216 if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1217 encp->enc_tunnel_encapsulations_supported =
1218 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1219 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1220 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1222 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1223 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1224 encp->enc_tunnel_config_udp_entries_max =
1225 EFX_TUNNEL_MAXNENTRIES;
1227 encp->enc_tunnel_config_udp_entries_max = 0;
1231 * Check if firmware reports the VI window mode.
1232 * Medford2 has a variable VI window size (8K, 16K or 64K).
1233 * Medford and Huntington have a fixed 8K VI window size.
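 *
 * The reported mode is translated into enc_vi_window_shift, the log2 of
 * the per-VI register window size; ef10_nic_init() computes the window
 * size as 1U << enc_vi_window_shift.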
1235 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1237 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1240 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1241 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1243 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1244 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1246 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1247 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1250 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1253 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1254 (enp->en_family == EFX_FAMILY_MEDFORD)) {
1255 /* Huntington and Medford have fixed 8K window size */
1256 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1258 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1261 /* Check if firmware supports extended MAC stats. */
1262 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1263 /* Extended stats buffer supported */
1264 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1265 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1267 /* Use Siena-compatible legacy MAC stats */
1268 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
1271 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1272 encp->enc_fec_counters = B_TRUE;
1274 encp->enc_fec_counters = B_FALSE;
1276 /* Check if the firmware provides head-of-line blocking counters */
1277 if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1278 encp->enc_hlb_counters = B_TRUE;
1280 encp->enc_hlb_counters = B_FALSE;
1282 if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1283 /* Only one exclusive RSS context is available per port. */
1284 encp->enc_rx_scale_max_exclusive_contexts = 1;
1286 switch (enp->en_family) {
1287 case EFX_FAMILY_MEDFORD2:
1288 encp->enc_rx_scale_hash_alg_mask =
1289 (1U << EFX_RX_HASHALG_TOEPLITZ);
1292 case EFX_FAMILY_MEDFORD:
1293 case EFX_FAMILY_HUNTINGTON:
1295 * The packed stream firmware variant uses a
1296 * non-standard algorithm for hash computation:
1297 * the source and destination IP addresses (or
1298 * their last four bytes, for IPv6) are XORed
1299 * together and the resulting value is used as
1300 * the input to a Toeplitz hash.
1302 encp->enc_rx_scale_hash_alg_mask =
1303 (1U << EFX_RX_HASHALG_PACKED_STREAM);
1311 /* Port numbers cannot contribute to the hash value */
1312 encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1315 * Maximum number of exclusive RSS contexts.
1316 * EF10 hardware supports 64 in total, but 6 are reserved
1317 * for shared contexts. They are a global resource so
1318 * not all may be available.
1320 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1322 encp->enc_rx_scale_hash_alg_mask =
1323 (1U << EFX_RX_HASHALG_TOEPLITZ);
1326 * It is possible to use port numbers as
1327 * the input data for hash computation.
1329 encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1331 /* Check if the firmware supports "FLAG" and "MARK" filter actions */
1332 if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1333 encp->enc_filter_action_flag_supported = B_TRUE;
1335 encp->enc_filter_action_flag_supported = B_FALSE;
1337 if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1338 encp->enc_filter_action_mark_supported = B_TRUE;
1340 encp->enc_filter_action_mark_supported = B_FALSE;
1342 /* Get maximum supported value for "MARK" filter action */
1343 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1344 encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1345 GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1347 encp->enc_filter_action_mark_max = 0;
1363 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1369 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1370 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1371 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1372 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1373 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1374 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1375 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1376 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1377 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1378 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1379 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1380 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1382 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
1385 __checkReturn efx_rc_t
1386 ef10_get_privilege_mask(
1387 __in efx_nic_t *enp,
1388 __out uint32_t *maskp)
1390 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1394 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1399 /* Fallback for old firmware without privilege mask support */
1400 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1401 /* Assume PF has admin privilege */
1402 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1404 /* VF is always unprivileged by default */
1405 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1414 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1421 * Table of mapping schemes from port number to external number.
1423 * Each port number ultimately corresponds to a connector: either as part of
1424 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1425 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1426 * "Salina"). In general:
1428 * Port number (0-based)
1430 * port mapping (n:1)
1433 * External port number (normally 1-based)
1435 * fixed (1:1) or cable assembly (1:m)
1440 * The external numbering refers to the cages or magjacks on the board,
1441 * as visibly annotated on the board or back panel. This table describes
1442 * how to determine which external cage/magjack corresponds to the port
1443 * numbers used by the driver.
1445 * The count of adjacent port numbers that map to each external number,
1446 * and the offset in the numbering, are determined by the chip family and
1447 * current port mode.
1449 * For the Huntington family, the current port mode cannot be discovered,
1450 * but a single mapping is used by all modes for a given chip variant,
1451 * so the mapping used is instead the last match in the table to the full
1452 * set of port modes to which the NIC can be configured. Therefore the
1453 * ordering of entries in the mapping table is significant.
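 *
 * For example, in a mode where two adjacent port numbers map to each cage
 * and the external numbering starts at 1, ports 0 and 1 both map to
 * external port 1 and ports 2 and 3 map to external port 2; this is the
 * (port / count) + offset calculation in ef10_external_port_mapping().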
1455 static struct ef10_external_port_map_s {
1456 efx_family_t family;
1457 uint32_t modes_mask;
1460 } __ef10_external_port_mappings[] = {
1462 * Modes used by Huntington family controllers where each port
1463 * number maps to a separate cage.
1464 * SFN7x22F (Torino):
1474 EFX_FAMILY_HUNTINGTON,
1475 (1U << TLV_PORT_MODE_10G) | /* mode 0 */
1476 (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
1477 (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
1478 1, /* ports per cage */
1482 * Modes which for Huntington identify a chip variant where 2
1483 * adjacent port numbers map to each cage.
1491 EFX_FAMILY_HUNTINGTON,
1492 (1U << TLV_PORT_MODE_40G) | /* mode 1 */
1493 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
1494 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
1495 (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
1496 2, /* ports per cage */
1500 * Modes that on Medford allocate each port number to a separate
1509 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1510 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */
1511 1, /* ports per cage */
1515 * Modes that on Medford allocate 2 adjacent port numbers to each
1524 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1525 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1526 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
1527 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1528 /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1529 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
1530 2, /* ports per cage */
1534 * Modes that on Medford allocate 4 adjacent port numbers to each
1535 * connector, starting on cage 1.
1543 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */
1544 /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1545 (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */
1546 4, /* ports per cage */
1550 * Modes that on Medford allocate 4 adjacent port numbers to each
1551 * connector, starting on cage 2.
1559 (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */
1560 4, /* ports per cage */
1564 * Modes that on Medford2 allocate each port number to a separate
1572 EFX_FAMILY_MEDFORD2,
1573 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
1574 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
1575 (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
1576 (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
1577 (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
1578 (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
1579 (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
1580 1, /* ports per cage */
1584 * FIXME: Some port modes are not representable in this mapping:
1585 * - TLV_PORT_MODE_1x2_2x1 (mode 17):
1591 * Modes that on Medford2 allocate 2 adjacent port numbers to each
1592 * cage, starting on cage 1.
1599 EFX_FAMILY_MEDFORD2,
1600 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
1601 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
1602 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
1603 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
1604 (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
1605 (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
1606 2, /* ports per cage */
1610 * Modes that on Medford2 allocate 2 adjacent port numbers to each
1611 * cage, starting on cage 2.
1616 EFX_FAMILY_MEDFORD2,
1617 (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
1618 2, /* ports per cage */
1622 * Modes that on Medford2 allocate 4 adjacent port numbers to each
1623 * connector, starting on cage 1.
1630 EFX_FAMILY_MEDFORD2,
1631 (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
1632 4, /* ports per cage */
1636 * Modes that on Medford2 allocate 4 adjacent port numbers to each
1637 * connector, starting on cage 2.
1644 EFX_FAMILY_MEDFORD2,
1645 (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
1646 (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */
1647 4, /* ports per cage */
1652 static __checkReturn efx_rc_t
1653 ef10_external_port_mapping(
1654 __in efx_nic_t *enp,
1656 __out uint8_t *external_portp)
1660 uint32_t port_modes;
1663 int32_t count = 1; /* Default 1-1 mapping */
1664 int32_t offset = 1; /* Default starting external port number */
1666 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
1669 * No current port mode information (i.e. Huntington)
1670 * - infer mapping from available modes
1672 if ((rc = efx_mcdi_get_port_modes(enp,
1673 &port_modes, NULL, NULL)) != 0) {
1675 * No port mode information available
1676 * - use default mapping
1681 /* Only need to scan the current mode */
1682 port_modes = 1 << current;
1686 * Infer the internal port -> external number mapping from
1687 * the possible port modes for this NIC.
1689 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1690 struct ef10_external_port_map_s *eepmp =
1691 &__ef10_external_port_mappings[i];
1692 if (eepmp->family != enp->en_family)
1694 matches = (eepmp->modes_mask & port_modes);
1697 * Some modes match. For some Huntington boards
1698 * there will be multiple matches. The mapping on the
1699 * last match is used.
1701 count = eepmp->count;
1702 offset = eepmp->offset;
1703 port_modes &= ~matches;
1707 if (port_modes != 0) {
1708 /* Some advertised modes are not supported */
1715 * Scale as required by last matched mode and then convert to
1716 * correctly offset numbering
1718 *external_portp = (uint8_t)((port / count) + offset);
1722 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1727 static __checkReturn efx_rc_t
1729 __in efx_nic_t *enp)
1731 const efx_nic_ops_t *enop = enp->en_enop;
1732 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1733 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1734 ef10_link_state_t els;
1735 efx_port_t *epp = &(enp->en_port);
1736 uint32_t board_type = 0;
1737 uint32_t base, nvec;
1742 uint8_t mac_addr[6] = { 0 };
1745 /* Get the (zero-based) MCDI port number */
1746 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
1749 /* EFX MCDI interface uses one-based port numbers */
1750 emip->emi_port = port + 1;
1752 if ((rc = ef10_external_port_mapping(enp, port,
1753 &encp->enc_external_port)) != 0)
1757 * Get PCIe function number from firmware (used for
1758 * per-function privilege and dynamic config info).
1759 * - PCIe PF: pf = PF number, vf = 0xffff.
1760 * - PCIe VF: pf = parent PF, vf = VF number.
1762 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
1768 /* MAC address for this function */
1769 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1770 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
1771 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
1773 * Disable static config checking, ONLY for manufacturing test
1774 * and setup at the factory, to allow the static config to be
1777 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1778 if ((rc == 0) && (mac_addr[0] & 0x02)) {
1780 * If the static config does not include a global MAC
1781 * address pool then the board may return a locally
1782 * administered MAC address (this should only happen on
1783 * incorrectly programmed boards).
1787 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
1789 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
1794 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
1796 /* Board configuration (legacy) */
1797 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
1799 /* Unprivileged functions may not be able to read board cfg */
1806 encp->enc_board_type = board_type;
1807 encp->enc_clk_mult = 1; /* not used for EF10 */
1809 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
1810 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
1814 * Firmware with support for *_FEC capability bits does not
1815 * report that the corresponding *_FEC_REQUESTED bits are supported.
1816 * Add them here so that drivers understand that they are supported.
1818 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
1819 epp->ep_phy_cap_mask |=
1820 (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
1821 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
1822 epp->ep_phy_cap_mask |=
1823 (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
1824 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
1825 epp->ep_phy_cap_mask |=
1826 (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
1828 /* Obtain the default PHY advertised capabilities */
1829 if ((rc = ef10_phy_get_link(enp, &els)) != 0)
1831 epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
1832 epp->ep_adv_cap_mask = els.els_adv_cap_mask;
1834 /* Check capabilities of running datapath firmware */
1835 if ((rc = ef10_get_datapath_caps(enp)) != 0)
1838 /* Alignment for WPTR updates */
1839 encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
1841 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
1842 /* No boundary crossing limits */
1843 encp->enc_tx_dma_desc_boundary = 0;
1846 * Maximum offset into the frame at which the TCP header can start for
1847 * firmware-assisted TSO to work.
1849 encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
1852 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
1853 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
1854 * resources (allocated to this PCIe function), which is zero until
1855 * after we have allocated VIs.
1857 encp->enc_evq_limit = 1024;
1858 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
1859 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
1861 encp->enc_buftbl_limit = 0xFFFFFFFF;
1863 /* Get interrupt vector limits */
1864 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
1865 if (EFX_PCI_FUNCTION_IS_PF(encp))
1868 /* Ignore error (cannot query vector limits from a VF). */
1872 encp->enc_intr_vec_base = base;
1873 encp->enc_intr_limit = nvec;
1876 * Get the current privilege mask. Note that this may be modified
1877 * dynamically, so this value is informational only. DO NOT use
1878 * the privilege mask to check for sufficient privileges, as that
1879 * can result in time-of-check/time-of-use bugs.
1881 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
1883 encp->enc_privilege_mask = mask;
1885 /* Get remaining controller-specific board config */
1886 if ((rc = enop->eno_board_cfg(enp)) != 0)
1893 EFSYS_PROBE(fail11);
1895 EFSYS_PROBE(fail10);
1913 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1918 __checkReturn efx_rc_t
1920 __in efx_nic_t *enp)
1922 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1923 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1926 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1927 enp->en_family == EFX_FAMILY_MEDFORD ||
1928 enp->en_family == EFX_FAMILY_MEDFORD2);
1930 /* Read and clear any assertion state */
1931 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
1934 /* Exit the assertion handler */
1935 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
1939 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
1942 if ((rc = ef10_nic_board_cfg(enp)) != 0)
1946 * Set default driver config limits (based on board config).
1948 * FIXME: For now allocate a fixed number of VIs which is likely to be
1949 * sufficient and small enough to allow multiple functions on the same board.
1952 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
1953 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
1955 /* The client driver must configure and enable PIO buffer support */
1956 edcp->edc_max_piobuf_count = 0;
1957 edcp->edc_pio_alloc_size = 0;
1959 #if EFSYS_OPT_MAC_STATS
1960 /* Wipe the MAC statistics */
1961 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
1965 #if EFSYS_OPT_LOOPBACK
1966 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
1970 #if EFSYS_OPT_MON_STATS
1971 if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
1972 /* Unprivileged functions do not have access to sensors */
1978 encp->enc_features = enp->en_features;
1982 #if EFSYS_OPT_MON_STATS
1986 #if EFSYS_OPT_LOOPBACK
1990 #if EFSYS_OPT_MAC_STATS
2001 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2006 __checkReturn efx_rc_t
2007 ef10_nic_set_drv_limits(
2008 __inout efx_nic_t *enp,
2009 __in efx_drv_limits_t *edlp)
2011 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2012 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2013 uint32_t min_evq_count, max_evq_count;
2014 uint32_t min_rxq_count, max_rxq_count;
2015 uint32_t min_txq_count, max_txq_count;
2023 /* Get minimum required and maximum usable VI limits */
2024 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
2025 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
2026 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
2028 edcp->edc_min_vi_count =
2029 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
2031 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
2032 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
2033 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
2035 edcp->edc_max_vi_count =
2036 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
2039 * Check limits for sub-allocated piobuf blocks.
2040 * PIO is optional, so don't fail if the limits are incorrect.
2042 if ((encp->enc_piobuf_size == 0) ||
2043 (encp->enc_piobuf_limit == 0) ||
2044 (edlp->edl_min_pio_alloc_size == 0) ||
2045 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
2047 edcp->edc_max_piobuf_count = 0;
2048 edcp->edc_pio_alloc_size = 0;
2050 uint32_t blk_size, blk_count, blks_per_piobuf;
2053 MAX(edlp->edl_min_pio_alloc_size,
2054 encp->enc_piobuf_min_alloc_size);
2056 blks_per_piobuf = encp->enc_piobuf_size / blk_size;
2057 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
2059 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
2061 /* A zero max pio alloc count means unlimited */
2062 if ((edlp->edl_max_pio_alloc_count > 0) &&
2063 (edlp->edl_max_pio_alloc_count < blk_count)) {
2064 blk_count = edlp->edl_max_pio_alloc_count;
2067 edcp->edc_pio_alloc_size = blk_size;
2068 edcp->edc_max_piobuf_count =
2069 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
2075 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2081 __checkReturn efx_rc_t
2083 __in efx_nic_t *enp)
2086 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
2087 MC_CMD_ENTITY_RESET_OUT_LEN);
2090 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
2091 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
2093 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
2096 req.emr_cmd = MC_CMD_ENTITY_RESET;
2097 req.emr_in_buf = payload;
2098 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
2099 req.emr_out_buf = payload;
2100 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
2102 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
2103 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
2105 efx_mcdi_execute(enp, &req);
2107 if (req.emr_rc != 0) {
2112 /* Clear RX/TX DMA queue errors */
2113 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
2122 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2127 __checkReturn efx_rc_t
2129 __in efx_nic_t *enp)
2131 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
2132 uint32_t min_vi_count, max_vi_count;
2133 uint32_t vi_count, vi_base, vi_shift;
2137 uint32_t vi_window_size;
2140 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2141 enp->en_family == EFX_FAMILY_MEDFORD ||
2142 enp->en_family == EFX_FAMILY_MEDFORD2);
2144 /* Enable reporting of some events (e.g. link change) */
2145 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
2148 /* Allocate (optional) on-chip PIO buffers */
2149 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
2152 * For best performance, PIO writes should use a write-combined
2153 * (WC) memory mapping. Using a separate WC mapping for the PIO
2154 * aperture of each VI would be a burden to drivers (and not
2155 * possible if the host page size is >4Kbyte).
2157 * To avoid this we use a single uncached (UC) mapping for VI
2158 * register access, and a single WC mapping for extra VIs used
2161 * Each piobuf must be linked to a VI in the WC mapping, and to
2162 * each VI that is using a sub-allocated block from the piobuf.
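 *
 * The UC region therefore starts at offset 0 of the function's BAR and
 * covers the ordinary VIs; the WC region follows immediately and covers
 * only the extra VIs reserved for PIO writes (see the ena_uc_mem_map_*
 * and ena_wc_mem_map_* fields set up below).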
2164 min_vi_count = edcp->edc_min_vi_count;
2166 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
2168 /* Ensure that the previously attached driver's VIs are freed */
2169 if ((rc = efx_mcdi_free_vis(enp)) != 0)
2173 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
2174 * fails then retrying the request for fewer VI resources may succeed.
2177 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
2178 &vi_base, &vi_count, &vi_shift)) != 0)
2181 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
2183 if (vi_count < min_vi_count) {
2188 enp->en_arch.ef10.ena_vi_base = vi_base;
2189 enp->en_arch.ef10.ena_vi_count = vi_count;
2190 enp->en_arch.ef10.ena_vi_shift = vi_shift;
2192 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
2193 /* Not enough extra VIs to map piobufs */
2194 ef10_nic_free_piobufs(enp);
2197 enp->en_arch.ef10.ena_pio_write_vi_base =
2198 vi_count - enp->en_arch.ef10.ena_piobuf_count;
2200 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
2201 EFX_VI_WINDOW_SHIFT_INVALID);
2202 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
2203 EFX_VI_WINDOW_SHIFT_64K);
2204 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
2206 /* Save UC memory mapping details */
2207 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
2208 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2209 enp->en_arch.ef10.ena_uc_mem_map_size =
2211 enp->en_arch.ef10.ena_pio_write_vi_base);
2213 enp->en_arch.ef10.ena_uc_mem_map_size =
2215 enp->en_arch.ef10.ena_vi_count);
2218 /* Save WC memory mapping details */
2219 enp->en_arch.ef10.ena_wc_mem_map_offset =
2220 enp->en_arch.ef10.ena_uc_mem_map_offset +
2221 enp->en_arch.ef10.ena_uc_mem_map_size;
2223 enp->en_arch.ef10.ena_wc_mem_map_size =
2225 enp->en_arch.ef10.ena_piobuf_count);
2227 /* Link piobufs to extra VIs in WC mapping */
2228 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2229 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2230 rc = efx_mcdi_link_piobuf(enp,
2231 enp->en_arch.ef10.ena_pio_write_vi_base + i,
2232 enp->en_arch.ef10.ena_piobuf_handle[i]);
2239 * Allocate a vAdaptor attached to our upstream vPort/pPort.
2241 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2242 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2243 * retry the request several times after waiting a while. The wait time
2244 * between retries starts small (10ms) and exponentially increases.
2245 * Total wait time is a little over two seconds. Retry logic in the
2246 * client driver may mean this whole loop is repeated if it continues to fail.
2251 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2252 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2255 * Do not retry alloc for PF, or for other errors on
2261 /* VF startup before PF is ready. Retry allocation. */
2263 /* Too many attempts */
2267 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2268 EFSYS_SLEEP(delay_us);
2270 if (delay_us < 500000)
2274 enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
2275 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
2290 ef10_nic_free_piobufs(enp);
2293 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2298 __checkReturn efx_rc_t
2299 ef10_nic_get_vi_pool(
2300 __in efx_nic_t *enp,
2301 __out uint32_t *vi_countp)
2303 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2304 enp->en_family == EFX_FAMILY_MEDFORD ||
2305 enp->en_family == EFX_FAMILY_MEDFORD2);
2308 * Report VIs that the client driver can use.
2309 * Do not include VIs used for PIO buffer writes.
2311 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
2316 __checkReturn efx_rc_t
2317 ef10_nic_get_bar_region(
2318 __in efx_nic_t *enp,
2319 __in efx_nic_region_t region,
2320 __out uint32_t *offsetp,
2321 __out size_t *sizep)
2325 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2326 enp->en_family == EFX_FAMILY_MEDFORD ||
2327 enp->en_family == EFX_FAMILY_MEDFORD2);
2330 * TODO: Specify host memory mapping alignment and granularity
2331 * in efx_drv_limits_t so that they can be taken into account
2332 * when allocating extra VIs for PIO writes.
2336 /* UC mapped memory BAR region for VI registers */
2337 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2338 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2341 case EFX_REGION_PIO_WRITE_VI:
2342 /* WC mapped memory BAR region for piobuf writes */
2343 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2344 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2355 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2360 __checkReturn boolean_t
2361 ef10_nic_hw_unavailable(
2362 __in efx_nic_t *enp)
2366 if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
2369 EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
2370 if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
2376 ef10_nic_set_hw_unavailable(enp);
2382 ef10_nic_set_hw_unavailable(
2383 __in efx_nic_t *enp)
2385 EFSYS_PROBE(hw_unavail);
2386 enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
2392 __in efx_nic_t *enp)
2397 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2398 enp->en_vport_id = 0;
2400 /* Unlink piobufs from extra VIs in WC mapping */
2401 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2402 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2403 rc = efx_mcdi_unlink_piobuf(enp,
2404 enp->en_arch.ef10.ena_pio_write_vi_base + i);
2410 ef10_nic_free_piobufs(enp);
2412 (void) efx_mcdi_free_vis(enp);
2413 enp->en_arch.ef10.ena_vi_count = 0;
2418 __in efx_nic_t *enp)
2420 #if EFSYS_OPT_MON_STATS
2421 mcdi_mon_cfg_free(enp);
2422 #endif /* EFSYS_OPT_MON_STATS */
2423 (void) efx_mcdi_drv_attach(enp, B_FALSE);
2428 __checkReturn efx_rc_t
2429 ef10_nic_register_test(
2430 __in efx_nic_t *enp)
2435 _NOTE(ARGUNUSED(enp))
2436 _NOTE(CONSTANTCONDITION)
2446 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2451 #endif /* EFSYS_OPT_DIAG */
2453 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
2455 __checkReturn efx_rc_t
2456 efx_mcdi_get_nic_global(
2457 __in efx_nic_t *enp,
2459 __out uint32_t *valuep)
2462 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
2463 MC_CMD_GET_NIC_GLOBAL_OUT_LEN);
2466 req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
2467 req.emr_in_buf = payload;
2468 req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
2469 req.emr_out_buf = payload;
2470 req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
2472 MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
2474 efx_mcdi_execute(enp, &req);
2476 if (req.emr_rc != 0) {
2481 if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
2486 *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
2493 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2498 __checkReturn efx_rc_t
2499 efx_mcdi_set_nic_global(
2500 __in efx_nic_t *enp,
2502 __in uint32_t value)
2505 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);
2508 req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
2509 req.emr_in_buf = payload;
2510 req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
2511 req.emr_out_buf = NULL;
2512 req.emr_out_length = 0;
2514 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
2515 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
2517 efx_mcdi_execute(enp, &req);
2519 if (req.emr_rc != 0) {
2527 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2532 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
2534 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */