/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2012-2018 Solarflare Communications Inc.
 */
#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#include "ef10_tlv_layout.h"
    __checkReturn    efx_rc_t
efx_mcdi_get_port_assignment(
    __out        uint32_t *portp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
        MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {

    *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
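/*
 * Illustrative sketch (not part of the driver): how a client of this file
 * might call efx_mcdi_get_port_assignment() and convert the zero-based MCDI
 * port number into one-based numbering, as ef10_nic_board_cfg() does further
 * below. The helper name example_mcdi_port_one_based() is hypothetical.
 */
static    __checkReturn    efx_rc_t
example_mcdi_port_one_based(
    __in        efx_nic_t *enp,
    __out        uint32_t *one_based_portp)
{
    uint32_t port;
    efx_rc_t rc;

    if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
        return (rc);

    /* MCDI reports a zero-based port number */
    *one_based_portp = port + 1;
    return (0);
}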
    __checkReturn    efx_rc_t
efx_mcdi_get_port_modes(
    __out        uint32_t *modesp,
    __out_opt    uint32_t *current_modep,
    __out_opt    uint32_t *default_modep)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
        MC_CMD_GET_PORT_MODES_OUT_LEN);

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    req.emr_cmd = MC_CMD_GET_PORT_MODES;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    /*
     * Require only Modes and DefaultMode fields, unless the current mode
     * was requested (CurrentMode field was added for Medford).
     */
    if (req.emr_out_length_used <
        MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {

    if ((current_modep != NULL) && (req.emr_out_length_used <
        MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {

    *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);

    if (current_modep != NULL) {
        *current_modep = MCDI_OUT_DWORD(req,
            GET_PORT_MODES_OUT_CURRENT_MODE);
    }

    if (default_modep != NULL) {
        *default_modep = MCDI_OUT_DWORD(req,
            GET_PORT_MODES_OUT_DEFAULT_MODE);
    }

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
ef10_nic_get_port_mode_bandwidth(
    __in        uint32_t port_mode,
    __out        uint32_t *bandwidth_mbpsp)
{
    uint32_t bandwidth;

    switch (port_mode) {
    case TLV_PORT_MODE_10G:
        bandwidth = 10000;
        break;
    case TLV_PORT_MODE_10G_10G:
        bandwidth = 10000 * 2;
        break;
    case TLV_PORT_MODE_10G_10G_10G_10G:
    case TLV_PORT_MODE_10G_10G_10G_10G_Q:
    case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
    case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
        bandwidth = 10000 * 4;
        break;
    case TLV_PORT_MODE_40G:
        bandwidth = 40000;
        break;
    case TLV_PORT_MODE_40G_40G:
        bandwidth = 40000 * 2;
        break;
    case TLV_PORT_MODE_40G_10G_10G:
    case TLV_PORT_MODE_10G_10G_40G:
        bandwidth = 40000 + (10000 * 2);
        break;

    *bandwidth_mbpsp = bandwidth;

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
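/*
 * Worked example (illustrative, not part of the driver): for the combined
 * port mode TLV_PORT_MODE_40G_10G_10G the switch above yields
 * 40000 + (10000 * 2) = 60000 Mbps of aggregate bandwidth. The helper name
 * below is hypothetical.
 */
static    __checkReturn    efx_rc_t
example_port_mode_bandwidth_gbps(
    __out        uint32_t *bandwidth_gbpsp)
{
    uint32_t bandwidth_mbps;
    efx_rc_t rc;

    rc = ef10_nic_get_port_mode_bandwidth(TLV_PORT_MODE_40G_10G_10G,
        &bandwidth_mbps);
    if (rc != 0)
        return (rc);

    /* 60000 Mbps -> 60 Gbps */
    *bandwidth_gbpsp = bandwidth_mbps / 1000;
    return (0);
}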
static    __checkReturn    efx_rc_t
efx_mcdi_vadaptor_alloc(
    __in        uint32_t port_id)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
        MC_CMD_VADAPTOR_ALLOC_OUT_LEN);

    EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);

    req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;

    MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
    MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
        VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
        enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_vadaptor_free(
    __in        uint32_t port_id)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
        MC_CMD_VADAPTOR_FREE_OUT_LEN);

    req.emr_cmd = MC_CMD_VADAPTOR_FREE;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;

    MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
efx_mcdi_get_mac_address_pf(
    __out_ecount_opt(6)    uint8_t mac_addrp[6])
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
        MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {

    if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {

    if (mac_addrp != NULL) {
        addrp = MCDI_OUT2(req, uint8_t,
            GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);

        EFX_MAC_ADDR_COPY(mac_addrp, addrp);
    }

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
efx_mcdi_get_mac_address_vf(
    __out_ecount_opt(6)    uint8_t mac_addrp[6])
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
        MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;

    MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
        EVB_PORT_ID_ASSIGNED);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used <
        MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {

    if (MCDI_OUT_DWORD(req,
        VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {

    if (mac_addrp != NULL) {
        addrp = MCDI_OUT2(req, uint8_t,
            VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);

        EFX_MAC_ADDR_COPY(mac_addrp, addrp);
    }

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
efx_mcdi_get_clock(
    __in        efx_nic_t *enp,
    __out        uint32_t *sys_freqp,
    __out        uint32_t *dpcpu_freqp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
        MC_CMD_GET_CLOCK_OUT_LEN);

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    req.emr_cmd = MC_CMD_GET_CLOCK;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {

    *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
    if (*sys_freqp == 0) {

    *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
    if (*dpcpu_freqp == 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
efx_mcdi_get_rxdp_config(
    __out        uint32_t *end_paddingp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
        MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
    uint32_t end_padding;

    req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;

    efx_mcdi_execute(enp, &req);
    if (req.emr_rc != 0) {

    if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
        GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
        /* RX DMA end padding is disabled */

    switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
        GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
    case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
        end_padding = 64;
        break;
    case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
        end_padding = 128;
        break;
    case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
        end_padding = 256;
        break;

    *end_paddingp = end_padding;

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
efx_mcdi_get_vector_cfg(
    __out_opt    uint32_t *vec_basep,
    __out_opt    uint32_t *pf_nvecp,
    __out_opt    uint32_t *vf_nvecp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
        MC_CMD_GET_VECTOR_CFG_OUT_LEN);

    req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {

    if (vec_basep != NULL)
        *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
    if (pf_nvecp != NULL)
        *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
    if (vf_nvecp != NULL)
        *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_alloc_vis(
    __in        uint32_t min_vi_count,
    __in        uint32_t max_vi_count,
    __out        uint32_t *vi_basep,
    __out        uint32_t *vi_countp,
    __out        uint32_t *vi_shiftp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
        MC_CMD_ALLOC_VIS_EXT_OUT_LEN);

    if (vi_countp == NULL) {

    req.emr_cmd = MC_CMD_ALLOC_VIS;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;

    MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
    MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {

    *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
    *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);

    /* Report VI_SHIFT if available (always zero for Huntington) */
    if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
        *vi_shiftp = 0;
    else
        *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_free_vis(
    __in        efx_nic_t *enp)
{
    EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
    EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);

    req.emr_cmd = MC_CMD_FREE_VIS;
    req.emr_in_buf = NULL;
    req.emr_in_length = 0;
    req.emr_out_buf = NULL;
    req.emr_out_length = 0;

    efx_mcdi_execute_quiet(enp, &req);

    /* Ignore EALREADY (no allocated VIs, so nothing to free) */
    if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_alloc_piobuf(
    __out        efx_piobuf_handle_t *handlep)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
        MC_CMD_ALLOC_PIOBUF_OUT_LEN);

    if (handlep == NULL) {

    req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {

    *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_free_piobuf(
    __in        efx_piobuf_handle_t handle)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
        MC_CMD_FREE_PIOBUF_OUT_LEN);

    req.emr_cmd = MC_CMD_FREE_PIOBUF;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;

    MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_link_piobuf(
    __in        uint32_t vi_index,
    __in        efx_piobuf_handle_t handle)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
        MC_CMD_LINK_PIOBUF_OUT_LEN);

    req.emr_cmd = MC_CMD_LINK_PIOBUF;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;

    MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
    MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
efx_mcdi_unlink_piobuf(
    __in        uint32_t vi_index)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
        MC_CMD_UNLINK_PIOBUF_OUT_LEN);

    req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;

    MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
ef10_nic_alloc_piobufs(
    __in        efx_nic_t *enp,
    __in        uint32_t max_piobuf_count)
{
    efx_piobuf_handle_t *handlep;

    EFSYS_ASSERT3U(max_piobuf_count, <=,
        EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));

    enp->en_arch.ef10.ena_piobuf_count = 0;

    for (i = 0; i < max_piobuf_count; i++) {
        handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];

        if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
            goto fail1;
        enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
        enp->en_arch.ef10.ena_piobuf_count++;
    }

    for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
        handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];

        (void) efx_mcdi_free_piobuf(enp, *handlep);
        *handlep = EFX_PIOBUF_HANDLE_INVALID;
    }
    enp->en_arch.ef10.ena_piobuf_count = 0;
ef10_nic_free_piobufs(
    __in        efx_nic_t *enp)
{
    efx_piobuf_handle_t *handlep;

    for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
        handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];

        (void) efx_mcdi_free_piobuf(enp, *handlep);
        *handlep = EFX_PIOBUF_HANDLE_INVALID;
    }
    enp->en_arch.ef10.ena_piobuf_count = 0;
/* Sub-allocate a block from a piobuf */
    __checkReturn    efx_rc_t
ef10_nic_pio_alloc(
    __inout        efx_nic_t *enp,
    __out        uint32_t *bufnump,
    __out        efx_piobuf_handle_t *handlep,
    __out        uint32_t *blknump,
    __out        uint32_t *offsetp,
    __out        size_t *sizep)
{
    efx_nic_cfg_t *encp = &enp->en_nic_cfg;
    efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
    uint32_t blk_per_buf;

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);
    EFSYS_ASSERT(bufnump);
    EFSYS_ASSERT(handlep);
    EFSYS_ASSERT(blknump);
    EFSYS_ASSERT(offsetp);

    if ((edcp->edc_pio_alloc_size == 0) ||
        (enp->en_arch.ef10.ena_piobuf_count == 0)) {

    blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;

    for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
        uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];

        EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
        for (blk = 0; blk < blk_per_buf; blk++) {
            if ((*map & (1u << blk)) == 0) {

    *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];

    *sizep = edcp->edc_pio_alloc_size;
    *offsetp = blk * (*sizep);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
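/*
 * Illustrative sketch (not part of the driver): the first-fit bitmap scan
 * used by the piobuf sub-allocator above, reduced to plain C. Each 32-bit
 * map word tracks the blocks of one piobuf; a clear bit is a free block.
 * The function name and parameters are hypothetical, and nblocks is assumed
 * to be at most 32.
 */
static int
example_bitmap_first_fit(uint32_t *map, unsigned int nblocks)
{
    unsigned int blk;

    for (blk = 0; blk < nblocks; blk++) {
        if ((*map & (1u << blk)) == 0) {
            *map |= (1u << blk);    /* mark the block as allocated */
            return ((int)blk);
        }
    }
    return (-1);    /* no free block in this piobuf */
}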
/* Free a piobuf sub-allocated block */
    __checkReturn    efx_rc_t
ef10_nic_pio_free(
    __inout        efx_nic_t *enp,
    __in        uint32_t bufnum,
    __in        uint32_t blknum)
{
    uint32_t *map;

    if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
        (blknum >= (8 * sizeof (*map)))) {

    map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
    if ((*map & (1u << blknum)) == 0) {

    *map &= ~(1u << blknum);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
ef10_nic_pio_link(
    __inout        efx_nic_t *enp,
    __in        uint32_t vi_index,
    __in        efx_piobuf_handle_t handle)
{
    return (efx_mcdi_link_piobuf(enp, vi_index, handle));
}

    __checkReturn    efx_rc_t
ef10_nic_pio_unlink(
    __inout        efx_nic_t *enp,
    __in        uint32_t vi_index)
{
    return (efx_mcdi_unlink_piobuf(enp, vi_index));
}
static    __checkReturn    efx_rc_t
ef10_mcdi_get_pf_count(
    __out        uint32_t *pf_countp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
        MC_CMD_GET_PF_COUNT_OUT_LEN);

    req.emr_cmd = MC_CMD_GET_PF_COUNT;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {

    *pf_countp = *MCDI_OUT(req, uint8_t,
        MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);

    EFSYS_ASSERT(*pf_countp != 0);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
static    __checkReturn    efx_rc_t
ef10_get_datapath_caps(
    __in        efx_nic_t *enp)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
        MC_CMD_GET_CAPABILITIES_V5_OUT_LEN);

    if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)

    req.emr_cmd = MC_CMD_GET_CAPABILITIES;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;

    efx_mcdi_execute_quiet(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {

#define CAP_FLAGS1(_req, _flag) \
    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
    (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))

#define CAP_FLAGS2(_req, _flag) \
    (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
    (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
    /*
     * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
     * We only support the 14 byte prefix here.
     */
    if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {

    encp->enc_rx_prefix_size = 14;

    /* Check if the firmware supports additional RSS modes */
    if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
        encp->enc_rx_scale_additional_modes_supported = B_TRUE;
    else
        encp->enc_rx_scale_additional_modes_supported = B_FALSE;

    /* Check if the firmware supports TSO */
    if (CAP_FLAGS1(req, TX_TSO))
        encp->enc_fw_assisted_tso_enabled = B_TRUE;
    else
        encp->enc_fw_assisted_tso_enabled = B_FALSE;

    /* Check if the firmware supports FATSOv2 */
    if (CAP_FLAGS2(req, TX_TSO_V2)) {
        encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
        encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
            GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
    } else {
        encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
        encp->enc_fw_assisted_tso_v2_n_contexts = 0;
    }

    /* Check if the firmware supports FATSOv2 encap */
    if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
        encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
    else
        encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
    /* Check if the firmware has vadapter/vport/vswitch support */
    if (CAP_FLAGS1(req, EVB))
        encp->enc_datapath_cap_evb = B_TRUE;
    else
        encp->enc_datapath_cap_evb = B_FALSE;

    /* Check if the firmware supports VLAN insertion */
    if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
        encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
    else
        encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;

    /* Check if the firmware supports RX event batching */
    if (CAP_FLAGS1(req, RX_BATCHING))
        encp->enc_rx_batching_enabled = B_TRUE;
    else
        encp->enc_rx_batching_enabled = B_FALSE;

    /*
     * Even if batching isn't reported as supported, we may still get
     * batched events (see bug61153).
     */
    encp->enc_rx_batch_max = 16;

    /* Check if the firmware supports disabling scatter on RXQs */
    if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
        encp->enc_rx_disable_scatter_supported = B_TRUE;
    else
        encp->enc_rx_disable_scatter_supported = B_FALSE;

    /* Check if the firmware supports packed stream mode */
    if (CAP_FLAGS1(req, RX_PACKED_STREAM))
        encp->enc_rx_packed_stream_supported = B_TRUE;
    else
        encp->enc_rx_packed_stream_supported = B_FALSE;

    /*
     * Check if the firmware supports configurable buffer sizes
     * for packed stream mode (otherwise buffer size is 1Mbyte)
     */
    if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
        encp->enc_rx_var_packed_stream_supported = B_TRUE;
    else
        encp->enc_rx_var_packed_stream_supported = B_FALSE;

    /* Check if the firmware supports equal stride super-buffer mode */
    if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
        encp->enc_rx_es_super_buffer_supported = B_TRUE;
    else
        encp->enc_rx_es_super_buffer_supported = B_FALSE;

    /* Check if the firmware supports FW subvariant w/o Tx checksumming */
    if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
        encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
    else
        encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
    /* Check if the firmware supports set mac with running filters */
    if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
        encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
    else
        encp->enc_allow_set_mac_with_installed_filters = B_FALSE;

    /*
     * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
     * specifying which parameters to configure.
     */
    if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
        encp->enc_enhanced_set_mac_supported = B_TRUE;
    else
        encp->enc_enhanced_set_mac_supported = B_FALSE;

    /*
     * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
     * us to let the firmware choose the settings to use on an EVQ.
     */
    if (CAP_FLAGS2(req, INIT_EVQ_V2))
        encp->enc_init_evq_v2_supported = B_TRUE;
    else
        encp->enc_init_evq_v2_supported = B_FALSE;

    /*
     * Check if firmware-verified NVRAM updates must be used.
     *
     * The firmware trusted installer requires all NVRAM updates to use
     * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
     * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
     * partition and report the result).
     */
    if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
        encp->enc_nvram_update_verify_result_supported = B_TRUE;
    else
        encp->enc_nvram_update_verify_result_supported = B_FALSE;

    /*
     * Check if firmware provides packet memory and Rx datapath
     * counters.
     */
    if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
        encp->enc_pm_and_rxdp_counters = B_TRUE;
    else
        encp->enc_pm_and_rxdp_counters = B_FALSE;

    /*
     * Check if the 40G MAC hardware is capable of reporting
     * statistics for Tx size bins.
     */
    if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
        encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
    else
        encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;

    /*
     * Check if firmware supports VXLAN and NVGRE tunnels.
     * The capability indicates Geneve protocol support as well.
     */
    if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
        encp->enc_tunnel_encapsulations_supported =
            (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
            (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
            (1u << EFX_TUNNEL_PROTOCOL_NVGRE);

        EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
            MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
        encp->enc_tunnel_config_udp_entries_max =
            EFX_TUNNEL_MAXNENTRIES;
    } else {
        encp->enc_tunnel_config_udp_entries_max = 0;
    }
    /*
     * Check if firmware reports the VI window mode.
     * Medford2 has a variable VI window size (8K, 16K or 64K).
     * Medford and Huntington have a fixed 8K VI window size.
     */
    if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
        uint8_t mode =
            MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

        switch (mode) {
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
            encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
            break;
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
            encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
            break;
        case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
            encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
            break;
        default:
            encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
            break;
        }
    } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
        (enp->en_family == EFX_FAMILY_MEDFORD)) {
        /* Huntington and Medford have fixed 8K window size */
        encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
    } else {
        encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
    }

    /* Check if firmware supports extended MAC stats. */
    if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
        /* Extended stats buffer supported */
        encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
            GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
    } else {
        /* Use Siena-compatible legacy MAC stats */
        encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
    }

    if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
        encp->enc_fec_counters = B_TRUE;
    else
        encp->enc_fec_counters = B_FALSE;
    /* Check if the firmware provides head-of-line blocking counters */
    if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
        encp->enc_hlb_counters = B_TRUE;
    else
        encp->enc_hlb_counters = B_FALSE;

    if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
        /* Only one exclusive RSS context is available per port. */
        encp->enc_rx_scale_max_exclusive_contexts = 1;

        switch (enp->en_family) {
        case EFX_FAMILY_MEDFORD2:
            encp->enc_rx_scale_hash_alg_mask =
                (1U << EFX_RX_HASHALG_TOEPLITZ);
            break;

        case EFX_FAMILY_MEDFORD:
        case EFX_FAMILY_HUNTINGTON:
            /*
             * Packed stream firmware variant maintains a
             * non-standard algorithm for hash computation.
             * It implies explicit XORing together
             * source + destination IP addresses (or last
             * four bytes in the case of IPv6) and using the
             * resulting value as the input to a Toeplitz hash.
             */
            encp->enc_rx_scale_hash_alg_mask =
                (1U << EFX_RX_HASHALG_PACKED_STREAM);
            break;
        }

        /* Port numbers cannot contribute to the hash value */
        encp->enc_rx_scale_l4_hash_supported = B_FALSE;
    } else {
        /*
         * Maximum number of exclusive RSS contexts.
         * EF10 hardware supports 64 in total, but 6 are reserved
         * for shared contexts. They are a global resource so
         * not all may be available.
         */
        encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;

        encp->enc_rx_scale_hash_alg_mask =
            (1U << EFX_RX_HASHALG_TOEPLITZ);

        /*
         * It is possible to use port numbers as
         * the input data for hash computation.
         */
        encp->enc_rx_scale_l4_hash_supported = B_TRUE;
    }

    /* Check if the firmware supports "FLAG" and "MARK" filter actions */
    if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
        encp->enc_filter_action_flag_supported = B_TRUE;
    else
        encp->enc_filter_action_flag_supported = B_FALSE;

    if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
        encp->enc_filter_action_mark_supported = B_TRUE;
    else
        encp->enc_filter_action_mark_supported = B_FALSE;

    /* Get maximum supported value for "MARK" filter action */
    if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
        encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
            GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
    else
        encp->enc_filter_action_mark_max = 0;

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
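/*
 * Illustrative sketch (not part of the driver): the idea behind the
 * CAP_FLAGS1()/CAP_FLAGS2() macros used above. FLAGS1 is present in every
 * GET_CAPABILITIES response, but FLAGS2 only exists from the V2 response
 * onwards, so a FLAGS2 bit is only meaningful when the firmware returned at
 * least the V2 length. All names below are hypothetical.
 */
static boolean_t
example_flags2_bit_is_set(size_t out_length_used, size_t v2_out_length,
    uint32_t flags2, unsigned int flag_lbn)
{
    /* A missing response word means "capability not present" */
    if (out_length_used < v2_out_length)
        return (B_FALSE);

    return (((flags2 & (1u << flag_lbn)) != 0) ? B_TRUE : B_FALSE);
}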
#define EF10_LEGACY_PF_PRIVILEGE_MASK \
    (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
    MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)

#define EF10_LEGACY_VF_PRIVILEGE_MASK    0
    __checkReturn    efx_rc_t
ef10_get_privilege_mask(
    __in        efx_nic_t *enp,
    __out        uint32_t *maskp)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);

    if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
        &mask)) != 0) {

    /* Fallback for old firmware without privilege mask support */
    if (EFX_PCI_FUNCTION_IS_PF(encp)) {
        /* Assume PF has admin privilege */
        mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
    } else {
        /* VF is always unprivileged by default */
        mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
    }

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Table of mapping schemes from port number to external number.
 *
 * Each port number ultimately corresponds to a connector: either as part of
 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
 * "Salina"). In general:
 *
 * Port number (0-based)
 *     |
 *     port mapping (n:1)
 *     v
 * External port number (normally 1-based)
 *     |
 *     fixed (1:1) or cable assembly (1:m)
 *     v
 * Connector
 *
 * The external numbering refers to the cages or magjacks on the board,
 * as visibly annotated on the board or back panel. This table describes
 * how to determine which external cage/magjack corresponds to the port
 * numbers used by the driver.
 *
 * The count of adjacent port numbers that map to each external number,
 * and the offset in the numbering, is determined by the chip family and
 * current port mode.
 *
 * For the Huntington family, the current port mode cannot be discovered,
 * but a single mapping is used by all modes for a given chip variant,
 * so the mapping used is instead the last match in the table to the full
 * set of port modes to which the NIC can be configured. Therefore the
 * ordering of entries in the mapping table is significant.
 */
static struct ef10_external_port_map_s {
    efx_family_t    family;
    uint32_t        modes_mask;
    int32_t         count;
    int32_t         offset;
} __ef10_external_port_mappings[] = {
    /*
     * Modes used by Huntington family controllers where each port
     * number maps to a separate cage.
     * SFN7x22F (Torino):
     */
    EFX_FAMILY_HUNTINGTON,
    (1U << TLV_PORT_MODE_10G) |                    /* mode 0 */
    (1U << TLV_PORT_MODE_10G_10G) |                /* mode 2 */
    (1U << TLV_PORT_MODE_10G_10G_10G_10G),         /* mode 4 */
    1,    /* ports per cage */
    /*
     * Modes which for Huntington identify a chip variant where 2
     * adjacent port numbers map to each cage.
     */
    EFX_FAMILY_HUNTINGTON,
    (1U << TLV_PORT_MODE_40G) |                    /* mode 1 */
    (1U << TLV_PORT_MODE_40G_40G) |                /* mode 3 */
    (1U << TLV_PORT_MODE_40G_10G_10G) |            /* mode 6 */
    (1U << TLV_PORT_MODE_10G_10G_40G),             /* mode 7 */
    2,    /* ports per cage */
    /*
     * Modes that on Medford allocate each port number to a separate
     * cage.
     */
    (1U << TLV_PORT_MODE_10G) |                    /* mode 0 */
    (1U << TLV_PORT_MODE_10G_10G),                 /* mode 2 */
    1,    /* ports per cage */
    /*
     * Modes that on Medford allocate 2 adjacent port numbers to each
     * cage.
     */
    (1U << TLV_PORT_MODE_40G) |                    /* mode 1 */
    (1U << TLV_PORT_MODE_40G_40G) |                /* mode 3 */
    (1U << TLV_PORT_MODE_40G_10G_10G) |            /* mode 6 */
    (1U << TLV_PORT_MODE_10G_10G_40G) |            /* mode 7 */
    /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
    (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),   /* mode 9 */
    2,    /* ports per cage */
    /*
     * Modes that on Medford allocate 4 adjacent port numbers to each
     * connector, starting on cage 1.
     */
    (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) |      /* mode 5 */
    /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
    (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1),      /* mode 4 */
    4,    /* ports per cage */
    /*
     * Modes that on Medford allocate 4 adjacent port numbers to each
     * connector, starting on cage 2.
     */
    (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2),      /* mode 8 */
    4,    /* ports per cage */
    /*
     * Modes that on Medford2 allocate each port number to a separate
     * cage.
     */
    EFX_FAMILY_MEDFORD2,
    (1U << TLV_PORT_MODE_1x1_NA) |                 /* mode 0 */
    (1U << TLV_PORT_MODE_1x4_NA) |                 /* mode 1 */
    (1U << TLV_PORT_MODE_1x1_1x1) |                /* mode 2 */
    (1U << TLV_PORT_MODE_1x2_NA) |                 /* mode 10 */
    (1U << TLV_PORT_MODE_1x2_1x2) |                /* mode 12 */
    (1U << TLV_PORT_MODE_1x4_1x2) |                /* mode 15 */
    (1U << TLV_PORT_MODE_1x2_1x4),                 /* mode 16 */
    1,    /* ports per cage */
    /*
     * FIXME: Some port modes are not representable in this mapping:
     *  - TLV_PORT_MODE_1x2_2x1 (mode 17):
     */
    /*
     * Modes that on Medford2 allocate 2 adjacent port numbers to each
     * cage, starting on cage 1.
     */
    EFX_FAMILY_MEDFORD2,
    (1U << TLV_PORT_MODE_1x4_1x4) |                /* mode 3 */
    (1U << TLV_PORT_MODE_2x1_2x1) |                /* mode 4 */
    (1U << TLV_PORT_MODE_1x4_2x1) |                /* mode 6 */
    (1U << TLV_PORT_MODE_2x1_1x4) |                /* mode 7 */
    (1U << TLV_PORT_MODE_2x2_NA) |                 /* mode 13 */
    (1U << TLV_PORT_MODE_2x1_1x2),                 /* mode 18 */
    2,    /* ports per cage */
    /*
     * Modes that on Medford2 allocate 2 adjacent port numbers to each
     * cage, starting on cage 2.
     */
    EFX_FAMILY_MEDFORD2,
    (1U << TLV_PORT_MODE_NA_2x2),                  /* mode 14 */
    2,    /* ports per cage */
    /*
     * Modes that on Medford2 allocate 4 adjacent port numbers to each
     * connector, starting on cage 1.
     */
    EFX_FAMILY_MEDFORD2,
    (1U << TLV_PORT_MODE_4x1_NA),                  /* mode 5 */
    4,    /* ports per cage */
    /*
     * Modes that on Medford2 allocate 4 adjacent port numbers to each
     * connector, starting on cage 2.
     */
    EFX_FAMILY_MEDFORD2,
    (1U << TLV_PORT_MODE_NA_4x1) |                 /* mode 8 */
    (1U << TLV_PORT_MODE_NA_1x2),                  /* mode 11 */
    4,    /* ports per cage */
};
static    __checkReturn    efx_rc_t
ef10_external_port_mapping(
    __in        efx_nic_t *enp,
    __in        uint32_t port,
    __out        uint8_t *external_portp)
{
    uint32_t port_modes;
    int32_t count = 1;    /* Default 1-1 mapping */
    int32_t offset = 1;    /* Default starting external port number */

    if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
        NULL)) != 0) {
        /*
         * No current port mode information (i.e. Huntington)
         * - infer mapping from available modes
         */
        if ((rc = efx_mcdi_get_port_modes(enp,
            &port_modes, NULL, NULL)) != 0) {
            /*
             * No port mode information available
             * - use default mapping
             */

    } else {
        /* Only need to scan the current mode */
        port_modes = 1 << current;
    }

    /*
     * Infer the internal port -> external number mapping from
     * the possible port modes for this NIC.
     */
    for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
        struct ef10_external_port_map_s *eepmp =
            &__ef10_external_port_mappings[i];

        if (eepmp->family != enp->en_family)
            continue;
        matches = (eepmp->modes_mask & port_modes);
        if (matches != 0) {
            /*
             * Some modes match. For some Huntington boards
             * there will be multiple matches. The mapping on the
             * last match is used.
             */
            count = eepmp->count;
            offset = eepmp->offset;
            port_modes &= ~matches;
        }
    }

    if (port_modes != 0) {
        /* Some advertised modes are not supported */

    /*
     * Scale as required by last matched mode and then convert to
     * correctly offset numbering
     */
    *external_portp = (uint8_t)((port / count) + offset);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
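/*
 * Worked example (illustrative, not part of the driver): on a Huntington
 * NIC whose advertised modes include TLV_PORT_MODE_40G_40G, the table above
 * selects count = 2 and offset = 1, so MCDI ports 0 and 1 map to external
 * port 1 and ports 2 and 3 map to external port 2. The helper below simply
 * restates the conversion used at the end of ef10_external_port_mapping().
 */
static uint8_t
example_external_port(uint32_t port, int32_t count, int32_t offset)
{
    /* e.g. port 2 with count = 2 and offset = 1 -> external port 2 */
    return ((uint8_t)((port / (uint32_t)count) + (uint32_t)offset));
}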
static    __checkReturn    efx_rc_t
ef10_nic_board_cfg(
    __in        efx_nic_t *enp)
{
    const efx_nic_ops_t *enop = enp->en_enop;
    efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    ef10_link_state_t els;
    efx_port_t *epp = &(enp->en_port);
    uint32_t board_type = 0;
    uint32_t base, nvec;
    uint8_t mac_addr[6] = { 0 };

    /* Get the (zero-based) MCDI port number */
    if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)

    /* EFX MCDI interface uses one-based port numbers */
    emip->emi_port = port + 1;

    if ((rc = ef10_external_port_mapping(enp, port,
        &encp->enc_external_port)) != 0)

    /*
     * Get PCIe function number from firmware (used for
     * per-function privilege and dynamic config info).
     *  - PCIe PF: pf = PF number, vf = 0xffff.
     *  - PCIe VF: pf = parent PF, vf = VF number.
     */
    if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)

    /* MAC address for this function */
    if (EFX_PCI_FUNCTION_IS_PF(encp)) {
        rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
        /*
         * Disable static config checking, ONLY for manufacturing test
         * and setup at the factory, to allow the static config to be
         * installed.
         */
#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
        if ((rc == 0) && (mac_addr[0] & 0x02)) {
            /*
             * If the static config does not include a global MAC
             * address pool then the board may return a locally
             * administered MAC address (this should only happen on
             * incorrectly programmed boards).
             */
#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
    } else {
        rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
    }

    EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
    /* Board configuration (legacy) */
    rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
        /* Unprivileged functions may not be able to read board cfg */

    encp->enc_board_type = board_type;
    encp->enc_clk_mult = 1;    /* not used for EF10 */

    /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
    if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)

    /*
     * Firmware with support for *_FEC capability bits does not
     * report that the corresponding *_FEC_REQUESTED bits are supported.
     * Add them here so that drivers understand that they are supported.
     */
    if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC))
        epp->ep_phy_cap_mask |=
            (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED);
    if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC))
        epp->ep_phy_cap_mask |=
            (1u << EFX_PHY_CAP_RS_FEC_REQUESTED);
    if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC))
        epp->ep_phy_cap_mask |=
            (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);

    /* Obtain the default PHY advertised capabilities */
    if ((rc = ef10_phy_get_link(enp, &els)) != 0)

    epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
    epp->ep_adv_cap_mask = els.els_adv_cap_mask;

    /* Check capabilities of running datapath firmware */
    if ((rc = ef10_get_datapath_caps(enp)) != 0)

    /* Alignment for WPTR updates */
    encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;

    encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
    /* No boundary crossing limits */
    encp->enc_tx_dma_desc_boundary = 0;

    /*
     * Maximum number of bytes into the frame the TCP header can start for
     * firmware assisted TSO to work.
     */
    encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;

    /*
     * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
     * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
     * resources (allocated to this PCIe function), which is zero until
     * after we have allocated VIs.
     */
    encp->enc_evq_limit = 1024;
    encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
    encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;

    encp->enc_buftbl_limit = 0xFFFFFFFF;

    /* Get interrupt vector limits */
    if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
        if (EFX_PCI_FUNCTION_IS_PF(encp))

        /* Ignore error (cannot query vector limits from a VF). */

    encp->enc_intr_vec_base = base;
    encp->enc_intr_limit = nvec;

    /*
     * Get the current privilege mask. Note that this may be modified
     * dynamically, so this value is informational only. DO NOT use
     * the privilege mask to check for sufficient privileges, as that
     * can result in time-of-check/time-of-use bugs.
     */
    if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)

    encp->enc_privilege_mask = mask;

    /* Get remaining controller-specific board config */
    if ((rc = enop->eno_board_cfg(enp)) != 0)

    EFSYS_PROBE(fail11);

    EFSYS_PROBE(fail10);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
ef10_nic_probe(
    __in        efx_nic_t *enp)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    /* Read and clear any assertion state */
    if ((rc = efx_mcdi_read_assertion(enp)) != 0)

    /* Exit the assertion handler */
    if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)

    if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)

    if ((rc = ef10_nic_board_cfg(enp)) != 0)

    /*
     * Set default driver config limits (based on board config).
     *
     * FIXME: For now allocate a fixed number of VIs which is likely to be
     * sufficient and small enough to allow multiple functions on the same
     * board.
     */
    edcp->edc_min_vi_count = edcp->edc_max_vi_count =
        MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));

    /* The client driver must configure and enable PIO buffer support */
    edcp->edc_max_piobuf_count = 0;
    edcp->edc_pio_alloc_size = 0;

#if EFSYS_OPT_MAC_STATS
    /* Wipe the MAC statistics */
    if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
#endif /* EFSYS_OPT_MAC_STATS */

#if EFSYS_OPT_LOOPBACK
    if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
#endif /* EFSYS_OPT_LOOPBACK */

#if EFSYS_OPT_MON_STATS
    if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
        /* Unprivileged functions do not have access to sensors */
#endif /* EFSYS_OPT_MON_STATS */

    encp->enc_features = enp->en_features;

#if EFSYS_OPT_MON_STATS

#if EFSYS_OPT_LOOPBACK

#if EFSYS_OPT_MAC_STATS

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
ef10_nic_set_drv_limits(
    __inout        efx_nic_t *enp,
    __in        efx_drv_limits_t *edlp)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
    uint32_t min_evq_count, max_evq_count;
    uint32_t min_rxq_count, max_rxq_count;
    uint32_t min_txq_count, max_txq_count;

    /* Get minimum required and maximum usable VI limits */
    min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
    min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
    min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);

    edcp->edc_min_vi_count =
        MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));

    max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
    max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
    max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);

    edcp->edc_max_vi_count =
        MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));

    /*
     * Check limits for sub-allocated piobuf blocks.
     * PIO is optional, so don't fail if the limits are incorrect.
     */
    if ((encp->enc_piobuf_size == 0) ||
        (encp->enc_piobuf_limit == 0) ||
        (edlp->edl_min_pio_alloc_size == 0) ||
        (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
        edcp->edc_max_piobuf_count = 0;
        edcp->edc_pio_alloc_size = 0;
    } else {
        uint32_t blk_size, blk_count, blks_per_piobuf;

        blk_size =
            MAX(edlp->edl_min_pio_alloc_size,
            encp->enc_piobuf_min_alloc_size);

        blks_per_piobuf = encp->enc_piobuf_size / blk_size;
        EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);

        blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);

        /* A zero max pio alloc count means unlimited */
        if ((edlp->edl_max_pio_alloc_count > 0) &&
            (edlp->edl_max_pio_alloc_count < blk_count)) {
            blk_count = edlp->edl_max_pio_alloc_count;
        }

        edcp->edc_pio_alloc_size = blk_size;
        edcp->edc_max_piobuf_count =
            (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
    }

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
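/*
 * Worked example (illustrative, not part of the driver): with an assumed
 * 2048 byte piobuf, a driver minimum allocation size of 256 bytes and a
 * requested maximum of 10 sub-allocated blocks, the calculation above gives
 * blk_size = 256, blks_per_piobuf = 8, blk_count = 10 and
 * edc_max_piobuf_count = (10 + 7) / 8 = 2 piobufs. The rounding-up division
 * is restated below; the figures are assumptions, not hardware constants.
 */
static uint32_t
example_piobuf_count(uint32_t blk_count, uint32_t blks_per_piobuf)
{
    /* Round up: 10 blocks at 8 blocks per piobuf -> 2 piobufs */
    return ((blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf);
}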
    __checkReturn    efx_rc_t
ef10_nic_reset(
    __in        efx_nic_t *enp)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
        MC_CMD_ENTITY_RESET_OUT_LEN);

    /* ef10_nic_reset() is called to recover from BADASSERT failures. */
    if ((rc = efx_mcdi_read_assertion(enp)) != 0)

    if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)

    req.emr_cmd = MC_CMD_ENTITY_RESET;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;

    MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
        ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    /* Clear RX/TX DMA queue errors */
    enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
ef10_nic_init(
    __in        efx_nic_t *enp)
{
    efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
    uint32_t min_vi_count, max_vi_count;
    uint32_t vi_count, vi_base, vi_shift;
    uint32_t vi_window_size;

    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    /* Enable reporting of some events (e.g. link change) */
    if ((rc = efx_mcdi_log_ctrl(enp)) != 0)

    /* Allocate (optional) on-chip PIO buffers */
    ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);

    /*
     * For best performance, PIO writes should use a write-combined
     * (WC) memory mapping. Using a separate WC mapping for the PIO
     * aperture of each VI would be a burden to drivers (and not
     * possible if the host page size is >4Kbyte).
     *
     * To avoid this we use a single uncached (UC) mapping for VI
     * register access, and a single WC mapping for extra VIs used
     * for PIO writes.
     *
     * Each piobuf must be linked to a VI in the WC mapping, and to
     * each VI that is using a sub-allocated block from the piobuf.
     */
    min_vi_count = edcp->edc_min_vi_count;
    max_vi_count =
        edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;

    /* Ensure that the previously attached driver's VIs are freed */
    if ((rc = efx_mcdi_free_vis(enp)) != 0)

    /*
     * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
     * fails then retrying the request for fewer VI resources may succeed.
     */
    if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
        &vi_base, &vi_count, &vi_shift)) != 0)

    EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);

    if (vi_count < min_vi_count) {

    enp->en_arch.ef10.ena_vi_base = vi_base;
    enp->en_arch.ef10.ena_vi_count = vi_count;
    enp->en_arch.ef10.ena_vi_shift = vi_shift;

    if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
        /* Not enough extra VIs to map piobufs */
        ef10_nic_free_piobufs(enp);

    enp->en_arch.ef10.ena_pio_write_vi_base =
        vi_count - enp->en_arch.ef10.ena_piobuf_count;

    EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
        EFX_VI_WINDOW_SHIFT_INVALID);
    EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
        EFX_VI_WINDOW_SHIFT_64K);
    vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;

    /* Save UC memory mapping details */
    enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
    if (enp->en_arch.ef10.ena_piobuf_count > 0) {
        enp->en_arch.ef10.ena_uc_mem_map_size =
            (vi_window_size *
            enp->en_arch.ef10.ena_pio_write_vi_base);
    } else {
        enp->en_arch.ef10.ena_uc_mem_map_size =
            (vi_window_size *
            enp->en_arch.ef10.ena_vi_count);
    }

    /* Save WC memory mapping details */
    enp->en_arch.ef10.ena_wc_mem_map_offset =
        enp->en_arch.ef10.ena_uc_mem_map_offset +
        enp->en_arch.ef10.ena_uc_mem_map_size;

    enp->en_arch.ef10.ena_wc_mem_map_size =
        (vi_window_size *
        enp->en_arch.ef10.ena_piobuf_count);

    /* Link piobufs to extra VIs in WC mapping */
    if (enp->en_arch.ef10.ena_piobuf_count > 0) {
        for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
            rc = efx_mcdi_link_piobuf(enp,
                enp->en_arch.ef10.ena_pio_write_vi_base + i,
                enp->en_arch.ef10.ena_piobuf_handle[i]);
        }
    }

    /*
     * Allocate a vAdaptor attached to our upstream vPort/pPort.
     *
     * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
     * driver has yet to bring up the EVB port. See bug 56147. In this case,
     * retry the request several times after waiting a while. The wait time
     * between retries starts small (10ms) and exponentially increases.
     * Total wait time is a little over two seconds. Retry logic in the
     * client driver may mean this whole loop is repeated if it continues to
     * fail.
     */
    while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
        if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
            (rc != ENOENT)) {
            /*
             * Do not retry alloc for PF, or for other errors on
             * a VF.
             */

        /* VF startup before PF is ready. Retry allocation. */

        /* Too many attempts */

        EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
        EFSYS_SLEEP(delay_us);

        if (delay_us < 500000)
            delay_us *= 2;
    }

    enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
    enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;

    ef10_nic_free_piobufs(enp);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
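/*
 * Illustrative sketch (not part of the driver): the shape of the vadaptor
 * allocation retry loop above, shown standalone. The delay starts at 10ms
 * and doubles until it reaches the 500ms cap, so roughly ten attempts span
 * a little over two seconds. The function name and the attempt limit are
 * hypothetical; only ENOENT (EVB port not yet created by the PF) is retried.
 */
static    __checkReturn    efx_rc_t
example_vadaptor_alloc_with_backoff(
    __in        efx_nic_t *enp)
{
    unsigned int delay_us = 10000;    /* 10ms initial delay */
    unsigned int retry;
    efx_rc_t rc;

    for (retry = 0; retry < 10; retry++) {
        if ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) == 0)
            return (0);
        if (rc != ENOENT)
            return (rc);    /* do not retry other errors */

        EFSYS_SLEEP(delay_us);
        if (delay_us < 500000)
            delay_us *= 2;    /* exponential backoff */
    }
    return (rc);
}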
    __checkReturn    efx_rc_t
ef10_nic_get_vi_pool(
    __in        efx_nic_t *enp,
    __out        uint32_t *vi_countp)
{
    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    /*
     * Report VIs that the client driver can use.
     * Do not include VIs used for PIO buffer writes.
     */
    *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
    __checkReturn    efx_rc_t
ef10_nic_get_bar_region(
    __in        efx_nic_t *enp,
    __in        efx_nic_region_t region,
    __out        uint32_t *offsetp,
    __out        size_t *sizep)
{
    EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
        enp->en_family == EFX_FAMILY_MEDFORD ||
        enp->en_family == EFX_FAMILY_MEDFORD2);

    /*
     * TODO: Specify host memory mapping alignment and granularity
     * in efx_drv_limits_t so that they can be taken into account
     * when allocating extra VIs for PIO writes.
     */
    switch (region) {
    case EFX_REGION_VI:
        /* UC mapped memory BAR region for VI registers */
        *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
        *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
        break;

    case EFX_REGION_PIO_WRITE_VI:
        /* WC mapped memory BAR region for piobuf writes */
        *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
        *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
        break;

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
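/*
 * Worked example (illustrative, not part of the driver): with an 8K VI
 * window (Huntington/Medford), 64 allocated VIs and 2 piobufs (assumed
 * values), the mappings set up in ef10_nic_init() and reported above are:
 *
 *    EFX_REGION_VI (UC):           offset 0,    size (64 - 2) * 8K = 496K
 *    EFX_REGION_PIO_WRITE_VI (WC): offset 496K, size 2 * 8K = 16K
 *
 * i.e. the write-combined region begins where the uncached region ends and
 * covers only the extra VIs reserved for PIO writes.
 */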
    __checkReturn    boolean_t
ef10_nic_hw_unavailable(
    __in        efx_nic_t *enp)
{
    if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
        return (B_TRUE);

    EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
    if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
        goto unavail;

    return (B_FALSE);

unavail:
    ef10_nic_set_hw_unavailable(enp);

    return (B_TRUE);
}

    void
ef10_nic_set_hw_unavailable(
    __in        efx_nic_t *enp)
{
    EFSYS_PROBE(hw_unavail);
    enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
}

    void
ef10_nic_fini(
    __in        efx_nic_t *enp)
{
    (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
    enp->en_vport_id = 0;

    /* Unlink piobufs from extra VIs in WC mapping */
    if (enp->en_arch.ef10.ena_piobuf_count > 0) {
        for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
            rc = efx_mcdi_unlink_piobuf(enp,
                enp->en_arch.ef10.ena_pio_write_vi_base + i);
        }
    }

    ef10_nic_free_piobufs(enp);

    (void) efx_mcdi_free_vis(enp);
    enp->en_arch.ef10.ena_vi_count = 0;
}

    void
ef10_nic_unprobe(
    __in        efx_nic_t *enp)
{
#if EFSYS_OPT_MON_STATS
    mcdi_mon_cfg_free(enp);
#endif /* EFSYS_OPT_MON_STATS */
    (void) efx_mcdi_drv_attach(enp, B_FALSE);
}
#if EFSYS_OPT_DIAG

    __checkReturn    efx_rc_t
ef10_nic_register_test(
    __in        efx_nic_t *enp)
{
    _NOTE(ARGUNUSED(enp))
    _NOTE(CONSTANTCONDITION)

    EFSYS_PROBE1(fail1, efx_rc_t, rc);

#endif /* EFSYS_OPT_DIAG */
#if EFSYS_OPT_FW_SUBVARIANT_AWARE

    __checkReturn    efx_rc_t
efx_mcdi_get_nic_global(
    __in        efx_nic_t *enp,
    __in        uint32_t key,
    __out        uint32_t *valuep)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN,
        MC_CMD_GET_NIC_GLOBAL_OUT_LEN);

    req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
    req.emr_out_buf = payload;
    req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;

    MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {

    *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);

    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    __checkReturn    efx_rc_t
efx_mcdi_set_nic_global(
    __in        efx_nic_t *enp,
    __in        uint32_t key,
    __in        uint32_t value)
{
    EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0);

    req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
    req.emr_in_buf = payload;
    req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
    req.emr_out_buf = NULL;
    req.emr_out_length = 0;

    MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
    MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);

    efx_mcdi_execute(enp, &req);

    if (req.emr_rc != 0) {

    EFSYS_PROBE1(fail1, efx_rc_t, rc);

#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */

#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */