1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
13 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
15 #include "ef10_tlv_layout.h"
/*
 * Query the MC for the port assigned to this PCI function
 * (MC_CMD_GET_PORT_ASSIGNMENT) and return it in *portp.
 * NOTE(review): interior lines (req/rc declarations, goto fail paths,
 * return) are elided in this listing — confirm against full source.
 */
17 __checkReturn efx_rc_t
18 efx_mcdi_get_port_assignment(
20 __out uint32_t *portp)
23 uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
27 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
28 enp->en_family == EFX_FAMILY_MEDFORD ||
29 enp->en_family == EFX_FAMILY_MEDFORD2);
/* Shared payload buffer serves as both MCDI request and response. */
31 (void) memset(payload, 0, sizeof (payload));
32 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
33 req.emr_in_buf = payload;
34 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
35 req.emr_out_buf = payload;
36 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
38 efx_mcdi_execute(enp, &req);
40 if (req.emr_rc != 0) {
/* Reject truncated responses before decoding. */
45 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
50 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
57 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fetch the supported port-mode bitmask (MC_CMD_GET_PORT_MODES).
 * *modesp is mandatory; *current_modep and *default_modep are optional
 * outputs filled only when the caller passes non-NULL pointers and the
 * response carries the corresponding fields.
 */
62 __checkReturn efx_rc_t
63 efx_mcdi_get_port_modes(
65 __out uint32_t *modesp,
66 __out_opt uint32_t *current_modep,
67 __out_opt uint32_t *default_modep)
70 uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
71 MC_CMD_GET_PORT_MODES_OUT_LEN)];
74 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
75 enp->en_family == EFX_FAMILY_MEDFORD ||
76 enp->en_family == EFX_FAMILY_MEDFORD2);
78 (void) memset(payload, 0, sizeof (payload));
79 req.emr_cmd = MC_CMD_GET_PORT_MODES;
80 req.emr_in_buf = payload;
81 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
82 req.emr_out_buf = payload;
83 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
85 efx_mcdi_execute(enp, &req);
87 if (req.emr_rc != 0) {
93 * Require only Modes and DefaultMode fields, unless the current mode
94 * was requested (CurrentMode field was added for Medford).
96 if (req.emr_out_length_used <
97 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
/* CurrentMode is a trailing dword; require it only if requested. */
101 if ((current_modep != NULL) && (req.emr_out_length_used <
102 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
107 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
109 if (current_modep != NULL) {
110 *current_modep = MCDI_OUT_DWORD(req,
111 GET_PORT_MODES_OUT_CURRENT_MODE);
114 if (default_modep != NULL) {
115 *default_modep = MCDI_OUT_DWORD(req,
116 GET_PORT_MODES_OUT_DEFAULT_MODE);
126 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Map a TLV port mode to the total port bandwidth in Mbit/s.
 * Writes the aggregate bandwidth of all links in the mode to
 * *bandwidth_mbpsp (e.g. 40G_10G_10G => 40000 + 2*10000).
 * NOTE(review): break statements / default case are elided here.
 */
131 __checkReturn efx_rc_t
132 ef10_nic_get_port_mode_bandwidth(
133 __in uint32_t port_mode,
134 __out uint32_t *bandwidth_mbpsp)
140 case TLV_PORT_MODE_10G:
143 case TLV_PORT_MODE_10G_10G:
144 bandwidth = 10000 * 2;
146 case TLV_PORT_MODE_10G_10G_10G_10G:
147 case TLV_PORT_MODE_10G_10G_10G_10G_Q:
148 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
149 case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
150 bandwidth = 10000 * 4;
152 case TLV_PORT_MODE_40G:
155 case TLV_PORT_MODE_40G_40G:
156 bandwidth = 40000 * 2;
158 case TLV_PORT_MODE_40G_10G_10G:
159 case TLV_PORT_MODE_10G_10G_40G:
160 bandwidth = 40000 + (10000 * 2);
167 *bandwidth_mbpsp = bandwidth;
172 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate a vAdaptor on the given upstream EVB port
 * (MC_CMD_VADAPTOR_ALLOC). The PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED
 * flag mirrors the datapath capability cached in the NIC config so a
 * later MAC change does not require tearing down installed filters.
 */
177 static __checkReturn efx_rc_t
178 efx_mcdi_vadaptor_alloc(
180 __in uint32_t port_id)
183 uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
184 MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
/* Caller must not already hold a vport allocation. */
187 EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
189 (void) memset(payload, 0, sizeof (payload));
190 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
191 req.emr_in_buf = payload;
192 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
193 req.emr_out_buf = payload;
194 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
196 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
197 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
198 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
199 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
201 efx_mcdi_execute(enp, &req);
203 if (req.emr_rc != 0) {
211 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Free the vAdaptor on the given upstream port (MC_CMD_VADAPTOR_FREE). */
216 static __checkReturn efx_rc_t
217 efx_mcdi_vadaptor_free(
219 __in uint32_t port_id)
222 uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
223 MC_CMD_VADAPTOR_FREE_OUT_LEN)];
226 (void) memset(payload, 0, sizeof (payload));
227 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
228 req.emr_in_buf = payload;
229 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
230 req.emr_out_buf = payload;
231 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
233 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
235 efx_mcdi_execute(enp, &req);
237 if (req.emr_rc != 0) {
245 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the PF's base MAC address (MC_CMD_GET_MAC_ADDRESSES).
 * mac_addrp is optional: when NULL the command still validates that at
 * least one address is assigned, without copying it out.
 */
250 __checkReturn efx_rc_t
251 efx_mcdi_get_mac_address_pf(
253 __out_ecount_opt(6) uint8_t mac_addrp[6])
256 uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
257 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
260 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
261 enp->en_family == EFX_FAMILY_MEDFORD ||
262 enp->en_family == EFX_FAMILY_MEDFORD2);
264 (void) memset(payload, 0, sizeof (payload));
265 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
266 req.emr_in_buf = payload;
267 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
268 req.emr_out_buf = payload;
269 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
271 efx_mcdi_execute(enp, &req);
273 if (req.emr_rc != 0) {
278 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
/* The function must have at least one MAC address assigned. */
283 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
288 if (mac_addrp != NULL) {
291 addrp = MCDI_OUT2(req, uint8_t,
292 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
294 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
304 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read a VF's MAC address from its assigned vPort
 * (MC_CMD_VPORT_GET_MAC_ADDRESSES on EVB_PORT_ID_ASSIGNED).
 * The response is variable-length, so only LENMIN is required and the
 * first address in the list is returned.
 */
309 __checkReturn efx_rc_t
310 efx_mcdi_get_mac_address_vf(
312 __out_ecount_opt(6) uint8_t mac_addrp[6])
315 uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
316 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
319 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
320 enp->en_family == EFX_FAMILY_MEDFORD ||
321 enp->en_family == EFX_FAMILY_MEDFORD2);
323 (void) memset(payload, 0, sizeof (payload));
324 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
325 req.emr_in_buf = payload;
326 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
327 req.emr_out_buf = payload;
328 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
330 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
331 EVB_PORT_ID_ASSIGNED);
333 efx_mcdi_execute(enp, &req);
335 if (req.emr_rc != 0) {
340 if (req.emr_out_length_used <
341 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
346 if (MCDI_OUT_DWORD(req,
347 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
352 if (mac_addrp != NULL) {
355 addrp = MCDI_OUT2(req, uint8_t,
356 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
358 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
368 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read system and DPCPU clock frequencies (MC_CMD_GET_CLOCK).
 * Both frequencies are validated as non-zero before success.
 * NOTE(review): the function name line is elided in this listing —
 * presumably efx_mcdi_get_clock; confirm against full source.
 */
373 __checkReturn efx_rc_t
376 __out uint32_t *sys_freqp,
377 __out uint32_t *dpcpu_freqp)
380 uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
381 MC_CMD_GET_CLOCK_OUT_LEN)];
384 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
385 enp->en_family == EFX_FAMILY_MEDFORD ||
386 enp->en_family == EFX_FAMILY_MEDFORD2);
388 (void) memset(payload, 0, sizeof (payload));
389 req.emr_cmd = MC_CMD_GET_CLOCK;
390 req.emr_in_buf = payload;
391 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
392 req.emr_out_buf = payload;
393 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
395 efx_mcdi_execute(enp, &req);
397 if (req.emr_rc != 0) {
402 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
/* Zero frequencies indicate a bogus firmware response. */
407 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
408 if (*sys_freqp == 0) {
412 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
413 if (*dpcpu_freqp == 0) {
427 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the RX datapath DMA end-padding configuration
 * (MC_CMD_GET_RXDP_CONFIG) and return the padding size in bytes in
 * *end_paddingp. When PAD_HOST_DMA is clear, padding is disabled.
 */
432 __checkReturn efx_rc_t
433 efx_mcdi_get_rxdp_config(
435 __out uint32_t *end_paddingp)
438 uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
439 MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
440 uint32_t end_padding;
443 memset(payload, 0, sizeof (payload));
444 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
445 req.emr_in_buf = payload;
446 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
447 req.emr_out_buf = payload;
448 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
450 efx_mcdi_execute(enp, &req);
451 if (req.emr_rc != 0) {
456 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
457 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
458 /* RX DMA end padding is disabled */
/* PAD_HOST_LEN encodes 64/128/256 byte padding; the SET_ enum values
 * are reused for decoding the GET_ response. */
461 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
462 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
463 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
466 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
469 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
478 *end_paddingp = end_padding;
485 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the interrupt vector configuration (MC_CMD_GET_VECTOR_CFG):
 * base vector, vectors per PF and vectors per VF. All three outputs
 * are optional; NULL pointers are simply skipped.
 */
490 __checkReturn efx_rc_t
491 efx_mcdi_get_vector_cfg(
493 __out_opt uint32_t *vec_basep,
494 __out_opt uint32_t *pf_nvecp,
495 __out_opt uint32_t *vf_nvecp)
498 uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
499 MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
502 (void) memset(payload, 0, sizeof (payload));
503 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
504 req.emr_in_buf = payload;
505 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
506 req.emr_out_buf = payload;
507 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
509 efx_mcdi_execute(enp, &req);
511 if (req.emr_rc != 0) {
516 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
521 if (vec_basep != NULL)
522 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
523 if (pf_nvecp != NULL)
524 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
525 if (vf_nvecp != NULL)
526 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
533 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate between min_vi_count and max_vi_count virtual interfaces
 * (MC_CMD_ALLOC_VIS). Returns the allocated base, count and — when the
 * extended response is available — the VI shift.
 * NOTE(review): the function name line is elided in this listing —
 * presumably efx_mcdi_alloc_vis; confirm against full source.
 */
538 static __checkReturn efx_rc_t
541 __in uint32_t min_vi_count,
542 __in uint32_t max_vi_count,
543 __out uint32_t *vi_basep,
544 __out uint32_t *vi_countp,
545 __out uint32_t *vi_shiftp)
548 uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
549 MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
552 if (vi_countp == NULL) {
557 (void) memset(payload, 0, sizeof (payload));
558 req.emr_cmd = MC_CMD_ALLOC_VIS;
559 req.emr_in_buf = payload;
560 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
561 req.emr_out_buf = payload;
/* Request the extended (EXT) response so VI_SHIFT can be reported. */
562 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
564 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
565 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
567 efx_mcdi_execute(enp, &req);
569 if (req.emr_rc != 0) {
/* Only the basic OUT length is mandatory; EXT fields are optional. */
574 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
579 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
580 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
582 /* Report VI_SHIFT if available (always zero for Huntington) */
583 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
586 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
595 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free all allocated VIs (MC_CMD_FREE_VIS — no request/response body).
 * Executed quietly since EALREADY is an expected benign outcome.
 * NOTE(review): the function name line is elided in this listing —
 * presumably efx_mcdi_free_vis; confirm against full source.
 */
601 static __checkReturn efx_rc_t
608 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
609 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
611 req.emr_cmd = MC_CMD_FREE_VIS;
612 req.emr_in_buf = NULL;
613 req.emr_in_length = 0;
614 req.emr_out_buf = NULL;
615 req.emr_out_length = 0;
617 efx_mcdi_execute_quiet(enp, &req);
619 /* Ignore EALREADY (no allocated VIs, so nothing to free) */
620 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
628 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate one PIO buffer (MC_CMD_ALLOC_PIOBUF) and return its handle.
 * Executed quietly: running out of PIO buffers is expected and handled
 * by the caller, so failures are not logged as errors.
 */
634 static __checkReturn efx_rc_t
635 efx_mcdi_alloc_piobuf(
637 __out efx_piobuf_handle_t *handlep)
640 uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
641 MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
644 if (handlep == NULL) {
649 (void) memset(payload, 0, sizeof (payload));
650 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
651 req.emr_in_buf = payload;
652 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
653 req.emr_out_buf = payload;
654 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
656 efx_mcdi_execute_quiet(enp, &req);
658 if (req.emr_rc != 0) {
663 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
668 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
677 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Free a previously allocated PIO buffer (MC_CMD_FREE_PIOBUF). */
682 static __checkReturn efx_rc_t
683 efx_mcdi_free_piobuf(
685 __in efx_piobuf_handle_t handle)
688 uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
689 MC_CMD_FREE_PIOBUF_OUT_LEN)];
692 (void) memset(payload, 0, sizeof (payload));
693 req.emr_cmd = MC_CMD_FREE_PIOBUF;
694 req.emr_in_buf = payload;
695 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
696 req.emr_out_buf = payload;
697 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
699 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
701 efx_mcdi_execute_quiet(enp, &req);
703 if (req.emr_rc != 0) {
711 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Link a PIO buffer to a TXQ instance (MC_CMD_LINK_PIOBUF) so that the
 * queue at vi_index can perform PIO writes through it.
 */
716 static __checkReturn efx_rc_t
717 efx_mcdi_link_piobuf(
719 __in uint32_t vi_index,
720 __in efx_piobuf_handle_t handle)
723 uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
724 MC_CMD_LINK_PIOBUF_OUT_LEN)];
727 (void) memset(payload, 0, sizeof (payload));
728 req.emr_cmd = MC_CMD_LINK_PIOBUF;
729 req.emr_in_buf = payload;
730 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
731 req.emr_out_buf = payload;
732 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
734 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
735 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
737 efx_mcdi_execute(enp, &req);
739 if (req.emr_rc != 0) {
747 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Unlink the PIO buffer from a TXQ instance (MC_CMD_UNLINK_PIOBUF).
 * Executed quietly — unlinking a queue with no linked buffer is a
 * tolerable failure for callers.
 */
752 static __checkReturn efx_rc_t
753 efx_mcdi_unlink_piobuf(
755 __in uint32_t vi_index)
758 uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
759 MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
762 (void) memset(payload, 0, sizeof (payload));
763 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
764 req.emr_in_buf = payload;
765 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
766 req.emr_out_buf = payload;
767 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
769 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
771 efx_mcdi_execute_quiet(enp, &req);
773 if (req.emr_rc != 0) {
781 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Best-effort allocation of up to max_piobuf_count PIO buffers, caching
 * handles and clearing the per-buffer sub-allocation maps in
 * enp->en_arch.ef10. On a mid-loop failure the elided tail rolls back:
 * every buffer allocated so far is freed and the count reset to zero.
 */
787 ef10_nic_alloc_piobufs(
789 __in uint32_t max_piobuf_count)
791 efx_piobuf_handle_t *handlep;
794 EFSYS_ASSERT3U(max_piobuf_count, <=,
795 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
797 enp->en_arch.ef10.ena_piobuf_count = 0;
799 for (i = 0; i < max_piobuf_count; i++) {
800 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
/* Stop at the first allocation failure; partial results are kept
 * unless the (elided) failure path below unwinds them. */
802 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
805 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
806 enp->en_arch.ef10.ena_piobuf_count++;
812 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
813 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
815 (void) efx_mcdi_free_piobuf(enp, *handlep);
816 *handlep = EFX_PIOBUF_HANDLE_INVALID;
818 enp->en_arch.ef10.ena_piobuf_count = 0;
/*
 * Free all cached PIO buffers, invalidate their handles and reset the
 * count. Free failures are deliberately ignored (teardown path).
 */
823 ef10_nic_free_piobufs(
826 efx_piobuf_handle_t *handlep;
829 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
830 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
832 (void) efx_mcdi_free_piobuf(enp, *handlep);
833 *handlep = EFX_PIOBUF_HANDLE_INVALID;
835 enp->en_arch.ef10.ena_piobuf_count = 0;
838 /* Sub-allocate a block from a piobuf */
/*
 * Scan the per-buffer allocation bitmaps for a free block of
 * edc_pio_alloc_size bytes. On success returns the buffer number,
 * PIO buffer handle, block number, byte offset and size. Fails when
 * PIO is unconfigured (alloc size 0 / no buffers) or all blocks are
 * in use (the elided paths set rc accordingly).
 */
839 __checkReturn efx_rc_t
841 __inout efx_nic_t *enp,
842 __out uint32_t *bufnump,
843 __out efx_piobuf_handle_t *handlep,
844 __out uint32_t *blknump,
845 __out uint32_t *offsetp,
848 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
849 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
850 uint32_t blk_per_buf;
854 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
855 enp->en_family == EFX_FAMILY_MEDFORD ||
856 enp->en_family == EFX_FAMILY_MEDFORD2);
857 EFSYS_ASSERT(bufnump);
858 EFSYS_ASSERT(handlep);
859 EFSYS_ASSERT(blknump);
860 EFSYS_ASSERT(offsetp);
863 if ((edcp->edc_pio_alloc_size == 0) ||
864 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
868 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
870 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
/* One uint32_t bitmap per buffer: bit set => block allocated. */
871 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
876 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
877 for (blk = 0; blk < blk_per_buf; blk++) {
878 if ((*map & (1u << blk)) == 0) {
888 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
891 *sizep = edcp->edc_pio_alloc_size;
892 *offsetp = blk * (*sizep);
899 EFSYS_PROBE1(fail1, efx_rc_t, rc);
904 /* Free a piobuf sub-allocated block */
/*
 * Release the sub-allocation (bufnum, blknum) by clearing its bit in
 * the allocation bitmap. Rejects out-of-range indices and blocks that
 * are not currently allocated (double free).
 */
905 __checkReturn efx_rc_t
907 __inout efx_nic_t *enp,
908 __in uint32_t bufnum,
909 __in uint32_t blknum)
914 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
915 (blknum >= (8 * sizeof (*map)))) {
920 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
921 if ((*map & (1u << blknum)) == 0) {
925 *map &= ~(1u << blknum);
932 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Thin wrapper: link a PIO buffer to a TXQ via MCDI. */
937 __checkReturn efx_rc_t
939 __inout efx_nic_t *enp,
940 __in uint32_t vi_index,
941 __in efx_piobuf_handle_t handle)
943 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
/* Thin wrapper: unlink the PIO buffer from a TXQ via MCDI. */
946 __checkReturn efx_rc_t
948 __inout efx_nic_t *enp,
949 __in uint32_t vi_index)
951 return (efx_mcdi_unlink_piobuf(enp, vi_index));
/*
 * Read the number of PFs on this controller (MC_CMD_GET_PF_COUNT).
 * The count is a single byte in the response; firmware must report a
 * non-zero value.
 */
954 static __checkReturn efx_rc_t
955 ef10_mcdi_get_pf_count(
957 __out uint32_t *pf_countp)
960 uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
961 MC_CMD_GET_PF_COUNT_OUT_LEN)];
964 (void) memset(payload, 0, sizeof (payload));
965 req.emr_cmd = MC_CMD_GET_PF_COUNT;
966 req.emr_in_buf = payload;
967 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
968 req.emr_out_buf = payload;
969 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
971 efx_mcdi_execute(enp, &req);
973 if (req.emr_rc != 0) {
978 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
/* PF count field is one byte wide, hence the byte-pointer access. */
983 *pf_countp = *MCDI_OUT(req, uint8_t,
984 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
986 EFSYS_ASSERT(*pf_countp != 0);
993 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query datapath firmware capabilities (MC_CMD_GET_CAPABILITIES, up to
 * the V5 response) and populate the cached NIC config (enc_* fields).
 * Older firmware returns shorter responses, so each versioned field is
 * guarded by an emr_out_length_used check and given a conservative
 * default when absent.
 */
998 static __checkReturn efx_rc_t
999 ef10_get_datapath_caps(
1000 __in efx_nic_t *enp)
1002 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1004 uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
1005 MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)];
1008 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1012 (void) memset(payload, 0, sizeof (payload));
1013 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1014 req.emr_in_buf = payload;
1015 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1016 req.emr_out_buf = payload;
1017 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
1019 efx_mcdi_execute_quiet(enp, &req);
1021 if (req.emr_rc != 0) {
1026 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
/* FLAGS1 is present in every response version. */
1031 #define CAP_FLAGS1(_req, _flag) \
1032 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
1033 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
/* FLAGS2 exists only from the V2 response onwards; the length guard
 * makes the flag read false on older firmware. */
1035 #define CAP_FLAGS2(_req, _flag) \
1036 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1037 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
1038 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1041 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
1042 * We only support the 14 byte prefix here.
1044 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
1048 encp->enc_rx_prefix_size = 14;
1050 /* Check if the firmware supports additional RSS modes */
1051 if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
1052 encp->enc_rx_scale_additional_modes_supported = B_TRUE;
1054 encp->enc_rx_scale_additional_modes_supported = B_FALSE;
1056 /* Check if the firmware supports TSO */
1057 if (CAP_FLAGS1(req, TX_TSO))
1058 encp->enc_fw_assisted_tso_enabled = B_TRUE;
1060 encp->enc_fw_assisted_tso_enabled = B_FALSE;
1062 /* Check if the firmware supports FATSOv2 */
1063 if (CAP_FLAGS2(req, TX_TSO_V2)) {
1064 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1065 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1066 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1068 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1069 encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1072 /* Check if the firmware supports FATSOv2 encap */
1073 if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
1074 encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
1076 encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
1078 /* Check if the firmware has vadapter/vport/vswitch support */
1079 if (CAP_FLAGS1(req, EVB))
1080 encp->enc_datapath_cap_evb = B_TRUE;
1082 encp->enc_datapath_cap_evb = B_FALSE;
1084 /* Check if the firmware supports VLAN insertion */
1085 if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1086 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1088 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1090 /* Check if the firmware supports RX event batching */
1091 if (CAP_FLAGS1(req, RX_BATCHING))
1092 encp->enc_rx_batching_enabled = B_TRUE;
1094 encp->enc_rx_batching_enabled = B_FALSE;
1097 * Even if batching isn't reported as supported, we may still get
1098 * batched events (see bug61153).
1100 encp->enc_rx_batch_max = 16;
1102 /* Check if the firmware supports disabling scatter on RXQs */
1103 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1104 encp->enc_rx_disable_scatter_supported = B_TRUE;
1106 encp->enc_rx_disable_scatter_supported = B_FALSE;
1108 /* Check if the firmware supports packed stream mode */
1109 if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1110 encp->enc_rx_packed_stream_supported = B_TRUE;
1112 encp->enc_rx_packed_stream_supported = B_FALSE;
1115 * Check if the firmware supports configurable buffer sizes
1116 * for packed stream mode (otherwise buffer size is 1Mbyte)
1118 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1119 encp->enc_rx_var_packed_stream_supported = B_TRUE;
1121 encp->enc_rx_var_packed_stream_supported = B_FALSE;
1123 /* Check if the firmware supports equal stride super-buffer mode */
1124 if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
1125 encp->enc_rx_es_super_buffer_supported = B_TRUE;
1127 encp->enc_rx_es_super_buffer_supported = B_FALSE;
1129 /* Check if the firmware supports FW subvariant w/o Tx checksumming */
1130 if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
1131 encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
1133 encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
1135 /* Check if the firmware supports set mac with running filters */
1136 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1137 encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1139 encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1142 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1143 * specifying which parameters to configure.
1145 if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1146 encp->enc_enhanced_set_mac_supported = B_TRUE;
1148 encp->enc_enhanced_set_mac_supported = B_FALSE;
1151 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1152 * us to let the firmware choose the settings to use on an EVQ.
1154 if (CAP_FLAGS2(req, INIT_EVQ_V2))
1155 encp->enc_init_evq_v2_supported = B_TRUE;
1157 encp->enc_init_evq_v2_supported = B_FALSE;
1160 * Check if firmware-verified NVRAM updates must be used.
1162 * The firmware trusted installer requires all NVRAM updates to use
1163 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1164 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1165 * partition and report the result).
1167 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1168 encp->enc_nvram_update_verify_result_supported = B_TRUE;
1170 encp->enc_nvram_update_verify_result_supported = B_FALSE;
1173 * Check if firmware provides packet memory and Rx datapath
1176 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1177 encp->enc_pm_and_rxdp_counters = B_TRUE;
1179 encp->enc_pm_and_rxdp_counters = B_FALSE;
1182 * Check if the 40G MAC hardware is capable of reporting
1183 * statistics for Tx size bins.
1185 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1186 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1188 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1191 * Check if firmware supports VXLAN and NVGRE tunnels.
1192 * The capability indicates Geneve protocol support as well.
1194 if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1195 encp->enc_tunnel_encapsulations_supported =
1196 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1197 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1198 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1200 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1201 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1202 encp->enc_tunnel_config_udp_entries_max =
1203 EFX_TUNNEL_MAXNENTRIES;
1205 encp->enc_tunnel_config_udp_entries_max = 0;
1209 * Check if firmware reports the VI window mode.
1210 * Medford2 has a variable VI window size (8K, 16K or 64K).
1211 * Medford and Huntington have a fixed 8K VI window size.
1213 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1215 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1218 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1219 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1221 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1222 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1224 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1225 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1228 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1231 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1232 (enp->en_family == EFX_FAMILY_MEDFORD)) {
1233 /* Huntington and Medford have fixed 8K window size */
1234 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1236 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1239 /* Check if firmware supports extended MAC stats. */
1240 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1241 /* Extended stats buffer supported */
1242 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1243 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1245 /* Use Siena-compatible legacy MAC stats */
1246 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
/* FEC counters were introduced with the V2 MAC stats layout. */
1249 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1250 encp->enc_fec_counters = B_TRUE;
1252 encp->enc_fec_counters = B_FALSE;
1254 /* Check if the firmware provides head-of-line blocking counters */
1255 if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
1256 encp->enc_hlb_counters = B_TRUE;
1258 encp->enc_hlb_counters = B_FALSE;
1260 if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
1261 /* Only one exclusive RSS context is available per port. */
1262 encp->enc_rx_scale_max_exclusive_contexts = 1;
1264 switch (enp->en_family) {
1265 case EFX_FAMILY_MEDFORD2:
1266 encp->enc_rx_scale_hash_alg_mask =
1267 (1U << EFX_RX_HASHALG_TOEPLITZ);
1270 case EFX_FAMILY_MEDFORD:
1271 case EFX_FAMILY_HUNTINGTON:
1273 * Packed stream firmware variant maintains a
1274 * non-standard algorithm for hash computation.
1275 * It implies explicit XORing together
1276 * source + destination IP addresses (or last
1277 * four bytes in the case of IPv6) and using the
1278 * resulting value as the input to a Toeplitz hash.
1280 encp->enc_rx_scale_hash_alg_mask =
1281 (1U << EFX_RX_HASHALG_PACKED_STREAM);
1289 /* Port numbers cannot contribute to the hash value */
1290 encp->enc_rx_scale_l4_hash_supported = B_FALSE;
1293 * Maximum number of exclusive RSS contexts.
1294 * EF10 hardware supports 64 in total, but 6 are reserved
1295 * for shared contexts. They are a global resource so
1296 * not all may be available.
1298 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
1300 encp->enc_rx_scale_hash_alg_mask =
1301 (1U << EFX_RX_HASHALG_TOEPLITZ);
1304 * It is possible to use port numbers as
1305 * the input data for hash computation.
1307 encp->enc_rx_scale_l4_hash_supported = B_TRUE;
1309 /* Check if the firmware supports "FLAG" and "MARK" filter actions */
1310 if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
1311 encp->enc_filter_action_flag_supported = B_TRUE;
1313 encp->enc_filter_action_flag_supported = B_FALSE;
1315 if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
1316 encp->enc_filter_action_mark_supported = B_TRUE;
1318 encp->enc_filter_action_mark_supported = B_FALSE;
1320 /* Get maximum supported value for "MARK" filter action */
1321 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
1322 encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
1323 GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
1325 encp->enc_filter_action_mark_max = 0;
1341 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fallback privilege masks for firmware without MC_CMD_PRIVILEGE_MASK
 * support: legacy PFs are assumed fully privileged, legacy VFs get no
 * privileges at all.
 */
1347 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1348 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1349 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1350 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1351 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1352 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1353 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1354 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1355 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1356 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1357 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1358 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1360 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
/*
 * Obtain this function's privilege mask via efx_mcdi_privilege_mask().
 * When that MCDI is unsupported by old firmware, fall back to the
 * legacy assumption: PF gets EF10_LEGACY_PF_PRIVILEGE_MASK, VF gets
 * EF10_LEGACY_VF_PRIVILEGE_MASK (no privileges).
 */
1363 __checkReturn efx_rc_t
1364 ef10_get_privilege_mask(
1365 __in efx_nic_t *enp,
1366 __out uint32_t *maskp)
1368 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1372 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1377 /* Fallback for old firmware without privilege mask support */
1378 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1379 /* Assume PF has admin privilege */
1380 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1382 /* VF is always unprivileged by default */
1383 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1392 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1399 * Table of mapping schemes from port number to external number.
1401 * Each port number ultimately corresponds to a connector: either as part of
1402 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1403 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1404 * "Salina"). In general:
1406 * Port number (0-based)
1408 * port mapping (n:1)
1411 * External port number (normally 1-based)
1413 * fixed (1:1) or cable assembly (1:m)
1418 * The external numbering refers to the cages or magjacks on the board,
1419 * as visibly annotated on the board or back panel. This table describes
1420 * how to determine which external cage/magjack corresponds to the port
1421 * numbers used by the driver.
1423 * The count of adjacent port numbers that map to each external number,
1424 * and the offset in the numbering, is determined by the chip family and
1425 * current port mode.
1427 * For the Huntington family, the current port mode cannot be discovered,
1428 * but a single mapping is used by all modes for a given chip variant,
1429 * so the mapping used is instead the last match in the table to the full
1430 * set of port modes to which the NIC can be configured. Therefore the
1431 * ordering of entries in the mapping table is significant.
/*
 * Table mapping sets of TLV port modes to the external (cage/connector)
 * port numbering scheme for each EF10 chip family.  Entry order is
 * significant: for Huntington the *last* matching entry is used (see
 * the comment above this table).
 * NOTE(review): this excerpt elides some struct members and the
 * per-entry braces; treat field layout as incomplete.
 */
static struct ef10_external_port_map_s {
        efx_family_t    family;
        uint32_t        modes_mask;
} __ef10_external_port_mappings[] = {
        /*
         * Modes used by Huntington family controllers where each port
         * number maps to a separate cage.
         * SFN7x22F (Torino):
         */
                EFX_FAMILY_HUNTINGTON,
                (1U << TLV_PORT_MODE_10G) |                     /* mode 0 */
                (1U << TLV_PORT_MODE_10G_10G) |                 /* mode 2 */
                (1U << TLV_PORT_MODE_10G_10G_10G_10G),          /* mode 4 */
                1,      /* ports per cage */
        /*
         * Modes which for Huntington identify a chip variant where 2
         * adjacent port numbers map to each cage.
         */
                EFX_FAMILY_HUNTINGTON,
                (1U << TLV_PORT_MODE_40G) |                     /* mode 1 */
                (1U << TLV_PORT_MODE_40G_40G) |                 /* mode 3 */
                (1U << TLV_PORT_MODE_40G_10G_10G) |             /* mode 6 */
                (1U << TLV_PORT_MODE_10G_10G_40G),              /* mode 7 */
                2,      /* ports per cage */
        /*
         * Modes that on Medford allocate each port number to a separate
         * cage.
         */
                (1U << TLV_PORT_MODE_10G) |                     /* mode 0 */
                (1U << TLV_PORT_MODE_10G_10G),                  /* mode 2 */
                1,      /* ports per cage */
        /*
         * Modes that on Medford allocate 2 adjacent port numbers to each
         * cage.
         */
                (1U << TLV_PORT_MODE_40G) |                     /* mode 1 */
                (1U << TLV_PORT_MODE_40G_40G) |                 /* mode 3 */
                (1U << TLV_PORT_MODE_40G_10G_10G) |             /* mode 6 */
                (1U << TLV_PORT_MODE_10G_10G_40G) |             /* mode 7 */
                /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
                (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),    /* mode 9 */
                2,      /* ports per cage */
        /*
         * Modes that on Medford allocate 4 adjacent port numbers to each
         * connector, starting on cage 1.
         */
                (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) |       /* mode 5 */
                /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
                (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1),       /* mode 4 */
                4,      /* ports per cage */
        /*
         * Modes that on Medford allocate 4 adjacent port numbers to each
         * connector, starting on cage 2.
         */
                (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2),       /* mode 8 */
                4,      /* ports per cage */
        /*
         * Modes that on Medford2 allocate each port number to a separate
         * cage.
         */
                EFX_FAMILY_MEDFORD2,
                (1U << TLV_PORT_MODE_1x1_NA) |                  /* mode 0 */
                (1U << TLV_PORT_MODE_1x4_NA) |                  /* mode 1 */
                (1U << TLV_PORT_MODE_1x1_1x1) |                 /* mode 2 */
                (1U << TLV_PORT_MODE_1x2_NA) |                  /* mode 10 */
                (1U << TLV_PORT_MODE_1x2_1x2) |                 /* mode 12 */
                (1U << TLV_PORT_MODE_1x4_1x2) |                 /* mode 15 */
                (1U << TLV_PORT_MODE_1x2_1x4),                  /* mode 16 */
                1,      /* ports per cage */
        /*
         * FIXME: Some port modes are not representable in this mapping:
         *  - TLV_PORT_MODE_1x2_2x1 (mode 17):
         *
         * Modes that on Medford2 allocate 2 adjacent port numbers to each
         * cage, starting on cage 1.
         */
                EFX_FAMILY_MEDFORD2,
                (1U << TLV_PORT_MODE_1x4_1x4) |                 /* mode 3 */
                (1U << TLV_PORT_MODE_2x1_2x1) |                 /* mode 4 */
                (1U << TLV_PORT_MODE_1x4_2x1) |                 /* mode 6 */
                (1U << TLV_PORT_MODE_2x1_1x4) |                 /* mode 7 */
                (1U << TLV_PORT_MODE_2x2_NA) |                  /* mode 13 */
                (1U << TLV_PORT_MODE_2x1_1x2),                  /* mode 18 */
                2,      /* ports per cage */
        /*
         * Modes that on Medford2 allocate 2 adjacent port numbers to each
         * cage, starting on cage 2.
         */
                EFX_FAMILY_MEDFORD2,
                (1U << TLV_PORT_MODE_NA_2x2),                   /* mode 14 */
                2,      /* ports per cage */
        /*
         * Modes that on Medford2 allocate 4 adjacent port numbers to each
         * connector, starting on cage 1.
         */
                EFX_FAMILY_MEDFORD2,
                (1U << TLV_PORT_MODE_4x1_NA),                   /* mode 5 */
                4,      /* ports per cage */
        /*
         * Modes that on Medford2 allocate 4 adjacent port numbers to each
         * connector, starting on cage 2.
         */
                EFX_FAMILY_MEDFORD2,
                (1U << TLV_PORT_MODE_NA_4x1) |                  /* mode 8 */
                (1U << TLV_PORT_MODE_NA_1x2),                   /* mode 11 */
                4,      /* ports per cage */
/*
 * Map an internal (MCDI) port number to the external cage/connector
 * number printed on the board, using __ef10_external_port_mappings.
 * Falls back to a 1:1 mapping with offset 1 if no mode information is
 * available.
 * NOTE(review): this excerpt omits several lines (some local
 * declarations, braces and goto-fail labels); do not treat it as
 * complete.
 */
static  __checkReturn   efx_rc_t
ef10_external_port_mapping(
        __in            efx_nic_t *enp,
        __out           uint8_t *external_portp)
        uint32_t port_modes;
        int32_t count = 1;      /* Default 1-1 mapping */
        int32_t offset = 1;     /* Default starting external port number */

        /* Ask firmware for supported and current port modes */
        if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current,
                /*
                 * No current port mode information (i.e. Huntington)
                 * - infer mapping from available modes
                 */
                if ((rc = efx_mcdi_get_port_modes(enp,
                    &port_modes, NULL, NULL)) != 0) {
                        /*
                         * No port mode information available
                         * - use default mapping
                         */
                /* Only need to scan the current mode */
                port_modes = 1 << current;

        /*
         * Infer the internal port -> external number mapping from
         * the possible port modes for this NIC.
         */
        for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
                struct ef10_external_port_map_s *eepmp =
                    &__ef10_external_port_mappings[i];
                if (eepmp->family != enp->en_family)
                matches = (eepmp->modes_mask & port_modes);
                /*
                 * Some modes match. For some Huntington boards
                 * there will be multiple matches. The mapping on the
                 * last match is used.
                 */
                count = eepmp->count;
                offset = eepmp->offset;
                port_modes &= ~matches;

        if (port_modes != 0) {
                /* Some advertised modes are not supported */

        /*
         * Scale as required by last matched mode and then convert to
         * correctly offset numbering
         */
        *external_portp = (uint8_t)((port / count) + offset);

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read per-function board configuration from the MC and populate
 * enp->en_nic_cfg and enp->en_port: port numbers, MAC address, board
 * type, PHY capabilities, datapath caps, VI/interrupt limits and the
 * (informational) privilege mask.
 * NOTE(review): the function-name line is elided from this excerpt;
 * the call site below refers to it as ef10_nic_board_cfg().  Several
 * locals, goto targets and braces are also elided.
 */
static  __checkReturn   efx_rc_t
        __in            efx_nic_t *enp)
        const efx_nic_ops_t *enop = enp->en_enop;
        efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
        efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
        ef10_link_state_t els;
        efx_port_t *epp = &(enp->en_port);
        uint32_t board_type = 0;
        uint32_t base, nvec;
        uint8_t mac_addr[6] = { 0 };

        /* Get the (zero-based) MCDI port number */
        if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)

        /* EFX MCDI interface uses one-based port numbers */
        emip->emi_port = port + 1;

        /* Derive the external cage number for this port */
        if ((rc = ef10_external_port_mapping(enp, port,
            &encp->enc_external_port)) != 0)

        /*
         * Get PCIe function number from firmware (used for
         * per-function privilege and dynamic config info).
         * - PCIe PF: pf = PF number, vf = 0xffff.
         * - PCIe VF: pf = parent PF, vf = VF number.
         */
        if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)

        /* MAC address for this function */
        if (EFX_PCI_FUNCTION_IS_PF(encp)) {
                rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
                /*
                 * Disable static config checking, ONLY for manufacturing test
                 * and setup at the factory, to allow the static config to be
                 * written (rest of comment elided in excerpt).
                 */
#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
                /* Reject locally administered addresses from firmware */
                if ((rc == 0) && (mac_addr[0] & 0x02)) {
                        /*
                         * If the static config does not include a global MAC
                         * address pool then the board may return a locally
                         * administered MAC address (this should only happen on
                         * incorrectly programmed boards).
                         */
#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
                /* VF path: ask the parent PF's firmware for our MAC */
                rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);

        EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);

        /* Board configuration (legacy) */
        rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
        /* Unprivileged functions may not be able to read board cfg */

        encp->enc_board_type = board_type;
        encp->enc_clk_mult = 1; /* not used for EF10 */

        /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
        if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)

        /* Obtain the default PHY advertised capabilities */
        if ((rc = ef10_phy_get_link(enp, &els)) != 0)
        epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
        epp->ep_adv_cap_mask = els.els_adv_cap_mask;

        /* Check capabilities of running datapath firmware */
        if ((rc = ef10_get_datapath_caps(enp)) != 0)

        /* Alignment for WPTR updates */
        encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;

        /*
         * NOTE(review): an RX field mask is used for the TX descriptor
         * size limit here — verify ESF_DZ_RX_KER_BYTE_CNT is intended.
         */
        encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
        /* No boundary crossing limits */
        encp->enc_tx_dma_desc_boundary = 0;

        /*
         * Maximum number of bytes into the frame the TCP header can start for
         * firmware assisted TSO to work.
         */
        encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;

        /*
         * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
         * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
         * resources (allocated to this PCIe function), which is zero until
         * after we have allocated VIs.
         */
        encp->enc_evq_limit = 1024;
        encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
        encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;

        encp->enc_buftbl_limit = 0xFFFFFFFF;

        /* Get interrupt vector limits */
        if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
                if (EFX_PCI_FUNCTION_IS_PF(encp))
                /* Ignore error (cannot query vector limits from a VF). */

        encp->enc_intr_vec_base = base;
        encp->enc_intr_limit = nvec;

        /*
         * Get the current privilege mask. Note that this may be modified
         * dynamically, so this value is informational only. DO NOT use
         * the privilege mask to check for sufficient privileges, as that
         * can result in time-of-check/time-of-use bugs.
         */
        if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
        encp->enc_privilege_mask = mask;

        /* Get remaining controller-specific board config */
        if ((rc = enop->eno_board_cfg(enp)) != 0)

        /* Failure probe ladder (labels elided in excerpt) */
        EFSYS_PROBE(fail11);
        EFSYS_PROBE(fail10);
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * One-time NIC probe: clear any MC assertion state, attach the driver
 * to the firmware, read board configuration and set default driver
 * config limits.
 * NOTE(review): the function-name line is elided from this excerpt
 * (structure matches ef10_nic_probe); goto targets, braces and some
 * #endif partners are also elided.
 */
        __checkReturn   efx_rc_t
        __in            efx_nic_t *enp)
        efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
        efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);

        EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
            enp->en_family == EFX_FAMILY_MEDFORD ||
            enp->en_family == EFX_FAMILY_MEDFORD2);

        /* Read and clear any assertion state */
        if ((rc = efx_mcdi_read_assertion(enp)) != 0)

        /* Exit the assertion handler */
        if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)

        /* Attach this driver instance to the firmware */
        if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)

        if ((rc = ef10_nic_board_cfg(enp)) != 0)

        /*
         * Set default driver config limits (based on board config).
         *
         * FIXME: For now allocate a fixed number of VIs which is likely to be
         * sufficient and small enough to allow multiple functions on the same
         * (rest of comment elided in excerpt).
         */
        edcp->edc_min_vi_count = edcp->edc_max_vi_count =
            MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));

        /* The client driver must configure and enable PIO buffer support */
        edcp->edc_max_piobuf_count = 0;
        edcp->edc_pio_alloc_size = 0;

#if EFSYS_OPT_MAC_STATS
        /* Wipe the MAC statistics */
        if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)

#if EFSYS_OPT_LOOPBACK
        if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)

#if EFSYS_OPT_MON_STATS
        if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
                /* Unprivileged functions do not have access to sensors */

        encp->enc_features = enp->en_features;

        /* Failure path (labels elided in excerpt) */
#if EFSYS_OPT_MON_STATS
#if EFSYS_OPT_LOOPBACK
#if EFSYS_OPT_MAC_STATS
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Apply client-driver resource limits: clamp the requested EVQ/RXQ/TXQ
 * counts to the board limits, derive min/max VI counts, and size the
 * optional sub-allocated PIO buffer blocks.  PIO limits that are
 * inconsistent simply disable PIO rather than failing.
 * NOTE(review): some lines (locals, else-branch brace, assignment
 * target for blk_size, goto labels) are elided from this excerpt.
 */
        __checkReturn   efx_rc_t
ef10_nic_set_drv_limits(
        __inout         efx_nic_t *enp,
        __in            efx_drv_limits_t *edlp)
        efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
        efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
        uint32_t min_evq_count, max_evq_count;
        uint32_t min_rxq_count, max_rxq_count;
        uint32_t min_txq_count, max_txq_count;

        /* Get minimum required and maximum usable VI limits */
        min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
        min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
        min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);

        edcp->edc_min_vi_count =
            MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));

        max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
        max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
        max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);

        edcp->edc_max_vi_count =
            MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));

        /*
         * Check limits for sub-allocated piobuf blocks.
         * PIO is optional, so don't fail if the limits are incorrect.
         */
        if ((encp->enc_piobuf_size == 0) ||
            (encp->enc_piobuf_limit == 0) ||
            (edlp->edl_min_pio_alloc_size == 0) ||
            (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
                /* Disable PIO */
                edcp->edc_max_piobuf_count = 0;
                edcp->edc_pio_alloc_size = 0;
                /* else branch (elided): compute PIO block sizing */
                uint32_t blk_size, blk_count, blks_per_piobuf;

                /* blk_size assignment target elided in excerpt */
                    MAX(edlp->edl_min_pio_alloc_size,
                        encp->enc_piobuf_min_alloc_size);

                blks_per_piobuf = encp->enc_piobuf_size / blk_size;
                EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);

                blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);

                /* A zero max pio alloc count means unlimited */
                if ((edlp->edl_max_pio_alloc_count > 0) &&
                    (edlp->edl_max_pio_alloc_count < blk_count)) {
                        blk_count = edlp->edl_max_pio_alloc_count;

                edcp->edc_pio_alloc_size = blk_size;
                /* Round up to whole piobufs */
                edcp->edc_max_piobuf_count =
                    (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Reset this PCIe function's NIC resources via MC_CMD_ENTITY_RESET,
 * clearing any MC assertion state first (this path is used to recover
 * from BADASSERT failures).
 * NOTE(review): the function-name line and some locals/labels are
 * elided from this excerpt.
 */
        __checkReturn   efx_rc_t
        __in            efx_nic_t *enp)
        uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
                            MC_CMD_ENTITY_RESET_OUT_LEN)];

        /* ef10_nic_reset() is called to recover from BADASSERT failures. */
        if ((rc = efx_mcdi_read_assertion(enp)) != 0)
        if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)

        /* Build and issue the ENTITY_RESET request */
        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_ENTITY_RESET;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
        req.emr_out_buf = payload;
        req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;

        MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
            ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {

        /* Clear RX/TX DMA queue errors */
        enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Bring the NIC up for use: enable MCDI event logging, allocate
 * optional PIO buffers, allocate VIs (EVQ+RXQ+TXQ resources), compute
 * the UC/WC BAR mapping split, link piobufs to the extra write-only
 * VIs, and allocate a vAdaptor (with retries on a VF while the PF's
 * EVB port comes up).
 * NOTE(review): the function-name line, several locals, goto labels
 * and braces are elided from this excerpt.
 */
        __checkReturn   efx_rc_t
        __in            efx_nic_t *enp)
        efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
        uint32_t min_vi_count, max_vi_count;
        uint32_t vi_count, vi_base, vi_shift;
        uint32_t vi_window_size;

        EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
            enp->en_family == EFX_FAMILY_MEDFORD ||
            enp->en_family == EFX_FAMILY_MEDFORD2);

        /* Enable reporting of some events (e.g. link change) */
        if ((rc = efx_mcdi_log_ctrl(enp)) != 0)

        /* Allocate (optional) on-chip PIO buffers */
        ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);

        /*
         * For best performance, PIO writes should use a write-combined
         * (WC) memory mapping. Using a separate WC mapping for the PIO
         * aperture of each VI would be a burden to drivers (and not
         * possible if the host page size is >4Kbyte).
         *
         * To avoid this we use a single uncached (UC) mapping for VI
         * register access, and a single WC mapping for extra VIs used
         * for PIO writes.
         *
         * Each piobuf must be linked to a VI in the WC mapping, and to
         * each VI that is using a sub-allocated block from the piobuf.
         */
        min_vi_count = edcp->edc_min_vi_count;
        /* max_vi_count assignment target elided in excerpt */
            edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;

        /* Ensure that the previously attached driver's VIs are freed */
        if ((rc = efx_mcdi_free_vis(enp)) != 0)

        /*
         * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
         * fails then retrying the request for fewer VI resources may succeed.
         */
        if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
            &vi_base, &vi_count, &vi_shift)) != 0)

        EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);

        if (vi_count < min_vi_count) {

        enp->en_arch.ef10.ena_vi_base = vi_base;
        enp->en_arch.ef10.ena_vi_count = vi_count;
        enp->en_arch.ef10.ena_vi_shift = vi_shift;

        if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
                /* Not enough extra VIs to map piobufs */
                ef10_nic_free_piobufs(enp);

        /* Extra VIs at the top of the range are used for PIO writes */
        enp->en_arch.ef10.ena_pio_write_vi_base =
            vi_count - enp->en_arch.ef10.ena_piobuf_count;

        EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
            EFX_VI_WINDOW_SHIFT_INVALID);
        EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
            EFX_VI_WINDOW_SHIFT_64K);
        vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;

        /* Save UC memory mapping details */
        enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
        if (enp->en_arch.ef10.ena_piobuf_count > 0) {
                /* UC region covers only the non-PIO VIs */
                enp->en_arch.ef10.ena_uc_mem_map_size =
                    enp->en_arch.ef10.ena_pio_write_vi_base);
                /* No piobufs: UC region covers all VIs */
                enp->en_arch.ef10.ena_uc_mem_map_size =
                    enp->en_arch.ef10.ena_vi_count);

        /* Save WC memory mapping details */
        enp->en_arch.ef10.ena_wc_mem_map_offset =
            enp->en_arch.ef10.ena_uc_mem_map_offset +
            enp->en_arch.ef10.ena_uc_mem_map_size;

        enp->en_arch.ef10.ena_wc_mem_map_size =
            enp->en_arch.ef10.ena_piobuf_count);

        /* Link piobufs to extra VIs in WC mapping */
        if (enp->en_arch.ef10.ena_piobuf_count > 0) {
                for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
                        rc = efx_mcdi_link_piobuf(enp,
                            enp->en_arch.ef10.ena_pio_write_vi_base + i,
                            enp->en_arch.ef10.ena_piobuf_handle[i]);

        /*
         * Allocate a vAdaptor attached to our upstream vPort/pPort.
         *
         * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
         * driver has yet to bring up the EVB port. See bug 56147. In this case,
         * retry the request several times after waiting a while. The wait time
         * between retries starts small (10ms) and exponentially increases.
         * Total wait time is a little over two seconds. Retry logic in the
         * client driver may mean this whole loop is repeated if it continues to
         * fail (rest of comment elided in excerpt).
         */
        while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
                if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
                        /*
                         * Do not retry alloc for PF, or for other errors on
                         * a VF (condition continuation elided in excerpt).
                         */

                /* VF startup before PF is ready. Retry allocation. */
                        /* Too many attempts */

                EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
                EFSYS_SLEEP(delay_us);

                /* Exponential backoff, capped at 500ms per wait */
                if (delay_us < 500000)

        enp->en_vport_id = EVB_PORT_ID_ASSIGNED;

        /* Need to set up for MCDI v2 payloads (not supported or Huntington) */
        enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;

        /* Failure path (labels elided in excerpt) */
        ef10_nic_free_piobufs(enp);

        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report the number of VIs available to the client driver.  VIs above
 * ena_pio_write_vi_base are reserved for PIO buffer writes and are
 * excluded from the count.
 */
        __checkReturn   efx_rc_t
ef10_nic_get_vi_pool(
        __in            efx_nic_t *enp,
        __out           uint32_t *vi_countp)
        EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
            enp->en_family == EFX_FAMILY_MEDFORD ||
            enp->en_family == EFX_FAMILY_MEDFORD2);

        /*
         * Report VIs that the client driver can use.
         * Do not include VIs used for PIO buffer writes.
         */
        *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
/*
 * Return the offset and size of the requested BAR sub-region: the UC
 * mapping for VI registers, or the WC mapping for PIO buffer writes.
 * NOTE(review): the switch statement, first case label and default
 * case are elided from this excerpt.
 */
        __checkReturn   efx_rc_t
ef10_nic_get_bar_region(
        __in            efx_nic_t *enp,
        __in            efx_nic_region_t region,
        __out           uint32_t *offsetp,
        __out           size_t *sizep)
        EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
            enp->en_family == EFX_FAMILY_MEDFORD ||
            enp->en_family == EFX_FAMILY_MEDFORD2);

        /*
         * TODO: Specify host memory mapping alignment and granularity
         * in efx_drv_limits_t so that they can be taken into account
         * when allocating extra VIs for PIO writes.
         */
        /* UC mapped memory BAR region for VI registers */
        *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
        *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;

        case EFX_REGION_PIO_WRITE_VI:
                /* WC mapped memory BAR region for piobuf writes */
                *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
                *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
        /*
         * Tear down resources allocated by init: free the vAdaptor,
         * unlink piobufs from their write VIs, free piobufs and VIs.
         * NOTE(review): the function signature line is elided from this
         * excerpt (structure matches ef10_nic_fini).
         */
        __in            efx_nic_t *enp)
        /* Release the vAdaptor; errors are deliberately ignored */
        (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
        enp->en_vport_id = 0;

        /* Unlink piobufs from extra VIs in WC mapping */
        if (enp->en_arch.ef10.ena_piobuf_count > 0) {
                for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
                        rc = efx_mcdi_unlink_piobuf(enp,
                            enp->en_arch.ef10.ena_pio_write_vi_base + i);

        ef10_nic_free_piobufs(enp);

        /* Free VIs; error deliberately ignored during teardown */
        (void) efx_mcdi_free_vis(enp);
        enp->en_arch.ef10.ena_vi_count = 0;
        /*
         * Undo probe: free monitor-stats config (if built) and detach
         * the driver from the firmware.
         * NOTE(review): the function signature line is elided from this
         * excerpt (structure matches ef10_nic_unprobe).
         */
        __in            efx_nic_t *enp)
#if EFSYS_OPT_MON_STATS
        mcdi_mon_cfg_free(enp);
#endif  /* EFSYS_OPT_MON_STATS */
        /* Detach; error deliberately ignored during teardown */
        (void) efx_mcdi_drv_attach(enp, B_FALSE);
/*
 * Diagnostic register test.  On EF10 the argument is unused in this
 * excerpt; the body between the lint annotations is elided.
 */
        __checkReturn   efx_rc_t
ef10_nic_register_test(
        __in            efx_nic_t *enp)
        _NOTE(ARGUNUSED(enp))
        _NOTE(CONSTANTCONDITION)

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
2385 #endif /* EFSYS_OPT_DIAG */
2387 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
/*
 * Read a NIC-global firmware variable (MC_CMD_GET_NIC_GLOBAL) selected
 * by 'key' and return its 32-bit value in *valuep.
 * NOTE(review): the 'key' parameter line and some locals/labels are
 * elided from this excerpt.
 */
        __checkReturn   efx_rc_t
efx_mcdi_get_nic_global(
        __in            efx_nic_t *enp,
        __out           uint32_t *valuep)
        uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN,
                            MC_CMD_GET_NIC_GLOBAL_OUT_LEN)];

        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
        req.emr_out_buf = payload;
        req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;

        MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {

        /* Response must be exactly the expected length */
        if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {

        *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Write a NIC-global firmware variable (MC_CMD_SET_NIC_GLOBAL):
 * sets the variable selected by 'key' to 'value'.  No response
 * payload is expected.
 * NOTE(review): the 'key' parameter line and some locals/labels are
 * elided from this excerpt.
 */
        __checkReturn   efx_rc_t
efx_mcdi_set_nic_global(
        __in            efx_nic_t *enp,
        __in            uint32_t value)
        uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN];

        (void) memset(payload, 0, sizeof (payload));
        req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
        /* No output expected from this command */
        req.emr_out_buf = NULL;
        req.emr_out_length = 0;

        MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
        MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {

        /* Failure probe (label elided in excerpt) */
        EFSYS_PROBE1(fail1, efx_rc_t, rc);
2468 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
2470 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */