1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
13 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
15 #include "ef10_tlv_layout.h"
/*
 * Query the MC for the port this PCI function is assigned to
 * (MC_CMD_GET_PORT_ASSIGNMENT); the port number is written to *portp.
 * NOTE(review): elided extract — local declarations, goto fail* error
 * paths, labels and the closing brace are missing between the
 * numbered lines below.
 */
17 __checkReturn efx_rc_t
18 efx_mcdi_get_port_assignment(
20 __out uint32_t *portp)
/* Request and response share one payload buffer, sized for the larger. */
23 uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
27 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
28 enp->en_family == EFX_FAMILY_MEDFORD);
30 (void) memset(payload, 0, sizeof (payload));
31 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
32 req.emr_in_buf = payload;
33 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
34 req.emr_out_buf = payload;
35 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
37 efx_mcdi_execute(enp, &req);
39 if (req.emr_rc != 0) {
/* Guard against a short response before decoding it. */
44 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
49 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
56 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the supported port modes bitmask (MC_CMD_GET_PORT_MODES) into
 * *modesp and, optionally, the current mode into *current_modep.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
61 __checkReturn efx_rc_t
62 efx_mcdi_get_port_modes(
64 __out uint32_t *modesp,
65 __out_opt uint32_t *current_modep)
68 uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
69 MC_CMD_GET_PORT_MODES_OUT_LEN)];
72 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
73 enp->en_family == EFX_FAMILY_MEDFORD);
75 (void) memset(payload, 0, sizeof (payload));
76 req.emr_cmd = MC_CMD_GET_PORT_MODES;
77 req.emr_in_buf = payload;
78 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
79 req.emr_out_buf = payload;
80 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
82 efx_mcdi_execute(enp, &req);
84 if (req.emr_rc != 0) {
90 * Require only Modes and DefaultMode fields, unless the current mode
91 * was requested (CurrentMode field was added for Medford).
93 if (req.emr_out_length_used <
94 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
/* CurrentMode is a trailing dword; only require it when asked for. */
98 if ((current_modep != NULL) && (req.emr_out_length_used <
99 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
104 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
106 if (current_modep != NULL) {
107 *current_modep = MCDI_OUT_DWORD(req,
108 GET_PORT_MODES_OUT_CURRENT_MODE);
118 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Translate a TLV port mode into the total bandwidth in Mbit/s
 * across all outputs for that mode (e.g. 2x40G -> 80000).
 * NOTE(review): elided extract — the switch header, break statements,
 * default case and error path are missing between numbered lines.
 */
123 __checkReturn efx_rc_t
124 ef10_nic_get_port_mode_bandwidth(
125 __in uint32_t port_mode,
126 __out uint32_t *bandwidth_mbpsp)
132 case TLV_PORT_MODE_10G:
135 case TLV_PORT_MODE_10G_10G:
136 bandwidth = 10000 * 2;
138 case TLV_PORT_MODE_10G_10G_10G_10G:
139 case TLV_PORT_MODE_10G_10G_10G_10G_Q:
140 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
141 case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
142 bandwidth = 10000 * 4;
144 case TLV_PORT_MODE_40G:
147 case TLV_PORT_MODE_40G_40G:
148 bandwidth = 40000 * 2;
150 case TLV_PORT_MODE_40G_10G_10G:
151 case TLV_PORT_MODE_10G_10G_40G:
152 bandwidth = 40000 + (10000 * 2);
159 *bandwidth_mbpsp = bandwidth;
164 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate a vAdaptor on the given upstream port
 * (MC_CMD_VADAPTOR_ALLOC). Must not already hold a vport id.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
169 static __checkReturn efx_rc_t
170 efx_mcdi_vadaptor_alloc(
172 __in uint32_t port_id)
175 uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
176 MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
/* Only one vAdaptor per function: en_vport_id must still be NULL. */
179 EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
181 (void) memset(payload, 0, sizeof (payload));
182 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
183 req.emr_in_buf = payload;
184 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
185 req.emr_out_buf = payload;
186 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
188 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
/* Propagate the "set MAC with filters installed" capability flag. */
189 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
190 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
191 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
193 efx_mcdi_execute(enp, &req);
195 if (req.emr_rc != 0) {
203 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free the vAdaptor on the given upstream port (MC_CMD_VADAPTOR_FREE).
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
208 static __checkReturn efx_rc_t
209 efx_mcdi_vadaptor_free(
211 __in uint32_t port_id)
214 uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
215 MC_CMD_VADAPTOR_FREE_OUT_LEN)];
218 (void) memset(payload, 0, sizeof (payload));
219 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
220 req.emr_in_buf = payload;
221 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
222 req.emr_out_buf = payload;
223 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
225 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
227 efx_mcdi_execute(enp, &req);
229 if (req.emr_rc != 0) {
237 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the PF's base MAC address (MC_CMD_GET_MAC_ADDRESSES) into
 * mac_addrp when it is non-NULL; fails if the MC reports no addresses.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
242 __checkReturn efx_rc_t
243 efx_mcdi_get_mac_address_pf(
245 __out_ecount_opt(6) uint8_t mac_addrp[6])
248 uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
249 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
252 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
253 enp->en_family == EFX_FAMILY_MEDFORD);
255 (void) memset(payload, 0, sizeof (payload));
256 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
257 req.emr_in_buf = payload;
258 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
259 req.emr_out_buf = payload;
260 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
262 efx_mcdi_execute(enp, &req);
264 if (req.emr_rc != 0) {
269 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
/* At least one MAC address must be present in the response. */
274 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
279 if (mac_addrp != NULL) {
282 addrp = MCDI_OUT2(req, uint8_t,
283 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
285 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
295 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the VF's MAC address via its assigned vPort
 * (MC_CMD_VPORT_GET_MAC_ADDRESSES) into mac_addrp when non-NULL.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
300 __checkReturn efx_rc_t
301 efx_mcdi_get_mac_address_vf(
303 __out_ecount_opt(6) uint8_t mac_addrp[6])
306 uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
307 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
310 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
311 enp->en_family == EFX_FAMILY_MEDFORD);
313 (void) memset(payload, 0, sizeof (payload));
314 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
315 req.emr_in_buf = payload;
316 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
317 req.emr_out_buf = payload;
318 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
/* Query the vPort already assigned to this function. */
320 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
321 EVB_PORT_ID_ASSIGNED);
323 efx_mcdi_execute(enp, &req);
325 if (req.emr_rc != 0) {
330 if (req.emr_out_length_used <
331 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
336 if (MCDI_OUT_DWORD(req,
337 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
342 if (mac_addrp != NULL) {
345 addrp = MCDI_OUT2(req, uint8_t,
346 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
348 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
358 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query system and DPCPU clock frequencies (MC_CMD_GET_CLOCK).
 * A zero frequency in the response is treated as an error.
 * NOTE(review): elided extract — the function-name line itself is
 * missing (presumably efx_mcdi_get_clock — confirm against the
 * original source), as are the error paths and closing brace.
 */
363 __checkReturn efx_rc_t
366 __out uint32_t *sys_freqp,
367 __out uint32_t *dpcpu_freqp)
370 uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
371 MC_CMD_GET_CLOCK_OUT_LEN)];
374 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
375 enp->en_family == EFX_FAMILY_MEDFORD);
377 (void) memset(payload, 0, sizeof (payload));
378 req.emr_cmd = MC_CMD_GET_CLOCK;
379 req.emr_in_buf = payload;
380 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
381 req.emr_out_buf = payload;
382 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
384 efx_mcdi_execute(enp, &req);
386 if (req.emr_rc != 0) {
391 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
396 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
397 if (*sys_freqp == 0) {
401 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
402 if (*dpcpu_freqp == 0) {
416 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query interrupt vector configuration (MC_CMD_GET_VECTOR_CFG):
 * vector base and per-PF / per-VF vector counts. All outputs optional.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
421 __checkReturn efx_rc_t
422 efx_mcdi_get_vector_cfg(
424 __out_opt uint32_t *vec_basep,
425 __out_opt uint32_t *pf_nvecp,
426 __out_opt uint32_t *vf_nvecp)
429 uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
430 MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
433 (void) memset(payload, 0, sizeof (payload));
434 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
435 req.emr_in_buf = payload;
436 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
437 req.emr_out_buf = payload;
438 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
440 efx_mcdi_execute(enp, &req);
442 if (req.emr_rc != 0) {
447 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
452 if (vec_basep != NULL)
453 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
454 if (pf_nvecp != NULL)
455 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
456 if (vf_nvecp != NULL)
457 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
464 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate VIs (EVQ+RXQ+TXQ resources) for this function
 * (MC_CMD_ALLOC_VIS). Requests between min_vi_count and max_vi_count;
 * returns base, actual count and VI shift.
 * NOTE(review): elided extract — the function-name line is missing
 * (presumably efx_mcdi_alloc_vis — confirm), as are error paths,
 * labels and the closing brace.
 */
469 static __checkReturn efx_rc_t
472 __in uint32_t min_vi_count,
473 __in uint32_t max_vi_count,
474 __out uint32_t *vi_basep,
475 __out uint32_t *vi_countp,
476 __out uint32_t *vi_shiftp)
479 uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
480 MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
483 if (vi_countp == NULL) {
488 (void) memset(payload, 0, sizeof (payload));
489 req.emr_cmd = MC_CMD_ALLOC_VIS;
490 req.emr_in_buf = payload;
491 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
492 req.emr_out_buf = payload;
/* Ask for the extended (EXT) response so VI_SHIFT can be reported. */
493 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
495 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
496 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
498 efx_mcdi_execute(enp, &req);
500 if (req.emr_rc != 0) {
505 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
510 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
511 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
513 /* Report VI_SHIFT if available (always zero for Huntington) */
514 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
517 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
526 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free all VIs owned by this function (MC_CMD_FREE_VIS).
 * The command has no payload in either direction.
 * NOTE(review): elided extract — the function-name line, error paths
 * and closing brace are missing.
 */
532 static __checkReturn efx_rc_t
539 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
540 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
542 req.emr_cmd = MC_CMD_FREE_VIS;
543 req.emr_in_buf = NULL;
544 req.emr_in_length = 0;
545 req.emr_out_buf = NULL;
546 req.emr_out_length = 0;
548 efx_mcdi_execute_quiet(enp, &req);
550 /* Ignore EALREADY (no allocated VIs, so nothing to free) */
551 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
559 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate one on-chip PIO buffer (MC_CMD_ALLOC_PIOBUF) and return
 * its handle in *handlep. Executed quietly: failure is expected when
 * piobufs are exhausted or unsupported.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
565 static __checkReturn efx_rc_t
566 efx_mcdi_alloc_piobuf(
568 __out efx_piobuf_handle_t *handlep)
571 uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
572 MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
575 if (handlep == NULL) {
580 (void) memset(payload, 0, sizeof (payload));
581 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
582 req.emr_in_buf = payload;
583 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
584 req.emr_out_buf = payload;
585 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
587 efx_mcdi_execute_quiet(enp, &req);
589 if (req.emr_rc != 0) {
594 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
599 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
608 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free a previously allocated PIO buffer (MC_CMD_FREE_PIOBUF).
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
613 static __checkReturn efx_rc_t
614 efx_mcdi_free_piobuf(
616 __in efx_piobuf_handle_t handle)
619 uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
620 MC_CMD_FREE_PIOBUF_OUT_LEN)];
623 (void) memset(payload, 0, sizeof (payload));
624 req.emr_cmd = MC_CMD_FREE_PIOBUF;
625 req.emr_in_buf = payload;
626 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
627 req.emr_out_buf = payload;
628 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
630 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
632 efx_mcdi_execute_quiet(enp, &req);
634 if (req.emr_rc != 0) {
642 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Link a PIO buffer to a TXQ instance / VI (MC_CMD_LINK_PIOBUF) so
 * the VI can perform PIO writes into it.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
647 static __checkReturn efx_rc_t
648 efx_mcdi_link_piobuf(
650 __in uint32_t vi_index,
651 __in efx_piobuf_handle_t handle)
654 uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
655 MC_CMD_LINK_PIOBUF_OUT_LEN)];
658 (void) memset(payload, 0, sizeof (payload));
659 req.emr_cmd = MC_CMD_LINK_PIOBUF;
660 req.emr_in_buf = payload;
661 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
662 req.emr_out_buf = payload;
663 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
665 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
666 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
668 efx_mcdi_execute(enp, &req);
670 if (req.emr_rc != 0) {
678 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Unlink any PIO buffer from a TXQ instance / VI
 * (MC_CMD_UNLINK_PIOBUF). Quiet execution: may be called on teardown
 * paths where no piobuf is linked.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
683 static __checkReturn efx_rc_t
684 efx_mcdi_unlink_piobuf(
686 __in uint32_t vi_index)
689 uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
690 MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
693 (void) memset(payload, 0, sizeof (payload));
694 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
695 req.emr_in_buf = payload;
696 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
697 req.emr_out_buf = payload;
698 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
700 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
702 efx_mcdi_execute_quiet(enp, &req);
704 if (req.emr_rc != 0) {
712 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Best-effort allocation of up to max_piobuf_count PIO buffers,
 * recording handles and clearing each buffer's sub-allocation map.
 * On a later failure path (elided), all allocated piobufs are freed
 * again and the count reset to zero.
 * NOTE(review): elided extract — return type line, loop braces and
 * intervening statements are missing.
 */
718 ef10_nic_alloc_piobufs(
720 __in uint32_t max_piobuf_count)
722 efx_piobuf_handle_t *handlep;
725 EFSYS_ASSERT3U(max_piobuf_count, <=,
726 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
728 enp->en_arch.ef10.ena_piobuf_count = 0;
730 for (i = 0; i < max_piobuf_count; i++) {
731 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
/* Stop at the first allocation failure; earlier buffers are kept. */
733 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
736 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
737 enp->en_arch.ef10.ena_piobuf_count++;
/* Failure cleanup: release everything allocated so far. */
743 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
744 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
746 efx_mcdi_free_piobuf(enp, *handlep);
747 *handlep = EFX_PIOBUF_HANDLE_INVALID;
749 enp->en_arch.ef10.ena_piobuf_count = 0;
/*
 * Free every allocated PIO buffer, invalidate the stored handles and
 * reset the count.
 * NOTE(review): elided extract — return type line and braces missing.
 */
754 ef10_nic_free_piobufs(
757 efx_piobuf_handle_t *handlep;
760 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
761 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
763 efx_mcdi_free_piobuf(enp, *handlep);
764 *handlep = EFX_PIOBUF_HANDLE_INVALID;
766 enp->en_arch.ef10.ena_piobuf_count = 0;
769 /* Sub-allocate a block from a piobuf */
/*
 * Scan the per-piobuf allocation bitmaps for a free block of
 * edc_pio_alloc_size bytes; on success reports the piobuf handle,
 * buffer/block numbers, byte offset and block size.
 * NOTE(review): elided extract — the function-name line is missing
 * (presumably ef10_nic_pio_alloc — confirm), along with the bitmap
 * set operation, loop braces and error paths.
 */
770 __checkReturn efx_rc_t
772 __inout efx_nic_t *enp,
773 __out uint32_t *bufnump,
774 __out efx_piobuf_handle_t *handlep,
775 __out uint32_t *blknump,
776 __out uint32_t *offsetp,
779 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
780 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
781 uint32_t blk_per_buf;
785 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
786 enp->en_family == EFX_FAMILY_MEDFORD);
787 EFSYS_ASSERT(bufnump);
788 EFSYS_ASSERT(handlep);
789 EFSYS_ASSERT(blknump);
790 EFSYS_ASSERT(offsetp);
/* PIO must have been configured and piobufs actually allocated. */
793 if ((edcp->edc_pio_alloc_size == 0) ||
794 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
798 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
800 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
801 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
/* One bit per block: block count must fit in the 32-bit map. */
806 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
807 for (blk = 0; blk < blk_per_buf; blk++) {
808 if ((*map & (1u << blk)) == 0) {
818 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
821 *sizep = edcp->edc_pio_alloc_size;
822 *offsetp = blk * (*sizep);
829 EFSYS_PROBE1(fail1, efx_rc_t, rc);
834 /* Free a piobuf sub-allocated block */
/*
 * Clear the bitmap bit for (bufnum, blknum); fails if the indices are
 * out of range or the block was not allocated.
 * NOTE(review): elided extract — the function-name line and error
 * paths are missing.
 */
835 __checkReturn efx_rc_t
837 __inout efx_nic_t *enp,
838 __in uint32_t bufnum,
839 __in uint32_t blknum)
844 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
845 (blknum >= (8 * sizeof (*map)))) {
850 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
/* Double-free detection: the bit must currently be set. */
851 if ((*map & (1u << blknum)) == 0) {
855 *map &= ~(1u << blknum);
862 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Thin wrapper: link a piobuf to a VI via MCDI (see efx_mcdi_link_piobuf). */
867 __checkReturn efx_rc_t
869 __inout efx_nic_t *enp,
870 __in uint32_t vi_index,
871 __in efx_piobuf_handle_t handle)
873 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
/* Thin wrapper: unlink the piobuf from a VI via MCDI (see efx_mcdi_unlink_piobuf). */
876 __checkReturn efx_rc_t
878 __inout efx_nic_t *enp,
879 __in uint32_t vi_index)
881 return (efx_mcdi_unlink_piobuf(enp, vi_index))
/*
 * Query the number of PFs on the controller (MC_CMD_GET_PF_COUNT).
 * The response field is a single byte, read directly from the payload.
 * NOTE(review): elided extract — error paths and closing brace missing.
 */
884 static __checkReturn efx_rc_t
885 ef10_mcdi_get_pf_count(
887 __out uint32_t *pf_countp)
890 uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
891 MC_CMD_GET_PF_COUNT_OUT_LEN)];
894 (void) memset(payload, 0, sizeof (payload));
895 req.emr_cmd = MC_CMD_GET_PF_COUNT;
896 req.emr_in_buf = payload;
897 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
898 req.emr_out_buf = payload;
899 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
901 efx_mcdi_execute(enp, &req);
903 if (req.emr_rc != 0) {
908 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
913 *pf_countp = *MCDI_OUT(req, uint8_t,
914 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
916 EFSYS_ASSERT(*pf_countp != 0);
923 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Populate the NIC config (encp) datapath capability flags from
 * MC_CMD_GET_CAPABILITIES (flags word 1 and 2, plus the FATSOv2
 * context count) and the PF count.
 * NOTE(review): elided extract — local declarations, goto fail*
 * statements, else branches and the closing brace are missing.
 */
928 __checkReturn efx_rc_t
929 ef10_get_datapath_caps(
932 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
938 if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL,
939 &flags2, &tso2nc)) != 0)
942 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
/* Helpers to test a single capability bit in flags word 1 / word 2. */
945 #define CAP_FLAG(flags1, field) \
946 ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
948 #define CAP_FLAG2(flags2, field) \
949 ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
952 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
953 * We only support the 14 byte prefix here.
955 if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
959 encp->enc_rx_prefix_size = 14;
961 /* Check if the firmware supports TSO */
962 encp->enc_fw_assisted_tso_enabled =
963 CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
965 /* Check if the firmware supports FATSOv2 */
966 encp->enc_fw_assisted_tso_v2_enabled =
967 CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
969 /* Get the number of TSO contexts (FATSOv2) */
970 encp->enc_fw_assisted_tso_v2_n_contexts =
971 CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
973 /* Check if the firmware has vadapter/vport/vswitch support */
974 encp->enc_datapath_cap_evb =
975 CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
977 /* Check if the firmware supports VLAN insertion */
978 encp->enc_hw_tx_insert_vlan_enabled =
979 CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
981 /* Check if the firmware supports RX event batching */
982 encp->enc_rx_batching_enabled =
983 CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
986 * Even if batching isn't reported as supported, we may still get
987 * batched events (see bug61153).
989 encp->enc_rx_batch_max = 16;
991 /* Check if the firmware supports disabling scatter on RXQs */
992 encp->enc_rx_disable_scatter_supported =
993 CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
995 /* Check if the firmware supports packed stream mode */
996 encp->enc_rx_packed_stream_supported =
997 CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
1000 * Check if the firmware supports configurable buffer sizes
1001 * for packed stream mode (otherwise buffer size is 1Mbyte)
1003 encp->enc_rx_var_packed_stream_supported =
1004 CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
1006 /* Check if the firmware supports set mac with running filters */
1007 encp->enc_allow_set_mac_with_installed_filters =
1008 CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
1012 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1013 * specifying which parameters to configure.
1015 encp->enc_enhanced_set_mac_supported =
1016 CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
1019 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1020 * us to let the firmware choose the settings to use on an EVQ.
1022 encp->enc_init_evq_v2_supported =
1023 CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
1026 * Check if firmware-verified NVRAM updates must be used.
1028 * The firmware trusted installer requires all NVRAM updates to use
1029 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1030 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1031 * partition and report the result).
1033 encp->enc_nvram_update_verify_result_supported =
1034 CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
1038 * Check if firmware provides packet memory and Rx datapath
1041 encp->enc_pm_and_rxdp_counters =
1042 CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
1045 * Check if the 40G MAC hardware is capable of reporting
1046 * statistics for Tx size bins.
1048 encp->enc_mac_stats_40g_tx_size_bins =
1049 CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
1052 * Check if firmware supports VXLAN and NVGRE tunnels.
1053 * The capability indicates Geneve protocol support as well.
1055 if (CAP_FLAG(flags, VXLAN_NVGRE)) {
1056 encp->enc_tunnel_encapsulations_supported =
1057 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1058 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1059 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1061 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1062 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1063 encp->enc_tunnel_config_udp_entries_max =
1064 EFX_TUNNEL_MAXNENTRIES;
1066 encp->enc_tunnel_config_udp_entries_max = 0;
1077 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fallback privilege masks for firmware that predates
 * MC_CMD_PRIVILEGE_MASK: assume the PF holds every privilege group
 * and the VF holds none (see ef10_get_privilege_mask below).
 */
1083 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1084 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1085 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1086 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1087 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1088 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1089 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1090 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1091 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1092 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1093 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1094 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1096 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
/*
 * Read this function's privilege mask from firmware, falling back to
 * the legacy PF/VF masks above when the firmware does not implement
 * MC_CMD_PRIVILEGE_MASK.
 * NOTE(review): elided extract — the success path storing the mask
 * and the closing brace are missing.
 */
1099 __checkReturn efx_rc_t
1100 ef10_get_privilege_mask(
1101 __in efx_nic_t *enp,
1102 __out uint32_t *maskp)
1104 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1108 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1113 /* Fallback for old firmware without privilege mask support */
1114 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1115 /* Assume PF has admin privilege */
1116 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1118 /* VF is always unprivileged by default */
1119 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1128 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1135 * Table of mapping schemes from port number to the number of the external
1136 * connector on the board. The external numbering does not distinguish
1137 * off-board separated outputs such as from multi-headed cables.
1139 * The count of adjacent port numbers that map to each external port
1140 * and the offset in the numbering, is determined by the chip family and
1141 * current port mode.
1143 * For the Huntington family, the current port mode cannot be discovered,
1144 * so the mapping used is instead the last match in the table to the full
1145 * set of port modes to which the NIC can be configured. Therefore the
1146 * ordering of entries in the mapping table is significant.
/*
 * NOTE(review): elided extract — some entries' family fields and the
 * count/offset values of each entry are missing from this sample.
 */
1149 efx_family_t family;
1150 uint32_t modes_mask;
1153 } __ef10_external_port_mappings[] = {
1154 /* Supported modes with 1 output per external port */
1156 EFX_FAMILY_HUNTINGTON,
1157 (1 << TLV_PORT_MODE_10G) |
1158 (1 << TLV_PORT_MODE_10G_10G) |
1159 (1 << TLV_PORT_MODE_10G_10G_10G_10G),
1165 (1 << TLV_PORT_MODE_10G) |
1166 (1 << TLV_PORT_MODE_10G_10G),
1170 /* Supported modes with 2 outputs per external port */
1172 EFX_FAMILY_HUNTINGTON,
1173 (1 << TLV_PORT_MODE_40G) |
1174 (1 << TLV_PORT_MODE_40G_40G) |
1175 (1 << TLV_PORT_MODE_40G_10G_10G) |
1176 (1 << TLV_PORT_MODE_10G_10G_40G),
1182 (1 << TLV_PORT_MODE_40G) |
1183 (1 << TLV_PORT_MODE_40G_40G) |
1184 (1 << TLV_PORT_MODE_40G_10G_10G) |
1185 (1 << TLV_PORT_MODE_10G_10G_40G) |
1186 (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
1190 /* Supported modes with 4 outputs per external port */
1193 (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
1194 (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
1200 (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
/*
 * Map a NIC internal port number to the external connector number
 * printed on the board, using __ef10_external_port_mappings above.
 * Falls back progressively: current mode -> all configurable modes ->
 * default 1-1 mapping.
 * NOTE(review): elided extract — local declarations (rc, current,
 * matches, i), some braces and the trailing return are missing.
 *
 * Fix: the original line "efx_mcdi_get_port_modes(enp, &port_modes,
 * ¤t)" contained a mis-encoded HTML entity; restored to the
 * intended "&current" (the variable is used below as "current").
 */
1206 __checkReturn efx_rc_t
1207 ef10_external_port_mapping(
1208 __in efx_nic_t *enp,
1210 __out uint8_t *external_portp)
1214 uint32_t port_modes;
1217 int32_t count = 1; /* Default 1-1 mapping */
1218 int32_t offset = 1; /* Default starting external port number */
1220 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
1222 * No current port mode information
1223 * - infer mapping from available modes
1225 if ((rc = efx_mcdi_get_port_modes(enp,
1226 &port_modes, NULL)) != 0) {
1228 * No port mode information available
1229 * - use default mapping
1234 /* Only need to scan the current mode */
1235 port_modes = 1 << current;
1239 * Infer the internal port -> external port mapping from
1240 * the possible port modes for this NIC.
1242 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1243 if (__ef10_external_port_mappings[i].family !=
1246 matches = (__ef10_external_port_mappings[i].modes_mask &
1249 count = __ef10_external_port_mappings[i].count;
1250 offset = __ef10_external_port_mappings[i].offset;
1251 port_modes &= ~matches;
1255 if (port_modes != 0) {
1256 /* Some advertised modes are not supported */
1263 * Scale as required by last matched mode and then convert to
1264 * correctly offset numbering
1266 *external_portp = (uint8_t)((port / count) + offset);
1270 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC probe: clear MC assertion state, attach the driver, run the
 * board-specific config hook, set default driver VI limits, and
 * gather optional stats/loopback/monitor configuration.
 * NOTE(review): elided extract — the function-name line is missing
 * (presumably ef10_nic_probe — confirm), as are goto fail*
 * statements, #endif lines, failN: labels and the closing brace.
 */
1276 __checkReturn efx_rc_t
1278 __in efx_nic_t *enp)
1280 const efx_nic_ops_t *enop = enp->en_enop;
1281 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1282 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1285 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1286 enp->en_family == EFX_FAMILY_MEDFORD);
1288 /* Read and clear any assertion state */
1289 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
1292 /* Exit the assertion handler */
1293 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
1297 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
1300 if ((rc = enop->eno_board_cfg(enp)) != 0)
1305 * Set default driver config limits (based on board config).
1307 * FIXME: For now allocate a fixed number of VIs which is likely to be
1308 * sufficient and small enough to allow multiple functions on the same
1311 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
1312 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
1314 /* The client driver must configure and enable PIO buffer support */
1315 edcp->edc_max_piobuf_count = 0;
1316 edcp->edc_pio_alloc_size = 0;
1318 #if EFSYS_OPT_MAC_STATS
1319 /* Wipe the MAC statistics */
1320 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
1324 #if EFSYS_OPT_LOOPBACK
1325 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
1329 #if EFSYS_OPT_MON_STATS
1330 if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
1331 /* Unprivileged functions do not have access to sensors */
1337 encp->enc_features = enp->en_features;
/* Unwind labels for the optional feature sections follow (elided). */
1341 #if EFSYS_OPT_MON_STATS
1345 #if EFSYS_OPT_LOOPBACK
1349 #if EFSYS_OPT_MAC_STATS
1360 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Convert client-driver limits (edlp) into the driver config (edcp):
 * derive min/max VI counts from the EVQ/RXQ/TXQ limits clamped to the
 * board limits, then size the optional piobuf sub-allocation scheme.
 * NOTE(review): elided extract — some declarations, braces and the
 * error path are missing.
 */
1365 __checkReturn efx_rc_t
1366 ef10_nic_set_drv_limits(
1367 __inout efx_nic_t *enp,
1368 __in efx_drv_limits_t *edlp)
1370 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1371 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1372 uint32_t min_evq_count, max_evq_count;
1373 uint32_t min_rxq_count, max_rxq_count;
1374 uint32_t min_txq_count, max_txq_count;
1382 /* Get minimum required and maximum usable VI limits */
1383 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
1384 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
1385 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
/* A VI bundles one EVQ+RXQ+TXQ, so the VI count is the max of the three. */
1387 edcp->edc_min_vi_count =
1388 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
1390 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
1391 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
1392 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
1394 edcp->edc_max_vi_count =
1395 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
1398 * Check limits for sub-allocated piobuf blocks.
1399 * PIO is optional, so don't fail if the limits are incorrect.
1401 if ((encp->enc_piobuf_size == 0) ||
1402 (encp->enc_piobuf_limit == 0) ||
1403 (edlp->edl_min_pio_alloc_size == 0) ||
1404 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
1406 edcp->edc_max_piobuf_count = 0;
1407 edcp->edc_pio_alloc_size = 0;
1409 uint32_t blk_size, blk_count, blks_per_piobuf;
1412 MAX(edlp->edl_min_pio_alloc_size,
1413 encp->enc_piobuf_min_alloc_size);
1415 blks_per_piobuf = encp->enc_piobuf_size / blk_size;
/* The per-piobuf allocation bitmap is 32 bits wide. */
1416 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
1418 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
1420 /* A zero max pio alloc count means unlimited */
1421 if ((edlp->edl_max_pio_alloc_count > 0) &&
1422 (edlp->edl_max_pio_alloc_count < blk_count)) {
1423 blk_count = edlp->edl_max_pio_alloc_count;
1426 edcp->edc_pio_alloc_size = blk_size;
/* Round up: enough piobufs to hold blk_count blocks. */
1427 edcp->edc_max_piobuf_count =
1428 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
1434 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Reset this function's resources via MC_CMD_ENTITY_RESET, first
 * clearing any MC assertion state (reset is also the BADASSERT
 * recovery path). Clears the RX/TX queue error reset flags on success.
 * NOTE(review): elided extract — local declarations, goto fail*
 * statements, labels and closing brace are missing.
 */
1440 __checkReturn efx_rc_t
1442 __in efx_nic_t *enp)
1445 uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
1446 MC_CMD_ENTITY_RESET_OUT_LEN)];
1449 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
1450 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
1452 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
1455 (void) memset(payload, 0, sizeof (payload));
1456 req.emr_cmd = MC_CMD_ENTITY_RESET;
1457 req.emr_in_buf = payload;
1458 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
1459 req.emr_out_buf = payload;
1460 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
1462 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
1463 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
1465 efx_mcdi_execute(enp, &req);
1467 if (req.emr_rc != 0) {
1472 /* Clear RX/TX DMA queue errors */
1473 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
1482 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC init: enable MCDI event logging, allocate optional PIO buffers,
 * allocate the VI pool (with extra VIs reserved for the write-combined
 * piobuf mapping), compute the UC/WC BAR mapping split, link piobufs,
 * and allocate a vAdaptor (with exponential-backoff retries on a VF
 * while the PF's EVB port is not yet up).
 * NOTE(review): elided extract — the function-name line is missing
 * (presumably ef10_nic_init — confirm), as are declarations, goto
 * statements, retry-loop bookkeeping, labels and the closing brace.
 */
1487 __checkReturn efx_rc_t
1489 __in efx_nic_t *enp)
1491 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1492 uint32_t min_vi_count, max_vi_count;
1493 uint32_t vi_count, vi_base, vi_shift;
1499 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1500 enp->en_family == EFX_FAMILY_MEDFORD);
1502 /* Enable reporting of some events (e.g. link change) */
1503 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
1506 /* Allocate (optional) on-chip PIO buffers */
1507 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
1510 * For best performance, PIO writes should use a write-combined
1511 * (WC) memory mapping. Using a separate WC mapping for the PIO
1512 * aperture of each VI would be a burden to drivers (and not
1513 * possible if the host page size is >4Kbyte).
1515 * To avoid this we use a single uncached (UC) mapping for VI
1516 * register access, and a single WC mapping for extra VIs used
1519 * Each piobuf must be linked to a VI in the WC mapping, and to
1520 * each VI that is using a sub-allocated block from the piobuf.
1522 min_vi_count = edcp->edc_min_vi_count;
/* One extra VI per piobuf, appended after the client-usable VIs. */
1524 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
1526 /* Ensure that the previously attached driver's VIs are freed */
1527 if ((rc = efx_mcdi_free_vis(enp)) != 0)
1531 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
1532 * fails then retrying the request for fewer VI resources may succeed.
1535 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
1536 &vi_base, &vi_count, &vi_shift)) != 0)
1539 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
1541 if (vi_count < min_vi_count) {
1546 enp->en_arch.ef10.ena_vi_base = vi_base;
1547 enp->en_arch.ef10.ena_vi_count = vi_count;
1548 enp->en_arch.ef10.ena_vi_shift = vi_shift;
1550 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
1551 /* Not enough extra VIs to map piobufs */
1552 ef10_nic_free_piobufs(enp);
1555 enp->en_arch.ef10.ena_pio_write_vi_base =
1556 vi_count - enp->en_arch.ef10.ena_piobuf_count;
1558 /* Save UC memory mapping details */
1559 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
1560 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
1561 enp->en_arch.ef10.ena_uc_mem_map_size =
1562 (ER_DZ_TX_PIOBUF_STEP *
1563 enp->en_arch.ef10.ena_pio_write_vi_base);
1565 enp->en_arch.ef10.ena_uc_mem_map_size =
1566 (ER_DZ_TX_PIOBUF_STEP *
1567 enp->en_arch.ef10.ena_vi_count);
1570 /* Save WC memory mapping details */
1571 enp->en_arch.ef10.ena_wc_mem_map_offset =
1572 enp->en_arch.ef10.ena_uc_mem_map_offset +
1573 enp->en_arch.ef10.ena_uc_mem_map_size;
1575 enp->en_arch.ef10.ena_wc_mem_map_size =
1576 (ER_DZ_TX_PIOBUF_STEP *
1577 enp->en_arch.ef10.ena_piobuf_count);
1579 /* Link piobufs to extra VIs in WC mapping */
1580 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
1581 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
1582 rc = efx_mcdi_link_piobuf(enp,
1583 enp->en_arch.ef10.ena_pio_write_vi_base + i,
1584 enp->en_arch.ef10.ena_piobuf_handle[i]);
1591 * Allocate a vAdaptor attached to our upstream vPort/pPort.
1593 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
1594 * driver has yet to bring up the EVB port. See bug 56147. In this case,
1595 * retry the request several times after waiting a while. The wait time
1596 * between retries starts small (10ms) and exponentially increases.
1597 * Total wait time is a little over two seconds. Retry logic in the
1598 * client driver may mean this whole loop is repeated if it continues to
1603 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
1604 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
1607 * Do not retry alloc for PF, or for other errors on
1613 /* VF startup before PF is ready. Retry allocation. */
1615 /* Too many attempts */
1619 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
1620 EFSYS_SLEEP(delay_us);
/* Double the delay each retry, capped at 500ms. */
1622 if (delay_us < 500000)
1626 enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
/* MCDI v2 SDU length is usable once init has progressed this far. */
1627 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
1642 ef10_nic_free_piobufs(enp);
1645 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report the number of VIs available to the client driver, excluding
 * the VIs reserved at the top of the range for PIO buffer writes.
 * NOTE(review): elided extract — the return statement and closing
 * brace are missing.
 */
1650 __checkReturn efx_rc_t
1651 ef10_nic_get_vi_pool(
1652 __in efx_nic_t *enp,
1653 __out uint32_t *vi_countp)
1655 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1656 enp->en_family == EFX_FAMILY_MEDFORD);
1659 * Report VIs that the client driver can use.
1660 * Do not include VIs used for PIO buffer writes.
1662 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
/*
 * Return the BAR offset and size of the requested memory region:
 * the UC-mapped VI register region, or the WC-mapped piobuf write
 * region (values computed during nic_init).
 * NOTE(review): elided extract — the switch header, first case label,
 * break statements, default case and closing brace are missing.
 */
1667 __checkReturn efx_rc_t
1668 ef10_nic_get_bar_region(
1669 __in efx_nic_t *enp,
1670 __in efx_nic_region_t region,
1671 __out uint32_t *offsetp,
1672 __out size_t *sizep)
1676 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1677 enp->en_family == EFX_FAMILY_MEDFORD);
1680 * TODO: Specify host memory mapping alignment and granularity
1681 * in efx_drv_limits_t so that they can be taken into account
1682 * when allocating extra VIs for PIO writes.
1686 /* UC mapped memory BAR region for VI registers */
1687 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
1688 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
1691 case EFX_REGION_PIO_WRITE_VI:
1692 /* WC mapped memory BAR region for piobuf writes */
1693 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
1694 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
1705 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC fini: release the vAdaptor, unlink piobufs from their WC-mapped
 * VIs, free piobufs and finally free the VI pool. Errors from the
 * teardown MCDI calls are deliberately ignored.
 * NOTE(review): elided extract — the function signature line above
 * this is missing (presumably ef10_nic_fini — confirm), as are loop
 * braces and the closing brace.
 */
1712 __in efx_nic_t *enp)
1717 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
1718 enp->en_vport_id = 0;
1720 /* Unlink piobufs from extra VIs in WC mapping */
1721 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
1722 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
1723 rc = efx_mcdi_unlink_piobuf(enp,
1724 enp->en_arch.ef10.ena_pio_write_vi_base + i);
1730 ef10_nic_free_piobufs(enp);
1732 (void) efx_mcdi_free_vis(enp);
1733 enp->en_arch.ef10.ena_vi_count = 0;
/*
 * NIC unprobe: tear down the optional monitor stats config and detach
 * the driver from the MC (result ignored — best-effort teardown).
 * NOTE(review): elided extract — the function signature line above
 * this is missing (presumably ef10_nic_unprobe — confirm).
 */
1738 __in efx_nic_t *enp)
1740 #if EFSYS_OPT_MON_STATS
1741 mcdi_mon_cfg_free(enp);
1742 #endif /* EFSYS_OPT_MON_STATS */
1743 (void) efx_mcdi_drv_attach(enp, B_FALSE);
/*
 * Diagnostic register test stub: EF10 has no testable registers here,
 * so the argument is unused and the body is effectively a no-op.
 * NOTE(review): elided extract — return statement, fail label and
 * closing brace are missing.
 */
1748 __checkReturn efx_rc_t
1749 ef10_nic_register_test(
1750 __in efx_nic_t *enp)
1755 _NOTE(ARGUNUSED(enp))
1756 _NOTE(CONSTANTCONDITION)
1766 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1771 #endif /* EFSYS_OPT_DIAG */
1774 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */