1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
13 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
15 #include "ef10_tlv_layout.h"
/*
 * Query the MC for this PCIe function's (zero-based) port assignment
 * via MC_CMD_GET_PORT_ASSIGNMENT, returning it in *portp.
 * NOTE(review): this extract elides some original lines (local
 * declarations, failure branches, returns); visible code untouched.
 */
17 __checkReturn efx_rc_t
18 efx_mcdi_get_port_assignment(
20 __out uint32_t *portp)
/* Single buffer shared between MCDI request and response payloads */
23 uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
27 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
28 enp->en_family == EFX_FAMILY_MEDFORD ||
29 enp->en_family == EFX_FAMILY_MEDFORD2);
31 (void) memset(payload, 0, sizeof (payload));
32 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
33 req.emr_in_buf = payload;
34 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
35 req.emr_out_buf = payload;
36 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
38 efx_mcdi_execute(enp, &req);
/* MCDI transport error (elided branch presumably sets rc and fails) */
40 if (req.emr_rc != 0) {
/* Short response: MC did not return the expected fixed-size payload */
45 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
50 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
57 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fetch the set of available port modes (*modesp) and, optionally, the
 * current mode (*current_modep) via MC_CMD_GET_PORT_MODES.
 * The CurrentMode output field is newer than Modes/DefaultMode, so the
 * response-length checks below are staged accordingly.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
62 __checkReturn efx_rc_t
63 efx_mcdi_get_port_modes(
65 __out uint32_t *modesp,
66 __out_opt uint32_t *current_modep)
69 uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
70 MC_CMD_GET_PORT_MODES_OUT_LEN)];
73 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
74 enp->en_family == EFX_FAMILY_MEDFORD ||
75 enp->en_family == EFX_FAMILY_MEDFORD2);
77 (void) memset(payload, 0, sizeof (payload));
78 req.emr_cmd = MC_CMD_GET_PORT_MODES;
79 req.emr_in_buf = payload;
80 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
81 req.emr_out_buf = payload;
82 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
84 efx_mcdi_execute(enp, &req);
86 if (req.emr_rc != 0) {
92 * Require only Modes and DefaultMode fields, unless the current mode
93 * was requested (CurrentMode field was added for Medford).
95 if (req.emr_out_length_used <
96 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
/* Caller asked for CurrentMode but response is too short to hold it */
100 if ((current_modep != NULL) && (req.emr_out_length_used <
101 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
106 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
108 if (current_modep != NULL) {
109 *current_modep = MCDI_OUT_DWORD(req,
110 GET_PORT_MODES_OUT_CURRENT_MODE);
120 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Map a TLV port mode to the total board bandwidth in Mbit/s
 * (sum of all lane speeds), returned in *bandwidth_mbpsp.
 * NOTE(review): break statements and the default/fail path are elided
 * in this extract; visible code untouched.
 */
125 __checkReturn efx_rc_t
126 ef10_nic_get_port_mode_bandwidth(
127 __in uint32_t port_mode,
128 __out uint32_t *bandwidth_mbpsp)
134 case TLV_PORT_MODE_10G:
137 case TLV_PORT_MODE_10G_10G:
138 bandwidth = 10000 * 2;
140 case TLV_PORT_MODE_10G_10G_10G_10G:
141 case TLV_PORT_MODE_10G_10G_10G_10G_Q:
142 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
143 case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
144 bandwidth = 10000 * 4;
146 case TLV_PORT_MODE_40G:
149 case TLV_PORT_MODE_40G_40G:
150 bandwidth = 40000 * 2;
/* Mixed modes: one 40G lane group plus two 10G lanes */
152 case TLV_PORT_MODE_40G_10G_10G:
153 case TLV_PORT_MODE_10G_10G_40G:
154 bandwidth = 40000 + (10000 * 2);
161 *bandwidth_mbpsp = bandwidth;
166 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate a vadaptor on the given upstream EVB port via
 * MC_CMD_VADAPTOR_ALLOC.  The PERMIT_SET_MAC... flag is passed through
 * from the datapath capability cached in the NIC config.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
171 static __checkReturn efx_rc_t
172 efx_mcdi_vadaptor_alloc(
174 __in uint32_t port_id)
177 uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
178 MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
/* A vadaptor must not already be allocated for this NIC */
181 EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
183 (void) memset(payload, 0, sizeof (payload));
184 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
185 req.emr_in_buf = payload;
186 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
187 req.emr_out_buf = payload;
188 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
190 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
191 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
192 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
193 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
195 efx_mcdi_execute(enp, &req);
197 if (req.emr_rc != 0) {
205 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free the vadaptor bound to the given upstream EVB port via
 * MC_CMD_VADAPTOR_FREE.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
210 static __checkReturn efx_rc_t
211 efx_mcdi_vadaptor_free(
213 __in uint32_t port_id)
216 uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
217 MC_CMD_VADAPTOR_FREE_OUT_LEN)];
220 (void) memset(payload, 0, sizeof (payload));
221 req.emr_cmd = MC_CMD_VADAPTOR_FREE;
222 req.emr_in_buf = payload;
223 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
224 req.emr_out_buf = payload;
225 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
227 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
229 efx_mcdi_execute(enp, &req);
231 if (req.emr_rc != 0) {
239 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the PF's base MAC address from the MC via
 * MC_CMD_GET_MAC_ADDRESSES.  Copies the first address into mac_addrp
 * when non-NULL; a zero MAC_COUNT is treated as a failure.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
244 __checkReturn efx_rc_t
245 efx_mcdi_get_mac_address_pf(
247 __out_ecount_opt(6) uint8_t mac_addrp[6])
250 uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
251 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
254 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
255 enp->en_family == EFX_FAMILY_MEDFORD ||
256 enp->en_family == EFX_FAMILY_MEDFORD2);
258 (void) memset(payload, 0, sizeof (payload));
259 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
260 req.emr_in_buf = payload;
261 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
262 req.emr_out_buf = payload;
263 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
265 efx_mcdi_execute(enp, &req);
267 if (req.emr_rc != 0) {
272 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
/* At least one MAC address must have been allocated to this PF */
277 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
282 if (mac_addrp != NULL) {
285 addrp = MCDI_OUT2(req, uint8_t,
286 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
288 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
298 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read a VF's MAC address from its assigned vport via
 * MC_CMD_VPORT_GET_MAC_ADDRESSES (variable-length response, hence the
 * LENMAX buffer and LENMIN validity check).
 * NOTE(review): extract elides some lines; visible code untouched.
 */
303 __checkReturn efx_rc_t
304 efx_mcdi_get_mac_address_vf(
306 __out_ecount_opt(6) uint8_t mac_addrp[6])
309 uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
310 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
313 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
314 enp->en_family == EFX_FAMILY_MEDFORD ||
315 enp->en_family == EFX_FAMILY_MEDFORD2);
317 (void) memset(payload, 0, sizeof (payload));
318 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
319 req.emr_in_buf = payload;
320 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
321 req.emr_out_buf = payload;
322 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
/* Query the vport already assigned to this function */
324 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
325 EVB_PORT_ID_ASSIGNED);
327 efx_mcdi_execute(enp, &req);
329 if (req.emr_rc != 0) {
334 if (req.emr_out_length_used <
335 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
340 if (MCDI_OUT_DWORD(req,
341 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
346 if (mac_addrp != NULL) {
349 addrp = MCDI_OUT2(req, uint8_t,
350 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
352 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
362 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the system and DPCPU clock frequencies via MC_CMD_GET_CLOCK.
 * A zero frequency in either field is rejected as invalid.
 * NOTE(review): the function-name line is elided from this extract --
 * presumably efx_mcdi_get_clock; confirm against the full source.
 */
367 __checkReturn efx_rc_t
370 __out uint32_t *sys_freqp,
371 __out uint32_t *dpcpu_freqp)
374 uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
375 MC_CMD_GET_CLOCK_OUT_LEN)];
378 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
379 enp->en_family == EFX_FAMILY_MEDFORD ||
380 enp->en_family == EFX_FAMILY_MEDFORD2);
382 (void) memset(payload, 0, sizeof (payload));
383 req.emr_cmd = MC_CMD_GET_CLOCK;
384 req.emr_in_buf = payload;
385 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
386 req.emr_out_buf = payload;
387 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
389 efx_mcdi_execute(enp, &req);
391 if (req.emr_rc != 0) {
396 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
401 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
/* Zero frequency implies the MC response is bogus */
402 if (*sys_freqp == 0) {
406 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
407 if (*dpcpu_freqp == 0) {
421 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the RX datapath DMA end-padding configuration via
 * MC_CMD_GET_RXDP_CONFIG and return the padding in bytes via
 * *end_paddingp.  When PAD_HOST_DMA is clear padding is disabled;
 * otherwise PAD_HOST_LEN selects 64/128/256 bytes.
 * NOTE(review): extract elides some lines (e.g. the end_padding
 * assignments per case); visible code untouched.
 */
426 __checkReturn efx_rc_t
427 efx_mcdi_get_rxdp_config(
429 __out uint32_t *end_paddingp)
432 uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
433 MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
434 uint32_t end_padding;
437 memset(payload, 0, sizeof (payload));
438 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
439 req.emr_in_buf = payload;
440 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
441 req.emr_out_buf = payload;
442 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
444 efx_mcdi_execute(enp, &req);
445 if (req.emr_rc != 0) {
450 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
451 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
452 /* RX DMA end padding is disabled */
/* Note: padding length is encoded with the SET_RXDP_CONFIG constants */
455 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
456 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
457 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
460 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
463 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
472 *end_paddingp = end_padding;
479 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the interrupt vector configuration via MC_CMD_GET_VECTOR_CFG:
 * base vector number and vectors-per-PF / vectors-per-VF counts.
 * All three outputs are optional (NULL pointers are skipped).
 * NOTE(review): extract elides some lines; visible code untouched.
 */
484 __checkReturn efx_rc_t
485 efx_mcdi_get_vector_cfg(
487 __out_opt uint32_t *vec_basep,
488 __out_opt uint32_t *pf_nvecp,
489 __out_opt uint32_t *vf_nvecp)
492 uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
493 MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
496 (void) memset(payload, 0, sizeof (payload));
497 req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
498 req.emr_in_buf = payload;
499 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
500 req.emr_out_buf = payload;
501 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
503 efx_mcdi_execute(enp, &req);
505 if (req.emr_rc != 0) {
510 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
515 if (vec_basep != NULL)
516 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
517 if (pf_nvecp != NULL)
518 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
519 if (vf_nvecp != NULL)
520 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
527 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate between min_vi_count and max_vi_count VIs via
 * MC_CMD_ALLOC_VIS, returning the base, count and (when the firmware
 * provides the extended response) the VI window shift.
 * NOTE(review): function-name line is elided -- presumably
 * efx_mcdi_alloc_vis; confirm against the full source.
 */
532 static __checkReturn efx_rc_t
535 __in uint32_t min_vi_count,
536 __in uint32_t max_vi_count,
537 __out uint32_t *vi_basep,
538 __out uint32_t *vi_countp,
539 __out uint32_t *vi_shiftp)
/* Request the extended (EXT_OUT) response to obtain VI_SHIFT if any */
542 uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
543 MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
546 if (vi_countp == NULL) {
551 (void) memset(payload, 0, sizeof (payload));
552 req.emr_cmd = MC_CMD_ALLOC_VIS;
553 req.emr_in_buf = payload;
554 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
555 req.emr_out_buf = payload;
556 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
558 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
559 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
561 efx_mcdi_execute(enp, &req);
563 if (req.emr_rc != 0) {
/* The basic (non-EXT) response is the minimum acceptable length */
568 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
573 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
574 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
576 /* Report VI_SHIFT if available (always zero for Huntington) */
577 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
580 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
589 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free all VIs allocated to this function via MC_CMD_FREE_VIS.
 * The command carries no payload in either direction.
 * NOTE(review): function-name line is elided -- presumably
 * efx_mcdi_free_vis; confirm against the full source.
 */
595 static __checkReturn efx_rc_t
602 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
603 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
605 req.emr_cmd = MC_CMD_FREE_VIS;
606 req.emr_in_buf = NULL;
607 req.emr_in_length = 0;
608 req.emr_out_buf = NULL;
609 req.emr_out_length = 0;
/* Quiet variant: an error here is expected in the no-VIs case */
611 efx_mcdi_execute_quiet(enp, &req);
613 /* Ignore EALREADY (no allocated VIs, so nothing to free) */
614 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
622 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Allocate a PIO buffer via MC_CMD_ALLOC_PIOBUF, returning its handle
 * in *handlep.  Uses the quiet execute since allocation may fail
 * routinely (PIO buffers are a scarce shared resource).
 * NOTE(review): extract elides some lines; visible code untouched.
 */
628 static __checkReturn efx_rc_t
629 efx_mcdi_alloc_piobuf(
631 __out efx_piobuf_handle_t *handlep)
634 uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
635 MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
638 if (handlep == NULL) {
643 (void) memset(payload, 0, sizeof (payload));
644 req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
645 req.emr_in_buf = payload;
646 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
647 req.emr_out_buf = payload;
648 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
650 efx_mcdi_execute_quiet(enp, &req);
652 if (req.emr_rc != 0) {
657 if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
662 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
671 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Free a previously allocated PIO buffer via MC_CMD_FREE_PIOBUF.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
676 static __checkReturn efx_rc_t
677 efx_mcdi_free_piobuf(
679 __in efx_piobuf_handle_t handle)
682 uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
683 MC_CMD_FREE_PIOBUF_OUT_LEN)];
686 (void) memset(payload, 0, sizeof (payload));
687 req.emr_cmd = MC_CMD_FREE_PIOBUF;
688 req.emr_in_buf = payload;
689 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
690 req.emr_out_buf = payload;
691 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
693 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
695 efx_mcdi_execute_quiet(enp, &req);
697 if (req.emr_rc != 0) {
705 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Link a PIO buffer to a TXQ instance (VI index) via
 * MC_CMD_LINK_PIOBUF, enabling PIO writes from that TXQ.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
710 static __checkReturn efx_rc_t
711 efx_mcdi_link_piobuf(
713 __in uint32_t vi_index,
714 __in efx_piobuf_handle_t handle)
717 uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
718 MC_CMD_LINK_PIOBUF_OUT_LEN)];
721 (void) memset(payload, 0, sizeof (payload));
722 req.emr_cmd = MC_CMD_LINK_PIOBUF;
723 req.emr_in_buf = payload;
724 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
725 req.emr_out_buf = payload;
726 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
728 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
729 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
731 efx_mcdi_execute(enp, &req);
733 if (req.emr_rc != 0) {
741 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Unlink any PIO buffer from the given TXQ instance via
 * MC_CMD_UNLINK_PIOBUF.  Quiet execute: failure may be expected when
 * nothing is linked.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
746 static __checkReturn efx_rc_t
747 efx_mcdi_unlink_piobuf(
749 __in uint32_t vi_index)
752 uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
753 MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
756 (void) memset(payload, 0, sizeof (payload));
757 req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
758 req.emr_in_buf = payload;
759 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
760 req.emr_out_buf = payload;
761 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
763 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
765 efx_mcdi_execute_quiet(enp, &req);
767 if (req.emr_rc != 0) {
775 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Best-effort allocation of up to max_piobuf_count PIO buffers,
 * recording each handle and clearing its sub-allocation bitmap.
 * On a later failure (elided path) all buffers obtained so far are
 * freed again and the count reset to zero.
 * NOTE(review): extract elides some lines (return type, loop exits);
 * visible code untouched.
 */
781 ef10_nic_alloc_piobufs(
783 __in uint32_t max_piobuf_count)
785 efx_piobuf_handle_t *handlep;
788 EFSYS_ASSERT3U(max_piobuf_count, <=,
789 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
791 enp->en_arch.ef10.ena_piobuf_count = 0;
793 for (i = 0; i < max_piobuf_count; i++) {
794 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
/* Stop at the first allocation failure; keep what we already have */
796 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
799 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
800 enp->en_arch.ef10.ena_piobuf_count++;
/* Unwind path: release every buffer allocated above */
806 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
807 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
809 efx_mcdi_free_piobuf(enp, *handlep);
810 *handlep = EFX_PIOBUF_HANDLE_INVALID;
812 enp->en_arch.ef10.ena_piobuf_count = 0;
/*
 * Release every allocated PIO buffer, invalidating the stored handles
 * and resetting the count.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
817 ef10_nic_free_piobufs(
820 efx_piobuf_handle_t *handlep;
823 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
824 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
826 efx_mcdi_free_piobuf(enp, *handlep);
827 *handlep = EFX_PIOBUF_HANDLE_INVALID;
829 enp->en_arch.ef10.ena_piobuf_count = 0;
832 /* Sub-allocate a block from a piobuf */
/*
 * Scan the per-buffer allocation bitmaps for a free block of size
 * edc_pio_alloc_size and return its buffer handle, block number and
 * byte offset.  Each bitmap bit represents one block.
 * NOTE(review): function-name line and several outputs (bufnump/
 * blknump/sizep assignments) are elided from this extract --
 * presumably ef10_nic_pio_alloc; confirm against the full source.
 */
833 __checkReturn efx_rc_t
835 __inout efx_nic_t *enp,
836 __out uint32_t *bufnump,
837 __out efx_piobuf_handle_t *handlep,
838 __out uint32_t *blknump,
839 __out uint32_t *offsetp,
842 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
843 efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
844 uint32_t blk_per_buf;
848 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
849 enp->en_family == EFX_FAMILY_MEDFORD ||
850 enp->en_family == EFX_FAMILY_MEDFORD2);
851 EFSYS_ASSERT(bufnump);
852 EFSYS_ASSERT(handlep);
853 EFSYS_ASSERT(blknump);
854 EFSYS_ASSERT(offsetp);
/* No sub-allocation configured, or no PIO buffers were allocated */
857 if ((edcp->edc_pio_alloc_size == 0) ||
858 (enp->en_arch.ef10.ena_piobuf_count == 0)) {
862 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
864 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
865 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
/* Bitmap must be wide enough for one bit per block */
870 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
871 for (blk = 0; blk < blk_per_buf; blk++) {
872 if ((*map & (1u << blk)) == 0) {
882 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
885 *sizep = edcp->edc_pio_alloc_size;
886 *offsetp = blk * (*sizep);
893 EFSYS_PROBE1(fail1, efx_rc_t, rc);
898 /* Free a piobuf sub-allocated block */
/*
 * Clear the bitmap bit for (bufnum, blknum) after validating that the
 * buffer index and block number are in range and the block is in use.
 * NOTE(review): function-name line elided -- presumably
 * ef10_nic_pio_free; confirm against the full source.
 */
899 __checkReturn efx_rc_t
901 __inout efx_nic_t *enp,
902 __in uint32_t bufnum,
903 __in uint32_t blknum)
908 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
909 (blknum >= (8 * sizeof (*map)))) {
914 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
/* Double-free / never-allocated block is an error */
915 if ((*map & (1u << blknum)) == 0) {
919 *map &= ~(1u << blknum);
926 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Thin wrapper: link a PIO buffer to a VI's TXQ via MCDI.
 * NOTE(review): function-name line elided -- presumably
 * ef10_nic_pio_link; confirm against the full source.
 */
931 __checkReturn efx_rc_t
933 __inout efx_nic_t *enp,
934 __in uint32_t vi_index,
935 __in efx_piobuf_handle_t handle)
937 return (efx_mcdi_link_piobuf(enp, vi_index, handle));
/*
 * Thin wrapper: unlink any PIO buffer from a VI's TXQ via MCDI.
 * NOTE(review): function-name line elided -- presumably
 * ef10_nic_pio_unlink; confirm against the full source.
 */
940 __checkReturn efx_rc_t
942 __inout efx_nic_t *enp,
943 __in uint32_t vi_index)
945 return (efx_mcdi_unlink_piobuf(enp, vi_index));
/*
 * Query the number of PFs on the controller via MC_CMD_GET_PF_COUNT.
 * The result is a single byte in the response; a zero count is
 * asserted against as invalid.
 * NOTE(review): extract elides some lines; visible code untouched.
 */
948 static __checkReturn efx_rc_t
949 ef10_mcdi_get_pf_count(
951 __out uint32_t *pf_countp)
954 uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
955 MC_CMD_GET_PF_COUNT_OUT_LEN)];
958 (void) memset(payload, 0, sizeof (payload));
959 req.emr_cmd = MC_CMD_GET_PF_COUNT;
960 req.emr_in_buf = payload;
961 req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
962 req.emr_out_buf = payload;
963 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
965 efx_mcdi_execute(enp, &req);
967 if (req.emr_rc != 0) {
972 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
/* PF count is a byte field, read directly at its offset */
977 *pf_countp = *MCDI_OUT(req, uint8_t,
978 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
980 EFSYS_ASSERT(*pf_countp != 0);
987 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Interrogate the running datapath firmware via MC_CMD_GET_CAPABILITIES
 * (requesting the V4 extended response) and populate the capability
 * flags in the cached efx_nic_cfg_t.  Extended fields (FLAGS2, VI
 * window mode, MAC stats count) are only read when the response is
 * long enough, preserving compatibility with older firmware.
 * NOTE(review): extract elides some lines (fail paths, a couple of
 * assignments); visible code untouched.
 */
992 static __checkReturn efx_rc_t
993 ef10_get_datapath_caps(
996 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
998 uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
999 MC_CMD_GET_CAPABILITIES_V4_OUT_LEN)];
/* Cache the hardware PF count first; it lives in the same config */
1002 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
1006 (void) memset(payload, 0, sizeof (payload));
1007 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1008 req.emr_in_buf = payload;
1009 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1010 req.emr_out_buf = payload;
1011 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V4_OUT_LEN;
1013 efx_mcdi_execute_quiet(enp, &req);
1015 if (req.emr_rc != 0) {
/* The basic (V1) response is the minimum acceptable length */
1020 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
/* Test a capability bit in FLAGS1 (present in all response versions) */
1025 #define CAP_FLAGS1(_req, _flag) \
1026 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
1027 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
/* Test a FLAGS2 bit; false when the response predates V2 */
1029 #define CAP_FLAGS2(_req, _flag) \
1030 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
1031 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
1032 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
1035 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
1036 * We only support the 14 byte prefix here.
1038 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
1042 encp->enc_rx_prefix_size = 14;
1044 /* Check if the firmware supports TSO */
1045 if (CAP_FLAGS1(req, TX_TSO))
1046 encp->enc_fw_assisted_tso_enabled = B_TRUE;
1048 encp->enc_fw_assisted_tso_enabled = B_FALSE;
1050 /* Check if the firmware supports FATSOv2 */
1051 if (CAP_FLAGS2(req, TX_TSO_V2)) {
1052 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
1053 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
1054 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
1056 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
1057 encp->enc_fw_assisted_tso_v2_n_contexts = 0;
1060 /* Check if the firmware has vadapter/vport/vswitch support */
1061 if (CAP_FLAGS1(req, EVB))
1062 encp->enc_datapath_cap_evb = B_TRUE;
1064 encp->enc_datapath_cap_evb = B_FALSE;
1066 /* Check if the firmware supports VLAN insertion */
1067 if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
1068 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
1070 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
1072 /* Check if the firmware supports RX event batching */
1073 if (CAP_FLAGS1(req, RX_BATCHING))
1074 encp->enc_rx_batching_enabled = B_TRUE;
1076 encp->enc_rx_batching_enabled = B_FALSE;
1079 * Even if batching isn't reported as supported, we may still get
1080 * batched events (see bug61153).
1082 encp->enc_rx_batch_max = 16;
1084 /* Check if the firmware supports disabling scatter on RXQs */
1085 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
1086 encp->enc_rx_disable_scatter_supported = B_TRUE;
1088 encp->enc_rx_disable_scatter_supported = B_FALSE;
1090 /* Check if the firmware supports packed stream mode */
1091 if (CAP_FLAGS1(req, RX_PACKED_STREAM))
1092 encp->enc_rx_packed_stream_supported = B_TRUE;
1094 encp->enc_rx_packed_stream_supported = B_FALSE;
1097 * Check if the firmware supports configurable buffer sizes
1098 * for packed stream mode (otherwise buffer size is 1Mbyte)
1100 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
1101 encp->enc_rx_var_packed_stream_supported = B_TRUE;
1103 encp->enc_rx_var_packed_stream_supported = B_FALSE;
1105 /* Check if the firmware supports set mac with running filters */
1106 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
1107 encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
1109 encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
1112 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
1113 * specifying which parameters to configure.
1115 if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
1116 encp->enc_enhanced_set_mac_supported = B_TRUE;
1118 encp->enc_enhanced_set_mac_supported = B_FALSE;
1121 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
1122 * us to let the firmware choose the settings to use on an EVQ.
1124 if (CAP_FLAGS2(req, INIT_EVQ_V2))
1125 encp->enc_init_evq_v2_supported = B_TRUE;
1127 encp->enc_init_evq_v2_supported = B_FALSE;
1130 * Check if firmware-verified NVRAM updates must be used.
1132 * The firmware trusted installer requires all NVRAM updates to use
1133 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
1134 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
1135 * partition and report the result).
1137 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
1138 encp->enc_nvram_update_verify_result_supported = B_TRUE;
1140 encp->enc_nvram_update_verify_result_supported = B_FALSE;
1143 * Check if firmware provides packet memory and Rx datapath
1146 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
1147 encp->enc_pm_and_rxdp_counters = B_TRUE;
1149 encp->enc_pm_and_rxdp_counters = B_FALSE;
1152 * Check if the 40G MAC hardware is capable of reporting
1153 * statistics for Tx size bins.
1155 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
1156 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
1158 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
1161 * Check if firmware supports VXLAN and NVGRE tunnels.
1162 * The capability indicates Geneve protocol support as well.
1164 if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
1165 encp->enc_tunnel_encapsulations_supported =
1166 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
1167 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
1168 (1u << EFX_TUNNEL_PROTOCOL_NVGRE);
1170 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
1171 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
1172 encp->enc_tunnel_config_udp_entries_max =
1173 EFX_TUNNEL_MAXNENTRIES;
1175 encp->enc_tunnel_config_udp_entries_max = 0;
1179 * Check if firmware reports the VI window mode.
1180 * Medford2 has a variable VI window size (8K, 16K or 64K).
1181 * Medford and Huntington have a fixed 8K VI window size.
1183 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
1185 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
1188 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
1189 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1191 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
1192 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
1194 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
1195 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
1198 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1201 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
1202 (enp->en_family == EFX_FAMILY_MEDFORD)) {
1203 /* Huntington and Medford have fixed 8K window size */
1204 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
1206 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
1209 /* Check if firmware supports extended MAC stats. */
1210 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
1211 /* Extended stats buffer supported */
1212 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
1213 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
1215 /* Use Siena-compatible legacy MAC stats */
1216 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
/* FEC counters appeared with the V2 MAC stats layout */
1219 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
1220 encp->enc_fec_counters = B_TRUE;
1222 encp->enc_fec_counters = B_FALSE;
1236 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Fallback privilege masks for firmware that predates
 * MC_CMD_PRIVILEGE_MASK: a PF is assumed to hold the full legacy set
 * of privilege groups, a VF none at all.
 */
1242 #define EF10_LEGACY_PF_PRIVILEGE_MASK \
1243 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
1244 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
1245 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
1246 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
1247 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
1248 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
1249 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
1250 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
1251 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
1252 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
1253 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
1255 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0
/*
 * Obtain this function's privilege mask, preferring the firmware's
 * MC_CMD_PRIVILEGE_MASK answer and falling back to the static legacy
 * PF/VF masks when the command is unsupported.
 * NOTE(review): extract elides some lines (error classification on the
 * MCDI call, final assignment to *maskp); visible code untouched.
 */
1258 __checkReturn efx_rc_t
1259 ef10_get_privilege_mask(
1260 __in efx_nic_t *enp,
1261 __out uint32_t *maskp)
1263 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1267 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
1272 /* Fallback for old firmware without privilege mask support */
1273 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1274 /* Assume PF has admin privilege */
1275 mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
1277 /* VF is always unprivileged by default */
1278 mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
1287 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1294 * Table of mapping schemes from port number to external number.
1296 * Each port number ultimately corresponds to a connector: either as part of
1297 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
1298 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
1299 * "Salina"). In general:
1301 * Port number (0-based)
1303 * port mapping (n:1)
1306 * External port number (normally 1-based)
1308 * fixed (1:1) or cable assembly (1:m)
1313 * The external numbering refers to the cages or magjacks on the board,
1314 * as visibly annotated on the board or back panel. This table describes
1315 * how to determine which external cage/magjack corresponds to the port
1316 * numbers used by the driver.
1318 * The count of adjacent port numbers that map to each external number,
1319 * and the offset in the numbering, is determined by the chip family and
1320 * current port mode.
1322 * For the Huntington family, the current port mode cannot be discovered,
1323 * but a single mapping is used by all modes for a given chip variant,
1324 * so the mapping used is instead the last match in the table to the full
1325 * set of port modes to which the NIC can be configured. Therefore the
1326 * ordering of entries in the mapping table is significant.
/*
 * Port-number -> external-cage mapping table.  Each entry matches a
 * chip family plus a bitmask of TLV port modes, and gives the number
 * of adjacent port numbers per cage ("count") and the starting
 * external number ("offset" -- elided field names; confirm against the
 * full source).  Entry order matters: for Huntington the LAST matching
 * entry wins (see ef10_external_port_mapping below).
 */
1328 static struct ef10_external_port_map_s {
1329 efx_family_t family;
1330 uint32_t modes_mask;
1333 } __ef10_external_port_mappings[] = {
1335 * Modes used by Huntington family controllers where each port
1336 * number maps to a separate cage.
1337 * SFN7x22F (Torino):
1347 EFX_FAMILY_HUNTINGTON,
1348 (1U << TLV_PORT_MODE_10G) | /* mode 0 */
1349 (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
1350 (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
1351 1, /* ports per cage */
1355 * Modes which for Huntington identify a chip variant where 2
1356 * adjacent port numbers map to each cage.
1364 EFX_FAMILY_HUNTINGTON,
1365 (1U << TLV_PORT_MODE_40G) | /* mode 1 */
1366 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
1367 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
1368 (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
1369 2, /* ports per cage */
1373 * Modes that on Medford allocate each port number to a separate
1382 (1U << TLV_PORT_MODE_10G) | /* mode 0 */
1383 (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */
1384 1, /* ports per cage */
1388 * Modes that on Medford allocate 2 adjacent port numbers to each
1397 (1U << TLV_PORT_MODE_40G) | /* mode 1 */
1398 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
1399 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
1400 (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */
1401 /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
1402 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
1403 2, /* ports per cage */
1407 * Modes that on Medford allocate 4 adjacent port numbers to each
1408 * connector, starting on cage 1.
1416 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */
1417 /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
1418 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */
1419 4, /* ports per cage */
1423 * Modes that on Medford allocate 4 adjacent port numbers to each
1424 * connector, starting on cage 2.
1432 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */
1433 4, /* ports per cage */
/*
 * Translate a zero-based internal port number into the external cage
 * number printed on the board.  Prefers the firmware-reported current
 * port mode; on Huntington (no current-mode support) it scans all
 * advertised modes and uses the last table match.  Any advertised mode
 * absent from the table is an error.
 * NOTE(review): extract elides some lines (port parameter, matches
 * declaration, fail branches); visible code untouched.
 */
1438 static __checkReturn efx_rc_t
1439 ef10_external_port_mapping(
1440 __in efx_nic_t *enp,
1442 __out uint8_t *external_portp)
1446 uint32_t port_modes;
1449 int32_t count = 1; /* Default 1-1 mapping */
1450 int32_t offset = 1; /* Default starting external port number */
1452 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
1454 * No current port mode information (i.e. Huntington)
1455 * - infer mapping from available modes
1457 if ((rc = efx_mcdi_get_port_modes(enp,
1458 &port_modes, NULL)) != 0) {
1460 * No port mode information available
1461 * - use default mapping
1466 /* Only need to scan the current mode */
1467 port_modes = 1 << current;
1471 * Infer the internal port -> external number mapping from
1472 * the possible port modes for this NIC.
1474 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
1475 struct ef10_external_port_map_s *eepmp =
1476 &__ef10_external_port_mappings[i];
1477 if (eepmp->family != enp->en_family)
1479 matches = (eepmp->modes_mask & port_modes);
1482 * Some modes match. For some Huntington boards
1483 * there will be multiple matches. The mapping on the
1484 * last match is used.
1486 count = eepmp->count;
1487 offset = eepmp->offset;
/* Remove handled modes so leftovers can be detected below */
1488 port_modes &= ~matches;
1492 if (port_modes != 0) {
1493 /* Some advertised modes are not supported */
1500 * Scale as required by last matched mode and then convert to
1501 * correctly offset numbering
1503 *external_portp = (uint8_t)((port / count) + offset);
1507 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Board configuration at probe time (the function-name line is not
 * visible in this listing; upstream this is ef10_nic_board_cfg —
 * TODO confirm). Queries the MC via MCDI and fills in fields of
 * enp->en_nic_cfg and enp->en_port: port assignment, external port
 * mapping, PCIe function info, MAC address, board type, PHY and
 * datapath capabilities, resource limits, interrupt vectors and the
 * privilege mask.
 * NOTE(review): this listing omits intermediate source lines (goto
 * fail labels, closing braces, some statements); the control flow
 * shown here is incomplete.
 */
1512 static __checkReturn efx_rc_t
1514 __in efx_nic_t *enp)
1516 const efx_nic_ops_t *enop = enp->en_enop;
1517 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1518 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1519 ef10_link_state_t els;
1520 efx_port_t *epp = &(enp->en_port);
1521 uint32_t board_type = 0;
1522 uint32_t base, nvec;
1527 uint8_t mac_addr[6] = { 0 };
1530 /* Get the (zero-based) MCDI port number */
1531 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
1534 /* EFX MCDI interface uses one-based port numbers */
1535 emip->emi_port = port + 1;
/* Map the MCDI port number to the externally labelled port number */
1537 if ((rc = ef10_external_port_mapping(enp, port,
1538 &encp->enc_external_port)) != 0)
1542 * Get PCIe function number from firmware (used for
1543 * per-function privilege and dynamic config info).
1544 * - PCIe PF: pf = PF number, vf = 0xffff.
1545 * - PCIe VF: pf = parent PF, vf = VF number.
1547 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
1553 /* MAC address for this function */
1554 if (EFX_PCI_FUNCTION_IS_PF(encp)) {
1555 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
1556 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
1558 * Disable static config checking, ONLY for manufacturing test
1559 * and setup at the factory, to allow the static config to be
1562 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
/* 0x02 in the first octet marks a locally administered address */
1563 if ((rc == 0) && (mac_addr[0] & 0x02)) {
1565 * If the static config does not include a global MAC
1566 * address pool then the board may return a locally
1567 * administered MAC address (this should only happen on
1568 * incorrectly programmed boards).
1572 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
/* VF path: ask the firmware for the MAC assigned by the parent PF */
1574 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
1579 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
1581 /* Board configuration (legacy) */
1582 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
1584 /* Unprivileged functions may not be able to read board cfg */
1591 encp->enc_board_type = board_type;
1592 encp->enc_clk_mult = 1; /* not used for EF10 */
1594 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
1595 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
1598 /* Obtain the default PHY advertised capabilities */
1599 if ((rc = ef10_phy_get_link(enp, &els)) != 0)
1601 epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
1602 epp->ep_adv_cap_mask = els.els_adv_cap_mask;
1604 /* Check capabilities of running datapath firmware */
1605 if ((rc = ef10_get_datapath_caps(enp)) != 0)
1608 /* Alignment for WPTR updates */
1609 encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
1612 * Maximum number of exclusive RSS contexts. EF10 hardware supports 64
1613 * in total, but 6 are reserved for shared contexts. They are a global
1614 * resource so not all may be available.
1616 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
/* NOTE(review): an RX descriptor field mask is used for the TX DMA
 * size limit here — presumably the fields share a width; confirm. */
1618 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
1619 /* No boundary crossing limits */
1620 encp->enc_tx_dma_desc_boundary = 0;
1623 * Maximum number of bytes into the frame the TCP header can start for
1624 * firmware assisted TSO to work.
1626 encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
1629 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
1630 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
1631 * resources (allocated to this PCIe function), which is zero until
1632 * after we have allocated VIs.
1634 encp->enc_evq_limit = 1024;
1635 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
1636 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
1638 encp->enc_buftbl_limit = 0xFFFFFFFF;
1640 /* Get interrupt vector limits */
1641 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
1642 if (EFX_PCI_FUNCTION_IS_PF(encp))
1645 /* Ignore error (cannot query vector limits from a VF). */
1649 encp->enc_intr_vec_base = base;
1650 encp->enc_intr_limit = nvec;
1653 * Get the current privilege mask. Note that this may be modified
1654 * dynamically, so this value is informational only. DO NOT use
1655 * the privilege mask to check for sufficient privileges, as that
1656 * can result in time-of-check/time-of-use bugs.
1658 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
1660 encp->enc_privilege_mask = mask;
1662 /* Get remaining controller-specific board config */
1663 if ((rc = enop->eno_board_cfg(enp)) != 0)
/* Failure unwind: probe points only (other fail labels omitted in
 * this listing) */
1670 EFSYS_PROBE(fail11);
1672 EFSYS_PROBE(fail10);
1690 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC probe (the function-name line is not visible in this listing;
 * upstream this is ef10_nic_probe — TODO confirm). Clears any MC
 * assertion state, attaches the driver to the firmware, runs board
 * configuration and sets default driver resource limits; optionally
 * clears MAC stats, queries loopback modes and builds monitor config.
 * NOTE(review): intermediate lines (fail labels, braces) are omitted
 * from this listing; the shown control flow is incomplete.
 */
1695 __checkReturn efx_rc_t
1697 __in efx_nic_t *enp)
1699 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1700 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1703 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1704 enp->en_family == EFX_FAMILY_MEDFORD ||
1705 enp->en_family == EFX_FAMILY_MEDFORD2);
1707 /* Read and clear any assertion state */
1708 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
1711 /* Exit the assertion handler */
1712 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
/* B_TRUE requests attach; paired with the B_FALSE detach at unprobe */
1716 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
1719 if ((rc = ef10_nic_board_cfg(enp)) != 0)
1723 * Set default driver config limits (based on board config).
1725 * FIXME: For now allocate a fixed number of VIs which is likely to be
1726 * sufficient and small enough to allow multiple functions on the same
1729 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
1730 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
1732 /* The client driver must configure and enable PIO buffer support */
1733 edcp->edc_max_piobuf_count = 0;
1734 edcp->edc_pio_alloc_size = 0;
1736 #if EFSYS_OPT_MAC_STATS
1737 /* Wipe the MAC statistics */
1738 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
1742 #if EFSYS_OPT_LOOPBACK
/* Discover which loopback modes the firmware supports */
1743 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
1747 #if EFSYS_OPT_MON_STATS
1748 if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
1749 /* Unprivileged functions do not have access to sensors */
1755 encp->enc_features = enp->en_features;
/* Failure unwind (labels omitted in this listing) */
1759 #if EFSYS_OPT_MON_STATS
1763 #if EFSYS_OPT_LOOPBACK
1767 #if EFSYS_OPT_MAC_STATS
1778 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Clamp the client driver's requested queue limits against the board
 * limits, derive the min/max VI counts, and compute the PIO buffer
 * sub-allocation geometry (block size and piobuf count).
 * NOTE(review): intermediate lines (fail labels, braces) are omitted
 * from this listing.
 */
1783 __checkReturn efx_rc_t
1784 ef10_nic_set_drv_limits(
1785 __inout efx_nic_t *enp,
1786 __in efx_drv_limits_t *edlp)
1788 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1789 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1790 uint32_t min_evq_count, max_evq_count;
1791 uint32_t min_rxq_count, max_rxq_count;
1792 uint32_t min_txq_count, max_txq_count;
1800 /* Get minimum required and maximum usable VI limits */
1801 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
1802 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
1803 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
/* The VI count must cover the largest of the three queue demands */
1805 edcp->edc_min_vi_count =
1806 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
1808 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
1809 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
1810 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
1812 edcp->edc_max_vi_count =
1813 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
1816 * Check limits for sub-allocated piobuf blocks.
1817 * PIO is optional, so don't fail if the limits are incorrect.
1819 if ((encp->enc_piobuf_size == 0) ||
1820 (encp->enc_piobuf_limit == 0) ||
1821 (edlp->edl_min_pio_alloc_size == 0) ||
1822 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
/* PIO disabled or misconfigured: zero the geometry so later
 * allocation is skipped */
1824 edcp->edc_max_piobuf_count = 0;
1825 edcp->edc_pio_alloc_size = 0;
1827 uint32_t blk_size, blk_count, blks_per_piobuf;
/* Block size is at least the hardware's minimum allocation unit */
1830 MAX(edlp->edl_min_pio_alloc_size,
1831 encp->enc_piobuf_min_alloc_size);
1833 blks_per_piobuf = encp->enc_piobuf_size / blk_size;
1834 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
1836 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
1838 /* A zero max pio alloc count means unlimited */
1839 if ((edlp->edl_max_pio_alloc_count > 0) &&
1840 (edlp->edl_max_pio_alloc_count < blk_count)) {
1841 blk_count = edlp->edl_max_pio_alloc_count;
1844 edcp->edc_pio_alloc_size = blk_size;
/* Round the block count up to whole piobufs */
1845 edcp->edc_max_piobuf_count =
1846 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
1852 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Reset this PCIe function's NIC resources (the function-name line is
 * not visible in this listing; the in-body comment names it
 * ef10_nic_reset). Clears MC assertion state, then issues
 * MC_CMD_ENTITY_RESET with the function-resource-reset flag.
 * NOTE(review): intermediate lines (fail labels, braces) are omitted
 * from this listing.
 */
1858 __checkReturn efx_rc_t
1860 __in efx_nic_t *enp)
/* Single buffer shared for MCDI request and response payloads */
1863 uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
1864 MC_CMD_ENTITY_RESET_OUT_LEN)];
1867 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
1868 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
1870 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
1873 (void) memset(payload, 0, sizeof (payload));
1874 req.emr_cmd = MC_CMD_ENTITY_RESET;
1875 req.emr_in_buf = payload;
1876 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
1877 req.emr_out_buf = payload;
1878 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
/* Request reset of this function's resources only */
1880 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
1881 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
1883 efx_mcdi_execute(enp, &req);
1885 if (req.emr_rc != 0) {
1890 /* Clear RX/TX DMA queue errors */
1891 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
1900 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC init (the function-name line is not visible in this listing;
 * upstream this is ef10_nic_init — TODO confirm). Allocates optional
 * PIO buffers, reserves VIs for this function, records the UC/WC BAR
 * mapping geometry, links piobufs to the extra WC-mapped VIs, and
 * allocates a vAdaptor (with retries on a VF).
 * NOTE(review): intermediate lines (fail labels, braces, some
 * statements) are omitted from this listing; the shown control flow
 * is incomplete.
 */
1905 __checkReturn efx_rc_t
1907 __in efx_nic_t *enp)
1909 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
1910 uint32_t min_vi_count, max_vi_count;
1911 uint32_t vi_count, vi_base, vi_shift;
1915 uint32_t vi_window_size;
1918 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
1919 enp->en_family == EFX_FAMILY_MEDFORD ||
1920 enp->en_family == EFX_FAMILY_MEDFORD2);
1922 /* Enable reporting of some events (e.g. link change) */
1923 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
1926 /* Allocate (optional) on-chip PIO buffers */
1927 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count)
1930 * For best performance, PIO writes should use a write-combined
1931 * (WC) memory mapping. Using a separate WC mapping for the PIO
1932 * aperture of each VI would be a burden to drivers (and not
1933 * possible if the host page size is >4Kbyte).
1935 * To avoid this we use a single uncached (UC) mapping for VI
1936 * register access, and a single WC mapping for extra VIs used
1939 * Each piobuf must be linked to a VI in the WC mapping, and to
1940 * each VI that is using a sub-allocated block from the piobuf.
1942 min_vi_count = edcp->edc_min_vi_count;
/* Extra VIs beyond the driver maximum carry the piobuf mappings */
1944 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
1946 /* Ensure that the previously attached driver's VIs are freed */
1947 if ((rc = efx_mcdi_free_vis(enp)) != 0)
1951 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
1952 * fails then retrying the request for fewer VI resources may succeed.
1955 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
1956 &vi_base, &vi_count, &vi_shift)) != 0)
1959 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
/* Firmware may grant fewer VIs than the minimum we need */
1961 if (vi_count < min_vi_count) {
1966 enp->en_arch.ef10.ena_vi_base = vi_base;
1967 enp->en_arch.ef10.ena_vi_count = vi_count;
1968 enp->en_arch.ef10.ena_vi_shift = vi_shift;
1970 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
1971 /* Not enough extra VIs to map piobufs */
1972 ef10_nic_free_piobufs(enp);
/* PIO-write VIs occupy the top of the granted VI range */
1975 enp->en_arch.ef10.ena_pio_write_vi_base =
1976 vi_count - enp->en_arch.ef10.ena_piobuf_count;
/* The VI window shift must have been set by earlier board config */
1978 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
1979 EFX_VI_WINDOW_SHIFT_INVALID);
1980 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
1981 EFX_VI_WINDOW_SHIFT_64K);
1982 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
1984 /* Save UC memory mapping details */
1985 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
/* With piobufs, the UC mapping stops before the PIO-write VIs */
1986 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
1987 enp->en_arch.ef10.ena_uc_mem_map_size =
1989 enp->en_arch.ef10.ena_pio_write_vi_base);
1991 enp->en_arch.ef10.ena_uc_mem_map_size =
1993 enp->en_arch.ef10.ena_vi_count);
1996 /* Save WC memory mapping details */
1997 enp->en_arch.ef10.ena_wc_mem_map_offset =
1998 enp->en_arch.ef10.ena_uc_mem_map_offset +
1999 enp->en_arch.ef10.ena_uc_mem_map_size;
2001 enp->en_arch.ef10.ena_wc_mem_map_size =
2003 enp->en_arch.ef10.ena_piobuf_count);
2005 /* Link piobufs to extra VIs in WC mapping */
2006 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2007 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2008 rc = efx_mcdi_link_piobuf(enp,
2009 enp->en_arch.ef10.ena_pio_write_vi_base + i,
2010 enp->en_arch.ef10.ena_piobuf_handle[i]);
2017 * Allocate a vAdaptor attached to our upstream vPort/pPort.
2019 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
2020 * driver has yet to bring up the EVB port. See bug 56147. In this case,
2021 * retry the request several times after waiting a while. The wait time
2022 * between retries starts small (10ms) and exponentially increases.
2023 * Total wait time is a little over two seconds. Retry logic in the
2024 * client driver may mean this whole loop is repeated if it continues to
2029 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
2030 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
2033 * Do not retry alloc for PF, or for other errors on
2039 /* VF startup before PF is ready. Retry allocation. */
2041 /* Too many attempts */
2045 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
2046 EFSYS_SLEEP(delay_us);
/* Exponential backoff, capped (cap/growth lines omitted in listing) */
2048 if (delay_us < 500000)
2052 enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
/* MCDI v2 allows larger payloads; bump the advertised maximum */
2053 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
/* Failure unwind: release piobufs (other labels omitted in listing) */
2068 ef10_nic_free_piobufs(enp);
2071 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report the number of VIs usable by the client driver, excluding the
 * extra VIs reserved at the top of the range for PIO buffer writes.
 */
2076 __checkReturn efx_rc_t
2077 ef10_nic_get_vi_pool(
2078 __in efx_nic_t *enp,
2079 __out uint32_t *vi_countp)
2081 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2082 enp->en_family == EFX_FAMILY_MEDFORD ||
2083 enp->en_family == EFX_FAMILY_MEDFORD2);
2086 * Report VIs that the client driver can use.
2087 * Do not include VIs used for PIO buffer writes.
2089 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
/*
 * Return the BAR offset and size for the requested memory region:
 * the UC-mapped VI register region or the WC-mapped piobuf region.
 * Values were recorded at init time in enp->en_arch.ef10.
 * NOTE(review): the leading switch statement and default case are
 * omitted from this listing.
 */
2094 __checkReturn efx_rc_t
2095 ef10_nic_get_bar_region(
2096 __in efx_nic_t *enp,
2097 __in efx_nic_region_t region,
2098 __out uint32_t *offsetp,
2099 __out size_t *sizep)
2103 EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
2104 enp->en_family == EFX_FAMILY_MEDFORD ||
2105 enp->en_family == EFX_FAMILY_MEDFORD2);
2108 * TODO: Specify host memory mapping alignment and granularity
2109 * in efx_drv_limits_t so that they can be taken into account
2110 * when allocating extra VIs for PIO writes.
2114 /* UC mapped memory BAR region for VI registers */
2115 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
2116 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
2119 case EFX_REGION_PIO_WRITE_VI:
2120 /* WC mapped memory BAR region for piobuf writes */
2121 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
2122 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
2133 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NIC teardown (the function-name line is not visible in this
 * listing; upstream this is ef10_nic_fini — TODO confirm). Frees the
 * vAdaptor, unlinks and frees piobufs, then releases all VIs.
 * Teardown steps are best-effort: MCDI results are ignored.
 */
2140 __in efx_nic_t *enp)
2145 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
2146 enp->en_vport_id = 0;
2148 /* Unlink piobufs from extra VIs in WC mapping */
2149 if (enp->en_arch.ef10.ena_piobuf_count > 0) {
2150 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
2151 rc = efx_mcdi_unlink_piobuf(enp,
2152 enp->en_arch.ef10.ena_pio_write_vi_base + i);
2158 ef10_nic_free_piobufs(enp);
/* Release every VI reserved at init; failure is ignored */
2160 (void) efx_mcdi_free_vis(enp);
2161 enp->en_arch.ef10.ena_vi_count = 0;
/*
 * NIC unprobe (the function-name line is not visible in this listing;
 * upstream this is ef10_nic_unprobe — TODO confirm). Releases monitor
 * statistics config and detaches the driver from the firmware.
 */
2166 __in efx_nic_t *enp)
2168 #if EFSYS_OPT_MON_STATS
2169 mcdi_mon_cfg_free(enp);
2170 #endif /* EFSYS_OPT_MON_STATS */
/* Best-effort detach (B_FALSE); pairs with the attach done at probe */
2171 (void) efx_mcdi_drv_attach(enp, B_FALSE);
/*
 * Diagnostic register test. The enp argument is unused
 * (_NOTE(ARGUNUSED)); the body between the annotations and the fail
 * probe is omitted from this listing — presumably a stub or a fixed
 * result, TODO confirm against the full source.
 */
2176 __checkReturn efx_rc_t
2177 ef10_nic_register_test(
2178 __in efx_nic_t *enp)
2183 _NOTE(ARGUNUSED(enp))
2184 _NOTE(CONSTANTCONDITION)
2194 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2199 #endif /* EFSYS_OPT_DIAG */
2202 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */