1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2008-2019 Solarflare Communications Inc.
13 * There are three versions of the MCDI interface:
14 * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
15 * - MCDIv1: Siena firmware and Huntington BootROM.
16 * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
17 * Transport uses MCDIv2 headers.
19 * MCDIv2 Header NOT_EPOCH flag
20 * ----------------------------
21 * A new epoch begins at initial startup or after an MC reboot, and defines when
22 * the MC should reject stale MCDI requests.
24 * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
25 * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
27 * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
28 * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
35 static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
36 siena_mcdi_init, /* emco_init */
37 siena_mcdi_send_request, /* emco_send_request */
38 siena_mcdi_poll_reboot, /* emco_poll_reboot */
39 siena_mcdi_poll_response, /* emco_poll_response */
40 siena_mcdi_read_response, /* emco_read_response */
41 siena_mcdi_fini, /* emco_fini */
42 siena_mcdi_feature_supported, /* emco_feature_supported */
43 siena_mcdi_get_timeout, /* emco_get_timeout */
46 #endif /* EFSYS_OPT_SIENA */
50 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
51 ef10_mcdi_init, /* emco_init */
52 ef10_mcdi_send_request, /* emco_send_request */
53 ef10_mcdi_poll_reboot, /* emco_poll_reboot */
54 ef10_mcdi_poll_response, /* emco_poll_response */
55 ef10_mcdi_read_response, /* emco_read_response */
56 ef10_mcdi_fini, /* emco_fini */
57 ef10_mcdi_feature_supported, /* emco_feature_supported */
58 ef10_mcdi_get_timeout, /* emco_get_timeout */
61 #endif /* EFX_OPTS_EF10() */
63 #if EFSYS_OPT_RIVERHEAD
65 static const efx_mcdi_ops_t __efx_mcdi_rhead_ops = {
66 ef10_mcdi_init, /* emco_init */
67 ef10_mcdi_send_request, /* emco_send_request */
68 ef10_mcdi_poll_reboot, /* emco_poll_reboot */
69 ef10_mcdi_poll_response, /* emco_poll_response */
70 ef10_mcdi_read_response, /* emco_read_response */
71 ef10_mcdi_fini, /* emco_fini */
72 ef10_mcdi_feature_supported, /* emco_feature_supported */
73 ef10_mcdi_get_timeout, /* emco_get_timeout */
76 #endif /* EFSYS_OPT_RIVERHEAD */
80 __checkReturn efx_rc_t
83 __in const efx_mcdi_transport_t *emtp)
85 const efx_mcdi_ops_t *emcop;
88 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
89 EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
91 switch (enp->en_family) {
93 case EFX_FAMILY_SIENA:
94 emcop = &__efx_mcdi_siena_ops;
96 #endif /* EFSYS_OPT_SIENA */
98 #if EFSYS_OPT_HUNTINGTON
99 case EFX_FAMILY_HUNTINGTON:
100 emcop = &__efx_mcdi_ef10_ops;
102 #endif /* EFSYS_OPT_HUNTINGTON */
104 #if EFSYS_OPT_MEDFORD
105 case EFX_FAMILY_MEDFORD:
106 emcop = &__efx_mcdi_ef10_ops;
108 #endif /* EFSYS_OPT_MEDFORD */
110 #if EFSYS_OPT_MEDFORD2
111 case EFX_FAMILY_MEDFORD2:
112 emcop = &__efx_mcdi_ef10_ops;
114 #endif /* EFSYS_OPT_MEDFORD2 */
116 #if EFSYS_OPT_RIVERHEAD
117 case EFX_FAMILY_RIVERHEAD:
118 emcop = &__efx_mcdi_rhead_ops;
120 #endif /* EFSYS_OPT_RIVERHEAD */
128 if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
129 /* MCDI requires a DMA buffer in host memory */
130 if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
135 enp->en_mcdi.em_emtp = emtp;
137 if (emcop != NULL && emcop->emco_init != NULL) {
138 if ((rc = emcop->emco_init(enp, emtp)) != 0)
142 enp->en_mcdi.em_emcop = emcop;
143 enp->en_mod_flags |= EFX_MOD_MCDI;
152 EFSYS_PROBE1(fail1, efx_rc_t, rc);
154 enp->en_mcdi.em_emcop = NULL;
155 enp->en_mcdi.em_emtp = NULL;
156 enp->en_mod_flags &= ~EFX_MOD_MCDI;
165 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
166 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
168 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
169 EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
171 if (emcop != NULL && emcop->emco_fini != NULL)
172 emcop->emco_fini(enp);
175 emip->emi_aborted = 0;
177 enp->en_mcdi.em_emcop = NULL;
178 enp->en_mod_flags &= ~EFX_MOD_MCDI;
185 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
186 efsys_lock_state_t state;
188 /* Start a new epoch (allow fresh MCDI requests to succeed) */
189 EFSYS_LOCK(enp->en_eslp, state);
190 emip->emi_new_epoch = B_TRUE;
191 EFSYS_UNLOCK(enp->en_eslp, state);
195 efx_mcdi_send_request(
202 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
204 emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
208 efx_mcdi_poll_reboot(
211 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
214 rc = emcop->emco_poll_reboot(enp);
219 efx_mcdi_poll_response(
222 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
225 available = emcop->emco_poll_response(enp);
230 efx_mcdi_read_response(
236 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
238 emcop->emco_read_response(enp, bufferp, offset, length);
242 efx_mcdi_request_start(
244 __in efx_mcdi_req_t *emrp,
245 __in boolean_t ev_cpl)
247 #if EFSYS_OPT_MCDI_LOGGING
248 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
250 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
253 unsigned int max_version;
257 efsys_lock_state_t state;
259 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
260 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
261 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
264 * efx_mcdi_request_start() is naturally serialised against both
265 * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
266 * by virtue of there only being one outstanding MCDI request.
267 * Unfortunately, upper layers may also call efx_mcdi_request_abort()
 268 * at any time, to timeout a pending mcdi request. That request may
269 * then subsequently complete, meaning efx_mcdi_ev_cpl() or
270 * efx_mcdi_ev_death() may end up running in parallel with
271 * efx_mcdi_request_start(). This race is handled by ensuring that
272 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
275 EFSYS_LOCK(enp->en_eslp, state);
276 EFSYS_ASSERT(emip->emi_pending_req == NULL);
277 emip->emi_pending_req = emrp;
278 emip->emi_ev_cpl = ev_cpl;
279 emip->emi_poll_cnt = 0;
280 seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
281 new_epoch = emip->emi_new_epoch;
282 max_version = emip->emi_max_version;
283 EFSYS_UNLOCK(enp->en_eslp, state);
287 xflags |= MCDI_HEADER_XFLAGS_EVREQ;
290 * Huntington firmware supports MCDIv2, but the Huntington BootROM only
291 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
292 * possible to support this.
294 if ((max_version >= 2) &&
295 ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
296 (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
297 (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
298 /* Construct MCDI v2 header */
299 hdr_len = sizeof (hdr);
300 EFX_POPULATE_DWORD_8(hdr[0],
301 MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
302 MCDI_HEADER_RESYNC, 1,
303 MCDI_HEADER_DATALEN, 0,
304 MCDI_HEADER_SEQ, seq,
305 MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
306 MCDI_HEADER_ERROR, 0,
307 MCDI_HEADER_RESPONSE, 0,
308 MCDI_HEADER_XFLAGS, xflags);
310 EFX_POPULATE_DWORD_2(hdr[1],
311 MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
312 MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
314 /* Construct MCDI v1 header */
315 hdr_len = sizeof (hdr[0]);
316 EFX_POPULATE_DWORD_8(hdr[0],
317 MCDI_HEADER_CODE, emrp->emr_cmd,
318 MCDI_HEADER_RESYNC, 1,
319 MCDI_HEADER_DATALEN, emrp->emr_in_length,
320 MCDI_HEADER_SEQ, seq,
321 MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
322 MCDI_HEADER_ERROR, 0,
323 MCDI_HEADER_RESPONSE, 0,
324 MCDI_HEADER_XFLAGS, xflags);
327 #if EFSYS_OPT_MCDI_LOGGING
328 if (emtp->emt_logger != NULL) {
329 emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
331 emrp->emr_in_buf, emrp->emr_in_length);
333 #endif /* EFSYS_OPT_MCDI_LOGGING */
335 efx_mcdi_send_request(enp, &hdr[0], hdr_len,
336 emrp->emr_in_buf, emrp->emr_in_length);
341 efx_mcdi_read_response_header(
343 __inout efx_mcdi_req_t *emrp)
345 #if EFSYS_OPT_MCDI_LOGGING
346 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
347 #endif /* EFSYS_OPT_MCDI_LOGGING */
348 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
350 unsigned int hdr_len;
351 unsigned int data_len;
357 EFSYS_ASSERT(emrp != NULL);
359 efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
360 hdr_len = sizeof (hdr[0]);
362 cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
363 seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
364 error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
366 if (cmd != MC_CMD_V2_EXTN) {
367 data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
369 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
370 hdr_len += sizeof (hdr[1]);
372 cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
374 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
377 if (error && (data_len == 0)) {
378 /* The MC has rebooted since the request was sent. */
379 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
380 efx_mcdi_poll_reboot(enp);
384 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
385 if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) ||
387 if ((cmd != emrp->emr_cmd) ||
389 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
390 /* Response is for a different request */
396 unsigned int err_len = MIN(data_len, sizeof (err));
397 int err_code = MC_CMD_ERR_EPROTO;
400 /* Read error code (and arg num for MCDI v2 commands) */
401 efx_mcdi_read_response(enp, &err, hdr_len, err_len);
403 if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
404 err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
406 if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
407 err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
409 emrp->emr_err_code = err_code;
410 emrp->emr_err_arg = err_arg;
412 #if EFSYS_OPT_MCDI_PROXY_AUTH
413 if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
414 (err_len == sizeof (err))) {
416 * The MCDI request would normally fail with EPERM, but
417 * firmware has forwarded it to an authorization agent
418 * attached to a privileged PF.
420 * Save the authorization request handle. The client
421 * must wait for a PROXY_RESPONSE event, or timeout.
423 emrp->emr_proxy_handle = err_arg;
425 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
427 #if EFSYS_OPT_MCDI_LOGGING
428 if (emtp->emt_logger != NULL) {
429 emtp->emt_logger(emtp->emt_context,
430 EFX_LOG_MCDI_RESPONSE,
434 #endif /* EFSYS_OPT_MCDI_LOGGING */
436 if (!emrp->emr_quiet) {
437 EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
438 int, err_code, int, err_arg);
441 rc = efx_mcdi_request_errcode(err_code);
446 emrp->emr_out_length_used = data_len;
447 #if EFSYS_OPT_MCDI_PROXY_AUTH
448 emrp->emr_proxy_handle = 0;
449 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
456 emrp->emr_out_length_used = 0;
460 efx_mcdi_finish_response(
462 __in efx_mcdi_req_t *emrp)
464 #if EFSYS_OPT_MCDI_LOGGING
465 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
466 #endif /* EFSYS_OPT_MCDI_LOGGING */
468 unsigned int hdr_len;
470 unsigned int resp_off;
471 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
472 unsigned int resp_cmd;
473 boolean_t proxied_cmd_resp = B_FALSE;
474 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
476 if (emrp->emr_out_buf == NULL)
479 /* Read the command header to detect MCDI response format */
480 hdr_len = sizeof (hdr[0]);
481 efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
482 if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
484 * Read the actual payload length. The length given in the event
485 * is only correct for responses with the V1 format.
487 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
488 hdr_len += sizeof (hdr[1]);
491 emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
492 MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
493 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
495 * A proxy MCDI command is executed by PF on behalf of
496 * one of its VFs. The command to be proxied follows
497 * immediately afterward in the host buffer.
498 * PROXY_CMD inner call complete response should be copied to
499 * output buffer so that it can be returned to the requesting
500 * function in MC_CMD_PROXY_COMPLETE payload.
503 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
504 proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) &&
505 (resp_cmd != MC_CMD_PROXY_CMD));
506 if (proxied_cmd_resp) {
508 emrp->emr_out_length_used += hdr_len;
510 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
515 /* Copy payload out into caller supplied buffer */
516 bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
517 efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes);
519 /* Report bytes copied to caller (response message may be larger) */
520 emrp->emr_out_length_used = bytes;
522 #if EFSYS_OPT_MCDI_LOGGING
523 if (emtp->emt_logger != NULL) {
524 emtp->emt_logger(emtp->emt_context,
525 EFX_LOG_MCDI_RESPONSE,
527 emrp->emr_out_buf, bytes);
529 #endif /* EFSYS_OPT_MCDI_LOGGING */
533 __checkReturn boolean_t
534 efx_mcdi_request_poll(
537 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
538 efx_mcdi_req_t *emrp;
539 efsys_lock_state_t state;
542 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
543 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
544 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
546 /* Serialise against post-watchdog efx_mcdi_ev* */
547 EFSYS_LOCK(enp->en_eslp, state);
549 EFSYS_ASSERT(emip->emi_pending_req != NULL);
550 EFSYS_ASSERT(!emip->emi_ev_cpl);
551 emrp = emip->emi_pending_req;
553 /* Check if hardware is unavailable */
554 if (efx_nic_hw_unavailable(enp)) {
555 EFSYS_UNLOCK(enp->en_eslp, state);
559 /* Check for reboot atomically w.r.t efx_mcdi_request_start */
560 if (emip->emi_poll_cnt++ == 0) {
561 if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
562 emip->emi_pending_req = NULL;
563 EFSYS_UNLOCK(enp->en_eslp, state);
565 /* Reboot/Assertion */
566 if (rc == EIO || rc == EINTR)
567 efx_mcdi_raise_exception(enp, emrp, rc);
573 /* Check if a response is available */
574 if (efx_mcdi_poll_response(enp) == B_FALSE) {
575 EFSYS_UNLOCK(enp->en_eslp, state);
579 /* Read the response header */
580 efx_mcdi_read_response_header(enp, emrp);
582 /* Request complete */
583 emip->emi_pending_req = NULL;
585 /* Ensure stale MCDI requests fail after an MC reboot. */
586 emip->emi_new_epoch = B_FALSE;
588 EFSYS_UNLOCK(enp->en_eslp, state);
590 if ((rc = emrp->emr_rc) != 0)
593 efx_mcdi_finish_response(enp, emrp);
597 if (!emrp->emr_quiet)
600 if (!emrp->emr_quiet)
601 EFSYS_PROBE1(fail1, efx_rc_t, rc);
606 __checkReturn boolean_t
607 efx_mcdi_request_abort(
610 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
611 efx_mcdi_req_t *emrp;
613 efsys_lock_state_t state;
615 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
616 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
617 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
620 * efx_mcdi_ev_* may have already completed this event, and be
621 * spinning/blocked on the upper layer lock. So it *is* legitimate
 622 * for emi_pending_req to be NULL. If there is a pending event
623 * completed request, then provide a "credit" to allow
624 * efx_mcdi_ev_cpl() to accept a single spurious completion.
626 EFSYS_LOCK(enp->en_eslp, state);
627 emrp = emip->emi_pending_req;
628 aborted = (emrp != NULL);
630 emip->emi_pending_req = NULL;
632 /* Error the request */
633 emrp->emr_out_length_used = 0;
634 emrp->emr_rc = ETIMEDOUT;
636 /* Provide a credit for seqno/emr_pending_req mismatches */
637 if (emip->emi_ev_cpl)
641 * The upper layer has called us, so we don't
642 * need to complete the request.
645 EFSYS_UNLOCK(enp->en_eslp, state);
650 __checkReturn efx_rc_t
651 efx_mcdi_get_client_handle(
653 __in efx_pcie_interface_t intf,
656 __out uint32_t *handle)
659 EFX_MCDI_DECLARE_BUF(payload,
660 MC_CMD_GET_CLIENT_HANDLE_IN_LEN,
661 MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
664 if (handle == NULL) {
669 req.emr_cmd = MC_CMD_GET_CLIENT_HANDLE;
670 req.emr_in_buf = payload;
671 req.emr_in_length = MC_CMD_GET_CLIENT_HANDLE_IN_LEN;
672 req.emr_out_buf = payload;
673 req.emr_out_length = MC_CMD_GET_CLIENT_HANDLE_OUT_LEN;
675 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_TYPE,
676 MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
677 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_PF, pf);
678 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_VF, vf);
679 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_FUNC_INTF, intf);
681 efx_mcdi_execute(enp, &req);
683 if (req.emr_rc != 0) {
688 if (req.emr_out_length_used < MC_CMD_GET_CLIENT_HANDLE_OUT_LEN) {
693 *handle = MCDI_OUT_DWORD(req, GET_CLIENT_HANDLE_OUT_HANDLE);
701 EFSYS_PROBE1(fail1, efx_rc_t, rc);
705 __checkReturn efx_rc_t
706 efx_mcdi_get_own_client_handle(
708 __out uint32_t *handle)
712 rc = efx_mcdi_get_client_handle(enp, PCIE_INTERFACE_CALLER,
713 PCIE_FUNCTION_PF_NULL, PCIE_FUNCTION_VF_NULL, handle);
719 EFSYS_PROBE1(fail1, efx_rc_t, rc);
724 efx_mcdi_get_timeout(
726 __in efx_mcdi_req_t *emrp,
727 __out uint32_t *timeoutp)
729 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
731 emcop->emco_get_timeout(enp, emrp, timeoutp);
734 __checkReturn efx_rc_t
735 efx_mcdi_request_errcode(
736 __in unsigned int err)
741 case MC_CMD_ERR_EPERM:
743 case MC_CMD_ERR_ENOENT:
745 case MC_CMD_ERR_EINTR:
747 case MC_CMD_ERR_EACCES:
749 case MC_CMD_ERR_EBUSY:
751 case MC_CMD_ERR_EINVAL:
753 case MC_CMD_ERR_EDEADLK:
755 case MC_CMD_ERR_ENOSYS:
757 case MC_CMD_ERR_ETIME:
759 case MC_CMD_ERR_ENOTSUP:
761 case MC_CMD_ERR_EALREADY:
765 case MC_CMD_ERR_EEXIST:
767 #ifdef MC_CMD_ERR_EAGAIN
768 case MC_CMD_ERR_EAGAIN:
771 #ifdef MC_CMD_ERR_ENOSPC
772 case MC_CMD_ERR_ENOSPC:
775 case MC_CMD_ERR_ERANGE:
778 case MC_CMD_ERR_ALLOC_FAIL:
780 case MC_CMD_ERR_NO_VADAPTOR:
782 case MC_CMD_ERR_NO_EVB_PORT:
784 case MC_CMD_ERR_NO_VSWITCH:
786 case MC_CMD_ERR_VLAN_LIMIT:
788 case MC_CMD_ERR_BAD_PCI_FUNC:
790 case MC_CMD_ERR_BAD_VLAN_MODE:
792 case MC_CMD_ERR_BAD_VSWITCH_TYPE:
794 case MC_CMD_ERR_BAD_VPORT_TYPE:
796 case MC_CMD_ERR_MAC_EXIST:
799 case MC_CMD_ERR_PROXY_PENDING:
803 EFSYS_PROBE1(mc_pcol_error, int, err);
809 efx_mcdi_raise_exception(
811 __in_opt efx_mcdi_req_t *emrp,
814 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
815 efx_mcdi_exception_t exception;
817 /* Reboot or Assertion failure only */
818 EFSYS_ASSERT(rc == EIO || rc == EINTR);
821 * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
822 * then the EIO is not worthy of an exception.
824 if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
827 exception = (rc == EIO)
828 ? EFX_MCDI_EXCEPTION_MC_REBOOT
829 : EFX_MCDI_EXCEPTION_MC_BADASSERT;
831 emtp->emt_exception(emtp->emt_context, exception);
837 __inout efx_mcdi_req_t *emrp)
839 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
841 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
842 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
844 emrp->emr_quiet = B_FALSE;
845 emtp->emt_execute(emtp->emt_context, emrp);
849 efx_mcdi_execute_quiet(
851 __inout efx_mcdi_req_t *emrp)
853 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
855 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
856 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
858 emrp->emr_quiet = B_TRUE;
859 emtp->emt_execute(emtp->emt_context, emrp);
865 __in unsigned int seq,
866 __in unsigned int outlen,
869 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
870 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
871 efx_mcdi_req_t *emrp;
872 efsys_lock_state_t state;
874 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
875 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
878 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
879 * when we're completing an aborted request.
881 EFSYS_LOCK(enp->en_eslp, state);
882 if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
883 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
884 EFSYS_ASSERT(emip->emi_aborted > 0);
885 if (emip->emi_aborted > 0)
887 EFSYS_UNLOCK(enp->en_eslp, state);
891 emrp = emip->emi_pending_req;
892 emip->emi_pending_req = NULL;
893 EFSYS_UNLOCK(enp->en_eslp, state);
895 if (emip->emi_max_version >= 2) {
896 /* MCDIv2 response details do not fit into an event. */
897 efx_mcdi_read_response_header(enp, emrp);
900 if (!emrp->emr_quiet) {
901 EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
904 emrp->emr_out_length_used = 0;
905 emrp->emr_rc = efx_mcdi_request_errcode(errcode);
907 emrp->emr_out_length_used = outlen;
911 if (emrp->emr_rc == 0)
912 efx_mcdi_finish_response(enp, emrp);
914 emtp->emt_ev_cpl(emtp->emt_context);
917 #if EFSYS_OPT_MCDI_PROXY_AUTH
919 __checkReturn efx_rc_t
920 efx_mcdi_get_proxy_handle(
922 __in efx_mcdi_req_t *emrp,
923 __out uint32_t *handlep)
927 _NOTE(ARGUNUSED(enp))
930 * Return proxy handle from MCDI request that returned with error
 931 * MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a matching
932 * PROXY_RESPONSE event.
934 if ((emrp == NULL) || (handlep == NULL)) {
938 if ((emrp->emr_rc != 0) &&
939 (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
940 *handlep = emrp->emr_proxy_handle;
949 EFSYS_PROBE1(fail1, efx_rc_t, rc);
954 efx_mcdi_ev_proxy_response(
956 __in unsigned int handle,
957 __in unsigned int status)
959 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
963 * Handle results of an authorization request for a privileged MCDI
964 * command. If authorization was granted then we must re-issue the
965 * original MCDI request. If authorization failed or timed out,
966 * then the original MCDI request should be completed with the
967 * result code from this event.
969 rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
971 emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
973 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
975 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
977 efx_mcdi_ev_proxy_request(
979 __in unsigned int index)
981 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
983 if (emtp->emt_ev_proxy_request != NULL)
984 emtp->emt_ev_proxy_request(emtp->emt_context, index);
986 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
992 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
993 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
994 efx_mcdi_req_t *emrp = NULL;
996 efsys_lock_state_t state;
999 * The MCDI request (if there is one) has been terminated, either
1000 * by a BADASSERT or REBOOT event.
1002 * If there is an outstanding event-completed MCDI operation, then we
1003 * will never receive the completion event (because both MCDI
1004 * completions and BADASSERT events are sent to the same evq). So
1005 * complete this MCDI op.
1007 * This function might run in parallel with efx_mcdi_request_poll()
1008 * for poll completed mcdi requests, and also with
1009 * efx_mcdi_request_start() for post-watchdog completions.
1011 EFSYS_LOCK(enp->en_eslp, state);
1012 emrp = emip->emi_pending_req;
1013 ev_cpl = emip->emi_ev_cpl;
1014 if (emrp != NULL && emip->emi_ev_cpl) {
1015 emip->emi_pending_req = NULL;
1017 emrp->emr_out_length_used = 0;
1019 ++emip->emi_aborted;
1023 * Since we're running in parallel with a request, consume the
1024 * status word before dropping the lock.
1026 if (rc == EIO || rc == EINTR) {
1027 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
1028 (void) efx_mcdi_poll_reboot(enp);
1029 emip->emi_new_epoch = B_TRUE;
1032 EFSYS_UNLOCK(enp->en_eslp, state);
1034 efx_mcdi_raise_exception(enp, emrp, rc);
1036 if (emrp != NULL && ev_cpl)
1037 emtp->emt_ev_cpl(emtp->emt_context);
1040 __checkReturn efx_rc_t
1041 efx_mcdi_get_version(
1042 __in efx_nic_t *enp,
1043 __in uint32_t flags,
1044 __out efx_mcdi_version_t *verp)
1046 efx_nic_board_info_t *board_infop = &verp->emv_board_info;
1047 EFX_MCDI_DECLARE_BUF(payload,
1048 MC_CMD_GET_VERSION_EXT_IN_LEN,
1049 MC_CMD_GET_VERSION_V2_OUT_LEN);
1050 efx_word_t *ver_words;
1051 uint16_t version[4];
1056 EFX_STATIC_ASSERT(sizeof (verp->emv_version) ==
1057 MC_CMD_GET_VERSION_OUT_VERSION_LEN);
1058 EFX_STATIC_ASSERT(sizeof (verp->emv_firmware) ==
1059 MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN);
1061 EFX_STATIC_ASSERT(EFX_MCDI_VERSION_BOARD_INFO ==
1062 (1U << MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN));
1064 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_serial) ==
1065 MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN);
1066 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_name) ==
1067 MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN);
1068 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_revision) ==
1069 MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN);
1071 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
1073 req.emr_cmd = MC_CMD_GET_VERSION;
1074 req.emr_in_buf = payload;
1075 req.emr_out_buf = payload;
1077 if ((flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
1078 /* Request basic + extended version information. */
1079 req.emr_in_length = MC_CMD_GET_VERSION_EXT_IN_LEN;
1080 req.emr_out_length = MC_CMD_GET_VERSION_V2_OUT_LEN;
1082 /* Request only basic version information. */
1083 req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
1084 req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
1087 efx_mcdi_execute(enp, &req);
1089 if (req.emr_rc != 0) {
1094 /* bootrom support */
1095 if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
1096 version[0] = version[1] = version[2] = version[3] = 0;
1097 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
1101 if (req.emr_out_length_used < req.emr_out_length) {
1106 ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
1107 version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
1108 version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
1109 version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
1110 version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
1111 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
1114 memset(verp, 0, sizeof (*verp));
1116 verp->emv_version[0] = version[0];
1117 verp->emv_version[1] = version[1];
1118 verp->emv_version[2] = version[2];
1119 verp->emv_version[3] = version[3];
1120 verp->emv_firmware = firmware;
1122 verp->emv_flags = MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_FLAGS);
1123 verp->emv_flags &= flags;
1125 if ((verp->emv_flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
1126 memcpy(board_infop->enbi_serial,
1127 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_SERIAL),
1128 sizeof (board_infop->enbi_serial));
1129 memcpy(board_infop->enbi_name,
1130 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_NAME),
1131 sizeof (board_infop->enbi_name));
1132 board_infop->enbi_revision =
1133 MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_BOARD_REVISION);
1141 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1146 static __checkReturn efx_rc_t
1147 efx_mcdi_get_boot_status(
1148 __in efx_nic_t *enp,
1149 __out efx_mcdi_boot_t *statusp)
1151 EFX_MCDI_DECLARE_BUF(payload,
1152 MC_CMD_GET_BOOT_STATUS_IN_LEN,
1153 MC_CMD_GET_BOOT_STATUS_OUT_LEN);
1157 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
1159 req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
1160 req.emr_in_buf = payload;
1161 req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
1162 req.emr_out_buf = payload;
1163 req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
1165 efx_mcdi_execute_quiet(enp, &req);
1168 * NOTE: Unprivileged functions cannot access boot status,
1169 * so the MCDI request will return EACCES. This is
1170 * also checked in efx_mcdi_version.
1173 if (req.emr_rc != 0) {
1178 if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
1183 if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
1184 GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
1185 *statusp = EFX_MCDI_BOOT_PRIMARY;
1187 *statusp = EFX_MCDI_BOOT_SECONDARY;
1194 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1199 __checkReturn efx_rc_t
1201 __in efx_nic_t *enp,
1202 __out_ecount_opt(4) uint16_t versionp[4],
1203 __out_opt uint32_t *buildp,
1204 __out_opt efx_mcdi_boot_t *statusp)
1206 efx_mcdi_version_t ver;
1207 efx_mcdi_boot_t status;
1210 rc = efx_mcdi_get_version(enp, 0, &ver);
1214 /* The bootrom doesn't understand BOOT_STATUS */
1215 if (MC_FW_VERSION_IS_BOOTLOADER(ver.emv_firmware)) {
1216 status = EFX_MCDI_BOOT_ROM;
1220 rc = efx_mcdi_get_boot_status(enp, &status);
1222 /* Unprivileged functions cannot access BOOT_STATUS */
1223 status = EFX_MCDI_BOOT_PRIMARY;
1224 memset(ver.emv_version, 0, sizeof (ver.emv_version));
1225 ver.emv_firmware = 0;
1226 } else if (rc != 0) {
1231 if (versionp != NULL)
1232 memcpy(versionp, ver.emv_version, sizeof (ver.emv_version));
1234 *buildp = ver.emv_firmware;
1235 if (statusp != NULL)
1243 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1248 __checkReturn efx_rc_t
1249 efx_mcdi_get_capabilities(
1250 __in efx_nic_t *enp,
1251 __out_opt uint32_t *flagsp,
1252 __out_opt uint16_t *rx_dpcpu_fw_idp,
1253 __out_opt uint16_t *tx_dpcpu_fw_idp,
1254 __out_opt uint32_t *flags2p,
1255 __out_opt uint32_t *tso2ncp)
1258 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
1259 MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
1260 boolean_t v2_capable;
1263 req.emr_cmd = MC_CMD_GET_CAPABILITIES;
1264 req.emr_in_buf = payload;
1265 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
1266 req.emr_out_buf = payload;
1267 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
1269 efx_mcdi_execute_quiet(enp, &req);
1271 if (req.emr_rc != 0) {
1276 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
1282 *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
1284 if (rx_dpcpu_fw_idp != NULL)
1285 *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
1286 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
1288 if (tx_dpcpu_fw_idp != NULL)
1289 *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
1290 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
1292 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
1293 v2_capable = B_FALSE;
1295 v2_capable = B_TRUE;
1297 if (flags2p != NULL) {
1298 *flags2p = (v2_capable) ?
1299 MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
1303 if (tso2ncp != NULL) {
1304 *tso2ncp = (v2_capable) ?
1306 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
1315 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1320 static __checkReturn efx_rc_t
1322 __in efx_nic_t *enp,
1323 __in boolean_t after_assertion)
1325 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN,
1326 MC_CMD_REBOOT_OUT_LEN);
1331 * We could require the caller to have caused en_mod_flags=0 to
1332 * call this function. This doesn't help the other port though,
1333 * who's about to get the MC ripped out from underneath them.
1334 * Since they have to cope with the subsequent fallout of MCDI
1335 * failures, we should as well.
1337 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
1339 req.emr_cmd = MC_CMD_REBOOT;
1340 req.emr_in_buf = payload;
1341 req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
1342 req.emr_out_buf = payload;
1343 req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
1345 MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
1346 (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
1348 efx_mcdi_execute_quiet(enp, &req);
1350 if (req.emr_rc == EACCES) {
1351 /* Unprivileged functions cannot reboot the MC. */
1355 /* A successful reboot request returns EIO. */
1356 if (req.emr_rc != 0 && req.emr_rc != EIO) {
1365 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1370 __checkReturn efx_rc_t
1372 __in efx_nic_t *enp)
1374 return (efx_mcdi_do_reboot(enp, B_FALSE));
1377 __checkReturn efx_rc_t
1378 efx_mcdi_exit_assertion_handler(
1379 __in efx_nic_t *enp)
1381 return (efx_mcdi_do_reboot(enp, B_TRUE));
1384 __checkReturn efx_rc_t
1385 efx_mcdi_read_assertion(
1386 __in efx_nic_t *enp)
1389 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
1390 MC_CMD_GET_ASSERTS_OUT_LEN);
1399 * Before we attempt to chat to the MC, we should verify that the MC
 1400 * isn't in its assertion handler, either due to a previous reboot,
1401 * or because we're reinitializing due to an eec_exception().
1403 * Use GET_ASSERTS to read any assertion state that may be present.
1404 * Retry this command twice. Once because a boot-time assertion failure
1405 * might cause the 1st MCDI request to fail. And once again because
1406 * we might race with efx_mcdi_exit_assertion_handler() running on
1407 * partner port(s) on the same NIC.
1411 (void) memset(payload, 0, sizeof (payload));
1412 req.emr_cmd = MC_CMD_GET_ASSERTS;
1413 req.emr_in_buf = payload;
1414 req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
1415 req.emr_out_buf = payload;
1416 req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
1418 MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
1419 efx_mcdi_execute_quiet(enp, &req);
1421 } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
1423 if (req.emr_rc != 0) {
1424 if (req.emr_rc == EACCES) {
1425 /* Unprivileged functions cannot clear assertions. */
1432 if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
1437 /* Print out any assertion state recorded */
1438 flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
1439 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
1442 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
1443 ? "system-level assertion"
1444 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
1445 ? "thread-level assertion"
1446 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
1448 : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
1449 ? "illegal address trap"
1450 : "unknown assertion";
1451 EFSYS_PROBE3(mcpu_assertion,
1452 const char *, reason, unsigned int,
1453 MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
1455 MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
1457 /* Print out the registers (r1 ... r31) */
1458 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
1460 index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
1462 EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
1463 EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
1465 ofst += sizeof (efx_dword_t);
1467 EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
1475 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1482 * Internal routines for specific MCDI requests.
/*
 * Attach the driver to (or detach it from, per 'attach') the NIC via
 * MC_CMD_DRV_ATTACH. The V2 request, which carries the driver version
 * string, is used when enp->en_drv_version is non-empty; otherwise the
 * shorter V1 request length is used.
 */
1485 __checkReturn efx_rc_t
1486 efx_mcdi_drv_attach(
1487 __in efx_nic_t *enp,
1488 __in boolean_t attach)
1491 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN,
1492 MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
1495 req.emr_cmd = MC_CMD_DRV_ATTACH;
1496 req.emr_in_buf = payload;
1497 if (enp->en_drv_version[0] == '\0') {
1498 req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
1500 req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN;
1502 req.emr_out_buf = payload;
1503 req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
1506 * Typically, client drivers use DONT_CARE for the datapath firmware
1507 * type to ensure that the driver can attach to an unprivileged
1508 * function. The datapath firmware type to use is controlled by the
1510 * If a client driver wishes to attach with a specific datapath firmware
1511 * type, that can be passed in second argument of efx_nic_probe API. One
1512 * such example is the ESXi native driver that attempts attaching with
1513 * FULL_FEATURED datapath firmware type first and falls back to
1514 * DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH fails.
1516 MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
1517 DRV_ATTACH_IN_ATTACH, attach ? 1 : 0,
1518 DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE);
1519 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
1520 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv);
/* V2 only: copy the (fixed-size) driver version string into the request. */
1522 if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) {
1523 EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) ==
1524 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN);
1525 memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION),
1526 enp->en_drv_version,
1527 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN);
1530 efx_mcdi_execute(enp, &req);
1532 if (req.emr_rc != 0) {
1537 if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
1547 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query board configuration via MC_CMD_GET_BOARD_CFG: board type,
 * per-port capability flags and the base MAC address for the calling
 * port (emip->emi_port is 1 or 2). All out-parameters are optional.
 */
1552 __checkReturn efx_rc_t
1553 efx_mcdi_get_board_cfg(
1554 __in efx_nic_t *enp,
1555 __out_opt uint32_t *board_typep,
1556 __out_opt efx_dword_t *capabilitiesp,
1557 __out_ecount_opt(6) uint8_t mac_addrp[6])
1559 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
1561 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
1562 MC_CMD_GET_BOARD_CFG_OUT_LENMIN);
1565 req.emr_cmd = MC_CMD_GET_BOARD_CFG;
1566 req.emr_in_buf = payload;
1567 req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
1568 req.emr_out_buf = payload;
1569 req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
1571 efx_mcdi_execute(enp, &req);
1573 if (req.emr_rc != 0) {
1578 if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
/* Select the per-port base MAC address field from the response. */
1583 if (mac_addrp != NULL) {
1586 if (emip->emi_port == 1) {
1587 addrp = MCDI_OUT2(req, uint8_t,
1588 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
1589 } else if (emip->emi_port == 2) {
1590 addrp = MCDI_OUT2(req, uint8_t,
1591 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
1597 EFX_MAC_ADDR_COPY(mac_addrp, addrp);
/* Per-port capability dword, selected the same way. */
1600 if (capabilitiesp != NULL) {
1601 if (emip->emi_port == 1) {
1602 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
1603 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
1604 } else if (emip->emi_port == 2) {
1605 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
1606 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
1613 if (board_typep != NULL) {
1614 *board_typep = MCDI_OUT_DWORD(req,
1615 GET_BOARD_CFG_OUT_BOARD_TYPE);
1627 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the per-function resource limits (number of event, RX and TX
 * queues) via MC_CMD_GET_RESOURCE_LIMITS. Each out-parameter is
 * optional and only written when non-NULL.
 */
1632 __checkReturn efx_rc_t
1633 efx_mcdi_get_resource_limits(
1634 __in efx_nic_t *enp,
1635 __out_opt uint32_t *nevqp,
1636 __out_opt uint32_t *nrxqp,
1637 __out_opt uint32_t *ntxqp)
1640 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
1641 MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN);
1644 req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
1645 req.emr_in_buf = payload;
1646 req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
1647 req.emr_out_buf = payload;
1648 req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
1650 efx_mcdi_execute(enp, &req);
1652 if (req.emr_rc != 0) {
1657 if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
1663 *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
1665 *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
1667 *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
1674 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Populate PHY-related state in en_nic_cfg and en_port from
 * MC_CMD_GET_PHY_CFG: PHY type/name/revision, fixed port media type,
 * supported capability mask, MDIO channel, and (optionally) LED,
 * flags, stats and BIST masks.
 */
1679 __checkReturn efx_rc_t
1680 efx_mcdi_get_phy_cfg(
1681 __in efx_nic_t *enp)
1683 efx_port_t *epp = &(enp->en_port);
1684 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1686 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN,
1687 MC_CMD_GET_PHY_CFG_OUT_LEN);
1692 uint32_t phy_media_type;
1695 req.emr_cmd = MC_CMD_GET_PHY_CFG;
1696 req.emr_in_buf = payload;
1697 req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
1698 req.emr_out_buf = payload;
1699 req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
1701 efx_mcdi_execute(enp, &req);
1703 if (req.emr_rc != 0) {
1708 if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
1713 encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
/* Copy the PHY name, bounded and always NUL-terminated (buffer pre-zeroed). */
1715 namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME);
1716 namelen = MIN(sizeof (encp->enc_phy_name) - 1,
1717 strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
1718 (void) memset(encp->enc_phy_name, 0,
1719 sizeof (encp->enc_phy_name));
1720 memcpy(encp->enc_phy_name, namep, namelen);
1721 #endif /* EFSYS_OPT_NAMES */
1722 (void) memset(encp->enc_phy_revision, 0,
1723 sizeof (encp->enc_phy_revision));
1724 memcpy(encp->enc_phy_revision,
1725 MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
1726 MIN(sizeof (encp->enc_phy_revision) - 1,
1727 MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
1728 #if EFSYS_OPT_PHY_LED_CONTROL
1729 encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
1730 (1 << EFX_PHY_LED_OFF) |
1731 (1 << EFX_PHY_LED_ON));
1732 #endif /* EFSYS_OPT_PHY_LED_CONTROL */
1734 /* Get the media type of the fixed port, if recognised. */
1735 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
1736 EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
1737 EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
1738 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
1739 EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
1740 EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
1741 EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
1742 phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
1743 epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type;
/* Out-of-range media values from firmware are mapped to INVALID. */
1744 if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
1745 epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
1747 epp->ep_phy_cap_mask =
1748 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
1749 #if EFSYS_OPT_PHY_FLAGS
1750 encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
1751 #endif /* EFSYS_OPT_PHY_FLAGS */
1753 encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
1755 /* Populate internal state */
1756 encp->enc_mcdi_mdio_channel =
1757 (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
1759 #if EFSYS_OPT_PHY_STATS
1760 encp->enc_mcdi_phy_stat_mask =
1761 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
1762 #endif /* EFSYS_OPT_PHY_STATS */
/* Build the supported-BIST mask from individual PHY_CFG flags. */
1765 encp->enc_bist_mask = 0;
1766 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
1767 GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
1768 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
1769 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
1770 GET_PHY_CFG_OUT_BIST_CABLE_LONG))
1771 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
1772 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
1773 GET_PHY_CFG_OUT_BIST))
1774 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
1775 #endif /* EFSYS_OPT_BIST */
1782 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report whether firmware updates are permitted for this function.
 * Delegates to the bus-specific emco_feature_supported op; a NULL op
 * table means an earlier device, where updates were always supported.
 */
1787 __checkReturn efx_rc_t
1788 efx_mcdi_firmware_update_supported(
1789 __in efx_nic_t *enp,
1790 __out boolean_t *supportedp)
1792 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1795 if (emcop != NULL) {
1796 if ((rc = emcop->emco_feature_supported(enp,
1797 EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
1800 /* Earlier devices always supported updates */
1801 *supportedp = B_TRUE;
1807 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report whether MAC address changes are permitted for this function.
 * Same delegation pattern as efx_mcdi_firmware_update_supported().
 */
1812 __checkReturn efx_rc_t
1813 efx_mcdi_macaddr_change_supported(
1814 __in efx_nic_t *enp,
1815 __out boolean_t *supportedp)
1817 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1820 if (emcop != NULL) {
1821 if ((rc = emcop->emco_feature_supported(enp,
1822 EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
1825 /* Earlier devices always supported MAC changes */
1826 *supportedp = B_TRUE;
1832 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report whether link control is permitted for this function.
 * Same delegation pattern as efx_mcdi_firmware_update_supported().
 */
1837 __checkReturn efx_rc_t
1838 efx_mcdi_link_control_supported(
1839 __in efx_nic_t *enp,
1840 __out boolean_t *supportedp)
1842 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1845 if (emcop != NULL) {
1846 if ((rc = emcop->emco_feature_supported(enp,
1847 EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
1850 /* Earlier devices always supported link control */
1851 *supportedp = B_TRUE;
1857 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report whether MAC spoofing is permitted for this function.
 * Same delegation pattern as efx_mcdi_firmware_update_supported().
 */
1862 __checkReturn efx_rc_t
1863 efx_mcdi_mac_spoofing_supported(
1864 __in efx_nic_t *enp,
1865 __out boolean_t *supportedp)
1867 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
1870 if (emcop != NULL) {
1871 if ((rc = emcop->emco_feature_supported(enp,
1872 EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
1875 /* Earlier devices always supported MAC spoofing */
1876 *supportedp = B_TRUE;
1882 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1891 * Enter bist offline mode. This is a fw mode which puts the NIC into a state
1892 * where memory BIST tests can be run and not much else can interfere or happen.
1893 * A reboot is required to exit this mode.
1895 __checkReturn efx_rc_t
1896 efx_mcdi_bist_enable_offline(
1897 __in efx_nic_t *enp)
/*
 * This MCDI command carries no request or response payload (asserted
 * below), so no payload buffer is declared and the buffers are NULL.
 */
1902 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
1903 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
1905 req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
1906 req.emr_in_buf = NULL;
1907 req.emr_in_length = 0;
1908 req.emr_out_buf = NULL;
1909 req.emr_out_length = 0;
1911 efx_mcdi_execute(enp, &req);
1913 if (req.emr_rc != 0) {
1921 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1925 #endif /* EFX_OPTS_EF10() */
/*
 * Start a built-in self-test of the requested kind by mapping the
 * libefx efx_bist_type_t onto the corresponding MC_CMD_START_BIST
 * type value and issuing the command.
 */
1927 __checkReturn efx_rc_t
1928 efx_mcdi_bist_start(
1929 __in efx_nic_t *enp,
1930 __in efx_bist_type_t type)
1933 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN,
1934 MC_CMD_START_BIST_OUT_LEN);
1937 req.emr_cmd = MC_CMD_START_BIST;
1938 req.emr_in_buf = payload;
1939 req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
1940 req.emr_out_buf = payload;
1941 req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
1944 case EFX_BIST_TYPE_PHY_NORMAL:
1945 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
1947 case EFX_BIST_TYPE_PHY_CABLE_SHORT:
1948 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
1949 MC_CMD_PHY_BIST_CABLE_SHORT);
1951 case EFX_BIST_TYPE_PHY_CABLE_LONG:
1952 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
1953 MC_CMD_PHY_BIST_CABLE_LONG);
1955 case EFX_BIST_TYPE_MC_MEM:
1956 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
1957 MC_CMD_MC_MEM_BIST);
1959 case EFX_BIST_TYPE_SAT_MEM:
1960 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
1961 MC_CMD_PORT_MEM_BIST);
1963 case EFX_BIST_TYPE_REG:
1964 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
1971 efx_mcdi_execute(enp, &req);
1973 if (req.emr_rc != 0) {
1981 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1986 #endif /* EFSYS_OPT_BIST */
1989 /* Enable logging of some events (e.g. link state changes) */
/*
 * Directs MC log output to event queue 0 via MC_CMD_LOG_CTRL.
 * NOTE(review): the function-name line is elided in this listing;
 * presumably this is efx_mcdi_log_ctrl() — confirm against full source.
 */
1990 __checkReturn efx_rc_t
1992 __in efx_nic_t *enp)
1995 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN,
1996 MC_CMD_LOG_CTRL_OUT_LEN);
1999 req.emr_cmd = MC_CMD_LOG_CTRL;
2000 req.emr_in_buf = payload;
2001 req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
2002 req.emr_out_buf = payload;
2003 req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
/* Destination is an event queue; the queue index used is 0. */
2005 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
2006 MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
2007 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
2009 efx_mcdi_execute(enp, &req);
2011 if (req.emr_rc != 0) {
2019 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2025 #if EFSYS_OPT_MAC_STATS
/*
 * Core MC_CMD_MAC_STATS helper used by the clear/upload/periodic
 * wrappers in this file (which call it as efx_mcdi_mac_stats()).
 * The efx_stats_action_t is decoded into the individual command
 * flags below; enable/events/upload actions require a DMA buffer
 * large enough for enc_mac_stats_nstats quadword counters.
 */
2027 __checkReturn efx_rc_t
2029 __in efx_nic_t *enp,
2030 __in uint32_t vport_id,
2031 __in_opt efsys_mem_t *esmp,
2032 __in efx_stats_action_t action,
2033 __in uint16_t period_ms)
2036 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN,
2037 MC_CMD_MAC_STATS_V2_OUT_DMA_LEN);
/* Decode the action into individual flag values for the command. */
2038 int clear = (action == EFX_STATS_CLEAR);
2039 int upload = (action == EFX_STATS_UPLOAD);
2040 int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
2041 int events = (action == EFX_STATS_ENABLE_EVENTS);
2042 int disable = (action == EFX_STATS_DISABLE);
2045 req.emr_cmd = MC_CMD_MAC_STATS;
2046 req.emr_in_buf = payload;
2047 req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
2048 req.emr_out_buf = payload;
2049 req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN;
2051 MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
2052 MAC_STATS_IN_DMA, upload,
2053 MAC_STATS_IN_CLEAR, clear,
2054 MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
2055 MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
2056 MAC_STATS_IN_PERIODIC_NOEVENT, !events,
2057 MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
2059 if (enable || events || upload) {
2060 const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
2063 /* Periodic stats or stats upload require a DMA buffer */
2069 if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
2070 /* MAC stats count too small for legacy MAC stats */
2075 bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);
2077 if (EFSYS_MEM_SIZE(esmp) < bytes) {
2078 /* DMA buffer too small */
/* Split the 64-bit DMA address into LO/HI dwords for the request. */
2083 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
2084 EFSYS_MEM_ADDR(esmp) & 0xffffffff);
2085 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
2086 EFSYS_MEM_ADDR(esmp) >> 32);
2087 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
2091 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
2092 * as this may fail (and leave periodic DMA enabled) if the
2093 * vadapter has already been deleted.
2095 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
2096 (disable ? EVB_PORT_ID_NULL : vport_id));
2098 efx_mcdi_execute(enp, &req);
2100 if (req.emr_rc != 0) {
2101 /* EF10: Expect ENOENT if no DMA queues are initialised */
2102 if ((req.emr_rc != ENOENT) ||
2103 (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
2118 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Clear the MAC statistics counters for this function's vport. */
2123 __checkReturn efx_rc_t
2124 efx_mcdi_mac_stats_clear(
2125 __in efx_nic_t *enp)
2129 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
2130 EFX_STATS_CLEAR, 0)) != 0)
2136 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Request a one-shot DMA of the MAC statistics into the supplied
 * buffer (EFX_STATS_UPLOAD) for this function's vport.
 */
2141 __checkReturn efx_rc_t
2142 efx_mcdi_mac_stats_upload(
2143 __in efx_nic_t *enp,
2144 __in efsys_mem_t *esmp)
2149 * The MC DMAs aggregate statistics for our convenience, so we can
2150 * avoid having to pull the statistics buffer into the cache to
2151 * maintain cumulative statistics.
2153 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2154 EFX_STATS_UPLOAD, 0)) != 0)
2160 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Enable or disable periodic DMA of MAC statistics. A period of
 * zero disables; otherwise 'events' selects whether a completion
 * event is raised per update (ENABLE_EVENTS) or not (ENABLE_NOEVENTS).
 */
2165 __checkReturn efx_rc_t
2166 efx_mcdi_mac_stats_periodic(
2167 __in efx_nic_t *enp,
2168 __in efsys_mem_t *esmp,
2169 __in uint16_t period_ms,
2170 __in boolean_t events)
2175 * The MC DMAs aggregate statistics for our convenience, so we can
2176 * avoid having to pull the statistics buffer into the cache to
2177 * maintain cumulative statistics.
2178 * Huntington uses a fixed 1sec period.
2179 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
2182 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
2183 EFX_STATS_DISABLE, 0);
2185 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2186 EFX_STATS_ENABLE_EVENTS, period_ms);
2188 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2189 EFX_STATS_ENABLE_NOEVENTS, period_ms);
2197 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2202 #endif /* EFSYS_OPT_MAC_STATS */
2204 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
/*
 * Translate a firmware PCIE_INTERFACE_* value into the libefx
 * efx_pcie_interface_t enumeration. Unrecognised values take the
 * failure path (out-parameter left unwritten).
 */
2206 __checkReturn efx_rc_t
2207 efx_mcdi_intf_from_pcie(
2208 __in uint32_t pcie_intf,
2209 __out efx_pcie_interface_t *efx_intf)
2213 switch (pcie_intf) {
2214 case PCIE_INTERFACE_CALLER:
2215 *efx_intf = EFX_PCIE_INTERFACE_CALLER;
2217 case PCIE_INTERFACE_HOST_PRIMARY:
2218 *efx_intf = EFX_PCIE_INTERFACE_HOST_PRIMARY;
2220 case PCIE_INTERFACE_NIC_EMBEDDED:
2221 *efx_intf = EFX_PCIE_INTERFACE_NIC_EMBEDDED;
2231 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2237 * This function returns the pf and vf number of a function. If it is a pf the
2238 * vf number is 0xffff. The vf number is the index of the vf on that
2239 * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),
2240 * (pf=0,vf=1), (pf=0,vf=2) and the pf will return (pf=0, vf=0xffff).
/*
 * Query the PF/VF numbers (and optionally the PCIe interface) of the
 * calling function via MC_CMD_GET_FUNCTION_INFO. Firmware that only
 * returns the short (V1) response is treated as HOST_PRIMARY.
 */
2242 __checkReturn efx_rc_t
2243 efx_mcdi_get_function_info(
2244 __in efx_nic_t *enp,
2245 __out uint32_t *pfp,
2246 __out_opt uint32_t *vfp,
2247 __out_opt efx_pcie_interface_t *intfp)
2249 efx_pcie_interface_t intf;
2251 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN,
2252 MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN);
2256 req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
2257 req.emr_in_buf = payload;
2258 req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
2259 req.emr_out_buf = payload;
2260 req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN;
2262 efx_mcdi_execute(enp, &req);
2264 if (req.emr_rc != 0) {
2269 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
2274 *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
2276 *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
/* V2 response carries the PCIe interface; default if absent. */
2278 if (req.emr_out_length < MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN) {
2279 intf = EFX_PCIE_INTERFACE_HOST_PRIMARY;
2281 pcie_intf = MCDI_OUT_DWORD(req,
2282 GET_FUNCTION_INFO_OUT_V2_INTF);
2284 rc = efx_mcdi_intf_from_pcie(pcie_intf, &intf);
2299 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read the privilege mask of the given PF/VF function via
 * MC_CMD_PRIVILEGE_MASK and return it in *maskp (the OLD_MASK field,
 * i.e. the mask as currently in force).
 */
2304 __checkReturn efx_rc_t
2305 efx_mcdi_privilege_mask(
2306 __in efx_nic_t *enp,
2309 __out uint32_t *maskp)
2312 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN,
2313 MC_CMD_PRIVILEGE_MASK_OUT_LEN);
2316 req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
2317 req.emr_in_buf = payload;
2318 req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
2319 req.emr_out_buf = payload;
2320 req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
2322 MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
2323 PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
2324 PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
2326 efx_mcdi_execute(enp, &req);
2328 if (req.emr_rc != 0) {
2333 if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
2338 *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
2345 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2350 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2352 __checkReturn efx_rc_t
2353 efx_mcdi_set_workaround(
2354 __in efx_nic_t *enp,
2356 __in boolean_t enabled,
2357 __out_opt uint32_t *flagsp)
2360 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN,
2361 MC_CMD_WORKAROUND_EXT_OUT_LEN);
2364 req.emr_cmd = MC_CMD_WORKAROUND;
2365 req.emr_in_buf = payload;
2366 req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
2367 req.emr_out_buf = payload;
2368 req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN;
2370 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
2371 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
2373 efx_mcdi_execute_quiet(enp, &req);
2375 if (req.emr_rc != 0) {
2380 if (flagsp != NULL) {
2381 if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
2382 *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
2390 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Query the sets of firmware workarounds that are implemented and
 * currently enabled, via MC_CMD_GET_WORKAROUNDS (no request payload).
 * Both out-parameters are optional bitmasks.
 */
2396 __checkReturn efx_rc_t
2397 efx_mcdi_get_workarounds(
2398 __in efx_nic_t *enp,
2399 __out_opt uint32_t *implementedp,
2400 __out_opt uint32_t *enabledp)
2403 EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
2406 req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
2407 req.emr_in_buf = NULL;
2408 req.emr_in_length = 0;
2409 req.emr_out_buf = payload;
2410 req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
2412 efx_mcdi_execute(enp, &req);
2414 if (req.emr_rc != 0) {
2419 if (req.emr_out_length_used < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
2424 if (implementedp != NULL) {
2426 MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
2429 if (enabledp != NULL) {
2430 *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
2438 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2444 * Size of media information page in accordance with SFF-8472 and SFF-8436.
2445 * It is used in MCDI interface as well.
/*
 * 0x80 bytes corresponds to one half (lower 0x00-0x7f or upper
 * 0x80-0xff) of the 256-byte SFF module memory space; see the page
 * mapping in efx_mcdi_phy_module_get_info() below.
 */
2447 #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
2450 * Transceiver identifiers from SFF-8024 Table 4-1.
2452 #define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */
2453 #define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */
2454 #define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */
2455 #define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */
/*
 * Fetch one full 0x80-byte MCDI media-information page
 * (MC_CMD_GET_PHY_MEDIA_INFO) and copy the sub-range
 * [offset, offset + len) out of it into 'data'.
 * The caller must ensure offset + len fits within one page (asserted).
 */
2457 static __checkReturn efx_rc_t
2458 efx_mcdi_get_phy_media_info(
2459 __in efx_nic_t *enp,
2460 __in uint32_t mcdi_page,
2461 __in uint8_t offset,
2463 __out_bcount(len) uint8_t *data)
2466 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
2467 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
2468 EFX_PHY_MEDIA_INFO_PAGE_SIZE));
2471 EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2473 req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
2474 req.emr_in_buf = payload;
2475 req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
2476 req.emr_out_buf = payload;
2477 req.emr_out_length =
2478 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2480 MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
2482 efx_mcdi_execute(enp, &req);
2484 if (req.emr_rc != 0) {
/* A full page must come back; anything else is treated as failure. */
2489 if (req.emr_out_length_used !=
2490 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
2495 if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
2496 EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
2502 MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
2512 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Read transceiver module information: probe the SFF transceiver
 * identifier at MCDI page 0 / offset 0, map the caller's 2-wire
 * device address onto the SFP or QSFP MCDI page layout, then issue
 * one or two page reads to cover [offset, offset + len).
 */
2517 __checkReturn efx_rc_t
2518 efx_mcdi_phy_module_get_info(
2519 __in efx_nic_t *enp,
2520 __in uint8_t dev_addr,
2523 __out_bcount(len) uint8_t *data)
2525 efx_port_t *epp = &(enp->en_port);
2527 uint32_t mcdi_lower_page;
2528 uint32_t mcdi_upper_page;
2531 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
2534 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
2535 * Offset plus length interface allows to access page 0 only.
2536 * I.e. non-zero upper pages are not accessible.
2537 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
2538 * QSFP+ Memory Map for details on how information is structured
2541 switch (epp->ep_fixed_port_type) {
2542 case EFX_PHY_MEDIA_SFP_PLUS:
2543 case EFX_PHY_MEDIA_QSFP_PLUS:
2544 /* Port type supports modules */
2552 * For all supported port types, MCDI page 0 offset 0 holds the
2553 * transceiver identifier. Probe to determine the data layout.
2554 * Definitions from SFF-8024 Table 4-1.
2556 rc = efx_mcdi_get_phy_media_info(enp,
2557 0, 0, sizeof(id), &id);
2562 case EFX_SFF_TRANSCEIVER_ID_SFP:
2564 * In accordance with SFF-8472 Diagnostic Monitoring
2565 * Interface for Optical Transceivers section 4 Memory
2566 * Organization two 2-wire addresses are defined.
2569 /* Base information */
2570 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
2572 * MCDI page 0 should be used to access lower
2573 * page 0 (0x00 - 0x7f) at the device address 0xA0.
2575 mcdi_lower_page = 0;
2577 * MCDI page 1 should be used to access upper
2578 * page 0 (0x80 - 0xff) at the device address 0xA0.
2580 mcdi_upper_page = 1;
2583 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
2585 * MCDI page 2 should be used to access lower
2586 * page 0 (0x00 - 0x7f) at the device address 0xA2.
2588 mcdi_lower_page = 2;
2590 * MCDI page 3 should be used to access upper
2591 * page 0 (0x80 - 0xff) at the device address 0xA2.
2593 mcdi_upper_page = 3;
2600 case EFX_SFF_TRANSCEIVER_ID_QSFP:
2601 case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS:
2602 case EFX_SFF_TRANSCEIVER_ID_QSFP28:
2604 case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
2606 * MCDI page -1 should be used to access lower page 0
2609 mcdi_lower_page = (uint32_t)-1;
2611 * MCDI page 0 should be used to access upper page 0
2614 mcdi_upper_page = 0;
2626 EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF);
/* First read: the portion of the request falling in the lower page. */
2628 if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
2630 MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
2632 rc = efx_mcdi_get_phy_media_info(enp,
2633 mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data);
/* Rebase the remaining range into the upper page. */
2642 offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
2646 EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2647 EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2649 rc = efx_mcdi_get_phy_media_info(enp,
2650 mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data);
2666 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2671 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2673 #define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM
2676 # if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS)
2677 # error "INIT_EVQ_MAXNBUFS too small"
2679 #endif /* EFX_OPTS_EF10 */
2680 #if EFSYS_OPT_RIVERHEAD
2681 # if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS)
2682 # error "INIT_EVQ_MAXNBUFS too small"
2684 #endif /* EFSYS_OPT_RIVERHEAD */
/*
 * Create an event queue via MC_CMD_INIT_EVQ (V2 request layout).
 * Configures interrupt vs. driven notification, event-merge/cut-through
 * behaviour (firmware-chosen on V2-capable firmware, manual otherwise),
 * optional moderation timer, and the DMA buffer address list.
 * NOTE(review): the function-name line is elided in this listing;
 * presumably this is efx_mcdi_init_evq() — confirm against full source.
 */
2686 __checkReturn efx_rc_t
2688 __in efx_nic_t *enp,
2689 __in unsigned int instance,
2690 __in efsys_mem_t *esmp,
2693 __in uint32_t target_evq,
2695 __in uint32_t flags,
2696 __in boolean_t low_latency)
2698 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2700 EFX_MCDI_DECLARE_BUF(payload,
2701 MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS),
2702 MC_CMD_INIT_EVQ_V2_OUT_LEN);
2703 boolean_t interrupting;
2704 int ev_extended_width;
2707 unsigned int evq_type;
2708 efx_qword_t *dma_addr;
/* The request length is variable: one 64-bit DMA address per buffer. */
2714 npages = efx_evq_nbufs(enp, nevs, flags);
2715 if (npages > INIT_EVQ_MAXNBUFS) {
2720 req.emr_cmd = MC_CMD_INIT_EVQ;
2721 req.emr_in_buf = payload;
2722 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
2723 req.emr_out_buf = payload;
2724 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
2726 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
2727 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
2729 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
2730 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
2733 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
2735 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TARGET_EVQ, target_evq);
2737 if (encp->enc_init_evq_v2_supported) {
2739 * On Medford the low latency license is required to enable RX
2740 * and event cut through and to disable RX batching. If event
2741 * queue type in flags is auto, we let the firmware decide the
2742 * settings to use. If the adapter has a low latency license,
2743 * it will choose the best settings for low latency, otherwise
2744 * it will choose the best settings for throughput.
2746 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
2747 case EFX_EVQ_FLAGS_TYPE_AUTO:
2748 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
2750 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
2751 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
2753 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
2754 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
2760 /* EvQ type controls merging, no manual settings */
2764 /* EvQ types other than manual are not supported */
2765 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL;
2767 * On Huntington RX and TX event batching can only be requested
2768 * together (even if the datapath firmware doesn't actually
2769 * support RX batching). If event cut through is enabled no RX
2770 * batching will occur.
2772 * So always enable RX and TX event batching, and enable event
2773 * cut through if we want low latency operation.
2776 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
2777 case EFX_EVQ_FLAGS_TYPE_AUTO:
2778 ev_cut_through = low_latency ? 1 : 0;
2780 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
2783 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
2793 * On EF100, extended width event queues have a different event
2794 * descriptor layout and are used to support descriptor proxy queues.
2796 ev_extended_width = 0;
2797 #if EFSYS_OPT_EV_EXTENDED_WIDTH
2798 if (encp->enc_init_evq_extended_width_supported) {
2799 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
2800 ev_extended_width = 1;
2804 MCDI_IN_POPULATE_DWORD_8(req, INIT_EVQ_V2_IN_FLAGS,
2805 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
2806 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
2807 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
2808 INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through,
2809 INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge,
2810 INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge,
2811 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type,
2812 INIT_EVQ_V2_IN_FLAG_EXT_WIDTH, ev_extended_width);
2814 /* If the value is zero then disable the timer */
2816 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
2817 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
2818 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
2819 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
/* Otherwise convert the moderation period to timer ticks. */
2823 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
2826 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
2827 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
2828 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
2829 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
2832 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
2833 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
2834 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
/* Fill in one 64-bit DMA address per EFX_BUF_SIZE buffer. */
2836 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
2837 addr = EFSYS_MEM_ADDR(esmp);
2839 for (i = 0; i < npages; i++) {
2840 EFX_POPULATE_QWORD_2(*dma_addr,
2841 EFX_DWORD_1, (uint32_t)(addr >> 32),
2842 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
2845 addr += EFX_BUF_SIZE;
2848 efx_mcdi_execute(enp, &req);
2850 if (req.emr_rc != 0) {
2855 if (encp->enc_init_evq_v2_supported) {
2856 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
2860 EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
2861 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
2863 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
2869 /* NOTE: ignore the returned IRQ param as firmware does not set it. */
2884 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an event queue via MC_CMD_FINI_EVQ (quiet execution,
 * since EALREADY after an MC reboot is expected — see note below).
 * NOTE(review): the function-name line is elided in this listing;
 * presumably this is efx_mcdi_fini_evq() — confirm against full source.
 */
2889 __checkReturn efx_rc_t
2891 __in efx_nic_t *enp,
2892 __in uint32_t instance)
2895 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
2896 MC_CMD_FINI_EVQ_OUT_LEN);
2899 req.emr_cmd = MC_CMD_FINI_EVQ;
2900 req.emr_in_buf = payload;
2901 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
2902 req.emr_out_buf = payload;
2903 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
2905 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
2907 efx_mcdi_execute_quiet(enp, &req);
2909 if (req.emr_rc != 0) {
2918 * EALREADY is not an error, but indicates that the MC has rebooted and
2919 * that the EVQ has already been destroyed.
2922 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Create a receive queue on the MC via MC_CMD_INIT_RXQ (V5 request layout).
 *
 * Validates the backing DMA memory, selects the RXQ DMA mode (single packet,
 * packed stream, or equal-stride super-buffer) from @params, populates the
 * request flags and the per-page DMA address array, and issues the command.
 *
 * NOTE(review): the source view is fragmentary — the function name line,
 * error labels and returns are not visible. Comments describe visible code.
 */
2927 __checkReturn efx_rc_t
2929 __in efx_nic_t *enp,
2930 __in uint32_t ndescs,
2931 __in efx_evq_t *eep,
2932 __in uint32_t label,
2933 __in uint32_t instance,
2934 __in efsys_mem_t *esmp,
2935 __in const efx_mcdi_init_rxq_params_t *params)
2937 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
2939 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V5_IN_LEN,
2940 MC_CMD_INIT_RXQ_V5_OUT_LEN);
2941 int npages = efx_rxq_nbufs(enp, ndescs);
2943 efx_qword_t *dma_addr;
2947 boolean_t want_outer_classes;
2948 boolean_t no_cont_ev;
2950 EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs);
/* The queue memory must exist and be large enough for ndescs descriptors. */
2952 if ((esmp == NULL) ||
2953 (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) {
/* NO_CONT_EV mode currently requires scatter to be disabled. */
2958 no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV);
2959 if ((no_cont_ev == B_TRUE) && (params->disable_scatter == B_FALSE)) {
2960 /* TODO: Support scatter in NO_CONT_EV mode */
/*
 * DMA mode selection: packed stream if a packed-stream buffer size is
 * given, else equal-stride super-buffer if bufs-per-desc is given,
 * otherwise the default single-packet mode.
 */
2965 if (params->ps_buf_size > 0)
2966 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
2967 else if (params->es_bufs_per_desc > 0)
2968 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
2970 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
2972 if (encp->enc_tunnel_encapsulations_supported != 0 &&
2973 !params->want_inner_classes) {
2975 * WANT_OUTER_CLASSES can only be specified on hardware which
2976 * supports tunnel encapsulation offloads, even though it is
2977 * effectively the behaviour the hardware gives.
2979 * Also, on hardware which does support such offloads, older
2980 * firmware rejects the flag if the offloads are not supported
2981 * by the current firmware variant, which means this may fail if
2982 * the capabilities are not updated when the firmware variant
2983 * changes. This is not an issue on newer firmware, as it was
2984 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
2985 * specified on all firmware variants.
2987 want_outer_classes = B_TRUE;
2989 want_outer_classes = B_FALSE;
2992 req.emr_cmd = MC_CMD_INIT_RXQ;
2993 req.emr_in_buf = payload;
2994 req.emr_in_length = MC_CMD_INIT_RXQ_V5_IN_LEN;
2995 req.emr_out_buf = payload;
2996 req.emr_out_length = MC_CMD_INIT_RXQ_V5_OUT_LEN;
2998 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
2999 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index);
3000 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
3001 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
3002 MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS,
3003 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
3004 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
3005 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
3006 INIT_RXQ_EXT_IN_CRC_MODE, 0,
3007 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
3008 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, params->disable_scatter,
3009 INIT_RXQ_EXT_IN_DMA_MODE,
3011 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, params->ps_buf_size,
3012 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes,
3013 INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev);
3014 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
3015 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id);
/* Equal-stride super-buffer parameters (V3 fields) when that mode is on. */
3017 if (params->es_bufs_per_desc > 0) {
3018 MCDI_IN_SET_DWORD(req,
3019 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
3020 params->es_bufs_per_desc);
3021 MCDI_IN_SET_DWORD(req,
3022 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, params->es_max_dma_len);
3023 MCDI_IN_SET_DWORD(req,
3024 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, params->es_buf_stride);
3025 MCDI_IN_SET_DWORD(req,
3026 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
3027 params->hol_block_timeout);
3030 if (encp->enc_init_rxq_with_buffer_size)
3031 MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES,
3034 MCDI_IN_SET_DWORD(req, INIT_RXQ_V5_IN_RX_PREFIX_ID, params->prefix_id);
/* Fill the DMA address array: one 64-bit bus address per EFX_BUF_SIZE page. */
3036 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
3037 addr = EFSYS_MEM_ADDR(esmp);
3039 for (i = 0; i < npages; i++) {
3040 EFX_POPULATE_QWORD_2(*dma_addr,
3041 EFX_DWORD_1, (uint32_t)(addr >> 32),
3042 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
3045 addr += EFX_BUF_SIZE;
3048 efx_mcdi_execute(enp, &req);
3050 if (req.emr_rc != 0) {
3062 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Tear down a receive queue on the MC via MC_CMD_FINI_RXQ.
 *
 * NOTE(review): fragmentary source view — the function name line, error
 * labels and returns are not visible; comments describe visible code only.
 */
3067 __checkReturn efx_rc_t
3069 __in efx_nic_t *enp,
3070 __in uint32_t instance)
/* Shared request/response payload buffer. */
3073 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
3074 MC_CMD_FINI_RXQ_OUT_LEN);
3077 req.emr_cmd = MC_CMD_FINI_RXQ;
3078 req.emr_in_buf = payload;
3079 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
3080 req.emr_out_buf = payload;
3081 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
3083 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
/* Quiet execution: EALREADY after an MC reboot is expected, not an error. */
3085 efx_mcdi_execute_quiet(enp, &req);
3087 if (req.emr_rc != 0) {
3096 * EALREADY is not an error, but indicates that the MC has rebooted and
3097 * that the RXQ has already been destroyed.
3100 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Create a transmit queue on the MC via MC_CMD_INIT_TXQ.
 *
 * Validates the backing DMA memory, sizes the variable-length request from
 * the number of buffer-table pages, maps the EFX_TXQ_CKSUM_*/EFX_TXQ_FATSOV2
 * flags onto the MCDI flag fields, fills the per-page DMA address array and
 * issues the command.
 *
 * NOTE(review): fragmentary source view — the function name line, error
 * labels and returns are not visible; comments describe visible code only.
 */
3105 __checkReturn efx_rc_t
3107 __in efx_nic_t *enp,
3108 __in uint32_t ndescs,
3109 __in uint32_t target_evq,
3110 __in uint32_t label,
3111 __in uint32_t instance,
3112 __in uint16_t flags,
3113 __in efsys_mem_t *esmp)
3116 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN,
3117 MC_CMD_INIT_TXQ_OUT_LEN);
3118 efx_qword_t *dma_addr;
/*
 * The EXT_IN layout bounds the DMA address array; assert the largest
 * possible queue still fits in it.
 */
3124 EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >=
3125 efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs));
3127 if ((esmp == NULL) ||
3128 (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) {
/* Request length varies with npages; reject if it overflows the buffer. */
3133 npages = efx_txq_nbufs(enp, ndescs);
3134 if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
3139 req.emr_cmd = MC_CMD_INIT_TXQ;
3140 req.emr_in_buf = payload;
3141 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
3142 req.emr_out_buf = payload;
3143 req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
3145 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
3146 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
3147 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
3148 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
/*
 * Note the outer checksum flags are "disable" bits (set when the
 * EFX_TXQ_CKSUM_* flag is absent) while the inner-checksum and TSOv2
 * flags are "enable" bits.
 */
3150 MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
3151 INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
3152 INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
3153 (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
3154 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
3155 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
3156 INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
3157 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
3158 INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
3159 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
3160 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
3161 INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
3162 INIT_TXQ_IN_CRC_MODE, 0,
3163 INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
3165 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
3166 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id);
/* Fill the DMA address array: one 64-bit bus address per EFX_BUF_SIZE page. */
3168 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
3169 addr = EFSYS_MEM_ADDR(esmp);
3171 for (i = 0; i < npages; i++) {
3172 EFX_POPULATE_QWORD_2(*dma_addr,
3173 EFX_DWORD_1, (uint32_t)(addr >> 32),
3174 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
3177 addr += EFX_BUF_SIZE;
3180 efx_mcdi_execute(enp, &req);
3182 if (req.emr_rc != 0) {
3194 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Tear down a transmit queue on the MC via MC_CMD_FINI_TXQ.
 *
 * NOTE(review): fragmentary source view — the function name line, error
 * labels and returns are not visible; comments describe visible code only.
 */
3199 __checkReturn efx_rc_t
3201 __in efx_nic_t *enp,
3202 __in uint32_t instance)
/* Shared request/response payload buffer. */
3205 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
3206 MC_CMD_FINI_TXQ_OUT_LEN);
3209 req.emr_cmd = MC_CMD_FINI_TXQ;
3210 req.emr_in_buf = payload;
3211 req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
3212 req.emr_out_buf = payload;
3213 req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
3215 MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
/* Quiet execution: EALREADY after an MC reboot is expected, not an error. */
3217 efx_mcdi_execute_quiet(enp, &req);
3219 if (req.emr_rc != 0) {
3228 * EALREADY is not an error, but indicates that the MC has rebooted and
3229 * that the TXQ has already been destroyed.
3232 EFSYS_PROBE1(fail1, efx_rc_t, rc);
3237 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
3239 #endif /* EFSYS_OPT_MCDI */