return (rc);
}
+#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
+
+#define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM
+
#if EFX_OPTS_EF10()
+# if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS)
+# error "INIT_EVQ_MAXNBUFS too small"
+# endif
+#endif /* EFX_OPTS_EF10 */
+#if EFSYS_OPT_RIVERHEAD
+# if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS)
+# error "INIT_EVQ_MAXNBUFS too small"
+# endif
+#endif /* EFSYS_OPT_RIVERHEAD */
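These preprocessor guards are a pre-C11 portable form of a static assertion: the build fails if a family-specific bound ever exceeds the shared payload sizing. A minimal sketch of the equivalent check using C11 _Static_assert (hypothetical; the #error form above is presumably kept for compatibility with older compilers):

	_Static_assert(INIT_EVQ_MAXNBUFS >= EF10_EVQ_MAXNBUFS,
	    "INIT_EVQ_MAXNBUFS too small");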
__checkReturn efx_rc_t
efx_mcdi_init_evq(
	__in		efx_nic_t *enp,
	__in		unsigned int instance,
	__in		efsys_mem_t *esmp,
	__in		size_t nevs,
	__in		uint32_t irq,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		boolean_t low_latency)
{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
efx_mcdi_req_t req;
EFX_MCDI_DECLARE_BUF(payload,
- MC_CMD_INIT_EVQ_V2_IN_LEN(EF10_EVQ_MAXNBUFS),
+ MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS),
MC_CMD_INIT_EVQ_V2_OUT_LEN);
+ boolean_t interrupting;
+ int ev_cut_through;
+ int ev_merge;
+ unsigned int evq_type;
efx_qword_t *dma_addr;
uint64_t addr;
int npages;
int i;
- boolean_t interrupting;
- int ev_cut_through;
efx_rc_t rc;
npages = efx_evq_nbufs(enp, nevs);
- if (npages > EF10_EVQ_MAXNBUFS) {
+ if (npages > INIT_EVQ_MAXNBUFS) {
rc = EINVAL;
goto fail1;
}
interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
- /*
- * On Huntington RX and TX event batching can only be requested together
- * (even if the datapath firmware doesn't actually support RX
- * batching). If event cut through is enabled no RX batching will occur.
- *
- * So always enable RX and TX event batching, and enable event cut
- * through if we want low latency operation.
- */
- switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
- case EFX_EVQ_FLAGS_TYPE_AUTO:
- ev_cut_through = low_latency ? 1 : 0;
- break;
- case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ if (encp->enc_init_evq_v2_supported) {
+		/*
+		 * On Medford the low latency license is required to enable
+		 * RX and event cut through and to disable RX batching. If
+		 * the event queue type in flags is auto, we let the firmware
+		 * decide which settings to use. If the adapter has a low
+		 * latency license, it will choose the best settings for low
+		 * latency, otherwise it will choose the best settings for
+		 * throughput.
+		 */
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ /* EvQ type controls merging, no manual settings */
+ ev_merge = 0;
ev_cut_through = 0;
- break;
- case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
- ev_cut_through = 1;
- break;
- default:
- rc = EINVAL;
- goto fail2;
+ } else {
+ /* EvQ types other than manual are not supported */
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL;
+ /*
+ * On Huntington RX and TX event batching can only be requested
+ * together (even if the datapath firmware doesn't actually
+ * support RX batching). If event cut through is enabled no RX
+ * batching will occur.
+ *
+ * So always enable RX and TX event batching, and enable event
+ * cut through if we want low latency operation.
+ */
+ ev_merge = 1;
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ ev_cut_through = low_latency ? 1 : 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ ev_cut_through = 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ ev_cut_through = 1;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
}
- MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_V2_IN_FLAGS,
+
+ MCDI_IN_POPULATE_DWORD_7(req, INIT_EVQ_V2_IN_FLAGS,
INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through,
- INIT_EVQ_V2_IN_FLAG_RX_MERGE, 1,
- INIT_EVQ_V2_IN_FLAG_TX_MERGE, 1);
+ INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge,
+ INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge,
+ INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
	/* If the value is zero then disable the timer */
	if (us == 0) {
		MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
		    MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail4;
	}
- if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail5;
+ if (encp->enc_init_evq_v2_supported) {
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+ EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
+ MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
+ } else {
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail6;
+ }
}
/* NOTE: ignore the returned IRQ param as firmware does not set it. */
return (0);
+fail6:
+ EFSYS_PROBE(fail6);
fail5:
EFSYS_PROBE(fail5);
fail4:
}
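For reference, the merged wrapper is driven entirely by the EFX_EVQ_FLAGS_* bits; a hedged caller sketch (the queue parameters here are illustrative, not taken from this patch):

	/*
	 * Hypothetical caller: an interrupting, low-latency event queue.
	 * On INIT_EVQ_V2-capable firmware this maps to TYPE_LOW_LATENCY
	 * and the firmware picks the merge/cut-through settings; on older
	 * firmware it maps to TYPE_MANUAL with CUT_THRU enabled and RX/TX
	 * event batching enabled.
	 */
	uint32_t flags = EFX_EVQ_FLAGS_NOTIFY_INTERRUPT |
	    EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	rc = efx_mcdi_init_evq(enp, index, esmp, nevs, irq, us, flags,
	    B_FALSE);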
__checkReturn efx_rc_t
-efx_mcdi_init_evq_v2(
+efx_mcdi_fini_evq(
__in efx_nic_t *enp,
- __in unsigned int instance,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN);
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the EVQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
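Because EALREADY only signals that the MC rebooted and already tore the queue down, callers typically treat it as success; a minimal sketch, with the surrounding teardown logic assumed:

	rc = efx_mcdi_fini_evq(enp, index);
	if (rc != 0 && rc != EALREADY)
		goto fail1;	/* a real failure */
	/* The queue is gone either way; continue teardown. */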
+
+#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
+
+#if EFX_OPTS_EF10()
+
+ __checkReturn efx_rc_t
+efx_mcdi_init_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t ndescs,
+ __in efx_evq_t *eep,
+ __in uint32_t label,
+ __in uint32_t instance,
__in efsys_mem_t *esmp,
- __in size_t nevs,
- __in uint32_t irq,
- __in uint32_t us,
- __in uint32_t flags)
+ __in boolean_t disable_scatter,
+ __in boolean_t want_inner_classes,
+ __in uint32_t buf_size,
+ __in uint32_t ps_bufsize,
+ __in uint32_t es_bufs_per_desc,
+ __in uint32_t es_max_dma_len,
+ __in uint32_t es_buf_stride,
+ __in uint32_t hol_block_timeout)
{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- EFX_MCDI_DECLARE_BUF(payload,
- MC_CMD_INIT_EVQ_V2_IN_LEN(EF10_EVQ_MAXNBUFS),
- MC_CMD_INIT_EVQ_V2_OUT_LEN);
- boolean_t interrupting;
- unsigned int evq_type;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V4_IN_LEN,
+ MC_CMD_INIT_RXQ_V4_OUT_LEN);
+ int npages = efx_rxq_nbufs(enp, ndescs);
+ int i;
efx_qword_t *dma_addr;
uint64_t addr;
- int npages;
- int i;
efx_rc_t rc;
+ uint32_t dma_mode;
+ boolean_t want_outer_classes;
+ boolean_t no_cont_ev;
- npages = efx_evq_nbufs(enp, nevs);
- if (npages > EF10_EVQ_MAXNBUFS) {
+ EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs);
+
+ if ((esmp == NULL) ||
+ (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) {
rc = EINVAL;
goto fail1;
}
- req.emr_cmd = MC_CMD_INIT_EVQ;
+ no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV);
+ if ((no_cont_ev == B_TRUE) && (disable_scatter == B_FALSE)) {
+ /* TODO: Support scatter in NO_CONT_EV mode */
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (ps_bufsize > 0)
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else if (es_bufs_per_desc > 0)
+ dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
+ else
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+
+ if (encp->enc_tunnel_encapsulations_supported != 0 &&
+ !want_inner_classes) {
+	/*
+	 * WANT_OUTER_CLASSES can only be specified on hardware which
+	 * supports tunnel encapsulation offloads, even though outer
+	 * classification is effectively what the hardware does anyway.
+ *
+ * Also, on hardware which does support such offloads, older
+ * firmware rejects the flag if the offloads are not supported
+ * by the current firmware variant, which means this may fail if
+ * the capabilities are not updated when the firmware variant
+ * changes. This is not an issue on newer firmware, as it was
+ * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
+ * specified on all firmware variants.
+ */
+ want_outer_classes = B_TRUE;
+ } else {
+ want_outer_classes = B_FALSE;
+ }
+
+ req.emr_cmd = MC_CMD_INIT_RXQ;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
+ req.emr_in_length = MC_CMD_INIT_RXQ_V4_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
+ req.emr_out_length = MC_CMD_INIT_RXQ_V4_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
+ MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS,
+ INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
+ INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
+ INIT_RXQ_EXT_IN_CRC_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
+ INIT_RXQ_EXT_IN_DMA_MODE,
+ dma_mode,
+ INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
+ INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes,
+ INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id);
+
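+	/* The parameters below apply only in equal-stride super-buffer mode */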
+ if (es_bufs_per_desc > 0) {
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
+ es_bufs_per_desc);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
+ hol_block_timeout);
+ }
+
+ if (encp->enc_init_rxq_with_buffer_size)
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES,
+ buf_size);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
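+	/* Program the DMA scatter list: one 64-bit address per buffer page */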
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
- interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
- EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
- switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
- case EFX_EVQ_FLAGS_TYPE_AUTO:
- evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
- break;
- case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
- evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
- break;
- case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
- evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
- break;
- default:
- rc = EINVAL;
- goto fail2;
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
}
- MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
- INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
- INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
- INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
- INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
- /* If the value is zero then disable the timer */
- if (us == 0) {
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
- MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
- } else {
- unsigned int ticks;
+ return (0);
- if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
- goto fail3;
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
- MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
+ return (rc);
+}
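The want_inner_classes plumbing above can be exercised as follows; a hedged sketch in which tunnels_configured is an illustrative flag, not part of this patch:

	/*
	 * Hypothetical caller: ask for inner-frame classification only
	 * when tunnel offloads are supported and actually in use;
	 * otherwise the wrapper requests WANT_OUTER_CLASSES where the
	 * hardware and firmware permit it.
	 */
	want_inner = (encp->enc_tunnel_encapsulations_supported != 0 &&
	    tunnels_configured) ? B_TRUE : B_FALSE;
	rc = efx_mcdi_init_rxq(enp, ndescs, eep, label, index, esmp,
	    B_FALSE, want_inner, buf_size, 0, 0, 0, 0, 0);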
+
+ __checkReturn efx_rc_t
+efx_mcdi_fini_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN);
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_FINI_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
}
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
- MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
- MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
+ return (0);
- dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
+fail1:
+ /*
+ * EALREADY is not an error, but indicates that the MC has rebooted and
+ * that the RXQ has already been destroyed.
+ */
+ if (rc != EALREADY)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_init_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t ndescs,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in uint16_t flags,
+ __in efsys_mem_t *esmp)
+{
+ efx_mcdi_req_t req;
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN,
+ MC_CMD_INIT_TXQ_OUT_LEN);
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >=
+ efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs));
+
+ if ((esmp == NULL) ||
+ (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ npages = efx_txq_nbufs(enp, ndescs);
+ if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ req.emr_cmd = MC_CMD_INIT_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+ MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0,
+ INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+ INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+ INIT_TXQ_IN_CRC_MODE, 0,
+ INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
addr = EFSYS_MEM_ADDR(esmp);
	for (i = 0; i < npages; i++) {
		EFX_POPULATE_QWORD_2(*dma_addr,
		    EFX_DWORD_1, (uint32_t)(addr >> 32),
		    EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));

		dma_addr++;
		addr += EFX_BUF_SIZE;
	}

	efx_mcdi_execute(enp, &req);

if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail4;
- }
-
- if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail5;
+ goto fail3;
}
- /* NOTE: ignore the returned IRQ param as firmware does not set it. */
-
- EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
- MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
-
return (0);
-fail5:
- EFSYS_PROBE(fail5);
-fail4:
- EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
}
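Note that the MCDI checksum fields are "disable" flags for the outer checksums but "enable" flags for the inner ones, so their sense inverts relative to EFX_TXQ_CKSUM_*; a hedged caller sketch:

	/*
	 * Hypothetical caller: outer IPv4 and TCP/UDP checksum offload,
	 * no inner (encapsulated) offloads, no FATSOv2.
	 */
	uint16_t flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
	rc = efx_mcdi_init_txq(enp, ndescs, evq_index, label, txq_index,
	    flags, esmp);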
__checkReturn efx_rc_t
-efx_mcdi_fini_evq(
+efx_mcdi_fini_txq(
__in efx_nic_t *enp,
__in uint32_t instance)
{
efx_mcdi_req_t req;
- EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
- MC_CMD_FINI_EVQ_OUT_LEN);
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN);
efx_rc_t rc;
- req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_cmd = MC_CMD_FINI_TXQ;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+ req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
- MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
efx_mcdi_execute_quiet(enp, &req);
fail1:
/*
* EALREADY is not an error, but indicates that the MC has rebooted and
- * that the EVQ has already been destroyed.
+ * that the TXQ has already been destroyed.
*/
if (rc != EALREADY)
EFSYS_PROBE1(fail1, efx_rc_t, rc);