1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2018-2019 Solarflare Communications Inc.
11 #if EFSYS_OPT_RIVERHEAD
/*
 * Board-specific NIC configuration for Riverhead (EF100) controllers:
 * queries board config over MCDI, then fills in enp->en_nic_cfg with
 * datapath limits, descriptor sizes, Rx prefix/alignment and firmware
 * workaround state.
 *
 * NOTE(review): this chunk is missing interleaved lines (the function
 * name/signature, braces and goto-fail labels are not visible);
 * presumably this is rhead_board_cfg() — confirm against the full file.
 */
13 __checkReturn efx_rc_t
17 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
22 if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
25 encp->enc_clk_mult = 1; /* not used for Riverhead */
28 * FIXME There are TxSend and TxSeg descriptors on Riverhead.
29 * TxSeg is bigger than TxSend.
31 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_GZ_TX_SEND_LEN);
32 /* No boundary crossing limits */
33 encp->enc_tx_dma_desc_boundary = 0;
36 * Maximum number of bytes into the frame the TCP header can start for
37 * firmware assisted TSO to work.
38 * FIXME Get from design parameter DP_TSO_MAX_HDR_LEN.
40 encp->enc_tx_tso_tcp_header_offset_limit = 0;
43 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
44 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
45 * resources (allocated to this PCIe function), which is zero until
46 * after we have allocated VIs.
48 encp->enc_evq_limit = 1024;
49 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
50 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
52 encp->enc_buftbl_limit = UINT32_MAX;
55 * Enable firmware workarounds for hardware errata.
56 * Expected responses are:
58 * Success: workaround enabled or disabled as requested.
59 * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
60 * Firmware does not support the MC_CMD_WORKAROUND request.
61 * (assume that the workaround is not supported).
62 * - MC_CMD_ERR_ENOENT (reported as ENOENT):
63 * Firmware does not support the requested workaround.
64 * - MC_CMD_ERR_EPERM (reported as EACCES):
65 * Unprivileged function cannot enable/disable workarounds.
67 * See efx_mcdi_request_errcode() for MCDI error translations.
71 * Replay engine on Riverhead should suppress duplicate packets
72 * (e.g. because of exact multicast and all-multicast filters
73 * match) to the same RxQ.
75 encp->enc_bug26807_workaround = B_FALSE;
78 * Checksums for TSO sends should always be correct on Riverhead.
79 * FIXME: revisit when TSO support is implemented.
81 encp->enc_bug61297_workaround = B_FALSE;
/* Event/Rx/Tx queue size bounds come from Riverhead-specific constants. */
83 encp->enc_evq_max_nevs = RHEAD_EVQ_MAXNEVS;
84 encp->enc_evq_min_nevs = RHEAD_EVQ_MINNEVS;
85 encp->enc_rxq_max_ndescs = RHEAD_RXQ_MAXNDESCS;
86 encp->enc_rxq_min_ndescs = RHEAD_RXQ_MINNDESCS;
87 encp->enc_txq_max_ndescs = RHEAD_TXQ_MAXNDESCS;
88 encp->enc_txq_min_ndescs = RHEAD_TXQ_MINNDESCS;
90 /* Riverhead FW does not support event queue timers yet. */
91 encp->enc_evq_timer_quantum_ns = 0;
92 encp->enc_evq_timer_max_us = 0;
94 encp->enc_ev_desc_size = RHEAD_EVQ_DESC_SIZE;
95 encp->enc_rx_desc_size = RHEAD_RXQ_DESC_SIZE;
96 encp->enc_tx_desc_size = RHEAD_TXQ_DESC_SIZE;
98 /* No required alignment for WPTR updates */
99 encp->enc_rx_push_align = 1;
101 /* Riverhead supports a single Rx prefix size. */
102 encp->enc_rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN;
104 /* Alignment for receive packet DMA buffers. */
105 encp->enc_rx_buf_align_start = 1;
107 /* Get the RX DMA end padding alignment configuration. */
108 if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
/*
 * NOTE(review): the error-path body between the lines above and below is
 * not visible here; presumably it falls back when the MCDI request is
 * unprivileged — confirm against the full file.
 */
112 /* Assume largest tail padding size supported by hardware. */
115 encp->enc_rx_buf_align_end = end_padding;
118 * Riverhead stores a single global copy of VPD, not per-PF as on
121 encp->enc_vpd_is_global = B_TRUE;
/* PCIe bandwidth requirement derives from the port mode (EF10 helper). */
123 rc = ef10_nic_get_port_mode_bandwidth(enp, &bandwidth);
126 encp->enc_required_pcie_bandwidth_mbps = bandwidth;
127 encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
136 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Probe-time NIC bring-up: clear any pending firmware assertion state,
 * attach the driver via MCDI, fetch controller-specific board config and
 * set default driver VI limits.
 *
 * NOTE(review): the function name/signature line and goto-fail labels are
 * not visible in this chunk; presumably this is rhead_nic_probe() —
 * confirm against the full file.
 */
141 __checkReturn efx_rc_t
145 const efx_nic_ops_t *enop = enp->en_enop;
146 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
147 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
150 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));
152 /* Read and clear any assertion state */
153 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
156 /* Exit the assertion handler */
157 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
/* Attach this driver instance to the NIC over MCDI. */
161 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
164 /* Get remaining controller-specific board config */
165 if ((rc = enop->eno_board_cfg(enp)) != 0)
169 * Set default driver config limits (based on board config).
171 * FIXME: For now allocate a fixed number of VIs which is likely to be
172 * sufficient and small enough to allow multiple functions on the same
175 edcp->edc_min_vi_count = edcp->edc_max_vi_count =
176 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
179 * The client driver must configure and enable PIO buffer support,
180 * but there is no PIO support on Riverhead anyway.
182 edcp->edc_max_piobuf_count = 0;
183 edcp->edc_pio_alloc_size = 0;
185 #if EFSYS_OPT_MAC_STATS
186 /* Wipe the MAC statistics */
187 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
191 #if EFSYS_OPT_LOOPBACK
192 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
/*
 * NOTE(review): the matching #endif / fail-label lines for the optional
 * sections below are elided in this chunk — the #if lines at original
 * 198/202 presumably guard per-feature fail labels; confirm against the
 * full file.
 */
198 #if EFSYS_OPT_LOOPBACK
202 #if EFSYS_OPT_MAC_STATS
213 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Clamp the client driver's requested EVQ/RXQ/TXQ limits (edlp) against
 * the board config limits (encp) and record the resulting min/max VI
 * counts in the driver config (edcp) for use by the later VI allocation.
 *
 * NOTE(review): braces and the fail label/return are not visible in this
 * sampled chunk.
 */
218 __checkReturn efx_rc_t
219 rhead_nic_set_drv_limits(
220 __inout efx_nic_t *enp,
221 __in efx_drv_limits_t *edlp)
223 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
224 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
225 uint32_t min_evq_count, max_evq_count;
226 uint32_t min_rxq_count, max_rxq_count;
227 uint32_t min_txq_count, max_txq_count;
235 /* Get minimum required and maximum usable VI limits */
236 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
237 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
238 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
/* A VI bundles one EVQ, RXQ and TXQ, so the VI count is the largest need. */
240 edcp->edc_min_vi_count =
241 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
243 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
244 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
245 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
247 edcp->edc_max_vi_count =
248 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
250 /* There is no PIO support on Riverhead */
251 edcp->edc_max_piobuf_count = 0;
252 edcp->edc_pio_alloc_size = 0;
257 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Reset the NIC: clear pending firmware assertions, then perform an MCDI
 * entity reset and clear recorded RX/TX DMA queue error flags.
 *
 * NOTE(review): the function name/signature line is not visible in this
 * chunk; presumably this is rhead_nic_reset() — confirm against the
 * full file.
 */
262 __checkReturn efx_rc_t
268 /* ef10_nic_reset() is called to recover from BADASSERT failures. */
269 if ((rc = efx_mcdi_read_assertion(enp)) != 0)
271 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
274 if ((rc = efx_mcdi_entity_reset(enp)) != 0)
277 /* Clear RX/TX DMA queue errors */
278 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
287 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Initialize the NIC for use: allocate VI resources (EVQ+RXQ+TXQ) within
 * the limits chosen at set_drv_limits time and record the UC BAR mapping
 * details for the allocated VI window range.
 *
 * NOTE(review): the function name/signature line, braces, goto-fail
 * labels and returns are not visible in this sampled chunk; presumably
 * this is rhead_nic_init() — confirm against the full file.
 */
292 __checkReturn efx_rc_t
296 const efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
297 uint32_t min_vi_count, max_vi_count;
298 uint32_t vi_count, vi_base, vi_shift;
299 uint32_t vi_window_size;
302 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));
303 EFSYS_ASSERT3U(edcp->edc_max_piobuf_count, ==, 0);
305 /* Enable reporting of some events (e.g. link change) */
306 if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
309 min_vi_count = edcp->edc_min_vi_count;
310 max_vi_count = edcp->edc_max_vi_count;
312 /* Ensure that the previously attached driver's VIs are freed */
313 if ((rc = efx_mcdi_free_vis(enp)) != 0)
317 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
318 * fails then retrying the request for fewer VI resources may succeed.
321 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
322 &vi_base, &vi_count, &vi_shift)) != 0)
325 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
/* Firmware may grant fewer VIs than the minimum we need — treat as failure. */
327 if (vi_count < min_vi_count) {
332 enp->en_arch.ef10.ena_vi_base = vi_base;
333 enp->en_arch.ef10.ena_vi_count = vi_count;
334 enp->en_arch.ef10.ena_vi_shift = vi_shift;
/* The per-VI register window size must have been set by board config. */
336 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
337 EFX_VI_WINDOW_SHIFT_INVALID);
338 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
339 EFX_VI_WINDOW_SHIFT_64K);
340 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
342 /* Save UC memory mapping details */
343 enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
344 enp->en_arch.ef10.ena_uc_mem_map_size =
345 vi_window_size * enp->en_arch.ef10.ena_vi_count;
347 /* No WC memory mapping since PIO is not supported */
348 enp->en_arch.ef10.ena_pio_write_vi_base = 0;
349 enp->en_arch.ef10.ena_wc_mem_map_offset = 0;
350 enp->en_arch.ef10.ena_wc_mem_map_size = 0;
352 enp->en_vport_id = EVB_PORT_ID_NULL;
354 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
/* Failure path: release any VIs reserved above (result intentionally ignored). */
361 (void) efx_mcdi_free_vis(enp);
368 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Report the number of VIs available to the client driver (set during
 * rhead_nic_init() VI allocation).
 */
373 __checkReturn efx_rc_t
374 rhead_nic_get_vi_pool(
376 __out uint32_t *vi_countp)
379 * Report VIs that the client driver can use.
380 * Do not include VIs used for PIO buffer writes.
382 *vi_countp = enp->en_arch.ef10.ena_vi_count;
/*
 * Return the BAR offset and size of the requested memory-mapped region
 * (VI register window or PIO-write window), using the mapping details
 * saved at init time.
 *
 * NOTE(review): the switch statement opener, the first case label and the
 * default/fail handling are not visible in this sampled chunk.
 */
387 __checkReturn efx_rc_t
388 rhead_nic_get_bar_region(
390 __in efx_nic_region_t region,
391 __out uint32_t *offsetp,
396 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));
399 * TODO: Specify host memory mapping alignment and granularity
400 * in efx_drv_limits_t so that they can be taken into account
401 * when allocating extra VIs for PIO writes.
405 /* UC mapped memory BAR region for VI registers */
406 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
407 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
410 case EFX_REGION_PIO_WRITE_VI:
411 /* WC mapped memory BAR region for piobuf writes */
412 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
413 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
424 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Detect whether the hardware has become unavailable: either the driver
 * has already flagged it via en_reset_flags, or a read of the MC soft
 * status register returns all-ones (the classic "device gone" pattern
 * for a removed/failed PCIe function).
 *
 * NOTE(review): braces and the B_TRUE/B_FALSE returns are not visible in
 * this sampled chunk.
 */
429 __checkReturn boolean_t
430 rhead_nic_hw_unavailable(
435 if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
438 EFX_BAR_READD(enp, ER_GZ_MC_SFT_STATUS, &dword, B_FALSE);
439 if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
/* On detection, latch the unavailable state so later calls short-circuit. */
445 rhead_nic_set_hw_unavailable(enp);
/* Latch the hardware-unavailable flag and fire the diagnostics probe. */
451 rhead_nic_set_hw_unavailable(
454 EFSYS_PROBE(hw_unavail);
455 enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
/*
 * Teardown fragment: free the VIs reserved at init time and detach the
 * driver over MCDI; results are intentionally ignored (best effort on
 * shutdown).
 *
 * NOTE(review): the enclosing function signature(s) are not visible in
 * this sampled chunk; presumably rhead_nic_fini()/rhead_nic_unprobe() —
 * confirm against the full file.
 */
462 (void) efx_mcdi_free_vis(enp);
463 enp->en_arch.ef10.ena_vi_count = 0;
470 (void) efx_mcdi_drv_attach(enp, B_FALSE);
/*
 * Diagnostic register test stub: takes no action on the NIC (the enp
 * argument is explicitly unused) — presumably returns a fixed status;
 * the body between the _NOTE() annotations and the fail probe is not
 * visible in this sampled chunk.
 */
475 __checkReturn efx_rc_t
476 rhead_nic_register_test(
482 _NOTE(ARGUNUSED(enp))
483 _NOTE(CONSTANTCONDITION)
493 EFSYS_PROBE1(fail1, efx_rc_t, rc);
498 #endif /* EFSYS_OPT_DIAG */
500 #endif /* EFSYS_OPT_RIVERHEAD */