1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
11 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
 * Issue MC_CMD_INIT_RXQ (V3 request layout) to the MC to initialise one
 * RX queue.  The visible logic: validate the queue memory size, pick the
 * DMA mode (packed stream / equal-stride super-buffer / single packet),
 * populate the flags word, optionally fill in the super-buffer layout
 * fields, then write one 64-bit buffer-table address per queue page.
 * NOTE(review): several interior lines are not visible in this view.
 */
14 static __checkReturn efx_rc_t
18 __in uint32_t target_evq,
20 __in uint32_t instance,
21 __in efsys_mem_t *esmp,
22 __in boolean_t disable_scatter,
23 __in boolean_t want_inner_classes,
24 __in uint32_t ps_bufsize,
25 __in uint32_t es_bufs_per_desc,
26 __in uint32_t es_max_dma_len,
27 __in uint32_t es_buf_stride,
28 __in uint32_t hol_block_timeout)
30 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
32 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V3_IN_LEN,
33 MC_CMD_INIT_RXQ_V3_OUT_LEN);
34 int npages = EFX_RXQ_NBUFS(ndescs);
36 efx_qword_t *dma_addr;
40 boolean_t want_outer_classes;
42 EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
/* The DMA area must hold at least ndescs descriptors. */
44 if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) {
/* DMA mode selection: packed stream, else super-buffer, else default. */
50 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
51 else if (es_bufs_per_desc > 0)
52 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
54 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
56 if (encp->enc_tunnel_encapsulations_supported != 0 &&
57 !want_inner_classes) {
59 * WANT_OUTER_CLASSES can only be specified on hardware which
60 * supports tunnel encapsulation offloads, even though it is
61 * effectively the behaviour the hardware gives.
63 * Also, on hardware which does support such offloads, older
64 * firmware rejects the flag if the offloads are not supported
65 * by the current firmware variant, which means this may fail if
66 * the capabilities are not updated when the firmware variant
67 * changes. This is not an issue on newer firmware, as it was
68 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
69 * specified on all firmware variants.
71 want_outer_classes = B_TRUE;
73 want_outer_classes = B_FALSE;
/* In and out share the same payload buffer (standard MCDI pattern). */
76 req.emr_cmd = MC_CMD_INIT_RXQ;
77 req.emr_in_buf = payload;
78 req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
79 req.emr_out_buf = payload;
80 req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN;
82 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
83 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
84 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
85 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
/* RX prefix always enabled; scatter controlled by the caller. */
86 MCDI_IN_POPULATE_DWORD_9(req, INIT_RXQ_EXT_IN_FLAGS,
87 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
88 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
89 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
90 INIT_RXQ_EXT_IN_CRC_MODE, 0,
91 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
92 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
93 INIT_RXQ_EXT_IN_DMA_MODE,
95 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
96 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes);
97 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
98 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
/* Extra V3 fields only apply in equal-stride super-buffer mode. */
100 if (es_bufs_per_desc > 0) {
101 MCDI_IN_SET_DWORD(req,
102 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
104 MCDI_IN_SET_DWORD(req,
105 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
106 MCDI_IN_SET_DWORD(req,
107 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
108 MCDI_IN_SET_DWORD(req,
109 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
/* Hand the MC one DMA address per EFX_BUF_SIZE page of the ring. */
113 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
114 addr = EFSYS_MEM_ADDR(esmp);
116 for (i = 0; i < npages; i++) {
117 EFX_POPULATE_QWORD_2(*dma_addr,
118 EFX_DWORD_1, (uint32_t)(addr >> 32),
119 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
122 addr += EFX_BUF_SIZE;
125 efx_mcdi_execute(enp, &req);
127 if (req.emr_rc != 0) {
137 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Issue MC_CMD_FINI_RXQ to tear down RX queue 'instance' on the MC.
 * Executed "quiet" because EALREADY is an expected outcome (see below).
 */
142 static __checkReturn efx_rc_t
145 __in uint32_t instance)
148 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
149 MC_CMD_FINI_RXQ_OUT_LEN);
152 req.emr_cmd = MC_CMD_FINI_RXQ;
153 req.emr_in_buf = payload;
154 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
155 req.emr_out_buf = payload;
156 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
158 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
160 efx_mcdi_execute_quiet(enp, &req);
162 if (req.emr_rc != 0) {
171 * EALREADY is not an error, but indicates that the MC has rebooted and
172 * that the RXQ has already been destroyed.
175 EFSYS_PROBE1(fail1, efx_rc_t, rc);
180 #if EFSYS_OPT_RX_SCALE
/*
 * Allocate an RSS context on the MC via MC_CMD_RSS_CONTEXT_ALLOC.
 * 'type' selects an exclusive or shared context; on success the new
 * context id is returned through *rss_contextp.  Fails if num_queues
 * exceeds EFX_MAXRSS, the MCDI call fails, the response is truncated,
 * or the MC hands back the invalid-context sentinel.
 */
181 static __checkReturn efx_rc_t
182 efx_mcdi_rss_context_alloc(
184 __in efx_rx_scale_context_type_t type,
185 __in uint32_t num_queues,
186 __out uint32_t *rss_contextp)
189 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
190 MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
191 uint32_t rss_context;
192 uint32_t context_type;
195 if (num_queues > EFX_MAXRSS) {
/* Map the generic scale type onto the MCDI context type. */
201 case EFX_RX_SCALE_EXCLUSIVE:
202 context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
204 case EFX_RX_SCALE_SHARED:
205 context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
212 req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
213 req.emr_in_buf = payload;
214 req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
215 req.emr_out_buf = payload;
216 req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
218 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
219 EVB_PORT_ID_ASSIGNED);
220 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
223 * For exclusive contexts, NUM_QUEUES is only used to validate
224 * indirection table offsets.
225 * For shared contexts, the provided context will spread traffic over
226 * NUM_QUEUES many queues.
228 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
230 efx_mcdi_execute(enp, &req);
232 if (req.emr_rc != 0) {
/* A short response means the context id field is not present. */
237 if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
242 rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
243 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
248 *rss_contextp = rss_context;
261 EFSYS_PROBE1(fail1, efx_rc_t, rc);
265 #endif /* EFSYS_OPT_RX_SCALE */
267 #if EFSYS_OPT_RX_SCALE
/*
 * Release an RSS context via MC_CMD_RSS_CONTEXT_FREE.  Rejects the
 * invalid-context sentinel up front; executed "quiet" so expected
 * failures (e.g. after MC reboot) are not logged as errors.
 */
269 efx_mcdi_rss_context_free(
271 __in uint32_t rss_context)
274 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
275 MC_CMD_RSS_CONTEXT_FREE_OUT_LEN);
278 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
283 req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
284 req.emr_in_buf = payload;
285 req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
286 req.emr_out_buf = payload;
287 req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
289 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
291 efx_mcdi_execute_quiet(enp, &req);
293 if (req.emr_rc != 0) {
303 EFSYS_PROBE1(fail1, efx_rc_t, rc);
307 #endif /* EFSYS_OPT_RX_SCALE */
309 #if EFSYS_OPT_RX_SCALE
/*
 * Program the hash-enable flags and per-traffic-class RSS modes of an
 * RSS context via MC_CMD_RSS_CONTEXT_SET_FLAGS.  The EN bits are derived
 * from the caller's hash-type mask; the RSS_MODE fields are extracted
 * directly from the (copied) mask using the EFX_RX_CLASS_* bitfield
 * layout, which the static asserts below prove matches the MCDI layout.
 */
311 efx_mcdi_rss_context_set_flags(
313 __in uint32_t rss_context,
314 __in efx_rx_hash_type_t type)
316 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
317 efx_rx_hash_type_t type_ipv4;
318 efx_rx_hash_type_t type_ipv4_tcp;
319 efx_rx_hash_type_t type_ipv6;
320 efx_rx_hash_type_t type_ipv6_tcp;
321 efx_rx_hash_type_t modes;
323 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
324 MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN);
/*
 * Compile-time proof that the generic EFX_RX_CLASS_* field positions
 * coincide with the MCDI RSS_MODE field positions, so 'modes' bits can
 * be copied into the request without translation.
 */
327 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
328 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN);
329 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH ==
330 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);
331 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN ==
332 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN);
333 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH ==
334 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH);
335 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN ==
336 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN);
337 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH ==
338 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH);
339 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN ==
340 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN);
341 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH ==
342 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH);
344 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
349 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
350 req.emr_in_buf = payload;
351 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
352 req.emr_out_buf = payload;
353 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
355 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
/* Composite masks: an EN bit is set only if every member is requested. */
358 type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
359 EFX_RX_HASH(IPV4_UDP, 2TUPLE);
360 type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
361 type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
362 EFX_RX_HASH(IPV6_UDP, 2TUPLE);
363 type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
366 * Create a copy of the original hash type.
367 * The copy will be used to fill in RSS_MODE bits and
368 * may be cleared beforehand. The original variable
369 * and, thus, EN bits will remain unaffected.
374 * If the firmware lacks support for additional modes, RSS_MODE
375 * fields must contain zeros, otherwise the operation will fail.
377 if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
380 MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
381 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
382 ((type & type_ipv4) == type_ipv4) ? 1 : 0,
383 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
384 ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
385 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
386 ((type & type_ipv6) == type_ipv6) ? 1 : 0,
387 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
388 ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
389 RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
390 (modes >> EFX_RX_CLASS_IPV4_TCP_LBN) &
391 EFX_MASK32(EFX_RX_CLASS_IPV4_TCP),
392 RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
393 (modes >> EFX_RX_CLASS_IPV4_UDP_LBN) &
394 EFX_MASK32(EFX_RX_CLASS_IPV4_UDP),
395 RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
396 (modes >> EFX_RX_CLASS_IPV4_LBN) & EFX_MASK32(EFX_RX_CLASS_IPV4),
397 RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
398 (modes >> EFX_RX_CLASS_IPV6_TCP_LBN) &
399 EFX_MASK32(EFX_RX_CLASS_IPV6_TCP),
400 RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
401 (modes >> EFX_RX_CLASS_IPV6_UDP_LBN) &
402 EFX_MASK32(EFX_RX_CLASS_IPV6_UDP),
403 RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
404 (modes >> EFX_RX_CLASS_IPV6_LBN) & EFX_MASK32(EFX_RX_CLASS_IPV6));
406 efx_mcdi_execute(enp, &req);
408 if (req.emr_rc != 0) {
418 EFSYS_PROBE1(fail1, efx_rc_t, rc);
422 #endif /* EFSYS_OPT_RX_SCALE */
424 #if EFSYS_OPT_RX_SCALE
/*
 * Set the Toeplitz hash key of an RSS context via
 * MC_CMD_RSS_CONTEXT_SET_KEY.  The key length 'n' must equal the MCDI
 * Toeplitz key length (asserted, then checked at runtime as well).
 */
426 efx_mcdi_rss_context_set_key(
428 __in uint32_t rss_context,
429 __in_ecount(n) uint8_t *key,
433 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
434 MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN);
437 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
442 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
443 req.emr_in_buf = payload;
444 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
445 req.emr_out_buf = payload;
446 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
448 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
/* Wrong-size keys fail the debug assert and the release-mode check. */
451 EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
452 if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
457 memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
460 efx_mcdi_execute(enp, &req);
462 if (req.emr_rc != 0) {
474 EFSYS_PROBE1(fail1, efx_rc_t, rc);
478 #endif /* EFSYS_OPT_RX_SCALE */
480 #if EFSYS_OPT_RX_SCALE
/*
 * Program the RSS indirection table via MC_CMD_RSS_CONTEXT_SET_TABLE.
 * The caller's table of 'n' queue indices is tiled (i % n) across the
 * full MCDI table length; an empty caller table yields all zeros.
 */
482 efx_mcdi_rss_context_set_table(
484 __in uint32_t rss_context,
485 __in_ecount(n) unsigned int *table,
489 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
490 MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN);
494 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
499 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
500 req.emr_in_buf = payload;
501 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
502 req.emr_out_buf = payload;
503 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
505 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
509 MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
/* Repeat the caller's entries to fill the whole MCDI table. */
512 i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
514 req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
517 efx_mcdi_execute(enp, &req);
519 if (req.emr_rc != 0) {
529 EFSYS_PROBE1(fail1, efx_rc_t, rc);
533 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * EF10 RX module initialisation.  When RX scaling is compiled in, try to
 * allocate a default exclusive RSS context; on failure, degrade
 * gracefully to operation without RSS/hashing rather than failing init.
 * NOTE(review): function name line is elided in this view; presumably
 * ef10_rx_init — confirm against the full source.
 */
536 __checkReturn efx_rc_t
540 #if EFSYS_OPT_RX_SCALE
542 if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
543 &enp->en_rss_context) == 0) {
545 * Allocated an exclusive RSS context, which allows both the
546 * indirection table and key to be modified.
548 enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
549 enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
552 * Failed to allocate an exclusive RSS context. Continue
553 * operation without support for RSS. The pseudo-header in
554 * received packets will not contain a Toeplitz hash value.
556 enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
557 enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
560 #endif /* EFSYS_OPT_RX_SCALE */
565 #if EFSYS_OPT_RX_SCATTER
/*
 * Enable RX scatter.  No-op on EF10: scatter is configured per-queue at
 * queue creation time, so both arguments are deliberately unused here.
 */
566 __checkReturn efx_rc_t
567 ef10_rx_scatter_enable(
569 __in unsigned int buf_size)
571 _NOTE(ARGUNUSED(enp, buf_size))
574 #endif /* EFSYS_OPT_RX_SCATTER */
576 #if EFSYS_OPT_RX_SCALE
/* Public wrapper: allocate an RSS context (thin MCDI pass-through). */
577 __checkReturn efx_rc_t
578 ef10_rx_scale_context_alloc(
580 __in efx_rx_scale_context_type_t type,
581 __in uint32_t num_queues,
582 __out uint32_t *rss_contextp)
586 rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp);
593 EFSYS_PROBE1(fail1, efx_rc_t, rc);
596 #endif /* EFSYS_OPT_RX_SCALE */
598 #if EFSYS_OPT_RX_SCALE
/* Public wrapper: free an RSS context (thin MCDI pass-through). */
599 __checkReturn efx_rc_t
600 ef10_rx_scale_context_free(
602 __in uint32_t rss_context)
606 rc = efx_mcdi_rss_context_free(enp, rss_context);
613 EFSYS_PROBE1(fail1, efx_rc_t, rc);
616 #endif /* EFSYS_OPT_RX_SCALE */
618 #if EFSYS_OPT_RX_SCALE
/*
 * Set the RSS hash mode for a context.  Hash insertion must be enabled
 * (asserted), and the requested algorithm must be in the NIC's supported
 * mask.  EFX_RSS_CONTEXT_DEFAULT resolves to the context allocated at
 * init time, which fails if RSS turned out to be unavailable.
 */
619 __checkReturn efx_rc_t
620 ef10_rx_scale_mode_set(
622 __in uint32_t rss_context,
623 __in efx_rx_hash_alg_t alg,
624 __in efx_rx_hash_type_t type,
625 __in boolean_t insert)
627 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
630 EFSYS_ASSERT3U(insert, ==, B_TRUE);
632 if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 ||
/* Resolve the default context to the one allocated at RX init. */
638 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
639 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
643 rss_context = enp->en_rss_context;
646 if ((rc = efx_mcdi_rss_context_set_flags(enp,
647 rss_context, type)) != 0)
657 EFSYS_PROBE1(fail1, efx_rc_t, rc);
661 #endif /* EFSYS_OPT_RX_SCALE */
663 #if EFSYS_OPT_RX_SCALE
/*
 * Set the RSS hash key for a context.  EFX_RSS_CONTEXT_DEFAULT resolves
 * to the context allocated at init time; the static assert pins the
 * generic key size to the MCDI Toeplitz key length.
 */
664 __checkReturn efx_rc_t
665 ef10_rx_scale_key_set(
667 __in uint32_t rss_context,
668 __in_ecount(n) uint8_t *key,
673 EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE ==
674 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
676 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
677 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
681 rss_context = enp->en_rss_context;
684 if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0)
692 EFSYS_PROBE1(fail1, efx_rc_t, rc);
696 #endif /* EFSYS_OPT_RX_SCALE */
698 #if EFSYS_OPT_RX_SCALE
/*
 * Set the RSS indirection table for a context.  EFX_RSS_CONTEXT_DEFAULT
 * resolves to the context allocated at init time.
 */
699 __checkReturn efx_rc_t
700 ef10_rx_scale_tbl_set(
702 __in uint32_t rss_context,
703 __in_ecount(n) unsigned int *table,
709 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
710 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
714 rss_context = enp->en_rss_context;
717 if ((rc = efx_mcdi_rss_context_set_table(enp,
718 rss_context, table, n)) != 0)
726 EFSYS_PROBE1(fail1, efx_rc_t, rc);
730 #endif /* EFSYS_OPT_RX_SCALE */
734 * EF10 RX pseudo-header
735 * ---------------------
737 * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
739 * +00: Toeplitz hash value.
740 * (32bit little-endian)
741 * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
743 * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
745 * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
746 * (16bit little-endian)
747 * +10: MAC timestamp. Zero if timestamping is not enabled.
748 * (32bit little-endian)
750 * See "The RX Pseudo-header" in SF-109306-TC.
/*
 * Extract the packet length from the RX pseudo-header (16-bit
 * little-endian field at byte offset 8; see the layout comment above).
 */
753 __checkReturn efx_rc_t
754 ef10_rx_prefix_pktlen(
756 __in uint8_t *buffer,
757 __out uint16_t *lengthp)
759 _NOTE(ARGUNUSED(enp))
762 * The RX pseudo-header contains the packet length, excluding the
763 * pseudo-header. If the hardware receive datapath was operating in
764 * cut-through mode then the length in the RX pseudo-header will be
765 * zero, and the packet length must be obtained from the DMA length
766 * reported in the RX event.
768 *lengthp = buffer[8] | (buffer[9] << 8);
772 #if EFSYS_OPT_RX_SCALE
/*
 * Read a hash value out of the RX pseudo-header for the given hash
 * algorithm.  Toeplitz and packed-stream share the same handling in the
 * visible switch arms.
 * NOTE(review): function name line is elided in this view; presumably
 * ef10_rx_prefix_hash — confirm against the full source.
 */
773 __checkReturn uint32_t
776 __in efx_rx_hash_alg_t func,
777 __in uint8_t *buffer)
779 _NOTE(ARGUNUSED(enp))
782 case EFX_RX_HASHALG_PACKED_STREAM:
783 case EFX_RX_HASHALG_TOEPLITZ:
794 #endif /* EFSYS_OPT_RX_SCALE */
796 #if EFSYS_OPT_RX_PACKED_STREAM
798 * Fake length for RXQ descriptors in packed stream mode
799 * to make hardware happy
801 #define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
/*
 * Post ndescs RX buffer descriptors (addresses in addrp[]) into the
 * queue's descriptor ring, starting at index 'added'.  Does not ring the
 * doorbell; that is a separate push operation.
 * NOTE(review): the function name line is elided in this view;
 * presumably ef10_rx_qpost — confirm against the full source.
 */
807 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
809 __in unsigned int ndescs,
810 __in unsigned int completed,
811 __in unsigned int added)
818 _NOTE(ARGUNUSED(completed))
820 #if EFSYS_OPT_RX_PACKED_STREAM
822 * Real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT
823 * and equal to 0 after applying mask. Hardware does not like it.
825 if (erp->er_ev_qstate->eers_rx_packed_stream)
826 size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE;
829 /* The client driver must not overfill the queue */
830 EFSYS_ASSERT3U(added - completed + ndescs, <=,
831 EFX_RXQ_LIMIT(erp->er_mask + 1));
/* Ring indices wrap via the power-of-two mask. */
833 id = added & (erp->er_mask);
834 for (i = 0; i < ndescs; i++) {
835 EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
836 unsigned int, id, efsys_dma_addr_t, addrp[i],
/* Split the 64-bit DMA address across the two descriptor dwords. */
839 EFX_POPULATE_QWORD_3(qword,
840 ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
841 ESF_DZ_RX_KER_BUF_ADDR_DW0,
842 (uint32_t)(addrp[i] & 0xffffffff),
843 ESF_DZ_RX_KER_BUF_ADDR_DW1,
844 (uint32_t)(addrp[i] >> 32));
846 offset = id * sizeof (efx_qword_t);
847 EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
849 id = (id + 1) & (erp->er_mask);
/*
 * Ring the RX descriptor doorbell: align the write pointer down to the
 * hardware's WPTR alignment, sync the descriptor memory, then write the
 * pointer to ER_DZ_RX_DESC_UPD_REG with a PIO barrier in between.
 * NOTE(review): the function name line is elided in this view;
 * presumably ef10_rx_qpush — confirm against the full source.
 */
856 __in unsigned int added,
857 __inout unsigned int *pushedp)
859 efx_nic_t *enp = erp->er_enp;
860 unsigned int pushed = *pushedp;
864 /* Hardware has alignment restriction for WPTR */
865 wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
871 /* Push the populated descriptors out */
872 wptr &= erp->er_mask;
874 EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
876 /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
877 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
878 wptr, pushed & erp->er_mask);
879 EFSYS_PIO_WRITE_BARRIER();
880 EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
881 erp->er_index, &dword, B_FALSE);
884 #if EFSYS_OPT_RX_PACKED_STREAM
/*
 * Return accumulated packed-stream credits to the firmware via the RX
 * descriptor "magic doorbell".  No-op when no credits are pending; the
 * value written is clamped to EFX_RX_PACKED_STREAM_MAX_CREDITS, and the
 * pending counter is reset to zero afterwards.
 */
887 ef10_rx_qpush_ps_credits(
890 efx_nic_t *enp = erp->er_enp;
892 efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
895 EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
897 if (rxq_state->eers_rx_packed_stream_credits == 0)
901 * It is a bug if we think that FW has utilized more
902 * credits than it is allowed to have (maximum). However,
903 * make sure that we do not credit more than maximum anyway.
905 credits = MIN(rxq_state->eers_rx_packed_stream_credits,
906 EFX_RX_PACKED_STREAM_MAX_CREDITS);
907 EFX_POPULATE_DWORD_3(dword,
908 ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
909 ERF_DZ_RX_DESC_MAGIC_CMD,
910 ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
911 ERF_DZ_RX_DESC_MAGIC_DATA, credits);
912 EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
913 erp->er_index, &dword, B_FALSE);
915 rxq_state->eers_rx_packed_stream_credits = 0;
919 * In accordance with SF-112241-TC the received data has the following layout:
920 * - 8 byte pseudo-header which consist of:
921 * - 4 byte little-endian timestamp
922 * - 2 byte little-endian captured length in bytes
923 * - 2 byte little-endian original packet length in bytes
924 * - captured packet bytes
925 * - optional padding to align to 64 bytes boundary
926 * - 64 bytes scratch space for the host software
/*
 * Parse one packet out of a packed-stream buffer (layout per the
 * SF-112241-TC comment above).  Reads the timestamp, original length and
 * captured length from the 8-byte prefix at current_offset, computes the
 * aligned offset of the next packet, and bumps the packed-stream credit
 * counter when the parse crosses a per-credit memory boundary.
 */
928 __checkReturn uint8_t *
929 ef10_rx_qps_packet_info(
931 __in uint8_t *buffer,
932 __in uint32_t buffer_length,
933 __in uint32_t current_offset,
934 __out uint16_t *lengthp,
935 __out uint32_t *next_offsetp,
936 __out uint32_t *timestamp)
941 efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
943 EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
945 buffer += current_offset;
946 pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
/* Decode the little-endian pseudo-header fields. */
948 qwordp = (efx_qword_t *)buffer;
949 *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
950 *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
951 buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
/* Prefix + captured bytes, rounded up to the stream alignment. */
953 buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
954 EFX_RX_PACKED_STREAM_ALIGNMENT);
956 current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
958 EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
959 EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
/* Crossing a per-credit boundary earns the firmware one credit back. */
961 if ((*next_offsetp ^ current_offset) &
962 EFX_RX_PACKED_STREAM_MEM_PER_CREDIT)
963 rxq_state->eers_rx_packed_stream_credits++;
/*
 * Flush (tear down) the RX queue on the MC.
 * NOTE(review): the function name line is elided in this view;
 * presumably ef10_rx_qflush — confirm against the full source.
 */
971 __checkReturn efx_rc_t
975 efx_nic_t *enp = erp->er_enp;
978 if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
985 * EALREADY is not an error, but indicates that the MC has rebooted and
986 * that the RXQ has already been destroyed. Callers need to know that
987 * the RXQ flush has completed to avoid waiting until timeout for a
988 * flush done event that will not be delivered.
991 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * NOTE(review): fragment of a no-op queue routine — its header is elided
 * in this view (presumably ef10_rx_qenable, which is empty on EF10);
 * confirm against the full source.
 */
1001 _NOTE(ARGUNUSED(erp))
/*
 * Create an EF10 RX queue: validate the descriptor count and index,
 * translate the queue type (default / packed stream / equal-stride
 * super-buffer) into MCDI parameters, check the firmware capability bits
 * for the requested mode, then call efx_mcdi_init_rxq() and bind the
 * queue to its event-queue label state.
 * NOTE(review): the function name line is elided in this view;
 * presumably ef10_rx_qcreate — confirm against the full source.
 */
1005 __checkReturn efx_rc_t
1007 __in efx_nic_t *enp,
1008 __in unsigned int index,
1009 __in unsigned int label,
1010 __in efx_rxq_type_t type,
1011 __in const efx_rxq_type_data_t *type_data,
1012 __in efsys_mem_t *esmp,
1015 __in unsigned int flags,
1016 __in efx_evq_t *eep,
1017 __in efx_rxq_t *erp)
1019 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1021 boolean_t disable_scatter;
1022 boolean_t want_inner_classes;
1023 unsigned int ps_buf_size;
1024 uint32_t es_bufs_per_desc = 0;
1025 uint32_t es_max_dma_len = 0;
1026 uint32_t es_buf_stride = 0;
1027 uint32_t hol_block_timeout = 0;
1029 _NOTE(ARGUNUSED(id, erp, type_data))
1031 EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
1032 EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
1033 EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
1035 EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
1036 EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS))
/* Ring size must be a power of two within the supported range. */
1038 if (!ISP2(ndescs) ||
1039 (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
1043 if (index >= encp->enc_rxq_limit) {
1049 case EFX_RXQ_TYPE_DEFAULT:
1052 #if EFSYS_OPT_RX_PACKED_STREAM
/* Translate the requested packed-stream buffer size to MCDI encoding. */
1053 case EFX_RXQ_TYPE_PACKED_STREAM:
1054 switch (type_data->ertd_packed_stream.eps_buf_size) {
1055 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
1056 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
1058 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K:
1059 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
1061 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K:
1062 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
1064 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K:
1065 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
1067 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K:
1068 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
1075 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1076 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
/* Pull the equal-stride super-buffer layout from the type data. */
1077 case EFX_RXQ_TYPE_ES_SUPER_BUFFER:
1080 type_data->ertd_es_super_buffer.eessb_bufs_per_desc;
1082 type_data->ertd_es_super_buffer.eessb_max_dma_len;
1084 type_data->ertd_es_super_buffer.eessb_buf_stride;
1086 type_data->ertd_es_super_buffer.eessb_hol_block_timeout;
1088 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1094 #if EFSYS_OPT_RX_PACKED_STREAM
1095 if (ps_buf_size != 0) {
1096 /* Check if datapath firmware supports packed stream mode */
1097 if (encp->enc_rx_packed_stream_supported == B_FALSE) {
1101 /* Check if packed stream allows configurable buffer sizes */
1102 if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) &&
1103 (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
1108 #else /* EFSYS_OPT_RX_PACKED_STREAM */
1109 EFSYS_ASSERT(ps_buf_size == 0);
1110 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1112 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
1113 if (es_bufs_per_desc > 0) {
1114 if (encp->enc_rx_es_super_buffer_supported == B_FALSE) {
1118 if (!IS_P2ALIGNED(es_max_dma_len,
1119 EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
1123 if (!IS_P2ALIGNED(es_buf_stride,
1124 EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
1129 #else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1130 EFSYS_ASSERT(es_bufs_per_desc == 0);
1131 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1133 /* Scatter can only be disabled if the firmware supports doing so */
1134 if (flags & EFX_RXQ_FLAG_SCATTER)
1135 disable_scatter = B_FALSE;
1137 disable_scatter = encp->enc_rx_disable_scatter_supported;
1139 if (flags & EFX_RXQ_FLAG_INNER_CLASSES)
1140 want_inner_classes = B_TRUE;
1142 want_inner_classes = B_FALSE;
1144 if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
1145 esmp, disable_scatter, want_inner_classes,
1146 ps_buf_size, es_bufs_per_desc, es_max_dma_len,
1147 es_buf_stride, hol_block_timeout)) != 0)
1151 erp->er_label = label;
/* Register the queue against its event-queue label. */
1153 ef10_ev_rxlabel_init(eep, erp, label, type);
1155 erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label];
1160 EFSYS_PROBE(fail10);
1161 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
1168 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1169 #if EFSYS_OPT_RX_PACKED_STREAM
1174 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1177 #if EFSYS_OPT_RX_PACKED_STREAM
1180 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1184 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an RX queue's software state: unbind the event-queue label,
 * drop the NIC's queue count and free the queue structure.
 * NOTE(review): the function name line is elided in this view;
 * presumably ef10_rx_qdestroy — confirm against the full source.
 */
1191 __in efx_rxq_t *erp)
1193 efx_nic_t *enp = erp->er_enp;
1194 efx_evq_t *eep = erp->er_eep;
1195 unsigned int label = erp->er_label;
1197 ef10_ev_rxlabel_fini(eep, label);
1199 EFSYS_ASSERT(enp->en_rx_qcount != 0);
1200 --enp->en_rx_qcount;
1202 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
/*
 * EF10 RX module teardown: free the default RSS context if one was
 * allocated at init, ignoring the result (best-effort on shutdown), and
 * mark RSS as unavailable.
 * NOTE(review): the function name line is elided in this view;
 * presumably ef10_rx_fini — confirm against the full source.
 */
1207 __in efx_nic_t *enp)
1209 #if EFSYS_OPT_RX_SCALE
1210 if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE)
1211 (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context)
1212 enp->en_rss_context = 0;
1213 enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
1215 _NOTE(ARGUNUSED(enp))
1216 #endif /* EFSYS_OPT_RX_SCALE */
1219 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */