1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2012-2018 Solarflare Communications Inc.
11 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
 * Build and issue an MCDI INIT_RXQ (V3 layout) request to create a hardware
 * RX queue bound to event queue 'target_evq'.
 * NOTE(review): this extract is missing many original lines (function name
 * line, several declarations, closing braces); documented in place only.
 */
14 static __checkReturn efx_rc_t
18 __in uint32_t target_evq,
20 __in uint32_t instance,
21 __in efsys_mem_t *esmp,
22 __in boolean_t disable_scatter,
23 __in boolean_t want_inner_classes,
24 __in uint32_t ps_bufsize,
25 __in uint32_t es_bufs_per_desc,
26 __in uint32_t es_max_dma_len,
27 __in uint32_t es_buf_stride,
28 __in uint32_t hol_block_timeout)
30 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
/* Single buffer serves as both MCDI request and response. */
32 uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN,
33 MC_CMD_INIT_RXQ_V3_OUT_LEN)];
34 int npages = EFX_RXQ_NBUFS(ndescs);
36 efx_qword_t *dma_addr;
40 boolean_t want_outer_classes;
42 EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
/* The descriptor-ring memory must exist and be large enough. */
44 if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) {
/*
 * DMA mode selection: packed stream, equal-stride super-buffer, or plain
 * single-packet mode.  NOTE(review): the condition guarding the packed
 * stream branch is among the lines missing from this extract.
 */
50 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
51 else if (es_bufs_per_desc > 0)
52 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
54 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
56 if (encp->enc_tunnel_encapsulations_supported != 0 &&
57 !want_inner_classes) {
59 * WANT_OUTER_CLASSES can only be specified on hardware which
60 * supports tunnel encapsulation offloads, even though it is
61 * effectively the behaviour the hardware gives.
63 * Also, on hardware which does support such offloads, older
64 * firmware rejects the flag if the offloads are not supported
65 * by the current firmware variant, which means this may fail if
66 * the capabilities are not updated when the firmware variant
67 * changes. This is not an issue on newer firmware, as it was
68 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be
69 * specified on all firmware variants.
71 want_outer_classes = B_TRUE;
73 want_outer_classes = B_FALSE;
/* Zero the payload so unset MCDI fields default to 0. */
76 (void) memset(payload, 0, sizeof (payload));
77 req.emr_cmd = MC_CMD_INIT_RXQ;
78 req.emr_in_buf = payload;
79 req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
80 req.emr_out_buf = payload;
81 req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN;
83 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
84 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
85 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
86 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
/* FLAG_PREFIX=1 requests the RX pseudo-header prefix on received packets. */
87 MCDI_IN_POPULATE_DWORD_9(req, INIT_RXQ_EXT_IN_FLAGS,
88 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
89 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
90 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
91 INIT_RXQ_EXT_IN_CRC_MODE, 0,
92 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
93 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
94 INIT_RXQ_EXT_IN_DMA_MODE,
96 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize,
97 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes);
98 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
99 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
/* Equal-stride super-buffer parameters (V3 fields) only when enabled. */
101 if (es_bufs_per_desc > 0) {
102 MCDI_IN_SET_DWORD(req,
103 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
105 MCDI_IN_SET_DWORD(req,
106 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
107 MCDI_IN_SET_DWORD(req,
108 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
109 MCDI_IN_SET_DWORD(req,
110 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
/* Fill in the DMA address of each EFX_BUF_SIZE page backing the ring. */
114 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
115 addr = EFSYS_MEM_ADDR(esmp);
117 for (i = 0; i < npages; i++) {
/* 64-bit DMA address split into low/high 32-bit dwords. */
118 EFX_POPULATE_QWORD_2(*dma_addr,
119 EFX_DWORD_1, (uint32_t)(addr >> 32),
120 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
123 addr += EFX_BUF_SIZE;
126 efx_mcdi_execute(enp, &req);
128 if (req.emr_rc != 0) {
138 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Issue MCDI FINI_RXQ to tear down the RX queue identified by 'instance'.
 * Uses the quiet execute variant since EALREADY after an MC reboot is an
 * expected outcome (see comment below), not a reportable error.
 * NOTE(review): function name line and some body lines are missing from
 * this extract; code documented in place only.
 */
143 static __checkReturn efx_rc_t
146 __in uint32_t instance)
149 uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
150 MC_CMD_FINI_RXQ_OUT_LEN)];
153 (void) memset(payload, 0, sizeof (payload));
154 req.emr_cmd = MC_CMD_FINI_RXQ;
155 req.emr_in_buf = payload;
156 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
157 req.emr_out_buf = payload;
158 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
160 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
/* Quiet variant: do not log expected failures (e.g. after MC reboot). */
162 efx_mcdi_execute_quiet(enp, &req);
164 if (req.emr_rc != 0) {
173 * EALREADY is not an error, but indicates that the MC has rebooted and
174 * that the RXQ has already been destroyed.
177 EFSYS_PROBE1(fail1, efx_rc_t, rc);
182 #if EFSYS_OPT_RX_SCALE
/*
 * Allocate an RSS context via MCDI RSS_CONTEXT_ALLOC.
 * On success the firmware-assigned context ID is returned through
 * *rss_contextp.  Fails if num_queues exceeds EFX_MAXRSS or the firmware
 * returns the invalid context ID sentinel.
 * NOTE(review): switch statement opening and some error paths are missing
 * from this extract; code documented in place only.
 */
183 static __checkReturn efx_rc_t
184 efx_mcdi_rss_context_alloc(
186 __in efx_rx_scale_context_type_t type,
187 __in uint32_t num_queues,
188 __out uint32_t *rss_contextp)
191 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
192 MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
193 uint32_t rss_context;
194 uint32_t context_type;
/* Reject requests spreading over more queues than RSS supports. */
197 if (num_queues > EFX_MAXRSS) {
/* Map the generic scale context type onto the MCDI context type. */
203 case EFX_RX_SCALE_EXCLUSIVE:
204 context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
206 case EFX_RX_SCALE_SHARED:
207 context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
214 (void) memset(payload, 0, sizeof (payload));
215 req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
216 req.emr_in_buf = payload;
217 req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
218 req.emr_out_buf = payload;
219 req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
221 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
222 EVB_PORT_ID_ASSIGNED);
223 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
226 * For exclusive contexts, NUM_QUEUES is only used to validate
227 * indirection table offsets.
228 * For shared contexts, the provided context will spread traffic over
229 * NUM_QUEUES many queues.
231 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
233 efx_mcdi_execute(enp, &req);
235 if (req.emr_rc != 0) {
/* A short response cannot contain a valid context ID. */
240 if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
245 rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
/* Firmware may report success yet hand back the invalid-ID sentinel. */
246 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
251 *rss_contextp = rss_context;
264 EFSYS_PROBE1(fail1, efx_rc_t, rc);
268 #endif /* EFSYS_OPT_RX_SCALE */
270 #if EFSYS_OPT_RX_SCALE
/*
 * Free a previously allocated RSS context via MCDI RSS_CONTEXT_FREE.
 * Rejects the invalid-ID sentinel up front rather than sending it to the
 * firmware.  NOTE(review): return-type line and error-path lines are
 * missing from this extract; code documented in place only.
 */
272 efx_mcdi_rss_context_free(
274 __in uint32_t rss_context)
277 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
278 MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
/* Never ask firmware to free the invalid-context sentinel. */
281 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
286 (void) memset(payload, 0, sizeof (payload));
287 req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
288 req.emr_in_buf = payload;
289 req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
290 req.emr_out_buf = payload;
291 req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
293 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
/* Quiet variant: failure here is often benign (e.g. MC reboot). */
295 efx_mcdi_execute_quiet(enp, &req);
297 if (req.emr_rc != 0) {
307 EFSYS_PROBE1(fail1, efx_rc_t, rc);
311 #endif /* EFSYS_OPT_RX_SCALE */
313 #if EFSYS_OPT_RX_SCALE
/*
 * Program the hash-enable flags and per-traffic-class RSS modes of an RSS
 * context via MCDI RSS_CONTEXT_SET_FLAGS.  The static asserts below pin the
 * layout equivalence between the EFX_RX_CLASS_* fields and the MCDI
 * RSS_MODE fields, which EXTRACT_RSS_MODE relies on.
 * NOTE(review): return-type line and the statements that copy 'type' into
 * 'modes' (and clear it when additional modes are unsupported) are missing
 * from this extract; code documented in place only.
 */
315 efx_mcdi_rss_context_set_flags(
317 __in uint32_t rss_context,
318 __in efx_rx_hash_type_t type)
320 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
321 efx_rx_hash_type_t type_ipv4;
322 efx_rx_hash_type_t type_ipv4_tcp;
323 efx_rx_hash_type_t type_ipv6;
324 efx_rx_hash_type_t type_ipv6_tcp;
325 efx_rx_hash_type_t modes;
327 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
328 MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
/* Compile-time proof that EFX_RX_CLASS_* fields mirror the MCDI layout. */
331 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
332 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN);
333 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH ==
334 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);
335 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN ==
336 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN);
337 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH ==
338 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH);
339 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN ==
340 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN);
341 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH ==
342 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH);
343 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN ==
344 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN);
345 EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH ==
346 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH);
348 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
353 (void) memset(payload, 0, sizeof (payload));
354 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
355 req.emr_in_buf = payload;
356 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
357 req.emr_out_buf = payload;
358 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
360 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
/* Hash-type groupings used to derive the legacy TOEPLITZ_*_EN flags. */
363 type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
364 EFX_RX_HASH(IPV4_UDP, 2TUPLE);
365 type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
366 type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
367 EFX_RX_HASH(IPV6_UDP, 2TUPLE);
368 type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
371 * Create a copy of the original hash type.
372 * The copy will be used to fill in RSS_MODE bits and
373 * may be cleared beforehand. The original variable
374 * and, thus, EN bits will remain unaffected.
379 * If the firmware lacks support for additional modes, RSS_MODE
380 * fields must contain zeros, otherwise the operation will fail.
382 if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
/* Pull one traffic class's mode bits out of the hash-type bitmask. */
385 #define EXTRACT_RSS_MODE(_type, _class) \
386 (EFX_EXTRACT_NATIVE(_type, 0, 31, \
387 EFX_LOW_BIT(EFX_RX_CLASS_##_class), \
388 EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \
389 EFX_MASK32(EFX_RX_CLASS_##_class))
391 MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
392 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
393 ((type & type_ipv4) == type_ipv4) ? 1 : 0,
394 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
395 ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
396 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
397 ((type & type_ipv6) == type_ipv6) ? 1 : 0,
398 RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
399 ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
400 RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
401 EXTRACT_RSS_MODE(modes, IPV4_TCP),
402 RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
403 EXTRACT_RSS_MODE(modes, IPV4_UDP),
404 RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
405 EXTRACT_RSS_MODE(modes, IPV4),
406 RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
407 EXTRACT_RSS_MODE(modes, IPV6_TCP),
408 RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
409 EXTRACT_RSS_MODE(modes, IPV6_UDP),
410 RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
411 EXTRACT_RSS_MODE(modes, IPV6));
413 #undef EXTRACT_RSS_MODE
415 efx_mcdi_execute(enp, &req);
417 if (req.emr_rc != 0) {
427 EFSYS_PROBE1(fail1, efx_rc_t, rc);
431 #endif /* EFSYS_OPT_RX_SCALE */
433 #if EFSYS_OPT_RX_SCALE
/*
 * Program the Toeplitz hash key of an RSS context via MCDI
 * RSS_CONTEXT_SET_KEY.  The key length 'n' must equal the fixed MCDI
 * Toeplitz key length; this is asserted and also checked at runtime.
 * NOTE(review): return-type line, the memcpy length argument line and the
 * error paths are missing from this extract; documented in place only.
 */
435 efx_mcdi_rss_context_set_key(
437 __in uint32_t rss_context,
438 __in_ecount(n) uint8_t *key,
442 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
443 MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
446 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
451 (void) memset(payload, 0, sizeof (payload));
452 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
453 req.emr_in_buf = payload;
454 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
455 req.emr_out_buf = payload;
456 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
458 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
/* Key must be exactly the MCDI Toeplitz key length; runtime check too. */
461 EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
462 if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
467 memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
470 efx_mcdi_execute(enp, &req);
472 if (req.emr_rc != 0) {
484 EFSYS_PROBE1(fail1, efx_rc_t, rc);
488 #endif /* EFSYS_OPT_RX_SCALE */
490 #if EFSYS_OPT_RX_SCALE
/*
 * Program the RSS indirection table of a context via MCDI
 * RSS_CONTEXT_SET_TABLE.  The caller's table of 'n' queue indices is
 * replicated modulo n to fill the fixed-size MCDI table.
 * NOTE(review): return-type line, the req_table declaration and the for
 * statement's opening line are missing from this extract; documented in
 * place only.
 */
492 efx_mcdi_rss_context_set_table(
494 __in uint32_t rss_context,
495 __in_ecount(n) unsigned int *table,
499 uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
500 MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
504 if (rss_context == EF10_RSS_CONTEXT_INVALID) {
509 (void) memset(payload, 0, sizeof (payload));
510 req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
511 req.emr_in_buf = payload;
512 req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
513 req.emr_out_buf = payload;
514 req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
516 MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
520 MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
/* Fill the whole MCDI table, wrapping the caller's entries modulo n. */
523 i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
525 req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
528 efx_mcdi_execute(enp, &req);
530 if (req.emr_rc != 0) {
540 EFSYS_PROBE1(fail1, efx_rc_t, rc);
544 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * EF10 RX module init.  Attempts to allocate an exclusive RSS context for
 * the NIC; falls back to operating without RSS if allocation fails.
 * NOTE(review): the function-name line is missing from this extract —
 * presumably ef10_rx_init; verify against the full file.
 */
547 __checkReturn efx_rc_t
551 #if EFSYS_OPT_RX_SCALE
553 if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
554 &enp->en_rss_context) == 0) {
556 * Allocated an exclusive RSS context, which allows both the
557 * indirection table and key to be modified.
559 enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
560 enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
563 * Failed to allocate an exclusive RSS context. Continue
564 * operation without support for RSS. The pseudo-header in
565 * received packets will not contain a Toeplitz hash value.
567 enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
568 enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
571 #endif /* EFSYS_OPT_RX_SCALE */
576 #if EFSYS_OPT_RX_SCATTER
/*
 * Enabling RX scatter is a no-op on EF10: scatter is configured per-queue
 * at queue creation time, so both arguments are unused here.
 * NOTE(review): the return statement / closing brace are missing from this
 * extract.
 */
577 __checkReturn efx_rc_t
578 ef10_rx_scatter_enable(
580 __in unsigned int buf_size)
582 _NOTE(ARGUNUSED(enp, buf_size))
585 #endif /* EFSYS_OPT_RX_SCATTER */
587 #if EFSYS_OPT_RX_SCALE
/*
 * Thin wrapper: allocate an RSS context of the requested type, delegating
 * to efx_mcdi_rss_context_alloc() and tracing failure via the probe.
 */
588 __checkReturn efx_rc_t
589 ef10_rx_scale_context_alloc(
591 __in efx_rx_scale_context_type_t type,
592 __in uint32_t num_queues,
593 __out uint32_t *rss_contextp)
597 rc = efx_mcdi_rss_context_alloc(enp, type, num_queues, rss_contextp);
604 EFSYS_PROBE1(fail1, efx_rc_t, rc);
607 #endif /* EFSYS_OPT_RX_SCALE */
609 #if EFSYS_OPT_RX_SCALE
/*
 * Thin wrapper: free an RSS context, delegating to
 * efx_mcdi_rss_context_free() and tracing failure via the probe.
 */
610 __checkReturn efx_rc_t
611 ef10_rx_scale_context_free(
613 __in uint32_t rss_context)
617 rc = efx_mcdi_rss_context_free(enp, rss_context);
624 EFSYS_PROBE1(fail1, efx_rc_t, rc);
627 #endif /* EFSYS_OPT_RX_SCALE */
629 #if EFSYS_OPT_RX_SCALE
/*
 * Set the RSS hash algorithm/type for a context.  Validates that the
 * requested algorithm is supported (enc_rx_scale_hash_alg_mask), resolves
 * EFX_RSS_CONTEXT_DEFAULT to the NIC's own context, then programs the
 * flags via efx_mcdi_rss_context_set_flags().
 * NOTE(review): several validation/error lines are missing from this
 * extract; documented in place only.
 */
630 __checkReturn efx_rc_t
631 ef10_rx_scale_mode_set(
633 __in uint32_t rss_context,
634 __in efx_rx_hash_alg_t alg,
635 __in efx_rx_hash_type_t type,
636 __in boolean_t insert)
638 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
/* EF10 always inserts the hash in the pseudo-header prefix. */
641 EFSYS_ASSERT3U(insert, ==, B_TRUE);
643 if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 ||
/* Default context requested: substitute the NIC-owned RSS context. */
649 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
650 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
654 rss_context = enp->en_rss_context;
657 if ((rc = efx_mcdi_rss_context_set_flags(enp,
658 rss_context, type)) != 0)
668 EFSYS_PROBE1(fail1, efx_rc_t, rc);
672 #endif /* EFSYS_OPT_RX_SCALE */
674 #if EFSYS_OPT_RX_SCALE
/*
 * Set the RSS Toeplitz key for a context.  Resolves
 * EFX_RSS_CONTEXT_DEFAULT to the NIC's own context and delegates to
 * efx_mcdi_rss_context_set_key().
 */
675 __checkReturn efx_rc_t
676 ef10_rx_scale_key_set(
678 __in uint32_t rss_context,
679 __in_ecount(n) uint8_t *key,
/* Compile-time check: public key size matches the MCDI key length. */
684 EFX_STATIC_ASSERT(EFX_RSS_KEY_SIZE ==
685 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
687 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
688 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
692 rss_context = enp->en_rss_context;
695 if ((rc = efx_mcdi_rss_context_set_key(enp, rss_context, key, n)) != 0)
703 EFSYS_PROBE1(fail1, efx_rc_t, rc);
707 #endif /* EFSYS_OPT_RX_SCALE */
709 #if EFSYS_OPT_RX_SCALE
/*
 * Set the RSS indirection table for a context.  Resolves
 * EFX_RSS_CONTEXT_DEFAULT to the NIC's own context and delegates to
 * efx_mcdi_rss_context_set_table().
 */
710 __checkReturn efx_rc_t
711 ef10_rx_scale_tbl_set(
713 __in uint32_t rss_context,
714 __in_ecount(n) unsigned int *table,
720 if (rss_context == EFX_RSS_CONTEXT_DEFAULT) {
721 if (enp->en_rss_context_type == EFX_RX_SCALE_UNAVAILABLE) {
725 rss_context = enp->en_rss_context;
728 if ((rc = efx_mcdi_rss_context_set_table(enp,
729 rss_context, table, n)) != 0)
737 EFSYS_PROBE1(fail1, efx_rc_t, rc);
741 #endif /* EFSYS_OPT_RX_SCALE */
745 * EF10 RX pseudo-header
746 * ---------------------
748 * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
750 * +00: Toeplitz hash value.
751 * (32bit little-endian)
752 * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
754 * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
756 * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
757 * (16bit little-endian)
758 * +10: MAC timestamp. Zero if timestamping is not enabled.
759 * (32bit little-endian)
761 * See "The RX Pseudo-header" in SF-109306-TC.
/*
 * Extract the packet length from the RX pseudo-header prefix.
 * Reads the 16-bit little-endian length at byte offset 8 of the prefix
 * (see the pseudo-header layout comment above this function).
 */
764 __checkReturn efx_rc_t
765 ef10_rx_prefix_pktlen(
767 __in uint8_t *buffer,
768 __out uint16_t *lengthp)
770 _NOTE(ARGUNUSED(enp))
773 * The RX pseudo-header contains the packet length, excluding the
774 * pseudo-header. If the hardware receive datapath was operating in
775 * cut-through mode then the length in the RX pseudo-header will be
776 * zero, and the packet length must be obtained from the DMA length
777 * reported in the RX event.
/* Assemble 16-bit little-endian length byte-wise (alignment safe). */
779 *lengthp = buffer[8] | (buffer[9] << 8);
783 #if EFSYS_OPT_RX_SCALE
/*
 * Extract the RSS hash value from the RX pseudo-header prefix for the
 * given hash algorithm.  NOTE(review): function-name line, the switch
 * opener and the return statements are missing from this extract —
 * presumably ef10_rx_prefix_hash reading the 32-bit little-endian hash at
 * prefix offset 0; verify against the full file.
 */
784 __checkReturn uint32_t
787 __in efx_rx_hash_alg_t func,
788 __in uint8_t *buffer)
790 _NOTE(ARGUNUSED(enp))
793 case EFX_RX_HASHALG_PACKED_STREAM:
794 case EFX_RX_HASHALG_TOEPLITZ:
805 #endif /* EFSYS_OPT_RX_SCALE */
807 #if EFSYS_OPT_RX_PACKED_STREAM
809 * Fake length for RXQ descriptors in packed stream mode
810 * to make hardware happy
812 #define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
/*
 * Post 'ndescs' receive buffers onto the RX ring starting at index
 * 'added': writes one hardware RX descriptor (address + byte count) per
 * buffer into the ring memory.  Does not ring the doorbell — that is
 * the push function's job.  NOTE(review): return-type/name lines and
 * several declarations (size, id, i, qword, offset) are missing from this
 * extract — presumably ef10_rx_qpost; verify against the full file.
 */
818 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
820 __in unsigned int ndescs,
821 __in unsigned int completed,
822 __in unsigned int added)
829 _NOTE(ARGUNUSED(completed))
831 #if EFSYS_OPT_RX_PACKED_STREAM
833 * Real size of the buffer does not fit into ESF_DZ_RX_KER_BYTE_CNT
834 * and equal to 0 after applying mask. Hardware does not like it.
836 if (erp->er_ev_qstate->eers_rx_packed_stream)
837 size = EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE;
840 /* The client driver must not overfill the queue */
841 EFSYS_ASSERT3U(added - completed + ndescs, <=,
842 EFX_RXQ_LIMIT(erp->er_mask + 1));
/* Ring index wraps via the power-of-two mask. */
844 id = added & (erp->er_mask);
845 for (i = 0; i < ndescs; i++) {
846 EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
847 unsigned int, id, efsys_dma_addr_t, addrp[i],
/* Descriptor: byte count plus DMA address split into two dwords. */
850 EFX_POPULATE_QWORD_3(qword,
851 ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
852 ESF_DZ_RX_KER_BUF_ADDR_DW0,
853 (uint32_t)(addrp[i] & 0xffffffff),
854 ESF_DZ_RX_KER_BUF_ADDR_DW1,
855 (uint32_t)(addrp[i] >> 32));
857 offset = id * sizeof (efx_qword_t);
858 EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
860 id = (id + 1) & (erp->er_mask);
/*
 * Push previously posted RX descriptors to hardware by writing the
 * aligned write pointer to the RX descriptor update doorbell register.
 * NOTE(review): return-type/name lines and some declarations/statements
 * (wptr, dword, *pushedp update) are missing from this extract —
 * presumably ef10_rx_qpush; verify against the full file.
 */
867 __in unsigned int added,
868 __inout unsigned int *pushedp)
870 efx_nic_t *enp = erp->er_enp;
871 unsigned int pushed = *pushedp;
875 /* Hardware has alignment restriction for WPTR */
876 wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
882 /* Push the populated descriptors out */
883 wptr &= erp->er_mask;
885 EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
887 /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
888 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
889 wptr, pushed & erp->er_mask);
890 EFSYS_PIO_WRITE_BARRIER();
/* Doorbell: notify hardware of the new write pointer. */
891 EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
892 erp->er_index, &dword, B_FALSE);
895 #if EFSYS_OPT_RX_PACKED_STREAM
/*
 * Grant accumulated packed-stream credits back to firmware via the magic
 * doorbell, then reset the pending credit count.  A no-op if no credits
 * are pending.
 */
898 ef10_rx_qpush_ps_credits(
901 efx_nic_t *enp = erp->er_enp;
903 efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
906 EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
/* Nothing pending: avoid a pointless doorbell write. */
908 if (rxq_state->eers_rx_packed_stream_credits == 0)
912 * It is a bug if we think that FW has utilized more
913 * credits than it is allowed to have (maximum). However,
914 * make sure that we do not credit more than maximum anyway.
916 credits = MIN(rxq_state->eers_rx_packed_stream_credits,
917 EFX_RX_PACKED_STREAM_MAX_CREDITS);
918 EFX_POPULATE_DWORD_3(dword,
919 ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
920 ERF_DZ_RX_DESC_MAGIC_CMD,
921 ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
922 ERF_DZ_RX_DESC_MAGIC_DATA, credits);
923 EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
924 erp->er_index, &dword, B_FALSE);
/* All pending credits have now been handed to firmware. */
926 rxq_state->eers_rx_packed_stream_credits = 0;
930 * In accordance with SF-112241-TC the received data has the following layout:
931 * - 8 byte pseudo-header which consist of:
932 * - 4 byte little-endian timestamp
933 * - 2 byte little-endian captured length in bytes
934 * - 2 byte little-endian original packet length in bytes
935 * - captured packet bytes
936 * - optional padding to align to 64 bytes boundary
937 * - 64 bytes scratch space for the host software
/*
 * Parse one packet out of a packed-stream buffer at 'current_offset'.
 * Decodes the 8-byte packed-stream pseudo-header (see the layout comment
 * above), returns a pointer to the packet payload, and reports the
 * original length, the offset of the next packet, and the timestamp.
 * Also accumulates a credit whenever the parse position crosses a
 * per-credit memory boundary.  NOTE(review): some declarations and the
 * return statement are missing from this extract; documented in place
 * only.
 */
939 __checkReturn uint8_t *
940 ef10_rx_qps_packet_info(
942 __in uint8_t *buffer,
943 __in uint32_t buffer_length,
944 __in uint32_t current_offset,
945 __out uint16_t *lengthp,
946 __out uint32_t *next_offsetp,
947 __out uint32_t *timestamp)
952 efx_evq_rxq_state_t *rxq_state = erp->er_ev_qstate;
954 EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
956 buffer += current_offset;
/* Payload starts right after the fixed packed-stream prefix. */
957 pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
959 qwordp = (efx_qword_t *)buffer;
960 *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
961 *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
962 buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
/* Round prefix + captured bytes up to the packed-stream alignment. */
964 buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
965 EFX_RX_PACKED_STREAM_ALIGNMENT);
967 current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
969 EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
970 EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
/* Crossing a per-credit boundary earns one credit for firmware. */
972 if ((*next_offsetp ^ current_offset) &
973 EFX_RX_PACKED_STREAM_MEM_PER_CREDIT)
974 rxq_state->eers_rx_packed_stream_credits++;
/*
 * Flush (tear down) the RX queue by issuing MCDI FINI_RXQ for its
 * hardware instance.  NOTE(review): the function-name line and the
 * EALREADY handling statements are missing from this extract — presumably
 * ef10_rx_qflush; verify against the full file.
 */
982 __checkReturn efx_rc_t
986 efx_nic_t *enp = erp->er_enp;
989 if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
996 * EALREADY is not an error, but indicates that the MC has rebooted and
997 * that the RXQ has already been destroyed. Callers need to know that
998 * the RXQ flush has completed to avoid waiting until timeout for a
999 * flush done event that will not be delivered.
1002 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * RX queue enable is a no-op on EF10 (the queue is active once created),
 * so the argument is unused.  NOTE(review): return-type/name lines are
 * missing from this extract — presumably ef10_rx_qenable; verify against
 * the full file.
 */
1009 __in efx_rxq_t *erp)
1012 _NOTE(ARGUNUSED(erp))
/*
 * Create an RX queue: validate parameters, decode the type-specific
 * configuration (packed stream buffer size, equal-stride super-buffer
 * geometry), check firmware capabilities, then issue INIT_RXQ via
 * efx_mcdi_init_rxq() and wire the queue to its event queue label.
 * NOTE(review): the function-name line (presumably ef10_rx_qcreate),
 * several switch/brace lines and most failure-label lines are missing
 * from this extract; documented in place only.
 */
1016 __checkReturn efx_rc_t
1018 __in efx_nic_t *enp,
1019 __in unsigned int index,
1020 __in unsigned int label,
1021 __in efx_rxq_type_t type,
1022 __in const efx_rxq_type_data_t *type_data,
1023 __in efsys_mem_t *esmp,
1026 __in unsigned int flags,
1027 __in efx_evq_t *eep,
1028 __in efx_rxq_t *erp)
1030 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1032 boolean_t disable_scatter;
1033 boolean_t want_inner_classes;
1034 unsigned int ps_buf_size;
1035 uint32_t es_bufs_per_desc = 0;
1036 uint32_t es_max_dma_len = 0;
1037 uint32_t es_buf_stride = 0;
1038 uint32_t hol_block_timeout = 0;
1040 _NOTE(ARGUNUSED(id, erp, type_data))
1042 EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
1043 EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
1044 EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
1046 EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
1047 EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
/* Ring size must be a power of two within the supported range. */
1049 if (!ISP2(ndescs) ||
1050 (ndescs < EFX_RXQ_MINNDESCS) || (ndescs > EFX_RXQ_MAXNDESCS)) {
1054 if (index >= encp->enc_rxq_limit) {
/* Decode type-specific configuration. */
1060 case EFX_RXQ_TYPE_DEFAULT:
1063 #if EFSYS_OPT_RX_PACKED_STREAM
1064 case EFX_RXQ_TYPE_PACKED_STREAM:
/* Map the requested packed-stream buffer size to the MCDI encoding. */
1065 switch (type_data->ertd_packed_stream.eps_buf_size) {
1066 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
1067 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
1069 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_512K:
1070 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
1072 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_256K:
1073 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
1075 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_128K:
1076 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
1078 case EFX_RXQ_PACKED_STREAM_BUF_SIZE_64K:
1079 ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
1086 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1087 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
1088 case EFX_RXQ_TYPE_ES_SUPER_BUFFER:
1091 type_data->ertd_es_super_buffer.eessb_bufs_per_desc;
1093 type_data->ertd_es_super_buffer.eessb_max_dma_len;
1095 type_data->ertd_es_super_buffer.eessb_buf_stride;
1097 type_data->ertd_es_super_buffer.eessb_hol_block_timeout;
1099 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1105 #if EFSYS_OPT_RX_PACKED_STREAM
1106 if (ps_buf_size != 0) {
1107 /* Check if datapath firmware supports packed stream mode */
1108 if (encp->enc_rx_packed_stream_supported == B_FALSE) {
1112 /* Check if packed stream allows configurable buffer sizes */
1113 if ((ps_buf_size != MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M) &&
1114 (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
1119 #else /* EFSYS_OPT_RX_PACKED_STREAM */
1120 EFSYS_ASSERT(ps_buf_size == 0);
1121 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1123 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
/* Super-buffer geometry must satisfy firmware alignment rules. */
1124 if (es_bufs_per_desc > 0) {
1125 if (encp->enc_rx_es_super_buffer_supported == B_FALSE) {
1129 if (!IS_P2ALIGNED(es_max_dma_len,
1130 EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
1134 if (!IS_P2ALIGNED(es_buf_stride,
1135 EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
1140 #else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1141 EFSYS_ASSERT(es_bufs_per_desc == 0);
1142 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1144 /* Scatter can only be disabled if the firmware supports doing so */
1145 if (flags & EFX_RXQ_FLAG_SCATTER)
1146 disable_scatter = B_FALSE;
1148 disable_scatter = encp->enc_rx_disable_scatter_supported;
1150 if (flags & EFX_RXQ_FLAG_INNER_CLASSES)
1151 want_inner_classes = B_TRUE;
1153 want_inner_classes = B_FALSE;
1155 if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
1156 esmp, disable_scatter, want_inner_classes,
1157 ps_buf_size, es_bufs_per_desc, es_max_dma_len,
1158 es_buf_stride, hol_block_timeout)) != 0)
1162 erp->er_label = label;
/* Bind the queue to its event-queue label for RX event demux. */
1164 ef10_ev_rxlabel_init(eep, erp, label, type);
1166 erp->er_ev_qstate = &erp->er_eep->ee_rxq_state[label];
/* Failure unwind labels (most intermediate labels missing from extract). */
1171 EFSYS_PROBE(fail10);
1172 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
1179 #endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
1180 #if EFSYS_OPT_RX_PACKED_STREAM
1185 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1188 #if EFSYS_OPT_RX_PACKED_STREAM
1191 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1195 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Destroy an RX queue's software state: detach it from its event-queue
 * label, decrement the per-NIC queue count, and free the queue structure.
 * NOTE(review): return-type/name lines are missing from this extract —
 * presumably ef10_rx_qdestroy; verify against the full file.
 */
1202 __in efx_rxq_t *erp)
1204 efx_nic_t *enp = erp->er_enp;
1205 efx_evq_t *eep = erp->er_eep;
1206 unsigned int label = erp->er_label;
1208 ef10_ev_rxlabel_fini(eep, label);
/* Queue count must be non-zero before decrementing. */
1210 EFSYS_ASSERT(enp->en_rx_qcount != 0);
1211 --enp->en_rx_qcount;
1213 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
/*
 * EF10 RX module teardown: release the NIC's RSS context (if one was
 * allocated) and mark scaling unavailable.  NOTE(review): return-type/name
 * lines are missing from this extract — presumably ef10_rx_fini; verify
 * against the full file.
 */
1218 __in efx_nic_t *enp)
1220 #if EFSYS_OPT_RX_SCALE
/* Best-effort free; result deliberately ignored during teardown. */
1221 if (enp->en_rss_context_type != EFX_RX_SCALE_UNAVAILABLE)
1222 (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
1223 enp->en_rss_context = 0;
1224 enp->en_rss_context_type = EFX_RX_SCALE_UNAVAILABLE;
1226 _NOTE(ARGUNUSED(enp))
1227 #endif /* EFSYS_OPT_RX_SCALE */
1230 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */