1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2007-2019 Solarflare Communications Inc.
/*
 * Forward declarations for the Siena-specific RX methods that populate the
 * __efx_rx_siena_ops method table below.
 * NOTE(review): this file is an excerpt — the embedded original line numbers
 * jump (e.g. 13 -> 21), so several declarations here are missing lines
 * (function names, first parameters, closing parentheses). Code is kept
 * byte-identical to the excerpt.
 */
13 static __checkReturn efx_rc_t
21 #if EFSYS_OPT_RX_SCATTER
22 static __checkReturn efx_rc_t
23 siena_rx_scatter_enable(
25 __in unsigned int buf_size);
26 #endif /* EFSYS_OPT_RX_SCATTER */
28 #if EFSYS_OPT_RX_SCALE
29 static __checkReturn efx_rc_t
30 siena_rx_scale_mode_set(
32 __in uint32_t rss_context,
33 __in efx_rx_hash_alg_t alg,
34 __in efx_rx_hash_type_t type,
35 __in boolean_t insert);
37 static __checkReturn efx_rc_t
38 siena_rx_scale_key_set(
40 __in uint32_t rss_context,
41 __in_ecount(n) uint8_t *key,
44 static __checkReturn efx_rc_t
45 siena_rx_scale_tbl_set(
47 __in uint32_t rss_context,
48 __in_ecount(n) unsigned int *table,
51 static __checkReturn uint32_t
54 __in efx_rx_hash_alg_t func,
55 __in uint8_t *buffer);
57 #endif /* EFSYS_OPT_RX_SCALE */
59 static __checkReturn efx_rc_t
60 siena_rx_prefix_pktlen(
63 __out uint16_t *lengthp);
68 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
70 __in unsigned int ndescs,
71 __in unsigned int completed,
72 __in unsigned int added);
77 __in unsigned int added,
78 __inout unsigned int *pushedp);
80 #if EFSYS_OPT_RX_PACKED_STREAM
82 siena_rx_qpush_ps_credits(
85 static __checkReturn uint8_t *
86 siena_rx_qps_packet_info(
89 __in uint32_t buffer_length,
90 __in uint32_t current_offset,
91 __out uint16_t *lengthp,
92 __out uint32_t *next_offsetp,
93 __out uint32_t *timestamp);
96 static __checkReturn efx_rc_t
102 __in efx_rxq_t *erp);
104 static __checkReturn efx_rc_t
107 __in unsigned int index,
108 __in unsigned int label,
109 __in efx_rxq_type_t type,
110 __in_opt const efx_rxq_type_data_t *type_data,
111 __in efsys_mem_t *esmp,
114 __in unsigned int flags,
116 __in efx_rxq_t *erp);
120 __in efx_rxq_t *erp);
122 #endif /* EFSYS_OPT_SIENA */
/*
 * Per-family RX method tables. Each erxo_* slot is dispatched through
 * enp->en_erxop by the generic efx_rx_* wrappers later in this file.
 * Siena has no per-context RSS allocation, hence the NULL
 * erxo_scale_context_alloc/free slots.
 * NOTE(review): excerpt — some table rows and #endif lines are missing
 * (original numbering jumps, e.g. 130 -> 132); kept byte-identical.
 */
126 static const efx_rx_ops_t __efx_rx_siena_ops = {
127 siena_rx_init, /* erxo_init */
128 siena_rx_fini, /* erxo_fini */
129 #if EFSYS_OPT_RX_SCATTER
130 siena_rx_scatter_enable, /* erxo_scatter_enable */
132 #if EFSYS_OPT_RX_SCALE
133 NULL, /* erxo_scale_context_alloc */
134 NULL, /* erxo_scale_context_free */
135 siena_rx_scale_mode_set, /* erxo_scale_mode_set */
136 siena_rx_scale_key_set, /* erxo_scale_key_set */
137 siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
138 siena_rx_prefix_hash, /* erxo_prefix_hash */
140 siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
141 siena_rx_qpost, /* erxo_qpost */
142 siena_rx_qpush, /* erxo_qpush */
143 #if EFSYS_OPT_RX_PACKED_STREAM
144 siena_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
145 siena_rx_qps_packet_info, /* erxo_qps_packet_info */
147 siena_rx_qflush, /* erxo_qflush */
148 siena_rx_qenable, /* erxo_qenable */
149 siena_rx_qcreate, /* erxo_qcreate */
150 siena_rx_qdestroy, /* erxo_qdestroy */
152 #endif /* EFSYS_OPT_SIENA */
/* EF10 (Huntington/Medford/Medford2) method table — same slot layout. */
155 static const efx_rx_ops_t __efx_rx_ef10_ops = {
156 ef10_rx_init, /* erxo_init */
157 ef10_rx_fini, /* erxo_fini */
158 #if EFSYS_OPT_RX_SCATTER
159 ef10_rx_scatter_enable, /* erxo_scatter_enable */
161 #if EFSYS_OPT_RX_SCALE
162 ef10_rx_scale_context_alloc, /* erxo_scale_context_alloc */
163 ef10_rx_scale_context_free, /* erxo_scale_context_free */
164 ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
165 ef10_rx_scale_key_set, /* erxo_scale_key_set */
166 ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
167 ef10_rx_prefix_hash, /* erxo_prefix_hash */
169 ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
170 ef10_rx_qpost, /* erxo_qpost */
171 ef10_rx_qpush, /* erxo_qpush */
172 #if EFSYS_OPT_RX_PACKED_STREAM
173 ef10_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
174 ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
176 ef10_rx_qflush, /* erxo_qflush */
177 ef10_rx_qenable, /* erxo_qenable */
178 ef10_rx_qcreate, /* erxo_qcreate */
179 ef10_rx_qdestroy, /* erxo_qdestroy */
181 #endif /* EFX_OPTS_EF10() */
/*
 * Presumably efx_rx_init (function-name line missing from excerpt): selects
 * the per-family RX method table, requires EFX_MOD_NIC and EFX_MOD_EV to be
 * set and EFX_MOD_RX to be clear, then calls erxo_init and records the ops
 * pointer plus the EFX_MOD_RX module flag on success.
 * NOTE(review): excerpt — braces, `break`s, `goto fail*` targets and the
 * default switch case are among the missing lines; kept byte-identical.
 */
184 __checkReturn efx_rc_t
186 __inout efx_nic_t *enp)
188 const efx_rx_ops_t *erxop;
191 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
192 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
194 if (!(enp->en_mod_flags & EFX_MOD_EV)) {
199 if (enp->en_mod_flags & EFX_MOD_RX) {
204 switch (enp->en_family) {
206 case EFX_FAMILY_SIENA:
207 erxop = &__efx_rx_siena_ops;
209 #endif /* EFSYS_OPT_SIENA */
211 #if EFSYS_OPT_HUNTINGTON
212 case EFX_FAMILY_HUNTINGTON:
213 erxop = &__efx_rx_ef10_ops;
215 #endif /* EFSYS_OPT_HUNTINGTON */
217 #if EFSYS_OPT_MEDFORD
218 case EFX_FAMILY_MEDFORD:
219 erxop = &__efx_rx_ef10_ops;
221 #endif /* EFSYS_OPT_MEDFORD */
223 #if EFSYS_OPT_MEDFORD2
224 case EFX_FAMILY_MEDFORD2:
225 erxop = &__efx_rx_ef10_ops;
227 #endif /* EFSYS_OPT_MEDFORD2 */
235 if ((rc = erxop->erxo_init(enp)) != 0)
238 enp->en_erxop = erxop;
239 enp->en_mod_flags |= EFX_MOD_RX;
/* Failure path: undo partial state so a retry starts clean. */
249 EFSYS_PROBE1(fail1, efx_rc_t, rc);
251 enp->en_erxop = NULL;
252 enp->en_mod_flags &= ~EFX_MOD_RX;
/*
 * Presumably efx_rx_fini (name line missing): tears down the RX module via
 * erxo_fini; asserts no RX queues remain (en_rx_qcount == 0) first.
 */
260 const efx_rx_ops_t *erxop = enp->en_erxop;
262 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
263 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
264 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
265 EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
267 erxop->erxo_fini(enp);
269 enp->en_erxop = NULL;
270 enp->en_mod_flags &= ~EFX_MOD_RX;
/*
 * efx_rx_scatter_enable: thin dispatch to the per-family
 * erxo_scatter_enable method with the requested scatter buffer size.
 */
273 #if EFSYS_OPT_RX_SCATTER
274 __checkReturn efx_rc_t
275 efx_rx_scatter_enable(
277 __in unsigned int buf_size)
279 const efx_rx_ops_t *erxop = enp->en_erxop;
282 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
283 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
285 if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
291 EFSYS_PROBE1(fail1, efx_rc_t, rc);
294 #endif /* EFSYS_OPT_RX_SCATTER */
/*
 * efx_rx_scale_hash_flags_get: enumerate the RSS hash-flag combinations
 * supported for `hash_alg` into flagsp[0..max_nflags), writing the count to
 * *nflagsp. Which combinations are emitted depends on the NIC config bits
 * enc_rx_scale_l4_hash_supported and
 * enc_rx_scale_additional_modes_supported.
 * NOTE(review): excerpt — the INSERT_FLAGS macro body and several
 * brace/return lines are missing (numbering jumps, e.g. 322 -> 326);
 * kept byte-identical.
 */
296 #if EFSYS_OPT_RX_SCALE
297 __checkReturn efx_rc_t
298 efx_rx_scale_hash_flags_get(
300 __in efx_rx_hash_alg_t hash_alg,
301 __out_ecount_part(max_nflags, *nflagsp) unsigned int *flagsp,
302 __in unsigned int max_nflags,
303 __out unsigned int *nflagsp)
305 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
306 unsigned int nflags = 0;
309 if (flagsp == NULL || nflagsp == NULL) {
314 if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) {
319 /* Helper to add flags word to flags array without buffer overflow */
320 #define INSERT_FLAGS(_flags) \
322 if (nflags >= max_nflags) { \
326 *(flagsp + nflags) = (_flags); \
329 _NOTE(CONSTANTCONDITION) \
332 if (encp->enc_rx_scale_l4_hash_supported != B_FALSE) {
333 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 4TUPLE));
334 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 4TUPLE));
337 if ((encp->enc_rx_scale_l4_hash_supported != B_FALSE) &&
338 (encp->enc_rx_scale_additional_modes_supported != B_FALSE)) {
339 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_DST));
340 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_SRC));
342 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_DST));
343 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_SRC));
345 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 4TUPLE));
346 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_DST));
347 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_SRC));
349 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 4TUPLE));
350 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_DST));
351 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_SRC));
354 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE));
355 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE));
357 INSERT_FLAGS(EFX_RX_HASH(IPV4, 2TUPLE));
358 INSERT_FLAGS(EFX_RX_HASH(IPV6, 2TUPLE));
360 if (encp->enc_rx_scale_additional_modes_supported != B_FALSE) {
361 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_DST));
362 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_SRC));
364 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_DST));
365 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_SRC));
367 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE));
368 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_DST));
369 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_SRC));
371 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE));
372 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_DST));
373 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_SRC));
375 INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_DST));
376 INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_SRC));
378 INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_DST));
379 INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_SRC));
382 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, DISABLE));
383 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, DISABLE));
385 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, DISABLE));
386 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, DISABLE));
388 INSERT_FLAGS(EFX_RX_HASH(IPV4, DISABLE));
389 INSERT_FLAGS(EFX_RX_HASH(IPV6, DISABLE));
400 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_rx_hash_default_support_get: report the hash support
 * (enp->en_hash_support) a client gets without allocating its own RSS
 * context. Fails if supportp is NULL.
 */
405 __checkReturn efx_rc_t
406 efx_rx_hash_default_support_get(
408 __out efx_rx_hash_support_t *supportp)
412 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
413 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
415 if (supportp == NULL) {
421 * Report the hashing support the client gets by default if it
422 * does not allocate an RSS context itself.
424 *supportp = enp->en_hash_support;
429 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * efx_rx_scale_default_support_get: as above but for the default RSS
 * context type (enp->en_rss_context_type).
 * NOTE(review): excerpt — the NULL check on typep (original lines 443-449)
 * is among the missing lines.
 */
434 __checkReturn efx_rc_t
435 efx_rx_scale_default_support_get(
437 __out efx_rx_scale_context_type_t *typep)
441 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
442 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
450 * Report the RSS support the client gets by default if it
451 * does not allocate an RSS context itself.
453 *typep = enp->en_rss_context_type;
458 EFSYS_PROBE1(fail1, efx_rc_t, rc);
462 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * efx_rx_scale_context_alloc: dispatch to erxo_scale_context_alloc; fails
 * early when the family provides no such method (e.g. Siena's NULL slot).
 */
464 #if EFSYS_OPT_RX_SCALE
465 __checkReturn efx_rc_t
466 efx_rx_scale_context_alloc(
468 __in efx_rx_scale_context_type_t type,
469 __in uint32_t num_queues,
470 __out uint32_t *rss_contextp)
472 const efx_rx_ops_t *erxop = enp->en_erxop;
475 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
476 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
478 if (erxop->erxo_scale_context_alloc == NULL) {
482 if ((rc = erxop->erxo_scale_context_alloc(enp, type,
483 num_queues, rss_contextp)) != 0) {
492 EFSYS_PROBE1(fail1, efx_rc_t, rc);
495 #endif /* EFSYS_OPT_RX_SCALE */
/* efx_rx_scale_context_free: symmetric dispatch for freeing a context. */
497 #if EFSYS_OPT_RX_SCALE
498 __checkReturn efx_rc_t
499 efx_rx_scale_context_free(
501 __in uint32_t rss_context)
503 const efx_rx_ops_t *erxop = enp->en_erxop;
506 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
507 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
509 if (erxop->erxo_scale_context_free == NULL) {
513 if ((rc = erxop->erxo_scale_context_free(enp, rss_context)) != 0)
521 EFSYS_PROBE1(fail1, efx_rc_t, rc);
524 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * efx_rx_scale_mode_set: validate an RSS hash type for `alg`, then dispatch
 * to erxo_scale_mode_set. Legacy EFX_RX_HASH_* flags and the newer
 * EFX_RX_HASH() bits are mutually exclusive in `type`; modern bits are
 * validated against efx_rx_scale_hash_flags_get() output, and are folded
 * back to legacy flags when the firmware reports no support for additional
 * modes.
 * NOTE(review): excerpt — braces, goto targets and several return/error
 * lines are missing; kept byte-identical.
 */
526 #if EFSYS_OPT_RX_SCALE
527 __checkReturn efx_rc_t
528 efx_rx_scale_mode_set(
530 __in uint32_t rss_context,
531 __in efx_rx_hash_alg_t alg,
532 __in efx_rx_hash_type_t type,
533 __in boolean_t insert)
535 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
536 const efx_rx_ops_t *erxop = enp->en_erxop;
537 efx_rx_hash_type_t type_check;
541 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
542 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
545 * Legacy flags and modern bits cannot be
546 * used at the same time in the hash type.
548 if ((type & EFX_RX_HASH_LEGACY_MASK) &&
549 (type & ~EFX_RX_HASH_LEGACY_MASK)) {
555 * If RSS hash type is represented by additional bits
556 * in the value, the latter need to be verified since
557 * not all bit combinations are valid RSS modes. Also,
558 * depending on the firmware, some valid combinations
559 * may be unsupported. Discern additional bits in the
560 * type value and try to recognise valid combinations.
561 * If some bits remain unrecognised, report the error.
563 type_check = type & ~EFX_RX_HASH_LEGACY_MASK;
564 if (type_check != 0) {
565 unsigned int type_flags[EFX_RX_HASH_NFLAGS];
566 unsigned int type_nflags;
568 rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags,
569 EFX_ARRAY_SIZE(type_flags), &type_nflags);
573 for (i = 0; i < type_nflags; ++i) {
574 if ((type_check & type_flags[i]) == type_flags[i])
575 type_check &= ~(type_flags[i]);
578 if (type_check != 0) {
585 * Translate EFX_RX_HASH() flags to their legacy counterparts
586 * provided that the FW claims no support for additional modes.
588 if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) {
589 efx_rx_hash_type_t t_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) |
590 EFX_RX_HASH(IPV4_TCP, 2TUPLE);
591 efx_rx_hash_type_t t_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) |
592 EFX_RX_HASH(IPV6_TCP, 2TUPLE);
593 efx_rx_hash_type_t t_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
594 efx_rx_hash_type_t t_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
596 if ((type & t_ipv4) == t_ipv4)
597 type |= EFX_RX_HASH_IPV4;
598 if ((type & t_ipv6) == t_ipv6)
599 type |= EFX_RX_HASH_IPV6;
601 if (encp->enc_rx_scale_l4_hash_supported == B_TRUE) {
602 if ((type & t_ipv4_tcp) == t_ipv4_tcp)
603 type |= EFX_RX_HASH_TCPIPV4;
604 if ((type & t_ipv6_tcp) == t_ipv6_tcp)
605 type |= EFX_RX_HASH_TCPIPV6;
608 type &= EFX_RX_HASH_LEGACY_MASK;
611 if (erxop->erxo_scale_mode_set != NULL) {
612 if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg,
626 EFSYS_PROBE1(fail1, efx_rc_t, rc);
629 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * efx_rx_scale_key_set: dispatch the n-byte RSS hash key for `rss_context`
 * to erxo_scale_key_set.
 */
631 #if EFSYS_OPT_RX_SCALE
632 __checkReturn efx_rc_t
633 efx_rx_scale_key_set(
635 __in uint32_t rss_context,
636 __in_ecount(n) uint8_t *key,
639 const efx_rx_ops_t *erxop = enp->en_erxop;
642 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
643 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
645 if ((rc = erxop->erxo_scale_key_set(enp, rss_context, key, n)) != 0)
651 EFSYS_PROBE1(fail1, efx_rc_t, rc);
655 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * efx_rx_scale_tbl_set: dispatch the n-entry RSS indirection table for
 * `rss_context` to erxo_scale_tbl_set.
 */
657 #if EFSYS_OPT_RX_SCALE
658 __checkReturn efx_rc_t
659 efx_rx_scale_tbl_set(
661 __in uint32_t rss_context,
662 __in_ecount(n) unsigned int *table,
665 const efx_rx_ops_t *erxop = enp->en_erxop;
668 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
669 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
671 if ((rc = erxop->erxo_scale_tbl_set(enp, rss_context, table, n)) != 0)
677 EFSYS_PROBE1(fail1, efx_rc_t, rc);
681 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * Presumably efx_rx_qpost (name/size-parameter lines missing from excerpt):
 * posts `ndescs` buffer addresses to the queue via erxo_qpost; asserts all
 * buffers share the queue's configured buffer size when one is set.
 */
686 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
688 __in unsigned int ndescs,
689 __in unsigned int completed,
690 __in unsigned int added)
692 efx_nic_t *enp = erp->er_enp;
693 const efx_rx_ops_t *erxop = enp->en_erxop;
695 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
696 EFSYS_ASSERT(erp->er_buf_size == 0 || size == erp->er_buf_size);
698 erxop->erxo_qpost(erp, addrp, size, ndescs, completed, added);
/* efx_rx_qpush_ps_credits: packed-stream credit push, thin dispatch. */
701 #if EFSYS_OPT_RX_PACKED_STREAM
704 efx_rx_qpush_ps_credits(
707 efx_nic_t *enp = erp->er_enp;
708 const efx_rx_ops_t *erxop = enp->en_erxop;
710 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
712 erxop->erxo_qpush_ps_credits(erp);
/*
 * efx_rx_qps_packet_info: packed-stream packet parse, thin dispatch
 * returning a pointer into `buffer` (per erxo_qps_packet_info contract).
 */
715 __checkReturn uint8_t *
716 efx_rx_qps_packet_info(
718 __in uint8_t *buffer,
719 __in uint32_t buffer_length,
720 __in uint32_t current_offset,
721 __out uint16_t *lengthp,
722 __out uint32_t *next_offsetp,
723 __out uint32_t *timestamp)
725 efx_nic_t *enp = erp->er_enp;
726 const efx_rx_ops_t *erxop = enp->en_erxop;
728 return (erxop->erxo_qps_packet_info(erp, buffer,
729 buffer_length, current_offset, lengthp,
730 next_offsetp, timestamp));
733 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
/* Presumably efx_rx_qpush (name line missing): doorbell dispatch. */
738 __in unsigned int added,
739 __inout unsigned int *pushedp)
741 efx_nic_t *enp = erp->er_enp;
742 const efx_rx_ops_t *erxop = enp->en_erxop;
744 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
746 erxop->erxo_qpush(erp, added, pushedp);
/* Presumably efx_rx_qflush (name line missing): dispatch to erxo_qflush. */
749 __checkReturn efx_rc_t
753 efx_nic_t *enp = erp->er_enp;
754 const efx_rx_ops_t *erxop = enp->en_erxop;
757 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
759 if ((rc = erxop->erxo_qflush(erp)) != 0)
765 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Presumably efx_rxq_size (name line missing): bytes of descriptor ring
 * memory for `ndescs` descriptors.
 */
772 __in const efx_nic_t *enp,
773 __in unsigned int ndescs)
775 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
777 return (ndescs * encp->enc_rx_desc_size);
/*
 * Presumably efx_rxq_nbufs (name line missing): ring size rounded up to
 * EFX_BUF_SIZE buffers.
 */
780 __checkReturn unsigned int
782 __in const efx_nic_t *enp,
783 __in unsigned int ndescs)
785 return (EFX_DIV_ROUND_UP(efx_rxq_size(enp, ndescs), EFX_BUF_SIZE));
/* Presumably efx_rx_qenable (name line missing): dispatch to erxo_qenable. */
792 efx_nic_t *enp = erp->er_enp;
793 const efx_rx_ops_t *erxop = enp->en_erxop;
795 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
797 erxop->erxo_qenable(erp);
/*
 * efx_rx_qcreate_internal: common RXQ creation path shared by the public
 * efx_rx_qcreate* variants. Validates ndescs against the NIC config
 * min/max, allocates the efx_rxq_t, fills in index/mask, then dispatches
 * to erxo_qcreate; frees the object on method failure.
 * NOTE(review): excerpt — ndescs/id/eep parameter lines, power-of-two
 * check on ndescs, *erpp assignment and qcount increment are among the
 * missing lines; kept byte-identical.
 */
800 static __checkReturn efx_rc_t
801 efx_rx_qcreate_internal(
803 __in unsigned int index,
804 __in unsigned int label,
805 __in efx_rxq_type_t type,
806 __in_opt const efx_rxq_type_data_t *type_data,
807 __in efsys_mem_t *esmp,
810 __in unsigned int flags,
812 __deref_out efx_rxq_t **erpp)
814 const efx_rx_ops_t *erxop = enp->en_erxop;
816 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
819 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
820 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
822 EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
824 EFSYS_ASSERT(ISP2(encp->enc_rxq_max_ndescs));
825 EFSYS_ASSERT(ISP2(encp->enc_rxq_min_ndescs));
828 ndescs < encp->enc_rxq_min_ndescs ||
829 ndescs > encp->enc_rxq_max_ndescs) {
834 /* Allocate an RXQ object */
835 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
842 erp->er_magic = EFX_RXQ_MAGIC;
844 erp->er_index = index;
845 erp->er_mask = ndescs - 1;
848 if ((rc = erxop->erxo_qcreate(enp, index, label, type, type_data, esmp,
849 ndescs, id, flags, eep, erp)) != 0)
/* Failure path: release the RXQ object allocated above. */
860 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
864 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Presumably efx_rx_qcreate (name line missing from excerpt): public
 * default-type RXQ creation; packs buf_size into the type_data union and
 * forwards to efx_rx_qcreate_internal.
 */
869 __checkReturn efx_rc_t
872 __in unsigned int index,
873 __in unsigned int label,
874 __in efx_rxq_type_t type,
875 __in size_t buf_size,
876 __in efsys_mem_t *esmp,
879 __in unsigned int flags,
881 __deref_out efx_rxq_t **erpp)
883 efx_rxq_type_data_t type_data;
885 memset(&type_data, 0, sizeof (type_data));
887 type_data.ertd_default.ed_buf_size = buf_size;
889 return efx_rx_qcreate_internal(enp, index, label, type, &type_data,
890 esmp, ndescs, id, flags, eep, erpp);
/*
 * efx_rx_qcreate_packed_stream: packed-stream RXQ creation; packs
 * ps_buf_size and forwards with type EFX_RXQ_TYPE_PACKED_STREAM.
 */
893 #if EFSYS_OPT_RX_PACKED_STREAM
895 __checkReturn efx_rc_t
896 efx_rx_qcreate_packed_stream(
898 __in unsigned int index,
899 __in unsigned int label,
900 __in uint32_t ps_buf_size,
901 __in efsys_mem_t *esmp,
904 __deref_out efx_rxq_t **erpp)
906 efx_rxq_type_data_t type_data;
908 memset(&type_data, 0, sizeof (type_data));
910 type_data.ertd_packed_stream.eps_buf_size = ps_buf_size;
912 return efx_rx_qcreate_internal(enp, index, label,
913 EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs,
914 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp);
/*
 * efx_rx_qcreate_es_super_buffer: equal-stride super-buffer RXQ creation;
 * validates hol_block_timeout against the documented maximum before
 * forwarding with type EFX_RXQ_TYPE_ES_SUPER_BUFFER.
 */
919 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
921 __checkReturn efx_rc_t
922 efx_rx_qcreate_es_super_buffer(
924 __in unsigned int index,
925 __in unsigned int label,
926 __in uint32_t n_bufs_per_desc,
927 __in uint32_t max_dma_len,
928 __in uint32_t buf_stride,
929 __in uint32_t hol_block_timeout,
930 __in efsys_mem_t *esmp,
932 __in unsigned int flags,
934 __deref_out efx_rxq_t **erpp)
937 efx_rxq_type_data_t type_data;
939 if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
944 memset(&type_data, 0, sizeof (type_data));
946 type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc;
947 type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len;
948 type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride;
949 type_data.ertd_es_super_buffer.eessb_hol_block_timeout =
952 rc = efx_rx_qcreate_internal(enp, index, label,
953 EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs,
954 0 /* id unused on EF10 */, flags, eep, erpp);
963 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Presumably efx_rx_qdestroy (signature missing from excerpt): dispatches
 * to erxo_qdestroy then frees the efx_rxq_t object.
 */
975 efx_nic_t *enp = erp->er_enp;
976 const efx_rx_ops_t *erxop = enp->en_erxop;
978 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
980 EFSYS_ASSERT(enp->en_rx_qcount != 0);
983 erxop->erxo_qdestroy(erp);
985 /* Free the RXQ object */
986 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
/*
 * efx_pseudo_hdr_pkt_length_get: extract the packet length from the RX
 * prefix pseudo-header via the per-family erxo_prefix_pktlen method.
 */
989 __checkReturn efx_rc_t
990 efx_pseudo_hdr_pkt_length_get(
992 __in uint8_t *buffer,
993 __out uint16_t *lengthp)
995 efx_nic_t *enp = erp->er_enp;
996 const efx_rx_ops_t *erxop = enp->en_erxop;
998 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
1000 return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
/*
 * efx_pseudo_hdr_hash_get: extract the RSS hash for algorithm `func` from
 * the RX prefix; only valid when hash insertion is available.
 */
1003 #if EFSYS_OPT_RX_SCALE
1004 __checkReturn uint32_t
1005 efx_pseudo_hdr_hash_get(
1006 __in efx_rxq_t *erp,
1007 __in efx_rx_hash_alg_t func,
1008 __in uint8_t *buffer)
1010 efx_nic_t *enp = erp->er_enp;
1011 const efx_rx_ops_t *erxop = enp->en_erxop;
1013 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
1015 EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
1016 return (erxop->erxo_prefix_hash(enp, func, buffer));
1018 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * Presumably siena_rx_init (name line missing from excerpt): program
 * FR_AZ_RX_CFG_REG to defaults (no descriptor push, hash insertion off,
 * 0x3000-byte / 32 scatter buffer units), zero the RSS indirection table,
 * and record that the default RSS context is exclusive with hash insertion
 * available.
 */
1022 static __checkReturn efx_rc_t
1024 __in efx_nic_t *enp)
1029 EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
1031 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
1032 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
1033 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
1034 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
1035 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
1036 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
1037 EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
1039 /* Zero the RSS table */
1040 for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
1042 EFX_ZERO_OWORD(oword);
1043 EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
1044 index, &oword, B_TRUE);
1047 #if EFSYS_OPT_RX_SCALE
1048 /* The RSS key and indirection table are writable. */
1049 enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
1051 /* Hardware can insert RX hash with/without RSS */
1052 enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
1053 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * siena_rx_scatter_enable: set the scatter buffer size (32-byte units,
 * must divide evenly and fit the register field) and enable scatter for
 * packets not matching any filter. Rejected once any RXQ exists, since
 * the setting is global.
 */
1058 #if EFSYS_OPT_RX_SCATTER
1059 static __checkReturn efx_rc_t
1060 siena_rx_scatter_enable(
1061 __in efx_nic_t *enp,
1062 __in unsigned int buf_size)
1064 unsigned int nbuf32;
1068 nbuf32 = buf_size / 32;
1069 if ((nbuf32 == 0) ||
1070 (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
1071 ((buf_size % 32) != 0)) {
1076 if (enp->en_rx_qcount > 0) {
1081 /* Set scatter buffer size */
1082 EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
1083 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
1084 EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
1086 /* Enable scatter for packets not matching a filter */
1087 EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
1088 EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
1089 EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
1096 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1100 #endif /* EFSYS_OPT_RX_SCATTER */
/*
 * Hash-configuration helper macros used by siena_rx_scale_mode_set:
 * LFSR hash, Toeplitz IPv4 hash, and Toeplitz IPv6 hash register setup.
 * NOTE(review): excerpt — these are backslash-continued macro bodies with
 * interior lines missing (e.g. do { openers and } while (B_FALSE) closers);
 * no comments are inserted inside to avoid joining them into a
 * continuation. Kept byte-identical.
 */
1103 #define EFX_RX_LFSR_HASH(_enp, _insert) \
1105 efx_oword_t oword; \
1107 EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1108 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
1109 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
1110 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
1111 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
1112 (_insert) ? 1 : 0); \
1113 EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1115 if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
1116 EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
1118 EFX_SET_OWORD_FIELD(oword, \
1119 FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
1120 EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
1124 _NOTE(CONSTANTCONDITION) \
1127 #define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
1129 efx_oword_t oword; \
1131 EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1132 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
1133 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
1135 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
1137 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
1138 (_insert) ? 1 : 0); \
1139 EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1141 _NOTE(CONSTANTCONDITION) \
1144 #define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
1146 efx_oword_t oword; \
1148 EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
1149 EFX_SET_OWORD_FIELD(oword, \
1150 FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
1151 EFX_SET_OWORD_FIELD(oword, \
1152 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
1153 EFX_SET_OWORD_FIELD(oword, \
1154 FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
1155 EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
1159 _NOTE(CONSTANTCONDITION) \
/*
 * siena_rx_scale_mode_set: program the hash algorithm for the default RSS
 * context only (any other rss_context is rejected). LFSR and Toeplitz are
 * configured via the register macros above; on failure the hardware is
 * dropped back to LFSR with insertion off.
 * NOTE(review): excerpt — switch opener, break statements and the default
 * case are among the missing lines; kept byte-identical.
 */
1163 #if EFSYS_OPT_RX_SCALE
1165 static __checkReturn efx_rc_t
1166 siena_rx_scale_mode_set(
1167 __in efx_nic_t *enp,
1168 __in uint32_t rss_context,
1169 __in efx_rx_hash_alg_t alg,
1170 __in efx_rx_hash_type_t type,
1171 __in boolean_t insert)
1175 if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
1181 case EFX_RX_HASHALG_LFSR:
1182 EFX_RX_LFSR_HASH(enp, insert);
1185 case EFX_RX_HASHALG_TOEPLITZ:
1186 EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
1187 (type & EFX_RX_HASH_IPV4) ? B_TRUE : B_FALSE,
1188 (type & EFX_RX_HASH_TCPIPV4) ? B_TRUE : B_FALSE);
1190 EFX_RX_TOEPLITZ_IPV6_HASH(enp,
1191 (type & EFX_RX_HASH_IPV6) ? B_TRUE : B_FALSE,
1192 (type & EFX_RX_HASH_TCPIPV6) ? B_TRUE : B_FALSE,
/* Failure path: revert hardware to LFSR without insertion. */
1211 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1213 EFX_RX_LFSR_HASH(enp, B_FALSE);
/*
 * siena_rx_scale_key_set: write the Toeplitz hash key into the Siena
 * registers, most-significant byte first (key bytes fill register octets
 * from the high end: eo_u8[offset - 1]). The IPv4 key goes to
 * FR_BZ_RX_RSS_TKEY_REG; if the NIC has the IPV6 feature the remaining key
 * bytes are spread across RX_RSS_IPV6_REG3/2/1. Each write is read back
 * and verified.
 * NOTE(review): excerpt — loop closers, length checks and goto targets are
 * among the missing lines; kept byte-identical.
 */
1219 #if EFSYS_OPT_RX_SCALE
1220 static __checkReturn efx_rc_t
1221 siena_rx_scale_key_set(
1222 __in efx_nic_t *enp,
1223 __in uint32_t rss_context,
1224 __in_ecount(n) uint8_t *key,
1229 unsigned int offset;
1232 if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
1239 /* Write Toeplitz IPv4 hash key */
1240 EFX_ZERO_OWORD(oword);
1241 for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
1242 offset > 0 && byte < n;
1244 oword.eo_u8[offset - 1] = key[byte++];
1246 EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
1250 /* Verify Toeplitz IPv4 hash key */
1251 EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
1252 for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
1253 offset > 0 && byte < n;
1255 if (oword.eo_u8[offset - 1] != key[byte++]) {
1261 if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
1266 /* Write Toeplitz IPv6 hash key 3 */
1267 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
1268 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
1269 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
1270 offset > 0 && byte < n;
1272 oword.eo_u8[offset - 1] = key[byte++];
1274 EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
1276 /* Write Toeplitz IPv6 hash key 2 */
1277 EFX_ZERO_OWORD(oword);
1278 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
1279 FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
1280 offset > 0 && byte < n;
1282 oword.eo_u8[offset - 1] = key[byte++];
1284 EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
1286 /* Write Toeplitz IPv6 hash key 1 */
1287 EFX_ZERO_OWORD(oword);
1288 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
1289 FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
1290 offset > 0 && byte < n;
1292 oword.eo_u8[offset - 1] = key[byte++];
1294 EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
1298 /* Verify Toeplitz IPv6 hash key 3 */
1299 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
1300 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
1301 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
1302 offset > 0 && byte < n;
1304 if (oword.eo_u8[offset - 1] != key[byte++]) {
1310 /* Verify Toeplitz IPv6 hash key 2 */
1311 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
1312 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
1313 FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
1314 offset > 0 && byte < n;
1316 if (oword.eo_u8[offset - 1] != key[byte++]) {
1322 /* Verify Toeplitz IPv6 hash key 1 */
1323 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
1324 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
1325 FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
1326 offset > 0 && byte < n;
1328 if (oword.eo_u8[offset - 1] != key[byte++]) {
1346 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * siena_rx_scale_tbl_set: program the 128-row RSS indirection table for
 * the default RSS context. Entries repeat table[index % n] when n is
 * shorter than the hardware table (0 when n == 0); every row is then read
 * back (in reverse order) and verified.
 * NOTE(review): excerpt — loop closers and goto targets are among the
 * missing lines; kept byte-identical.
 */
1352 #if EFSYS_OPT_RX_SCALE
1353 static __checkReturn efx_rc_t
1354 siena_rx_scale_tbl_set(
1355 __in efx_nic_t *enp,
1356 __in uint32_t rss_context,
1357 __in_ecount(n) unsigned int *table,
1364 EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
1365 EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
1367 if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
1372 if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
1377 for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
1380 /* Calculate the entry to place in the table */
1381 byte = (n > 0) ? (uint32_t)table[index % n] : 0;
1383 EFSYS_PROBE2(table, int, index, uint32_t, byte);
1385 EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
1387 /* Write the table */
1388 EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
1389 index, &oword, B_TRUE);
1392 for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
1395 /* Determine if we're starting a new batch */
1396 byte = (n > 0) ? (uint32_t)table[index % n] : 0;
1398 /* Read the table */
1399 EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
1400 index, &oword, B_TRUE);
1402 /* Verify the entry */
1403 if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
1416 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1423 * Falcon/Siena pseudo-header
1424 * --------------------------
1426 * Receive packets are prefixed by an optional 16 byte pseudo-header.
1427 * The pseudo-header is a byte array of one of the forms:
1429 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1430 * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
1431 * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
1434 * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
1435 * LL.LL LFSR hash (16-bit big-endian)
/*
 * siena_rx_prefix_hash: extract the RSS hash from the Falcon/Siena RX
 * prefix documented in the comment above — Toeplitz is the 32-bit
 * big-endian word at bytes 12..15, LFSR is the 16-bit big-endian word at
 * bytes 14..15.
 * NOTE(review): excerpt — switch opener, one shift line for Toeplitz and
 * the default case are among the missing lines; kept byte-identical.
 */
1438 #if EFSYS_OPT_RX_SCALE
1439 static __checkReturn uint32_t
1440 siena_rx_prefix_hash(
1441 __in efx_nic_t *enp,
1442 __in efx_rx_hash_alg_t func,
1443 __in uint8_t *buffer)
1445 _NOTE(ARGUNUSED(enp))
1448 case EFX_RX_HASHALG_TOEPLITZ:
1449 return ((buffer[12] << 24) |
1450 (buffer[13] << 16) |
1454 case EFX_RX_HASHALG_LFSR:
1455 return ((buffer[14] << 8) | buffer[15]);
1462 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * siena_rx_prefix_pktlen: packet length is not carried in the
 * Falcon/Siena prefix, so this method has no useful output.
 */
1464 static __checkReturn efx_rc_t
1465 siena_rx_prefix_pktlen(
1466 __in efx_nic_t *enp,
1467 __in uint8_t *buffer,
1468 __out uint16_t *lengthp)
1470 _NOTE(ARGUNUSED(enp, buffer, lengthp))
1472 /* Not supported by Falcon/Siena hardware */
/*
 * Presumably siena_rx_qpost (name line missing from excerpt): write
 * `ndescs` RX kernel descriptors (buffer size + 64-bit DMA address split
 * into two dwords) into the queue's host ring at `added`, wrapping with
 * er_mask.
 */
1480 __in efx_rxq_t *erp,
1481 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
1483 __in unsigned int ndescs,
1484 __in unsigned int completed,
1485 __in unsigned int added)
1489 unsigned int offset;
1492 /* The client driver must not overfill the queue */
1493 EFSYS_ASSERT3U(added - completed + ndescs, <=,
1494 EFX_RXQ_LIMIT(erp->er_mask + 1));
1496 id = added & (erp->er_mask);
1497 for (i = 0; i < ndescs; i++) {
1498 EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
1499 unsigned int, id, efsys_dma_addr_t, addrp[i],
1502 EFX_POPULATE_QWORD_3(qword,
1503 FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
1504 FSF_AZ_RX_KER_BUF_ADDR_DW0,
1505 (uint32_t)(addrp[i] & 0xffffffff),
1506 FSF_AZ_RX_KER_BUF_ADDR_DW1,
1507 (uint32_t)(addrp[i] >> 32));
1509 offset = id * sizeof (efx_qword_t);
1510 EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
1512 id = (id + 1) & (erp->er_mask);
/*
 * Presumably siena_rx_qpush (name line missing from excerpt): ring the RX
 * doorbell — sync descriptor memory, write barrier, then write only the
 * third dword of the write-pointer register (partial-write fast path).
 */
1518 __in efx_rxq_t *erp,
1519 __in unsigned int added,
1520 __inout unsigned int *pushedp)
1522 efx_nic_t *enp = erp->er_enp;
1523 unsigned int pushed = *pushedp;
1528 /* All descriptors are pushed */
1531 /* Push the populated descriptors out */
1532 wptr = added & erp->er_mask;
1534 EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
1536 /* Only write the third DWORD */
1537 EFX_POPULATE_DWORD_1(dword,
1538 EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
1540 /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
1541 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
1542 wptr, pushed & erp->er_mask);
1543 EFSYS_PIO_WRITE_BARRIER();
1544 EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
1545 erp->er_index, &dword, B_FALSE);
/* Packed stream is an EF10-only feature; these are stub methods. */
1548 #if EFSYS_OPT_RX_PACKED_STREAM
1550 siena_rx_qpush_ps_credits(
1551 __in efx_rxq_t *erp)
1553 /* Not supported by Siena hardware */
1558 siena_rx_qps_packet_info(
1559 __in efx_rxq_t *erp,
1560 __in uint8_t *buffer,
1561 __in uint32_t buffer_length,
1562 __in uint32_t current_offset,
1563 __out uint16_t *lengthp,
1564 __out uint32_t *next_offsetp,
1565 __out uint32_t *timestamp)
1567 /* Not supported by Siena hardware */
1572 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
/*
 * Presumably siena_rx_qflush (name line missing): request a hardware flush
 * of the descriptor queue via FR_AZ_RX_FLUSH_DESCQ_REG.
 */
1574 static __checkReturn efx_rc_t
1576 __in efx_rxq_t *erp)
1578 efx_nic_t *enp = erp->er_enp;
1582 label = erp->er_index;
1584 /* Flush the queue */
1585 EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
1586 FRF_AZ_RX_FLUSH_DESCQ, label);
1587 EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
/*
 * Presumably siena_rx_qenable (name line missing): re-read the descriptor
 * pointer table entry, reset the hardware read pointers and set the
 * enable bit.
 */
1594 __in efx_rxq_t *erp)
1596 efx_nic_t *enp = erp->er_enp;
1599 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
1601 EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
1602 erp->er_index, &oword, B_TRUE);
1604 EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
1605 EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
1606 EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
1608 EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
1609 erp->er_index, &oword, B_TRUE);
/*
 * Presumably siena_rx_qcreate (name line missing from excerpt): validate
 * index/label/size/buffer-table id, honour EFX_RXQ_FLAG_SCATTER only when
 * EFSYS_OPT_RX_SCATTER is compiled in, then program the descriptor queue
 * entry in FR_AZ_RX_DESC_PTR_TBL.
 * NOTE(review): excerpt — the size-computation loop, switch on `type`,
 * several error paths and ndescs/id/eep parameter lines are missing;
 * kept byte-identical.
 */
1612 static __checkReturn efx_rc_t
1614 __in efx_nic_t *enp,
1615 __in unsigned int index,
1616 __in unsigned int label,
1617 __in efx_rxq_type_t type,
1618 __in_opt const efx_rxq_type_data_t *type_data,
1619 __in efsys_mem_t *esmp,
1622 __in unsigned int flags,
1623 __in efx_evq_t *eep,
1624 __in efx_rxq_t *erp)
1626 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1629 boolean_t jumbo = B_FALSE;
1632 _NOTE(ARGUNUSED(esmp))
1634 EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
1635 (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
1636 EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
1638 if (index >= encp->enc_rxq_limit) {
1643 (1U << size) <= encp->enc_rxq_max_ndescs / encp->enc_rxq_min_ndescs;
1645 if ((1U << size) == (uint32_t)ndescs / encp->enc_rxq_min_ndescs)
1647 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1653 case EFX_RXQ_TYPE_DEFAULT:
1654 erp->er_buf_size = type_data->ertd_default.ed_buf_size;
1662 if (flags & EFX_RXQ_FLAG_SCATTER) {
1663 #if EFSYS_OPT_RX_SCATTER
1668 #endif /* EFSYS_OPT_RX_SCATTER */
1671 /* Set up the new descriptor queue */
1672 EFX_POPULATE_OWORD_7(oword,
1673 FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
1674 FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
1675 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
1676 FRF_AZ_RX_DESCQ_LABEL, label,
1677 FRF_AZ_RX_DESCQ_SIZE, size,
1678 FRF_AZ_RX_DESCQ_TYPE, 0,
1679 FRF_AZ_RX_DESCQ_JUMBO, jumbo);
1681 EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
1682 erp->er_index, &oword, B_TRUE);
1686 #if !EFSYS_OPT_RX_SCATTER
1695 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/*
 * Presumably siena_rx_qdestroy (name line missing): zero the descriptor
 * pointer table entry to purge the queue.
 */
1702 __in efx_rxq_t *erp)
1704 efx_nic_t *enp = erp->er_enp;
1707 /* Purge descriptor queue */
1708 EFX_ZERO_OWORD(oword);
1710 EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
1711 erp->er_index, &oword, B_TRUE);
/* Presumably siena_rx_fini (name line missing): nothing to tear down. */
1716 __in efx_nic_t *enp)
1718 _NOTE(ARGUNUSED(enp))
1721 #endif /* EFSYS_OPT_SIENA */