1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2007-2018 Solarflare Communications Inc.
13 static __checkReturn efx_rc_t
21 #if EFSYS_OPT_RX_SCATTER
22 static __checkReturn efx_rc_t
23 siena_rx_scatter_enable(
25 __in unsigned int buf_size);
26 #endif /* EFSYS_OPT_RX_SCATTER */
28 #if EFSYS_OPT_RX_SCALE
29 static __checkReturn efx_rc_t
30 siena_rx_scale_mode_set(
32 __in uint32_t rss_context,
33 __in efx_rx_hash_alg_t alg,
34 __in efx_rx_hash_type_t type,
35 __in boolean_t insert);
37 static __checkReturn efx_rc_t
38 siena_rx_scale_key_set(
40 __in uint32_t rss_context,
41 __in_ecount(n) uint8_t *key,
44 static __checkReturn efx_rc_t
45 siena_rx_scale_tbl_set(
47 __in uint32_t rss_context,
48 __in_ecount(n) unsigned int *table,
51 static __checkReturn uint32_t
54 __in efx_rx_hash_alg_t func,
55 __in uint8_t *buffer);
57 #endif /* EFSYS_OPT_RX_SCALE */
59 static __checkReturn efx_rc_t
60 siena_rx_prefix_pktlen(
63 __out uint16_t *lengthp);
68 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
70 __in unsigned int ndescs,
71 __in unsigned int completed,
72 __in unsigned int added);
77 __in unsigned int added,
78 __inout unsigned int *pushedp);
80 #if EFSYS_OPT_RX_PACKED_STREAM
82 siena_rx_qpush_ps_credits(
85 static __checkReturn uint8_t *
86 siena_rx_qps_packet_info(
89 __in uint32_t buffer_length,
90 __in uint32_t current_offset,
91 __out uint16_t *lengthp,
92 __out uint32_t *next_offsetp,
93 __out uint32_t *timestamp);
96 static __checkReturn efx_rc_t
102 __in efx_rxq_t *erp);
104 static __checkReturn efx_rc_t
107 __in unsigned int index,
108 __in unsigned int label,
109 __in efx_rxq_type_t type,
110 __in_opt const efx_rxq_type_data_t *type_data,
111 __in efsys_mem_t *esmp,
114 __in unsigned int flags,
116 __in efx_rxq_t *erp);
120 __in efx_rxq_t *erp);
122 #endif /* EFSYS_OPT_SIENA */
/*
 * RX method vector for Siena-family controllers.  Each erxo_* slot is
 * bound to its siena_rx_* implementation; optional features are only
 * present when the corresponding EFSYS_OPT_* build option is enabled.
 * NOTE(review): this extraction is missing interior lines (e.g. the
 * closing #endif/brace of this initializer) — verify against the full
 * source before relying on exact structure.
 */
126 static const efx_rx_ops_t __efx_rx_siena_ops = {
127 siena_rx_init, /* erxo_init */
128 siena_rx_fini, /* erxo_fini */
129 #if EFSYS_OPT_RX_SCATTER
130 siena_rx_scatter_enable, /* erxo_scatter_enable */
132 #if EFSYS_OPT_RX_SCALE
/*
 * Siena has no allocatable RSS contexts: only the default context
 * exists, so the context alloc/free hooks are intentionally NULL.
 */
133 NULL, /* erxo_scale_context_alloc */
134 NULL, /* erxo_scale_context_free */
135 siena_rx_scale_mode_set, /* erxo_scale_mode_set */
136 siena_rx_scale_key_set, /* erxo_scale_key_set */
137 siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
138 siena_rx_prefix_hash, /* erxo_prefix_hash */
140 siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
141 siena_rx_qpost, /* erxo_qpost */
142 siena_rx_qpush, /* erxo_qpush */
143 #if EFSYS_OPT_RX_PACKED_STREAM
/* Packed stream is not supported on Siena; these hooks return/assert */
144 siena_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
145 siena_rx_qps_packet_info, /* erxo_qps_packet_info */
147 siena_rx_qflush, /* erxo_qflush */
148 siena_rx_qenable, /* erxo_qenable */
149 siena_rx_qcreate, /* erxo_qcreate */
150 siena_rx_qdestroy, /* erxo_qdestroy */
152 #endif /* EFSYS_OPT_SIENA */
154 #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
155 static const efx_rx_ops_t __efx_rx_ef10_ops = {
156 ef10_rx_init, /* erxo_init */
157 ef10_rx_fini, /* erxo_fini */
158 #if EFSYS_OPT_RX_SCATTER
159 ef10_rx_scatter_enable, /* erxo_scatter_enable */
161 #if EFSYS_OPT_RX_SCALE
162 ef10_rx_scale_context_alloc, /* erxo_scale_context_alloc */
163 ef10_rx_scale_context_free, /* erxo_scale_context_free */
164 ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
165 ef10_rx_scale_key_set, /* erxo_scale_key_set */
166 ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
167 ef10_rx_prefix_hash, /* erxo_prefix_hash */
169 ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
170 ef10_rx_qpost, /* erxo_qpost */
171 ef10_rx_qpush, /* erxo_qpush */
172 #if EFSYS_OPT_RX_PACKED_STREAM
173 ef10_rx_qpush_ps_credits, /* erxo_qpush_ps_credits */
174 ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
176 ef10_rx_qflush, /* erxo_qflush */
177 ef10_rx_qenable, /* erxo_qenable */
178 ef10_rx_qcreate, /* erxo_qcreate */
179 ef10_rx_qdestroy, /* erxo_qdestroy */
181 #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
/*
 * efx_rx_init(): initialise the RX module for this NIC.
 *
 * Selects the family-specific RX ops vector, runs its erxo_init hook,
 * then records the vector and sets EFX_MOD_RX in en_mod_flags.
 * Preconditions: the NIC module is initialised; the EV module must be
 * up first and the RX module must not already be initialised (both are
 * checked below).  Returns 0 on success, an efx_rc_t error otherwise.
 * NOTE(review): error labels and break statements are missing from
 * this extraction — the visible fall-through is an artifact.
 */
184 __checkReturn efx_rc_t
186 __inout efx_nic_t *enp)
188 const efx_rx_ops_t *erxop;
191 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
192 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
/* Event module must be initialised before RX */
194 if (!(enp->en_mod_flags & EFX_MOD_EV)) {
/* Refuse double initialisation */
199 if (enp->en_mod_flags & EFX_MOD_RX) {
/* Pick the ops vector for the controller family */
204 switch (enp->en_family) {
206 case EFX_FAMILY_SIENA:
207 erxop = &__efx_rx_siena_ops;
209 #endif /* EFSYS_OPT_SIENA */
211 #if EFSYS_OPT_HUNTINGTON
212 case EFX_FAMILY_HUNTINGTON:
213 erxop = &__efx_rx_ef10_ops;
215 #endif /* EFSYS_OPT_HUNTINGTON */
217 #if EFSYS_OPT_MEDFORD
218 case EFX_FAMILY_MEDFORD:
219 erxop = &__efx_rx_ef10_ops;
221 #endif /* EFSYS_OPT_MEDFORD */
223 #if EFSYS_OPT_MEDFORD2
224 case EFX_FAMILY_MEDFORD2:
225 erxop = &__efx_rx_ef10_ops;
227 #endif /* EFSYS_OPT_MEDFORD2 */
/* Run the family-specific initialisation hook */
235 if ((rc = erxop->erxo_init(enp)) != 0)
238 enp->en_erxop = erxop;
239 enp->en_mod_flags |= EFX_MOD_RX;
/* Failure path: leave the RX module marked uninitialised */
249 EFSYS_PROBE1(fail1, efx_rc_t, rc);
251 enp->en_erxop = NULL;
252 enp->en_mod_flags &= ~EFX_MOD_RX;
260 const efx_rx_ops_t *erxop = enp->en_erxop;
262 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
263 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
264 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
265 EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
267 erxop->erxo_fini(enp);
269 enp->en_erxop = NULL;
270 enp->en_mod_flags &= ~EFX_MOD_RX;
273 #if EFSYS_OPT_RX_SCATTER
274 __checkReturn efx_rc_t
275 efx_rx_scatter_enable(
277 __in unsigned int buf_size)
279 const efx_rx_ops_t *erxop = enp->en_erxop;
282 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
283 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
285 if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
291 EFSYS_PROBE1(fail1, efx_rc_t, rc);
294 #endif /* EFSYS_OPT_RX_SCATTER */
296 #if EFSYS_OPT_RX_SCALE
297 __checkReturn efx_rc_t
298 efx_rx_scale_hash_flags_get(
300 __in efx_rx_hash_alg_t hash_alg,
301 __out_ecount_part(max_nflags, *nflagsp) unsigned int *flagsp,
302 __in unsigned int max_nflags,
303 __out unsigned int *nflagsp)
305 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
306 unsigned int nflags = 0;
309 if (flagsp == NULL || nflagsp == NULL) {
314 if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0) {
319 /* Helper to add flags word to flags array without buffer overflow */
320 #define INSERT_FLAGS(_flags) \
322 if (nflags >= max_nflags) { \
326 *(flagsp + nflags) = (_flags); \
329 _NOTE(CONSTANTCONDITION) \
332 if (encp->enc_rx_scale_l4_hash_supported != B_FALSE) {
333 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 4TUPLE));
334 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 4TUPLE));
337 if ((encp->enc_rx_scale_l4_hash_supported != B_FALSE) &&
338 (encp->enc_rx_scale_additional_modes_supported != B_FALSE)) {
339 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_DST));
340 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE_SRC));
342 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_DST));
343 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE_SRC));
345 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 4TUPLE));
346 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_DST));
347 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE_SRC));
349 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 4TUPLE));
350 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_DST));
351 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE_SRC));
354 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 2TUPLE));
355 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 2TUPLE));
357 INSERT_FLAGS(EFX_RX_HASH(IPV4, 2TUPLE));
358 INSERT_FLAGS(EFX_RX_HASH(IPV6, 2TUPLE));
360 if (encp->enc_rx_scale_additional_modes_supported != B_FALSE) {
361 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_DST));
362 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, 1TUPLE_SRC));
364 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_DST));
365 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, 1TUPLE_SRC));
367 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 2TUPLE));
368 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_DST));
369 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, 1TUPLE_SRC));
371 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 2TUPLE));
372 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_DST));
373 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, 1TUPLE_SRC));
375 INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_DST));
376 INSERT_FLAGS(EFX_RX_HASH(IPV4, 1TUPLE_SRC));
378 INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_DST));
379 INSERT_FLAGS(EFX_RX_HASH(IPV6, 1TUPLE_SRC));
382 INSERT_FLAGS(EFX_RX_HASH(IPV4_TCP, DISABLE));
383 INSERT_FLAGS(EFX_RX_HASH(IPV6_TCP, DISABLE));
385 INSERT_FLAGS(EFX_RX_HASH(IPV4_UDP, DISABLE));
386 INSERT_FLAGS(EFX_RX_HASH(IPV6_UDP, DISABLE));
388 INSERT_FLAGS(EFX_RX_HASH(IPV4, DISABLE));
389 INSERT_FLAGS(EFX_RX_HASH(IPV6, DISABLE));
400 EFSYS_PROBE1(fail1, efx_rc_t, rc);
405 __checkReturn efx_rc_t
406 efx_rx_hash_default_support_get(
408 __out efx_rx_hash_support_t *supportp)
412 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
413 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
415 if (supportp == NULL) {
421 * Report the hashing support the client gets by default if it
422 * does not allocate an RSS context itself.
424 *supportp = enp->en_hash_support;
429 EFSYS_PROBE1(fail1, efx_rc_t, rc);
434 __checkReturn efx_rc_t
435 efx_rx_scale_default_support_get(
437 __out efx_rx_scale_context_type_t *typep)
441 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
442 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
450 * Report the RSS support the client gets by default if it
451 * does not allocate an RSS context itself.
453 *typep = enp->en_rss_context_type;
458 EFSYS_PROBE1(fail1, efx_rc_t, rc);
462 #endif /* EFSYS_OPT_RX_SCALE */
464 #if EFSYS_OPT_RX_SCALE
465 __checkReturn efx_rc_t
466 efx_rx_scale_context_alloc(
468 __in efx_rx_scale_context_type_t type,
469 __in uint32_t num_queues,
470 __out uint32_t *rss_contextp)
472 const efx_rx_ops_t *erxop = enp->en_erxop;
475 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
476 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
478 if (erxop->erxo_scale_context_alloc == NULL) {
482 if ((rc = erxop->erxo_scale_context_alloc(enp, type,
483 num_queues, rss_contextp)) != 0) {
492 EFSYS_PROBE1(fail1, efx_rc_t, rc);
495 #endif /* EFSYS_OPT_RX_SCALE */
497 #if EFSYS_OPT_RX_SCALE
498 __checkReturn efx_rc_t
499 efx_rx_scale_context_free(
501 __in uint32_t rss_context)
503 const efx_rx_ops_t *erxop = enp->en_erxop;
506 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
507 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
509 if (erxop->erxo_scale_context_free == NULL) {
513 if ((rc = erxop->erxo_scale_context_free(enp, rss_context)) != 0)
521 EFSYS_PROBE1(fail1, efx_rc_t, rc);
524 #endif /* EFSYS_OPT_RX_SCALE */
526 #if EFSYS_OPT_RX_SCALE
/*
 * efx_rx_scale_mode_set(): configure the RSS hash algorithm, hash type
 * and hash-insertion setting for the given RSS context.
 *
 * The hash type may be expressed either with legacy EFX_RX_HASH_*
 * flags or with modern EFX_RX_HASH() bits — never both at once.
 * Modern bits are validated against the flag combinations the firmware
 * reports via efx_rx_scale_hash_flags_get(); any unrecognised residue
 * is rejected.  When the firmware lacks additional-modes support, the
 * modern bits are folded back into their legacy counterparts before
 * being handed to the family-specific erxo_scale_mode_set hook.
 */
527 __checkReturn efx_rc_t
528 efx_rx_scale_mode_set(
530 __in uint32_t rss_context,
531 __in efx_rx_hash_alg_t alg,
532 __in efx_rx_hash_type_t type,
533 __in boolean_t insert)
535 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
536 const efx_rx_ops_t *erxop = enp->en_erxop;
537 efx_rx_hash_type_t type_check;
541 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
542 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
545 * Legacy flags and modern bits cannot be
546 * used at the same time in the hash type.
548 if ((type & EFX_RX_HASH_LEGACY_MASK) &&
549 (type & ~EFX_RX_HASH_LEGACY_MASK)) {
555 * If RSS hash type is represented by additional bits
556 * in the value, the latter need to be verified since
557 * not all bit combinations are valid RSS modes. Also,
558 * depending on the firmware, some valid combinations
559 * may be unsupported. Discern additional bits in the
560 * type value and try to recognise valid combinations.
561 * If some bits remain unrecognised, report the error.
563 type_check = type & ~EFX_RX_HASH_LEGACY_MASK;
564 if (type_check != 0) {
565 unsigned int type_flags[EFX_RX_HASH_NFLAGS];
566 unsigned int type_nflags;
568 rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags,
569 EFX_ARRAY_SIZE(type_flags), &type_nflags);
/* Strip every flag combination the firmware reports as supported */
573 for (i = 0; i < type_nflags; ++i) {
574 if ((type_check & type_flags[i]) == type_flags[i])
575 type_check &= ~(type_flags[i]);
/* Anything left over is an unsupported/invalid mode */
578 if (type_check != 0) {
585 * Translate EFX_RX_HASH() flags to their legacy counterparts
586 * provided that the FW claims no support for additional modes.
588 if (encp->enc_rx_scale_additional_modes_supported == B_FALSE) {
589 efx_rx_hash_type_t t_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) |
590 EFX_RX_HASH(IPV4_TCP, 2TUPLE);
591 efx_rx_hash_type_t t_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) |
592 EFX_RX_HASH(IPV6_TCP, 2TUPLE);
593 efx_rx_hash_type_t t_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
594 efx_rx_hash_type_t t_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
596 if ((type & t_ipv4) == t_ipv4)
597 type |= EFX_RX_HASH_IPV4;
598 if ((type & t_ipv6) == t_ipv6)
599 type |= EFX_RX_HASH_IPV6;
/* L4 legacy flags only when the firmware supports L4 hashing */
601 if (encp->enc_rx_scale_l4_hash_supported == B_TRUE) {
602 if ((type & t_ipv4_tcp) == t_ipv4_tcp)
603 type |= EFX_RX_HASH_TCPIPV4;
604 if ((type & t_ipv6_tcp) == t_ipv6_tcp)
605 type |= EFX_RX_HASH_TCPIPV6;
/* Discard the modern bits now that they are folded into legacy ones */
608 type &= EFX_RX_HASH_LEGACY_MASK;
611 if (erxop->erxo_scale_mode_set != NULL) {
612 if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg,
626 EFSYS_PROBE1(fail1, efx_rc_t, rc);
629 #endif /* EFSYS_OPT_RX_SCALE */
631 #if EFSYS_OPT_RX_SCALE
632 __checkReturn efx_rc_t
633 efx_rx_scale_key_set(
635 __in uint32_t rss_context,
636 __in_ecount(n) uint8_t *key,
639 const efx_rx_ops_t *erxop = enp->en_erxop;
642 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
643 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
645 if ((rc = erxop->erxo_scale_key_set(enp, rss_context, key, n)) != 0)
651 EFSYS_PROBE1(fail1, efx_rc_t, rc);
655 #endif /* EFSYS_OPT_RX_SCALE */
657 #if EFSYS_OPT_RX_SCALE
658 __checkReturn efx_rc_t
659 efx_rx_scale_tbl_set(
661 __in uint32_t rss_context,
662 __in_ecount(n) unsigned int *table,
665 const efx_rx_ops_t *erxop = enp->en_erxop;
668 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
669 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
671 if ((rc = erxop->erxo_scale_tbl_set(enp, rss_context, table, n)) != 0)
677 EFSYS_PROBE1(fail1, efx_rc_t, rc);
681 #endif /* EFSYS_OPT_RX_SCALE */
686 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
688 __in unsigned int ndescs,
689 __in unsigned int completed,
690 __in unsigned int added)
692 efx_nic_t *enp = erp->er_enp;
693 const efx_rx_ops_t *erxop = enp->en_erxop;
695 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
697 erxop->erxo_qpost(erp, addrp, size, ndescs, completed, added);
700 #if EFSYS_OPT_RX_PACKED_STREAM
703 efx_rx_qpush_ps_credits(
706 efx_nic_t *enp = erp->er_enp;
707 const efx_rx_ops_t *erxop = enp->en_erxop;
709 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
711 erxop->erxo_qpush_ps_credits(erp);
714 __checkReturn uint8_t *
715 efx_rx_qps_packet_info(
717 __in uint8_t *buffer,
718 __in uint32_t buffer_length,
719 __in uint32_t current_offset,
720 __out uint16_t *lengthp,
721 __out uint32_t *next_offsetp,
722 __out uint32_t *timestamp)
724 efx_nic_t *enp = erp->er_enp;
725 const efx_rx_ops_t *erxop = enp->en_erxop;
727 return (erxop->erxo_qps_packet_info(erp, buffer,
728 buffer_length, current_offset, lengthp,
729 next_offsetp, timestamp));
732 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
737 __in unsigned int added,
738 __inout unsigned int *pushedp)
740 efx_nic_t *enp = erp->er_enp;
741 const efx_rx_ops_t *erxop = enp->en_erxop;
743 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
745 erxop->erxo_qpush(erp, added, pushedp);
748 __checkReturn efx_rc_t
752 efx_nic_t *enp = erp->er_enp;
753 const efx_rx_ops_t *erxop = enp->en_erxop;
756 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
758 if ((rc = erxop->erxo_qflush(erp)) != 0)
764 EFSYS_PROBE1(fail1, efx_rc_t, rc);
771 __in const efx_nic_t *enp,
772 __in unsigned int ndescs)
774 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
776 return (ndescs * encp->enc_rx_desc_size);
779 __checkReturn unsigned int
781 __in const efx_nic_t *enp,
782 __in unsigned int ndescs)
784 return (EFX_DIV_ROUND_UP(efx_rxq_size(enp, ndescs), EFX_BUF_SIZE));
791 efx_nic_t *enp = erp->er_enp;
792 const efx_rx_ops_t *erxop = enp->en_erxop;
794 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
796 erxop->erxo_qenable(erp);
799 static __checkReturn efx_rc_t
800 efx_rx_qcreate_internal(
802 __in unsigned int index,
803 __in unsigned int label,
804 __in efx_rxq_type_t type,
805 __in_opt const efx_rxq_type_data_t *type_data,
806 __in efsys_mem_t *esmp,
809 __in unsigned int flags,
811 __deref_out efx_rxq_t **erpp)
813 const efx_rx_ops_t *erxop = enp->en_erxop;
815 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
818 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
819 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
821 EFSYS_ASSERT(ISP2(encp->enc_rxq_max_ndescs));
822 EFSYS_ASSERT(ISP2(encp->enc_rxq_min_ndescs));
825 ndescs < encp->enc_rxq_min_ndescs ||
826 ndescs > encp->enc_rxq_max_ndescs) {
831 /* Allocate an RXQ object */
832 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
839 erp->er_magic = EFX_RXQ_MAGIC;
841 erp->er_index = index;
842 erp->er_mask = ndescs - 1;
845 if ((rc = erxop->erxo_qcreate(enp, index, label, type, type_data, esmp,
846 ndescs, id, flags, eep, erp)) != 0)
857 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
861 EFSYS_PROBE1(fail1, efx_rc_t, rc);
866 __checkReturn efx_rc_t
869 __in unsigned int index,
870 __in unsigned int label,
871 __in efx_rxq_type_t type,
872 __in efsys_mem_t *esmp,
875 __in unsigned int flags,
877 __deref_out efx_rxq_t **erpp)
879 return efx_rx_qcreate_internal(enp, index, label, type, NULL,
880 esmp, ndescs, id, flags, eep, erpp);
883 #if EFSYS_OPT_RX_PACKED_STREAM
885 __checkReturn efx_rc_t
886 efx_rx_qcreate_packed_stream(
888 __in unsigned int index,
889 __in unsigned int label,
890 __in uint32_t ps_buf_size,
891 __in efsys_mem_t *esmp,
894 __deref_out efx_rxq_t **erpp)
896 efx_rxq_type_data_t type_data;
898 memset(&type_data, 0, sizeof (type_data));
900 type_data.ertd_packed_stream.eps_buf_size = ps_buf_size;
902 return efx_rx_qcreate_internal(enp, index, label,
903 EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs,
904 0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp);
909 #if EFSYS_OPT_RX_ES_SUPER_BUFFER
911 __checkReturn efx_rc_t
912 efx_rx_qcreate_es_super_buffer(
914 __in unsigned int index,
915 __in unsigned int label,
916 __in uint32_t n_bufs_per_desc,
917 __in uint32_t max_dma_len,
918 __in uint32_t buf_stride,
919 __in uint32_t hol_block_timeout,
920 __in efsys_mem_t *esmp,
922 __in unsigned int flags,
924 __deref_out efx_rxq_t **erpp)
927 efx_rxq_type_data_t type_data;
929 if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
934 memset(&type_data, 0, sizeof (type_data));
936 type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc;
937 type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len;
938 type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride;
939 type_data.ertd_es_super_buffer.eessb_hol_block_timeout =
942 rc = efx_rx_qcreate_internal(enp, index, label,
943 EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs,
944 0 /* id unused on EF10 */, flags, eep, erpp);
953 EFSYS_PROBE1(fail1, efx_rc_t, rc);
965 efx_nic_t *enp = erp->er_enp;
966 const efx_rx_ops_t *erxop = enp->en_erxop;
968 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
970 erxop->erxo_qdestroy(erp);
973 __checkReturn efx_rc_t
974 efx_pseudo_hdr_pkt_length_get(
976 __in uint8_t *buffer,
977 __out uint16_t *lengthp)
979 efx_nic_t *enp = erp->er_enp;
980 const efx_rx_ops_t *erxop = enp->en_erxop;
982 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
984 return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
987 #if EFSYS_OPT_RX_SCALE
988 __checkReturn uint32_t
989 efx_pseudo_hdr_hash_get(
991 __in efx_rx_hash_alg_t func,
992 __in uint8_t *buffer)
994 efx_nic_t *enp = erp->er_enp;
995 const efx_rx_ops_t *erxop = enp->en_erxop;
997 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
999 EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
1000 return (erxop->erxo_prefix_hash(enp, func, buffer));
1002 #endif /* EFSYS_OPT_RX_SCALE */
/*
 * siena_rx_init(): bring Siena RX hardware to a known default state.
 * Clears descriptor-push and all hash/insertion settings in
 * FR_AZ_RX_CFG_REG, sets the default user buffer size (0x3000 bytes,
 * expressed in 32-byte units as the register requires), zeroes the RSS
 * indirection table, and advertises the NIC's RSS capabilities.
 */
1006 static __checkReturn efx_rc_t
1008 __in efx_nic_t *enp)
1013 EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
1015 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
1016 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
1017 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
1018 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
1019 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
1020 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
1021 EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
1023 /* Zero the RSS table */
1024 for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
1026 EFX_ZERO_OWORD(oword);
1027 EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
1028 index, &oword, B_TRUE);
1031 #if EFSYS_OPT_RX_SCALE
1032 /* The RSS key and indirection table are writable. */
1033 enp->en_rss_context_type = EFX_RX_SCALE_EXCLUSIVE;
1035 /* Hardware can insert RX hash with/without RSS */
1036 enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
1037 #endif /* EFSYS_OPT_RX_SCALE */
1042 #if EFSYS_OPT_RX_SCATTER
/*
 * siena_rx_scatter_enable(): set the RX scatter buffer size and enable
 * scatter for unmatched packets.
 *
 * buf_size must be a non-zero multiple of 32 bytes small enough to fit
 * in the FRF_BZ_RX_USR_BUF_SIZE field (the register holds the size in
 * 32-byte units).  The setting is global, so it is rejected while any
 * RX queue exists (en_rx_qcount > 0).
 * Returns 0 on success; the error labels are not visible in this
 * extraction.
 */
1043 static __checkReturn efx_rc_t
1044 siena_rx_scatter_enable(
1045 __in efx_nic_t *enp,
1046 __in unsigned int buf_size)
1048 unsigned int nbuf32;
1052 nbuf32 = buf_size / 32;
/* Validate: non-zero, fits in the register field, 32-byte aligned */
1053 if ((nbuf32 == 0) ||
1054 (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
1055 ((buf_size % 32) != 0)) {
/* Cannot change the global buffer size with live RX queues */
1060 if (enp->en_rx_qcount > 0) {
1065 /* Set scatter buffer size */
1066 EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
1067 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
1068 EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
1070 /* Enable scatter for packets not matching a filter */
1071 EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
1072 EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
1073 EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
1080 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1084 #endif /* EFSYS_OPT_RX_SCATTER */
1087 #define EFX_RX_LFSR_HASH(_enp, _insert) \
1089 efx_oword_t oword; \
1091 EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1092 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
1093 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
1094 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
1095 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
1096 (_insert) ? 1 : 0); \
1097 EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1099 if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
1100 EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
1102 EFX_SET_OWORD_FIELD(oword, \
1103 FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
1104 EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
1108 _NOTE(CONSTANTCONDITION) \
1111 #define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
1113 efx_oword_t oword; \
1115 EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1116 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
1117 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
1119 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
1121 EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
1122 (_insert) ? 1 : 0); \
1123 EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
1125 _NOTE(CONSTANTCONDITION) \
1128 #define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
1130 efx_oword_t oword; \
1132 EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
1133 EFX_SET_OWORD_FIELD(oword, \
1134 FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
1135 EFX_SET_OWORD_FIELD(oword, \
1136 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
1137 EFX_SET_OWORD_FIELD(oword, \
1138 FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
1139 EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
1143 _NOTE(CONSTANTCONDITION) \
1147 #if EFSYS_OPT_RX_SCALE
/*
 * siena_rx_scale_mode_set(): program Siena's RSS hash registers for
 * the requested algorithm/type.
 *
 * Siena has only the default RSS context, so any other rss_context is
 * rejected.  LFSR and Toeplitz are configured via the EFX_RX_*_HASH
 * register macros; the IPv6 Toeplitz macro also takes an rc output
 * (tail of that call is missing from this extraction).  On failure the
 * visible tail reverts the hardware to LFSR with insertion disabled.
 */
1149 static __checkReturn efx_rc_t
1150 siena_rx_scale_mode_set(
1151 __in efx_nic_t *enp,
1152 __in uint32_t rss_context,
1153 __in efx_rx_hash_alg_t alg,
1154 __in efx_rx_hash_type_t type,
1155 __in boolean_t insert)
/* Only the default context exists on Siena */
1159 if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
1165 case EFX_RX_HASHALG_LFSR:
1166 EFX_RX_LFSR_HASH(enp, insert);
1169 case EFX_RX_HASHALG_TOEPLITZ:
1170 EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
1171 (type & EFX_RX_HASH_IPV4) ? B_TRUE : B_FALSE,
1172 (type & EFX_RX_HASH_TCPIPV4) ? B_TRUE : B_FALSE);
1174 EFX_RX_TOEPLITZ_IPV6_HASH(enp,
1175 (type & EFX_RX_HASH_IPV6) ? B_TRUE : B_FALSE,
1176 (type & EFX_RX_HASH_TCPIPV6) ? B_TRUE : B_FALSE,
1195 EFSYS_PROBE1(fail1, efx_rc_t, rc);
/* Failure fallback: known-safe LFSR hashing, no prefix insertion */
1197 EFX_RX_LFSR_HASH(enp, B_FALSE);
1203 #if EFSYS_OPT_RX_SCALE
1204 static __checkReturn efx_rc_t
1205 siena_rx_scale_key_set(
1206 __in efx_nic_t *enp,
1207 __in uint32_t rss_context,
1208 __in_ecount(n) uint8_t *key,
1213 unsigned int offset;
1216 if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
1223 /* Write Toeplitz IPv4 hash key */
1224 EFX_ZERO_OWORD(oword);
1225 for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
1226 offset > 0 && byte < n;
1228 oword.eo_u8[offset - 1] = key[byte++];
1230 EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
1234 /* Verify Toeplitz IPv4 hash key */
1235 EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
1236 for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
1237 offset > 0 && byte < n;
1239 if (oword.eo_u8[offset - 1] != key[byte++]) {
1245 if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
1250 /* Write Toeplitz IPv6 hash key 3 */
1251 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
1252 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
1253 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
1254 offset > 0 && byte < n;
1256 oword.eo_u8[offset - 1] = key[byte++];
1258 EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
1260 /* Write Toeplitz IPv6 hash key 2 */
1261 EFX_ZERO_OWORD(oword);
1262 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
1263 FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
1264 offset > 0 && byte < n;
1266 oword.eo_u8[offset - 1] = key[byte++];
1268 EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
1270 /* Write Toeplitz IPv6 hash key 1 */
1271 EFX_ZERO_OWORD(oword);
1272 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
1273 FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
1274 offset > 0 && byte < n;
1276 oword.eo_u8[offset - 1] = key[byte++];
1278 EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
1282 /* Verify Toeplitz IPv6 hash key 3 */
1283 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
1284 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
1285 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
1286 offset > 0 && byte < n;
1288 if (oword.eo_u8[offset - 1] != key[byte++]) {
1294 /* Verify Toeplitz IPv6 hash key 2 */
1295 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
1296 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
1297 FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
1298 offset > 0 && byte < n;
1300 if (oword.eo_u8[offset - 1] != key[byte++]) {
1306 /* Verify Toeplitz IPv6 hash key 1 */
1307 EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
1308 for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
1309 FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
1310 offset > 0 && byte < n;
1312 if (oword.eo_u8[offset - 1] != key[byte++]) {
1330 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1336 #if EFSYS_OPT_RX_SCALE
/*
 * siena_rx_scale_tbl_set(): load the RSS indirection table.
 *
 * Only the default RSS context is accepted on Siena, and n must not
 * exceed the hardware table size (FR_BZ_RX_INDIRECTION_TBL_ROWS).  The
 * caller's n-entry table is replicated modulo n to fill all rows; when
 * n == 0 the table is written as all zeroes.  After writing, every row
 * is read back and verified against the expected value.
 */
1337 static __checkReturn efx_rc_t
1338 siena_rx_scale_tbl_set(
1339 __in efx_nic_t *enp,
1340 __in uint32_t rss_context,
1341 __in_ecount(n) unsigned int *table,
/* Compile-time sanity: software table/queue limits match hardware */
1348 EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
1349 EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
1351 if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
1356 if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
1361 for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
1364 /* Calculate the entry to place in the table */
1365 byte = (n > 0) ? (uint32_t)table[index % n] : 0;
1367 EFSYS_PROBE2(table, int, index, uint32_t, byte);
1369 EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
1371 /* Write the table */
1372 EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
1373 index, &oword, B_TRUE);
/* Read back every row (in reverse) and verify what was written */
1376 for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
1379 /* Determine if we're starting a new batch */
1380 byte = (n > 0) ? (uint32_t)table[index % n] : 0;
1382 /* Read the table */
1383 EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
1384 index, &oword, B_TRUE);
1386 /* Verify the entry */
1387 if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
1400 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1407 * Falcon/Siena pseudo-header
1408 * --------------------------
1410 * Receive packets are prefixed by an optional 16 byte pseudo-header.
1411 * The pseudo-header is a byte array of one of the forms:
1413 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1414 * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
1415 * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
1418 * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
1419 * LL.LL LFSR hash (16-bit big-endian)
1422 #if EFSYS_OPT_RX_SCALE
/*
 * siena_rx_prefix_hash(): extract the RX hash from the 16-byte
 * Falcon/Siena pseudo-header (see the layout comment above).
 *
 * Toeplitz: 32-bit big-endian value at bytes 12..15 (bytes 14..15 are
 * in lines missing from this extraction).  LFSR: 16-bit big-endian
 * value at bytes 14..15.  enp is unused.
 */
1423 static __checkReturn uint32_t
1424 siena_rx_prefix_hash(
1425 __in efx_nic_t *enp,
1426 __in efx_rx_hash_alg_t func,
1427 __in uint8_t *buffer)
1429 _NOTE(ARGUNUSED(enp))
1432 case EFX_RX_HASHALG_TOEPLITZ:
1433 return ((buffer[12] << 24) |
1434 (buffer[13] << 16) |
1438 case EFX_RX_HASHALG_LFSR:
1439 return ((buffer[14] << 8) | buffer[15]);
1446 #endif /* EFSYS_OPT_RX_SCALE */
1448 static __checkReturn efx_rc_t
1449 siena_rx_prefix_pktlen(
1450 __in efx_nic_t *enp,
1451 __in uint8_t *buffer,
1452 __out uint16_t *lengthp)
1454 _NOTE(ARGUNUSED(enp, buffer, lengthp))
1456 /* Not supported by Falcon/Siena hardware */
/*
 * siena_rx_qpost() (function name line missing from this extraction):
 * write ndescs RX buffer descriptors into the queue's host descriptor
 * ring.  Each DMA address is split into low/high 32-bit words of a
 * hardware qword; the ring index 'id' starts at 'added' and wraps with
 * er_mask.  The doorbell is not rung here — siena_rx_qpush() does that.
 */
1464 __in efx_rxq_t *erp,
1465 __in_ecount(ndescs) efsys_dma_addr_t *addrp,
1467 __in unsigned int ndescs,
1468 __in unsigned int completed,
1469 __in unsigned int added)
1473 unsigned int offset;
1476 /* The client driver must not overfill the queue */
1477 EFSYS_ASSERT3U(added - completed + ndescs, <=,
1478 EFX_RXQ_LIMIT(erp->er_mask + 1));
1480 id = added & (erp->er_mask);
1481 for (i = 0; i < ndescs; i++) {
1482 EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
1483 unsigned int, id, efsys_dma_addr_t, addrp[i],
/* Build the hardware descriptor: size + 64-bit address split in two */
1486 EFX_POPULATE_QWORD_3(qword,
1487 FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
1488 FSF_AZ_RX_KER_BUF_ADDR_DW0,
1489 (uint32_t)(addrp[i] & 0xffffffff),
1490 FSF_AZ_RX_KER_BUF_ADDR_DW1,
1491 (uint32_t)(addrp[i] >> 32));
/* Store the descriptor at its ring slot and advance with wrap */
1493 offset = id * sizeof (efx_qword_t);
1494 EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
1496 id = (id + 1) & (erp->er_mask);
/*
 * siena_rx_qpush() (function name line missing from this extraction):
 * ring the RX doorbell to hand descriptors posted since *pushedp to
 * the hardware.  The write pointer is 'added' masked to the ring size;
 * descriptor memory is synced and a PIO write barrier is issued before
 * the doorbell so the NIC never sees the pointer ahead of the data.
 */
1502 __in efx_rxq_t *erp,
1503 __in unsigned int added,
1504 __inout unsigned int *pushedp)
1506 efx_nic_t *enp = erp->er_enp;
1507 unsigned int pushed = *pushedp;
1512 /* All descriptors are pushed */
1515 /* Push the populated descriptors out */
1516 wptr = added & erp->er_mask;
1518 EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
1520 /* Only write the third DWORD */
1521 EFX_POPULATE_DWORD_1(dword,
1522 EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
1524 /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
1525 EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
1526 wptr, pushed & erp->er_mask);
1527 EFSYS_PIO_WRITE_BARRIER();
1528 EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
1529 erp->er_index, &dword, B_FALSE);
1532 #if EFSYS_OPT_RX_PACKED_STREAM
1534 siena_rx_qpush_ps_credits(
1535 __in efx_rxq_t *erp)
1537 /* Not supported by Siena hardware */
1542 siena_rx_qps_packet_info(
1543 __in efx_rxq_t *erp,
1544 __in uint8_t *buffer,
1545 __in uint32_t buffer_length,
1546 __in uint32_t current_offset,
1547 __out uint16_t *lengthp,
1548 __out uint32_t *next_offsetp,
1549 __out uint32_t *timestamp)
1551 /* Not supported by Siena hardware */
1556 #endif /* EFSYS_OPT_RX_PACKED_STREAM */
1558 static __checkReturn efx_rc_t
1560 __in efx_rxq_t *erp)
1562 efx_nic_t *enp = erp->er_enp;
1566 label = erp->er_index;
1568 /* Flush the queue */
1569 EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
1570 FRF_AZ_RX_FLUSH_DESCQ, label);
1571 EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
1578 __in efx_rxq_t *erp)
1580 efx_nic_t *enp = erp->er_enp;
1583 EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
1585 EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
1586 erp->er_index, &oword, B_TRUE);
1588 EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
1589 EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
1590 EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
1592 EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
1593 erp->er_index, &oword, B_TRUE);
1596 static __checkReturn efx_rc_t
1598 __in efx_nic_t *enp,
1599 __in unsigned int index,
1600 __in unsigned int label,
1601 __in efx_rxq_type_t type,
1602 __in_opt const efx_rxq_type_data_t *type_data,
1603 __in efsys_mem_t *esmp,
1606 __in unsigned int flags,
1607 __in efx_evq_t *eep,
1608 __in efx_rxq_t *erp)
1610 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1613 boolean_t jumbo = B_FALSE;
1616 _NOTE(ARGUNUSED(esmp))
1617 _NOTE(ARGUNUSED(type_data))
1619 EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
1620 (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
1621 EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
1622 EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
1624 if (index >= encp->enc_rxq_limit) {
1629 (1U << size) <= encp->enc_rxq_max_ndescs / encp->enc_rxq_min_ndescs;
1631 if ((1U << size) == (uint32_t)ndescs / encp->enc_rxq_min_ndescs)
1633 if (id + (1 << size) >= encp->enc_buftbl_limit) {
1639 case EFX_RXQ_TYPE_DEFAULT:
1647 if (flags & EFX_RXQ_FLAG_SCATTER) {
1648 #if EFSYS_OPT_RX_SCATTER
1653 #endif /* EFSYS_OPT_RX_SCATTER */
1656 /* Set up the new descriptor queue */
1657 EFX_POPULATE_OWORD_7(oword,
1658 FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
1659 FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
1660 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
1661 FRF_AZ_RX_DESCQ_LABEL, label,
1662 FRF_AZ_RX_DESCQ_SIZE, size,
1663 FRF_AZ_RX_DESCQ_TYPE, 0,
1664 FRF_AZ_RX_DESCQ_JUMBO, jumbo);
1666 EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
1667 erp->er_index, &oword, B_TRUE);
1671 #if !EFSYS_OPT_RX_SCATTER
1680 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1687 __in efx_rxq_t *erp)
1689 efx_nic_t *enp = erp->er_enp;
1692 EFSYS_ASSERT(enp->en_rx_qcount != 0);
1693 --enp->en_rx_qcount;
1695 /* Purge descriptor queue */
1696 EFX_ZERO_OWORD(oword);
1698 EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
1699 erp->er_index, &oword, B_TRUE);
1701 /* Free the RXQ object */
1702 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
1707 __in efx_nic_t *enp)
1709 _NOTE(ARGUNUSED(enp))
1712 #endif /* EFSYS_OPT_SIENA */