/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "sfc_debug.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
/*
 * Maximum number of Rx queue flush attempts in the case of a flush failure or
 * timeout.
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines the Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
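/*
 * Worked example of the flush timeout that these constants define
 * (arithmetic only, using the values above): a single flush attempt polls
 * the event queue up to SFC_RX_QFLUSH_POLL_ATTEMPTS (2000) times with
 * SFC_RX_QFLUSH_POLL_WAIT_MS (1 ms) between polls, i.e. up to ~2 seconds
 * per attempt; with SFC_RX_QFLUSH_ATTEMPTS (3) retries the worst-case
 * delay is about 6 seconds, which matches the note in sfc_rx_qflush().
 */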
sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
	rxq_info->state |= SFC_RXQ_FLUSHED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;

sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
	unsigned int free_space;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	struct sfc_efx_rx_sw_desc *rxd;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);
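	/*
	 * Worked example of the refill arithmetic above (a sketch with
	 * assumed numbers, not taken from a real configuration): with a
	 * max_fill_level of 504 descriptors, added - completed == 104
	 * descriptors currently posted and SFC_RX_REFILL_BULK assumed to
	 * be 8, free_space is 400 and bulks is 50, so the loop below posts
	 * 50 bulks of 8 mbufs each.
	 */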
	id = added & rxq->ptr_mask;
	if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
					  RTE_DIM(objs)) < 0)) {
		/*
		 * It is hardly a safe way to increment the counter
		 * from different contexts, but all PMDs do it.
		 */
		rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
		/* Return if we have posted nothing yet */
		if (added == rxq->added)
	for (i = 0; i < RTE_DIM(objs);
	     ++i, id = (id + 1) & rxq->ptr_mask) {
		MBUF_RAW_ALLOC_CHECK(m);

		rxd = &rxq->sw_desc[id];
		m->data_off = RTE_PKTMBUF_HEADROOM;
		addr[i] = rte_pktmbuf_iova(m);

	efx_rx_qpost(rxq->common, addr, rxq->buf_size,
		     RTE_DIM(objs), rxq->completed, added);
	added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);

	efx_rx_qpush(rxq->common, added, &rxq->pushed);
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
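	/*
	 * Example of the mapping above (a sketch based only on the cases
	 * shown here): a descriptor carrying EFX_PKT_IPV4 | EFX_CKSUM_IPV4 |
	 * EFX_PKT_TCP but not EFX_CKSUM_TCPUDP would presumably yield
	 * PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD, while a descriptor
	 * with neither EFX_PKT_TCP nor EFX_PKT_UDP leaves the L4 flags at
	 * PKT_RX_L4_CKSUM_UNKNOWN (zero), as the build-time assertion
	 * requires.
	 */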
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
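/*
 * Example: a descriptor flagged EFX_PKT_IPV4 | EFX_PKT_TCP maps to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP;
 * a non-IP frame maps to RTE_PTYPE_L2_ETHER alone.
 */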
static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,

sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
		m->ol_flags |= PKT_RX_RSS_HASH;

sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))

	sfc_ev_qpoll(rxq->evq);
	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		struct sfc_efx_rx_sw_desc *rxd;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		desc_flags = rxd->flags;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			seg_len = rxd->size - prefix_size;

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
			/* The packet to deliver */

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered; more fragments are to come */
			/* Further fragments have no prefix */

		/* Scattered packet is done */
		/* The first fragment of the packet has a prefix */
		prefix_size = rxq->prefix_size;

		sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract the RSS hash from the packet prefix and
		 * set the corresponding mbuf field (if needed and possible).
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mbuf_raw_free(m);

	/* pending is only moved when the entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))

	/*
	 * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
	 * this requires the queue to be running, but the check is omitted
	 * because the API design assumes that it is the duty of the caller
	 * to satisfy all preconditions.
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring, but an
	 * explicit check that the offset points to a reserved entry is
	 * omitted: the two checks below rely on figures which already take
	 * the HW limits into account, so for a reserved entry both checks
	 * fail and the UNAVAIL code is returned.
	 */
	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
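	/*
	 * Worked example of the classification above (a sketch with assumed
	 * counter values): with completed == 100, pending == 105 and
	 * added == 120, offsets 0..4 report RTE_ETH_RX_DESC_DONE (packets
	 * already received), offsets 5..19 report RTE_ETH_RX_DESC_AVAIL
	 * (descriptors posted to HW but not yet filled), and any larger
	 * offset reports RTE_ETH_RX_DESC_UNAVAIL.
	 */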
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

struct sfc_rxq_info *
sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = eth_dev->data->dev_private;

	SFC_ASSERT(dpq->queue_id < sa->rxq_count);
	return &sa->rxq_info[dpq->queue_id];

sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
	struct sfc_rxq_info *rxq_info;

	rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq_info->rxq != NULL);
	return rxq_info->rxq;

static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  __rte_unused struct rte_mempool *mb_pool,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
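	/*
	 * Illustration (a sketch, not a normative example): with
	 * nb_rx_desc = 512, both the Rx ring and the event queue are sized
	 * to 512 entries, while the maximum fill level is
	 * EFX_RXQ_LIMIT(512), slightly below the ring size because a
	 * handful of ring entries are reserved (see the comment in
	 * sfc_efx_rx_qdesc_status() above).
	 */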
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
	struct sfc_efx_rxq *rxq;

	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;
static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
	/*
	 * The libefx-based datapath is bound to the libefx-based PMD and
	 * uses the event queue structure directly, so there is no need to
	 * return the EvQ read pointer.
	 */

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);

		/*
		 * Packed stream relies on 0 in an inactive SW descriptor.
		 * Rx queue stop is not performance critical, so there is
		 * no harm in doing it always.
		 */

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
struct sfc_dp_rx sfc_efx_rx = {
	.name = SFC_KVARG_DATAPATH_EFX,
	.features = SFC_DP_RX_FEAT_SCATTER |
		    SFC_DP_RX_FEAT_CHECKSUM,
	.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
	.qcreate = sfc_efx_rx_qcreate,
	.qdestroy = sfc_efx_rx_qdestroy,
	.qstart = sfc_efx_rx_qstart,
	.qstop = sfc_efx_rx_qstop,
	.qpurge = sfc_efx_rx_qpurge,
	.supported_ptypes_get = sfc_efx_supported_ptypes_get,
	.qdesc_npending = sfc_efx_rx_qdesc_npending,
	.qdesc_status = sfc_efx_rx_qdesc_status,
	.pkt_burst = sfc_efx_recv_pkts,
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info;
	unsigned int retry_count;
	unsigned int wait_count;

	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
	/*
	 * Retry Rx queue flushing in the case of a flush failure or
	 * timeout. In the worst case it can delay for up to 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
		rc = efx_rx_qflush(rxq->common);
		rxq_info->state |= (rc == EALREADY) ?
			SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq_info->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for an Rx queue flush done or failed event for at
		 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied by
		 * SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq_info->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %u flushed", sw_index);

	sa->priv.dp_rx->qpurge(rxq_info->dp);
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
	struct sfc_rss *rss = &sa->rss;
	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular when
	 * running over a PCI function which is not a member of the
	 * corresponding privilege group; if this occurs, a few iterations
	 * are made to repeat this step without the promiscuous and
	 * all-multicast flags set.
	 */
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
	else if (rc != EOPNOTSUPP)

		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		rc = sfc_set_rx_mode(sa);

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		rc = sfc_set_rx_mode(sa);
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_port *port = &sa->port;
	struct sfc_rxq_info *rxq_info;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));

	switch (rxq_info->type) {
	case EFX_RXQ_TYPE_DEFAULT:
		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
			rxq_info->type_flags, evq->common, &rxq->common);
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;

		rc = rte_mempool_ops_get_info(mp, &mp_info);
			/* Positive errno is used in the driver */
			goto fail_mp_get_info;
		if (mp_info.contig_block_size <= 0) {
			goto fail_bad_contig_block_size;
		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
			mp_info.contig_block_size, rxq->buf_size,
			mp->header_size + mp->elt_size + mp->trailer_size,
			sa->rxd_wait_timeout_ns,
			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
			evq->common, &rxq->common);

		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr);

	rxq_info->state |= SFC_RXQ_STARTED;

	if ((sw_index == 0) && !port->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
			goto fail_mac_filter_default_rxq_set;

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

fail_mac_filter_default_rxq_set:
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	sfc_rx_qflush(sa, sw_index);

fail_bad_contig_block_size:
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	if (rxq == NULL || rxq_info->state == SFC_RXQ_INITIALIZED)
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq_info->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);

	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
		caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
		caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
		caps |= DEV_RX_OFFLOAD_TCP_CKSUM;

	if (encp->enc_tunnel_encapsulations_supported &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
	if (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
		caps |= DEV_RX_OFFLOAD_SCATTER;
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf,
		   __rte_unused uint64_t offloads)
	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		"RxQ prefetch/host/writeback thresholds are not supported");

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		"RxQ free threshold too large: %u vs maximum %u",
		rx_conf->rx_free_thresh, rxq_max_fill_level);

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));
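	/*
	 * Worked example (a sketch with assumed sizes, not guarantees): with
	 * a 64-byte cache line, order starts at 6; if sizeof(struct rte_mbuf)
	 * is 128, the private area size is 0 and RTE_PKTMBUF_HEADROOM is 128,
	 * then data_off is 256 and rte_bsf32(data_off) is 8, so order stays 6
	 * and the guaranteed data alignment is 1 << 6 = 64 bytes.
	 */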
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * The mbuf is always cache-line aligned; double-check that it meets
	 * the Rx buffer start alignment requirements.
	 */

	/* Start from the mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove the headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
		mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
	buf_size -= RTE_PKTMBUF_HEADROOM;
	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
			rte_pktmbuf_data_room_size(mb_pool),
			RTE_PKTMBUF_HEADROOM, start_alignment);
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate the space which can be lost. If the guaranteed
		 * buffer size is odd, the lost space is (nic_align_end - 1).
		 * A more accurate formula is used below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
			rte_pktmbuf_data_room_size(mb_pool),
			RTE_PKTMBUF_HEADROOM, start_alignment,
			end_padding_alignment);
		buf_size -= end_padding_alignment;

	/*
	 * Start is aligned the same as or better than the end;
	 * just align the length.
	 */
	buf_size = P2ALIGN(buf_size, nic_align_end);
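	/*
	 * Worked example of the computation above (a sketch with assumed
	 * values, not taken from a real NIC configuration): with a 2176-byte
	 * data room, 128-byte RTE_PKTMBUF_HEADROOM, 64-byte guaranteed mbuf
	 * data alignment, nic_align_start of 1 and nic_align_end of 64, the
	 * headroom leaves 2048 bytes, no extra start alignment is reserved
	 * (64 >= 1), no end padding is reserved (64 >= 64), and
	 * P2ALIGN(2048, 64) keeps the full 2048 bytes as the usable Rx
	 * buffer size.
	 */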
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rss *rss = &sa->rss;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	struct sfc_rxq_info *rxq_info;
	struct sfc_dp_rx_qcreate_info info;

	rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
					    &evq_entries, &rxq_max_fill_level);
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	offloads = rx_conf->offloads |
		sa->eth_dev->data->dev_conf.rxmode.offloads;
	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;

	if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
		rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	rxq_info->type_flags =
		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);

	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
		goto fail_rxq_alloc;

	rxq_info->rxq = rxq;

	rxq->hw_index = sw_index;
	rxq_info->refill_threshold =
		RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq_info->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq_info->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq_info->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;

	rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				     socket_id, &info, &rxq_info->dp);
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq_info->dp;

	rxq_info->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	sfc_dma_free(sa, &rxq->mem);

	rxq_info->rxq = NULL;

	rxq_info->entries = 0;

	sfc_log_init(sa, "failed %d", rc);
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	sa->priv.dp_rx->qdestroy(rxq_info->dp);
	rxq_info->dp = NULL;

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
	{ ETH_RSS_NONFRAG_IPV4_TCP,
	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV4_UDP,
	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV4, 2TUPLE) },
	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV6, 2TUPLE) }
static efx_rx_hash_type_t
sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
			    unsigned int *hash_type_flags_supported,
			    unsigned int nb_hash_type_flags_supported)
	efx_rx_hash_type_t hash_type_masked = 0;

	for (i = 0; i < nb_hash_type_flags_supported; ++i) {
		unsigned int class_tuple_lbn[] = {
			EFX_RX_CLASS_IPV4_TCP_LBN,
			EFX_RX_CLASS_IPV4_UDP_LBN,
			EFX_RX_CLASS_IPV4_LBN,
			EFX_RX_CLASS_IPV6_TCP_LBN,
			EFX_RX_CLASS_IPV6_UDP_LBN,
			EFX_RX_CLASS_IPV6_LBN

		for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
			unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;

			tuple_mask <<= class_tuple_lbn[j];
			flag = hash_type & tuple_mask;

			if (flag == hash_type_flags_supported[i])
				hash_type_masked |= flag;

	return hash_type_masked;

sfc_rx_hash_init(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sa->rss;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
	efx_rx_hash_alg_t alg;
	unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
	unsigned int nb_flags_supp;
	struct sfc_rss_hf_rte_to_efx *hf_map;
	struct sfc_rss_hf_rte_to_efx *entry;
	efx_rx_hash_type_t efx_hash_types;

	if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
		alg = EFX_RX_HASHALG_TOEPLITZ;
	else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
		alg = EFX_RX_HASHALG_PACKED_STREAM;

	rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
					 RTE_DIM(flags_supp), &nb_flags_supp);

	hf_map = rte_calloc_socket("sfc-rss-hf-map",
				   RTE_DIM(sfc_rss_hf_map),
				   sizeof(*hf_map), 0, sa->socket_id);

	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
		efx_rx_hash_type_t ht;

		ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
						 flags_supp, nb_flags_supp);
		entry->rte = sfc_rss_hf_map[i].rte;
		efx_hash_types |= ht;

	rss->hash_alg = alg;
	rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
	rss->hf_map = hf_map;
	rss->hash_types = efx_hash_types;
sfc_rx_hash_fini(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sa->rss;

	rte_free(rss->hf_map);

sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
		     efx_rx_hash_type_t *efx)
	struct sfc_rss *rss = &sa->rss;
	efx_rx_hash_type_t hash_types = 0;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		uint64_t rte_mask = rss->hf_map[i].rte;

		if ((rte & rte_mask) != 0) {
			hash_types |= rss->hf_map[i].efx;

	sfc_err(sa, "unsupported hash functions requested");

sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
	struct sfc_rss *rss = &sa->rss;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

		if ((efx & hash_type) == hash_type)
			rte |= rss->hf_map[i].rte;

sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
			    struct rte_eth_rss_conf *conf)
	struct sfc_rss *rss = &sa->rss;
	efx_rx_hash_type_t efx_hash_types = rss->hash_types;
	uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
		    conf->rss_key != NULL)

	if (conf->rss_hf != 0) {
		rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);

	if (conf->rss_key != NULL) {
		if (conf->rss_key_len != sizeof(rss->key)) {
			sfc_err(sa, "RSS key size is wrong (should be %lu)",

		rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));

	rss->hash_types = efx_hash_types;

sfc_rx_rss_config(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sa->rss;

	if (rss->channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   rss->hash_alg, rss->hash_types,

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->key, sizeof(rss->key));

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->tbl, RTE_DIM(rss->tbl));
sfc_rx_start(struct sfc_adapter *sa)
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);

	rc = sfc_rx_rss_config(sa);
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if (sa->rxq_info[sw_index].rxq != NULL &&
		    (!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
				goto fail_rx_qstart;

	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

	efx_rx_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);

sfc_rx_stop(struct sfc_adapter *sa)
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);

	efx_rx_fini(sa->nic);

sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	struct sfc_rss *rss = &sa->rss;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
		sfc_err(sa, "Rx multi-queue mode %u not supported",

	/*
	 * Requested offloads are validated against those supported by
	 * ethdev, so unsupported offloads cannot show up here.
	 */
	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;

	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
/*
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
	SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

	sw_index = sa->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);

	sa->rxq_count = nb_rx_queues;

/*
 * Initialize the Rx subsystem.
 *
 * Called at the device (re)configuration stage when the number of receive
 * queues is specified together with other device-level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
sfc_rx_configure(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sa->rss;
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sa->rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
		goto fail_check_mode;

	if (nb_rx_queues == sa->rxq_count)

	if (sa->rxq_info == NULL) {
		sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
						 sizeof(sa->rxq_info[0]), 0,
		if (sa->rxq_info == NULL)
			goto fail_rxqs_alloc;
		struct sfc_rxq_info *new_rxq_info;

		if (nb_rx_queues < sa->rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

			rte_realloc(sa->rxq_info,
				    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rx_queues > 0)
			goto fail_rxqs_realloc;

		sa->rxq_info = new_rxq_info;
		if (nb_rx_queues > sa->rxq_count)
			memset(&sa->rxq_info[sa->rxq_count], 0,
			       (nb_rx_queues - sa->rxq_count) *
			       sizeof(sa->rxq_info[0]));

	while (sa->rxq_count < nb_rx_queues) {
		rc = sfc_rx_qinit_info(sa, sa->rxq_count);
			goto fail_rx_qinit_info;

	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
			MIN(sa->rxq_count, EFX_MAXRSS) : 0;

	if (rss->channels > 0) {
		struct rte_eth_rss_conf *adv_conf_rss;
		unsigned int sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			rss->tbl[sw_index] = sw_index % rss->channels;
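		/*
		 * E.g. with rss->channels == 4 the indirection table is
		 * filled with the repeating pattern 0, 1, 2, 3, 0, 1, ...
		 * across all EFX_RSS_TBL_SIZE entries, spreading flows
		 * evenly over the first four Rx queues.
		 */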
		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
			goto fail_rx_process_adv_conf_rss;

fail_rx_process_adv_conf_rss:
	sfc_log_init(sa, "failed %d", rc);

/*
 * Shut down the Rx subsystem.
 *
 * Called at the device close stage, for example, before device shutdown.
 */
sfc_rx_close(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sa->rss;

	sfc_rx_fini_queues(sa, 0);

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;