/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines the Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)

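/*
 * Note: one flush attempt may poll the event queue for up to
 * SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS =
 * 2000 * 1 ms = 2 seconds, so with SFC_RX_QFLUSH_ATTEMPTS = 3 the
 * worst case flush delay is about 6 seconds (see sfc_rx_qflush()).
 */
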
void
sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSHED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

/* This returns the running counter, which is not bounded by ring size */
unsigned int
sfc_rx_get_pushed(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq)
{
	SFC_ASSERT(sa->priv.dp_rx->get_pushed != NULL);

	return sa->priv.dp_rx->get_pushed(dp_rxq);
}

static int
sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
{
	int rc = 0;

	if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
		rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
		if (rc == 0)
			rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
	}
	return rc;
}

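/*
 * Refill the Rx ring in bulks of SFC_RX_REFILL_BULK mbufs: do nothing
 * until at least refill_threshold descriptors are free, then allocate
 * whole bulks from the mempool, post them to the hardware ring and
 * ring the doorbell once at the end (efx_rx_qpush()).
 */
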
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push what has been posted so far */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			__rte_mbuf_raw_sanity_check(m);

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
	rxq->dp.dpq.dbells++;
}

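/*
 * Translate libefx Rx descriptor flags into mbuf offload flags. Only
 * the combinations recognized by the switch statements below map to
 * GOOD/BAD; everything else is left as CKSUM_UNKNOWN, which is zero
 * by design (asserted at build time below).
 */
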
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			   RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
			   RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}
}

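/*
 * The libefx datapath receive burst: poll the event queue to update
 * completion state, walk completed descriptors while chaining fragments
 * of scattered packets via rte_pktmbuf_chain(), then refill the ring
 * and, if Rx interrupts are enabled, re-arm the event queue.
 */
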
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has the prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mbuf_raw_free(m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when the entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
		sfc_efx_rx_qprime(rxq);

	return done_pkts;
}

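/* Report the number of descriptors completed by HW but not yet delivered */
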
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because the API design assumes that it
	 * is the duty of the caller to satisfy all conditions.
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is neglected since the two checks
	 * below rely on the figures which take the HW limits
	 * into account and thus, if an entry is reserved, the
	 * checks will fail and the UNAVAIL code will be returned.
	 */
	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}

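/*
 * Check that a PDU of the given size fits the Rx scatter constraints:
 * the PDU plus the Rx prefix must fit into at most rx_scatter_max
 * buffers, or into a single buffer when Rx scatter is disabled. For
 * example, with 2048-byte buffers and scatter disabled, any PDU larger
 * than 2048 bytes minus the Rx prefix is rejected.
 */
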
boolean_t
sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
		     boolean_t rx_scatter_enabled, uint32_t rx_scatter_max,
		     const char **error)
{
	uint32_t effective_rx_scatter_max;
	uint32_t rx_scatter_bufs;

	effective_rx_scatter_max = rx_scatter_enabled ? rx_scatter_max : 1;
	rx_scatter_bufs = EFX_DIV_ROUND_UP(pdu + rx_prefix_size, rx_buf_size);

	if (rx_scatter_bufs > effective_rx_scatter_max) {
		if (rx_scatter_enabled)
			*error = "Possible number of Rx scatter buffers exceeds maximum number";
		else
			*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
		return B_FALSE;
	}

	return B_TRUE;
}

/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

	return sap->dp_rx;
}

struct sfc_rxq_info *
sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_shared *sas;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sas = sfc_adapter_shared_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sas->rxq_count);
	return &sas->rxq_info[dpq->queue_id];
}

static struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = sfc_adapter_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
	return &sa->rxq_ctrl[dpq->queue_id];
}

static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  __rte_unused struct sfc_dp_rx_hw_limits *limits,
			  __rte_unused struct rte_mempool *mb_pool,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}

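/*
 * Note that the maximum fill level is EFX_RXQ_LIMIT(entries) rather than
 * the ring size itself: libefx keeps a few descriptors reserved, which
 * is also why sfc_efx_rx_qdesc_status() above gets away without an
 * explicit check for offsets landing on reserved entries.
 */
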
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOTSUP;
	if (info->nic_dma_info->nb_regions > 0)
		goto fail_nic_dma;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_nic_dma:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}

/* Use qstop and qpurge functions in the case of qstart failure */
static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr,
		  const efx_rx_prefix_layout_t *pinfo)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
	int rc;

	/*
	 * The libefx API is used to extract information from the Rx prefix
	 * and it guarantees consistency. Just do a length check to ensure
	 * that we reserved space in Rx buffers correctly.
	 */
	if (rxq->prefix_size != pinfo->erpl_length)
		return ENOTSUP;

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			goto fail_rx_qprime;
	}

	return 0;

fail_rx_qprime:
	sfc_efx_rx_qstop(dp_rxq, NULL);
	sfc_efx_rx_qpurge(dp_rxq);
	return rc;
}

static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to the libefx-based PMD and uses
	 * the event queue structure directly, so there is no need to
	 * return the EvQ read pointer.
	 */
}

static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm in doing it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
static int
sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	int rc = 0;

	rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
	if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	}
	return rc;
}

static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
static int
sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	/* Cannot disarm, just disable rearm */
	rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	return 0;
}

struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
	},
	.features		= SFC_DP_RX_FEAT_INTR,
	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.intr_enable		= sfc_efx_rx_intr_enable,
	.intr_disable		= sfc_efx_rx_intr_disable,
	.pkt_burst		= sfc_efx_recv_pkts,
};

static void
sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	rxq = &sa->rxq_ctrl[sw_index];

	/*
	 * Retry Rx queue flushing in the case of flush failure or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq_info->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq_info->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for the Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq_info->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %d (internal %u) flush timed out",
				ethdev_qid, sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %d (internal %u) flush failed",
				ethdev_qid, sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %d (internal %u) flushed",
				   ethdev_qid, sw_index);
	}

	sa->priv.dp_rx->qpurge(rxq_info->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular when
	 * running over a PCI function which is not a member of the
	 * corresponding privilege groups; if this occurs, a few iterations
	 * will be made to repeat this step without the promiscuous and
	 * all-multicast flags set.
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		sa->eth_dev->data->promiscuous = 0;
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		sa->eth_dev->data->all_multicast = 0;
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	efx_rx_prefix_layout_t pinfo;
	int rc;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	rxq = &sa->rxq_ctrl[sw_index];
	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	switch (rxq_info->type) {
	case EFX_RXQ_TYPE_DEFAULT:
		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			rxq->buf_size,
			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
			rxq_info->type_flags, evq->common, &rxq->common);
		break;
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;

		rc = rte_mempool_ops_get_info(mp, &mp_info);
		if (rc != 0) {
			/* Positive errno is used in the driver */
			rc = -rc;
			goto fail_mp_get_info;
		}
		if (mp_info.contig_block_size <= 0) {
			rc = EINVAL;
			goto fail_bad_contig_block_size;
		}
		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
			mp_info.contig_block_size, rxq->buf_size,
			mp->header_size + mp->elt_size + mp->trailer_size,
			sa->rxd_wait_timeout_ns,
			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
			evq->common, &rxq->common);
		break;
	}
	default:
		rc = ENOTSUP;
	}
	if (rc != 0)
		goto fail_rx_qcreate;

	rc = efx_rx_prefix_get_layout(rxq->common, &pinfo);
	if (rc != 0)
		goto fail_prefix_get_layout;

	efx_rx_qenable(rxq->common);

	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr, &pinfo);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq_info->state |= SFC_RXQ_STARTED;

	if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
			RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sfc_rx_qflush(sa, sw_index);
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
	rxq_info->state = SFC_RXQ_INITIALIZED;

fail_dp_qstart:
	efx_rx_qdestroy(rxq->common);

fail_prefix_get_layout:
fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	if (rxq_info->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
			RTE_ETH_QUEUE_STATE_STOPPED;

	rxq = &sa->rxq_ctrl[sw_index];
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq_info->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}

static uint64_t
sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t no_caps = 0;

	if (encp->enc_tunnel_encapsulations_supported == 0)
		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return ~no_caps;
}

uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;

	return caps & sfc_rx_get_offload_mask(sa);
}

uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf,
		   __rte_unused uint64_t offloads)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rxq_max_fill_level);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	int order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1 << order;
}

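/*
 * Worked example (assuming a typical configuration): with no mbuf
 * private area and the default 128-byte headroom, data_off is
 * sizeof(struct rte_mbuf) + 0 + 128 = 256, so on a system with 64-byte
 * cache lines the guaranteed data alignment is capped at 64 bytes.
 */
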
uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned; double-check
	 * that it meets the Rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If the guaranteed
		 * buffer size is odd, lost space is (nic_align_end - 1).
		 * A more accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align the length.
		 */
		buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end);
	}

	return buf_size;
}

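/*
 * In the common case when the NIC needs no start alignment beyond the
 * cache-line alignment that mbufs already guarantee, the above reduces
 * to "data room minus headroom, rounded down to enc_rx_buf_align_end".
 */
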
int
sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	uint64_t offloads;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;
	struct sfc_dp_rx_hw_limits hw_limits;
	uint16_t rx_free_thresh;
	const char *error;

	memset(&hw_limits, 0, sizeof(hw_limits));
	hw_limits.rxq_max_entries = sa->rxq_max_entries;
	hw_limits.rxq_min_entries = sa->rxq_min_entries;
	hw_limits.evq_max_entries = sa->evq_max_entries;
	hw_limits.evq_min_entries = sa->evq_min_entries;

	rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
					    &rxq_entries, &evq_entries,
					    &rxq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
	SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	offloads = rx_conf->offloads;
	/* Add device level Rx offloads if the queue is an ethdev Rx queue */
	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;

	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa,
			"RxQ %d (internal %u) mbuf pool object size is too small",
			ethdev_qid, sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
				  encp->enc_rx_prefix_size,
				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
				  encp->enc_rx_scatter_max,
				  &error)) {
		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
			ethdev_qid, sw_index, error);
		sfc_err(sa,
			"RxQ %d (internal %u) calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			ethdev_qid, sw_index, buf_size,
			(unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;

	if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
		rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
	else
		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	rxq_info->type_flags |=
		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;

	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
		rxq_info->type_flags |= EFX_RXQ_FLAG_USER_FLAG;

	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
	    sfc_ft_is_active(sa))
		rxq_info->type_flags |= EFX_RXQ_FLAG_USER_MARK;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rxq = &sa->rxq_ctrl[sw_index];
	rxq->evq = evq;
	rxq->hw_index = sw_index;
	/*
	 * If the Rx refill threshold is specified (non-zero) in the Rx
	 * configuration, use the specified value. Otherwise use 1/8 of
	 * the number of Rx descriptors as the default. It allows to keep
	 * the Rx ring full enough and does not refill too aggressively
	 * if the packet rate is high.
	 *
	 * Since the PMD refills in bulks and waits for a full bulk to be
	 * refilled (basically rounding down), it is better to round up
	 * here to mitigate that a bit.
	 */
	rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
		rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
	/* The Rx refill threshold cannot be smaller than the refill bulk */
	rxq_info->refill_threshold =
		RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq_info->refill_mb_pool = mb_pool;

	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
	else
		rxq_info->rxq_flags = 0;

	rxq->buf_size = buf_size;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_NIC_DMA_ADDR_RX_RING,
			   efx_rxq_size(sa->nic, rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq_info->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq_info->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

	if (sfc_ft_is_active(sa))
		info.user_mark_mask = SFC_FT_USER_MARK_MASK;
	else
		info.user_mark_mask = UINT32_MAX;

	info.flags = rxq_info->rxq_flags;
	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;
	info.fcw_offset = sa->fcw_offset;

	info.nic_dma_info = &sas->nic_dma_info;

	rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				     socket_id, &info, &rxq_info->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq_info->dp;

	rxq_info->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	sa->priv.dp_rx->qdestroy(rxq_info->dp);
	rxq_info->dp = NULL;

	rxq_info->state &= ~SFC_RXQ_INITIALIZED;
	rxq_info->entries = 0;

	rxq = &sa->rxq_ctrl[sw_index];

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;
}

/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV4, 2TUPLE) },
	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
	  RTE_ETH_RSS_IPV6_EX,
	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV6, 2TUPLE) }
};

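/*
 * A single RTE hash function may be backed by several EFX hash classes,
 * e.g. RTE_ETH_RSS_IPV4 combines the 2-tuple IPv4 TCP, UDP and plain
 * IPv4 classes; sfc_rx_hash_types_mask_supp() below resolves the map
 * per class against the flags the hardware actually supports.
 */
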
static efx_rx_hash_type_t
sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
			    unsigned int *hash_type_flags_supported,
			    unsigned int nb_hash_type_flags_supported)
{
	efx_rx_hash_type_t hash_type_masked = 0;
	unsigned int i, j;

	for (i = 0; i < nb_hash_type_flags_supported; ++i) {
		unsigned int class_tuple_lbn[] = {
			EFX_RX_CLASS_IPV4_TCP_LBN,
			EFX_RX_CLASS_IPV4_UDP_LBN,
			EFX_RX_CLASS_IPV4_LBN,
			EFX_RX_CLASS_IPV6_TCP_LBN,
			EFX_RX_CLASS_IPV6_UDP_LBN,
			EFX_RX_CLASS_IPV6_LBN
		};

		for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
			unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
			unsigned int flag;

			tuple_mask <<= class_tuple_lbn[j];
			flag = hash_type & tuple_mask;

			if (flag == hash_type_flags_supported[i])
				hash_type_masked |= flag;
		}
	}

	return hash_type_masked;
}

int
sfc_rx_hash_init(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
	efx_rx_hash_alg_t alg;
	unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
	unsigned int nb_flags_supp;
	struct sfc_rss_hf_rte_to_efx *hf_map;
	struct sfc_rss_hf_rte_to_efx *entry;
	efx_rx_hash_type_t efx_hash_types;
	unsigned int i;
	int rc;

	if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
		alg = EFX_RX_HASHALG_TOEPLITZ;
	else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
		alg = EFX_RX_HASHALG_PACKED_STREAM;
	else
		return EINVAL;

	rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
					 RTE_DIM(flags_supp), &nb_flags_supp);
	if (rc != 0)
		return rc;

	hf_map = rte_calloc_socket("sfc-rss-hf-map",
				   RTE_DIM(sfc_rss_hf_map),
				   sizeof(*hf_map), 0, sa->socket_id);
	if (hf_map == NULL)
		return ENOMEM;

	entry = hf_map;
	efx_hash_types = 0;
	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
		efx_rx_hash_type_t ht;

		ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
						 flags_supp, nb_flags_supp);
		if (ht != 0) {
			entry->rte = sfc_rss_hf_map[i].rte;
			entry->efx = ht;
			efx_hash_types |= ht;
			++entry;
		}
	}

	rss->hash_alg = alg;
	rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
	rss->hf_map = hf_map;
	rss->hash_types = efx_hash_types;

	return 0;
}

void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	rte_free(rss->hf_map);
}

int
sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
		     efx_rx_hash_type_t *efx)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t hash_types = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		uint64_t rte_mask = rss->hf_map[i].rte;

		if ((rte & rte_mask) != 0) {
			rte &= ~rte_mask;
			hash_types |= rss->hf_map[i].efx;
		}
	}

	if (rte != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return EINVAL;
	}

	*efx = hash_types;
	return 0;
}

uint64_t
sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
{
	uint64_t rte = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

		if ((efx & hash_type) == hash_type)
			rte |= rss->hf_map[i].rte;
	}

	return rte;
}

static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
			    struct rte_eth_rss_conf *conf)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t efx_hash_types = rss->hash_types;
	uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
	int rc;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
		    conf->rss_key != NULL)
			return EINVAL;
	}

	if (conf->rss_hf != 0) {
		rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
		if (rc != 0)
			return rc;
	}

	if (conf->rss_key != NULL) {
		if (conf->rss_key_len != sizeof(rss->key)) {
			sfc_err(sa, "RSS key size is wrong (should be %zu)",
				sizeof(rss->key));
			return EINVAL;
		}
		rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	return 0;
}

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc = 0;

	if (rss->channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   rss->hash_alg, rss->hash_types,
					   B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->key, sizeof(rss->key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->tbl, RTE_DIM(rss->tbl));
	}

finish:
	return rc;
}

struct sfc_rxq_info *
sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
			   sfc_ethdev_qid_t ethdev_qid)
{
	sfc_sw_index_t sw_index;

	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
	return &sas->rxq_info[sw_index];
}

struct sfc_rxq *
sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;

	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
	return &sa->rxq_ctrl[sw_index];
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
		     sas->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
		if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
		    (!sas->rxq_info[sw_index].deferred_start ||
		     sas->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;

	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
		     sas->rxq_count);

	sw_index = sas->rxq_count;
	while (sw_index-- > 0) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
		  unsigned int extra_efx_type_flags)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int max_entries;

	max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;
	rxq_info->type_flags = extra_efx_type_flags;

	return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	struct sfc_rss *rss = &sas->rss;
	int rc = 0;

	switch (rxmode->mq_mode) {
	case RTE_ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	case RTE_ETH_MQ_RX_RSS:
		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * Requested offloads are validated by ethdev against the supported
	 * set, so unsupported offloads cannot be added as the result of
	 * the checks below.
	 */
	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
	}

	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	}

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;
	sfc_ethdev_qid_t ethdev_qid;

	SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);

	/*
	 * Finalize only ethdev queues since other ones are finalized only
	 * on device close and they may require additional deinitialization.
	 */
	ethdev_qid = sas->ethdev_rxq_count;
	while (--ethdev_qid >= (int)nb_rx_queues) {
		struct sfc_rxq_info *rxq_info;

		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
		if (rxq_info->state & SFC_RXQ_INITIALIZED) {
			sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
								ethdev_qid);
			sfc_rx_qfini(sa, sw_index);
		}
	}

	sas->ethdev_rxq_count = nb_rx_queues;
}

/**
 * Initialize the Rx subsystem.
 *
 * Called at the device (re)configuration stage when the number of receive
 * queues is specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
	const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
	bool reconfigure;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sas->ethdev_rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rxq_total == sas->rxq_count) {
		reconfigure = true;
		goto configure_rss;
	}

	if (sas->rxq_info == NULL) {
		reconfigure = false;
		rc = ENOMEM;
		sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
						  sizeof(sas->rxq_info[0]), 0,
						  sa->socket_id);
		if (sas->rxq_info == NULL)
			goto fail_rxqs_alloc;

		/*
		 * Allocate primary process only RxQ control from heap
		 * since it should not be shared.
		 */
		rc = ENOMEM;
		sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
		if (sa->rxq_ctrl == NULL)
			goto fail_rxqs_ctrl_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;
		struct sfc_rxq *new_rxq_ctrl;

		reconfigure = true;

		/* Do not uninitialize reserved queues */
		if (nb_rx_queues < sas->ethdev_rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sas->rxq_info,
				    nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rxq_total > 0)
			goto fail_rxqs_realloc;

		rc = ENOMEM;
		new_rxq_ctrl = realloc(sa->rxq_ctrl,
				       nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
		if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
			goto fail_rxqs_ctrl_realloc;

		sas->rxq_info = new_rxq_info;
		sa->rxq_ctrl = new_rxq_ctrl;
		if (nb_rxq_total > sas->rxq_count) {
			unsigned int rxq_count = sas->rxq_count;

			memset(&sas->rxq_info[rxq_count], 0,
			       (nb_rxq_total - rxq_count) *
			       sizeof(sas->rxq_info[0]));
			memset(&sa->rxq_ctrl[rxq_count], 0,
			       (nb_rxq_total - rxq_count) *
			       sizeof(sa->rxq_ctrl[0]));
		}
	}

	while (sas->ethdev_rxq_count < nb_rx_queues) {
		sfc_sw_index_t sw_index;

		sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
							sas->ethdev_rxq_count);
		rc = sfc_rx_qinit_info(sa, sw_index, 0);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sas->ethdev_rxq_count++;
	}

	sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;

	if (!reconfigure) {
		rc = sfc_mae_counter_rxq_init(sa);
		if (rc != 0)
			goto fail_count_rxq_init;
	}

configure_rss:
	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
			MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;

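	/*
	 * Fill the RSS indirection table round-robin, e.g. with
	 * 3 channels it becomes 0, 1, 2, 0, 1, 2, ...
	 */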
	if (rss->channels > 0) {
		struct rte_eth_rss_conf *adv_conf_rss;
		sfc_sw_index_t sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			rss->tbl[sw_index] = sw_index % rss->channels;

		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
		if (rc != 0)
			goto fail_rx_process_adv_conf_rss;
	}

	return 0;

fail_rx_process_adv_conf_rss:
	if (!reconfigure)
		sfc_mae_counter_rxq_fini(sa);

fail_count_rxq_init:
fail_rx_qinit_info:
fail_rxqs_ctrl_realloc:
fail_rxqs_realloc:
fail_rxqs_ctrl_alloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

/**
 * Shutdown the Rx subsystem.
 *
 * Called at the device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	sfc_rx_fini_queues(sa, 0);
	sfc_mae_counter_rxq_fini(sa);

	rss->channels = 0;

	free(sa->rxq_ctrl);
	sa->rxq_ctrl = NULL;

	rte_free(sfc_sa2shared(sa)->rxq_info);
	sfc_sa2shared(sa)->rxq_info = NULL;
}