/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
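
/*
 * Worked example derived from the constants above (see also the comment in
 * sfc_rx_qflush()): each flush attempt may poll for up to
 * SFC_RX_QFLUSH_POLL_WAIT_MS * SFC_RX_QFLUSH_POLL_ATTEMPTS = 1 ms * 2000 =
 * 2 seconds, so with SFC_RX_QFLUSH_ATTEMPTS = 3 retries the total delay is
 * bounded by roughly 6 seconds.
 */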
static void
sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSHED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

static void
sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

static int
sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
{
	int rc = 0;

	if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
		rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
		if (rc == 0)
			rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
	}
	return rc;
}
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);
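	/*
	 * Note (added for clarity): sfc_rx_qinit() clamps refill_threshold
	 * to at least SFC_RX_REFILL_BULK, which equals RTE_DIM(objs), so
	 * reaching this point implies free_space >= RTE_DIM(objs) and at
	 * least one full bulk can be posted.
	 */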

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];
			MBUF_RAW_ALLOC_CHECK(m);

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}
static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}
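
/*
 * Illustrative example (not part of the original code): a TCP packet over
 * IPv4 is reported by the HW as (EFX_PKT_IPV4 | EFX_PKT_TCP), which the
 * helper above converts to
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 */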
static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
}
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mbuf_raw_free(m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
		sfc_efx_rx_qprime(rxq);

	return done_pkts;
}
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}
static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll EvQ to derive up-to-date 'rxq->pending' figure;
	 * the queue is required to be running, but the check is
	 * omitted because API design assumes that it is the duty
	 * of the caller to satisfy all conditions
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is neglected since the two checks
	 * below rely on the figures which take the HW limits
	 * into account and thus if an entry is reserved, the
	 * checks will fail and UNAVAIL code will be returned
	 */

	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
boolean_t
sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
		     boolean_t rx_scatter_enabled, const char **error)
{
	if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
		*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
		return B_FALSE;
	}

	return B_TRUE;
}
/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

	return sap->dp_rx;
}
407 struct sfc_rxq_info *
408 sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
410 const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
411 struct rte_eth_dev *eth_dev;
412 struct sfc_adapter_shared *sas;
414 SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
415 eth_dev = &rte_eth_devices[dpq->port_id];
417 sas = sfc_adapter_shared_by_eth_dev(eth_dev);
419 SFC_ASSERT(dpq->queue_id < sas->rxq_count);
420 return &sas->rxq_info[dpq->queue_id];
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = sfc_adapter_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
	return &sa->rxq_ctrl[dpq->queue_id];
}
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  __rte_unused struct sfc_dp_rx_hw_limits *limits,
			  __rte_unused struct rte_mempool *mb_pool,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}
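
/*
 * Note (added for clarity): EFX_RXQ_LIMIT() in libefx keeps a small number
 * of ring entries unused so that the queue can never become completely
 * full; as a result the maximum fill level reported to the caller is
 * slightly below the requested ring size.
 */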
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}
static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}
/* Use qstop and qpurge functions in the case of qstart failure */
static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
	int rc;

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			goto fail_rx_qprime;
	}

	return 0;

fail_rx_qprime:
	sfc_efx_rx_qstop(dp_rxq, NULL);
	sfc_efx_rx_qpurge(dp_rxq);
	return rc;
}
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to libefx-based PMD and uses
	 * event queue structure directly. So, there is no necessity to
	 * return EvQ read pointer.
	 */
}
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm to do it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}
static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
static int
sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	int rc = 0;

	rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
	if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	}
	return rc;
}
static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
static int
sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	/* Cannot disarm, just disable rearm */
	rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	return 0;
}
struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_RX_FEAT_INTR,
	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
				  DEV_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.intr_enable		= sfc_efx_rx_intr_enable,
	.intr_disable		= sfc_efx_rx_intr_disable,
	.pkt_burst		= sfc_efx_recv_pkts,
};
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	rxq = &sa->rxq_ctrl[sw_index];

	/*
	 * Retry Rx queue flushing in the case of flush failed or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq_info->state |= (rc == EALREADY) ?
					   SFC_RXQ_FLUSHED :
					   SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq_info->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq_info->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %u flushed", sw_index);
	}

	sa->priv.dp_rx->qpurge(rxq_info->dp);
}
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * filter for the default Rx queue might fail, in particular, while
	 * running over PCI function which is not a member of corresponding
	 * privilege groups; if this occurs, a few iterations will be made to
	 * repeat this step without promiscuous and all-multicast flags set
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		sa->eth_dev->data->promiscuous = 0;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		sa->eth_dev->data->all_multicast = 0;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	rxq = &sa->rxq_ctrl[sw_index];
	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	switch (rxq_info->type) {
	case EFX_RXQ_TYPE_DEFAULT:
		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			rxq->buf_size,
			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
			rxq_info->type_flags, evq->common, &rxq->common);
		break;
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;

		rc = rte_mempool_ops_get_info(mp, &mp_info);
		if (rc != 0) {
			/* Positive errno is used in the driver */
			rc = -rc;
			goto fail_mp_get_info;
		}
		if (mp_info.contig_block_size <= 0) {
			rc = EINVAL;
			goto fail_bad_contig_block_size;
		}
		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
			mp_info.contig_block_size, rxq->buf_size,
			mp->header_size + mp->elt_size + mp->trailer_size,
			sa->rxd_wait_timeout_ns,
			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
			evq->common, &rxq->common);
		break;
	}
	default:
		rc = ENOTSUP;
	}
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq_info->state |= SFC_RXQ_STARTED;

	if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	if (rxq_info->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	rxq = &sa->rxq_ctrl[sw_index];
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq_info->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}
static uint64_t
sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t no_caps = 0;

	if (encp->enc_tunnel_encapsulations_supported == 0)
		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return ~no_caps;
}

uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;

	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	return caps & sfc_rx_get_offload_mask(sa);
}

uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}
903 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
904 const struct rte_eth_rxconf *rx_conf,
905 __rte_unused uint64_t offloads)
909 if (rx_conf->rx_thresh.pthresh != 0 ||
910 rx_conf->rx_thresh.hthresh != 0 ||
911 rx_conf->rx_thresh.wthresh != 0) {
913 "RxQ prefetch/host/writeback thresholds are not supported");
916 if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
918 "RxQ free threshold too large: %u vs maximum %u",
919 rx_conf->rx_free_thresh, rxq_max_fill_level);
923 if (rx_conf->rx_drop_en == 0) {
924 sfc_err(sa, "RxQ drop disable is not supported");
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}
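
/*
 * Illustrative example (not part of the original code; actual sizes depend
 * on the build configuration): with no mempool private area,
 * sizeof(struct rte_mbuf) == 128 and the default 128-byte headroom,
 * data_off is 256 and is aligned to 256; the result is then capped by the
 * 64-byte cache line, so the helper above returns 64.
 */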
950 sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
952 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
953 const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
954 const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
956 unsigned int buf_aligned;
957 unsigned int start_alignment;
958 unsigned int end_padding_alignment;
960 /* Below it is assumed that both alignments are power of 2 */
961 SFC_ASSERT(rte_is_power_of_2(nic_align_start));
962 SFC_ASSERT(rte_is_power_of_2(nic_align_end));
965 * mbuf is always cache line aligned, double-check
966 * that it meets rx buffer start alignment requirements.
969 /* Start from mbuf pool data room size */
970 buf_size = rte_pktmbuf_data_room_size(mb_pool);
972 /* Remove headroom */
973 if (buf_size <= RTE_PKTMBUF_HEADROOM) {
975 "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
976 mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
979 buf_size -= RTE_PKTMBUF_HEADROOM;
981 /* Calculate guaranteed data start alignment */
982 buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
984 /* Reserve space for start alignment */
985 if (buf_aligned < nic_align_start) {
986 start_alignment = nic_align_start - buf_aligned;
987 if (buf_size <= start_alignment) {
989 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
991 rte_pktmbuf_data_room_size(mb_pool),
992 RTE_PKTMBUF_HEADROOM, start_alignment);
995 buf_aligned = nic_align_start;
996 buf_size -= start_alignment;
	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). More
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
1010 if (buf_size <= end_padding_alignment) {
1012 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
1014 rte_pktmbuf_data_room_size(mb_pool),
1015 RTE_PKTMBUF_HEADROOM, start_alignment,
1016 end_padding_alignment);
1019 buf_size -= end_padding_alignment;
1022 * Start is aligned the same or better than end,
1023 * just align length.
1025 buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end);
1032 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
1033 uint16_t nb_rx_desc, unsigned int socket_id,
1034 const struct rte_eth_rxconf *rx_conf,
1035 struct rte_mempool *mb_pool)
1037 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1038 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1040 unsigned int rxq_entries;
1041 unsigned int evq_entries;
1042 unsigned int rxq_max_fill_level;
1045 struct sfc_rxq_info *rxq_info;
1046 struct sfc_evq *evq;
1047 struct sfc_rxq *rxq;
1048 struct sfc_dp_rx_qcreate_info info;
1049 struct sfc_dp_rx_hw_limits hw_limits;
1050 uint16_t rx_free_thresh;
1053 memset(&hw_limits, 0, sizeof(hw_limits));
1054 hw_limits.rxq_max_entries = sa->rxq_max_entries;
1055 hw_limits.rxq_min_entries = sa->rxq_min_entries;
1056 hw_limits.evq_max_entries = sa->evq_max_entries;
1057 hw_limits.evq_min_entries = sa->evq_min_entries;
1059 rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
1060 &rxq_entries, &evq_entries,
1061 &rxq_max_fill_level);
1063 goto fail_size_up_rings;
1064 SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
1065 SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
1066 SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
1068 offloads = rx_conf->offloads |
1069 sa->eth_dev->data->dev_conf.rxmode.offloads;
1070 rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
1074 buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
1075 if (buf_size == 0) {
1076 sfc_err(sa, "RxQ %u mbuf pool object size is too small",
1082 if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
1083 encp->enc_rx_prefix_size,
1084 (offloads & DEV_RX_OFFLOAD_SCATTER),
1086 sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
1087 sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
1088 "PDU size %u plus Rx prefix %u bytes",
1089 sw_index, buf_size, (unsigned int)sa->port.pdu,
1090 encp->enc_rx_prefix_size);
1095 SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
1096 rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
1098 SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
1099 rxq_info->entries = rxq_entries;
1101 if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
1102 rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
1104 rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
1106 rxq_info->type_flags =
1107 (offloads & DEV_RX_OFFLOAD_SCATTER) ?
1108 EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
1110 if ((encp->enc_tunnel_encapsulations_supported != 0) &&
1111 (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
1112 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
1113 rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
1115 rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
1116 evq_entries, socket_id, &evq);
	rxq = &sa->rxq_ctrl[sw_index];
	rxq->evq = evq;
	rxq->hw_index = sw_index;
	/*
	 * If the Rx refill threshold is specified (its value is non-zero) in
	 * the Rx configuration, use the specified value. Otherwise use 1/8 of
	 * the number of Rx descriptors as the default. It keeps the Rx ring
	 * full enough and does not refill too aggressively if the packet
	 * rate is high.
	 *
	 * Since the PMD refills in bulks, waiting for a full bulk effectively
	 * rounds the threshold down; round up here to mitigate it a bit.
	 */
	rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
		rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
	/* Rx refill threshold cannot be smaller than refill bulk */
	rxq_info->refill_threshold =
		RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
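	/*
	 * Worked example (added for clarity): with nb_rx_desc == 1024 and no
	 * rx_free_thresh in the configuration, the default threshold is
	 * EFX_DIV_ROUND_UP(1024, 8) == 128; the RTE_MAX() above then ensures
	 * that the result is never below SFC_RX_REFILL_BULK.
	 */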
	rxq_info->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;
1142 rc = sfc_dma_alloc(sa, "rxq", sw_index,
1143 efx_rxq_size(sa->nic, rxq_info->entries),
1144 socket_id, &rxq->mem);
1146 goto fail_dma_alloc;
1148 memset(&info, 0, sizeof(info));
1149 info.refill_mb_pool = rxq_info->refill_mb_pool;
1150 info.max_fill_level = rxq_max_fill_level;
1151 info.refill_threshold = rxq_info->refill_threshold;
1152 info.buf_size = buf_size;
1153 info.batch_max = encp->enc_rx_batch_max;
1154 info.prefix_size = encp->enc_rx_prefix_size;
1156 if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
1157 info.flags |= SFC_RXQ_FLAG_RSS_HASH;
1159 info.rxq_entries = rxq_info->entries;
1160 info.rxq_hw_ring = rxq->mem.esm_base;
1161 info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
1162 info.evq_entries = evq_entries;
1163 info.evq_hw_ring = evq->mem.esm_base;
1164 info.hw_index = rxq->hw_index;
1165 info.mem_bar = sa->mem_bar.esb_base;
1166 info.vi_window_shift = encp->enc_vi_window_shift;
1168 rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
1169 &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
1170 socket_id, &info, &rxq_info->dp);
1172 goto fail_dp_rx_qcreate;
1174 evq->dp_rxq = rxq_info->dp;
1176 rxq_info->state = SFC_RXQ_INITIALIZED;
1178 rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
1183 sfc_dma_free(sa, &rxq->mem);
1189 rxq_info->entries = 0;
1193 sfc_log_init(sa, "failed %d", rc);
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	sa->priv.dp_rx->qdestroy(rxq_info->dp);
	rxq_info->dp = NULL;

	rxq_info->state &= ~SFC_RXQ_INITIALIZED;
	rxq_info->entries = 0;

	rxq = &sa->rxq_ctrl[sw_index];

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;
}
/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
	{ ETH_RSS_NONFRAG_IPV4_TCP,
	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV4_UDP,
	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV4, 2TUPLE) },
	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
	  ETH_RSS_IPV6_EX,
	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV6, 2TUPLE) }
};
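
/*
 * Note (added for clarity): the table above is walked in both directions by
 * sfc_rx_hf_rte_to_efx() and sfc_rx_hf_efx_to_rte() below; for example,
 * requesting ETH_RSS_NONFRAG_IPV4_TCP enables 4-tuple hashing of IPv4/TCP
 * flows on the NIC.
 */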
1245 static efx_rx_hash_type_t
1246 sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
1247 unsigned int *hash_type_flags_supported,
1248 unsigned int nb_hash_type_flags_supported)
1250 efx_rx_hash_type_t hash_type_masked = 0;
1253 for (i = 0; i < nb_hash_type_flags_supported; ++i) {
1254 unsigned int class_tuple_lbn[] = {
1255 EFX_RX_CLASS_IPV4_TCP_LBN,
1256 EFX_RX_CLASS_IPV4_UDP_LBN,
1257 EFX_RX_CLASS_IPV4_LBN,
1258 EFX_RX_CLASS_IPV6_TCP_LBN,
1259 EFX_RX_CLASS_IPV6_UDP_LBN,
1260 EFX_RX_CLASS_IPV6_LBN
1263 for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
1264 unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
1267 tuple_mask <<= class_tuple_lbn[j];
1268 flag = hash_type & tuple_mask;
1270 if (flag == hash_type_flags_supported[i])
1271 hash_type_masked |= flag;
1275 return hash_type_masked;
1279 sfc_rx_hash_init(struct sfc_adapter *sa)
1281 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1282 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1283 uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
1284 efx_rx_hash_alg_t alg;
1285 unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
1286 unsigned int nb_flags_supp;
1287 struct sfc_rss_hf_rte_to_efx *hf_map;
1288 struct sfc_rss_hf_rte_to_efx *entry;
1289 efx_rx_hash_type_t efx_hash_types;
1293 if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
1294 alg = EFX_RX_HASHALG_TOEPLITZ;
1295 else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
1296 alg = EFX_RX_HASHALG_PACKED_STREAM;
1300 rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
1301 RTE_DIM(flags_supp), &nb_flags_supp);
1305 hf_map = rte_calloc_socket("sfc-rss-hf-map",
1306 RTE_DIM(sfc_rss_hf_map),
1307 sizeof(*hf_map), 0, sa->socket_id);
1313 for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
1314 efx_rx_hash_type_t ht;
1316 ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
1317 flags_supp, nb_flags_supp);
1319 entry->rte = sfc_rss_hf_map[i].rte;
1321 efx_hash_types |= ht;
1326 rss->hash_alg = alg;
1327 rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
1328 rss->hf_map = hf_map;
1329 rss->hash_types = efx_hash_types;
void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	rte_free(rss->hf_map);
}
1343 sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
1344 efx_rx_hash_type_t *efx)
1346 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1347 efx_rx_hash_type_t hash_types = 0;
1350 for (i = 0; i < rss->hf_map_nb_entries; ++i) {
1351 uint64_t rte_mask = rss->hf_map[i].rte;
1353 if ((rte & rte_mask) != 0) {
1355 hash_types |= rss->hf_map[i].efx;
1360 sfc_err(sa, "unsupported hash functions requested");
static uint64_t
sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
{
	uint64_t rte = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

		if ((efx & hash_type) == hash_type)
			rte |= rss->hf_map[i].rte;
	}

	return rte;
}
1386 sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
1387 struct rte_eth_rss_conf *conf)
1389 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1390 efx_rx_hash_type_t efx_hash_types = rss->hash_types;
1391 uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
1394 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1395 if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
1396 conf->rss_key != NULL)
1400 if (conf->rss_hf != 0) {
1401 rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
1406 if (conf->rss_key != NULL) {
1407 if (conf->rss_key_len != sizeof(rss->key)) {
1408 sfc_err(sa, "RSS key size is wrong (should be %zu)",
1412 rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
1415 rss->hash_types = efx_hash_types;
1421 sfc_rx_rss_config(struct sfc_adapter *sa)
1423 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1426 if (rss->channels > 0) {
1427 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1428 rss->hash_alg, rss->hash_types,
1433 rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1434 rss->key, sizeof(rss->key));
1438 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1439 rss->tbl, RTE_DIM(rss->tbl));
1447 sfc_rx_start(struct sfc_adapter *sa)
1449 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1450 unsigned int sw_index;
1453 sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
1455 rc = efx_rx_init(sa->nic);
1459 rc = sfc_rx_rss_config(sa);
1461 goto fail_rss_config;
1463 for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
1464 if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
1465 (!sas->rxq_info[sw_index].deferred_start ||
1466 sas->rxq_info[sw_index].deferred_started)) {
1467 rc = sfc_rx_qstart(sa, sw_index);
1469 goto fail_rx_qstart;
1476 while (sw_index-- > 0)
1477 sfc_rx_qstop(sa, sw_index);
1480 efx_rx_fini(sa->nic);
1483 sfc_log_init(sa, "failed %d", rc);
void
sfc_rx_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);

	sw_index = sas->rxq_count;
	while (sw_index-- > 0) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int max_entries;

	max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}
1521 sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
1523 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1524 uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
1525 sfc_rx_get_queue_offload_caps(sa);
1526 struct sfc_rss *rss = &sas->rss;
1529 switch (rxmode->mq_mode) {
1530 case ETH_MQ_RX_NONE:
1531 /* No special checks are required */
1534 if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
1535 sfc_err(sa, "RSS is not available");
1540 sfc_err(sa, "Rx multi-queue mode %u not supported",
1546 * Requested offloads are validated against supported by ethdev,
1547 * so unsupported offloads cannot be added as the result of
1550 if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
1551 (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
1552 sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
1553 rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
1556 if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
1557 (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
1558 sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
1559 rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
1562 if ((offloads_supported & DEV_RX_OFFLOAD_RSS_HASH) &&
1563 (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG))
1564 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sas->rxq_count);

	sw_index = sas->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
			sfc_rx_qfini(sa, sw_index);
	}

	sas->rxq_count = nb_rx_queues;
}
/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
1601 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1602 struct sfc_rss *rss = &sas->rss;
1603 struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
1604 const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
1607 sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
1608 nb_rx_queues, sas->rxq_count);
1610 rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
1612 goto fail_check_mode;
1614 if (nb_rx_queues == sas->rxq_count)
1617 if (sas->rxq_info == NULL) {
1619 sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
1620 sizeof(sas->rxq_info[0]), 0,
1622 if (sas->rxq_info == NULL)
1623 goto fail_rxqs_alloc;
1626 * Allocate primary process only RxQ control from heap
1627 * since it should not be shared.
1630 sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0]));
1631 if (sa->rxq_ctrl == NULL)
1632 goto fail_rxqs_ctrl_alloc;
1634 struct sfc_rxq_info *new_rxq_info;
1635 struct sfc_rxq *new_rxq_ctrl;
1637 if (nb_rx_queues < sas->rxq_count)
1638 sfc_rx_fini_queues(sa, nb_rx_queues);
1642 rte_realloc(sas->rxq_info,
1643 nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
1644 if (new_rxq_info == NULL && nb_rx_queues > 0)
1645 goto fail_rxqs_realloc;
1648 new_rxq_ctrl = realloc(sa->rxq_ctrl,
1649 nb_rx_queues * sizeof(sa->rxq_ctrl[0]));
1650 if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
1651 goto fail_rxqs_ctrl_realloc;
1653 sas->rxq_info = new_rxq_info;
1654 sa->rxq_ctrl = new_rxq_ctrl;
1655 if (nb_rx_queues > sas->rxq_count) {
1656 memset(&sas->rxq_info[sas->rxq_count], 0,
1657 (nb_rx_queues - sas->rxq_count) *
1658 sizeof(sas->rxq_info[0]));
1659 memset(&sa->rxq_ctrl[sas->rxq_count], 0,
1660 (nb_rx_queues - sas->rxq_count) *
1661 sizeof(sa->rxq_ctrl[0]));
1665 while (sas->rxq_count < nb_rx_queues) {
1666 rc = sfc_rx_qinit_info(sa, sas->rxq_count);
1668 goto fail_rx_qinit_info;
1674 rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
1675 MIN(sas->rxq_count, EFX_MAXRSS) : 0;
	if (rss->channels > 0) {
		struct rte_eth_rss_conf *adv_conf_rss;
		unsigned int sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			rss->tbl[sw_index] = sw_index % rss->channels;
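		/*
		 * Illustrative example (added for clarity): with 4 Rx queues
		 * participating in RSS, the indirection table filled above
		 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... for all
		 * EFX_RSS_TBL_SIZE entries, i.e. a plain round-robin spread.
		 */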

		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
		if (rc != 0)
			goto fail_rx_process_adv_conf_rss;
	}

	return 0;
1692 fail_rx_process_adv_conf_rss:
1694 fail_rxqs_ctrl_realloc:
1696 fail_rxqs_ctrl_alloc:
1701 sfc_log_init(sa, "failed %d", rc);
/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	sfc_rx_fini_queues(sa, 0);

	rss->channels = 0;

	free(sa->rxq_ctrl);
	sa->rxq_ctrl = NULL;

	rte_free(sfc_sa2shared(sa)->rxq_info);
	sfc_sa2shared(sa)->rxq_info = NULL;
}