/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a flush failure
 * or timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

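/*
 * Refill the Rx ring from the mempool in bulks of SFC_RX_REFILL_BULK
 * mbufs. Nothing is posted if the free space in the ring is below the
 * refill threshold; posted descriptors are pushed to the NIC at the end.
 */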
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted descriptors before leaving */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
			m->data_off = RTE_PKTMBUF_HEADROOM;
			SFC_ASSERT(m->next == NULL);
			SFC_ASSERT(m->nb_segs == 1);
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}

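/*
 * Translate libefx Rx descriptor checksum flags into the corresponding
 * mbuf ol_flags (IPv4 and L4 checksum good/bad/unknown).
 */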
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

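/*
 * Extract the Toeplitz RSS hash from the Rx prefix (pseudo-header) and
 * store it in the mbuf if the queue is configured to deliver the RSS hash.
 */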
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
}

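/*
 * libefx-based datapath receive burst callback: poll the event queue,
 * complete received descriptors, chain scattered fragments, fill in
 * offload flags and packet type, and refill the ring before returning.
 */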
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mempool_put(rxq->refill_mb_pool, m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	return done_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll EvQ to derive up-to-date 'rxq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because API design assumes that it
	 * is the duty of the caller to satisfy all conditions
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is neglected since the two checks
	 * below rely on the figures which take the HW limits
	 * into account and thus if an entry is reserved, the
	 * checks will fail and the UNAVAIL code will be returned
	 */

	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}

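/*
 * Map a generic datapath Rx queue back to the control path Rx queue
 * using the port and queue IDs stored in the datapath queue.
 */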
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;
	struct sfc_rxq *rxq;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = eth_dev->data->dev_private;

	SFC_ASSERT(dpq->queue_id < sa->rxq_count);
	rxq = sa->rxq_info[dpq->queue_id].rxq;

	SFC_ASSERT(rxq != NULL);
	return rxq;
}

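/*
 * The libefx-based datapath shares the event queue with the control
 * path, so Rx ring and event ring sizes simply follow the requested
 * descriptor count and the fill level is limited by EFX_RXQ_LIMIT().
 */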
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to libefx-based PMD and uses
	 * event queue structure directly. So, there is no necessity to
	 * return EvQ read pointer.
	 */
}

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);

		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm to do it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

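/* libefx-based Rx datapath operations exported to the generic driver */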
struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_RX_FEAT_SCATTER,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.pkt_burst		= sfc_efx_recv_pkts,
};

unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq = sa->rxq_info[sw_index].rxq;

	if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
	struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

	return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}

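/*
 * Flush an Rx queue: request the flush, poll the event queue for the
 * flush done/failed event with a bounded number of retries, and purge
 * the remaining descriptors via the datapath afterwards.
 */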
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of flush failure or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %u flushed", sw_index);
	}

	sa->dp_rx->qpurge(rxq->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * filter for the default Rx queue might fail, in particular, while
	 * running over PCI function which is not a member of corresponding
	 * privilege groups; if this occurs, a few iterations will be made
	 * to repeat this step without promiscuous and all-multicast flags set
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_port *port = &sa->port;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			    &rxq->mem, rxq_info->entries,
			    0 /* not used on EF10 */, rxq_info->type_flags,
			    evq->common, &rxq->common);
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq->state |= SFC_RXQ_STARTED;

	if ((sw_index == 0) && !port->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;

	if (rxq->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}

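/* Report device level Rx offload capabilities */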
uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t caps = 0;

	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	caps |= DEV_RX_OFFLOAD_CRC_STRIP;
	caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
	caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
	caps |= DEV_RX_OFFLOAD_TCP_CKSUM;

	if (encp->enc_tunnel_encapsulations_supported &&
	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return caps;
}

uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	uint64_t caps = 0;

	if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
		caps |= DEV_RX_OFFLOAD_SCATTER;

	return caps;
}

static void
sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
		    const char *verdict, uint64_t offloads)
{
	unsigned long long bit;

	while ((bit = __builtin_ffsll(offloads)) != 0) {
		uint64_t flag = (1ULL << --bit);

		sfc_err(sa, "Rx %s offload %s %s", offload_group,
			rte_eth_dev_rx_offload_name(flag), verdict);

		offloads &= ~flag;
	}
}

static boolean_t
sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
{
	uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
	uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
			     sfc_rx_get_queue_offload_caps(sa);
	uint64_t rejected = requested & ~supported;
	uint64_t missing = (requested & mandatory) ^ mandatory;
	boolean_t mismatch = B_FALSE;

	if (rejected) {
		sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
		mismatch = B_TRUE;
	}

	if (missing) {
		sfc_rx_log_offloads(sa, "queue", "must be set", missing);
		mismatch = B_TRUE;
	}

	return mismatch;
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf)
{
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rxq_max_fill_level);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
	    DEV_RX_OFFLOAD_CHECKSUM)
		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");

	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	    (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");

	if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
		rc = EINVAL;

	return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	int order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}

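/*
 * Derive the usable Rx buffer size from the mempool data room, taking
 * mbuf headroom and the NIC buffer start/end alignment requirements
 * into account. Returns 0 if the mempool objects are too small.
 */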
uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are power of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). More
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}

		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}

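/*
 * Initialize an Rx queue: size the rings, validate the configuration
 * and mempool, create the event queue, allocate control path state and
 * DMA memory, and create the datapath Rx queue.
 */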
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;

	rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
				       &rxq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
	SFC_ASSERT(rxq_entries >= nb_rx_desc);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;
	rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
	rxq_info->type_flags =
		(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rxq_info->rxq = rxq;

	rxq->evq = evq;
	rxq->hw_index = sw_index;
	rxq->refill_threshold =
		RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq->refill_mb_pool = mb_pool;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

	if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;

	rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				socket_id, &info, &rxq->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq->dp;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rxq_info->rxq = NULL;
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	sa->dp_rx->qdestroy(rxq->dp);
	rxq->dp = NULL;

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;

	rte_free(rxq);
}

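/* Convert DPDK RSS hash function flags (rss_hf) to libefx hash types */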
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
	efx_rx_hash_type_t efx_hash_types = 0;

	if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
		       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV4;

	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV4;

	if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
		       ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV6;

	if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV6;

	return efx_hash_types;
}

uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
	uint64_t rss_hf = 0;

	if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
		rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			   ETH_RSS_NONFRAG_IPV4_OTHER);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
		rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			   ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
		rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

	return rss_hf;
}

static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
			    struct rte_eth_rss_conf *conf)
{
	efx_rx_hash_type_t efx_hash_types = sa->rss_hash_types;

	if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
		if ((conf->rss_hf != 0 && conf->rss_hf != SFC_RSS_OFFLOADS) ||
		    conf->rss_key != NULL)
			return EINVAL;
	}

	if (conf->rss_hf != 0) {
		if ((conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
			sfc_err(sa, "unsupported hash functions requested");
			return EINVAL;
		}

		efx_hash_types = sfc_rte_to_efx_hash_type(conf->rss_hf);
	}

	if (conf->rss_key != NULL) {
		if (conf->rss_key_len != sizeof(sa->rss_key)) {
			sfc_err(sa, "RSS key size is wrong (should be %lu)",
				sizeof(sa->rss_key));
			return EINVAL;
		}
		rte_memcpy(sa->rss_key, conf->rss_key, sizeof(sa->rss_key));
	}

	sa->rss_hash_types = efx_hash_types;

	return 0;
}

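/*
 * Program RSS on the NIC: hash algorithm and types, hash key and the
 * indirection table, for the default RSS context.
 */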
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	int rc = 0;

	if (sa->rss_channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   EFX_RX_HASHALG_TOEPLITZ,
					   sa->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  sa->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  sa->rss_tbl, RTE_DIM(sa->rss_tbl));
	}

finish:
	return rc;
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if ((!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	case ETH_MQ_RX_RSS:
		if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (offloads_rejected) {
		sfc_rx_log_offloads(sa, "device", "is unsupported",
				    offloads_rejected);
		rc = EINVAL;
	}

	if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
		sfc_warn(sa, "FCS stripping cannot be disabled - always on");
		rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
		rxmode->hw_strip_crc = 1;
	}

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

	sw_index = sa->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	sa->rxq_count = nb_rx_queues;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when the number of receive
 * queues is specified together with other device level receive
 * configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sa->rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rx_queues == sa->rxq_count)
		goto done;

	if (sa->rxq_info == NULL) {
		rc = ENOMEM;
		sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
						 sizeof(sa->rxq_info[0]), 0,
						 sa->socket_id);
		if (sa->rxq_info == NULL)
			goto fail_rxqs_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;

		if (nb_rx_queues < sa->rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sa->rxq_info,
				    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rx_queues > 0)
			goto fail_rxqs_realloc;

		sa->rxq_info = new_rxq_info;
		if (nb_rx_queues > sa->rxq_count)
			memset(&sa->rxq_info[sa->rxq_count], 0,
			       (nb_rx_queues - sa->rxq_count) *
			       sizeof(sa->rxq_info[0]));
	}

	while (sa->rxq_count < nb_rx_queues) {
		rc = sfc_rx_qinit_info(sa, sa->rxq_count);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sa->rxq_count++;
	}

	sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
			   MIN(sa->rxq_count, EFX_MAXRSS) : 0;

	if (sa->rss_channels > 0) {
		struct rte_eth_rss_conf *adv_conf_rss;
		unsigned int sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;

		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
		if (rc != 0)
			goto fail_rx_process_adv_conf_rss;
	}

done:
	return 0;

fail_rx_process_adv_conf_rss:
fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	sfc_rx_fini_queues(sa, 0);

	sa->rss_channels = 0;

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
}