/*-
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
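/*
 * Worst case flush wait: SFC_RX_QFLUSH_ATTEMPTS retries, each polling the
 * event queue up to SFC_RX_QFLUSH_POLL_ATTEMPTS times with a
 * SFC_RX_QFLUSH_POLL_WAIT_MS pause, i.e. 3 * 2000 * 1 ms = 6 seconds in
 * total (see sfc_rx_qflush() below).
 */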
void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);
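	/*
	 * Note: the refill threshold is clamped to at least
	 * SFC_RX_REFILL_BULK (i.e. RTE_DIM(objs)) in sfc_rx_qinit(), so
	 * free_space >= refill_threshold implies at least one full bulk.
	 */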
	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment the counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
			m->data_off = RTE_PKTMBUF_HEADROOM;
			SFC_ASSERT(m->next == NULL);
			SFC_ASSERT(m->nb_segs == 1);
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}
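/*
 * Convert libefx Rx descriptor flags to the corresponding mbuf offload
 * flags (IP and L4 checksum validation results).
 */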
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}
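/* Convert libefx Rx descriptor flags to the corresponding mbuf packet type */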
static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}
static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
}
#else
static void
sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
			__rte_unused unsigned int flags,
			__rte_unused struct rte_mbuf *m)
{
}
#endif
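/*
 * libefx-based burst receive: poll the event queue to update the set of
 * completed descriptors, build mbufs for entries in [completed, pending)
 * (chaining fragments of scattered packets), and finally refill the ring.
 */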
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mempool_put(rxq->refill_mb_pool, m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	return done_pkts;
}
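/*
 * Report the number of completed Rx descriptors; the event queue is polled
 * first so that the figure is up to date.
 */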
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}
static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll EvQ to derive the up-to-date 'rxq->pending' figure;
	 * the queue is required to be running, but the check is
	 * omitted because the API design assumes that it is the
	 * caller's duty to satisfy all preconditions.
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is omitted since the two checks
	 * below rely on figures which take the HW limits into
	 * account; thus, if an entry is reserved, the checks
	 * will fail and the UNAVAIL code will be returned.
	 */
	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
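/*
 * Map a datapath Rx queue back to its control path sfc_rxq using the port
 * and queue IDs stored in the generic datapath queue descriptor.
 */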
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;
	struct sfc_rxq *rxq;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = eth_dev->data->dev_private;

	SFC_ASSERT(dpq->queue_id < sa->rxq_count);
	rxq = sa->rxq_info[dpq->queue_id].rxq;

	SFC_ASSERT(rxq != NULL);
	return rxq;
}
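/*
 * For the libefx datapath the Rx queue and event queue sizes simply match
 * the requested descriptor count; the maximum fill level is bounded by
 * EFX_RXQ_LIMIT() which keeps a few ring entries reserved.
 */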
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}
static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}
static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	return 0;
}
static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* The libefx-based datapath is bound to the libefx-based PMD and
	 * uses the event queue structure directly, so there is no need to
	 * return the EvQ read pointer.
	 */
}
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm in doing it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}
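/*
 * libefx-based datapath operations bundle: these callbacks implement the
 * Rx datapath named by SFC_KVARG_DATAPATH_EFX.
 */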
struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name			= SFC_KVARG_DATAPATH_EFX,
		.type			= SFC_DP_RX,
		.hw_fw_caps		= 0,
	},
	.features		= SFC_DP_RX_FEAT_SCATTER,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.pkt_burst		= sfc_efx_recv_pkts,
};
unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq = sa->rxq_info[sw_index].rxq;

	if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
	struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

	return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of flush failure or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for the Rx queue flush done or failed event for at
		 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_info(sa, "RxQ %u flushed", sw_index);
	}

	sa->dp_rx->qpurge(rxq->dp);
}
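/*
 * Install the MAC filter that directs default traffic to the given Rx queue,
 * retrying without promiscuous/all-multicast if the HW rejects the request.
 */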
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular,
	 * while running over a PCI function which is not a member of the
	 * corresponding privilege groups; if this occurs, a few iterations
	 * will be made to repeat this step with the promiscuous and
	 * all-multicast flags cleared.
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_port *port = &sa->port;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			    &rxq->mem, rxq_info->entries,
			    0 /* not used on EF10 */, rxq_info->type_flags,
			    evq->common, &rxq->common);
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq->state |= SFC_RXQ_STARTED;

	if ((sw_index == 0) && !port->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;

	if (rxq->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rxq_max_fill_level);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * The mbuf is always cache line aligned; double-check that it meets
	 * the Rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate the space which can be lost. If the guaranteed
		 * buffer size is odd, the lost space is (nic_align_end - 1).
		 * A more accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << rte_bsf32(buf_size));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}
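/*
 * Set up a single Rx queue: validate the configuration, size the rings via
 * the datapath-specific callback, allocate the event queue, the control
 * path and datapath queue structures, and DMA memory for the hardware ring.
 */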
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;

	rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
				       &rxq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
	SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
	SFC_ASSERT(rxq_entries >= nb_rx_desc);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;
	rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
	rxq_info->type_flags =
		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rxq_info->rxq = rxq;

	rxq->evq = evq;
	rxq->hw_index = sw_index;
	rxq->refill_threshold =
		RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq->refill_mb_pool = mb_pool;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
	if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;

	rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				socket_id, &info, &rxq->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq->dp;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rxq_info->rxq = NULL;
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	sa->dp_rx->qdestroy(rxq->dp);
	rxq->dp = NULL;

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;

	rte_free(rxq);
}
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
	efx_rx_hash_type_t efx_hash_types = 0;

	if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
		       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV4;

	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV4;

	if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
		       ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV6;

	if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV6;

	return efx_hash_types;
}

uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
	uint64_t rss_hf = 0;

	if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
		rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			   ETH_RSS_NONFRAG_IPV4_OTHER);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
		rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			   ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
		rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

	return rss_hf;
}
#endif
#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	int rc = 0;

	if (sa->rss_channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   EFX_RX_HASHALG_TOEPLITZ,
					   sa->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  sa->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  sa->rss_tbl, RTE_DIM(sa->rss_tbl));
	}

finish:
	return rc;
}
#else
static int
sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
{
	return 0;
}
#endif
int
sfc_rx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if ((!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_rx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
#if EFSYS_OPT_RX_SCALE
	case ETH_MQ_RX_RSS:
		if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
#endif
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		sfc_err(sa,
			"Q-in-Q HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		sfc_warn(sa,
			 "FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->enable_scatter &&
	    (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
		sfc_err(sa, "Rx scatter not supported by %s datapath",
			sa->dp_rx->dp.name);
		rc = EINVAL;
	}

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");
		rc = EINVAL;
	}

	return rc;
}
/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

	sw_index = sa->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	sa->rxq_count = nb_rx_queues;
}
/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when the number of receive queues
 * is specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sa->rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rx_queues == sa->rxq_count)
		goto done;

	if (sa->rxq_info == NULL) {
		rc = ENOMEM;
		sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
						 sizeof(sa->rxq_info[0]), 0,
						 sa->socket_id);
		if (sa->rxq_info == NULL)
			goto fail_rxqs_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;

		if (nb_rx_queues < sa->rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sa->rxq_info,
				    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rx_queues > 0)
			goto fail_rxqs_realloc;

		sa->rxq_info = new_rxq_info;
		if (nb_rx_queues > sa->rxq_count)
			memset(&sa->rxq_info[sa->rxq_count], 0,
			       (nb_rx_queues - sa->rxq_count) *
			       sizeof(sa->rxq_info[0]));
	}

	while (sa->rxq_count < nb_rx_queues) {
		rc = sfc_rx_qinit_info(sa, sa->rxq_count);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sa->rxq_count++;
	}

#if EFSYS_OPT_RX_SCALE
	sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
			   MIN(sa->rxq_count, EFX_MAXRSS) : 0;

	if (sa->rss_channels > 0) {
		unsigned int sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
	}
#endif

done:
	return 0;

fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	sfc_rx_fini_queues(sa, 0);

	sa->rss_channels = 0;

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
}