4 * Copyright (c) 2016-2017 Solarflare Communications Inc.
7 * This software was jointly developed between OKTET Labs (under contract
8 * for Solarflare) and Solarflare Communications, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <rte_mempool.h>
37 #include "sfc_debug.h"
41 #include "sfc_kvargs.h"
42 #include "sfc_tweak.h"
45 * Maximum number of Rx queue flush attempts in the case of failure or flush timeout
48 #define SFC_RX_QFLUSH_ATTEMPTS (3)
51 * Time to wait between event queue polling attempts when waiting for Rx
52 * queue flush done or failed events.
54 #define SFC_RX_QFLUSH_POLL_WAIT_MS (1)
57 * Maximum number of event queue polling attempts when waiting for Rx queue
58 * flush done or failed events. It defines Rx queue flush attempt timeout
59 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
61 #define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000)
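/*
 * Worked example of the resulting worst case: each of the
 * SFC_RX_QFLUSH_ATTEMPTS (3) flush attempts may poll the event queue up to
 * SFC_RX_QFLUSH_POLL_ATTEMPTS (2000) times with SFC_RX_QFLUSH_POLL_WAIT_MS
 * (1 ms) between polls, i.e. up to 3 * 2000 * 1 ms = 6 seconds in total
 * (see the comment in sfc_rx_qflush() below).
 */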
64 sfc_rx_qflush_done(struct sfc_rxq *rxq)
66 rxq->state |= SFC_RXQ_FLUSHED;
67 rxq->state &= ~SFC_RXQ_FLUSHING;
71 sfc_rx_qflush_failed(struct sfc_rxq *rxq)
73 rxq->state |= SFC_RXQ_FLUSH_FAILED;
74 rxq->state &= ~SFC_RXQ_FLUSHING;
78 sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
80 unsigned int free_space;
82 void *objs[SFC_RX_REFILL_BULK];
83 efsys_dma_addr_t addr[RTE_DIM(objs)];
84 unsigned int added = rxq->added;
87 struct sfc_efx_rx_sw_desc *rxd;
89 uint16_t port_id = rxq->dp.dpq.port_id;
91 free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
92 (added - rxq->completed);
94 if (free_space < rxq->refill_threshold)
97 bulks = free_space / RTE_DIM(objs);
98 /* refill_threshold guarantees that bulks is positive */
99 SFC_ASSERT(bulks > 0);
101 id = added & rxq->ptr_mask;
103 if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
104 RTE_DIM(objs)) < 0)) {
106 * It is hardly a safe way to increment the counter
107 * from different contexts, but all PMDs do it.
109 rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
111 /* Return if we have posted nothing yet */
112 if (added == rxq->added)
118 for (i = 0; i < RTE_DIM(objs);
119 ++i, id = (id + 1) & rxq->ptr_mask) {
122 rxd = &rxq->sw_desc[id];
125 SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
126 m->data_off = RTE_PKTMBUF_HEADROOM;
127 SFC_ASSERT(m->next == NULL);
128 SFC_ASSERT(m->nb_segs == 1);
131 addr[i] = rte_pktmbuf_iova(m);
134 efx_rx_qpost(rxq->common, addr, rxq->buf_size,
135 RTE_DIM(objs), rxq->completed, added);
136 added += RTE_DIM(objs);
137 } while (--bulks > 0);
139 SFC_ASSERT(added != rxq->added);
141 efx_rx_qpush(rxq->common, added, &rxq->pushed);
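/*
 * Note on the free-running ring counters used above (a summary, not new
 * behaviour): 'completed' <= 'pending' <= 'added', and 'pushed' catches up
 * with 'added' once efx_rx_qpush() reports the new descriptors to the HW.
 * The current fill level is 'added' - 'completed' and is bounded by
 * EFX_RXQ_LIMIT() of the ring size; all counters wrap naturally and are
 * masked with 'ptr_mask' only when used as ring indices.
 */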
145 sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
147 uint64_t mbuf_flags = 0;
149 switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
150 case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
151 mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
154 mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
157 RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
158 SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
159 PKT_RX_IP_CKSUM_UNKNOWN);
163 switch ((desc_flags &
164 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
165 case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
166 case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
167 mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
171 mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
174 RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
175 SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
176 PKT_RX_L4_CKSUM_UNKNOWN);
184 sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
186 return RTE_PTYPE_L2_ETHER |
187 ((desc_flags & EFX_PKT_IPV4) ?
188 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
189 ((desc_flags & EFX_PKT_IPV6) ?
190 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
191 ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
192 ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
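/*
 * Example of the two helpers above: a descriptor with
 * EFX_PKT_IPV4 | EFX_CKSUM_IPV4 | EFX_PKT_TCP | EFX_CKSUM_TCPUDP set yields
 * ol_flags PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD and packet_type
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 */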
195 static const uint32_t *
196 sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
198 static const uint32_t ptypes[] = {
200 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
201 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
210 #if EFSYS_OPT_RX_SCALE
212 sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
218 if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
221 mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
223 if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
224 m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
225 EFX_RX_HASHALG_TOEPLITZ,
228 m->ol_flags |= PKT_RX_RSS_HASH;
233 sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
234 __rte_unused unsigned int flags,
235 __rte_unused struct rte_mbuf *m)
241 sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
243 struct sfc_dp_rxq *dp_rxq = rx_queue;
244 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
245 unsigned int completed;
246 unsigned int prefix_size = rxq->prefix_size;
247 unsigned int done_pkts = 0;
248 boolean_t discard_next = B_FALSE;
249 struct rte_mbuf *scatter_pkt = NULL;
251 if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
254 sfc_ev_qpoll(rxq->evq);
256 completed = rxq->completed;
257 while (completed != rxq->pending && done_pkts < nb_pkts) {
259 struct sfc_efx_rx_sw_desc *rxd;
261 unsigned int seg_len;
262 unsigned int desc_flags;
264 id = completed++ & rxq->ptr_mask;
265 rxd = &rxq->sw_desc[id];
267 desc_flags = rxd->flags;
272 if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
275 if (desc_flags & EFX_PKT_PREFIX_LEN) {
279 rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
280 rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
284 seg_len = rxd->size - prefix_size;
287 rte_pktmbuf_data_len(m) = seg_len;
288 rte_pktmbuf_pkt_len(m) = seg_len;
290 if (scatter_pkt != NULL) {
291 if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
292 rte_pktmbuf_free(scatter_pkt);
295 /* The packet to deliver */
299 if (desc_flags & EFX_PKT_CONT) {
300 /* The packet is scattered, more fragments to come */
302 /* Further fragments have no prefix */
307 /* Scattered packet is done */
309 /* The first fragment of the packet has prefix */
310 prefix_size = rxq->prefix_size;
313 sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
315 sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
318 * Extract RSS hash from the packet prefix and
319 * set the corresponding field (if needed and possible)
321 sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
323 m->data_off += prefix_size;
330 discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
331 rte_mempool_put(rxq->refill_mb_pool, m);
335 /* pending is only moved when an entire packet is received */
336 SFC_ASSERT(scatter_pkt == NULL);
338 rxq->completed = completed;
340 sfc_efx_rx_qrefill(rxq);
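/*
 * The receive burst above follows a simple pattern: poll the event queue to
 * refresh 'pending', walk the completed descriptors (delivering at most
 * 'nb_pkts' packets), chain fragments of scattered packets, drop descriptors
 * marked EFX_ADDR_MISMATCH/EFX_DISCARD, and finally refill the ring. Since
 * 'pending' only moves at packet boundaries, the loop always stops with a
 * fully delivered (or fully discarded) packet.
 */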
345 static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
347 sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
349 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
351 if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
354 sfc_ev_qpoll(rxq->evq);
356 return rxq->pending - rxq->completed;
359 static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
361 sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
363 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
365 if (unlikely(offset > rxq->ptr_mask))
369 * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
370 * the queue is required to be running, but the check is
371 * omitted because the API design assumes that it is the
372 * caller's duty to satisfy all preconditions
374 SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
375 SFC_EFX_RXQ_FLAG_RUNNING);
376 sfc_ev_qpoll(rxq->evq);
379 * There is a handful of reserved entries in the ring,
380 * but an explicit check whether the offset points to
381 * a reserved entry is omitted since the two checks
382 * below rely on figures which take the HW limits
383 * into account; thus, if an entry is reserved, the
384 * checks fail and the UNAVAIL code is returned
387 if (offset < (rxq->pending - rxq->completed))
388 return RTE_ETH_RX_DESC_DONE;
390 if (offset < (rxq->added - rxq->completed))
391 return RTE_ETH_RX_DESC_AVAIL;
393 return RTE_ETH_RX_DESC_UNAVAIL;
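/*
 * A worked example of the checks above: with completed == 100,
 * pending == 103 and added == 110, offsets 0..2 report
 * RTE_ETH_RX_DESC_DONE, offsets 3..9 report RTE_ETH_RX_DESC_AVAIL and
 * anything further (up to ptr_mask) reports RTE_ETH_RX_DESC_UNAVAIL.
 * This handler is what an application-level
 * rte_eth_rx_descriptor_status() call is expected to end up in for this
 * datapath.
 */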
397 sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
399 const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
400 struct rte_eth_dev *eth_dev;
401 struct sfc_adapter *sa;
404 SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
405 eth_dev = &rte_eth_devices[dpq->port_id];
407 sa = eth_dev->data->dev_private;
409 SFC_ASSERT(dpq->queue_id < sa->rxq_count);
410 rxq = sa->rxq_info[dpq->queue_id].rxq;
412 SFC_ASSERT(rxq != NULL);
416 static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
418 sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
419 unsigned int *rxq_entries,
420 unsigned int *evq_entries,
421 unsigned int *rxq_max_fill_level)
423 *rxq_entries = nb_rx_desc;
424 *evq_entries = nb_rx_desc;
425 *rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
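/*
 * For the libefx-based datapath the ring sizing is one-to-one: the RxQ and
 * its EvQ both get exactly nb_rx_desc entries, and the maximum fill level
 * is EFX_RXQ_LIMIT() of that value, which keeps a handful of ring entries
 * reserved (see the note in sfc_efx_rx_qdesc_status()).
 */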
429 static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
431 sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
432 const struct rte_pci_addr *pci_addr, int socket_id,
433 const struct sfc_dp_rx_qcreate_info *info,
434 struct sfc_dp_rxq **dp_rxqp)
436 struct sfc_efx_rxq *rxq;
440 rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
441 RTE_CACHE_LINE_SIZE, socket_id);
445 sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
448 rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
450 sizeof(*rxq->sw_desc),
451 RTE_CACHE_LINE_SIZE, socket_id);
452 if (rxq->sw_desc == NULL)
453 goto fail_desc_alloc;
455 /* efx datapath is bound to efx control path */
456 rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
457 if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
458 rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
459 rxq->ptr_mask = info->rxq_entries - 1;
460 rxq->batch_max = info->batch_max;
461 rxq->prefix_size = info->prefix_size;
462 rxq->refill_threshold = info->refill_threshold;
463 rxq->buf_size = info->buf_size;
464 rxq->refill_mb_pool = info->refill_mb_pool;
476 static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
478 sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
480 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
482 rte_free(rxq->sw_desc);
486 static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
488 sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
489 __rte_unused unsigned int evq_read_ptr)
491 /* libefx-based datapath is specific to libefx-based PMD */
492 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
493 struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
495 rxq->common = crxq->common;
497 rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
499 sfc_efx_rx_qrefill(rxq);
501 rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
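/*
 * Note on the two flags set above: SFC_EFX_RXQ_FLAG_STARTED marks the queue
 * as started on the datapath side and is cleared by qpurge() once the
 * remaining buffers have been returned, while SFC_EFX_RXQ_FLAG_RUNNING
 * gates the fast path: sfc_efx_recv_pkts() and sfc_efx_rx_qdesc_npending()
 * bail out early while it is not set, and qstop() clears it before the
 * queue is flushed.
 */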
506 static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
508 sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
509 __rte_unused unsigned int *evq_read_ptr)
511 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
513 rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
515 /* libefx-based datapath is bound to libefx-based PMD and uses
516 * the event queue structure directly, so there is no need to
517 * return the EvQ read pointer.
521 static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
523 sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
525 struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
527 struct sfc_efx_rx_sw_desc *rxd;
529 for (i = rxq->completed; i != rxq->added; ++i) {
530 rxd = &rxq->sw_desc[i & rxq->ptr_mask];
531 rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
533 /* Packed stream relies on 0 in inactive SW desc.
534 * Rx queue stop is not performance critical, so
535 * there is no harm in always doing it.
541 rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
544 struct sfc_dp_rx sfc_efx_rx = {
546 .name = SFC_KVARG_DATAPATH_EFX,
550 .features = SFC_DP_RX_FEAT_SCATTER,
551 .qsize_up_rings = sfc_efx_rx_qsize_up_rings,
552 .qcreate = sfc_efx_rx_qcreate,
553 .qdestroy = sfc_efx_rx_qdestroy,
554 .qstart = sfc_efx_rx_qstart,
555 .qstop = sfc_efx_rx_qstop,
556 .qpurge = sfc_efx_rx_qpurge,
557 .supported_ptypes_get = sfc_efx_supported_ptypes_get,
558 .qdesc_npending = sfc_efx_rx_qdesc_npending,
559 .qdesc_status = sfc_efx_rx_qdesc_status,
560 .pkt_burst = sfc_efx_recv_pkts,
564 sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
568 SFC_ASSERT(sw_index < sa->rxq_count);
569 rxq = sa->rxq_info[sw_index].rxq;
571 if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
574 return sa->dp_rx->qdesc_npending(rxq->dp);
578 sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
580 struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
582 return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
586 sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
589 unsigned int retry_count;
590 unsigned int wait_count;
593 rxq = sa->rxq_info[sw_index].rxq;
594 SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
597 * Retry Rx queue flushing if the flush fails or times out.
598 * In the worst case it can delay for 6 seconds.
600 for (retry_count = 0;
601 ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
602 (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
604 rc = efx_rx_qflush(rxq->common);
606 rxq->state |= (rc == EALREADY) ?
607 SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
610 rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
611 rxq->state |= SFC_RXQ_FLUSHING;
614 * Wait for the Rx queue flush done or failed event for at
615 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and no more
616 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
617 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
621 rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
622 sfc_ev_qpoll(rxq->evq);
623 } while ((rxq->state & SFC_RXQ_FLUSHING) &&
624 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
626 if (rxq->state & SFC_RXQ_FLUSHING)
627 sfc_err(sa, "RxQ %u flush timed out", sw_index);
629 if (rxq->state & SFC_RXQ_FLUSH_FAILED)
630 sfc_err(sa, "RxQ %u flush failed", sw_index);
632 if (rxq->state & SFC_RXQ_FLUSHED)
633 sfc_info(sa, "RxQ %u flushed", sw_index);
636 sa->dp_rx->qpurge(rxq->dp);
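/*
 * Regardless of whether the flush succeeded, failed or timed out, the
 * qpurge() call above returns any buffers still referenced by the ring to
 * the mempool, so the queue can be destroyed without leaking mbufs.
 */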
640 sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
642 boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
643 struct sfc_port *port = &sa->port;
647 * If promiscuous or all-multicast mode has been requested, setting
648 * the filter for the default Rx queue might fail, in particular, when
649 * running over a PCI function which is not a member of the corresponding
650 * privilege groups; if this occurs, a few iterations are made to
651 * repeat this step without the promiscuous and all-multicast flags set
654 rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
657 else if (rc != EOPNOTSUPP)
661 sfc_warn(sa, "promiscuous mode has been requested, "
662 "but the HW rejects it");
663 sfc_warn(sa, "promiscuous mode will be disabled");
665 port->promisc = B_FALSE;
666 rc = sfc_set_rx_mode(sa);
673 if (port->allmulti) {
674 sfc_warn(sa, "all-multicast mode has been requested, "
675 "but the HW rejects it");
676 sfc_warn(sa, "all-multicast mode will be disabled");
678 port->allmulti = B_FALSE;
679 rc = sfc_set_rx_mode(sa);
690 sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
692 struct sfc_port *port = &sa->port;
693 struct sfc_rxq_info *rxq_info;
698 sfc_log_init(sa, "sw_index=%u", sw_index);
700 SFC_ASSERT(sw_index < sa->rxq_count);
702 rxq_info = &sa->rxq_info[sw_index];
704 SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
708 rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
712 rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
713 &rxq->mem, rxq_info->entries,
714 0 /* not used on EF10 */, rxq_info->type_flags,
715 evq->common, &rxq->common);
717 goto fail_rx_qcreate;
719 efx_rx_qenable(rxq->common);
721 rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
725 rxq->state |= SFC_RXQ_STARTED;
727 if ((sw_index == 0) && !port->isolated) {
728 rc = sfc_rx_default_rxq_set_filter(sa, rxq);
730 goto fail_mac_filter_default_rxq_set;
733 /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
734 sa->eth_dev->data->rx_queue_state[sw_index] =
735 RTE_ETH_QUEUE_STATE_STARTED;
739 fail_mac_filter_default_rxq_set:
740 sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
743 sfc_rx_qflush(sa, sw_index);
753 sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
755 struct sfc_rxq_info *rxq_info;
758 sfc_log_init(sa, "sw_index=%u", sw_index);
760 SFC_ASSERT(sw_index < sa->rxq_count);
762 rxq_info = &sa->rxq_info[sw_index];
765 if (rxq->state == SFC_RXQ_INITIALIZED)
767 SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
769 /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
770 sa->eth_dev->data->rx_queue_state[sw_index] =
771 RTE_ETH_QUEUE_STATE_STOPPED;
773 sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
776 efx_mac_filter_default_rxq_clear(sa->nic);
778 sfc_rx_qflush(sa, sw_index);
780 rxq->state = SFC_RXQ_INITIALIZED;
782 efx_rx_qdestroy(rxq->common);
784 sfc_ev_qstop(rxq->evq);
788 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
789 const struct rte_eth_rxconf *rx_conf)
793 if (rx_conf->rx_thresh.pthresh != 0 ||
794 rx_conf->rx_thresh.hthresh != 0 ||
795 rx_conf->rx_thresh.wthresh != 0) {
797 "RxQ prefetch/host/writeback thresholds are not supported");
800 if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
802 "RxQ free threshold too large: %u vs maximum %u",
803 rx_conf->rx_free_thresh, rxq_max_fill_level);
807 if (rx_conf->rx_drop_en == 0) {
808 sfc_err(sa, "RxQ drop disable is not supported");
816 sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
821 /* The mbuf object itself is always cache line aligned */
822 order = rte_bsf32(RTE_CACHE_LINE_SIZE);
824 /* Data offset from mbuf object start */
825 data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
826 RTE_PKTMBUF_HEADROOM;
828 order = MIN(order, rte_bsf32(data_off));
830 return 1u << order;
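/*
 * Illustration with typical values (assumed here, not derived from the
 * pool): RTE_CACHE_LINE_SIZE == 64, sizeof(struct rte_mbuf) == 128, zero
 * private area and RTE_PKTMBUF_HEADROOM == 128 give data_off == 256, so
 * order == MIN(6, 8) == 6 and the guaranteed data alignment is 64 bytes.
 */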
834 sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
836 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
837 const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
838 const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
840 unsigned int buf_aligned;
841 unsigned int start_alignment;
842 unsigned int end_padding_alignment;
844 /* Below it is assumed that both alignments are powers of 2 */
845 SFC_ASSERT(rte_is_power_of_2(nic_align_start));
846 SFC_ASSERT(rte_is_power_of_2(nic_align_end));
849 * The mbuf is always cache line aligned; double-check
850 * that it meets the Rx buffer start alignment requirements.
853 /* Start from mbuf pool data room size */
854 buf_size = rte_pktmbuf_data_room_size(mb_pool);
856 /* Remove headroom */
857 if (buf_size <= RTE_PKTMBUF_HEADROOM) {
859 "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
860 mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
863 buf_size -= RTE_PKTMBUF_HEADROOM;
865 /* Calculate guaranteed data start alignment */
866 buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
868 /* Reserve space for start alignment */
869 if (buf_aligned < nic_align_start) {
870 start_alignment = nic_align_start - buf_aligned;
871 if (buf_size <= start_alignment) {
873 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
875 rte_pktmbuf_data_room_size(mb_pool),
876 RTE_PKTMBUF_HEADROOM, start_alignment);
879 buf_aligned = nic_align_start;
880 buf_size -= start_alignment;
885 /* Make sure that end padding does not write beyond the buffer */
886 if (buf_aligned < nic_align_end) {
888 * Estimate space which can be lost. If the guaranteed buffer
889 * size is odd, the lost space is (nic_align_end - 1). A more
890 * accurate formula is used below.
892 end_padding_alignment = nic_align_end -
893 MIN(buf_aligned, 1u << rte_bsf32(buf_size));
894 if (buf_size <= end_padding_alignment) {
896 "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
898 rte_pktmbuf_data_room_size(mb_pool),
899 RTE_PKTMBUF_HEADROOM, start_alignment,
900 end_padding_alignment);
903 buf_size -= end_padding_alignment;
906 * Start is aligned the same as or better than the end, so just align the length.
909 buf_size = P2ALIGN(buf_size, nic_align_end);
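/*
 * Worked example under assumed NIC requirements (illustrative only):
 * a data room of 2176 bytes minus a 128 byte headroom leaves 2048 bytes;
 * with 64 byte guaranteed data alignment and both nic_align_start and
 * nic_align_end equal to 64, no start space is reserved, the end padding
 * estimate branch is skipped and P2ALIGN(2048, 64) keeps the full
 * 2048 byte buffer.
 */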
916 sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
917 uint16_t nb_rx_desc, unsigned int socket_id,
918 const struct rte_eth_rxconf *rx_conf,
919 struct rte_mempool *mb_pool)
921 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
923 unsigned int rxq_entries;
924 unsigned int evq_entries;
925 unsigned int rxq_max_fill_level;
927 struct sfc_rxq_info *rxq_info;
930 struct sfc_dp_rx_qcreate_info info;
932 rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
933 &rxq_max_fill_level);
935 goto fail_size_up_rings;
937 rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
941 buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
943 sfc_err(sa, "RxQ %u mbuf pool object size is too small",
949 if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
950 !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
951 sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
952 "object size is too small", sw_index);
953 sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
954 "PDU size %u plus Rx prefix %u bytes",
955 sw_index, buf_size, (unsigned int)sa->port.pdu,
956 encp->enc_rx_prefix_size);
961 SFC_ASSERT(sw_index < sa->rxq_count);
962 rxq_info = &sa->rxq_info[sw_index];
964 SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
965 rxq_info->entries = rxq_entries;
966 rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
967 rxq_info->type_flags =
968 sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
969 EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
971 if ((encp->enc_tunnel_encapsulations_supported != 0) &&
972 (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
973 rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
975 rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
976 evq_entries, socket_id, &evq);
981 rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
989 rxq->hw_index = sw_index;
990 rxq->refill_threshold =
991 RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
992 rxq->refill_mb_pool = mb_pool;
994 rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
995 socket_id, &rxq->mem);
999 memset(&info, 0, sizeof(info));
1000 info.refill_mb_pool = rxq->refill_mb_pool;
1001 info.refill_threshold = rxq->refill_threshold;
1002 info.buf_size = buf_size;
1003 info.batch_max = encp->enc_rx_batch_max;
1004 info.prefix_size = encp->enc_rx_prefix_size;
1006 #if EFSYS_OPT_RX_SCALE
1007 if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
1008 info.flags |= SFC_RXQ_FLAG_RSS_HASH;
1011 info.rxq_entries = rxq_info->entries;
1012 info.rxq_hw_ring = rxq->mem.esm_base;
1013 info.evq_entries = evq_entries;
1014 info.evq_hw_ring = evq->mem.esm_base;
1015 info.hw_index = rxq->hw_index;
1016 info.mem_bar = sa->mem_bar.esb_base;
1018 rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
1019 &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
1020 socket_id, &info, &rxq->dp);
1022 goto fail_dp_rx_qcreate;
1024 evq->dp_rxq = rxq->dp;
1026 rxq->state = SFC_RXQ_INITIALIZED;
1028 rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
1033 sfc_dma_free(sa, &rxq->mem);
1036 rxq_info->rxq = NULL;
1043 rxq_info->entries = 0;
1047 sfc_log_init(sa, "failed %d", rc);
1052 sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
1054 struct sfc_rxq_info *rxq_info;
1055 struct sfc_rxq *rxq;
1057 SFC_ASSERT(sw_index < sa->rxq_count);
1059 rxq_info = &sa->rxq_info[sw_index];
1061 rxq = rxq_info->rxq;
1062 SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
1064 sa->dp_rx->qdestroy(rxq->dp);
1067 rxq_info->rxq = NULL;
1068 rxq_info->entries = 0;
1070 sfc_dma_free(sa, &rxq->mem);
1072 sfc_ev_qfini(rxq->evq);
1078 #if EFSYS_OPT_RX_SCALE
1080 sfc_rte_to_efx_hash_type(uint64_t rss_hf)
1082 efx_rx_hash_type_t efx_hash_types = 0;
1084 if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
1085 ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
1086 efx_hash_types |= EFX_RX_HASH_IPV4;
1088 if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
1089 efx_hash_types |= EFX_RX_HASH_TCPIPV4;
1091 if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
1092 ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
1093 efx_hash_types |= EFX_RX_HASH_IPV6;
1095 if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
1096 efx_hash_types |= EFX_RX_HASH_TCPIPV6;
1098 return efx_hash_types;
1102 sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
1104 uint64_t rss_hf = 0;
1106 if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
1107 rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
1108 ETH_RSS_NONFRAG_IPV4_OTHER);
1110 if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
1111 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
1113 if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
1114 rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
1115 ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
1117 if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
1118 rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
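/*
 * The two helpers above translate between the DPDK ETH_RSS_* bits and the
 * libefx EFX_RX_HASH_* types in both directions. Only IPv4/IPv6 and
 * TCP-over-IPv4/IPv6 hashing is mapped; UDP-specific ETH_RSS_* bits have
 * no counterpart here and are simply not translated.
 */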
1124 #if EFSYS_OPT_RX_SCALE
1126 sfc_rx_rss_config(struct sfc_adapter *sa)
1130 if (sa->rss_channels > 0) {
1131 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1132 EFX_RX_HASHALG_TOEPLITZ,
1133 sa->rss_hash_types, B_TRUE);
1137 rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1139 sizeof(sa->rss_key));
1143 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1144 sa->rss_tbl, RTE_DIM(sa->rss_tbl));
1152 sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
1159 sfc_rx_start(struct sfc_adapter *sa)
1161 unsigned int sw_index;
1164 sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
1166 rc = efx_rx_init(sa->nic);
1170 rc = sfc_rx_rss_config(sa);
1172 goto fail_rss_config;
1174 for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
1175 if ((!sa->rxq_info[sw_index].deferred_start ||
1176 sa->rxq_info[sw_index].deferred_started)) {
1177 rc = sfc_rx_qstart(sa, sw_index);
1179 goto fail_rx_qstart;
1186 while (sw_index-- > 0)
1187 sfc_rx_qstop(sa, sw_index);
1190 efx_rx_fini(sa->nic);
1193 sfc_log_init(sa, "failed %d", rc);
1198 sfc_rx_stop(struct sfc_adapter *sa)
1200 unsigned int sw_index;
1202 sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
1204 sw_index = sa->rxq_count;
1205 while (sw_index-- > 0) {
1206 if (sa->rxq_info[sw_index].rxq != NULL)
1207 sfc_rx_qstop(sa, sw_index);
1210 efx_rx_fini(sa->nic);
1214 sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
1216 struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
1217 unsigned int max_entries;
1219 max_entries = EFX_RXQ_MAXNDESCS;
1220 SFC_ASSERT(rte_is_power_of_2(max_entries));
1222 rxq_info->max_entries = max_entries;
1228 sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
1232 switch (rxmode->mq_mode) {
1233 case ETH_MQ_RX_NONE:
1234 /* No special checks are required */
1236 #if EFSYS_OPT_RX_SCALE
1238 if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
1239 sfc_err(sa, "RSS is not available");
1245 sfc_err(sa, "Rx multi-queue mode %u not supported",
1250 if (rxmode->header_split) {
1251 sfc_err(sa, "Header split on Rx not supported");
1255 if (rxmode->hw_vlan_filter) {
1256 sfc_err(sa, "HW VLAN filtering not supported");
1260 if (rxmode->hw_vlan_strip) {
1261 sfc_err(sa, "HW VLAN stripping not supported");
1265 if (rxmode->hw_vlan_extend) {
1267 "Q-in-Q HW VLAN stripping not supported");
1271 if (!rxmode->hw_strip_crc) {
1273 "FCS stripping control not supported - always stripped");
1274 rxmode->hw_strip_crc = 1;
1277 if (rxmode->enable_scatter &&
1278 (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
1279 sfc_err(sa, "Rx scatter not supported by %s datapath",
1280 sa->dp_rx->dp.name);
1284 if (rxmode->enable_lro) {
1285 sfc_err(sa, "LRO not supported");
1293 * Destroy excess queues that are no longer needed after reconfiguration
1294 * or complete close.
1297 sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
1301 SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
1303 sw_index = sa->rxq_count;
1304 while (--sw_index >= (int)nb_rx_queues) {
1305 if (sa->rxq_info[sw_index].rxq != NULL)
1306 sfc_rx_qfini(sa, sw_index);
1309 sa->rxq_count = nb_rx_queues;
1313 * Initialize the Rx subsystem.
1315 * Called at the device (re)configuration stage when the number of receive queues is
1316 * specified together with other device-level receive configuration.
1318 * It should be used to allocate NUMA-unaware resources.
1321 sfc_rx_configure(struct sfc_adapter *sa)
1323 struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
1324 const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
1327 sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
1328 nb_rx_queues, sa->rxq_count);
1330 rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
1332 goto fail_check_mode;
1334 if (nb_rx_queues == sa->rxq_count)
1337 if (sa->rxq_info == NULL) {
1339 sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
1340 sizeof(sa->rxq_info[0]), 0,
1342 if (sa->rxq_info == NULL)
1343 goto fail_rxqs_alloc;
1345 struct sfc_rxq_info *new_rxq_info;
1347 if (nb_rx_queues < sa->rxq_count)
1348 sfc_rx_fini_queues(sa, nb_rx_queues);
1352 rte_realloc(sa->rxq_info,
1353 nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
1354 if (new_rxq_info == NULL && nb_rx_queues > 0)
1355 goto fail_rxqs_realloc;
1357 sa->rxq_info = new_rxq_info;
1358 if (nb_rx_queues > sa->rxq_count)
1359 memset(&sa->rxq_info[sa->rxq_count], 0,
1360 (nb_rx_queues - sa->rxq_count) *
1361 sizeof(sa->rxq_info[0]));
1364 while (sa->rxq_count < nb_rx_queues) {
1365 rc = sfc_rx_qinit_info(sa, sa->rxq_count);
1367 goto fail_rx_qinit_info;
1372 #if EFSYS_OPT_RX_SCALE
1373 sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
1374 MIN(sa->rxq_count, EFX_MAXRSS) : 0;
1376 if (sa->rss_channels > 0) {
1377 unsigned int sw_index;
1379 for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
1380 sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
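/*
 * Example of the default indirection table built above: with
 * rss_channels == 4 the EFX_RSS_TBL_SIZE entries are filled round-robin
 * as 0, 1, 2, 3, 0, 1, 2, 3, ... so that traffic is spread evenly across
 * the first four Rx queues, unless the application later overrides the
 * table.
 */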
1393 sfc_log_init(sa, "failed %d", rc);
1398 * Shut down the Rx subsystem.
1400 * Called at the device close stage, for example, before device shutdown.
1403 sfc_rx_close(struct sfc_adapter *sa)
1405 sfc_rx_fini_queues(sa, 0);
1407 sa->rss_channels = 0;
1409 rte_free(sa->rxq_info);
1410 sa->rxq_info = NULL;