 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_mempool.h>

#include "sfc_debug.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)
/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
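/*
 * Worked example derived from the constants above: each flush attempt polls
 * the event queue for at most SFC_RX_QFLUSH_POLL_ATTEMPTS *
 * SFC_RX_QFLUSH_POLL_WAIT_MS = 2000 * 1 ms = 2 seconds, and up to
 * SFC_RX_QFLUSH_ATTEMPTS = 3 attempts are made, which gives the roughly
 * 6 second worst case mentioned in sfc_rx_qflush() below.
 */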
sfc_rx_qflush_done(struct sfc_rxq *rxq)
        rxq->state |= SFC_RXQ_FLUSHED;
        rxq->state &= ~SFC_RXQ_FLUSHING;

sfc_rx_qflush_failed(struct sfc_rxq *rxq)
        rxq->state |= SFC_RXQ_FLUSH_FAILED;
        rxq->state &= ~SFC_RXQ_FLUSHING;

sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
        unsigned int free_space;
        void *objs[SFC_RX_REFILL_BULK];
        efsys_dma_addr_t addr[RTE_DIM(objs)];
        unsigned int added = rxq->added;
        struct sfc_efx_rx_sw_desc *rxd;
        uint16_t port_id = rxq->dp.dpq.port_id;

        free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
                (added - rxq->completed);

        if (free_space < rxq->refill_threshold)

        bulks = free_space / RTE_DIM(objs);
        /* refill_threshold guarantees that bulks is positive */
        SFC_ASSERT(bulks > 0);

        id = added & rxq->ptr_mask;
        if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
                                          RTE_DIM(objs)) < 0)) {
                /*
                 * It is hardly a safe way to increment a counter
                 * from different contexts, but all PMDs do it.
                 */
                rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
                /* Return if we have posted nothing yet */
                if (added == rxq->added)

                for (i = 0; i < RTE_DIM(objs);
                     ++i, id = (id + 1) & rxq->ptr_mask) {
                        rxd = &rxq->sw_desc[id];

                        SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
                        m->data_off = RTE_PKTMBUF_HEADROOM;
                        SFC_ASSERT(m->next == NULL);
                        SFC_ASSERT(m->nb_segs == 1);

                        addr[i] = rte_pktmbuf_mtophys(m);

                efx_rx_qpost(rxq->common, addr, rxq->buf_size,
                             RTE_DIM(objs), rxq->completed, added);
                added += RTE_DIM(objs);
        } while (--bulks > 0);

        SFC_ASSERT(added != rxq->added);

        efx_rx_qpush(rxq->common, added, &rxq->pushed);
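        /*
         * Note: the refill loop above posts mbufs in bulks of
         * SFC_RX_REFILL_BULK descriptors and issues a single efx_rx_qpush()
         * doorbell for everything added, so the hardware sees the new
         * descriptors only once per refill call.
         */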
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
        uint64_t mbuf_flags = 0;

        switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
        case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
                mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
                mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
                RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
                           PKT_RX_IP_CKSUM_UNKNOWN);

        switch ((desc_flags &
                 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
        case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
        case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
                mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
                mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
                RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
                SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
                           PKT_RX_L4_CKSUM_UNKNOWN);

sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
        return RTE_PTYPE_L2_ETHER |
                ((desc_flags & EFX_PKT_IPV4) ?
                        RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_IPV6) ?
                        RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
                ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
                ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
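/*
 * Example of the two conversions above: a non-fragmented IPv4/TCP packet
 * with both checksums valid carries EFX_PKT_IPV4 | EFX_CKSUM_IPV4 |
 * EFX_PKT_TCP | EFX_CKSUM_TCPUDP, which maps to ol_flags
 * PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD and packet type
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP.
 */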
static const uint32_t *
sfc_efx_supported_ptypes_get(void)
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,

sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
#if EFSYS_OPT_RX_SCALE
        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)

        mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

        if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
                m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
                                                      EFX_RX_HASHALG_TOEPLITZ,
                m->ol_flags |= PKT_RX_RSS_HASH;

sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct sfc_dp_rxq *dp_rxq = rx_queue;
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        unsigned int completed;
        unsigned int prefix_size = rxq->prefix_size;
        unsigned int done_pkts = 0;
        boolean_t discard_next = B_FALSE;
        struct rte_mbuf *scatter_pkt = NULL;

        if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))

        sfc_ev_qpoll(rxq->evq);

        completed = rxq->completed;
        while (completed != rxq->pending && done_pkts < nb_pkts) {
                struct sfc_efx_rx_sw_desc *rxd;
                unsigned int seg_len;
                unsigned int desc_flags;

                id = completed++ & rxq->ptr_mask;
                rxd = &rxq->sw_desc[id];

                desc_flags = rxd->flags;

                if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))

                if (desc_flags & EFX_PKT_PREFIX_LEN) {
                        rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
                                rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
                        seg_len = rxd->size - prefix_size;

                rte_pktmbuf_data_len(m) = seg_len;
                rte_pktmbuf_pkt_len(m) = seg_len;

                if (scatter_pkt != NULL) {
                        if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
                                rte_pktmbuf_free(scatter_pkt);

                        /* The packet to deliver */

                if (desc_flags & EFX_PKT_CONT) {
                        /* The packet is scattered, more fragments to come */
                        /* Further fragments have no prefix */

                /* Scattered packet is done */
                /* The first fragment of the packet has a prefix */
                prefix_size = rxq->prefix_size;

                        sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
                        sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

                /*
                 * Extract RSS hash from the packet prefix and
                 * set the corresponding field (if needed and possible)
                 */
                sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

                m->data_off += prefix_size;

                discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
                rte_mempool_put(rxq->refill_mb_pool, m);

        /* pending is only moved when entire packet is received */
        SFC_ASSERT(scatter_pkt == NULL);

        rxq->completed = completed;

        sfc_efx_rx_qrefill(rxq);

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)

        sfc_ev_qpoll(rxq->evq);

        return rxq->pending - rxq->completed;

sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
        const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
        struct rte_eth_dev *eth_dev;
        struct sfc_adapter *sa;

        SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
        eth_dev = &rte_eth_devices[dpq->port_id];

        sa = eth_dev->data->dev_private;

        SFC_ASSERT(dpq->queue_id < sa->rxq_count);
        rxq = sa->rxq_info[dpq->queue_id].rxq;
        SFC_ASSERT(rxq != NULL);

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
                   const struct rte_pci_addr *pci_addr, int socket_id,
                   const struct sfc_dp_rx_qcreate_info *info,
                   struct sfc_dp_rxq **dp_rxqp)
        struct sfc_efx_rxq *rxq;

        rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);

        sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

        rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
                                         sizeof(*rxq->sw_desc),
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->sw_desc == NULL)
                goto fail_desc_alloc;

        /* efx datapath is bound to efx control path */
        rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
        if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
                rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
        rxq->ptr_mask = info->rxq_entries - 1;
        rxq->batch_max = info->batch_max;
        rxq->prefix_size = info->prefix_size;
        rxq->refill_threshold = info->refill_threshold;
        rxq->buf_size = info->buf_size;
        rxq->refill_mb_pool = info->refill_mb_pool;

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rte_free(rxq->sw_desc);

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
                  __rte_unused unsigned int evq_read_ptr)
        /* libefx-based datapath is specific to libefx-based PMD */
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

        rxq->common = crxq->common;

        rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

        sfc_efx_rx_qrefill(rxq);

        rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
                 __rte_unused unsigned int *evq_read_ptr)
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
        /*
         * libefx-based datapath is bound to libefx-based PMD and uses
         * the event queue structure directly, so there is no need to
         * return the EvQ read pointer.
         */
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
        struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
        struct sfc_efx_rx_sw_desc *rxd;

        for (i = rxq->completed; i != rxq->added; ++i) {
                rxd = &rxq->sw_desc[i & rxq->ptr_mask];
                rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);

                /*
                 * Packed stream relies on 0 in inactive SW desc.
                 * Rx queue stop is not performance critical, so
                 * there is no harm to do it always.
                 */

        rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;

struct sfc_dp_rx sfc_efx_rx = {
        .name                   = SFC_KVARG_DATAPATH_EFX,
        .features               = SFC_DP_RX_FEAT_SCATTER,
        .qcreate                = sfc_efx_rx_qcreate,
        .qdestroy               = sfc_efx_rx_qdestroy,
        .qstart                 = sfc_efx_rx_qstart,
        .qstop                  = sfc_efx_rx_qstop,
        .qpurge                 = sfc_efx_rx_qpurge,
        .supported_ptypes_get   = sfc_efx_supported_ptypes_get,
        .qdesc_npending         = sfc_efx_rx_qdesc_npending,
        .pkt_burst              = sfc_efx_recv_pkts,

sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq = sa->rxq_info[sw_index].rxq;

        if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)

        return sa->dp_rx->qdesc_npending(rxq->dp);

sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
        struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

        return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);

sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
        unsigned int retry_count;
        unsigned int wait_count;

        rxq = sa->rxq_info[sw_index].rxq;
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
        /*
         * Retry Rx queue flushing in the case of a flush failure or
         * timeout. In the worst case it can delay for 6 seconds.
         */
        for (retry_count = 0;
             ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
             (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
                rc = efx_rx_qflush(rxq->common);
                        rxq->state |= (rc == EALREADY) ?
                                SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
                rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
                rxq->state |= SFC_RXQ_FLUSHING;

                /*
                 * Wait for Rx queue flush done or failed event at least
                 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
                 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
                 */
                        rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(rxq->evq);
                } while ((rxq->state & SFC_RXQ_FLUSHING) &&
                         (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

                if (rxq->state & SFC_RXQ_FLUSHING)
                        sfc_err(sa, "RxQ %u flush timed out", sw_index);

                if (rxq->state & SFC_RXQ_FLUSH_FAILED)
                        sfc_err(sa, "RxQ %u flush failed", sw_index);

                if (rxq->state & SFC_RXQ_FLUSHED)
                        sfc_info(sa, "RxQ %u flushed", sw_index);

        sa->dp_rx->qpurge(rxq->dp);

sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
        boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
        struct sfc_port *port = &sa->port;
        /*
         * If promiscuous or all-multicast mode has been requested, setting
         * the filter for the default Rx queue might fail, in particular when
         * running over a PCI function which is not a member of the
         * corresponding privilege groups; if this occurs, a few iterations
         * are made to repeat this step without the promiscuous and
         * all-multicast flags set.
         */
        rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
        else if (rc != EOPNOTSUPP)

                sfc_warn(sa, "promiscuous mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "promiscuous mode will be disabled");

                port->promisc = B_FALSE;
                rc = sfc_set_rx_mode(sa);

        if (port->allmulti) {
                sfc_warn(sa, "all-multicast mode has been requested, "
                             "but the HW rejects it");
                sfc_warn(sa, "all-multicast mode will be disabled");

                port->allmulti = B_FALSE;
                rc = sfc_set_rx_mode(sa);

sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
        struct sfc_port *port = &sa->port;
        struct sfc_rxq_info *rxq_info;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));

        rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
                            &rxq->mem, rxq_info->entries,
                            0 /* not used on EF10 */, evq->common,
                goto fail_rx_qcreate;

        efx_rx_qenable(rxq->common);

        rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);

        rxq->state |= SFC_RXQ_STARTED;

        if ((sw_index == 0) && !port->isolated) {
                rc = sfc_rx_default_rxq_set_filter(sa, rxq);
                        goto fail_mac_filter_default_rxq_set;

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STARTED;

fail_mac_filter_default_rxq_set:
        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

        sfc_rx_qflush(sa, sw_index);

sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
        struct sfc_rxq_info *rxq_info;

        sfc_log_init(sa, "sw_index=%u", sw_index);

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        if (rxq->state == SFC_RXQ_INITIALIZED)
        SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

        /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
        sa->eth_dev->data->rx_queue_state[sw_index] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

        efx_mac_filter_default_rxq_clear(sa->nic);

        sfc_rx_qflush(sa, sw_index);

        rxq->state = SFC_RXQ_INITIALIZED;

        efx_rx_qdestroy(rxq->common);

        sfc_ev_qstop(rxq->evq);

sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
                   const struct rte_eth_rxconf *rx_conf)
        const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);

        if (rx_conf->rx_thresh.pthresh != 0 ||
            rx_conf->rx_thresh.hthresh != 0 ||
            rx_conf->rx_thresh.wthresh != 0) {
                        "RxQ prefetch/host/writeback thresholds are not supported");

        if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
                        "RxQ free threshold too large: %u vs maximum %u",
                        rx_conf->rx_free_thresh, rx_free_thresh_max);

        if (rx_conf->rx_drop_en == 0) {
                sfc_err(sa, "RxQ drop disable is not supported");
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
        /* The mbuf object itself is always cache line aligned */
        order = rte_bsf32(RTE_CACHE_LINE_SIZE);

        /* Data offset from mbuf object start */
        data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
                RTE_PKTMBUF_HEADROOM;

        order = MIN(order, rte_bsf32(data_off));

        return 1u << (order - 1);
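/*
 * Worked example for the calculation above, assuming a 64-byte cache line,
 * sizeof(struct rte_mbuf) == 128, zero mbuf private area and the default
 * 128-byte RTE_PKTMBUF_HEADROOM: rte_bsf32(64) == 6 and
 * data_off == 128 + 0 + 128 == 256 (rte_bsf32 == 8), so order == 6 and the
 * reported guaranteed data alignment is 1u << (6 - 1) == 32 bytes.
 */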
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
        const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
        unsigned int buf_aligned;
        unsigned int start_alignment;
        unsigned int end_padding_alignment;
        /* Below it is assumed that both alignments are powers of 2 */
        SFC_ASSERT(rte_is_power_of_2(nic_align_start));
        SFC_ASSERT(rte_is_power_of_2(nic_align_end));

        /*
         * The mbuf is always cache line aligned; double-check
         * that it meets the Rx buffer start alignment requirements.
         */
        /* Start from mbuf pool data room size */
        buf_size = rte_pktmbuf_data_room_size(mb_pool);

        /* Remove headroom */
        if (buf_size <= RTE_PKTMBUF_HEADROOM) {
                        "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
                        mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);

        buf_size -= RTE_PKTMBUF_HEADROOM;

        /* Calculate guaranteed data start alignment */
        buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

        /* Reserve space for start alignment */
        if (buf_aligned < nic_align_start) {
                start_alignment = nic_align_start - buf_aligned;
                if (buf_size <= start_alignment) {
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment);

                buf_aligned = nic_align_start;
                buf_size -= start_alignment;

        /* Make sure that end padding does not write beyond the buffer */
        if (buf_aligned < nic_align_end) {
                /*
                 * Estimate space which can be lost. If the guaranteed
                 * buffer size is odd, the lost space is (nic_align_end - 1).
                 * A more accurate formula is below.
                 */
                end_padding_alignment = nic_align_end -
                        MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
                if (buf_size <= end_padding_alignment) {
                                "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
                                rte_pktmbuf_data_room_size(mb_pool),
                                RTE_PKTMBUF_HEADROOM, start_alignment,
                                end_padding_alignment);

                buf_size -= end_padding_alignment;
                /*
                 * Start is aligned the same or better than end,
                 * just align the length.
                 */
                buf_size = P2ALIGN(buf_size, nic_align_end);
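        /*
         * To summarize the steps above: the usable Rx buffer size is the
         * mempool data room minus RTE_PKTMBUF_HEADROOM, minus any space
         * reserved to reach the NIC start alignment, minus space that end
         * padding may consume, and finally rounded down to the NIC end
         * alignment.
         */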
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_rx_desc, unsigned int socket_id,
             const struct rte_eth_rxconf *rx_conf,
             struct rte_mempool *mb_pool)
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        struct sfc_rxq_info *rxq_info;
        struct sfc_dp_rx_qcreate_info info;

        rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);

        buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
                sfc_err(sa, "RxQ %u mbuf pool object size is too small",

        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
            !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
                        "PDU size %u plus Rx prefix %u bytes",
                        sw_index, buf_size, (unsigned int)sa->port.pdu,
                        encp->enc_rx_prefix_size);

        SFC_ASSERT(sw_index < sa->rxq_count);
        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
        rxq_info->entries = nb_rx_desc;
                sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
                EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;

        rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
                          rxq_info->entries, socket_id, &evq);

        rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,

        rxq->hw_index = sw_index;
        rxq->refill_threshold =
                RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
        rxq->refill_mb_pool = mb_pool;

        rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
                           socket_id, &rxq->mem);

        memset(&info, 0, sizeof(info));
        info.refill_mb_pool = rxq->refill_mb_pool;
        info.refill_threshold = rxq->refill_threshold;
        info.buf_size = buf_size;
        info.batch_max = encp->enc_rx_batch_max;
        info.prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
        if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
                info.flags |= SFC_RXQ_FLAG_RSS_HASH;

        info.rxq_entries = rxq_info->entries;
        info.rxq_hw_ring = rxq->mem.esm_base;
        info.evq_entries = rxq_info->entries;
        info.evq_hw_ring = evq->mem.esm_base;
        info.hw_index = rxq->hw_index;
        info.mem_bar = sa->mem_bar.esb_base;

        rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
                                &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
                                socket_id, &info, &rxq->dp);
                goto fail_dp_rx_qcreate;

        evq->dp_rxq = rxq->dp;

        rxq->state = SFC_RXQ_INITIALIZED;

        rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

        sfc_dma_free(sa, &rxq->mem);

        rxq_info->rxq = NULL;

        rxq_info->entries = 0;

        sfc_log_init(sa, "failed %d", rc);

sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
        struct sfc_rxq_info *rxq_info;

        SFC_ASSERT(sw_index < sa->rxq_count);

        rxq_info = &sa->rxq_info[sw_index];

        SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

        sa->dp_rx->qdestroy(rxq->dp);

        rxq_info->rxq = NULL;
        rxq_info->entries = 0;

        sfc_dma_free(sa, &rxq->mem);

        sfc_ev_qfini(rxq->evq);

#if EFSYS_OPT_RX_SCALE
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
        efx_rx_hash_type_t efx_hash_types = 0;

        if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV4;

        if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV4;

        if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                       ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_IPV6;

        if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
                efx_hash_types |= EFX_RX_HASH_TCPIPV6;

        return efx_hash_types;
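/*
 * For example, rss_hf == (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_IPV6) selects
 * EFX_RX_HASH_TCPIPV4 | EFX_RX_HASH_IPV6 above; sfc_efx_to_rte_hash_type()
 * below performs the reverse mapping.
 */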
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
        uint64_t rss_hf = 0;

        if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
                rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                           ETH_RSS_NONFRAG_IPV4_OTHER);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
                rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

        if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
                rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                           ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

        if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
                rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

sfc_rx_rss_config(struct sfc_adapter *sa)
#if EFSYS_OPT_RX_SCALE
        if (sa->rss_channels > 0) {
                rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
                                           sa->rss_hash_types, B_TRUE);

                rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
                                          sizeof(sa->rss_key));

                rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
                                          sizeof(sa->rss_tbl));

sfc_rx_start(struct sfc_adapter *sa)
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        rc = efx_rx_init(sa->nic);

        rc = sfc_rx_rss_config(sa);
                goto fail_rss_config;

        for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
                if ((!sa->rxq_info[sw_index].deferred_start ||
                     sa->rxq_info[sw_index].deferred_started)) {
                        rc = sfc_rx_qstart(sa, sw_index);
                                goto fail_rx_qstart;

        while (sw_index-- > 0)
                sfc_rx_qstop(sa, sw_index);

        efx_rx_fini(sa->nic);

        sfc_log_init(sa, "failed %d", rc);

sfc_rx_stop(struct sfc_adapter *sa)
        unsigned int sw_index;

        sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

        sw_index = sa->rxq_count;
        while (sw_index-- > 0) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qstop(sa, sw_index);

        efx_rx_fini(sa->nic);

sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
        struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
        unsigned int max_entries;

        max_entries = EFX_RXQ_MAXNDESCS;
        SFC_ASSERT(rte_is_power_of_2(max_entries));

        rxq_info->max_entries = max_entries;

sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
        switch (rxmode->mq_mode) {
        case ETH_MQ_RX_NONE:
                /* No special checks are required */
#if EFSYS_OPT_RX_SCALE
                if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
                        sfc_err(sa, "RSS is not available");
                sfc_err(sa, "Rx multi-queue mode %u not supported",

        if (rxmode->header_split) {
                sfc_err(sa, "Header split on Rx not supported");

        if (rxmode->hw_vlan_filter) {
                sfc_err(sa, "HW VLAN filtering not supported");

        if (rxmode->hw_vlan_strip) {
                sfc_err(sa, "HW VLAN stripping not supported");

        if (rxmode->hw_vlan_extend) {
                        "Q-in-Q HW VLAN stripping not supported");

        if (!rxmode->hw_strip_crc) {
                         "FCS stripping control not supported - always stripped");
                rxmode->hw_strip_crc = 1;

        if (rxmode->enable_scatter &&
            (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
                sfc_err(sa, "Rx scatter not supported by %s datapath",
                        sa->dp_rx->dp.name);

        if (rxmode->enable_lro) {
                sfc_err(sa, "LRO not supported");

/*
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
        SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

        sw_index = sa->rxq_count;
        while (--sw_index >= (int)nb_rx_queues) {
                if (sa->rxq_info[sw_index].rxq != NULL)
                        sfc_rx_qfini(sa, sw_index);

        sa->rxq_count = nb_rx_queues;
/*
 * Initialize Rx subsystem.
 *
 * Called at the device (re)configuration stage when the number of receive
 * queues is specified together with other device-level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
sfc_rx_configure(struct sfc_adapter *sa)
        struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
        unsigned int sw_index;

        sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
                     nb_rx_queues, sa->rxq_count);

        rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
                goto fail_check_mode;

        if (nb_rx_queues == sa->rxq_count)

        if (sa->rxq_info == NULL) {
                sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
                                                 sizeof(sa->rxq_info[0]), 0,
                if (sa->rxq_info == NULL)
                        goto fail_rxqs_alloc;

                struct sfc_rxq_info *new_rxq_info;

                if (nb_rx_queues < sa->rxq_count)
                        sfc_rx_fini_queues(sa, nb_rx_queues);

                        rte_realloc(sa->rxq_info,
                                    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
                if (new_rxq_info == NULL && nb_rx_queues > 0)
                        goto fail_rxqs_realloc;

                sa->rxq_info = new_rxq_info;
                if (nb_rx_queues > sa->rxq_count)
                        memset(&sa->rxq_info[sa->rxq_count], 0,
                               (nb_rx_queues - sa->rxq_count) *
                               sizeof(sa->rxq_info[0]));

        while (sa->rxq_count < nb_rx_queues) {
                rc = sfc_rx_qinit_info(sa, sa->rxq_count);
                        goto fail_rx_qinit_info;

#if EFSYS_OPT_RX_SCALE
        sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
                           MIN(sa->rxq_count, EFX_MAXRSS) : 0;

        if (sa->rss_channels > 0) {
                for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
                        sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
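                /*
                 * For example, with 4 RSS channels the EFX_RSS_TBL_SIZE-entry
                 * indirection table is filled with the repeating pattern
                 * 0, 1, 2, 3, 0, 1, ... so that traffic is spread evenly
                 * across the configured Rx queues.
                 */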
        sfc_log_init(sa, "failed %d", rc);

/*
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
sfc_rx_close(struct sfc_adapter *sa)
        sfc_rx_fini_queues(sa, 0);

        sa->rss_channels = 0;

        rte_free(sa->rxq_info);
        sa->rxq_info = NULL;