/*-
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mempool.h>

#include "sfc_debug.h"
#include "sfc_tweak.h"
/*
 * Maximum number of Rx queue flush attempts in the case of a failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS (3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS (1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. Together with SFC_RX_QFLUSH_POLL_WAIT_MS it
 * defines the Rx queue flush attempt timeout.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000)
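/*
 * With the values above a single flush attempt is polled for at most
 * SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS = 2000 * 1 ms = 2 s,
 * so with up to SFC_RX_QFLUSH_ATTEMPTS retries the whole flush may take about
 * 6 seconds (see sfc_rx_qflush()).
 */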
sfc_rx_qflush_done(struct sfc_rxq *rxq)
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;

sfc_rx_qflush_failed(struct sfc_rxq *rxq)
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;

sfc_rx_qrefill(struct sfc_rxq *rxq)
	unsigned int free_space;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	struct sfc_rx_sw_desc *rxd;
	uint8_t port_id = rxq->port_id;

	free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
		(added - rxq->completed);
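	/*
	 * free_space is the number of descriptors that may still be posted;
	 * EFX_RXQ_LIMIT() is presumed to report slightly less than the ring
	 * size so that a refill never fills the ring completely.
	 */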
	if (free_space < rxq->refill_threshold)

	bulks = free_space / RTE_DIM(objs);

	id = added & rxq->ptr_mask;
	if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
				 RTE_DIM(objs)) < 0) {
		/*
		 * It is hardly a safe way to increment the counter from
		 * different contexts, but all PMDs do it.
		 */
		rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=

	for (i = 0; i < RTE_DIM(objs);
	     ++i, id = (id + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[id];

		rte_mbuf_refcnt_set(m, 1);
		m->data_off = RTE_PKTMBUF_HEADROOM;

		addr[i] = rte_pktmbuf_mtophys(m);

	efx_rx_qpost(rxq->common, addr, rxq->buf_size,
		     RTE_DIM(objs), rxq->completed, added);
	added += RTE_DIM(objs);

	/* Push the doorbell if something has been posted */
	if (rxq->added != added) {
		efx_rx_qpush(rxq->common, added, &rxq->pushed);
sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);

sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);

sfc_rx_set_rss_hash(struct sfc_rxq *rxq, unsigned int flags, struct rte_mbuf *m)
#if EFSYS_OPT_RX_SCALE
	if ((rxq->flags & SFC_RXQ_FLAG_RSS_HASH) == 0)

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
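	/*
	 * The hash is read from the Rx prefix (pseudo-header) which the NIC
	 * places in front of the packet data (hence the rte_pktmbuf_mtod()
	 * above); it is reported only for IPv4/IPv6 packets for which the
	 * hash is valid.
	 */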
	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
		m->ol_flags |= PKT_RX_RSS_HASH;
sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	struct sfc_rxq *rxq = rx_queue;
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0))

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		struct sfc_rx_sw_desc *rxd;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];

		desc_flags = rxd->flags;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			seg_len = rxd->size - prefix_size;

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_mempool_put(rxq->refill_mb_pool,
			/* The packet to deliver */

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			/* Further fragments have no prefix */

			/* Scattered packet is done */
			/* The first fragment of the packet has a prefix */
			prefix_size = rxq->prefix_size;

			m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags);
			m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags);

			/*
			 * Extract the RSS hash from the packet prefix and
			 * set the corresponding field (if needed and possible)
			 */
			sfc_rx_set_rss_hash(rxq, desc_flags, m);

			m->data_off += prefix_size;

		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mempool_put(rxq->refill_mb_pool, m);
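		/*
		 * If the discarded buffer belongs to a scattered packet and
		 * more fragments follow (EFX_PKT_CONT), discard_next makes
		 * sure the remaining fragments of that packet are dropped too.
		 */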
	/* pending is only moved when the entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq = sa->rxq_info[sw_index].rxq;

	if (rxq == NULL || (rxq->state & SFC_RXQ_RUNNING) == 0)

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
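/*
 * Presumably backs the rte_eth_rx_descriptor_done() API: report whether the
 * descriptor at the given offset from the next one to be processed has
 * already been completed by the HW.
 */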
sfc_rx_qdesc_done(struct sfc_rxq *rxq, unsigned int offset)
	if ((rxq->state & SFC_RXQ_RUNNING) == 0)

	sfc_ev_qpoll(rxq->evq);

	return offset < (rxq->pending - rxq->completed);
sfc_rx_qpurge(struct sfc_rxq *rxq)
	struct sfc_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);

sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
	unsigned int retry_count;
	unsigned int wait_count;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of a flush failure or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
		if (efx_rx_qflush(rxq->common) != 0) {
			rxq->state |= SFC_RXQ_FLUSH_FAILED;

		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for the Rx queue flush done or failed event for at
		 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_info(sa, "RxQ %u flushed", sw_index);
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
	boolean_t rss = (sa->rss_channels > 1) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular when
	 * running over a PCI function which is not a member of the
	 * corresponding privilege groups; if this occurs, a few iterations
	 * will be made to retry this step without the promiscuous and
	 * all-multicast flags set
	 */
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
	else if (rc != EOPNOTSUPP)

		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		rc = sfc_set_rx_mode(sa);

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		rc = sfc_set_rx_mode(sa);

sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	rc = sfc_ev_qstart(sa, evq->evq_index);

	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			    &rxq->mem, rxq_info->entries,
			    0 /* not used on EF10 */, evq->common,
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING);

	rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		goto fail_mac_filter_default_rxq_set;

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

fail_mac_filter_default_rxq_set:
	sfc_rx_qflush(sa, sw_index);

	sfc_ev_qstop(sa, evq->evq_index);

sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	if (rxq->state == SFC_RXQ_INITIALIZED)

	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	rxq->state &= ~SFC_RXQ_RUNNING;

	efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(sa, rxq->evq->evq_index);
sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
		   const struct rte_eth_rxconf *rx_conf)
	const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
			"RxQ prefetch/host/writeback thresholds are not supported");

	if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rx_free_thresh_max);

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");

sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));
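	/*
	 * Worked example with hypothetical numbers: a 64-byte cache line
	 * gives order 6 and data_off = 256 gives 8, so order stays 6 and the
	 * value returned below is 1u << (6 - 1) = 32 bytes of guaranteed
	 * data start alignment.
	 */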
	return 1u << (order - 1);

sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * The mbuf is always cache-line aligned; double-check
	 * that it meets the Rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate the space which can be lost. If the guaranteed
		 * buffer size is odd, the lost space is (nic_align_end - 1).
		 * A more accurate formula is used below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
		buf_size -= end_padding_alignment;

		/*
		 * Start is aligned the same as or better than end,
		 * so just align the length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
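		/*
		 * For illustration (hypothetical numbers): with
		 * nic_align_end = 64 and buf_size = 2000, P2ALIGN() trims the
		 * usable length down to 1984 so that HW end padding cannot
		 * write past the mbuf data room.
		 */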
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rxq_info *rxq_info;
	unsigned int evq_index;

	rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
	rxq_info->entries = nb_rx_desc;
	rxq_info->type =
		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
		EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
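	/*
	 * In scatter mode the HW may spread a single packet over several Rx
	 * buffers, which is why an mbuf pool with a data room smaller than
	 * the PDU is acceptable when scatter is enabled (see the check above).
	 */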
	evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

	rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);

	evq = sa->evq_info[evq_index].evq;

	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);

	rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	rxq->ptr_mask = rxq_info->entries - 1;
	rxq->refill_threshold = rx_conf->rx_free_thresh;
	rxq->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;
	rxq->hw_index = sw_index;
	rxq->port_id = sa->eth_dev->data->port_id;

	/* Cache limits required on the datapath in the RxQ structure */
	rxq->batch_max = encp->enc_rx_batch_max;
	rxq->prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
	if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
		rxq->flags |= SFC_RXQ_FLAG_RSS_HASH;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(sa, evq_index);

	rxq_info->entries = 0;

	sfc_log_init(sa, "failed %d", rc);

sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	rte_free(rxq->sw_desc);
	sfc_dma_free(sa, &rxq->mem);

#if EFSYS_OPT_RX_SCALE
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
	efx_rx_hash_type_t efx_hash_types = 0;

	if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
		       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV4;

	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV4;

	if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
		       ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV6;

	if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV6;

	return efx_hash_types;

sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
	if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
		rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			   ETH_RSS_NONFRAG_IPV4_OTHER);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
		rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			   ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
		rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

sfc_rx_rss_config(struct sfc_adapter *sa)
#if EFSYS_OPT_RX_SCALE
	if (sa->rss_channels > 1) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
					   sa->rss_hash_types, B_TRUE);

		rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
					  sizeof(sa->rss_key));

		rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
					  sizeof(sa->rss_tbl));

sfc_rx_start(struct sfc_adapter *sa)
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);

	rc = sfc_rx_rss_config(sa);
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if ((!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);

	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

	efx_rx_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);

sfc_rx_stop(struct sfc_adapter *sa)
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);

	efx_rx_fini(sa->nic);

sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
	switch (rxmode->mq_mode) {
		/* No special checks are required */
#if EFSYS_OPT_RX_SCALE
		if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
		sfc_err(sa, "Rx multi-queue mode %u not supported",

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");

	if (rxmode->hw_vlan_extend) {
			"Q-in-Q HW VLAN stripping not supported");

	if (!rxmode->hw_strip_crc) {
			"FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");

/**
 * Initialize the Rx subsystem.
 *
 * Called at the device configuration stage when the number of receive queues
 * is specified together with other device-level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
sfc_rx_init(struct sfc_adapter *sa)
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	unsigned int sw_index;

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
		goto fail_check_mode;

	sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

	sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
					 sizeof(struct sfc_rxq_info), 0,
	if (sa->rxq_info == NULL)
		goto fail_rxqs_alloc;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		rc = sfc_rx_qinit_info(sa, sw_index);
			goto fail_rx_qinit_info;

#if EFSYS_OPT_RX_SCALE
	sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
		MIN(sa->rxq_count, EFX_MAXRSS) : 1;

	if (sa->rss_channels > 1) {
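		/*
		 * Fill the RSS indirection table round-robin so that flows
		 * are spread evenly across the rss_channels Rx queues.
		 */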
		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;

	sfc_log_init(sa, "failed %d", rc);

/**
 * Shutdown the Rx subsystem.
 *
 * Called at the device close stage, for example, before device
 * reconfiguration or shutdown.
 */
sfc_rx_fini(struct sfc_adapter *sa)
	unsigned int sw_index;

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;