/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
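
/*
 * Flush-done notification; expected to be called from event queue
 * processing when the NIC reports that the Rx queue flush has completed.
 */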
void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}
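
/*
 * Flush-failed notification; expected to be called from event queue
 * processing when the NIC reports that the Rx queue flush has failed.
 */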
void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}
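
/*
 * Refill the Rx ring with mbufs from the refill mempool in bulks of
 * SFC_RX_REFILL_BULK descriptors and push the doorbell if anything was
 * posted. Refill is skipped while free space is below the refill threshold.
 */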
static void
sfc_rx_qrefill(struct sfc_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint8_t port_id = rxq->port_id;

	free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
		(added - rxq->completed);
	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);

	id = added & rxq->ptr_mask;
	while (bulks-- > 0) {
		if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
					 RTE_DIM(objs)) < 0) {
			/*
			 * It is hardly a safe way to increment a counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			rte_mbuf_refcnt_set(m, 1);
			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->next = NULL;
			m->nb_segs = 1;
			m->port = port_id;

			addr[i] = rte_pktmbuf_mtophys(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	}

	/* Push doorbell if something is posted */
	if (rxq->added != added) {
		rxq->added = added;
		efx_rx_qpush(rxq->common, added, &rxq->pushed);
	}
}
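
/*
 * Convert libefx Rx descriptor flags to mbuf offload flags: IPv4 and
 * TCP/UDP checksum validation bits map to PKT_RX_*_CKSUM_GOOD/BAD, and
 * anything else is left as checksum-unknown (zero).
 */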
static uint64_t
sfc_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}
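
/*
 * Derive the mbuf packet type from the Rx descriptor flags; the NIC
 * classifies L3 (IPv4/IPv6) and L4 (TCP/UDP), L2 is always Ethernet.
 */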
static uint32_t
sfc_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}
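
/*
 * Burst receive callback. Polls the event queue to update completion
 * state, then walks completed descriptors: chains scattered fragments,
 * drops descriptors marked for discard, fills in offload flags and packet
 * type for delivered packets, and finally refills the ring.
 */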
uint16_t
sfc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_rxq *rxq = rx_queue;
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->state & SFC_RXQ_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;
		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		m->data_off += prefix_size;
		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_mempool_put(rxq->refill_mb_pool,
						scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags = sfc_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type = sfc_rx_desc_flags_to_packet_type(desc_flags);

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mempool_put(rxq->refill_mb_pool, m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_rx_qrefill(rxq);

	return done_pkts;
}
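
/*
 * Report the number of descriptors that are pending (received but not yet
 * processed) on the given Rx queue; the event queue is polled first so
 * that the count is up to date.
 */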
unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq = sa->rxq_info[sw_index].rxq;
	if (rxq == NULL || (rxq->state & SFC_RXQ_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);
	return rxq->pending - rxq->completed;
}
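
/*
 * Check whether the descriptor at the given offset from the queue read
 * pointer has been completed (non-zero) or is still owned by the NIC.
 */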
int
sfc_rx_qdesc_done(struct sfc_rxq *rxq, unsigned int offset)
{
	if ((rxq->state & SFC_RXQ_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);
	return offset < (rxq->pending - rxq->completed);
}
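
/*
 * Return all mbufs still referenced by software descriptors (from
 * completed up to added) back to the refill mempool; used on queue
 * teardown after the hardware queue has been flushed.
 */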
static void
sfc_rx_qpurge(struct sfc_rxq *rxq)
{
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
		rxd->mbuf = NULL;
	}
}
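
/*
 * Flush the Rx queue on the NIC, retrying up to SFC_RX_QFLUSH_ATTEMPTS
 * times and polling the event queue for the flush done/failed event
 * between attempts, then purge any mbufs left on the ring.
 */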
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of flush failure or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_rx_qflush(rxq->common) != 0) {
			rxq->state |= SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for the Rx queue flush done or failed event for at
		 * least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_info(sa, "RxQ %u flushed", sw_index);
	}

	sfc_rx_qpurge(rxq);
}
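
/*
 * Start the Rx queue: start its event queue, create and enable the
 * hardware Rx queue, fill it with mbufs and, for the first queue, install
 * it as the default MAC filter Rx queue.
 */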
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	evq = rxq->evq;
	rc = sfc_ev_qstart(sa, evq->evq_index);
	if (rc != 0)
		goto fail_ev_qstart;

	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			    &rxq->mem, rxq_info->entries,
			    0 /* not used on EF10 */, evq->common,
			    &rxq->common);
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
	rxq->state |= (SFC_RXQ_STARTED | SFC_RXQ_RUNNING);

	sfc_rx_qrefill(rxq);

	if (sw_index == 0) {
		rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
						    B_FALSE);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
	sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
	return rc;
}
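
/*
 * Stop the Rx queue: quiesce and flush it, destroy the hardware queue and
 * stop its event queue; the queue returns to the INITIALIZED state.
 */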
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;

	if (rxq->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	rxq->state &= ~SFC_RXQ_RUNNING;

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(sa, rxq->evq->evq_index);
}
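
/*
 * Validate Rx queue configuration: descriptor thresholds and Rx drop
 * disable are fixed by the hardware, and the free threshold must not
 * exceed the usable ring size.
 */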
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
		   const struct rte_eth_rxconf *rx_conf)
{
	const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_err(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rx_free_thresh_max);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}
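
/*
 * Estimate the data buffer alignment that the mempool can guarantee for
 * Rx buffers, derived from the cache-line alignment of mbuf objects and
 * the data offset within the mbuf.
 */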
static uint16_t
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << (order - 1);
}
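
/*
 * Derive the usable Rx buffer size from the mempool object size, taking
 * into account the mbuf headroom and the buffer start/end alignment
 * restrictions imposed by the NIC. Returns 0 if the pool objects are too
 * small.
 */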
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). More
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align the length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}
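
/*
 * Set up the Rx queue at device configuration time: validate the
 * configuration, size the buffers, initialize the event queue and allocate
 * the queue control structure, DMA memory and software descriptor ring.
 */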
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rxq_info *rxq_info;
	unsigned int evq_index;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	uint16_t buf_size;
	int rc;

	rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
	rxq_info->entries = nb_rx_desc;
	rxq_info->type =
		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
		EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;

	evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

	rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
	if (rc != 0)
		goto fail_ev_qinit;

	evq = sa->evq_info[evq_index].evq;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	rxq->evq = evq;
	rxq->ptr_mask = rxq_info->entries - 1;
	rxq->refill_threshold = rx_conf->rx_free_thresh;
	rxq->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;
	rxq->hw_index = sw_index;
	rxq->port_id = sa->eth_dev->data->port_id;

	/* Cache limits required on the datapath in the RxQ structure */
	rxq->batch_max = encp->enc_rx_batch_max;
	rxq->prefix_size = encp->enc_rx_prefix_size;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->rxq = rxq;
	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_desc_alloc:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
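
/*
 * Release resources allocated by sfc_rx_qinit(): the software descriptor
 * ring, the DMA memory and the queue control structure.
 */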
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	rte_free(rxq->sw_desc);
	sfc_dma_free(sa, &rxq->mem);
	rte_free(rxq);
}
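
/*
 * Start the Rx subsystem at device start: initialize hardware Rx support
 * and start every queue that is not marked for deferred start.
 */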
int
sfc_rx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if ((!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
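
/* Stop all started Rx queues and finalize hardware Rx support */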
void
sfc_rx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}
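
/* Initialize per-queue info with the ring size limit imposed by the NIC */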
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}
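
/*
 * Validate device-level Rx mode against PMD capabilities; unsupported
 * features either fail the check (EINVAL) or are forced to the only
 * supported value with a warning.
 */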
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		sfc_err(sa, "Q-in-Q HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		sfc_warn(sa,
			 "FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");
		rc = EINVAL;
	}

	return rc;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device configuration stage when the number of receive queues is
 * specified together with other device-level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	unsigned int sw_index;
	int rc;

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

	rc = ENOMEM;
	sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
					 sizeof(struct sfc_rxq_info), 0,
					 sa->socket_id);
	if (sa->rxq_info == NULL)
		goto fail_rxqs_alloc;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		rc = sfc_rx_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_rx_qinit_info;
	}

	return 0;

fail_rx_qinit_info:
	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;

fail_rxqs_alloc:
	sa->rxq_count = 0;

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
}