/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tweak.h"
/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)
/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)
/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
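
/*
 * Note: with the values above, a single flush attempt polls the event queue
 * for up to SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS
 * (2000 * 1 ms = 2 s), so with SFC_RX_QFLUSH_ATTEMPTS retries the worst-case
 * delay is about 6 seconds (see sfc_rx_qflush() below).
 */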
void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}
void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}
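
/*
 * Refill the Rx queue with mbufs from the refill mempool in bulks of
 * SFC_RX_REFILL_BULK buffers and push the doorbell once at the end if
 * anything has been posted.
 */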
static void
sfc_rx_qrefill(struct sfc_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint8_t port_id = rxq->port_id;

	free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
		(added - rxq->completed);
	bulks = free_space / RTE_DIM(objs);

	id = added & rxq->ptr_mask;
	while (bulks-- > 0) {
		if (rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
					 RTE_DIM(objs)) < 0) {
			/*
			 * Strictly speaking, it is not safe to increment the
			 * counter from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			rte_mbuf_refcnt_set(m, 1);
			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->next = NULL;
			m->nb_segs = 1;
			m->port = port_id;

			addr[i] = rte_pktmbuf_mtophys(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	}

	/* Push doorbell if something is posted */
	if (rxq->added != added) {
		rxq->added = added;
		efx_rx_qpush(rxq->common, added, &rxq->pushed);
	}
}
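
/*
 * Return all mbufs posted to the hardware but not yet completed back to the
 * refill mempool. Used after the queue has been flushed.
 */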
static void
sfc_rx_qpurge(struct sfc_rxq *rxq)
{
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
	}
}
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of flush failed or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_rx_qflush(rxq->common) != 0) {
			rxq->state |= SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_info(sa, "RxQ %u flushed", sw_index);
	}

	sfc_rx_qpurge(rxq);
}
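
/*
 * Start the Rx queue: start the associated event queue, create and enable
 * the hardware Rx queue, refill it with mbufs and, for queue 0, install it
 * as the default MAC filter Rx queue.
 */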
int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	evq = rxq->evq;

	rc = sfc_ev_qstart(sa, evq->evq_index);
	if (rc != 0)
		goto fail_ev_qstart;

	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			    &rxq->mem, rxq_info->entries,
			    0 /* not used on EF10 */, evq->common,
			    &rxq->common);
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	rxq->state |= SFC_RXQ_STARTED;

	sfc_rx_qrefill(rxq);

	if (sw_index == 0) {
		rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common,
						    B_FALSE);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
	sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
	return rc;
}
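
/*
 * Stop the Rx queue: flush it, destroy the hardware queue and stop the
 * associated event queue. The reverse of sfc_rx_qstart().
 */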
void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(sa, rxq->evq->evq_index);
}
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa,
		   const struct rte_eth_rxconf *rx_conf)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_err(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_free_thresh != 0) {
		sfc_err(sa, "RxQ free threshold is not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_deferred_start != 0) {
		sfc_err(sa, "RxQ deferred start is not supported");
		rc = EINVAL;
	}

	return rc;
}
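
/*
 * Compute the alignment guaranteed for the mbuf data start. It is derived
 * from the cache line alignment of the mbuf object itself and the data
 * offset (mbuf header, private area and headroom) within it.
 */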
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << (order - 1);
}
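
/*
 * Derive the usable Rx buffer size from the mempool object data room:
 * subtract the headroom and reserve space for the buffer start and end
 * alignment required by the NIC. Returns 0 if the data room is too small.
 */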
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are power of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). A more
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align the length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}
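
/*
 * Set up a single Rx queue: validate the configuration and the mempool,
 * initialize the associated event queue, and allocate the queue control
 * structure, DMA-able descriptor ring memory and software descriptor array.
 */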
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	unsigned int evq_index;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;

	rc = sfc_rx_qcheck_conf(sa, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
	rxq_info->entries = nb_rx_desc;
	rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	evq_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);

	rc = sfc_ev_qinit(sa, evq_index, rxq_info->entries, socket_id);
	if (rc != 0)
		goto fail_ev_qinit;

	evq = sa->evq_info[evq_index].evq;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-rxq-sw_desc", rxq_info->entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	evq->rxq = rxq;
	rxq->evq = evq;
	rxq->ptr_mask = rxq_info->entries - 1;
	rxq->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;
	rxq->hw_index = sw_index;
	rxq->port_id = sa->eth_dev->data->port_id;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->rxq = rxq;

	return 0;

fail_desc_alloc:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
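
/*
 * Release the software descriptor array, DMA memory and the queue control
 * structure allocated by sfc_rx_qinit().
 */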
void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	rte_free(rxq->sw_desc);
	sfc_dma_free(sa, &rxq->mem);
	rte_free(rxq);
}
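
/*
 * Start the Rx subsystem: initialize the NIC Rx datapath and start every
 * configured Rx queue; on failure, roll back the queues already started.
 */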
int
sfc_rx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		rc = sfc_rx_qstart(sa, sw_index);
		if (rc != 0)
			goto fail_rx_qstart;
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_rx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}
static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}
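
/*
 * Validate device-level Rx mode settings. Unsupported features are reported
 * and rejected; FCS stripping is forced on since it is always performed.
 */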
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		sfc_err(sa,
			"Q-in-Q HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		sfc_warn(sa,
			 "FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->enable_scatter) {
		sfc_err(sa, "Scatter on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");
		rc = EINVAL;
	}

	return rc;
}
/**
 * Initialize Rx subsystem.
 *
 * Called at device configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_init(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	unsigned int sw_index;
	int rc;

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	sa->rxq_count = sa->eth_dev->data->nb_rx_queues;

	rc = ENOMEM;
	sa->rxq_info = rte_calloc_socket("sfc-rxqs", sa->rxq_count,
					 sizeof(struct sfc_rxq_info), 0,
					 sa->socket_id);
	if (sa->rxq_info == NULL)
		goto fail_rxqs_alloc;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		rc = sfc_rx_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_rx_qinit_info;
	}

	return 0;

fail_rx_qinit_info:
	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;

fail_rxqs_alloc:
	sa->rxq_count = 0;

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device
 * reconfiguration or shutdown.
 */
void
sfc_rx_fini(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
	sa->rxq_count = 0;
}