/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "sfc_debug.h"
#include "sfc_tweak.h"
/*
 * Maximum number of TX queue flush attempts in case of
 * failure or flush timeout
 */
#define SFC_TX_QFLUSH_ATTEMPTS		(3)
/*
 * Time to wait between event queue polling attempts when waiting for TX
 * queue flush done or flush failed events
 */
#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)
/*
 * Maximum number of event queue polling attempts when waiting for TX queue
 * flush done or flush failed events; it defines TX queue flush attempt timeout
 * together with SFC_TX_QFLUSH_POLL_WAIT_MS
 */
#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
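/*
 * Worked example of the timeout these constants imply: a single flush
 * attempt polls the event queue up to SFC_TX_QFLUSH_POLL_ATTEMPTS times,
 * sleeping SFC_TX_QFLUSH_POLL_WAIT_MS between polls, i.e. up to
 * 2000 * 1 ms = 2 seconds; sfc_tx_qstop() below retries the flush up to
 * SFC_TX_QFLUSH_ATTEMPTS times, so the worst-case delay is about
 * 3 * 2 s = 6 seconds.
 */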
sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
                   const struct rte_eth_txconf *tx_conf)
        unsigned int flags = tx_conf->txq_flags;

        if (tx_conf->tx_rs_thresh != 0) {
                sfc_err(sa, "RS bit in transmit descriptor is not supported");

        if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
                sfc_err(sa,
                        "TxQ free threshold too large: %u vs maximum %u",
                        tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));

        if (tx_conf->tx_thresh.pthresh != 0 ||
            tx_conf->tx_thresh.hthresh != 0 ||
            tx_conf->tx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "prefetch/host/writeback thresholds are not supported");

        if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
                sfc_err(sa, "VLAN offload is not supported");

        if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
                sfc_err(sa, "SCTP offload is not supported");

        /* We either perform both TCP and UDP offload, or no offload at all */
        if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
            ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
                sfc_err(sa, "TCP and UDP offloads can't be set independently");
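        /*
         * For illustration (not part of the original checks): a tx_conf that
         * passes the offload checks above sets ETH_TXQ_FLAGS_NOVLANOFFL and
         * ETH_TXQ_FLAGS_NOXSUMSCTP in txq_flags, and either sets both
         * ETH_TXQ_FLAGS_NOXSUMTCP and ETH_TXQ_FLAGS_NOXSUMUDP (no L4 checksum
         * offload at all) or leaves both of them clear (TCP and UDP checksum
         * offload enabled together).
         */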
sfc_tx_qflush_done(struct sfc_txq *txq)
        txq->state |= SFC_TXQ_FLUSHED;
        txq->state &= ~SFC_TXQ_FLUSHING;

sfc_tx_reap(struct sfc_txq *txq)
        unsigned int completed;

        sfc_ev_qpoll(txq->evq);

        for (completed = txq->completed;
             completed != txq->pending; completed++) {
                struct sfc_tx_sw_desc *txd;

                txd = &txq->sw_ring[completed & txq->ptr_mask];

                if (txd->mbuf != NULL) {
                        rte_pktmbuf_free(txd->mbuf);

        txq->completed = completed;
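        /*
         * Note for readers: txq->completed, txq->pending and txq->added are
         * free-running counters; a descriptor index is obtained by masking
         * with txq->ptr_mask (ring size minus one, see sfc_tx_qinit() below),
         * which works because the ring size is a power of two. For example,
         * with 512 entries, completed = 513 maps to sw_ring slot
         * 513 & 511 = 1.
         */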
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_tx_desc, unsigned int socket_id,
             const struct rte_eth_txconf *tx_conf)
        struct sfc_txq_info *txq_info;
        unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);

        sfc_log_init(sa, "TxQ = %u", sw_index);

        rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
        txq_info->entries = nb_tx_desc;

        rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);

        evq = sa->evq_info[evq_index].evq;

        txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);

        rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
                           socket_id, &txq->mem);

        txq->pend_desc = rte_calloc_socket("sfc-txq-pend-desc",
                                           EFX_TXQ_LIMIT(txq_info->entries),
                                           sizeof(efx_desc_t), 0, socket_id);
        if (txq->pend_desc == NULL)
                goto fail_pend_desc_alloc;

        txq->sw_ring = rte_calloc_socket("sfc-txq-desc", txq_info->entries,
                                         sizeof(*txq->sw_ring), 0, socket_id);
        if (txq->sw_ring == NULL)
                goto fail_desc_alloc;

        txq->state = SFC_TXQ_INITIALIZED;
        txq->ptr_mask = txq_info->entries - 1;
        txq->free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
                           SFC_TX_DEFAULT_FREE_THRESH;
        txq->hw_index = sw_index;
        txq->flags = tx_conf->txq_flags;

        txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

        rte_free(txq->pend_desc);

fail_pend_desc_alloc:
        sfc_dma_free(sa, &txq->mem);

        sfc_ev_qfini(sa, evq_index);

        txq_info->entries = 0;

        sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
        struct sfc_txq_info *txq_info;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        txq_info->txq = NULL;
        txq_info->entries = 0;

        rte_free(txq->sw_ring);
        rte_free(txq->pend_desc);
        sfc_dma_free(sa, &txq->mem);

sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
        sfc_log_init(sa, "TxQ = %u", sw_index);
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
        switch (txmode->mq_mode) {
                sfc_err(sa, "Tx multi-queue mode %u not supported",

        /*
         * These features are claimed to be i40e-specific,
         * but it does make sense to double-check their absence
         */
        if (txmode->hw_vlan_reject_tagged) {
                sfc_err(sa, "Rejecting tagged packets not supported");

        if (txmode->hw_vlan_reject_untagged) {
                sfc_err(sa, "Rejecting untagged packets not supported");

        if (txmode->hw_vlan_insert_pvid) {
                sfc_err(sa, "Port-based VLAN insertion not supported");
sfc_tx_init(struct sfc_adapter *sa)
        const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;

        rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
                goto fail_check_mode;

        sa->txq_count = sa->eth_dev->data->nb_tx_queues;

        sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
                                         sizeof(sa->txq_info[0]), 0,
        if (sa->txq_info == NULL)
                goto fail_txqs_alloc;

        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
                rc = sfc_tx_qinit_info(sa, sw_index);
                        goto fail_tx_qinit_info;

        rte_free(sa->txq_info);

        sfc_log_init(sa, "failed (rc = %d)", rc);

sfc_tx_fini(struct sfc_adapter *sa)
        sw_index = sa->txq_count;
        while (--sw_index >= 0) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qfini(sa, sw_index);

        rte_free(sa->txq_info);
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        unsigned int desc_index;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        rc = sfc_ev_qstart(sa, evq->evq_index);

        /*
         * It seems that DPDK has no controls regarding IPv4 offloads,
         * hence we always enable it here
         */
        if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
            (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP))
                flags = EFX_TXQ_CKSUM_IPV4;
        else
                flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
        rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
                            txq_info->entries, 0 /* not used on EF10 */,
                            &txq->common, &desc_index);
                goto fail_tx_qcreate;

        txq->added = txq->pending = txq->completed = desc_index;

        efx_tx_qenable(txq->common);

        txq->state |= (SFC_TXQ_STARTED | SFC_TXQ_RUNNING);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

        sfc_ev_qstop(sa, evq->evq_index);
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        unsigned int retry_count;
        unsigned int wait_count;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        if (txq->state == SFC_TXQ_INITIALIZED)

        SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

        txq->state &= ~SFC_TXQ_RUNNING;
        /*
         * Retry TX queue flushing if the flush failed or timed out;
         * in the worst case it can delay for 6 seconds
         */
        for (retry_count = 0;
             ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
             (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
                if (efx_tx_qflush(txq->common) != 0) {
                        txq->state |= SFC_TXQ_FLUSHING;

                /*
                 * Wait for the TX queue flush done or flush failed event for
                 * at least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not
                 * more than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(txq->evq);
                } while ((txq->state & SFC_TXQ_FLUSHING) &&
                         wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

        if (txq->state & SFC_TXQ_FLUSHING)
                sfc_err(sa, "TxQ %u flush timed out", sw_index);

        if (txq->state & SFC_TXQ_FLUSHED)
                sfc_info(sa, "TxQ %u flushed", sw_index);
        for (txds = 0; txds < txq_info->entries; txds++) {
                if (txq->sw_ring[txds].mbuf != NULL) {
                        rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
                        txq->sw_ring[txds].mbuf = NULL;

        txq->state = SFC_TXQ_INITIALIZED;

        efx_tx_qdestroy(txq->common);

        sfc_ev_qstop(sa, txq->evq->evq_index);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
sfc_tx_start(struct sfc_adapter *sa)
        unsigned int sw_index;

        sfc_log_init(sa, "txq_count = %u", sa->txq_count);

        rc = efx_tx_init(sa->nic);
                goto fail_efx_tx_init;

        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
                if (!(sa->txq_info[sw_index].deferred_start) ||
                    sa->txq_info[sw_index].deferred_started) {
                        rc = sfc_tx_qstart(sa, sw_index);

        while (sw_index-- > 0)
                sfc_tx_qstop(sa, sw_index);

        efx_tx_fini(sa->nic);

        sfc_log_init(sa, "failed (rc = %d)", rc);

sfc_tx_stop(struct sfc_adapter *sa)
        unsigned int sw_index;

        sfc_log_init(sa, "txq_count = %u", sa->txq_count);

        sw_index = sa->txq_count;
        while (sw_index-- > 0) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qstop(sa, sw_index);

        efx_tx_fini(sa->nic);
sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct sfc_txq *txq = (struct sfc_txq *)tx_queue;
        unsigned int added = txq->added;
        unsigned int pushed = added;
        unsigned int pkts_sent = 0;
        efx_desc_t *pend = &txq->pend_desc[0];
        const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
        const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
        unsigned int fill_level = added - txq->completed;
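        /*
         * Illustration of the fill levels (not in the original source): with
         * a 512-descriptor ring, hard_max_fill is EFX_TXQ_LIMIT(512), the
         * hard limit on outstanding descriptors for the ring; with
         * tx_free_thresh = 64, soft_max_fill is hard_max_fill - 64, and
         * completed descriptors are reaped once more than soft_max_fill
         * descriptors (added - completed) are outstanding.
         */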
        struct rte_mbuf **pktp;

        if (unlikely((txq->state & SFC_TXQ_RUNNING) == 0))

        /*
         * Reap only when the fill level exceeds the soft limit, i.e. when
         * there may be insufficient space for a single packet; reaping on
         * every call would needlessly increase latency
         */
        reap_done = (fill_level > soft_max_fill);
                /*
                 * Recalculate fill level since 'txq->completed'
                 * might have changed on reap
                 */
                fill_level = added - txq->completed;

        for (pkts_sent = 0, pktp = &tx_pkts[0];
             (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
             pkts_sent++, pktp++) {
                struct rte_mbuf *m_seg = *pktp;
                size_t pkt_len = m_seg->pkt_len;
                unsigned int pkt_descs = 0;

                for (; m_seg != NULL; m_seg = m_seg->next) {
                        efsys_dma_addr_t next_frag;

                        seg_len = m_seg->data_len;
                        next_frag = rte_mbuf_data_dma_addr(m_seg);

                                efsys_dma_addr_t frag_addr = next_frag;

                                next_frag = RTE_ALIGN(frag_addr + 1,
                                                      SFC_TX_SEG_BOUNDARY);
                                frag_len = MIN(next_frag - frag_addr, seg_len);

                                efx_tx_qdesc_dma_create(txq->common,

                        } while (seg_len != 0);
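                        /*
                         * Illustration of the split above (not in the
                         * original source): an mbuf segment whose buffer
                         * crosses an SFC_TX_SEG_BOUNDARY boundary is emitted
                         * as several DMA descriptors; each fragment ends at
                         * the next boundary (RTE_ALIGN(frag_addr + 1,
                         * SFC_TX_SEG_BOUNDARY)) or at the end of the segment,
                         * whichever comes first, so the loop repeats until
                         * seg_len reaches zero.
                         */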
                fill_level += pkt_descs;
                if (unlikely(fill_level > hard_max_fill)) {
                        /*
                         * Our estimate of the maximum number of descriptors
                         * required to send a packet seems to be wrong.
                         * Try to reap (if we haven't yet).
                         */

                        fill_level = added - txq->completed;
                        if (fill_level > hard_max_fill) {
                /* Assign mbuf to the last used desc */
                txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;

        if (likely(pkts_sent > 0)) {
                rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
                                       pend - &txq->pend_desc[0],
                                       txq->completed, &txq->added);

                if (likely(pushed != txq->added))
                        efx_tx_qpush(txq->common, txq->added, pushed);
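                /*
                 * Reader's note (an interpretation, not from the original
                 * source): efx_tx_qdesc_post() writes the descriptors
                 * accumulated in txq->pend_desc into the queue and advances
                 * txq->added, while efx_tx_qpush() rings the doorbell; the
                 * doorbell is skipped when txq->added did not move past the
                 * previously pushed position.
                 */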
#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE