/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_tx.h"
#include "sfc_tweak.h"

/*
 * Maximum number of TX queue flush attempts in case of
 * failure or flush timeout
 */
#define SFC_TX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for TX
 * queue flush done or flush failed events
 */
#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for TX queue
 * flush done or flush failed events; it defines TX queue flush attempt timeout
 * together with SFC_TX_QFLUSH_POLL_WAIT_MS
 */
#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
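
/*
 * Worst-case flush delay implied by the constants above (see the retry
 * loop in sfc_tx_qstop()): SFC_TX_QFLUSH_ATTEMPTS *
 * SFC_TX_QFLUSH_POLL_ATTEMPTS * SFC_TX_QFLUSH_POLL_WAIT_MS =
 * 3 * 2000 * 1 ms = 6 seconds
 */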

static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
                   const struct rte_eth_txconf *tx_conf)
{
        unsigned int flags = tx_conf->txq_flags;
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        int rc = 0;

        if (tx_conf->tx_rs_thresh != 0) {
                sfc_err(sa, "RS bit in transmit descriptor is not supported");
                rc = EINVAL;
        }

        if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
                sfc_err(sa,
                        "TxQ free threshold too large: %u vs maximum %u",
                        tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
                rc = EINVAL;
        }

        if (tx_conf->tx_thresh.pthresh != 0 ||
            tx_conf->tx_thresh.hthresh != 0 ||
            tx_conf->tx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "prefetch/host/writeback thresholds are not supported");
                rc = EINVAL;
        }

        if (!encp->enc_hw_tx_insert_vlan_enabled &&
            (flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
                sfc_err(sa, "VLAN offload is not supported");
                rc = EINVAL;
        }

        if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
                sfc_err(sa, "SCTP offload is not supported");
                rc = EINVAL;
        }

        /* We either perform both TCP and UDP offload, or no offload at all */
        if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
            ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
                sfc_err(sa, "TCP and UDP offloads can't be set independently");
                rc = EINVAL;
        }

        return rc;
}
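
/*
 * Called from the event queue handler (sfc_ev.c) when a TxQ flush done
 * event is received: mark the queue flushed so that sfc_tx_qstop() can
 * stop polling
 */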
void
sfc_tx_qflush_done(struct sfc_txq *txq)
{
        txq->state |= SFC_TXQ_FLUSHED;
        txq->state &= ~SFC_TXQ_FLUSHING;
}
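
/*
 * Reclaim descriptors completed by the hardware: poll the bound event
 * queue to update txq->pending, free the mbufs attached to descriptors
 * in the [completed, pending) range and advance txq->completed
 */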
static void
sfc_tx_reap(struct sfc_txq *txq)
{
        unsigned int completed;

        sfc_ev_qpoll(txq->evq);

        for (completed = txq->completed;
             completed != txq->pending; completed++) {
                struct sfc_tx_sw_desc *txd;

                txd = &txq->sw_ring[completed & txq->ptr_mask];

                if (txd->mbuf != NULL) {
                        rte_pktmbuf_free(txd->mbuf);
                        txd->mbuf = NULL;
                }
        }

        txq->completed = completed;
}

int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_tx_desc, unsigned int socket_id,
             const struct rte_eth_txconf *tx_conf)
{
        struct sfc_txq_info *txq_info;
        struct sfc_evq *evq;
        struct sfc_txq *txq;
        unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
        int rc = 0;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
        if (rc != 0)
                goto fail_bad_conf;

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
        txq_info->entries = nb_tx_desc;

        rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);
        if (rc != 0)
                goto fail_ev_qinit;

        evq = sa->evq_info[evq_index].evq;

        rc = ENOMEM;
        txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
        if (txq == NULL)
                goto fail_txq_alloc;

        rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
                           socket_id, &txq->mem);
        if (rc != 0)
                goto fail_dma_alloc;

        rc = ENOMEM;
        txq->pend_desc = rte_calloc_socket("sfc-txq-pend-desc",
                                           EFX_TXQ_LIMIT(txq_info->entries),
                                           sizeof(efx_desc_t), 0, socket_id);
        if (txq->pend_desc == NULL)
                goto fail_pend_desc_alloc;

        rc = ENOMEM;
        txq->sw_ring = rte_calloc_socket("sfc-txq-desc", txq_info->entries,
                                         sizeof(*txq->sw_ring), 0, socket_id);
        if (txq->sw_ring == NULL)
                goto fail_desc_alloc;

        txq->state = SFC_TXQ_INITIALIZED;
        /* Ring size is a power of 2, so entries - 1 works as an index mask */
        txq->ptr_mask = txq_info->entries - 1;
        txq->free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
                           SFC_TX_DEFAULT_FREE_THRESH;
        txq->hw_index = sw_index;
        txq->flags = tx_conf->txq_flags;
        txq->evq = evq;

        evq->txq = txq;

        txq_info->txq = txq;
        txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

        return 0;

fail_desc_alloc:
        rte_free(txq->pend_desc);

fail_pend_desc_alloc:
        sfc_dma_free(sa, &txq->mem);

fail_dma_alloc:
        rte_free(txq);

fail_txq_alloc:
        sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
        txq_info->entries = 0;

fail_bad_conf:
        sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
        return rc;
}
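
/*
 * Release TxQ resources in reverse order of allocation; the queue must
 * be stopped (back in SFC_TXQ_INITIALIZED state) before it is finalized
 */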
void
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct sfc_txq_info *txq_info;
        struct sfc_txq *txq;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        txq = txq_info->txq;
        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        txq_info->txq = NULL;
        txq_info->entries = 0;

        rte_free(txq->sw_ring);
        rte_free(txq->pend_desc);
        sfc_dma_free(sa, &txq->mem);
        rte_free(txq);
}

static int
sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
        sfc_log_init(sa, "TxQ = %u", sw_index);

        return 0;
}

static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
        int rc = 0;

        switch (txmode->mq_mode) {
        case ETH_MQ_TX_NONE:
                break;
        default:
                sfc_err(sa, "Tx multi-queue mode %u not supported",
                        txmode->mq_mode);
                rc = EINVAL;
        }

        /*
         * These features are claimed to be i40e-specific,
         * but it makes sense to double-check their absence here
         */
        if (txmode->hw_vlan_reject_tagged) {
                sfc_err(sa, "Rejecting tagged packets not supported");
                rc = EINVAL;
        }

        if (txmode->hw_vlan_reject_untagged) {
                sfc_err(sa, "Rejecting untagged packets not supported");
                rc = EINVAL;
        }

        if (txmode->hw_vlan_insert_pvid) {
                sfc_err(sa, "Port-based VLAN insertion not supported");
                rc = EINVAL;
        }

        return rc;
}
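
/*
 * Device-level Tx initialization: validate the ethdev Tx mode and
 * allocate the per-queue info array (one entry per ethdev Tx queue)
 */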
int
sfc_tx_init(struct sfc_adapter *sa)
{
        const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;
        int rc = 0;

        rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->txq_count = sa->eth_dev->data->nb_tx_queues;

        rc = ENOMEM;
        sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
                                         sizeof(sa->txq_info[0]), 0,
                                         sa->socket_id);
        if (sa->txq_info == NULL)
                goto fail_txqs_alloc;

        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
                rc = sfc_tx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_tx_qinit_info;
        }

        return 0;

fail_tx_qinit_info:
        rte_free(sa->txq_info);
        sa->txq_info = NULL;

fail_txqs_alloc:
        sa->txq_count = 0;

fail_check_mode:
        sfc_log_init(sa, "failed (rc = %d)", rc);
        return rc;
}

void
sfc_tx_fini(struct sfc_adapter *sa)
{
        int sw_index;

        sw_index = sa->txq_count;
        while (--sw_index >= 0) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qfini(sa, sw_index);
        }

        rte_free(sa->txq_info);
        sa->txq_info = NULL;
        sa->txq_count = 0;
}
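
/*
 * Start the TxQ; the bound event queue is started first since TxQ
 * completion and flush events are delivered through it, and the queue
 * itself ends up in SFC_TXQ_STARTED | SFC_TXQ_RUNNING state
 */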
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        struct sfc_txq *txq;
        struct sfc_evq *evq;
        uint16_t flags;
        unsigned int desc_index;
        int rc = 0;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        txq = txq_info->txq;

        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        evq = txq->evq;

        rc = sfc_ev_qstart(sa, evq->evq_index);
        if (rc != 0)
                goto fail_ev_qstart;

        /*
         * It seems that DPDK has no controls regarding IPv4 offloads;
         * hence, we always enable them here
         */
        if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
            (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP))
                flags = EFX_TXQ_CKSUM_IPV4;
        else
                flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
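
        /*
         * Note that TCP and UDP checksum offloads cannot be enabled
         * separately here: sfc_tx_qcheck_conf() has already rejected
         * configurations setting only one of the ETH_TXQ_FLAGS_NOXSUM*
         * flags
         */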

        rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
                            txq_info->entries, 0 /* not used on EF10 */,
                            flags, evq->common,
                            &txq->common, &desc_index);
        if (rc != 0)
                goto fail_tx_qcreate;

        txq->added = txq->pending = txq->completed = desc_index;
        txq->hw_vlan_tci = 0;

        efx_tx_qenable(txq->common);

        txq->state |= (SFC_TXQ_STARTED | SFC_TXQ_RUNNING);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;

fail_tx_qcreate:
        sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
        return rc;
}

void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        struct sfc_txq *txq;
        unsigned int retry_count;
        unsigned int wait_count;
        unsigned int txds;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        txq = txq_info->txq;

        if (txq->state == SFC_TXQ_INITIALIZED)
                return;

        SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

        txq->state &= ~SFC_TXQ_RUNNING;

        /*
         * Retry TX queue flushing in case of flush failed or
         * timeout; in the worst case it can delay for 6 seconds
         */
        for (retry_count = 0;
             ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
             (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_tx_qflush(txq->common) != 0) {
                        txq->state |= SFC_TXQ_FLUSHING;
                        break;
                }

                /*
                 * Wait for the TX queue flush done or flush failed event
                 * for at least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and
                 * no more than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS
                 * multiplied by SFC_TX_QFLUSH_POLL_ATTEMPTS)
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(txq->evq);
                } while ((txq->state & SFC_TXQ_FLUSHING) &&
                         wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

                if (txq->state & SFC_TXQ_FLUSHING)
                        sfc_err(sa, "TxQ %u flush timed out", sw_index);

                if (txq->state & SFC_TXQ_FLUSHED)
                        sfc_info(sa, "TxQ %u flushed", sw_index);
        }

        sfc_tx_reap(txq);

        for (txds = 0; txds < txq_info->entries; txds++) {
                if (txq->sw_ring[txds].mbuf != NULL) {
                        rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
                        txq->sw_ring[txds].mbuf = NULL;
                }
        }

        txq->state = SFC_TXQ_INITIALIZED;

        efx_tx_qdestroy(txq->common);

        sfc_ev_qstop(sa, txq->evq->evq_index);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
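
/*
 * Start all Tx queues not marked for deferred start; on failure, stop
 * the queues already started in reverse order
 */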
int
sfc_tx_start(struct sfc_adapter *sa)
{
        unsigned int sw_index;
        int rc = 0;

        sfc_log_init(sa, "txq_count = %u", sa->txq_count);

        rc = efx_tx_init(sa->nic);
        if (rc != 0)
                goto fail_efx_tx_init;

        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
                if (!(sa->txq_info[sw_index].deferred_start) ||
                    sa->txq_info[sw_index].deferred_started) {
                        rc = sfc_tx_qstart(sa, sw_index);
                        if (rc != 0)
                                goto fail_tx_qstart;
                }
        }

        return 0;

fail_tx_qstart:
        while (sw_index-- > 0)
                sfc_tx_qstop(sa, sw_index);

        efx_tx_fini(sa->nic);

fail_efx_tx_init:
        sfc_log_init(sa, "failed (rc = %d)", rc);
        return rc;
}

void
sfc_tx_stop(struct sfc_adapter *sa)
{
        unsigned int sw_index;

        sfc_log_init(sa, "txq_count = %u", sa->txq_count);

        sw_index = sa->txq_count;
        while (sw_index-- > 0) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qstop(sa, sw_index);
        }

        efx_tx_fini(sa->nic);
}

/*
 * This function is used to insert or update a VLAN tag;
 * the firmware keeps the tag to insert per TxQ (controlled by
 * option descriptors), hence, if the tag of the packet to be sent
 * differs from the one remembered by the firmware, the function
 * emits a new option descriptor to update it
 */
static unsigned int
sfc_tx_maybe_insert_tag(struct sfc_txq *txq, struct rte_mbuf *m,
                        efx_desc_t **pend)
{
        uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
                             m->vlan_tci : 0);

        if (this_tag == txq->hw_vlan_tci)
                return 0;

        /*
         * The expression inside SFC_ASSERT() is not desired to be checked in
         * a non-debug build because it might be too expensive on the data path
         */
        SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);

        efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
                                    *pend);
        (*pend)++;
        txq->hw_vlan_tci = this_tag;

        /* One option descriptor has been used; report it to the caller */
        return 1;
}
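
/*
 * Burst transmit. Each packet may be preceded by a VLAN option descriptor
 * (see sfc_tx_maybe_insert_tag() above) and occupies one DMA descriptor
 * per buffer fragment; descriptors are staged in txq->pend_desc and
 * posted to the hardware at the end of the burst
 */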
uint16_t
sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct sfc_txq *txq = (struct sfc_txq *)tx_queue;
        unsigned int added = txq->added;
        unsigned int pushed = added;
        unsigned int pkts_sent = 0;
        efx_desc_t *pend = &txq->pend_desc[0];
        const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
        const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
        unsigned int fill_level = added - txq->completed;
        boolean_t reap_done;
        int rc __rte_unused;
        struct rte_mbuf **pktp;

        if (unlikely((txq->state & SFC_TXQ_RUNNING) == 0))
                goto done;

        /*
         * If there is insufficient space even for a single packet, reap;
         * otherwise, do not reap on every invocation to avoid added latency
         */
        reap_done = (fill_level > soft_max_fill);

        if (reap_done) {
                sfc_tx_reap(txq);
                /*
                 * Recalculate fill level since 'txq->completed'
                 * might have changed on reap
                 */
                fill_level = added - txq->completed;
        }

        for (pkts_sent = 0, pktp = &tx_pkts[0];
             (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
             pkts_sent++, pktp++) {
                struct rte_mbuf *m_seg = *pktp;
                size_t pkt_len = m_seg->pkt_len;
                unsigned int pkt_descs = 0;

                /*
                 * Here VLAN TCI is expected to be zero if no
                 * DEV_TX_VLAN_OFFLOAD capability is advertised;
                 * if the calling app ignores the absence of
                 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
                 * TX_ERROR will occur
                 */
                pkt_descs += sfc_tx_maybe_insert_tag(txq, m_seg, &pend);

                for (; m_seg != NULL; m_seg = m_seg->next) {
                        efsys_dma_addr_t next_frag;
                        size_t seg_len;

                        seg_len = m_seg->data_len;
                        next_frag = rte_mbuf_data_dma_addr(m_seg);
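
                        /*
                         * A single mbuf segment may span several DMA
                         * descriptors: each fragment extends to the next
                         * SFC_TX_SEG_BOUNDARY boundary or to the end of
                         * the segment, whichever comes first
                         */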
                        do {
                                efsys_dma_addr_t frag_addr = next_frag;
                                size_t frag_len;

                                next_frag = RTE_ALIGN(frag_addr + 1,
                                                      SFC_TX_SEG_BOUNDARY);
                                frag_len = MIN(next_frag - frag_addr, seg_len);
                                seg_len -= frag_len;
                                pkt_len -= frag_len;

                                efx_tx_qdesc_dma_create(txq->common,
                                                        frag_addr, frag_len,
                                                        (pkt_len == 0),
                                                        pend++);

                                pkt_descs++;
                        } while (seg_len != 0);
                }

                added += pkt_descs;

                fill_level += pkt_descs;
                if (unlikely(fill_level > hard_max_fill)) {
                        /*
                         * Our estimate of the maximum number of descriptors
                         * required to send a packet seems to be wrong.
                         * Try to reap (if we haven't yet).
                         */
                        if (!reap_done) {
                                sfc_tx_reap(txq);
                                reap_done = B_TRUE;
                                fill_level = added - txq->completed;
                                if (fill_level > hard_max_fill) {
                                        /* Roll back unposted descriptors */
                                        pend -= pkt_descs;
                                        break;
                                }
                        } else {
                                pend -= pkt_descs;
                                break;
                        }
                }

                /* Assign mbuf to the last used desc */
                txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
        }
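
        /*
         * Post the descriptors staged in txq->pend_desc to the hardware
         * ring (this updates txq->added) and push the doorbell if anything
         * new was actually added
         */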
        if (likely(pkts_sent > 0)) {
                rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
                                       pend - &txq->pend_desc[0],
                                       txq->completed, &txq->added);
                SFC_ASSERT(rc == 0);

                if (likely(pushed != txq->added))
                        efx_tx_qpush(txq->common, txq->added, pushed);
        }

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE