/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_tx.h"
#include "sfc_tweak.h"
/*
 * Maximum number of TX queue flush attempts in case of
 * failure or flush timeout
 */
#define SFC_TX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for TX
 * queue flush done or flush failed events
 */
#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for TX queue
 * flush done or flush failed events; it defines TX queue flush attempt timeout
 * together with SFC_TX_QFLUSH_POLL_WAIT_MS
 */
#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
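
/*
 * Validate the Tx queue configuration requested by the application
 * against device capabilities. The checks do not stop at the first
 * problem, so all configuration errors are reported at once;
 * the result is 0 on success or EINVAL.
 */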
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
		   const struct rte_eth_txconf *tx_conf)
{
	unsigned int flags = tx_conf->txq_flags;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc = 0;

	if (tx_conf->tx_rs_thresh != 0) {
		sfc_err(sa, "RS bit in transmit descriptor is not supported");
		rc = EINVAL;
	}

	if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
		sfc_err(sa,
			"TxQ free threshold too large: %u vs maximum %u",
			tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
		rc = EINVAL;
	}

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfc_err(sa,
			"prefetch/host/writeback thresholds are not supported");
		rc = EINVAL;
	}

	if (!encp->enc_hw_tx_insert_vlan_enabled &&
	    (flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
		sfc_err(sa, "VLAN offload is not supported");
		rc = EINVAL;
	}

	if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
		sfc_err(sa, "SCTP offload is not supported");
		rc = EINVAL;
	}

	/* We either perform both TCP and UDP offload, or no offload at all */
	if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
	    ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
		sfc_err(sa, "TCP and UDP offloads can't be set independently");
		rc = EINVAL;
	}

	return rc;
}
void
sfc_tx_qflush_done(struct sfc_txq *txq)
{
	txq->state |= SFC_TXQ_FLUSHED;
	txq->state &= ~SFC_TXQ_FLUSHING;
}
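
/*
 * Reap completed descriptors: poll the event queue to pick up Tx
 * completions, free the mbufs attached to descriptors in the
 * [completed, pending) range and advance 'txq->completed'.
 */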
static void
sfc_tx_reap(struct sfc_txq *txq)
{
	unsigned int completed;

	sfc_ev_qpoll(txq->evq);

	for (completed = txq->completed;
	     completed != txq->pending; completed++) {
		struct sfc_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];

		if (txd->mbuf != NULL) {
			rte_pktmbuf_free(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->completed = completed;
}
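
/*
 * Set up a Tx queue: check the configuration, initialize the paired
 * event queue, then allocate the queue control structure, DMA ring
 * memory, pending descriptor array and software descriptor ring.
 * The fail_* labels unwind the allocations in reverse order.
 */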
int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_tx_desc, unsigned int socket_id,
	     const struct rte_eth_txconf *tx_conf)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_txq_info *txq_info;
	struct sfc_evq *evq;
	struct sfc_txq *txq;
	unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);
	int rc = 0;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
	txq_info->entries = nb_tx_desc;

	rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);
	if (rc != 0)
		goto fail_ev_qinit;

	evq = sa->evq_info[evq_index].evq;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
			   socket_id, &txq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	rc = ENOMEM;
	txq->pend_desc = rte_calloc_socket("sfc-txq-pend-desc",
					   EFX_TXQ_LIMIT(txq_info->entries),
					   sizeof(efx_desc_t), 0, socket_id);
	if (txq->pend_desc == NULL)
		goto fail_pend_desc_alloc;

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-txq-desc", txq_info->entries,
					 sizeof(*txq->sw_ring), 0, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_desc_alloc;

	if (sa->tso) {
		rc = sfc_tso_alloc_tsoh_objs(txq->sw_ring, txq_info->entries,
					     socket_id);
		if (rc != 0)
			goto fail_alloc_tsoh_objs;
	}

	txq->state = SFC_TXQ_INITIALIZED;
	txq->ptr_mask = txq_info->entries - 1;
	txq->free_thresh = (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
						       SFC_TX_DEFAULT_FREE_THRESH;
	txq->dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
	txq->hw_index = sw_index;
	txq->flags = tx_conf->txq_flags;
	txq->evq = evq;

	evq->txq = txq;

	txq_info->txq = txq;
	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

	return 0;

fail_alloc_tsoh_objs:
	rte_free(txq->sw_ring);

fail_desc_alloc:
	rte_free(txq->pend_desc);

fail_pend_desc_alloc:
	sfc_dma_free(sa, &txq->mem);

fail_dma_alloc:
	rte_free(txq);

fail_txq_alloc:
	sfc_ev_qfini(sa, evq_index);

fail_ev_qinit:
	txq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
	return rc;
}
void
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	sfc_tso_free_tsoh_objs(txq->sw_ring, txq_info->entries);

	txq_info->txq = NULL;
	txq_info->entries = 0;

	rte_free(txq->sw_ring);
	rte_free(txq->pend_desc);
	sfc_dma_free(sa, &txq->mem);
	rte_free(txq);
}
static int
sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "TxQ = %u", sw_index);

	return 0;
}
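
/*
 * Check device-level Tx mode settings: only ETH_MQ_TX_NONE is
 * supported, and VLAN-related features which cannot be provided
 * must not be requested.
 */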
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
	int rc = 0;

	switch (txmode->mq_mode) {
	case ETH_MQ_TX_NONE:
		break;
	default:
		sfc_err(sa, "Tx multi-queue mode %u not supported",
			txmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * These features are claimed to be i40e-specific,
	 * but it does make sense to double-check that they are absent
	 */
	if (txmode->hw_vlan_reject_tagged) {
		sfc_err(sa, "Rejecting tagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_reject_untagged) {
		sfc_err(sa, "Rejecting untagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_insert_pvid) {
		sfc_err(sa, "Port-based VLAN insertion not supported");
		rc = EINVAL;
	}

	return rc;
}
int
sfc_tx_init(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	unsigned int sw_index;
	int rc = 0;

	/*
	 * The datapath implementation assumes absence of boundary
	 * limits on Tx DMA descriptors. Adding such checks on the
	 * datapath would simply make the datapath slower.
	 */
	if (encp->enc_tx_dma_desc_boundary != 0) {
		rc = ENOTSUP;
		goto fail_tx_dma_desc_boundary;
	}

	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
	if (rc != 0)
		goto fail_check_mode;

	sa->txq_count = sa->eth_dev->data->nb_tx_queues;

	rc = ENOMEM;
	sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
					 sizeof(sa->txq_info[0]), 0,
					 sa->socket_id);
	if (sa->txq_info == NULL)
		goto fail_txqs_alloc;

	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
		rc = sfc_tx_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_tx_qinit_info;
	}

	return 0;

fail_tx_qinit_info:
	rte_free(sa->txq_info);
	sa->txq_info = NULL;

fail_txqs_alloc:
	sa->txq_count = 0;

fail_check_mode:
fail_tx_dma_desc_boundary:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
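
/*
 * Device-level Tx teardown: finalize all queues which are still set up
 * and release the per-queue info array.
 */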
void
sfc_tx_fini(struct sfc_adapter *sa)
{
	int sw_index;

	sw_index = sa->txq_count;
	while (--sw_index >= 0) {
		if (sa->txq_info[sw_index].txq != NULL)
			sfc_tx_qfini(sa, sw_index);
	}

	rte_free(sa->txq_info);
	sa->txq_info = NULL;
	sa->txq_count = 0;
}
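
/*
 * Start a Tx queue: bring up the paired event queue, create and enable
 * the hardware Tx queue with checksum offload flags derived from the
 * queue configuration, then mark the queue STARTED and RUNNING.
 */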
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	struct sfc_evq *evq;
	uint16_t flags;
	unsigned int desc_index;
	int rc = 0;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	evq = txq->evq;

	rc = sfc_ev_qstart(sa, evq->evq_index);
	if (rc != 0)
		goto fail_ev_qstart;

	/*
	 * It seems that DPDK has no controls regarding IPv4 offloads;
	 * hence, we always enable it here
	 */
	if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
	    (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
		flags = EFX_TXQ_CKSUM_IPV4;
	} else {
		flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;

		if (sa->tso)
			flags |= EFX_TXQ_FATSOV2;
	}

	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
			    txq_info->entries, 0 /* not used on EF10 */,
			    flags, evq->common,
			    &txq->common, &desc_index);
	if (rc != 0) {
		if (sa->tso && (rc == ENOSPC))
			sfc_err(sa, "ran out of TSO contexts");

		goto fail_tx_qcreate;
	}

	txq->added = txq->pending = txq->completed = desc_index;
	txq->hw_vlan_tci = 0;

	efx_tx_qenable(txq->common);

	txq->state |= (SFC_TXQ_STARTED | SFC_TXQ_RUNNING);

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_tx_qcreate:
	sfc_ev_qstop(sa, evq->evq_index);

fail_ev_qstart:
	return rc;
}
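
/*
 * Stop a Tx queue: flush it (retrying on flush failure or timeout),
 * reap completions, free all remaining mbufs, destroy the hardware
 * queue and stop the paired event queue.
 */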
void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	unsigned int retry_count;
	unsigned int wait_count;
	unsigned int txds;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	if (txq->state == SFC_TXQ_INITIALIZED)
		return;

	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

	txq->state &= ~SFC_TXQ_RUNNING;

	/*
	 * Retry TX queue flushing in case of flush failure or
	 * timeout; in the worst case it can delay for 6 seconds
	 */
	for (retry_count = 0;
	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_tx_qflush(txq->common) != 0) {
			txq->state |= SFC_TXQ_FLUSHING;
			break;
		}

		/*
		 * Wait for TX queue flush done or flush failed event at least
		 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(txq->evq);
		} while ((txq->state & SFC_TXQ_FLUSHING) &&
			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

		if (txq->state & SFC_TXQ_FLUSHING)
			sfc_err(sa, "TxQ %u flush timed out", sw_index);

		if (txq->state & SFC_TXQ_FLUSHED)
			sfc_info(sa, "TxQ %u flushed", sw_index);
	}

	sfc_tx_reap(txq);

	for (txds = 0; txds < txq_info->entries; txds++) {
		if (txq->sw_ring[txds].mbuf != NULL) {
			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
			txq->sw_ring[txds].mbuf = NULL;
		}
	}

	txq->state = SFC_TXQ_INITIALIZED;

	efx_tx_qdestroy(txq->common);

	sfc_ev_qstop(sa, txq->evq->evq_index);

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
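
/*
 * Start datapath Tx and all Tx queues not marked for deferred start.
 * TSO availability is re-checked first since firmware reconfiguration
 * may have removed the capability; in that case TSO is switched off
 * with a warning rather than failing the start.
 */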
int
sfc_tx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc = 0;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	if (sa->tso) {
		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
			sfc_warn(sa, "TSO support was unable to be restored");
			sa->tso = B_FALSE;
		}
	}

	rc = efx_tx_init(sa->nic);
	if (rc != 0)
		goto fail_efx_tx_init;

	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
		if (!(sa->txq_info[sw_index].deferred_start) ||
		    sa->txq_info[sw_index].deferred_started) {
			rc = sfc_tx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_tx_qstart;
		}
	}

	return 0;

fail_tx_qstart:
	while (sw_index-- > 0)
		sfc_tx_qstop(sa, sw_index);

	efx_tx_fini(sa->nic);

fail_efx_tx_init:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
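
/*
 * Stop all Tx queues which are still set up and finalize datapath Tx
 * in libefx.
 */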
void
sfc_tx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	sw_index = sa->txq_count;
	while (sw_index-- > 0) {
		if (sa->txq_info[sw_index].txq != NULL)
			sfc_tx_qstop(sa, sw_index);
	}

	efx_tx_fini(sa->nic);
}
/*
 * The function is used to insert or update a VLAN tag;
 * the firmware keeps per-TxQ state of the tag to insert
 * (controlled by option descriptors), hence, if the tag of the
 * packet to be sent differs from the one remembered by the firmware,
 * the function will update it
 */
static unsigned int
sfc_tx_maybe_insert_tag(struct sfc_txq *txq, struct rte_mbuf *m,
			efx_desc_t **pend)
{
	uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
			     m->vlan_tci : 0);

	if (this_tag == txq->hw_vlan_tci)
		return 0;

	/*
	 * The expression inside SFC_ASSERT() is not desired to be checked in
	 * a non-debug build because it might be too expensive on the data path
	 */
	SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);

	efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
				    *pend);
	(*pend)++;
	txq->hw_vlan_tci = this_tag;

	return 1;
}
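
/*
 * Burst transmit: for each packet, insert a VLAN option descriptor if
 * needed, run the TSO path for mbufs with PKT_TX_TCP_SEG set, and emit
 * one DMA descriptor per fragment of each segment (fragments are
 * limited by 'dma_desc_size_max'). Pending descriptors are then posted
 * and pushed to the hardware with a doorbell write.
 */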
uint16_t
sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_txq *txq = (struct sfc_txq *)tx_queue;
	unsigned int added = txq->added;
	unsigned int pushed = added;
	unsigned int pkts_sent = 0;
	efx_desc_t *pend = &txq->pend_desc[0];
	const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
	const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
	unsigned int fill_level = added - txq->completed;
	boolean_t reap_done;
	int rc __rte_unused;
	struct rte_mbuf **pktp;

	if (unlikely((txq->state & SFC_TXQ_RUNNING) == 0))
		goto done;

	/*
	 * If insufficient space for a single packet is present,
	 * we should reap; otherwise, we shouldn't do that all the time
	 * to avoid latency increase
	 */
	reap_done = (fill_level > soft_max_fill);

	if (reap_done) {
		sfc_tx_reap(txq);
		/*
		 * Recalculate fill level since 'txq->completed'
		 * might have changed on reap
		 */
		fill_level = added - txq->completed;
	}

	for (pkts_sent = 0, pktp = &tx_pkts[0];
	     (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
	     pkts_sent++, pktp++) {
		struct rte_mbuf *m_seg = *pktp;
		size_t pkt_len = m_seg->pkt_len;
		unsigned int pkt_descs = 0;
		size_t in_off = 0;

		/*
		 * Here VLAN TCI is expected to be zero if no
		 * DEV_TX_VLAN_OFFLOAD capability is advertised;
		 * if the calling app ignores the absence of
		 * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
		 * TX_ERROR will occur
		 */
		pkt_descs += sfc_tx_maybe_insert_tag(txq, m_seg, &pend);

		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
			/*
			 * We expect 'pkt->l[2, 3, 4]_len' values
			 * to be set correctly by the caller
			 */
			if (sfc_tso_do(txq, added, &m_seg, &in_off, &pend,
				       &pkt_descs, &pkt_len) != 0) {
				/* We may have reached this place for
				 * one of the following reasons:
				 *
				 * 1) Packet header length is greater
				 *    than SFC_TSOH_STD_LEN
				 * 2) TCP header starts at more than
				 *    208 bytes into the frame
				 *
				 * We will deceive RTE saying that we have sent
				 * the packet, but we will actually drop it.
				 * Hence, we should revert 'pend' to the
				 * previous state (in case we have added
				 * a VLAN descriptor) and start processing
				 * the next packet. But the original
				 * mbuf shouldn't be orphaned
				 */
				pend -= pkt_descs;

				rte_pktmbuf_free(*pktp);

				continue;
			}

			/*
			 * We've only added 2 FATSOv2 option descriptors
			 * and 1 descriptor for the linearized packet header.
			 * The outstanding work will be done in the same manner
			 * as for the usual non-TSO path
			 */
		}

		for (; m_seg != NULL; m_seg = m_seg->next) {
			efsys_dma_addr_t next_frag;
			size_t seg_len;

			seg_len = m_seg->data_len;
			next_frag = rte_mbuf_data_dma_addr(m_seg);

			/*
			 * If we've started a TSO transaction a few steps
			 * earlier, we'll skip the packet header using an
			 * offset in the current segment (which has been
			 * set to the first one containing payload)
			 */
			seg_len -= in_off;
			next_frag += in_off;
			in_off = 0;

			do {
				efsys_dma_addr_t frag_addr = next_frag;
				size_t frag_len;

				/*
				 * It is assumed here that there is no
				 * limitation on address boundary
				 * crossing by DMA descriptor.
				 */
				frag_len = MIN(seg_len, txq->dma_desc_size_max);
				next_frag += frag_len;
				seg_len -= frag_len;
				pkt_len -= frag_len;

				efx_tx_qdesc_dma_create(txq->common,
							frag_addr, frag_len,
							(pkt_len == 0),
							pend++);

				pkt_descs++;
			} while (seg_len != 0);
		}

		added += pkt_descs;

		fill_level += pkt_descs;
		if (unlikely(fill_level > hard_max_fill)) {
			/*
			 * Our estimate of the maximum number of descriptors
			 * required to send a packet seems to be wrong.
			 * Try to reap (if we haven't yet).
			 */
			if (!reap_done) {
				sfc_tx_reap(txq);
				reap_done = B_TRUE;
				fill_level = added - txq->completed;
				if (fill_level > hard_max_fill) {
					pend -= pkt_descs;
					break;
				}
			} else {
				pend -= pkt_descs;
				break;
			}
		}

		/* Assign mbuf to the last used desc */
		txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
	}

	if (likely(pkts_sent > 0)) {
		rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
				       pend - &txq->pend_desc[0],
				       txq->completed, &txq->added);
		SFC_ASSERT(rc == 0);

		if (likely(pushed != txq->added))
			efx_tx_qpush(txq->common, txq->added, pushed);
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE