/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_tx.h"
#include "sfc_tweak.h"
/*
 * Maximum number of TX queue flush attempts in case of
 * failure or flush timeout
 */
#define SFC_TX_QFLUSH_ATTEMPTS (3)
/*
 * Time to wait between event queue polling attempts when waiting for TX
 * queue flush done or flush failed events
 */
#define SFC_TX_QFLUSH_POLL_WAIT_MS (1)
/*
 * Maximum number of event queue polling attempts when waiting for TX queue
 * flush done or flush failed events; it defines TX queue flush attempt timeout
 * together with SFC_TX_QFLUSH_POLL_WAIT_MS
 */
#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
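/*
 * Worst-case wait arithmetic: each flush attempt polls up to
 * SFC_TX_QFLUSH_POLL_ATTEMPTS (2000) times with a delay of
 * SFC_TX_QFLUSH_POLL_WAIT_MS (1 ms), i.e. about 2 seconds per attempt;
 * with SFC_TX_QFLUSH_ATTEMPTS (3) retries, sfc_tx_qstop() may therefore
 * delay for up to about 6 seconds in total.
 */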
sfc_tx_qcheck_conf(struct sfc_adapter *sa,
                   const struct rte_eth_txconf *tx_conf)
        unsigned int flags = tx_conf->txq_flags;

        if (tx_conf->tx_rs_thresh != 0) {
                sfc_err(sa, "RS bit in transmit descriptor is not supported");

        if (tx_conf->tx_free_thresh != 0) {
                sfc_err(sa,
                        "setting explicit TX free threshold is not supported");

        if (tx_conf->tx_deferred_start != 0) {
                sfc_err(sa, "TX queue deferred start is not supported (yet)");

        if (tx_conf->tx_thresh.pthresh != 0 ||
            tx_conf->tx_thresh.hthresh != 0 ||
            tx_conf->tx_thresh.wthresh != 0) {
                sfc_err(sa,
                        "prefetch/host/writeback thresholds are not supported");

        if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
                sfc_err(sa, "VLAN offload is not supported");

        if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
                sfc_err(sa, "SCTP offload is not supported");

        /* We either perform both TCP and UDP offload, or no offload at all */
        if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
            ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
                sfc_err(sa, "TCP and UDP offloads can't be set independently");
sfc_tx_qflush_done(struct sfc_txq *txq)
        txq->state |= SFC_TXQ_FLUSHED;
        txq->state &= ~SFC_TXQ_FLUSHING;
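/*
 * Reap completed transmit descriptors: polling the event queue advances
 * txq->pending, and every software descriptor in the range
 * [txq->completed, txq->pending) may carry an mbuf that can now be freed.
 */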
sfc_tx_reap(struct sfc_txq *txq)
        unsigned int completed;

        sfc_ev_qpoll(txq->evq);

        for (completed = txq->completed;
             completed != txq->pending; completed++) {
                struct sfc_tx_sw_desc *txd;

                txd = &txq->sw_ring[completed & txq->ptr_mask];

                if (txd->mbuf != NULL) {
                        rte_pktmbuf_free(txd->mbuf);

        txq->completed = completed;
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
             uint16_t nb_tx_desc, unsigned int socket_id,
             const struct rte_eth_txconf *tx_conf)
        struct sfc_txq_info *txq_info;
        unsigned int evq_index = sfc_evq_index_by_txq_sw_index(sa, sw_index);

        sfc_log_init(sa, "TxQ = %u", sw_index);

        rc = sfc_tx_qcheck_conf(sa, tx_conf);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
        txq_info->entries = nb_tx_desc;

        rc = sfc_ev_qinit(sa, evq_index, txq_info->entries, socket_id);

        evq = sa->evq_info[evq_index].evq;

        txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);

        rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
                           socket_id, &txq->mem);

        txq->pend_desc = rte_calloc_socket("sfc-txq-pend-desc",
                                           EFX_TXQ_LIMIT(txq_info->entries),
                                           sizeof(efx_desc_t), 0, socket_id);
        if (txq->pend_desc == NULL)
                goto fail_pend_desc_alloc;

        txq->sw_ring = rte_calloc_socket("sfc-txq-desc", txq_info->entries,
                                         sizeof(*txq->sw_ring), 0, socket_id);
        if (txq->sw_ring == NULL)
                goto fail_desc_alloc;

        txq->state = SFC_TXQ_INITIALIZED;
        txq->ptr_mask = txq_info->entries - 1;
        txq->hw_index = sw_index;
        txq->flags = tx_conf->txq_flags;

        rte_free(txq->pend_desc);

fail_pend_desc_alloc:
        sfc_dma_free(sa, &txq->mem);

        sfc_ev_qfini(sa, evq_index);

        txq_info->entries = 0;

        sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
        struct sfc_txq_info *txq_info;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(txq != NULL);
        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        txq_info->txq = NULL;
        txq_info->entries = 0;

        rte_free(txq->sw_ring);
        rte_free(txq->pend_desc);
        sfc_dma_free(sa, &txq->mem);
sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
        sfc_log_init(sa, "TxQ = %u", sw_index);
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
        switch (txmode->mq_mode) {
                sfc_err(sa, "Tx multi-queue mode %u not supported",
                        txmode->mq_mode);

        /*
         * These features are claimed to be i40e-specific,
         * but it makes sense to double-check that they are not requested
         */
        if (txmode->hw_vlan_reject_tagged) {
                sfc_err(sa, "Rejecting tagged packets not supported");

        if (txmode->hw_vlan_reject_untagged) {
                sfc_err(sa, "Rejecting untagged packets not supported");

        if (txmode->hw_vlan_insert_pvid) {
                sfc_err(sa, "Port-based VLAN insertion not supported");
sfc_tx_init(struct sfc_adapter *sa)
        const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
        unsigned int sw_index;

        rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
        if (rc != 0)
                goto fail_check_mode;

        sa->txq_count = sa->eth_dev->data->nb_tx_queues;

        sa->txq_info = rte_calloc_socket("sfc-txqs", sa->txq_count,
                                         sizeof(sa->txq_info[0]), 0,
        if (sa->txq_info == NULL)
                goto fail_txqs_alloc;

        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
                rc = sfc_tx_qinit_info(sa, sw_index);
                if (rc != 0)
                        goto fail_tx_qinit_info;

        rte_free(sa->txq_info);

        sfc_log_init(sa, "failed (rc = %d)", rc);
sfc_tx_fini(struct sfc_adapter *sa)
        sw_index = sa->txq_count;
        while (--sw_index >= 0) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qfini(sa, sw_index);

        rte_free(sa->txq_info);
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        unsigned int desc_index;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

        rc = sfc_ev_qstart(sa, evq->evq_index);
        /*
         * It seems that DPDK has no controls regarding IPv4 offloads;
         * hence, we always enable it here
         */
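        /*
         * As a consequence of the flag selection below, IPv4 header checksum
         * offload is requested from the hardware unconditionally, while
         * TCP/UDP checksum offload is requested only when neither NOXSUM
         * flag is set.
         */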
        if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
            (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP))
                flags = EFX_TXQ_CKSUM_IPV4;
        else
                flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;

        rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
                            txq_info->entries, 0 /* not used on EF10 */,
                            &txq->common, &desc_index);
        if (rc != 0)
                goto fail_tx_qcreate;
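        /*
         * efx_tx_qcreate() reports the initial hardware descriptor index in
         * 'desc_index'; all three ring pointers start from it.
         */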
        txq->added = txq->pending = txq->completed = desc_index;

        efx_tx_qenable(txq->common);

        txq->state |= (SFC_TXQ_STARTED | SFC_TXQ_RUNNING);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

        sfc_ev_qstop(sa, evq->evq_index);
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
        struct rte_eth_dev_data *dev_data;
        struct sfc_txq_info *txq_info;
        unsigned int retry_count;
        unsigned int wait_count;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        SFC_ASSERT(sw_index < sa->txq_count);
        txq_info = &sa->txq_info[sw_index];

        SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

        txq->state &= ~SFC_TXQ_RUNNING;

        /*
         * Retry TX queue flushing if a flush fails or times out;
         * in the worst case this can delay for up to 6 seconds
         */
        for (retry_count = 0;
             ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
             (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
             ++retry_count) {
                if (efx_tx_qflush(txq->common) != 0) {
                        txq->state |= SFC_TXQ_FLUSHING;
                /*
                 * Wait for the TX queue flush done or flush failed event
                 * for at least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and
                 * at most 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
                 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
                 */
                wait_count = 0;
                do {
                        rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
                        sfc_ev_qpoll(txq->evq);
                } while ((txq->state & SFC_TXQ_FLUSHING) &&
                         wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

                if (txq->state & SFC_TXQ_FLUSHING)
                        sfc_err(sa, "TxQ %u flush timed out", sw_index);

                if (txq->state & SFC_TXQ_FLUSHED)
                        sfc_info(sa, "TxQ %u flushed", sw_index);
        for (txds = 0; txds < txq_info->entries; txds++) {
                if (txq->sw_ring[txds].mbuf != NULL) {
                        rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
                        txq->sw_ring[txds].mbuf = NULL;

        txq->state = SFC_TXQ_INITIALIZED;

        efx_tx_qdestroy(txq->common);

        sfc_ev_qstop(sa, txq->evq->evq_index);

        /*
         * It seems to be used by DPDK for debug purposes only ('rte_ether')
         */
        dev_data = sa->eth_dev->data;
        dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
sfc_tx_start(struct sfc_adapter *sa)
        unsigned int sw_index;

        sfc_log_init(sa, "txq_count = %u", sa->txq_count);

        rc = efx_tx_init(sa->nic);
        if (rc != 0)
                goto fail_efx_tx_init;

        for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
                rc = sfc_tx_qstart(sa, sw_index);

        while (sw_index-- > 0)
                sfc_tx_qstop(sa, sw_index);

        efx_tx_fini(sa->nic);

        sfc_log_init(sa, "failed (rc = %d)", rc);
sfc_tx_stop(struct sfc_adapter *sa)
        unsigned int sw_index;

        sfc_log_init(sa, "txq_count = %u", sa->txq_count);

        sw_index = sa->txq_count;
        while (sw_index-- > 0) {
                if (sa->txq_info[sw_index].txq != NULL)
                        sfc_tx_qstop(sa, sw_index);

        efx_tx_fini(sa->nic);
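/*
 * Transmit burst entry point. Ring bookkeeping: 'added' counts descriptors
 * ever placed on the ring, 'completed' counts those already reaped, so
 * 'added - completed' is the current fill level; ring slots are addressed
 * with '& ptr_mask', the ring size being a power of two (ptr_mask is
 * entries - 1).
 */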
sfc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct sfc_txq *txq = (struct sfc_txq *)tx_queue;
        unsigned int added = txq->added;
        unsigned int pushed = added;
        unsigned int pkts_sent = 0;
        efx_desc_t *pend = &txq->pend_desc[0];
        const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
        const unsigned int soft_max_fill = hard_max_fill -
        unsigned int fill_level = added - txq->completed;
        struct rte_mbuf **pktp;
        if (unlikely((txq->state & SFC_TXQ_RUNNING) == 0))

        /*
         * If there is not enough free space for at least one more packet,
         * reap now; otherwise avoid reaping on every call so that latency
         * is not increased needlessly
         */
        reap_done = (fill_level > soft_max_fill);

        if (reap_done) {
                sfc_tx_reap(txq);

                /*
                 * Recalculate fill level since 'txq->completed'
                 * might have changed on reap
                 */
                fill_level = added - txq->completed;
        for (pkts_sent = 0, pktp = &tx_pkts[0];
             (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
             pkts_sent++, pktp++) {
                struct rte_mbuf *m_seg = *pktp;
                size_t pkt_len = m_seg->pkt_len;
                unsigned int pkt_descs = 0;
                for (; m_seg != NULL; m_seg = m_seg->next) {
                        efsys_dma_addr_t next_frag;

                        seg_len = m_seg->data_len;
                        next_frag = rte_mbuf_data_dma_addr(m_seg);
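                        /*
                         * Chop the mbuf data segment at
                         * SFC_TX_SEG_BOUNDARY-aligned addresses so that no
                         * single DMA descriptor crosses such a boundary;
                         * each fragment produced below consumes one
                         * descriptor from pend_desc.
                         */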
                        do {
                                efsys_dma_addr_t frag_addr = next_frag;

                                next_frag = RTE_ALIGN(frag_addr + 1,
                                                      SFC_TX_SEG_BOUNDARY);
                                frag_len = MIN(next_frag - frag_addr, seg_len);

                                efx_tx_qdesc_dma_create(txq->common,
                        } while (seg_len != 0);
                fill_level += pkt_descs;
                if (unlikely(fill_level > hard_max_fill)) {
                        /*
                         * Our estimate of the maximum number of descriptors
                         * needed to send the packet turned out to be wrong.
                         * Try to reap (if we have not done so yet).
                         */
                        fill_level = added - txq->completed;
                        if (fill_level > hard_max_fill) {

                /* Assign mbuf to the last used desc */
                txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
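        /*
         * efx_tx_qdesc_post() below posts the prepared descriptors and
         * updates txq->added; the doorbell (efx_tx_qpush()) is rung only
         * when descriptors were actually added, since 'pushed' holds the
         * value of 'added' at entry.
         */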
        if (likely(pkts_sent > 0)) {
                rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
                                       pend - &txq->pend_desc[0],
                                       txq->completed, &txq->added);

                if (likely(pushed != txq->added))
                        efx_tx_qpush(txq->common, txq->added, pushed);
#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE