/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
/*
 * Maximum number of TX queue flush attempts in case of
 * failure or flush timeout
 */
#define SFC_TX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for TX
 * queue flush done or flush failed events
 */
#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for TX queue
 * flush done or flush failed events; it defines TX queue flush attempt timeout
 * together with SFC_TX_QFLUSH_POLL_WAIT_MS
 */
#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
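
/*
 * With the values above, sfc_tx_qstop() may poll for flush events for up to
 * SFC_TX_QFLUSH_ATTEMPTS * SFC_TX_QFLUSH_POLL_ATTEMPTS *
 * SFC_TX_QFLUSH_POLL_WAIT_MS, i.e. roughly 6 seconds in the worst case.
 */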

uint64_t
sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t caps = 0;

	if ((sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) &&
	    encp->enc_hw_tx_insert_vlan_enabled)
		caps |= DEV_TX_OFFLOAD_VLAN_INSERT;

	if (sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
		caps |= DEV_TX_OFFLOAD_MULTI_SEGS;
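
	/*
	 * DEV_TX_OFFLOAD_MBUF_FAST_FREE is reported only if the datapath
	 * supports neither multiple mempools nor mbuf reference counters,
	 * since the fast-free contract (all mbufs from one pool, reference
	 * count of one) is exactly what such a datapath relies on.
	 */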
	if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL) &&
	    (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT))
		caps |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return caps;
}

uint64_t
sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t caps = 0;

	caps |= DEV_TX_OFFLOAD_IPV4_CKSUM;
	caps |= DEV_TX_OFFLOAD_UDP_CKSUM;
	caps |= DEV_TX_OFFLOAD_TCP_CKSUM;

	if (encp->enc_tunnel_encapsulations_supported)
		caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	if (sa->tso)
		caps |= DEV_TX_OFFLOAD_TCP_TSO;

	return caps;
}

static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
		   const struct rte_eth_txconf *tx_conf,
		   uint64_t offloads)
{
	int rc = 0;

	if (tx_conf->tx_rs_thresh != 0) {
		sfc_err(sa, "RS bit in transmit descriptor is not supported");
		rc = EINVAL;
	}

	if (tx_conf->tx_free_thresh > txq_max_fill_level) {
		sfc_err(sa,
			"TxQ free threshold too large: %u vs maximum %u",
			tx_conf->tx_free_thresh, txq_max_fill_level);
		rc = EINVAL;
	}

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"prefetch/host/writeback thresholds are not supported");
	}

	/* We either perform both TCP and UDP offload, or no offload at all */
	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
		sfc_err(sa, "TCP and UDP offloads can't be set independently");
		rc = EINVAL;
	}

	return rc;
}

void
sfc_tx_qflush_done(struct sfc_txq *txq)
{
	txq->state |= SFC_TXQ_FLUSHED;
	txq->state &= ~SFC_TXQ_FLUSHING;
}

int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_tx_desc, unsigned int socket_id,
	     const struct rte_eth_txconf *tx_conf)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int txq_entries;
	unsigned int evq_entries;
	unsigned int txq_max_fill_level;
	struct sfc_txq_info *txq_info;
	struct sfc_evq *evq;
	struct sfc_txq *txq;
	int rc = 0;
	struct sfc_dp_tx_qcreate_info info;
	uint64_t offloads;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	rc = sa->dp_tx->qsize_up_rings(nb_tx_desc, &txq_entries, &evq_entries,
				       &txq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
	SFC_ASSERT(txq_entries <= sa->txq_max_entries);
	SFC_ASSERT(txq_entries >= nb_tx_desc);
	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
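
	/*
	 * Per-queue offloads requested in tx_conf are combined with the
	 * device-level offloads configured via txmode: a feature enabled at
	 * either level must be honoured for this queue.
	 */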
	offloads = tx_conf->offloads |
		   sa->eth_dev->data->dev_conf.txmode.offloads;
	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
	if (rc != 0)
		goto fail_bad_conf;

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq_info->entries = txq_entries;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	txq_info->txq = txq;

	txq->hw_index = sw_index;
	txq->evq = evq;
	txq->free_thresh =
		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
		SFC_TX_DEFAULT_FREE_THRESH;
	txq->flags = tx_conf->txq_flags;
	txq->offloads = offloads;

	rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
			   socket_id, &txq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.max_fill_level = txq_max_fill_level;
	info.free_thresh = txq->free_thresh;
	info.flags = tx_conf->txq_flags;
	info.offloads = offloads;
	info.txq_entries = txq_info->entries;
	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
	info.txq_hw_ring = txq->mem.esm_base;
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = txq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;
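
	/*
	 * Everything the chosen datapath needs to create its transmit queue
	 * (ring sizes, thresholds, offloads, DMA ring bases, BAR and VI
	 * window geometry) is passed through 'info' so that datapath
	 * implementations do not have to reach back into control path
	 * structures.
	 */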
	rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
				&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				socket_id, &info, &txq->dp);
	if (rc != 0)
		goto fail_dp_tx_qinit;

	evq->dp_txq = txq->dp;

	txq->state = SFC_TXQ_INITIALIZED;

	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

	return 0;

fail_dp_tx_qinit:
	sfc_dma_free(sa, &txq->mem);

fail_dma_alloc:
	txq_info->txq = NULL;
	rte_free(txq);

fail_txq_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	txq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
	return rc;
}

void
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	sa->dp_tx->qdestroy(txq->dp);
	txq->dp = NULL;

	txq_info->txq = NULL;
	txq_info->entries = 0;

	sfc_dma_free(sa, &txq->mem);

	sfc_ev_qfini(txq->evq);
	txq->evq = NULL;

	rte_free(txq);
}

static int
sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "TxQ = %u", sw_index);

	return 0;
}

static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
	int rc = 0;

	switch (txmode->mq_mode) {
	case ETH_MQ_TX_NONE:
		break;
	default:
		sfc_err(sa, "Tx multi-queue mode %u not supported",
			txmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * These features are claimed to be i40e-specific,
	 * but it does make sense to double-check their absence
	 */
	if (txmode->hw_vlan_reject_tagged) {
		sfc_err(sa, "Rejecting tagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_reject_untagged) {
		sfc_err(sa, "Rejecting untagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_insert_pvid) {
		sfc_err(sa, "Port-based VLAN insertion not supported");
		rc = EINVAL;
	}

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
{
	int sw_index;

	SFC_ASSERT(nb_tx_queues <= sa->txq_count);

	sw_index = sa->txq_count;
	while (--sw_index >= (int)nb_tx_queues) {
		if (sa->txq_info[sw_index].txq != NULL)
			sfc_tx_qfini(sa, sw_index);
	}

	sa->txq_count = nb_tx_queues;
}

int
sfc_tx_configure(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
	int rc = 0;

	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
		     nb_tx_queues, sa->txq_count);

	/*
	 * The datapath implementation assumes absence of boundary
	 * limits on Tx DMA descriptors. Addition of these checks on
	 * datapath would simply make the datapath slower.
	 */
	if (encp->enc_tx_dma_desc_boundary != 0) {
		rc = ENOTSUP;
		goto fail_tx_dma_desc_boundary;
	}

	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_tx_queues == sa->txq_count)
		goto done;

	if (sa->txq_info == NULL) {
		sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
						 sizeof(sa->txq_info[0]), 0,
						 sa->socket_id);
		if (sa->txq_info == NULL)
			goto fail_txqs_alloc;
	} else {
		struct sfc_txq_info *new_txq_info;

		if (nb_tx_queues < sa->txq_count)
			sfc_tx_fini_queues(sa, nb_tx_queues);

		new_txq_info =
			rte_realloc(sa->txq_info,
				    nb_tx_queues * sizeof(sa->txq_info[0]), 0);
		if (new_txq_info == NULL && nb_tx_queues > 0)
			goto fail_txqs_realloc;

		sa->txq_info = new_txq_info;
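
		/*
		 * rte_realloc() preserves the existing entries but leaves any
		 * newly grown tail uninitialised, so zero the added entries
		 * before they are used.
		 */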
		if (nb_tx_queues > sa->txq_count)
			memset(&sa->txq_info[sa->txq_count], 0,
			       (nb_tx_queues - sa->txq_count) *
			       sizeof(sa->txq_info[0]));
	}

	while (sa->txq_count < nb_tx_queues) {
		rc = sfc_tx_qinit_info(sa, sa->txq_count);
		if (rc != 0)
			goto fail_tx_qinit_info;

		sa->txq_count++;
	}

done:
	return 0;

fail_tx_qinit_info:
fail_txqs_realloc:
fail_txqs_alloc:
	sfc_tx_close(sa);

fail_check_mode:
fail_tx_dma_desc_boundary:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}

void
sfc_tx_close(struct sfc_adapter *sa)
{
	sfc_tx_fini_queues(sa, 0);

	rte_free(sa->txq_info);
	sa->txq_info = NULL;
}

int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
				      sfc_tx_get_queue_offload_caps(sa);
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	struct sfc_evq *evq;
	uint16_t flags = 0;
	unsigned int desc_index;
	int rc = 0;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);

	evq = txq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	/*
	 * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
	 * application which expects that IPv4 checksum offload is enabled
	 * all the time as there is no legacy flag to turn off the offload.
	 */
	if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
	    (~txq->flags & ETH_TXQ_FLAGS_IGNORE))
		flags |= EFX_TXQ_CKSUM_IPV4;

	if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	    ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
	     (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)))
		flags |= EFX_TXQ_CKSUM_INNER_IPV4;

	if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
	    (txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
		flags |= EFX_TXQ_CKSUM_TCPUDP;

		if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
		    (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
	}

	/*
	 * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
	 * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is
	 * associated specifically with a legacy application which expects
	 * both TCP checksum offload and TSO to be enabled because the legacy
	 * API does not provide a dedicated mechanism to control TSO.
	 */
	if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) ||
	    ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
	     (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP)))
		flags |= EFX_TXQ_FATSOV2;
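
	/*
	 * At this point the requested offloads and legacy txq_flags have been
	 * translated into EFX_TXQ_* flags understood by common code; the
	 * 'desc_index' returned below is the initial hardware descriptor
	 * index the datapath must start from.
	 */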
	rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
			    txq_info->entries, 0 /* not used on EF10 */,
			    flags, evq->common,
			    &txq->common, &desc_index);
	if (rc != 0) {
		if (sa->tso && (rc == ENOSPC))
			sfc_err(sa, "ran out of TSO contexts");

		goto fail_tx_qcreate;
	}

	efx_tx_qenable(txq->common);

	txq->state |= SFC_TXQ_STARTED;

	rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
	if (rc != 0)
		goto fail_dp_qstart;

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_dp_qstart:
	txq->state = SFC_TXQ_INITIALIZED;
	efx_tx_qdestroy(txq->common);

fail_tx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}

void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sa->txq_count);
	txq_info = &sa->txq_info[sw_index];

	txq = txq_info->txq;

	if (txq->state == SFC_TXQ_INITIALIZED)
		return;

	SFC_ASSERT(txq->state & SFC_TXQ_STARTED);

	sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);

	/*
	 * Retry TX queue flushing in case of flush failed or
	 * timeout; in the worst case it can delay for 6 seconds
	 */
	for (retry_count = 0;
	     ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_tx_qflush(txq->common);
		if (rc != 0) {
			txq->state |= (rc == EALREADY) ?
				SFC_TXQ_FLUSHED : SFC_TXQ_FLUSH_FAILED;
			break;
		}

		/*
		 * Wait for TX queue flush done or flush failed event at least
		 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(txq->evq);
		} while ((txq->state & SFC_TXQ_FLUSHING) &&
			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

		if (txq->state & SFC_TXQ_FLUSHING)
			sfc_err(sa, "TxQ %u flush timed out", sw_index);

		if (txq->state & SFC_TXQ_FLUSHED)
			sfc_notice(sa, "TxQ %u flushed", sw_index);
	}
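
	/*
	 * Reap the datapath queue regardless of the flush outcome so that
	 * any mbufs still referenced by in-flight descriptors are released
	 * before the queue is destroyed.
	 */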
	sa->dp_tx->qreap(txq->dp);

	txq->state = SFC_TXQ_INITIALIZED;

	efx_tx_qdestroy(txq->common);

	sfc_ev_qstop(txq->evq);

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}

int
sfc_tx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc = 0;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	if (sa->tso) {
		if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
			sfc_warn(sa, "TSO support was unable to be restored");
			sa->tso = B_FALSE;
		}
	}

	rc = efx_tx_init(sa->nic);
	if (rc != 0)
		goto fail_efx_tx_init;

	for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
		if (!(sa->txq_info[sw_index].deferred_start) ||
		    sa->txq_info[sw_index].deferred_started) {
			rc = sfc_tx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_tx_qstart;
		}
	}

	return 0;

fail_tx_qstart:
	while (sw_index-- > 0)
		sfc_tx_qstop(sa, sw_index);

	efx_tx_fini(sa->nic);

fail_efx_tx_init:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}

void
sfc_tx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "txq_count = %u", sa->txq_count);

	sw_index = sa->txq_count;
	while (sw_index-- > 0) {
		if (sa->txq_info[sw_index].txq != NULL)
			sfc_tx_qstop(sa, sw_index);
	}

	efx_tx_fini(sa->nic);
}
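
/*
 * Everything below implements the generic libefx-based Tx datapath
 * ("efx", see SFC_KVARG_DATAPATH_EFX): reaping of completed descriptors,
 * VLAN option descriptor handling, the transmit burst routine and the
 * sfc_dp_tx callbacks registered in sfc_efx_tx at the end of the file.
 */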

static void
sfc_efx_tx_reap(struct sfc_efx_txq *txq)
{
	unsigned int completed;

	sfc_ev_qpoll(txq->evq);

	for (completed = txq->completed;
	     completed != txq->pending; completed++) {
		struct sfc_efx_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];

		if (txd->mbuf != NULL) {
			rte_pktmbuf_free(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->completed = completed;
}

/*
 * The function is used to insert or update a VLAN tag;
 * the firmware keeps per-TxQ state of the tag to insert
 * (controlled by option descriptors), hence, if the tag of the
 * packet to be sent is different from the one remembered by the firmware,
 * the function will update it
 */
static unsigned int
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
			    efx_desc_t **pend)
{
	uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
			     m->vlan_tci : 0);

	if (this_tag == txq->hw_vlan_tci)
		return 0;

	/*
	 * The expression inside SFC_ASSERT() is not desired to be checked in
	 * a non-debug build because it might be too expensive on the data path
	 */
	SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);

	efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
				    *pend);
	(*pend)++;
	txq->hw_vlan_tci = this_tag;

	return 1;
}

static uint16_t
sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	unsigned int added = txq->added;
	unsigned int pushed = added;
	unsigned int pkts_sent = 0;
	efx_desc_t *pend = &txq->pend_desc[0];
	const unsigned int hard_max_fill = txq->max_fill_level;
	const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
	unsigned int fill_level = added - txq->completed;
	boolean_t reap_done;
	int rc __rte_unused;
	struct rte_mbuf **pktp;
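
	/*
	 * fill_level is the number of descriptors currently in flight
	 * (added but not yet completed). soft_max_fill keeps free_thresh
	 * descriptors of headroom and only triggers reaping when exceeded;
	 * hard_max_fill is the absolute ring capacity that must never be
	 * crossed.
	 */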
	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
		goto done;

	/*
	 * If insufficient space for a single packet is present,
	 * we should reap; otherwise, we shouldn't do that all the time
	 * to avoid latency increase
	 */
	reap_done = (fill_level > soft_max_fill);

	if (reap_done) {
		sfc_efx_tx_reap(txq);

		/*
		 * Recalculate fill level since 'txq->completed'
		 * might have changed on reap
		 */
		fill_level = added - txq->completed;
	}

	for (pkts_sent = 0, pktp = &tx_pkts[0];
	     (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
	     pkts_sent++, pktp++) {
		struct rte_mbuf *m_seg = *pktp;
		size_t pkt_len = m_seg->pkt_len;
		unsigned int pkt_descs = 0;
		size_t in_off = 0;

		/*
		 * Here VLAN TCI is expected to be zero in case if no
		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
		 * if the calling app ignores the absence of
		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
		 * TX_ERROR will occur
		 */
		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);

		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
			/*
			 * We expect 'pkt->l[2, 3, 4]_len' values
			 * to be set correctly by the caller
			 */
			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
					   &pkt_descs, &pkt_len) != 0) {
				/* We may have reached this place for
				 * one of the following reasons:
				 *
				 * 1) Packet header length is greater
				 *    than SFC_TSOH_STD_LEN
				 * 2) TCP header starts at more than
				 *    208 bytes into the frame
				 *
				 * We will deceive RTE saying that we have sent
				 * the packet, but we will actually drop it.
				 * Hence, we should revert 'pend' to the
				 * previous state (in case we have added a
				 * VLAN descriptor) and start processing
				 * another packet. But the original
				 * mbuf shouldn't be orphaned
				 */
				pend -= pkt_descs;

				rte_pktmbuf_free(*pktp);

				continue;
			}

			/*
			 * We've only added 2 FATSOv2 option descriptors
			 * and 1 descriptor for the linearized packet header.
			 * The outstanding work will be done in the same manner
			 * as for the usual non-TSO path
			 */
		}

		for (; m_seg != NULL; m_seg = m_seg->next) {
			efsys_dma_addr_t next_frag;
			size_t seg_len;

			seg_len = m_seg->data_len;
			next_frag = rte_mbuf_data_iova(m_seg);

			/*
			 * If we've started a TSO transaction a few steps
			 * earlier, we'll skip the packet header using an
			 * offset in the current segment (which has been set
			 * to the first one containing payload)
			 */
			seg_len -= in_off;
			next_frag += in_off;
			in_off = 0;

			do {
				efsys_dma_addr_t frag_addr = next_frag;
				size_t frag_len;

				/*
				 * It is assumed here that there is no
				 * limitation on address boundary
				 * crossing by DMA descriptor.
				 */
				frag_len = MIN(seg_len, txq->dma_desc_size_max);
				next_frag += frag_len;
				seg_len -= frag_len;
				pkt_len -= frag_len;

				efx_tx_qdesc_dma_create(txq->common,
							frag_addr, frag_len,
							(pkt_len == 0),
							pend++);

				pkt_descs++;
			} while (seg_len != 0);
		}

		added += pkt_descs;

		fill_level += pkt_descs;
		if (unlikely(fill_level > hard_max_fill)) {
			/*
			 * Our estimation for maximum number of descriptors
			 * required to send a packet seems to be wrong.
			 * Try to reap (if we haven't yet).
			 */
			if (!reap_done) {
				sfc_efx_tx_reap(txq);
				reap_done = B_TRUE;
				fill_level = added - txq->completed;
				if (fill_level > hard_max_fill) {
					pend -= pkt_descs;
					break;
				}
			} else {
				pend -= pkt_descs;
				break;
			}
		}

		/* Assign mbuf to the last used desc */
		txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
	}

	if (likely(pkts_sent > 0)) {
		rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
				       pend - &txq->pend_desc[0],
				       txq->completed, &txq->added);
		SFC_ASSERT(rc == 0);
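
		/*
		 * efx_tx_qdesc_post() is expected to place the prepared
		 * descriptors on the ring and advance txq->added; the
		 * doorbell below is rung only if something was actually
		 * added since the burst started.
		 */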
		if (likely(pushed != txq->added))
			efx_tx_qpush(txq->common, txq->added, pushed);
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_efx_tx_reap(txq);
#endif

done:
	return pkts_sent;
}

struct sfc_txq *
sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
{
	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;
	struct sfc_txq *txq;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = eth_dev->data->dev_private;

	SFC_ASSERT(dpq->queue_id < sa->txq_count);
	txq = sa->txq_info[dpq->queue_id].txq;

	SFC_ASSERT(txq != NULL);
	return txq;
}

static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
static int
sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
			  unsigned int *txq_entries,
			  unsigned int *evq_entries,
			  unsigned int *txq_max_fill_level)
{
	*txq_entries = nb_tx_desc;
	*evq_entries = nb_tx_desc;
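
	/*
	 * EFX_TXQ_LIMIT() is expected to leave the small amount of headroom
	 * libefx requires, so the ring is never reported as completely
	 * fillable.
	 */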
	*txq_max_fill_level = EFX_TXQ_LIMIT(*txq_entries);

	return 0;
}

static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
static int
sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr,
		   int socket_id,
		   const struct sfc_dp_tx_qcreate_info *info,
		   struct sfc_dp_txq **dp_txqp)
{
	struct sfc_efx_txq *txq;
	struct sfc_txq *ctrl_txq;
	int rc;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
					   EFX_TXQ_LIMIT(info->txq_entries),
					   sizeof(*txq->pend_desc), 0,
					   socket_id);
	if (txq->pend_desc == NULL)
		goto fail_pend_desc_alloc;

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
	if (ctrl_txq->evq->sa->tso) {
		rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
						 info->txq_entries, socket_id);
		if (rc != 0)
			goto fail_alloc_tsoh_objs;
	}

	txq->evq = ctrl_txq->evq;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->dma_desc_size_max = info->dma_desc_size_max;

	*dp_txqp = &txq->dp;
	return 0;

fail_alloc_tsoh_objs:
	rte_free(txq->sw_ring);

fail_sw_ring_alloc:
	rte_free(txq->pend_desc);

fail_pend_desc_alloc:
	rte_free(txq);

fail_txq_alloc:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
static void
sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);

	sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
	rte_free(txq->sw_ring);
	rte_free(txq->pend_desc);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
static int
sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
		  __rte_unused unsigned int evq_read_ptr,
		  unsigned int txq_desc_index)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);

	txq->common = ctrl_txq->common;

	txq->pending = txq->completed = txq->added = txq_desc_index;
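
	/*
	 * The queue starts from the hardware descriptor index reported by
	 * efx_tx_qcreate(); no VLAN option descriptor has been pushed yet,
	 * so the cached TCI is reset and sfc_efx_tx_maybe_insert_tag() will
	 * emit a fresh one on the first tagged packet.
	 */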
	txq->hw_vlan_tci = 0;

	txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
static void
sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);

	txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
}

static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
static void
sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	unsigned int txds;

	sfc_efx_tx_reap(txq);

	for (txds = 0; txds <= txq->ptr_mask; txds++) {
		if (txq->sw_ring[txds].mbuf != NULL) {
			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
			txq->sw_ring[txds].mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
}

static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
static int
sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

	/*
	 * Poll EvQ to derive up-to-date 'txq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because API design assumes that it
	 * is the duty of the caller to satisfy all conditions
	 */
	SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
		   SFC_EFX_TXQ_FLAG_RUNNING);
	sfc_ev_qpoll(txq->evq);

	/*
	 * Ring tail is 'txq->pending', and although descriptors
	 * between 'txq->completed' and 'txq->pending' are still
	 * in use by the driver, they should be reported as DONE
	 */
	if (unlikely(offset < (txq->added - txq->pending)))
		return RTE_ETH_TX_DESC_FULL;

	/*
	 * There is no separate return value for unused descriptors;
	 * the latter will be reported as DONE because genuine DONE
	 * descriptors will be freed anyway in SW on the next burst
	 */
	return RTE_ETH_TX_DESC_DONE;
}

struct sfc_dp_tx sfc_efx_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_TX_FEAT_VLAN_INSERT |
				  SFC_DP_TX_FEAT_TSO |
				  SFC_DP_TX_FEAT_MULTI_POOL |
				  SFC_DP_TX_FEAT_REFCNT |
				  SFC_DP_TX_FEAT_MULTI_SEG,
	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
	.qcreate		= sfc_efx_tx_qcreate,
	.qdestroy		= sfc_efx_tx_qdestroy,
	.qstart			= sfc_efx_tx_qstart,
	.qstop			= sfc_efx_tx_qstop,
	.qreap			= sfc_efx_tx_qreap,
	.qdesc_status		= sfc_efx_tx_qdesc_status,
	.pkt_burst		= sfc_efx_xmit_pkts,
};