/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
/*
 * Maximum number of TX queue flush attempts in case of
 * failure or flush timeout
 */
#define SFC_TX_QFLUSH_ATTEMPTS		(3)
/*
 * Time to wait between event queue polling attempts when waiting for TX
 * queue flush done or flush failed events
 */
#define SFC_TX_QFLUSH_POLL_WAIT_MS	(1)
/*
 * Maximum number of event queue polling attempts when waiting for TX queue
 * flush done or flush failed events; it defines TX queue flush attempt timeout
 * together with SFC_TX_QFLUSH_POLL_WAIT_MS
 */
#define SFC_TX_QFLUSH_POLL_ATTEMPTS	(2000)
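
/*
 * Worst-case TX queue flush delay implied by the constants above:
 * SFC_TX_QFLUSH_ATTEMPTS * SFC_TX_QFLUSH_POLL_ATTEMPTS *
 * SFC_TX_QFLUSH_POLL_WAIT_MS = 3 * 2000 * 1 ms = 6 seconds
 * (see the retry loop in sfc_tx_qstop() below)
 */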
static uint64_t
sfc_tx_get_offload_mask(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t no_caps = 0;

	if (!encp->enc_hw_tx_insert_vlan_enabled)
		no_caps |= DEV_TX_OFFLOAD_VLAN_INSERT;

	if (!encp->enc_tunnel_encapsulations_supported)
		no_caps |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;

	if (!sa->tso)
		no_caps |= DEV_TX_OFFLOAD_TCP_TSO;

	if (!sa->tso_encap ||
	    (encp->enc_tunnel_encapsulations_supported &
	     (1u << EFX_TUNNEL_PROTOCOL_VXLAN)) == 0)
		no_caps |= DEV_TX_OFFLOAD_VXLAN_TNL_TSO;

	if (!sa->tso_encap ||
	    (encp->enc_tunnel_encapsulations_supported &
	     (1u << EFX_TUNNEL_PROTOCOL_GENEVE)) == 0)
		no_caps |= DEV_TX_OFFLOAD_GENEVE_TNL_TSO;

	return ~no_caps;
}
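
/*
 * The inverted mask built above is applied to the datapath-reported
 * capabilities below, so only offloads supported by both the datapath
 * implementation and the NIC/firmware are advertised to ethdev.
 */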
uint64_t
sfc_tx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	return sa->priv.dp_tx->dev_offload_capa & sfc_tx_get_offload_mask(sa);
}
uint64_t
sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	return sa->priv.dp_tx->queue_offload_capa & sfc_tx_get_offload_mask(sa);
}
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
		   const struct rte_eth_txconf *tx_conf,
		   uint64_t offloads)
{
	int rc = 0;

	if (tx_conf->tx_rs_thresh != 0) {
		sfc_err(sa, "RS bit in transmit descriptor is not supported");
		rc = EINVAL;
	}

	if (tx_conf->tx_free_thresh > txq_max_fill_level) {
		sfc_err(sa,
			"TxQ free threshold too large: %u vs maximum %u",
			tx_conf->tx_free_thresh, txq_max_fill_level);
		rc = EINVAL;
	}

	if (tx_conf->tx_thresh.pthresh != 0 ||
	    tx_conf->tx_thresh.hthresh != 0 ||
	    tx_conf->tx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"prefetch/host/writeback thresholds are not supported");
	}

	/* We either perform both TCP and UDP offload, or no offload at all */
	if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
	    ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
		sfc_err(sa, "TCP and UDP offloads can't be set independently");
		rc = EINVAL;
	}

	return rc;
}
void
sfc_tx_qflush_done(struct sfc_txq_info *txq_info)
{
	txq_info->state |= SFC_TXQ_FLUSHED;
	txq_info->state &= ~SFC_TXQ_FLUSHING;
}
int
sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_tx_desc, unsigned int socket_id,
	     const struct rte_eth_txconf *tx_conf)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int txq_entries;
	unsigned int evq_entries;
	unsigned int txq_max_fill_level;
	struct sfc_txq_info *txq_info;
	struct sfc_evq *evq;
	struct sfc_txq *txq;
	int rc = 0;
	struct sfc_dp_tx_qcreate_info info;
	uint64_t offloads;
	struct sfc_dp_tx_hw_limits hw_limits;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	memset(&hw_limits, 0, sizeof(hw_limits));
	hw_limits.txq_max_entries = sa->txq_max_entries;
	hw_limits.txq_min_entries = sa->txq_min_entries;

	rc = sa->priv.dp_tx->qsize_up_rings(nb_tx_desc, &hw_limits,
					    &txq_entries, &evq_entries,
					    &txq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(txq_entries >= sa->txq_min_entries);
	SFC_ASSERT(txq_entries <= sa->txq_max_entries);
	SFC_ASSERT(txq_entries >= nb_tx_desc);
	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
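	/*
	 * The datapath-specific qsize_up_rings() hook sizes both rings
	 * within the adapter limits; the assertions above verify the
	 * contract, including that the maximum fill level never exceeds
	 * the number of descriptors requested by the application.
	 */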
	offloads = tx_conf->offloads |
		   sa->eth_dev->data->dev_conf.txmode.offloads;
	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
	if (rc != 0)
		goto fail_bad_conf;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count);
	txq_info = &sfc_sa2shared(sa)->txq_info[sw_index];

	txq_info->entries = txq_entries;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	txq = &sa->txq_ctrl[sw_index];
	txq->hw_index = sw_index;
	txq->evq = evq;
	txq_info->free_thresh =
		(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
		SFC_TX_DEFAULT_FREE_THRESH;
	txq_info->offloads = offloads;

	rc = sfc_dma_alloc(sa, "txq", sw_index,
			   efx_txq_size(sa->nic, txq_info->entries),
			   socket_id, &txq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.max_fill_level = txq_max_fill_level;
	info.free_thresh = txq_info->free_thresh;
	info.offloads = offloads;
	info.txq_entries = txq_info->entries;
	info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
	info.txq_hw_ring = txq->mem.esm_base;
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = txq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;
	info.tso_tcp_header_offset_limit =
		encp->enc_tx_tso_tcp_header_offset_limit;
	info.tso_max_nb_header_descs =
		RTE_MIN(encp->enc_tx_tso_max_header_ndescs,
			(uint32_t)UINT16_MAX);
	info.tso_max_header_len =
		RTE_MIN(encp->enc_tx_tso_max_header_length,
			(uint32_t)UINT16_MAX);
	info.tso_max_nb_payload_descs =
		RTE_MIN(encp->enc_tx_tso_max_payload_ndescs,
			(uint32_t)UINT16_MAX);
	info.tso_max_payload_len = encp->enc_tx_tso_max_payload_length;
	info.tso_max_nb_outgoing_frames = encp->enc_tx_tso_max_nframes;
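	/*
	 * The RTE_MIN() clamps above assume the TSO limit fields in
	 * struct sfc_dp_tx_qcreate_info are 16-bit wide while libefx
	 * reports 32-bit values; larger hardware limits are saturated
	 * to UINT16_MAX rather than silently truncated.
	 */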
	rc = sa->priv.dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				     socket_id, &info, &txq_info->dp);
	if (rc != 0)
		goto fail_dp_tx_qinit;

	evq->dp_txq = txq_info->dp;

	txq_info->state = SFC_TXQ_INITIALIZED;

	txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);

	return 0;

fail_dp_tx_qinit:
	sfc_dma_free(sa, &txq->mem);

fail_dma_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	txq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
	return rc;
}
void
sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->txq_count);
	sa->eth_dev->data->tx_queues[sw_index] = NULL;

	txq_info = &sfc_sa2shared(sa)->txq_info[sw_index];

	SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED);

	sa->priv.dp_tx->qdestroy(txq_info->dp);
	txq_info->dp = NULL;

	txq_info->state &= ~SFC_TXQ_INITIALIZED;
	txq_info->entries = 0;

	txq = &sa->txq_ctrl[sw_index];

	sfc_dma_free(sa, &txq->mem);

	sfc_ev_qfini(txq->evq);
	txq->evq = NULL;
}
static int
sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "TxQ = %u", sw_index);

	return 0;
}
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
	int rc = 0;

	switch (txmode->mq_mode) {
	case ETH_MQ_TX_NONE:
		break;
	default:
		sfc_err(sa, "Tx multi-queue mode %u not supported",
			txmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * These features are claimed to be i40e-specific,
	 * but it does make sense to double-check their absence
	 */
	if (txmode->hw_vlan_reject_tagged) {
		sfc_err(sa, "Rejecting tagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_reject_untagged) {
		sfc_err(sa, "Rejecting untagged packets not supported");
		rc = EINVAL;
	}

	if (txmode->hw_vlan_insert_pvid) {
		sfc_err(sa, "Port-based VLAN insertion not supported");
		rc = EINVAL;
	}

	return rc;
}
/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	int sw_index;

	SFC_ASSERT(nb_tx_queues <= sas->txq_count);

	sw_index = sas->txq_count;
	while (--sw_index >= (int)nb_tx_queues) {
		if (sas->txq_info[sw_index].state & SFC_TXQ_INITIALIZED)
			sfc_tx_qfini(sa, sw_index);
	}

	sas->txq_count = nb_tx_queues;
}
int
sfc_tx_configure(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
	int rc = 0;

	sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
		     nb_tx_queues, sas->txq_count);

	/*
	 * The datapath implementation assumes absence of boundary
	 * limits on Tx DMA descriptors. Addition of these checks on
	 * datapath would simply make the datapath slower.
	 */
	if (encp->enc_tx_dma_desc_boundary != 0) {
		rc = ENOTSUP;
		goto fail_tx_dma_desc_boundary;
	}

	rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_tx_queues == sas->txq_count)
		goto done;

	if (sas->txq_info == NULL) {
		sas->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
						  sizeof(sas->txq_info[0]), 0,
						  sa->socket_id);
		if (sas->txq_info == NULL)
			goto fail_txqs_alloc;

		/*
		 * Allocate primary process only TxQ control from heap
		 * since it should not be shared.
		 */
		rc = ENOMEM;
		sa->txq_ctrl = calloc(nb_tx_queues, sizeof(sa->txq_ctrl[0]));
		if (sa->txq_ctrl == NULL)
			goto fail_txqs_ctrl_alloc;
	} else {
		struct sfc_txq_info *new_txq_info;
		struct sfc_txq *new_txq_ctrl;

		if (nb_tx_queues < sas->txq_count)
			sfc_tx_fini_queues(sa, nb_tx_queues);

		new_txq_info =
			rte_realloc(sas->txq_info,
				    nb_tx_queues * sizeof(sas->txq_info[0]), 0);
		if (new_txq_info == NULL && nb_tx_queues > 0)
			goto fail_txqs_realloc;

		new_txq_ctrl = realloc(sa->txq_ctrl,
				       nb_tx_queues * sizeof(sa->txq_ctrl[0]));
		if (new_txq_ctrl == NULL && nb_tx_queues > 0)
			goto fail_txqs_ctrl_realloc;

		sas->txq_info = new_txq_info;
		sa->txq_ctrl = new_txq_ctrl;
		if (nb_tx_queues > sas->txq_count) {
			memset(&sas->txq_info[sas->txq_count], 0,
			       (nb_tx_queues - sas->txq_count) *
			       sizeof(sas->txq_info[0]));
			memset(&sa->txq_ctrl[sas->txq_count], 0,
			       (nb_tx_queues - sas->txq_count) *
			       sizeof(sa->txq_ctrl[0]));
		}
	}
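	/*
	 * Note: neither rte_realloc() nor realloc() zero the tail of a
	 * grown array, hence the explicit memset() of the new entries
	 * above before they are used.
	 */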
	while (sas->txq_count < nb_tx_queues) {
		rc = sfc_tx_qinit_info(sa, sas->txq_count);
		if (rc != 0)
			goto fail_tx_qinit_info;

		sas->txq_count++;
	}

done:
	return 0;

fail_tx_qinit_info:
fail_txqs_ctrl_realloc:
fail_txqs_realloc:
fail_txqs_ctrl_alloc:
fail_txqs_alloc:
	sfc_tx_close(sa);

fail_check_mode:
fail_tx_dma_desc_boundary:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
void
sfc_tx_close(struct sfc_adapter *sa)
{
	sfc_tx_fini_queues(sa, 0);

	free(sa->txq_ctrl);
	sa->txq_ctrl = NULL;

	rte_free(sfc_sa2shared(sa)->txq_info);
	sfc_sa2shared(sa)->txq_info = NULL;
}
int
sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
				      sfc_tx_get_queue_offload_caps(sa);
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	struct sfc_evq *evq;
	uint16_t flags = 0;
	unsigned int desc_index;
	int rc = 0;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sas->txq_count);
	txq_info = &sas->txq_info[sw_index];

	SFC_ASSERT(txq_info->state == SFC_TXQ_INITIALIZED);

	txq = &sa->txq_ctrl[sw_index];
	evq = txq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;
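	/*
	 * Translate the queue's ethdev offload configuration into libefx
	 * TxQ creation flags; inner (tunnel) checksum flags are requested
	 * only if the adapter supports outer IPv4 checksum offload.
	 */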
	if (txq_info->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		flags |= EFX_TXQ_CKSUM_IPV4;

	if (txq_info->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
		flags |= EFX_TXQ_CKSUM_INNER_IPV4;

	if ((txq_info->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
	    (txq_info->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
		flags |= EFX_TXQ_CKSUM_TCPUDP;

		if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
	}

	if (txq_info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
		flags |= EFX_TXQ_FATSOV2;

	rc = efx_tx_qcreate(sa->nic, txq->hw_index, 0, &txq->mem,
			    txq_info->entries, 0 /* not used on EF10 */,
			    flags, evq->common,
			    &txq->common, &desc_index);
	if (rc != 0) {
		if (sa->tso && (rc == ENOSPC))
			sfc_err(sa, "ran out of TSO contexts");

		goto fail_tx_qcreate;
	}

	efx_tx_qenable(txq->common);

	txq_info->state |= SFC_TXQ_STARTED;

	rc = sa->priv.dp_tx->qstart(txq_info->dp, evq->read_ptr, desc_index);
	if (rc != 0)
		goto fail_dp_qstart;

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_dp_qstart:
	txq_info->state = SFC_TXQ_INITIALIZED;
	efx_tx_qdestroy(txq->common);

fail_tx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}
void
sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct rte_eth_dev_data *dev_data;
	struct sfc_txq_info *txq_info;
	struct sfc_txq *txq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	SFC_ASSERT(sw_index < sas->txq_count);
	txq_info = &sas->txq_info[sw_index];

	if (txq_info->state == SFC_TXQ_INITIALIZED)
		return;

	SFC_ASSERT(txq_info->state & SFC_TXQ_STARTED);

	txq = &sa->txq_ctrl[sw_index];
	sa->priv.dp_tx->qstop(txq_info->dp, &txq->evq->read_ptr);

	/*
	 * Retry TX queue flushing in case of flush failed or
	 * timeout; in the worst case it can delay for 6 seconds
	 */
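	/*
	 * efx_tx_qflush() returning EALREADY is treated below as "already
	 * flushed", while any other non-zero status marks the flush as
	 * failed; both outcomes stop further retries.
	 */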
	for (retry_count = 0;
	     ((txq_info->state & SFC_TXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_tx_qflush(txq->common);
		if (rc != 0) {
			txq_info->state |= (rc == EALREADY) ?
					   SFC_TXQ_FLUSHED :
					   SFC_TXQ_FLUSH_FAILED;
			break;
		}

		/*
		 * Wait for TX queue flush done or flush failed event at least
		 * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(txq->evq);
		} while ((txq_info->state & SFC_TXQ_FLUSHING) &&
			 wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);

		if (txq_info->state & SFC_TXQ_FLUSHING)
			sfc_err(sa, "TxQ %u flush timed out", sw_index);

		if (txq_info->state & SFC_TXQ_FLUSHED)
			sfc_notice(sa, "TxQ %u flushed", sw_index);
	}

	sa->priv.dp_tx->qreap(txq_info->dp);

	txq_info->state = SFC_TXQ_INITIALIZED;

	efx_tx_qdestroy(txq->common);

	sfc_ev_qstop(txq->evq);

	/*
	 * It seems to be used by DPDK for debug purposes only ('rte_ether')
	 */
	dev_data = sa->eth_dev->data;
	dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
}
int
sfc_tx_start(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int sw_index;
	int rc = 0;

	sfc_log_init(sa, "txq_count = %u", sas->txq_count);
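	/*
	 * The running firmware variant may have changed since TSO support
	 * was first probed (e.g. across an adapter restart); re-check and
	 * drop sa->tso/sa->tso_encap if the capability cannot be restored.
	 */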
	if (sa->tso) {
		if (!encp->enc_fw_assisted_tso_v2_enabled &&
		    !encp->enc_tso_v3_enabled) {
			sfc_warn(sa, "TSO support was unable to be restored");
			sa->tso = B_FALSE;
			sa->tso_encap = B_FALSE;
		}
	}

	if (sa->tso_encap && !encp->enc_fw_assisted_tso_v2_encap_enabled &&
	    !encp->enc_tso_v3_enabled) {
		sfc_warn(sa, "Encapsulated TSO support was unable to be restored");
		sa->tso_encap = B_FALSE;
	}

	rc = efx_tx_init(sa->nic);
	if (rc != 0)
		goto fail_efx_tx_init;

	for (sw_index = 0; sw_index < sas->txq_count; ++sw_index) {
		if (sas->txq_info[sw_index].state == SFC_TXQ_INITIALIZED &&
		    (!(sas->txq_info[sw_index].deferred_start) ||
		     sas->txq_info[sw_index].deferred_started)) {
			rc = sfc_tx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_tx_qstart;
		}
	}

	return 0;

fail_tx_qstart:
	while (sw_index-- > 0)
		sfc_tx_qstop(sa, sw_index);

	efx_tx_fini(sa->nic);

fail_efx_tx_init:
	sfc_log_init(sa, "failed (rc = %d)", rc);
	return rc;
}
void
sfc_tx_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	unsigned int sw_index;

	sfc_log_init(sa, "txq_count = %u", sas->txq_count);

	sw_index = sas->txq_count;
	while (sw_index-- > 0) {
		if (sas->txq_info[sw_index].state & SFC_TXQ_STARTED)
			sfc_tx_qstop(sa, sw_index);
	}

	efx_tx_fini(sa->nic);
}
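
/*
 * Reclaim completed descriptors: poll the event queue to update
 * 'txq->pending', then free the mbufs attached to every descriptor
 * the hardware has finished with.
 */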
static void
sfc_efx_tx_reap(struct sfc_efx_txq *txq)
{
	unsigned int completed;

	sfc_ev_qpoll(txq->evq);

	for (completed = txq->completed;
	     completed != txq->pending; completed++) {
		struct sfc_efx_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];

		if (txd->mbuf != NULL) {
			rte_pktmbuf_free(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->completed = completed;
}
/*
 * The function is used to insert or update VLAN tag; the firmware keeps
 * per-TxQ state of the tag to insert (controlled by option descriptors),
 * hence, if the tag of the packet to be sent differs from the one
 * remembered by the firmware, the function updates it
 */
static unsigned int
sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
			    efx_desc_t **pend)
{
	uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
			     m->vlan_tci : 0);

	if (this_tag == txq->hw_vlan_tci)
		return 0;

	/*
	 * The expression inside SFC_ASSERT() is not desired to be checked in
	 * a non-debug build because it might be too expensive on the data path
	 */
	SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);

	efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
				    *pend);
	(*pend)++;
	txq->hw_vlan_tci = this_tag;

	return 1;
}
static uint16_t
sfc_efx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)
{
	struct sfc_dp_txq *dp_txq = tx_queue;
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		int ret;

		/*
		 * The EFX Tx datapath may consume an extra VLAN descriptor
		 * whenever the mbuf requests VLAN insertion, regardless of
		 * whether the offload is requested/supported, so budget one
		 * VLAN descriptor here.
		 */
		ret = sfc_dp_tx_prepare_pkt(tx_pkts[i], 0, SFC_TSOH_STD_LEN,
				encp->enc_tx_tso_tcp_header_offset_limit,
				txq->max_fill_level, EFX_TX_FATSOV2_OPT_NDESCS,
				1);
		if (unlikely(ret != 0)) {
			rte_errno = ret;
			break;
		}
	}

	return i;
}
static uint16_t
sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	unsigned int added = txq->added;
	unsigned int pushed = added;
	unsigned int pkts_sent = 0;
	efx_desc_t *pend = &txq->pend_desc[0];
	const unsigned int hard_max_fill = txq->max_fill_level;
	const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
	unsigned int fill_level = added - txq->completed;
	boolean_t reap_done;
	int rc __rte_unused;
	struct rte_mbuf **pktp;
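	/*
	 * 'hard_max_fill' is the absolute ring capacity limit, while
	 * 'soft_max_fill' leaves 'free_thresh' descriptors of headroom;
	 * crossing the soft limit merely triggers a reap attempt below.
	 */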
	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
		goto done;

	/*
	 * If insufficient space for a single packet is present,
	 * we should reap; otherwise, we shouldn't do that all the time
	 * to avoid latency increase
	 */
	reap_done = (fill_level > soft_max_fill);

	if (reap_done) {
		sfc_efx_tx_reap(txq);
		/*
		 * Recalculate fill level since 'txq->completed'
		 * might have changed on reap
		 */
		fill_level = added - txq->completed;
	}

	for (pkts_sent = 0, pktp = &tx_pkts[0];
	     (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
	     pkts_sent++, pktp++) {
		uint16_t hw_vlan_tci_prev = txq->hw_vlan_tci;
		struct rte_mbuf *m_seg = *pktp;
		size_t pkt_len = m_seg->pkt_len;
		unsigned int pkt_descs = 0;
		size_t in_off = 0;

		/*
		 * Here VLAN TCI is expected to be zero in case if no
		 * DEV_TX_OFFLOAD_VLAN_INSERT capability is advertised;
		 * if the calling app ignores the absence of
		 * DEV_TX_OFFLOAD_VLAN_INSERT and pushes VLAN TCI, then
		 * TX_ERROR will occur
		 */
		pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);

		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
			/*
			 * We expect 'pkt->l[2, 3, 4]_len' values to be set
			 * correctly by the caller
			 */
			if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
					   &pkt_descs, &pkt_len) != 0) {
				/* We may have reached this place if packet
				 * header linearization is needed but the
				 * header length is greater than
				 * SFC_TSOH_STD_LEN
				 *
				 * We will deceive RTE saying that we have sent
				 * the packet, but we will actually drop it.
				 * Hence, we should revert 'pend' to the
				 * previous state (in case we have added
				 * VLAN descriptor) and start processing
				 * another packet. But the original
				 * mbuf shouldn't be orphaned
				 */
				pend -= pkt_descs;
				txq->hw_vlan_tci = hw_vlan_tci_prev;

				rte_pktmbuf_free(*pktp);

				continue;
			}

			/*
			 * We've only added 2 FATSOv2 option descriptors
			 * and 1 descriptor for the linearized packet header.
			 * The outstanding work will be done in the same manner
			 * as for the usual non-TSO path
			 */
		}
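		/*
		 * Build one or more DMA descriptors per mbuf segment,
		 * splitting any segment that exceeds the adapter's
		 * maximum DMA descriptor size ('txq->dma_desc_size_max').
		 */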
		for (; m_seg != NULL; m_seg = m_seg->next) {
			efsys_dma_addr_t next_frag;
			size_t seg_len;

			seg_len = m_seg->data_len;
			next_frag = rte_mbuf_data_iova(m_seg);

			/*
			 * If we've started TSO transaction few steps earlier,
			 * we'll skip packet header using an offset in the
			 * current segment (which has been set to the
			 * first one containing payload)
			 */
			seg_len -= in_off;
			next_frag += in_off;
			in_off = 0;

			do {
				efsys_dma_addr_t frag_addr = next_frag;
				size_t frag_len;

				/*
				 * It is assumed here that there is no
				 * limitation on address boundary
				 * crossing by DMA descriptor.
				 */
				frag_len = MIN(seg_len, txq->dma_desc_size_max);
				next_frag += frag_len;
				seg_len -= frag_len;
				pkt_len -= frag_len;

				efx_tx_qdesc_dma_create(txq->common,
							frag_addr, frag_len,
							(pkt_len == 0),
							pend++);

				pkt_descs++;
			} while (seg_len != 0);
		}

		added += pkt_descs;

		fill_level += pkt_descs;
		if (unlikely(fill_level > hard_max_fill)) {
			/*
			 * Our estimation for maximum number of descriptors
			 * required to send a packet seems to be wrong.
			 * Try to reap (if we haven't yet).
			 */
			if (!reap_done) {
				sfc_efx_tx_reap(txq);
				reap_done = B_TRUE;
				fill_level = added - txq->completed;
				if (fill_level > hard_max_fill) {
					pend -= pkt_descs;
					txq->hw_vlan_tci = hw_vlan_tci_prev;
					break;
				}
			} else {
				pend -= pkt_descs;
				txq->hw_vlan_tci = hw_vlan_tci_prev;
				break;
			}
		}

		/* Assign mbuf to the last used desc */
		txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
	}
	if (likely(pkts_sent > 0)) {
		rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
				       pend - &txq->pend_desc[0],
				       txq->completed, &txq->added);
		SFC_ASSERT(rc == 0);

		if (likely(pushed != txq->added))
			efx_tx_qpush(txq->common, txq->added, pushed);
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_efx_tx_reap(txq);
#endif

done:
	return pkts_sent;
}
const struct sfc_dp_tx *
sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq)
{
	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

	return sap->dp_tx;
}
struct sfc_txq_info *
sfc_txq_info_by_dp_txq(const struct sfc_dp_txq *dp_txq)
{
	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_shared *sas;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sas = sfc_adapter_shared_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sas->txq_count);
	return &sas->txq_info[dpq->queue_id];
}
struct sfc_txq *
sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
{
	const struct sfc_dp_queue *dpq = &dp_txq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = sfc_adapter_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->txq_count);
	return &sa->txq_ctrl[dpq->queue_id];
}
static sfc_dp_tx_qsize_up_rings_t sfc_efx_tx_qsize_up_rings;
static int
sfc_efx_tx_qsize_up_rings(uint16_t nb_tx_desc,
			  __rte_unused struct sfc_dp_tx_hw_limits *limits,
			  unsigned int *txq_entries,
			  unsigned int *evq_entries,
			  unsigned int *txq_max_fill_level)
{
	*txq_entries = nb_tx_desc;
	*evq_entries = nb_tx_desc;
	*txq_max_fill_level = EFX_TXQ_LIMIT(*txq_entries);

	return 0;
}
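
/*
 * Note: EFX_TXQ_LIMIT() is below the ring size, keeping some entries in
 * reserve (see its definition in libefx) so that the datapath can never
 * completely fill the ring.
 */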
static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
static int
sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr,
		   int socket_id,
		   const struct sfc_dp_tx_qcreate_info *info,
		   struct sfc_dp_txq **dp_txqp)
{
	struct sfc_efx_txq *txq;
	struct sfc_txq *ctrl_txq;
	int rc;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
					   EFX_TXQ_LIMIT(info->txq_entries),
					   sizeof(*txq->pend_desc), 0,
					   socket_id);
	if (txq->pend_desc == NULL)
		goto fail_pend_desc_alloc;

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
	if (ctrl_txq->evq->sa->tso) {
		rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
						 info->txq_entries, socket_id);
		if (rc != 0)
			goto fail_alloc_tsoh_objs;
	}

	txq->evq = ctrl_txq->evq;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->dma_desc_size_max = info->dma_desc_size_max;

	*dp_txqp = &txq->dp;
	return 0;

fail_alloc_tsoh_objs:
	rte_free(txq->sw_ring);

fail_sw_ring_alloc:
	rte_free(txq->pend_desc);

fail_pend_desc_alloc:
	rte_free(txq);

fail_txq_alloc:
	return rc;
}
static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
static void
sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);

	sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
	rte_free(txq->sw_ring);
	rte_free(txq->pend_desc);
	rte_free(txq);
}
static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
static int
sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
		  __rte_unused unsigned int evq_read_ptr,
		  unsigned int txq_desc_index)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);

	txq->common = ctrl_txq->common;

	txq->pending = txq->completed = txq->added = txq_desc_index;
	txq->hw_vlan_tci = 0;

	txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);

	return 0;
}
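
/*
 * Note: the qstop callback below clears only the RUNNING flag, leaving
 * STARTED set; sfc_efx_tx_qreap() can then still walk the sw ring and
 * free residual mbufs before the queue is destroyed, clearing STARTED.
 */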
static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
static void
sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);

	txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
}
static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
static void
sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
	unsigned int txds;

	sfc_efx_tx_reap(txq);

	for (txds = 0; txds <= txq->ptr_mask; txds++) {
		if (txq->sw_ring[txds].mbuf != NULL) {
			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
			txq->sw_ring[txds].mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
}
static sfc_dp_tx_qdesc_status_t sfc_efx_tx_qdesc_status;
static int
sfc_efx_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
{
	struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

	/*
	 * Poll EvQ to derive up-to-date 'txq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because API design assumes that it
	 * is the duty of the caller to satisfy all conditions
	 */
	SFC_ASSERT((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) ==
		   SFC_EFX_TXQ_FLAG_RUNNING);
	sfc_ev_qpoll(txq->evq);

	/*
	 * Ring tail is 'txq->pending', and although descriptors
	 * between 'txq->completed' and 'txq->pending' are still
	 * in use by the driver, they should be reported as DONE
	 */
	if (unlikely(offset < (txq->added - txq->pending)))
		return RTE_ETH_TX_DESC_FULL;

	/*
	 * There is no separate return value for unused descriptors;
	 * the latter will be reported as DONE because genuine DONE
	 * descriptors will be freed anyway in SW on the next burst
	 */
	return RTE_ETH_TX_DESC_DONE;
}
struct sfc_dp_tx sfc_efx_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_TX_EFX,
	},
	.dev_offload_capa	= DEV_TX_OFFLOAD_VLAN_INSERT |
				  DEV_TX_OFFLOAD_MULTI_SEGS,
	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_UDP_CKSUM |
				  DEV_TX_OFFLOAD_TCP_CKSUM |
				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_TCP_TSO,
	.qsize_up_rings		= sfc_efx_tx_qsize_up_rings,
	.qcreate		= sfc_efx_tx_qcreate,
	.qdestroy		= sfc_efx_tx_qdestroy,
	.qstart			= sfc_efx_tx_qstart,
	.qstop			= sfc_efx_tx_qstop,
	.qreap			= sfc_efx_tx_qreap,
	.qdesc_status		= sfc_efx_tx_qdesc_status,
	.pkt_prepare		= sfc_efx_prepare_pkts,
	.pkt_burst		= sfc_efx_xmit_pkts,
};