/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define sfc_ef10_tx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

/** Maximum length of the DMA descriptor data */
#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)

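/*
 * Note: assuming the EF10 Tx descriptor byte count field is 14 bits wide
 * (ESF_DZ_TX_KER_BYTE_CNT_WIDTH == 14), the limit above evaluates to
 * 16383 bytes per DMA descriptor.
 */
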
/*
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF10_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Tx error */ - 1 /* flush */)

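/*
 * Worked example (assuming SFC_EF10_EV_PER_CACHE_LINE == 8, i.e. 8-byte
 * events in a 64-byte cache line): for a 1024-entry ring the limit is
 * 1024 - 1 - 7 - 1 - 1 = 1014 descriptors in flight at any time.
 */
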
struct sfc_ef10_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};

struct sfc_ef10_txq {
	unsigned int			flags;
#define SFC_EF10_TXQ_STARTED		0x1
#define SFC_EF10_TXQ_NOT_RUNNING	0x2
#define SFC_EF10_TXQ_EXCEPTION		0x4

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			max_fill_level;
	unsigned int			free_thresh;
	unsigned int			evq_read_ptr;
	struct sfc_ef10_tx_sw_desc	*sw_ring;
	efx_qword_t			*txq_hw_ring;
	volatile void			*doorbell;
	efx_qword_t			*evq_hw_ring;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;
};

static inline struct sfc_ef10_txq *
sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef10_txq, dp);
}

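/*
 * Read the next event for the queue. Returns true and advances the EvQ
 * read pointer if a Tx completion event is present. Returns false if the
 * next entry has not been written yet, or if a non-Tx event is seen, in
 * which case the exception flag is set and the event is left in place for
 * the control path to handle.
 */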
static bool
sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set when reap is done.
	 * Reap is never done twice per packet burst get, and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);

	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef10_ev_present(*tx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_TX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
		sfc_ef10_tx_err(&txq->dp.dpq,
				"TxQ exception at EvQ read ptr %#x",
				txq->evq_read_ptr);
		return false;
	}

	txq->evq_read_ptr++;
	return true;
}

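/*
 * Drain all pending Tx events and return the number of descriptors
 * completed since the last call. Each Tx event carries the index of the
 * latest completed descriptor, so only the last seen index matters; the
 * difference is taken modulo the ring size to cope with wrap-around.
 */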
static unsigned int
sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;

	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */

		/* Update the latest done descriptor */
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
	}

	return (anew_done - curr_done) & txq->ptr_mask;
}

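/*
 * Process Tx completions and free the corresponding mbufs. Mbufs are
 * returned to their mempools in bulks; a bulk is flushed whenever it is
 * full or the next mbuf belongs to a different mempool. Finally, the
 * consumed event queue entries are cleared so that they may be reused.
 */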
static void
sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			txd->mbuf = NULL;
			if (m == NULL)
				continue;

			if ((nb == RTE_DIM(bulk)) ||
			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

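/*
 * Fill in a single Tx DMA descriptor describing one contiguous buffer
 * segment. The continuation bit is the inverse of end-of-packet, so it is
 * set on every descriptor of a packet except the last one.
 */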
static void
sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
{
	EFX_POPULATE_QWORD_4(*edp,
			     ESF_DZ_TX_KER_TYPE, 0,
			     ESF_DZ_TX_KER_CONT, !eop,
			     ESF_DZ_TX_KER_BYTE_CNT, size,
			     ESF_DZ_TX_KER_BUF_ADDR, addr);
}

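/*
 * Ring the Tx doorbell for the descriptors in the range [pushed, added).
 * The write pointer update is combined with a copy of the first new
 * descriptor ("descriptor push"); an I/O write barrier is required between
 * the descriptor ring updates and the doorbell write.
 */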
static void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
		  unsigned int pushed)
{
	efx_qword_t desc;
	efx_oword_t oword;

	/*
	 * This improves performance by pushing a TX descriptor at the same
	 * time as the doorbell. The descriptor must be added to the TXQ,
	 * so that it can be used if the hardware decides not to use the
	 * pushed descriptor.
	 */
	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
	EFX_POPULATE_OWORD_3(oword,
		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* DMA sync to device is not required */

	/*
	 * rte_io_wmb() guarantees that the STORE operations
	 * (i.e. Tx and event descriptor updates) that precede the
	 * barrier are visible to the NIC before the STORE operations
	 * that follow it (i.e. the doorbell write).
	 */
	rte_io_wmb();

	*(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
}

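/*
 * Worst-case number of DMA descriptors needed to send the given mbuf.
 * Worked example (assuming a 16383-byte DMA descriptor limit): a segment
 * may hold up to 65535 bytes, which needs up to ceil(65535 / 16383) = 5
 * descriptors, i.e. 4 extra descriptors on top of the one counted per
 * segment; the per-packet bound below caps the total for short PDUs.
 */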
static unsigned int
sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs_per_seg;
	unsigned int extra_descs_per_pkt;

	/*
	 * VLAN offload is not supported yet, so no extra descriptors
	 * are required for a VLAN option descriptor.
	 */

/** Maximum length of the mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	/*
	 * Each segment is already counted once below. So, calculate
	 * how many extra DMA descriptors may be required per segment in
	 * the worst case because of the maximum DMA descriptor length
	 * limit. If the maximum segment length is less than or equal to
	 * the maximum DMA descriptor length, no extra DMA descriptors
	 * are required.
	 */
	extra_descs_per_seg =
		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;

/** Maximum length of the packet */
#define SFC_MBUF_PKT_LEN_MAX		UINT32_MAX
	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);

	/*
	 * One more limitation on the maximum number of extra DMA
	 * descriptors comes from slicing the entire packet because of the
	 * DMA descriptor length limit, taking into account that there is
	 * at least one segment which is already counted below (so the
	 * maximum packet length minus one is divided with round down).
	 * TSO is not supported yet, so the packet length is limited by
	 * the maximum PDU size.
	 */
	extra_descs_per_pkt =
		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
			 SFC_MBUF_PKT_LEN_MAX) - 1) /
		SFC_EF10_TX_DMA_DESC_LEN_MAX;

	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
				    extra_descs_per_pkt);
}

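/*
 * Multi-segment capable transmit burst. It reaps completed descriptors
 * when free descriptor space drops below the free threshold, builds one
 * DMA descriptor per mbuf segment, remembers each segment in the SW ring
 * for later reap, and rings the doorbell once per burst.
 */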
static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef10_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		uint32_t pkt_len;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef10_tx_qpush(txq, added, txq->added);
				txq->added = added;
			}

			sfc_ef10_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
				(added - txq->completed);
			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		pkt_len = m_seg->pkt_len;
		do {
			rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
			unsigned int id = added & txq->ptr_mask;

			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);

			pkt_len -= seg_len;

			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
				&txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK for
			 * recycling packets: the function checks every
			 * segment's reference counter and returns the
			 * buffer to its pool whenever possible.
			 * Nevertheless, freeing mbuf segments one by one
			 * may entail some performance decline.
			 * For this reason, sfc_ef10_tx_reap() does the same
			 * job on its own and frees buffers in bulks (all
			 * mbufs within a bulk belong to the same pool).
			 * Hence, individual segment pointers must be
			 * associated with the corresponding SW descriptors
			 * independently so that only one loop is sufficient
			 * on reap to inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;

			++added;

		} while ((m_seg = m_seg->next) != 0);

		dma_desc_space -= (added - pkt_start);
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

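/*
 * Simplified reap used by the EF10 simple datapath. It may assume that
 * all mbufs come from a single mempool, are single-segment and have a
 * reference count of one (the datapath advertises neither multi-pool nor
 * reference counting features), so completed mbufs are returned to the
 * pool in bulk without per-mbuf checks.
 */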
static void
sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & ptr_mask];

			if (nb == RTE_DIM(bulk)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = txd->mbuf;
		} while (++completed != pending);

		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

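/*
 * Simplified transmit burst: single-segment packets only, one DMA
 * descriptor per packet. The burst is clamped to the available descriptor
 * space, so packets that do not fit are simply not consumed.
 */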
static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
	if (reap_done) {
		sfc_ef10_simple_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
		struct rte_mbuf *pkt = *pktp;
		unsigned int id = added & ptr_mask;

		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
			   SFC_EF10_TX_DMA_DESC_LEN_MAX);

		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
					     rte_pktmbuf_data_len(pkt),
					     true, &txq->txq_hw_ring[id]);

		txq->sw_ring[id].mbuf = pkt;

		++added;
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_simple_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
static void
sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}

static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
static int
sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
			   unsigned int *txq_entries,
			   unsigned int *evq_entries,
			   unsigned int *txq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
		*txq_entries = EFX_TXQ_MINNDESCS;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF10_TXQ_LIMIT(*evq_entries));

	return 0;
}

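/*
 * Worked example for sfc_ef10_tx_qsize_up_rings (assuming
 * EFX_TXQ_MINNDESCS == 512): a request for 1000 descriptors gives
 * txq_entries = evq_entries = 1024 (rounded up to a power of two) and
 * txq_max_fill_level = min(1000, SFC_EF10_TXQ_LIMIT(1024)).
 */
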
static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_tx_qcreate_info *info,
		    struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef10_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_TX_DESC_UPD_REG_OFST +
			info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
	txq->evq_hw_ring = info->evq_hw_ring;

	*dp_txqp = &txq->dp;
	return 0;

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
static void
sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
static int
sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		   unsigned int txq_desc_index)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF10_TXQ_STARTED;
	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
static void
sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
static bool
sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Tx event since we reap all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef10_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF10_TXQ_STARTED;
}

static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
static int
sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
			 __rte_unused uint16_t offset)
{
	return -ENOTSUP;
}

struct sfc_dp_tx sfc_ef10_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_SEG |
				  SFC_DP_TX_FEAT_MULTI_POOL |
				  SFC_DP_TX_FEAT_REFCNT |
				  SFC_DP_TX_FEAT_MULTI_PROCESS,
	.get_dev_info		= sfc_ef10_get_dev_info,
	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_burst		= sfc_ef10_xmit_pkts,
};

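/*
 * The "simple" variant of the EF10 datapath trades features for speed: it
 * advertises multi-process support only (no multi-segment, multi-mempool
 * or reference counting), which allows the simplified transmit and reap
 * paths above to be used.
 */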
struct sfc_dp_tx sfc_ef10_simple_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_SIMPLE,
		.type		= SFC_DP_TX,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.get_dev_info		= sfc_ef10_get_dev_info,
	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_burst		= sfc_ef10_simple_xmit_pkts,
};