/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs_ef100.h"

#include "sfc_debug.h"
#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"
#define sfc_ef100_tx_err(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_txq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_tx_debug(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_txq)->dp.dpq, \
		   __VA_ARGS__)
/** Maximum length of the send descriptor data */
#define SFC_EF100_TX_SEND_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEND_LEN_WIDTH) - 1)
/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF100_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 1 /* Tx error */ - 1 /* flush */)
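
/*
 * For example, a 512-entry Tx ring gives a maximum fill level of
 * SFC_EF100_TXQ_LIMIT(512) = 509 descriptors.
 */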

struct sfc_ef100_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};
struct sfc_ef100_txq {
	unsigned int			flags;
#define SFC_EF100_TXQ_STARTED		0x1
#define SFC_EF100_TXQ_NOT_RUNNING	0x2
#define SFC_EF100_TXQ_EXCEPTION		0x4

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			max_fill_level;
	unsigned int			free_thresh;
	struct sfc_ef100_tx_sw_desc	*sw_ring;
	efx_oword_t			*txq_hw_ring;
	volatile void			*doorbell;

	unsigned int			evq_read_ptr;
	unsigned int			evq_phase_bit_shift;
	volatile efx_qword_t		*evq_hw_ring;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;
};
static inline struct sfc_ef100_txq *
sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef100_txq, dp);
}
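
/*
 * Fetch the next event queue entry and check whether it is a valid Tx
 * completion event. The phase bit derived from evq_read_ptr tells valid
 * entries from stale ones. Any other event type sets the EXCEPTION flag
 * and is left unread for the control path to handle.
 */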
static bool
sfc_ef100_tx_get_event(struct sfc_ef100_txq *txq, efx_qword_t *ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * Exception flag is set when reap is done.
	 * It is never done twice per packet burst get, and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF100_TXQ_EXCEPTION) == 0);

	*ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef100_ev_present(ev,
			(txq->evq_read_ptr >> txq->evq_phase_bit_shift) & 1))
		return false;

	if (unlikely(!sfc_ef100_ev_type_is(ev,
					   ESE_GZ_EF100_EV_TX_COMPLETION))) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF100_TXQ_EXCEPTION;
		sfc_ef100_tx_err(txq,
			"TxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
			txq->evq_read_ptr, txq->evq_read_ptr & txq->ptr_mask,
			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
		return false;
	}

	sfc_ef100_tx_debug(txq, "TxQ got event %08x:%08x at %u (%#x)",
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
			   txq->evq_read_ptr,
			   txq->evq_read_ptr & txq->ptr_mask);

	txq->evq_read_ptr++;

	return true;
}
static unsigned int
sfc_ef100_tx_process_events(struct sfc_ef100_txq *txq)
{
	unsigned int num_descs = 0;
	efx_qword_t tx_ev;

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		num_descs += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);

	return num_descs;
}
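
/*
 * Free mbufs for the given number of completed descriptors. Mbufs are
 * returned to their mempools via rte_mempool_put_bulk(); a bulk is flushed
 * when it is full or when the next mbuf belongs to a different mempool, so
 * every bulk contains mbufs from a single pool only.
 */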
static void
sfc_ef100_tx_reap_num_descs(struct sfc_ef100_txq *txq, unsigned int num_descs)
{
	if (num_descs > 0) {
		unsigned int completed = txq->completed;
		unsigned int pending = completed + num_descs;
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef100_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & txq->ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			txd->mbuf = NULL;
			if (m == NULL)
				continue;

			if (nb == RTE_DIM(bulk) ||
			    (nb != 0 && m->pool != bulk[0]->pool)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}
}
static void
sfc_ef100_tx_reap(struct sfc_ef100_txq *txq)
{
	sfc_ef100_tx_reap_num_descs(txq, sfc_ef100_tx_process_events(txq));
}
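
/*
 * A SEND descriptor references exactly one contiguous buffer by its IOVA
 * and length, hence NUM_SEGS is 1: multi-segment packets and TSO are not
 * supported by this datapath yet.
 */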
static void
sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
{
	EFX_POPULATE_OWORD_4(*tx_desc,
			ESF_GZ_TX_SEND_ADDR, rte_mbuf_data_iova(m),
			ESF_GZ_TX_SEND_LEN, rte_pktmbuf_data_len(m),
			ESF_GZ_TX_SEND_NUM_SEGS, 1,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
}
static inline void
sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, ERF_GZ_TX_RING_PIDX, added & txq->ptr_mask);

	/* DMA sync to device is not required */

	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Tx descriptor and event descriptor updates) that
	 * precede the rte_io_wmb() call are visible to the NIC before the
	 * STORE operations that follow it (i.e. the doorbell write).
	 */
	rte_write32(dword.ed_u32[0], txq->doorbell);

	sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
			   EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),
			   added);
}
static unsigned int
sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
{
/** Maximum length of an mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX	UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	/*
	 * An mbuf segment cannot be bigger than the maximum segment length
	 * and the maximum packet length since TSO is not supported yet.
	 * Make sure that the first segment does not need fragmentation
	 * (split into many Tx descriptors).
	 */
	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <
		RTE_MIN((unsigned int)EFX_MAC_PDU_MAX, SFC_MBUF_SEG_LEN_MAX));

	SFC_ASSERT(m->nb_segs == 1);
	return 1;
}
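
/*
 * Burst transmit: bail out if the queue is not running or hit an exception,
 * reap completed descriptors when free space drops below free_thresh, fill
 * SEND descriptors for as many packets as fit, and push the doorbell with
 * the final producer index (an extra push may happen mid-burst before
 * reaping when the ring runs out of space).
 */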
static uint16_t
sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef100_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		unsigned int id;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef100_tx_qpush(txq, added);
				txq->added = added;
			}

			sfc_ef100_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
				(added - txq->completed);
			if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		id = added++ & txq->ptr_mask;
		sfc_ef100_tx_qdesc_send_create(m_seg, &txq->txq_hw_ring[id]);

		/*
		 * rte_pktmbuf_free() is commonly used in DPDK for
		 * recycling packets - the function checks every
		 * segment's reference counter and returns the
		 * buffer to its pool whenever possible;
		 * nevertheless, freeing mbuf segments one by one
		 * may entail some performance decline;
		 * from this point, sfc_ef100_tx_reap() does the same job
		 * on its own and frees buffers in bulk (all mbufs
		 * within a bulk belong to the same pool);
		 * from this perspective, individual segment pointers
		 * must be associated with the corresponding SW
		 * descriptors independently so that only one loop
		 * is sufficient on reap to inspect all the buffers.
		 */
		txq->sw_ring[id].mbuf = m_seg;

		dma_desc_space -= (added - pkt_start);
	}

	if (likely(added != txq->added)) {
		sfc_ef100_tx_qpush(txq, added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef100_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}
static sfc_dp_tx_get_dev_info_t sfc_ef100_get_dev_info;
static void
sfc_ef100_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * Number of descriptors just defines the maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}
static sfc_dp_tx_qsize_up_rings_t sfc_ef100_tx_qsize_up_rings;
static int
sfc_ef100_tx_qsize_up_rings(uint16_t nb_tx_desc,
			    struct sfc_dp_tx_hw_limits *limits,
			    unsigned int *txq_entries,
			    unsigned int *evq_entries,
			    unsigned int *txq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_tx_desc <= limits->txq_min_entries)
		*txq_entries = limits->txq_min_entries;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF100_TXQ_LIMIT(*evq_entries));
	return 0;
}
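
/*
 * For example, nb_tx_desc = 1000 (above the HW minimum) is rounded up to
 * txq_entries = evq_entries = 1024 and the fill level is capped at
 * RTE_MIN(1000, SFC_EF100_TXQ_LIMIT(1024)) = 1000 descriptors.
 */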
static sfc_dp_tx_qcreate_t sfc_ef100_tx_qcreate;
static int
sfc_ef100_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		     const struct rte_pci_addr *pci_addr, int socket_id,
		     const struct sfc_dp_tx_qcreate_info *info,
		     struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef100_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef100-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef100-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF100_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_GZ_TX_RING_DOORBELL_OFST +
			(info->hw_index << info->vi_window_shift);
	txq->evq_hw_ring = info->evq_hw_ring;

	sfc_ef100_tx_debug(txq, "TxQ doorbell is %p", txq->doorbell);

	*dp_txqp = &txq->dp;
	return 0;

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}
static sfc_dp_tx_qdestroy_t sfc_ef100_tx_qdestroy;
static void
sfc_ef100_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);
}
static sfc_dp_tx_qstart_t sfc_ef100_tx_qstart;
static int
sfc_ef100_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		    unsigned int txq_desc_index)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF100_TXQ_STARTED;
	txq->flags &= ~(SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION);

	return 0;
}
static sfc_dp_tx_qstop_t sfc_ef100_tx_qstop;
static void
sfc_ef100_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF100_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}
static sfc_dp_tx_qtx_ev_t sfc_ef100_tx_qtx_ev;
static bool
sfc_ef100_tx_qtx_ev(struct sfc_dp_txq *dp_txq, unsigned int num_descs)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF100_TXQ_NOT_RUNNING);

	sfc_ef100_tx_reap_num_descs(txq, num_descs);

	return false;
}
static sfc_dp_tx_qreap_t sfc_ef100_tx_qreap;
static void
sfc_ef100_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef100_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF100_TXQ_STARTED;
}
static unsigned int
sfc_ef100_tx_qdesc_npending(struct sfc_ef100_txq *txq)
{
	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
	unsigned int npending = 0;
	efx_qword_t tx_ev;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
		return 0;

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		npending += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);

	/*
	 * The function does not process events, so return the event queue
	 * read pointer to the original position to allow the events that
	 * were read to be processed later.
	 */
	txq->evq_read_ptr = evq_old_read_ptr;

	return npending;
}
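
/*
 * Tx descriptor status: a descriptor at the given offset is reported DONE
 * when it is outside the pushed-but-incomplete region or already covered
 * by completion events pending in the event queue; otherwise it is FULL
 * (still owned by the HW).
 */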
static sfc_dp_tx_qdesc_status_t sfc_ef100_tx_qdesc_status;
static int
sfc_ef100_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int pushed = txq->added - txq->completed;

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

	return (offset >= pushed ||
		offset < sfc_ef100_tx_qdesc_npending(txq)) ?
	       RTE_ETH_TX_DESC_DONE : RTE_ETH_TX_DESC_FULL;
}
struct sfc_dp_tx sfc_ef100_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF100,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.dev_offload_capa	= 0,
	.queue_offload_capa	= 0,
	.get_dev_info		= sfc_ef100_get_dev_info,
	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
	.qcreate		= sfc_ef100_tx_qcreate,
	.qdestroy		= sfc_ef100_tx_qdestroy,
	.qstart			= sfc_ef100_tx_qstart,
	.qtx_ev			= sfc_ef100_tx_qtx_ev,
	.qstop			= sfc_ef100_tx_qstop,
	.qreap			= sfc_ef100_tx_qreap,
	.qdesc_status		= sfc_ef100_tx_qdesc_status,
	.pkt_burst		= sfc_ef100_xmit_pkts,
};