/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_vect.h>

#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
#define sfc_ef10_tx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
/** Maximum length of the mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX \
	((1u << (8 * sizeof(((struct rte_mbuf *)0)->data_len))) - 1)

/** Maximum length of the DMA descriptor data */
#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)

/** Maximum number of DMA descriptors per mbuf segment */
#define SFC_EF10_TX_MBUF_SEG_DESCS_MAX \
	SFC_DIV_ROUND_UP(SFC_MBUF_SEG_LEN_MAX, \
			 SFC_EF10_TX_DMA_DESC_LEN_MAX)
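
/*
 * Worked example (illustrative): data_len is a uint16_t, so
 * SFC_MBUF_SEG_LEN_MAX is 65535. Assuming a 14-bit byte count field
 * (ESF_DZ_TX_KER_BYTE_CNT_WIDTH == 14), SFC_EF10_TX_DMA_DESC_LEN_MAX is
 * 16383 and a single mbuf segment may need up to
 * SFC_DIV_ROUND_UP(65535, 16383) = 5 DMA descriptors.
 */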
/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF10_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Tx error */ - 1 /* flush */)
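
/*
 * Worked example (illustrative): for a 1024-entry ring, and assuming
 * SFC_EF10_EV_PER_CACHE_LINE is 8 (a 64-byte cache line holding 8-byte
 * events), SFC_EF10_TXQ_LIMIT(1024) = 1024 - 1 - 7 - 1 - 1 = 1014
 * usable descriptors.
 */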
struct sfc_ef10_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};
struct sfc_ef10_txq {
	unsigned int			flags;
#define SFC_EF10_TXQ_STARTED		0x1
#define SFC_EF10_TXQ_NOT_RUNNING	0x2
#define SFC_EF10_TXQ_EXCEPTION		0x4

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			free_thresh;
	unsigned int			evq_read_ptr;
	struct sfc_ef10_tx_sw_desc	*sw_ring;
	efx_qword_t			*txq_hw_ring;
	volatile void			*doorbell;
	efx_qword_t			*evq_hw_ring;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;
};
static inline struct sfc_ef10_txq *
sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef10_txq, dp);
}
static bool
sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set when reap is done.
	 * Reap is never done twice per packet burst get, and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);

	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef10_ev_present(*tx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_TX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
		sfc_ef10_tx_err(&txq->dp.dpq,
				"TxQ exception at EvQ read ptr %#x",
				txq->evq_read_ptr);
		return false;
	}

	txq->evq_read_ptr++;
	return true;
}
static void
sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;
	const unsigned int curr_done = pending - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;

	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should
		 * never see it and, therefore, may ignore it.
		 */

		/* Update the latest done descriptor */
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
	}
	pending += (anew_done - curr_done) & ptr_mask;
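	/*
	 * The masking above handles Tx ring wraparound. Illustrative
	 * values: with ptr_mask 0x3ff, curr_done 0x3fe and anew_done
	 * 0x002, (0x002 - 0x3fe) & 0x3ff == 4, i.e. four more
	 * descriptors have completed.
	 */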
	if (pending != completed) {
		do {
			struct sfc_ef10_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & ptr_mask];

			if (txd->mbuf != NULL) {
				rte_pktmbuf_free(txd->mbuf);
				txd->mbuf = NULL;
			}
		} while (++completed != pending);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}
static void
sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
{
	EFX_POPULATE_QWORD_4(*edp,
			     ESF_DZ_TX_KER_TYPE, 0,
			     ESF_DZ_TX_KER_CONT, !eop,
			     ESF_DZ_TX_KER_BYTE_CNT, size,
			     ESF_DZ_TX_KER_BUF_ADDR, addr);
}
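
/*
 * Illustrative example, not driver code: a packet split across two mbuf
 * segments of 1500 and 100 bytes would be turned into two descriptors,
 *
 *	sfc_ef10_tx_qdesc_dma_create(seg0_addr, 1500, false, &ring[i]);
 *	sfc_ef10_tx_qdesc_dma_create(seg1_addr, 100, true, &ring[i + 1]);
 *
 * i.e. the CONT bit is set on every descriptor of the chain except the
 * last one, which carries end-of-packet.
 */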
static inline void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
		  unsigned int pushed)
{
	efx_qword_t desc;
	efx_oword_t oword;

	/*
	 * This improves performance by pushing a Tx descriptor at the same
	 * time as the doorbell. The descriptor must be added to the TxQ as
	 * well, so that it can be used if the hardware decides not to use
	 * the pushed one.
	 */
	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
	EFX_POPULATE_OWORD_3(oword,
		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* DMA sync to device is not required */

	/*
	 * rte_io_wmb() guarantees that the STORE operations
	 * (i.e. Tx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_io_wmb();

	*(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
}
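
/*
 * Note: the doorbell is written as a single 128-bit store so that the
 * write pointer and the pushed descriptor travel in one PIO write; the
 * assumption here is that the hardware treats the doorbell update as a
 * unit, which is why it is not split into smaller stores.
 */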
static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
			 (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef10_tx_reap(txq);
		dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
				 (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		uint32_t pkt_len;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (m_seg->nb_segs * SFC_EF10_TX_MBUF_SEG_DESCS_MAX >
		    dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef10_tx_qpush(txq, added, txq->added);
				txq->added = added;
			}

			sfc_ef10_tx_reap(txq);
			reap_done = true;
			dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
					 (added - txq->completed);
			if (m_seg->nb_segs * SFC_EF10_TX_MBUF_SEG_DESCS_MAX >
			    dma_desc_space)
				break;
		}

		pkt_len = m_seg->pkt_len;
		do {
			phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);

			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);

			pkt_len -= seg_len;

			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
				&txq->txq_hw_ring[added & ptr_mask]);
			++added;

		} while ((m_seg = m_seg->next) != NULL);

		dma_desc_space -= (added - pkt_start);

		/* Assign mbuf to the last used desc */
		txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}
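
/*
 * Illustrative usage sketch, not driver code: applications reach this
 * burst routine through the generic ethdev API once the ef10 Tx datapath
 * has been selected (port_id, queue_id and pkts are assumed to be set up
 * by the caller):
 *
 *	uint16_t nb_sent = 0;
 *	while (nb_sent < nb_prepared) {
 *		uint16_t n = rte_eth_tx_burst(port_id, queue_id,
 *					      &pkts[nb_sent],
 *					      nb_prepared - nb_sent);
 *		if (n == 0)
 *			break;
 *		nb_sent += n;
 *	}
 *
 * A return value below the request count means descriptor space ran out;
 * the caller may retry after completions are reaped or drop the rest.
 */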
static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_tx_qcreate_info *info,
		    struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef10_txq *txq;
	int rc;

	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->free_thresh = info->free_thresh;
	txq->txq_hw_ring = info->txq_hw_ring;
	/* Per-queue doorbell register in the memory BAR */
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_TX_DESC_UPD_REG_OFST +
			info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
	txq->evq_hw_ring = info->evq_hw_ring;

	*dp_txqp = &txq->dp;
	return 0;

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}
static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
static void
sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);
}
static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
static int
sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		   unsigned int txq_desc_index)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF10_TXQ_STARTED;
	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);

	return 0;
}
static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
static void
sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}
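
/*
 * Note: qstop() hands the EvQ read pointer back to the control path and
 * qstart() re-installs it, so while the queue is stopped the control
 * path can keep polling the same event queue (e.g. for the flush done
 * event) from exactly this position.
 */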
static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
static bool
sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Tx event since we reap all mbufs on
	 * queue purge anyway.
	 */

	return false;
}
static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int txds;

	for (txds = 0; txds <= txq->ptr_mask; ++txds) {
		if (txq->sw_ring[txds].mbuf != NULL) {
			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
			txq->sw_ring[txds].mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF10_TXQ_STARTED;
}
struct sfc_dp_tx sfc_ef10_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_SEG,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.pkt_burst		= sfc_ef10_xmit_pkts,
};
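
/*
 * Registration note: the datapath is expected to be added to the common
 * sfc datapath registry at initialisation time, along the lines of the
 * following sketch (the actual call site lives in the common sfc code):
 *
 *	sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
 *
 * after which it can be chosen at device probe via the tx_datapath=ef10
 * device argument handled by sfc_kvargs.
 */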