/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_ef10.h"

#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef10.h"

#define sfc_ef10_tx_err(dpq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)

/** Maximum length of the DMA descriptor data */
#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)

/*
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF10 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF10_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
	 1 /* Tx error */ - 1 /* flush */)
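
/*
 * Illustrative sizing example (assumed values, not defined in this file):
 * with a 1024-entry Tx ring and SFC_EF10_EV_PER_CACHE_LINE == 8 (a 64-byte
 * cache line holding eight 8-byte events), SFC_EF10_TXQ_LIMIT(1024) yields
 * 1024 - 1 - 7 - 1 - 1 = 1014 usable descriptors.
 */
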
struct sfc_ef10_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};

struct sfc_ef10_txq {
	unsigned int			flags;
#define SFC_EF10_TXQ_STARTED		0x1
#define SFC_EF10_TXQ_NOT_RUNNING	0x2
#define SFC_EF10_TXQ_EXCEPTION		0x4

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			free_thresh;
	unsigned int			evq_read_ptr;
	struct sfc_ef10_tx_sw_desc	*sw_ring;
	efx_qword_t			*txq_hw_ring;
	volatile void			*doorbell;
	efx_qword_t			*evq_hw_ring;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;
};

static inline struct sfc_ef10_txq *
sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef10_txq, dp);
}

static bool
sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set when reap is done.
	 * Reap is never done twice per packet burst get, and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);

	*tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef10_ev_present(*tx_ev))
		return false;

	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
		     FSE_AZ_EV_CODE_TX_EV)) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF10_TXQ_EXCEPTION;
		sfc_ef10_tx_err(&txq->dp.dpq,
				"TxQ exception at EvQ read ptr %#x",
				txq->evq_read_ptr);
		return false;
	}

	txq->evq_read_ptr++;
	return true;
}

static unsigned int
sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
{
	const unsigned int curr_done = txq->completed - 1;
	unsigned int anew_done = curr_done;
	efx_qword_t tx_ev;

	while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
		/*
		 * DROP_EVENT is internal to the NIC; software should never
		 * see it and, therefore, may ignore it.
		 */

		/* Update the latest done descriptor */
		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
	}
	return (anew_done - curr_done) & txq->ptr_mask;
}
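
/*
 * The value returned above is the number of descriptors completed since the
 * previous call, computed modulo the ring size. For example (hypothetical
 * numbers): with ptr_mask == 0x3ff, curr_done == 0x3fe and a Tx event
 * reporting descriptor index 0x001, (0x001 - 0x3fe) & 0x3ff == 3.
 */
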
static void
sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;
			struct rte_mbuf *m;

			txd = &txq->sw_ring[completed & ptr_mask];
			if (txd->mbuf == NULL)
				continue;

			m = rte_pktmbuf_prefree_seg(txd->mbuf);
			txd->mbuf = NULL;
			if (m == NULL)
				continue;

			/* Flush the bulk if it is full or the pool differs */
			if ((nb == RTE_DIM(bulk)) ||
			    ((nb != 0) && (m->pool != bulk[0]->pool))) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}
			bulk[nb++] = m;
		} while (++completed != pending);

		if (nb != 0)
			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

static void
sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
			     efx_qword_t *edp)
{
	EFX_POPULATE_QWORD_4(*edp,
			     ESF_DZ_TX_KER_TYPE, 0,
			     ESF_DZ_TX_KER_CONT, !eop,
			     ESF_DZ_TX_KER_BYTE_CNT, size,
			     ESF_DZ_TX_KER_BUF_ADDR, addr);
}
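
/*
 * For example, a packet made of two mbuf segments is encoded by the helper
 * above as two descriptors: the first with eop == false (CONT set) and the
 * second with eop == true (CONT clear), which marks the end of the packet.
 */
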
static void
sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
		  unsigned int pushed)
{
	efx_qword_t desc;
	efx_oword_t oword;

	/*
	 * This improves performance by pushing a Tx descriptor at the same
	 * time as the doorbell. The descriptor must be added to the TxQ,
	 * so that it can be used if the hardware decides not to use the
	 * pushed descriptor.
	 */
	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
	EFX_POPULATE_OWORD_3(oword,
		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));

	/* DMA sync to device is not required */

	/*
	 * rte_io_wmb() guarantees that the STORE operations that precede it
	 * (i.e. Tx and event descriptor updates) are visible to the NIC
	 * before the STORE operations that follow it (i.e. the doorbell
	 * write).
	 */
	rte_io_wmb();

	*(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
}
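
/*
 * Callers below invoke this as sfc_ef10_tx_qpush(txq, added, txq->added):
 * 'added' becomes the new ring write pointer, while 'pushed' (the previous
 * txq->added) indexes the first not yet published descriptor, whose contents
 * are replicated into the doorbell write alongside the pointer update.
 */
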
static unsigned int
sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs_per_seg;
	unsigned int extra_descs_per_pkt;

	/*
	 * VLAN offload is not supported yet, so no extra descriptors
	 * are required for a VLAN option descriptor.
	 */

/** Maximum length of the mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);

	/*
	 * Each segment is already counted once below. So, calculate
	 * how many extra DMA descriptors may be required per segment in
	 * the worst case because of the maximum DMA descriptor length limit.
	 * If the maximum segment length is less than or equal to the maximum
	 * DMA descriptor length, no extra DMA descriptors are required.
	 */
	extra_descs_per_seg =
		(SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;

/** Maximum length of the packet */
#define SFC_MBUF_PKT_LEN_MAX		UINT32_MAX
	RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);

	/*
	 * One more limitation on the maximum number of extra DMA descriptors
	 * comes from slicing an entire packet because of the DMA descriptor
	 * length limit, taking into account that there is at least one
	 * segment which is already counted below (so division of the maximum
	 * packet length minus one with round down).
	 * TSO is not supported yet, so the packet length is limited by the
	 * maximum PDU size.
	 */
	extra_descs_per_pkt =
		(RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
			 SFC_MBUF_PKT_LEN_MAX) - 1) /
		SFC_EF10_TX_DMA_DESC_LEN_MAX;

	return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
				    extra_descs_per_pkt);
}
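
/*
 * Worked example for the helper above (the EFX_MAC_PDU_MAX value is an
 * assumption): with a 16383-byte DMA descriptor limit, a 65535-byte mbuf
 * segment may need up to 4 extra descriptors, but since the maximum PDU
 * (roughly 9 KB with jumbo frames) is itself below the descriptor limit,
 * extra_descs_per_pkt is 0 and the estimate collapses to m->nb_segs.
 */
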
static uint16_t
sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
			 (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
	if (reap_done) {
		sfc_ef10_tx_reap(txq);
		dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
				 (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		uint32_t pkt_len;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;

			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef10_tx_qpush(txq, added, txq->added);
				txq->added = added;
			}

			sfc_ef10_tx_reap(txq);
			reap_done = true;
			dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
					 (added - txq->completed);
			if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		pkt_len = m_seg->pkt_len;
		do {
			phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
			unsigned int id = added & ptr_mask;

			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);

			pkt_len -= seg_len;

			sfc_ef10_tx_qdesc_dma_create(seg_addr,
				seg_len, (pkt_len == 0),
				&txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK to
			 * recycle packets: it checks every segment's
			 * reference counter and returns the buffer to its
			 * pool whenever possible. Nevertheless, freeing mbuf
			 * segments one by one may entail some performance
			 * decline. sfc_ef10_tx_reap() does the same job on
			 * its own and frees buffers in bulks (all mbufs
			 * within a bulk belong to the same pool), so
			 * individual segment pointers must be associated
			 * with the corresponding SW descriptors
			 * independently, and a single loop on reap is
			 * sufficient to inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;

			++added;
		} while ((m_seg = m_seg->next) != NULL);

		dma_desc_space -= (added - pkt_start);
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}
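
/*
 * The 'simple' variants below back the SFC_KVARG_DATAPATH_EF10_SIMPLE
 * datapath, which advertises only SFC_DP_TX_FEAT_MULTI_PROCESS (see
 * sfc_ef10_simple_tx at the end of the file). Multi-segment mbufs, multiple
 * mempools and reference-counted mbufs are not supported there, so the reap
 * path may skip rte_pktmbuf_prefree_seg() and return every mbuf straight to
 * the single pool in bulk.
 */
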
static void
sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
{
	const unsigned int old_read_ptr = txq->evq_read_ptr;
	const unsigned int ptr_mask = txq->ptr_mask;
	unsigned int completed = txq->completed;
	unsigned int pending = completed;

	pending += sfc_ef10_tx_process_events(txq);

	if (pending != completed) {
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
		unsigned int nb = 0;

		do {
			struct sfc_ef10_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & ptr_mask];

			if (nb == RTE_DIM(bulk)) {
				rte_mempool_put_bulk(bulk[0]->pool,
						     (void *)bulk, nb);
				nb = 0;
			}

			bulk[nb++] = txd->mbuf;
		} while (++completed != pending);

		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;
	}

	sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
			   txq->evq_read_ptr);
}

static uint16_t
sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
	unsigned int ptr_mask;
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
		return 0;

	ptr_mask = txq->ptr_mask;
	added = txq->added;
	dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
			 (added - txq->completed);

	reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
	if (reap_done) {
		sfc_ef10_simple_tx_reap(txq);
		dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
				 (added - txq->completed);
	}

	pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
	for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
		struct rte_mbuf *pkt = *pktp;
		unsigned int id = added & ptr_mask;

		SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
			   SFC_EF10_TX_DMA_DESC_LEN_MAX);

		sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
					     rte_pktmbuf_data_len(pkt),
					     true, &txq->txq_hw_ring[id]);

		txq->sw_ring[id].mbuf = pkt;

		++added;
	}

	if (likely(added != txq->added)) {
		sfc_ef10_tx_qpush(txq, added, txq->added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef10_simple_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
static int
sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_tx_qcreate_info *info,
		    struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef10_txq *txq;
	int rc;

	/* EvQ of the same size as TxQ is assumed by SFC_EF10_TXQ_LIMIT() */
	rc = EINVAL;
	if (info->txq_entries != info->evq_entries)
		goto fail_bad_args;

	rc = ENOMEM;
	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		goto fail_txq_alloc;

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->free_thresh = info->free_thresh;
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_DZ_TX_DESC_UPD_REG_OFST +
			info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
	txq->evq_hw_ring = info->evq_hw_ring;

	*dp_txqp = &txq->dp;
	return 0;

fail_sw_ring_alloc:
	rte_free(txq);

fail_txq_alloc:
fail_bad_args:
	return rc;
}

static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
static void
sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);
}

static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
static int
sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		   unsigned int txq_desc_index)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF10_TXQ_STARTED;
	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
static void
sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
static bool
sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Tx event since we reap all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
static void
sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef10_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF10_TXQ_STARTED;
}

static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
static int
sfc_ef10_tx_qdesc_status(__rte_unused struct sfc_dp_txq *dp_txq,
			 __rte_unused uint16_t offset)
{
	/* Descriptor status is not supported by this datapath yet */
	return -ENOTSUP;
}

struct sfc_dp_tx sfc_ef10_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_SEG |
				  SFC_DP_TX_FEAT_MULTI_POOL |
				  SFC_DP_TX_FEAT_REFCNT |
				  SFC_DP_TX_FEAT_MULTI_PROCESS,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_burst		= sfc_ef10_xmit_pkts,
};

struct sfc_dp_tx sfc_ef10_simple_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF10_SIMPLE,
		.type		= SFC_DP_TX,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.qcreate		= sfc_ef10_tx_qcreate,
	.qdestroy		= sfc_ef10_tx_qdestroy,
	.qstart			= sfc_ef10_tx_qstart,
	.qtx_ev			= sfc_ef10_tx_qtx_ev,
	.qstop			= sfc_ef10_tx_qstop,
	.qreap			= sfc_ef10_tx_qreap,
	.qdesc_status		= sfc_ef10_tx_qdesc_status,
	.pkt_burst		= sfc_ef10_simple_xmit_pkts,
};
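
/*
 * Usage sketch (application side, illustrative only): neither burst function
 * is called directly. The selected datapath's pkt_burst is installed as the
 * device Tx burst callback, so an application reaches it via
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, mbufs, n);
 *
 * The return value may be less than n when the Tx ring is full (see the
 * dma_desc_space checks above); unsent mbufs remain owned by the caller.
 */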