1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
18 #include "efx_types.h"
20 #include "efx_regs_ef10.h"
22 #include "sfc_dp_tx.h"
23 #include "sfc_tweak.h"
24 #include "sfc_kvargs.h"
28 #define sfc_ef10_tx_err(dpq, ...) \
29 SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
31 /** Maximum length of the DMA descriptor data */
32 #define SFC_EF10_TX_DMA_DESC_LEN_MAX \
33 ((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
36 * Maximum number of descriptors/buffers in the Tx ring.
37 * It should guarantee that the corresponding event queue never overfills.
38 * The EF10 native datapath uses an event queue of the same size as the Tx queue.
39 * The maximum number of events on the datapath can be estimated as the number of
40 * Tx queue entries (one event per Tx buffer in the worst case) plus
41 * Tx error and flush events.
43 #define SFC_EF10_TXQ_LIMIT(_ndesc) \
44 ((_ndesc) - 1 /* head must not step on tail */ - \
45 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
46 1 /* Tx error */ - 1 /* flush */)
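/*
 * Illustrative example (assuming 8-byte events and a 64-byte cache line,
 * i.e. SFC_EF10_EV_PER_CACHE_LINE == 8): for a 512-entry ring,
 * SFC_EF10_TXQ_LIMIT(512) = 512 - 1 - 7 - 1 - 1 = 502 usable descriptors.
 */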
48 struct sfc_ef10_tx_sw_desc {
49 struct rte_mbuf *mbuf;
54 #define SFC_EF10_TXQ_STARTED 0x1
55 #define SFC_EF10_TXQ_NOT_RUNNING 0x2
56 #define SFC_EF10_TXQ_EXCEPTION 0x4
58 unsigned int ptr_mask;
60 unsigned int completed;
61 unsigned int max_fill_level;
62 unsigned int free_thresh;
63 unsigned int evq_read_ptr;
64 struct sfc_ef10_tx_sw_desc *sw_ring;
65 efx_qword_t *txq_hw_ring;
66 volatile void *doorbell;
67 efx_qword_t *evq_hw_ring;
70 uint16_t tso_tcp_header_offset_limit;
72 /* Datapath transmit queue anchor */
76 static inline struct sfc_ef10_txq *
77 sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
79 return container_of(dp_txq, struct sfc_ef10_txq, dp);
83 sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
85 volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
88 * Exception flag is set when reap is done.
89 * Reap is never done twice per packet burst get, and the absence of
90 * the flag is checked on burst get entry.
92 SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
94 *tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
96 if (!sfc_ef10_ev_present(*tx_ev))
99 if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
100 FSE_AZ_EV_CODE_TX_EV)) {
102 * Do not move read_ptr to keep the event for exception
103 * handling by the control path.
105 txq->flags |= SFC_EF10_TXQ_EXCEPTION;
106 sfc_ef10_tx_err(&txq->dp.dpq,
107 "TxQ exception at EvQ read ptr %#x",
117 sfc_ef10_tx_process_events(struct sfc_ef10_txq *txq)
119 const unsigned int curr_done = txq->completed - 1;
120 unsigned int anew_done = curr_done;
123 while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
125 * DROP_EVENT is internal to the NIC; software should
126 * never see it and, therefore, may ignore it.
129 /* Update the latest done descriptor */
130 anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
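/*
 * The return value below is computed modulo the ring size, so it stays
 * correct across index wraparound. For example, with ptr_mask == 0x1ff,
 * curr_done == 0x1fe and anew_done == 0x003,
 * (0x003 - 0x1fe) & 0x1ff == 5 descriptors completed since the last call.
 */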
132 return (anew_done - curr_done) & txq->ptr_mask;
136 sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
138 const unsigned int old_read_ptr = txq->evq_read_ptr;
139 const unsigned int ptr_mask = txq->ptr_mask;
140 unsigned int completed = txq->completed;
141 unsigned int pending = completed;
143 pending += sfc_ef10_tx_process_events(txq);
145 if (pending != completed) {
146 struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
150 struct sfc_ef10_tx_sw_desc *txd;
153 txd = &txq->sw_ring[completed & ptr_mask];
154 if (txd->mbuf == NULL)
157 m = rte_pktmbuf_prefree_seg(txd->mbuf);
162 if ((nb == RTE_DIM(bulk)) ||
163 ((nb != 0) && (m->pool != bulk[0]->pool))) {
164 rte_mempool_put_bulk(bulk[0]->pool,
170 } while (++completed != pending);
173 rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
175 txq->completed = completed;
178 sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
183 sfc_ef10_tx_qdesc_dma_create(rte_iova_t addr, uint16_t size, bool eop,
186 EFX_POPULATE_QWORD_4(*edp,
187 ESF_DZ_TX_KER_TYPE, 0,
188 ESF_DZ_TX_KER_CONT, !eop,
189 ESF_DZ_TX_KER_BYTE_CNT, size,
190 ESF_DZ_TX_KER_BUF_ADDR, addr);
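/*
 * Note that ESF_DZ_TX_KER_CONT is the inverse of eop: it is set on every
 * descriptor of a packet except the last one, telling the hardware that
 * the packet continues in the following descriptor.
 */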
194 sfc_ef10_tx_qdesc_tso2_create(struct sfc_ef10_txq * const txq,
195 unsigned int added, uint16_t ipv4_id,
196 uint16_t outer_ipv4_id, uint32_t tcp_seq,
199 EFX_POPULATE_QWORD_5(txq->txq_hw_ring[added & txq->ptr_mask],
200 ESF_DZ_TX_DESC_IS_OPT, 1,
201 ESF_DZ_TX_OPTION_TYPE,
202 ESE_DZ_TX_OPTION_DESC_TSO,
203 ESF_DZ_TX_TSO_OPTION_TYPE,
204 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
205 ESF_DZ_TX_TSO_IP_ID, ipv4_id,
206 ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
207 EFX_POPULATE_QWORD_5(txq->txq_hw_ring[(added + 1) & txq->ptr_mask],
208 ESF_DZ_TX_DESC_IS_OPT, 1,
209 ESF_DZ_TX_OPTION_TYPE,
210 ESE_DZ_TX_OPTION_DESC_TSO,
211 ESF_DZ_TX_TSO_OPTION_TYPE,
212 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
213 ESF_DZ_TX_TSO_TCP_MSS, tcp_mss,
214 ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id);
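/*
 * The TSO state is carried by two option descriptors: FATSO2A holds the
 * inner IP ID and TCP sequence number, FATSO2B holds the TCP MSS and the
 * outer IP ID. Together they occupy the SFC_TSO_OPT_DESCS_NUM ring entries
 * reserved ahead of the header descriptor.
 */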
218 sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
225 * This improves performance by pushing a TX descriptor at the same
226 * time as the doorbell. The descriptor must be added to the TXQ,
227 * so that it can be used if the hardware decides not to use the pushed descriptor.
230 desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
231 EFX_POPULATE_OWORD_3(oword,
232 ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
233 ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
234 ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
236 /* DMA sync to device is not required */
239 * rte_io_wmb() guarantees that the STORE operations
240 * (i.e. Tx and event descriptor updates) that precede
241 * the rte_io_wmb() call are visible to the NIC before the STORE
242 * operations that follow it (i.e. the doorbell write).
246 *(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
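/*
 * The 128-bit doorbell write above carries both the new write pointer and
 * a copy of the pushed descriptor (split into HWORD/LWORD), so the NIC may
 * start processing it without first fetching the descriptor from the ring.
 */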
250 sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
252 unsigned int extra_descs_per_seg;
253 unsigned int extra_descs_per_pkt;
256 * VLAN offload is not supported yet, so no extra descriptors
257 * are required for a VLAN option descriptor.
260 /** Maximum length of the mbuf segment data */
261 #define SFC_MBUF_SEG_LEN_MAX UINT16_MAX
262 RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
265 * Each segment is already counted once below. So, calculate
266 * how many extra DMA descriptors may be required per segment in
267 * the worst case because of maximum DMA descriptor length limit.
268 * If the maximum segment length is less than or equal to the maximum DMA
269 * descriptor length, no extra DMA descriptors are required.
271 extra_descs_per_seg =
272 (SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
274 /** Maximum length of the packet */
275 #define SFC_MBUF_PKT_LEN_MAX UINT32_MAX
276 RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
279 * One more limitation on the maximum number of extra DMA descriptors
280 * comes from slicing the entire packet because of the DMA descriptor
281 * length limit, taking into account that there is at least one segment
282 * already counted below (hence the division of the maximum packet
283 * length minus one, rounded down).
284 * TSO packets are handled separately, so here the packet length is limited by the maximum PDU size.
287 extra_descs_per_pkt =
288 (RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
289 SFC_MBUF_PKT_LEN_MAX) - 1) /
290 SFC_EF10_TX_DMA_DESC_LEN_MAX;
292 return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
293 extra_descs_per_pkt);
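/*
 * Worked example with assumed constants (illustration only): if the byte
 * count field is 14 bits wide, SFC_EF10_TX_DMA_DESC_LEN_MAX is 16383.
 * EFX_MAC_PDU_MAX (roughly 9 KB) is below that limit, so
 * extra_descs_per_pkt evaluates to 0 and the estimate for a non-TSO packet
 * is simply m->nb_segs, i.e. one DMA descriptor per mbuf segment.
 */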
297 sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
298 unsigned int needed_desc, unsigned int *dma_desc_space,
304 if (added != txq->added) {
305 sfc_ef10_tx_qpush(txq, added, txq->added);
309 sfc_ef10_tx_reap(txq);
313 * Recalculate DMA descriptor space since Tx reap may change
314 * the number of completed descriptors
316 *dma_desc_space = txq->max_fill_level -
317 (added - txq->completed);
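/*
 * The return value tells the caller whether the estimated number of
 * descriptors now fits into the recalculated descriptor space.
 */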
319 return (needed_desc <= *dma_desc_space);
323 sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
324 unsigned int *added, unsigned int *dma_desc_space,
327 size_t iph_off = m_seg->l2_len;
328 size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
329 size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
330 /* Offset of the payload in the last segment that contains the header */
332 const struct tcp_hdr *th;
337 struct rte_mbuf *first_m_seg = m_seg;
338 unsigned int pkt_start = *added;
339 unsigned int needed_desc;
340 struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
343 /* Both checks can be done unconditionally, so use bitwise OR to have only one branch */
344 if (unlikely((header_len > SFC_TSOH_STD_LEN) |
345 (tcph_off > txq->tso_tcp_header_offset_limit)))
349 * Preliminary estimation of required DMA descriptors, including extra
350 * descriptor for TSO header that is needed when the header is
351 * separated from payload in one segment. It does not include
352 * extra descriptors that may appear when a big segment is split across
353 * several descriptors.
355 needed_desc = m_seg->nb_segs +
356 (unsigned int)SFC_TSO_OPT_DESCS_NUM +
357 (unsigned int)SFC_TSO_HDR_DESCS_NUM;
359 if (needed_desc > *dma_desc_space &&
360 !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
361 dma_desc_space, reap_done)) {
363 * If a future Tx reap may increase the available DMA descriptor
364 * space, do not send the packet now; it can be retried in a later burst.
366 if (txq->completed != pkt_start)
369 * Do not allow sending the packet if even the maximum DMA
370 * descriptor space is insufficient to hold the TSO option
371 * descriptors, the header descriptor and at least one
372 * segment descriptor.
374 if (*dma_desc_space < SFC_TSO_OPT_DESCS_NUM +
375 SFC_TSO_HDR_DESCS_NUM + 1)
379 /* Check that the header is not fragmented */
380 if (rte_pktmbuf_data_len(m_seg) >= header_len) {
381 hdr_addr = rte_pktmbuf_mtod(m_seg, uint8_t *);
382 hdr_iova = rte_mbuf_data_iova(m_seg);
383 if (rte_pktmbuf_data_len(m_seg) == header_len) {
384 /* Cannot send a packet that consists only of header */
385 if (unlikely(m_seg->next == NULL))
388 * Associate header mbuf with header descriptor
389 * which is located after TSO descriptors.
391 txq->sw_ring[(pkt_start + SFC_TSO_OPT_DESCS_NUM) &
392 txq->ptr_mask].mbuf = m_seg;
397 * If there is no payload offset (payload starts at the
398 * beginning of a segment) then an extra descriptor for
399 * separated header is not needed.
406 unsigned int copied_segs;
407 unsigned int hdr_addr_off = (*added & txq->ptr_mask) *
410 hdr_addr = txq->tsoh + hdr_addr_off;
411 hdr_iova = txq->tsoh_iova + hdr_addr_off;
412 copied_segs = sfc_tso_prepare_header(hdr_addr, header_len,
415 /* Cannot send a packet that consists only of header */
416 if (unlikely(m_seg == NULL))
419 m_seg_to_free_up_to = m_seg;
421 * Reduce the number of needed descriptors by the number of
422 * segments that entirely consist of header data.
424 needed_desc -= copied_segs;
426 /* Extra descriptor for separated header is not needed */
431 switch (first_m_seg->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) {
433 const struct ipv4_hdr *iphe4;
435 iphe4 = (const struct ipv4_hdr *)(hdr_addr + iph_off);
436 rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
437 packet_id = rte_be_to_cpu_16(packet_id);
447 th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
448 rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
449 sent_seq = rte_be_to_cpu_32(sent_seq);
451 sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
452 first_m_seg->tso_segsz);
453 (*added) += SFC_TSO_OPT_DESCS_NUM;
455 sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
456 &txq->txq_hw_ring[(*added) & txq->ptr_mask]);
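/*
 * At this point the ring holds, starting at pkt_start, the TSO option
 * descriptors followed by the DMA descriptor for the (possibly copied)
 * header; the payload DMA descriptors are filled in below.
 */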
460 rte_iova_t next_frag = rte_mbuf_data_iova(m_seg);
461 unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
469 rte_iova_t frag_addr = next_frag;
472 frag_len = RTE_MIN(seg_len,
473 SFC_EF10_TX_DMA_DESC_LEN_MAX);
475 next_frag += frag_len;
478 eop = (seg_len == 0 && m_seg->next == NULL);
480 id = (*added) & txq->ptr_mask;
484 * Initially we assume that one DMA descriptor is needed
485 * for every segment. When the segment is split across
486 * several DMA descriptors, increase the estimation.
488 needed_desc += (seg_len != 0);
491 * Handle the case when no more descriptors can be added but not
492 * all segments have been processed yet.
494 if (*added - pkt_start == *dma_desc_space &&
496 !sfc_ef10_try_reap(txq, pkt_start, needed_desc,
497 dma_desc_space, reap_done)) {
499 struct rte_mbuf *m_next;
501 if (txq->completed != pkt_start) {
505 * Reset mbuf associations with added descriptors.
508 for (i = pkt_start; i != *added; i++) {
509 id = i & txq->ptr_mask;
510 txq->sw_ring[id].mbuf = NULL;
515 /* Free the segments that cannot be sent */
516 for (m = m_seg->next; m != NULL; m = m_next) {
518 rte_pktmbuf_free_seg(m);
521 /* Ignore the rest of the segment */
525 sfc_ef10_tx_qdesc_dma_create(frag_addr, frag_len,
526 eop, &txq->txq_hw_ring[id]);
528 } while (seg_len != 0);
530 txq->sw_ring[id].mbuf = m_seg;
536 * Free the segments whose content was entirely copied to the TSO
537 * header memory space of the Tx queue.
539 for (m_seg = first_m_seg; m_seg != m_seg_to_free_up_to;) {
540 struct rte_mbuf *seg_to_free = m_seg;
543 rte_pktmbuf_free_seg(seg_to_free);
550 sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
552 struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
554 unsigned int dma_desc_space;
556 struct rte_mbuf **pktp;
557 struct rte_mbuf **pktp_end;
559 if (unlikely(txq->flags &
560 (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
564 dma_desc_space = txq->max_fill_level - (added - txq->completed);
566 reap_done = (dma_desc_space < txq->free_thresh);
568 sfc_ef10_tx_reap(txq);
569 dma_desc_space = txq->max_fill_level - (added - txq->completed);
572 for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
575 struct rte_mbuf *m_seg = *pktp;
576 unsigned int pkt_start = added;
579 if (likely(pktp + 1 != pktp_end))
580 rte_mbuf_prefetch_part1(pktp[1]);
582 if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
585 rc = sfc_ef10_xmit_tso_pkt(txq, m_seg, &added,
586 &dma_desc_space, &reap_done);
590 /* Packet can be sent in following xmit calls */
591 if (likely(rc == ENOSPC))
595 * The packet cannot be sent; report it to the
596 * caller as sent, but actually drop it and
597 * continue with the next packet.
599 rte_pktmbuf_free(*pktp);
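/*
 * The dropped packet is still counted in the value returned to the
 * caller, since pktp keeps advancing; only ENOSPC stops the burst early.
 */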
603 goto dma_desc_space_update;
606 if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
610 /* Push already prepared descriptors before polling */
611 if (added != txq->added) {
612 sfc_ef10_tx_qpush(txq, added, txq->added);
616 sfc_ef10_tx_reap(txq);
618 dma_desc_space = txq->max_fill_level -
619 (added - txq->completed);
620 if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
624 pkt_len = m_seg->pkt_len;
626 rte_iova_t seg_addr = rte_mbuf_data_iova(m_seg);
627 unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
628 unsigned int id = added & txq->ptr_mask;
630 SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
634 sfc_ef10_tx_qdesc_dma_create(seg_addr,
635 seg_len, (pkt_len == 0),
636 &txq->txq_hw_ring[id]);
639 * rte_pktmbuf_free() is commonly used in DPDK for
640 * recycling packets: the function checks every
641 * segment's reference counter and returns the
642 * buffer to its pool whenever possible.
643 * Nevertheless, freeing mbuf segments one by one
644 * may entail some performance decline.
645 * Therefore, sfc_ef10_tx_reap() does the same job
646 * on its own and frees buffers in bulk (all mbufs
647 * within a bulk belong to the same pool).
648 * From this perspective, individual segment pointers
649 * must be associated with the corresponding SW
650 * descriptors independently so that a single pass
651 * on reap is sufficient to inspect all the buffers.
653 txq->sw_ring[id].mbuf = m_seg;
657 } while ((m_seg = m_seg->next) != 0);
659 dma_desc_space_update:
660 dma_desc_space -= (added - pkt_start);
663 if (likely(added != txq->added)) {
664 sfc_ef10_tx_qpush(txq, added, txq->added);
668 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
670 sfc_ef10_tx_reap(txq);
673 return pktp - &tx_pkts[0];
677 sfc_ef10_simple_tx_reap(struct sfc_ef10_txq *txq)
679 const unsigned int old_read_ptr = txq->evq_read_ptr;
680 const unsigned int ptr_mask = txq->ptr_mask;
681 unsigned int completed = txq->completed;
682 unsigned int pending = completed;
684 pending += sfc_ef10_tx_process_events(txq);
686 if (pending != completed) {
687 struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
691 struct sfc_ef10_tx_sw_desc *txd;
693 txd = &txq->sw_ring[completed & ptr_mask];
695 if (nb == RTE_DIM(bulk)) {
696 rte_mempool_put_bulk(bulk[0]->pool,
701 bulk[nb++] = txd->mbuf;
702 } while (++completed != pending);
704 rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);
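/*
 * Unlike sfc_ef10_tx_reap(), no per-segment prefree or per-pool grouping
 * is needed here: the simple datapath advertises neither multi-pool nor
 * reference counting support, so every completed mbuf may be returned to
 * bulk[0]->pool directly.
 */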
706 txq->completed = completed;
709 sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
715 sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
718 struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
719 unsigned int ptr_mask;
721 unsigned int dma_desc_space;
723 struct rte_mbuf **pktp;
724 struct rte_mbuf **pktp_end;
726 if (unlikely(txq->flags &
727 (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
730 ptr_mask = txq->ptr_mask;
732 dma_desc_space = txq->max_fill_level - (added - txq->completed);
734 reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
736 sfc_ef10_simple_tx_reap(txq);
737 dma_desc_space = txq->max_fill_level - (added - txq->completed);
740 pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
741 for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
742 struct rte_mbuf *pkt = *pktp;
743 unsigned int id = added & ptr_mask;
745 SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
746 SFC_EF10_TX_DMA_DESC_LEN_MAX);
748 sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
749 rte_pktmbuf_data_len(pkt),
750 true, &txq->txq_hw_ring[id]);
752 txq->sw_ring[id].mbuf = pkt;
757 if (likely(added != txq->added)) {
758 sfc_ef10_tx_qpush(txq, added, txq->added);
762 #if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
764 sfc_ef10_simple_tx_reap(txq);
767 return pktp - &tx_pkts[0];
770 static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
772 sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
775 * Number of descriptors just defines maximum number of pushed
776 * descriptors (fill level).
778 dev_info->tx_desc_lim.nb_min = 1;
779 dev_info->tx_desc_lim.nb_align = 1;
782 static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
784 sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
785 struct sfc_dp_tx_hw_limits *limits,
786 unsigned int *txq_entries,
787 unsigned int *evq_entries,
788 unsigned int *txq_max_fill_level)
791 * rte_ethdev API guarantees that the number meets min, max and
792 * alignment requirements.
794 if (nb_tx_desc <= limits->txq_min_entries)
795 *txq_entries = limits->txq_min_entries;
797 *txq_entries = rte_align32pow2(nb_tx_desc);
799 *evq_entries = *txq_entries;
801 *txq_max_fill_level = RTE_MIN(nb_tx_desc,
802 SFC_EF10_TXQ_LIMIT(*evq_entries));
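/*
 * Illustrative example: a request for 1000 Tx descriptors is rounded up to
 * a 1024-entry Tx ring and a 1024-entry event queue; assuming 8 events per
 * cache line, SFC_EF10_TXQ_LIMIT(1024) is 1014, so the reported maximum
 * fill level stays at the requested 1000.
 */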
806 static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
808 sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
809 const struct rte_pci_addr *pci_addr, int socket_id,
810 const struct sfc_dp_tx_qcreate_info *info,
811 struct sfc_dp_txq **dp_txqp)
813 struct sfc_ef10_txq *txq;
817 if (info->txq_entries != info->evq_entries)
821 txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
822 RTE_CACHE_LINE_SIZE, socket_id);
826 sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
829 txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
831 sizeof(*txq->sw_ring),
832 RTE_CACHE_LINE_SIZE, socket_id);
833 if (txq->sw_ring == NULL)
834 goto fail_sw_ring_alloc;
836 if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
837 txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
842 if (txq->tsoh == NULL)
843 goto fail_tsoh_alloc;
845 txq->tsoh_iova = rte_malloc_virt2iova(txq->tsoh);
848 txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
849 txq->ptr_mask = info->txq_entries - 1;
850 txq->max_fill_level = info->max_fill_level;
851 txq->free_thresh = info->free_thresh;
852 txq->txq_hw_ring = info->txq_hw_ring;
853 txq->doorbell = (volatile uint8_t *)info->mem_bar +
854 ER_DZ_TX_DESC_UPD_REG_OFST +
855 (info->hw_index << info->vi_window_shift);
856 txq->evq_hw_ring = info->evq_hw_ring;
857 txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
863 rte_free(txq->sw_ring);
873 static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
875 sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
877 struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
880 rte_free(txq->sw_ring);
884 static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
886 sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
887 unsigned int txq_desc_index)
889 struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
891 txq->evq_read_ptr = evq_read_ptr;
892 txq->added = txq->completed = txq_desc_index;
894 txq->flags |= SFC_EF10_TXQ_STARTED;
895 txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
900 static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
902 sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
904 struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
906 txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
908 *evq_read_ptr = txq->evq_read_ptr;
911 static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
913 sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
915 __rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
917 SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
920 * It is safe to ignore Tx event since we reap all mbufs on
921 * queue purge anyway.
927 static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
929 sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
931 struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
932 unsigned int completed;
934 for (completed = txq->completed; completed != txq->added; ++completed) {
935 struct sfc_ef10_tx_sw_desc *txd;
937 txd = &txq->sw_ring[completed & txq->ptr_mask];
938 if (txd->mbuf != NULL) {
939 rte_pktmbuf_free_seg(txd->mbuf);
944 txq->flags &= ~SFC_EF10_TXQ_STARTED;
948 sfc_ef10_tx_qdesc_npending(struct sfc_ef10_txq *txq)
950 const unsigned int curr_done = txq->completed - 1;
951 unsigned int anew_done = curr_done;
953 const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
955 if (unlikely(txq->flags &
956 (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
959 while (sfc_ef10_tx_get_event(txq, &tx_ev))
960 anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
963 * The function does not process events, so restore the event queue
964 * read pointer to its original position to allow the events that
965 * were read to be processed later.
967 txq->evq_read_ptr = evq_old_read_ptr;
969 return (anew_done - curr_done) & txq->ptr_mask;
972 static sfc_dp_tx_qdesc_status_t sfc_ef10_tx_qdesc_status;
974 sfc_ef10_tx_qdesc_status(struct sfc_dp_txq *dp_txq,
977 struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
978 unsigned int npending = sfc_ef10_tx_qdesc_npending(txq);
980 if (unlikely(offset > txq->ptr_mask))
983 if (unlikely(offset >= txq->max_fill_level))
984 return RTE_ETH_TX_DESC_UNAVAIL;
986 if (unlikely(offset < npending))
987 return RTE_ETH_TX_DESC_FULL;
989 return RTE_ETH_TX_DESC_DONE;
992 struct sfc_dp_tx sfc_ef10_tx = {
994 .name = SFC_KVARG_DATAPATH_EF10,
996 .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
998 .features = SFC_DP_TX_FEAT_TSO |
999 SFC_DP_TX_FEAT_MULTI_SEG |
1000 SFC_DP_TX_FEAT_MULTI_POOL |
1001 SFC_DP_TX_FEAT_REFCNT |
1002 SFC_DP_TX_FEAT_MULTI_PROCESS,
1003 .get_dev_info = sfc_ef10_get_dev_info,
1004 .qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
1005 .qcreate = sfc_ef10_tx_qcreate,
1006 .qdestroy = sfc_ef10_tx_qdestroy,
1007 .qstart = sfc_ef10_tx_qstart,
1008 .qtx_ev = sfc_ef10_tx_qtx_ev,
1009 .qstop = sfc_ef10_tx_qstop,
1010 .qreap = sfc_ef10_tx_qreap,
1011 .qdesc_status = sfc_ef10_tx_qdesc_status,
1012 .pkt_burst = sfc_ef10_xmit_pkts,
1015 struct sfc_dp_tx sfc_ef10_simple_tx = {
1017 .name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
1020 .features = SFC_DP_TX_FEAT_MULTI_PROCESS,
1021 .get_dev_info = sfc_ef10_get_dev_info,
1022 .qsize_up_rings = sfc_ef10_tx_qsize_up_rings,
1023 .qcreate = sfc_ef10_tx_qcreate,
1024 .qdestroy = sfc_ef10_tx_qdestroy,
1025 .qstart = sfc_ef10_tx_qstart,
1026 .qtx_ev = sfc_ef10_tx_qtx_ev,
1027 .qstop = sfc_ef10_tx_qstop,
1028 .qreap = sfc_ef10_tx_qreap,
1029 .qdesc_status = sfc_ef10_tx_qdesc_status,
1030 .pkt_burst = sfc_ef10_simple_xmit_pkts,