/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include "efx_types.h"
#include "efx_regs_ef100.h"

#include "sfc_debug.h"
#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"

#define sfc_ef100_tx_err(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_txq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_tx_debug(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_txq)->dp.dpq, \
		   __VA_ARGS__)

/** Maximum length of the send descriptor data */
#define SFC_EF100_TX_SEND_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEND_LEN_WIDTH) - 1)

/** Maximum length of the segment descriptor data */
#define SFC_EF100_TX_SEG_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEG_LEN_WIDTH) - 1)
/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF100_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 1 /* Tx error */ - 1 /* flush */)
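/*
 * For example, a 512-entry Tx ring gives SFC_EF100_TXQ_LIMIT(512) =
 * 512 - 1 - 1 - 1 = 509 descriptors that may be outstanding at once.
 */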
struct sfc_ef100_tx_sw_desc {
	struct rte_mbuf			*mbuf;

struct sfc_ef100_txq {
#define SFC_EF100_TXQ_STARTED		0x1
#define SFC_EF100_TXQ_NOT_RUNNING	0x2
#define SFC_EF100_TXQ_EXCEPTION		0x4
	unsigned int			ptr_mask;
	unsigned int			completed;
	unsigned int			max_fill_level;
	unsigned int			free_thresh;
	struct sfc_ef100_tx_sw_desc	*sw_ring;
	efx_oword_t			*txq_hw_ring;
	volatile void			*doorbell;

	unsigned int			evq_read_ptr;
	unsigned int			evq_phase_bit_shift;
	volatile efx_qword_t		*evq_hw_ring;

	uint16_t			tso_tcp_header_offset_limit;
	uint16_t			tso_max_nb_header_descs;
	uint16_t			tso_max_header_len;
	uint16_t			tso_max_nb_payload_descs;
	uint32_t			tso_max_payload_len;
	uint32_t			tso_max_nb_outgoing_frames;

	/* Datapath transmit queue anchor */
static inline struct sfc_ef100_txq *
sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
	return container_of(dp_txq, struct sfc_ef100_txq, dp);

sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
	size_t payload_len = m->pkt_len - header_len;
	unsigned long mss_conformant_max_payload_len;
	unsigned int nb_payload_descs;
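	/*
	 * A single TSO transaction may produce at most
	 * tso_max_nb_outgoing_frames frames on the wire, so an MSS-conformant
	 * payload cannot exceed that many MSS-sized segments.
	 */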
	mss_conformant_max_payload_len =
		m->tso_segsz * txq->tso_max_nb_outgoing_frames;
	/*
	 * We do not really need the exact number of payload segments; the
	 * total number of segments serves as an upper limit. In practice the
	 * maximum number of payload segments is significantly bigger than the
	 * maximum number of header segments, so header segments may be
	 * neglected when the total is used to estimate the number of payload
	 * segments required.
	 */
	nb_payload_descs = m->nb_segs;

	/*
	 * Carry out multiple independent checks using bitwise OR
	 * to avoid unnecessary conditional branching.
	 */
	if (unlikely((header_len > txq->tso_max_header_len) |
		     (nb_payload_descs > txq->tso_max_nb_payload_descs) |
		     (payload_len > txq->tso_max_payload_len) |
		     (payload_len > mss_conformant_max_payload_len) |
		     (m->pkt_len == header_len)))
sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		unsigned int max_nb_header_segs = 0;
		bool calc_phdr_cksum = false;
		/*
		 * Partial checksum offload is used in the case of inner
		 * TCP/UDP checksum offload. It requires the pseudo-header
		 * checksum, which is calculated below, and therefore requires
		 * contiguous packet headers.
		 */
		if ((m->ol_flags & PKT_TX_TUNNEL_MASK) &&
		    (m->ol_flags & PKT_TX_L4_MASK)) {
			calc_phdr_cksum = true;
			max_nb_header_segs = 1;
		} else if (m->ol_flags & PKT_TX_TCP_SEG) {
			max_nb_header_segs = txq->tso_max_nb_header_descs;

		ret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs, 0,
					    txq->tso_tcp_header_offset_limit,
					    txq->max_fill_level, 1, 0);
		if (unlikely(ret != 0)) {
		if (m->ol_flags & PKT_TX_TCP_SEG) {
			ret = sfc_ef100_tx_prepare_pkt_tso(txq, m);
			if (unlikely(ret != 0)) {
		} else if (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {

		if (calc_phdr_cksum) {
			/*
			 * Full checksum offload handles the IPv4 header
			 * checksum on its own and does not require any
			 * assistance, so exclude it here.
			 */
			ret = rte_net_intel_cksum_flags_prepare(m,
					m->ol_flags & ~PKT_TX_IP_CKSUM);
			if (unlikely(ret != 0)) {
sfc_ef100_tx_get_event(struct sfc_ef100_txq *txq, efx_qword_t *ev)
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
	/*
	 * The exception flag is set when reap is done.
	 * Reap is never done twice per packet burst get, and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF100_TXQ_EXCEPTION) == 0);

	*ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];

	if (!sfc_ef100_ev_present(ev,
			(txq->evq_read_ptr >> txq->evq_phase_bit_shift) & 1))

	if (unlikely(!sfc_ef100_ev_type_is(ev,
					   ESE_GZ_EF100_EV_TX_COMPLETION))) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF100_TXQ_EXCEPTION;
		sfc_ef100_tx_err(txq,
			"TxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
			txq->evq_read_ptr, txq->evq_read_ptr & txq->ptr_mask,
			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));

	sfc_ef100_tx_debug(txq, "TxQ got event %08x:%08x at %u (%#x)",
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
			   txq->evq_read_ptr & txq->ptr_mask);
sfc_ef100_tx_process_events(struct sfc_ef100_txq *txq)
	unsigned int num_descs = 0;
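	/*
	 * Each TX_COMPLETION event acknowledges a batch of descriptors;
	 * ESF_GZ_EV_TXCMPL_NUM_DESC carries the batch size, so the per-event
	 * counts are summed here and reaped later in one pass.
	 */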
	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		num_descs += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);
sfc_ef100_tx_reap_num_descs(struct sfc_ef100_txq *txq, unsigned int num_descs)
		unsigned int completed = txq->completed;
		unsigned int pending = completed + num_descs;
		struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];

			struct sfc_ef100_tx_sw_desc *txd;

			txd = &txq->sw_ring[completed & txq->ptr_mask];
			if (txd->mbuf == NULL)

			m = rte_pktmbuf_prefree_seg(txd->mbuf);

			if (nb == RTE_DIM(bulk) ||
			    (nb != 0 && m->pool != bulk[0]->pool)) {
				rte_mempool_put_bulk(bulk[0]->pool,
		} while (++completed != pending);

			rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

		txq->completed = completed;

sfc_ef100_tx_reap(struct sfc_ef100_txq *txq)
	sfc_ef100_tx_reap_num_descs(txq, sfc_ef100_tx_process_events(txq));
sfc_ef100_tx_qdesc_cso_inner_l3(uint64_t tx_tunnel)
	case PKT_TX_TUNNEL_VXLAN:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN;
	case PKT_TX_TUNNEL_GENEVE:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE;
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
sfc_ef100_tx_qdesc_send_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
	uint16_t part_cksum_w;
	uint16_t l4_offset_w;

	if ((m->ol_flags & PKT_TX_TUNNEL_MASK) == 0) {
		outer_l3 = (m->ol_flags & PKT_TX_IP_CKSUM);
		outer_l4 = (m->ol_flags & PKT_TX_L4_MASK);
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
		partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
		outer_l3 = (m->ol_flags & PKT_TX_OUTER_IP_CKSUM);
		outer_l4 = (m->ol_flags & PKT_TX_OUTER_UDP_CKSUM);
		inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(m->ol_flags &

		switch (m->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_TCP_CKSUM:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP;
			part_cksum_w = offsetof(struct rte_tcp_hdr, cksum) >> 1;
		case PKT_TX_UDP_CKSUM:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP;
			part_cksum_w = offsetof(struct rte_udp_hdr,
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
		l4_offset_w = (m->outer_l2_len + m->outer_l3_len +
			       m->l2_len + m->l3_len) >> 1;
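		/*
		 * The *_W descriptor fields are expressed in 16-bit words,
		 * hence the byte offsets computed above are shifted right
		 * by one.
		 */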
	EFX_POPULATE_OWORD_10(*tx_desc,
			ESF_GZ_TX_SEND_ADDR, rte_mbuf_data_iova(m),
			ESF_GZ_TX_SEND_LEN, rte_pktmbuf_data_len(m),
			ESF_GZ_TX_SEND_NUM_SEGS, m->nb_segs,
			ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, l4_offset_w,
			ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, part_cksum_w,
			ESF_GZ_TX_SEND_CSO_PARTIAL_EN, partial_en,
			ESF_GZ_TX_SEND_CSO_INNER_L3, inner_l3,
			ESF_GZ_TX_SEND_CSO_OUTER_L3, outer_l3,
			ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);
sfc_ef100_tx_qdesc_seg_create(rte_iova_t addr, uint16_t len,
			      efx_oword_t *tx_desc)
	EFX_POPULATE_OWORD_3(*tx_desc,
			ESF_GZ_TX_SEG_ADDR, addr,
			ESF_GZ_TX_SEG_LEN, len,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG);
sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
			      uint16_t nb_header_descs,
			      uint16_t nb_payload_descs,
			      size_t header_len, size_t payload_len,
			      size_t iph_off, size_t tcph_off,
			      efx_oword_t *tx_desc)
	efx_oword_t tx_desc_extra_fields;
	/*
	 * If no tunnel encapsulation is present, then the ED_INNER
	 * fields should be used.
	 */
	int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;

	EFX_POPULATE_OWORD_7(*tx_desc,
			ESF_GZ_TX_TSO_MSS, m->tso_segsz,
			ESF_GZ_TX_TSO_HDR_NUM_SEGS, nb_header_descs,
			ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, nb_payload_descs,
			ESF_GZ_TX_TSO_ED_INNER_IP4_ID, ed_inner_ip_id,
			ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
			ESF_GZ_TX_TSO_HDR_LEN_W, header_len >> 1,
			ESF_GZ_TX_TSO_PAYLOAD_LEN, payload_len);
	EFX_POPULATE_OWORD_5(tx_desc_extra_fields,
			/*
			 * Inner offsets are required for inner IPv4 ID
			 * and IP length edits.
			 */
			ESF_GZ_TX_TSO_INNER_L3_OFF_W, iph_off >> 1,
			ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcph_off >> 1,
			/*
			 * Use outer full checksum offloads which do
			 * not require any extra information.
			 */
			ESF_GZ_TX_TSO_CSO_OUTER_L3, 1,
			ESF_GZ_TX_TSO_CSO_OUTER_L4, 1,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO);
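	/*
	 * EFX_POPULATE_OWORD_*() zeroes the fields it does not set, so the
	 * two partially populated descriptors can be merged with a plain OR.
	 */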
	EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);

sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
	EFX_POPULATE_DWORD_1(dword, ERF_GZ_TX_RING_PIDX, added & txq->ptr_mask);

	/* DMA sync to device is not required */
	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Tx descriptor updates) that precede the
	 * rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_write32(dword.ed_u32[0], txq->doorbell);

	sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
			   EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),

sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
	unsigned int extra_descs = 0;
/** Maximum length of an mbuf segment data */
#define SFC_MBUF_SEG_LEN_MAX		UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
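	/*
	 * data_len is a uint16_t, so no single mbuf segment can exceed
	 * SFC_MBUF_SEG_LEN_MAX bytes; the build-time check above keeps this
	 * assumption in sync with the mbuf definition.
	 */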
	if (m->ol_flags & PKT_TX_TCP_SEG) {
		/* Tx TSO descriptor */
		/*
		 * An extra Tx segment descriptor may be required if the
		 * header ends in the middle of a segment.
		 */
		/*
		 * An mbuf segment cannot be bigger than the maximum segment
		 * length and, since TSO is not used for this packet, cannot
		 * be bigger than the maximum packet length either. Make sure
		 * that the first segment does not need fragmentation (i.e.
		 * splitting into many Tx descriptors).
		 */
		RTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <
				 RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
					 SFC_MBUF_SEG_LEN_MAX));
	/*
	 * No segment of a scattered packet can be bigger than the maximum
	 * segment length. Make sure that subsequent segments do not need
	 * fragmentation (i.e. splitting into many Tx descriptors).
	 */
	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX < SFC_MBUF_SEG_LEN_MAX);

	return m->nb_segs + extra_descs;

static struct rte_mbuf *
sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
		       struct rte_mbuf *m, unsigned int *added)
	struct rte_mbuf *m_seg = m;
	unsigned int nb_hdr_descs;
	unsigned int nb_pld_descs;
	unsigned int seg_split = 0;
	unsigned int tso_desc_id;
	size_t remaining_hdr_len;

	tcph_off = iph_off + m->l3_len;
	header_len = tcph_off + m->l4_len;
	/*
	 * Remember the ID of the TX_TSO descriptor to be filled in.
	 * We can't fill it in right now since we first need to calculate
	 * the number of header and payload segments and don't want to
	 * traverse the chain twice here.
	 */
	tso_desc_id = (*added)++ & txq->ptr_mask;
	remaining_hdr_len = header_len;
		id = (*added)++ & txq->ptr_mask;
		if (rte_pktmbuf_data_len(m_seg) <= remaining_hdr_len) {
			/* The whole segment contains header data only */
			sfc_ef100_tx_qdesc_seg_create(
				rte_mbuf_data_iova(m_seg),
				rte_pktmbuf_data_len(m_seg),
				&txq->txq_hw_ring[id]);
			remaining_hdr_len -= rte_pktmbuf_data_len(m_seg);
			/*
			 * The segment must be split into header and
			 * payload segments
			 */
			sfc_ef100_tx_qdesc_seg_create(
				rte_mbuf_data_iova(m_seg),
				&txq->txq_hw_ring[id]);
			SFC_ASSERT(txq->sw_ring[id].mbuf == NULL);

			id = (*added)++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_seg_create(
				rte_mbuf_data_iova(m_seg) + remaining_hdr_len,
				rte_pktmbuf_data_len(m_seg) - remaining_hdr_len,
				&txq->txq_hw_ring[id]);
			remaining_hdr_len = 0;

		txq->sw_ring[id].mbuf = m_seg;
	} while (remaining_hdr_len > 0);
	/*
	 * If a segment is split into header and payload parts, the added
	 * pointer counts it twice and we must correct for that.
	 */
	nb_hdr_descs = ((id - tso_desc_id) & txq->ptr_mask) - seg_split;
	nb_pld_descs = m->nb_segs - nb_hdr_descs + seg_split;

	sfc_ef100_tx_qdesc_tso_create(m, nb_hdr_descs, nb_pld_descs, header_len,
				      rte_pktmbuf_pkt_len(m) - header_len,
				      &txq->txq_hw_ring[tso_desc_id]);

sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
	unsigned int dma_desc_space;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
	dma_desc_space = txq->max_fill_level - (added - txq->completed);
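	/*
	 * added - completed is the number of descriptors queued but not yet
	 * reaped, so dma_desc_space is the room left for new descriptors.
	 */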
	reap_done = (dma_desc_space < txq->free_thresh);
		sfc_ef100_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef100_tx_qpush(txq, added);

			sfc_ef100_tx_reap(txq);

			dma_desc_space = txq->max_fill_level -
					 (added - txq->completed);
			if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space)

		if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
			m_seg = sfc_ef100_xmit_tso_pkt(txq, m_seg, &added);
			id = added++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_send_create(m_seg,
						       &txq->txq_hw_ring[id]);
			/*
			 * rte_pktmbuf_free() is commonly used in DPDK to
			 * recycle packets: it checks every segment's
			 * reference counter and returns the buffer to its
			 * pool whenever possible. However, freeing mbuf
			 * segments one by one may entail some performance
			 * decline. Here sfc_ef100_tx_reap() does the same
			 * job on its own and frees buffers in bulk (all
			 * mbufs within a bulk belong to the same pool).
			 * Hence, individual segment pointers must be
			 * associated with the corresponding SW descriptors
			 * independently so that a single loop on reap is
			 * sufficient to inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;

		while (m_seg != NULL) {
			RTE_BUILD_BUG_ON(SFC_MBUF_SEG_LEN_MAX >
					 SFC_EF100_TX_SEG_DESC_LEN_MAX);

			id = added++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_seg_create(rte_mbuf_data_iova(m_seg),
					rte_pktmbuf_data_len(m_seg),
					&txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = m_seg;

		dma_desc_space -= (added - pkt_start);

	if (likely(added != txq->added)) {
		sfc_ef100_tx_qpush(txq, added);

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
		sfc_ef100_tx_reap(txq);

	return pktp - &tx_pkts[0];
static sfc_dp_tx_get_dev_info_t sfc_ef100_get_dev_info;
sfc_ef100_get_dev_info(struct rte_eth_dev_info *dev_info)
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
static sfc_dp_tx_qsize_up_rings_t sfc_ef100_tx_qsize_up_rings;
sfc_ef100_tx_qsize_up_rings(uint16_t nb_tx_desc,
			    struct sfc_dp_tx_hw_limits *limits,
			    unsigned int *txq_entries,
			    unsigned int *evq_entries,
			    unsigned int *txq_max_fill_level)
	/*
	 * The rte_ethdev API guarantees that the number meets the min, max
	 * and alignment requirements.
	 */
	if (nb_tx_desc <= limits->txq_min_entries)
		*txq_entries = limits->txq_min_entries;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);
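	/*
	 * The ring size is rounded up to a power of two so that
	 * "entries - 1" can serve as the wrap-around mask (ptr_mask).
	 */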
	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF100_TXQ_LIMIT(*evq_entries));

static sfc_dp_tx_qcreate_t sfc_ef100_tx_qcreate;
sfc_ef100_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		     const struct rte_pci_addr *pci_addr, int socket_id,
		     const struct sfc_dp_tx_qcreate_info *info,
		     struct sfc_dp_txq **dp_txqp)
	struct sfc_ef100_txq *txq;

	if (info->txq_entries != info->evq_entries)

	txq = rte_zmalloc_socket("sfc-ef100-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	txq->sw_ring = rte_calloc_socket("sfc-ef100-txq-sw_ring",
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;
	txq->flags = SFC_EF100_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
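	/*
	 * evq_entries is a power of two, so rte_bsf32() yields its log2;
	 * the bit at that position of the read pointer toggles on every EvQ
	 * wrap-around and is matched against the event phase bit in
	 * sfc_ef100_tx_get_event().
	 */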
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_GZ_TX_RING_DOORBELL_OFST +
			(info->hw_index << info->vi_window_shift);
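	/*
	 * The doorbell address is derived from the queue's hw_index shifted
	 * by the VI (virtual interface) window size, i.e. each queue's Tx
	 * doorbell register lives at a fixed offset within its own VI window
	 * of the device memory BAR.
	 */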
	txq->evq_hw_ring = info->evq_hw_ring;

	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
	txq->tso_max_nb_header_descs = info->tso_max_nb_header_descs;
	txq->tso_max_header_len = info->tso_max_header_len;
	txq->tso_max_nb_payload_descs = info->tso_max_nb_payload_descs;
	txq->tso_max_payload_len = info->tso_max_payload_len;
	txq->tso_max_nb_outgoing_frames = info->tso_max_nb_outgoing_frames;

	sfc_ef100_tx_debug(txq, "TxQ doorbell is %p", txq->doorbell);
static sfc_dp_tx_qdestroy_t sfc_ef100_tx_qdestroy;
sfc_ef100_tx_qdestroy(struct sfc_dp_txq *dp_txq)
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);

static sfc_dp_tx_qstart_t sfc_ef100_tx_qstart;
sfc_ef100_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		    unsigned int txq_desc_index)
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF100_TXQ_STARTED;
	txq->flags &= ~(SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION);
static sfc_dp_tx_qstop_t sfc_ef100_tx_qstop;
sfc_ef100_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF100_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;

static sfc_dp_tx_qtx_ev_t sfc_ef100_tx_qtx_ev;
sfc_ef100_tx_qtx_ev(struct sfc_dp_txq *dp_txq, unsigned int num_descs)
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF100_TXQ_NOT_RUNNING);

	sfc_ef100_tx_reap_num_descs(txq, num_descs);
static sfc_dp_tx_qreap_t sfc_ef100_tx_qreap;
sfc_ef100_tx_qreap(struct sfc_dp_txq *dp_txq)
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef100_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);

	txq->flags &= ~SFC_EF100_TXQ_STARTED;

sfc_ef100_tx_qdesc_npending(struct sfc_ef100_txq *txq)
	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
	unsigned int npending = 0;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		npending += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);
	/*
	 * The function does not process events, so restore the event queue
	 * read pointer to its original position to allow the events that
	 * were read to be processed later.
	 */
	txq->evq_read_ptr = evq_old_read_ptr;
static sfc_dp_tx_qdesc_status_t sfc_ef100_tx_qdesc_status;
sfc_ef100_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int pushed = txq->added - txq->completed;

	if (unlikely(offset > txq->ptr_mask))

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;

	return (offset >= pushed ||
		offset < sfc_ef100_tx_qdesc_npending(txq)) ?
		RTE_ETH_TX_DESC_DONE : RTE_ETH_TX_DESC_FULL;
struct sfc_dp_tx sfc_ef100_tx = {
		.name		= SFC_KVARG_DATAPATH_EF100,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
	.dev_offload_capa	= 0,
	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM |
				  DEV_TX_OFFLOAD_UDP_CKSUM |
				  DEV_TX_OFFLOAD_TCP_CKSUM |
				  DEV_TX_OFFLOAD_MULTI_SEGS |
				  DEV_TX_OFFLOAD_TCP_TSO,
	.get_dev_info		= sfc_ef100_get_dev_info,
	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
	.qcreate		= sfc_ef100_tx_qcreate,
	.qdestroy		= sfc_ef100_tx_qdestroy,
	.qstart			= sfc_ef100_tx_qstart,
	.qtx_ev			= sfc_ef100_tx_qtx_ev,
	.qstop			= sfc_ef100_tx_qstop,
	.qreap			= sfc_ef100_tx_qreap,
	.qdesc_status		= sfc_ef100_tx_qdesc_status,
	.pkt_prepare		= sfc_ef100_tx_prepare_pkts,
	.pkt_burst		= sfc_ef100_xmit_pkts,