/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_io.h>
#include <rte_net.h>

#include "efx.h"
#include "efx_types.h"
#include "efx_regs_ef100.h"

#include "sfc_debug.h"
#include "sfc_dp_tx.h"
#include "sfc_tweak.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"
#include "sfc_nic_dma_dp.h"
#define sfc_ef100_tx_err(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_txq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_tx_debug(_txq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_txq)->dp.dpq, \
		   __VA_ARGS__)
/** Maximum length of the send descriptor data */
#define SFC_EF100_TX_SEND_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEND_LEN_WIDTH) - 1)

/** Maximum length of the segment descriptor data */
#define SFC_EF100_TX_SEG_DESC_LEN_MAX \
	((1u << ESF_GZ_TX_SEG_LEN_WIDTH) - 1)
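
/*
 * A SEND descriptor carries the first (or only) buffer of a packet, while
 * SEG descriptors carry the remaining buffers of a multi-segment packet.
 * The two hardware length fields have different widths, hence the separate
 * limits checked by the RTE_BUILD_BUG_ON()s in the transmit path below.
 */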
/**
 * Maximum number of descriptors/buffers in the Tx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Tx queue. The maximum number of events on the datapath can be estimated
 * as the number of Tx queue entries (one event per Tx buffer in the worst
 * case) plus Tx error and flush events.
 */
#define SFC_EF100_TXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 1 /* Tx error */ - 1 /* flush */)
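
/*
 * For example (illustrative only): with a 1024-entry Tx ring,
 * SFC_EF100_TXQ_LIMIT(1024) = 1024 - 1 - 1 - 1 = 1021, so at most 1021
 * descriptors may be in flight, leaving room in the equally sized event
 * queue for the error and flush events.
 */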
struct sfc_ef100_tx_sw_desc {
	struct rte_mbuf			*mbuf;
};
struct sfc_ef100_txq {
	unsigned int			flags;
#define SFC_EF100_TXQ_STARTED		0x1
#define SFC_EF100_TXQ_NOT_RUNNING	0x2
#define SFC_EF100_TXQ_EXCEPTION		0x4
#define SFC_EF100_TXQ_NIC_DMA_MAP	0x8

	unsigned int			ptr_mask;
	unsigned int			added;
	unsigned int			completed;
	unsigned int			max_fill_level;
	unsigned int			free_thresh;
	struct sfc_ef100_tx_sw_desc	*sw_ring;
	efx_oword_t			*txq_hw_ring;
	volatile void			*doorbell;

	unsigned int			evq_read_ptr;
	unsigned int			evq_phase_bit_shift;
	volatile efx_qword_t		*evq_hw_ring;

	uint16_t			tso_tcp_header_offset_limit;
	uint16_t			tso_max_nb_header_descs;
	uint16_t			tso_max_header_len;
	uint16_t			tso_max_nb_payload_descs;
	uint32_t			tso_max_payload_len;
	uint32_t			tso_max_nb_outgoing_frames;

	/* Datapath transmit queue anchor */
	struct sfc_dp_txq		dp;

	const struct sfc_nic_dma_info	*nic_dma_info;
};
static inline struct sfc_ef100_txq *
sfc_ef100_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
{
	return container_of(dp_txq, struct sfc_ef100_txq, dp);
}
static int
sfc_ef100_tx_prepare_pkt_tso(struct sfc_ef100_txq * const txq,
			     struct rte_mbuf *m)
{
	size_t header_len = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
			     m->outer_l2_len + m->outer_l3_len : 0) +
			    m->l2_len + m->l3_len + m->l4_len;
	size_t payload_len = m->pkt_len - header_len;
	unsigned long mss_conformant_max_payload_len;
	unsigned int nb_payload_descs;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
	case 0:
		/* FALLTHROUGH */
	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
		/* FALLTHROUGH */
	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
		break;
	default:
		return ENOTSUP;
	}
#endif

	mss_conformant_max_payload_len =
		m->tso_segsz * txq->tso_max_nb_outgoing_frames;
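
	/*
	 * The product computed above bounds the TCP payload: the adapter may
	 * emit at most tso_max_nb_outgoing_frames frames per TSO transaction,
	 * each carrying up to tso_segsz bytes of payload. For example
	 * (illustrative values only), with an MSS of 1400 bytes and a limit
	 * of 1024 outgoing frames, the payload of one TSO packet may not
	 * exceed 1400 * 1024 bytes.
	 */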

	/*
	 * Don't really want to know the exact number of payload segments.
	 * Just use the total number of segments as an upper limit. In
	 * practice the maximum number of payload segments is significantly
	 * bigger than the maximum number of header segments, so the total
	 * number of segments (without subtracting header segments) is a
	 * good enough estimate of the number of payload segments required.
	 */
	nb_payload_descs = m->nb_segs;

	/*
	 * Carry out multiple independent checks using bitwise OR
	 * to avoid unnecessary conditional branching.
	 */
	if (unlikely((header_len > txq->tso_max_header_len) |
		     (nb_payload_descs > txq->tso_max_nb_payload_descs) |
		     (payload_len > txq->tso_max_payload_len) |
		     (payload_len > mss_conformant_max_payload_len) |
		     (m->pkt_len == header_len)))
		return EINVAL;

	return 0;
}

static uint16_t
sfc_ef100_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		unsigned int max_nb_header_segs = 0;
		bool calc_phdr_cksum = false;
		int ret;

		/*
		 * Partial checksum offload is used in the case of
		 * inner TCP/UDP checksum offload. It requires the
		 * pseudo-header checksum, which is calculated below,
		 * and contiguous packet headers.
		 */
		if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
		    (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)) {
			calc_phdr_cksum = true;
			max_nb_header_segs = 1;
		} else if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			max_nb_header_segs = txq->tso_max_nb_header_descs;
		}

		ret = sfc_dp_tx_prepare_pkt(m, max_nb_header_segs, 0,
					    txq->tso_tcp_header_offset_limit,
					    txq->max_fill_level, 1, 0);
		if (unlikely(ret != 0)) {
			rte_errno = ret;
			break;
		}

		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			ret = sfc_ef100_tx_prepare_pkt_tso(txq, m);
			if (unlikely(ret != 0)) {
				rte_errno = ret;
				break;
			}
		} else if (m->nb_segs > EFX_MASK32(ESF_GZ_TX_SEND_NUM_SEGS)) {
			rte_errno = EINVAL;
			break;
		}

		if (calc_phdr_cksum) {
			/*
			 * Full checksum offload does the IPv4 header checksum
			 * and does not require any assistance.
			 */
			ret = rte_net_intel_cksum_flags_prepare(m,
					m->ol_flags & ~RTE_MBUF_F_TX_IP_CKSUM);
			if (unlikely(ret != 0)) {
static bool
sfc_ef100_tx_get_event(struct sfc_ef100_txq *txq, efx_qword_t *ev)
{
	volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;

	/*
	 * The exception flag is set when reap is done.
	 * It is never done twice per packet burst get, and absence of
	 * the flag is checked on burst get entry.
	 */
	SFC_ASSERT((txq->flags & SFC_EF100_TXQ_EXCEPTION) == 0);

	*ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
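
	/*
	 * Event presence is detected via the phase bit: the bit expected in
	 * a valid event flips every time the event queue wraps around, and
	 * evq_read_ptr >> evq_phase_bit_shift is the wrap-around count whose
	 * lowest bit is the phase expected for the current lap.
	 */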
	if (!sfc_ef100_ev_present(ev,
			(txq->evq_read_ptr >> txq->evq_phase_bit_shift) & 1))
		return false;

	if (unlikely(!sfc_ef100_ev_type_is(ev,
					   ESE_GZ_EF100_EV_TX_COMPLETION))) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		txq->flags |= SFC_EF100_TXQ_EXCEPTION;
		sfc_ef100_tx_err(txq,
			"TxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
			txq->evq_read_ptr, txq->evq_read_ptr & txq->ptr_mask,
			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
		return false;
	}

	sfc_ef100_tx_debug(txq, "TxQ got event %08x:%08x at %u (%#x)",
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
			   txq->evq_read_ptr,
			   txq->evq_read_ptr & txq->ptr_mask);
sfc_ef100_tx_process_events(struct sfc_ef100_txq *txq)
{
	unsigned int num_descs = 0;
	efx_qword_t tx_ev;

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		num_descs += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);
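
	/*
	 * Each TX_COMPLETION event reports how many Tx descriptors the NIC
	 * has finished with; the counts are accumulated above and turned
	 * into freed mbufs by sfc_ef100_tx_reap_num_descs().
	 */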
sfc_ef100_tx_reap_num_descs(struct sfc_ef100_txq *txq, unsigned int num_descs)
{
	unsigned int completed = txq->completed;
	unsigned int pending = completed + num_descs;
	struct rte_mbuf *bulk[SFC_TX_REAP_BULK_SIZE];
	unsigned int nb = 0;

	do {
		struct sfc_ef100_tx_sw_desc *txd;
		struct rte_mbuf *m;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf == NULL)
			continue;

		m = rte_pktmbuf_prefree_seg(txd->mbuf);
		if (nb == RTE_DIM(bulk) ||
		    (nb != 0 && m->pool != bulk[0]->pool)) {
			rte_mempool_put_bulk(bulk[0]->pool,
					     (void *)bulk, nb);
			nb = 0;
		}
		bulk[nb++] = m;
	} while (++completed != pending);
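
	/* Return any mbufs still held in the local bulk array to their pool. */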
	if (nb != 0)
		rte_mempool_put_bulk(bulk[0]->pool, (void *)bulk, nb);

	txq->completed = completed;
sfc_ef100_tx_reap(struct sfc_ef100_txq *txq)
{
	sfc_ef100_tx_reap_num_descs(txq, sfc_ef100_tx_process_events(txq));
}

static void
sfc_ef100_tx_qdesc_prefix_create(const struct rte_mbuf *m, efx_oword_t *tx_desc)
{
	efx_mport_id_t *mport_id =
		RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset, efx_mport_id_t *);

	EFX_POPULATE_OWORD_3(*tx_desc,
			ESF_GZ_TX_PREFIX_EGRESS_MPORT, mport_id->id,
			ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN, 1,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_PREFIX);
}
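
/*
 * The prefix descriptor built above overrides the egress m-port of a packet:
 * the target m-port ID comes from the sfc_dp_mport_offset dynamic mbuf field,
 * and the descriptor is only prepended when the mbuf carries the
 * sfc_dp_mport_override dynamic flag (checked in the transmit path below).
 */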
sfc_ef100_tx_qdesc_cso_inner_l3(uint64_t tx_tunnel)
{
	uint8_t inner_l3;

	switch (tx_tunnel) {
	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN;
		break;
	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE;
		break;
	default:
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
static int
sfc_ef100_tx_map(const struct sfc_ef100_txq *txq, rte_iova_t iova, size_t len,
		 rte_iova_t *dma_addr)
{
	if ((txq->flags & SFC_EF100_TXQ_NIC_DMA_MAP) == 0) {
		*dma_addr = iova;
	} else {
		*dma_addr = sfc_nic_dma_map(txq->nic_dma_info, iova, len);
		if (unlikely(*dma_addr == RTE_BAD_IOVA))
			sfc_ef100_tx_err(txq, "failed to map DMA address on Tx");
	}

static int
sfc_ef100_tx_qdesc_send_create(const struct sfc_ef100_txq *txq,
			       const struct rte_mbuf *m, efx_oword_t *tx_desc)
{
	uint16_t part_cksum_w;
	uint16_t l4_offset_w;

	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) == 0) {
		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_L4_MASK);
		inner_l3 = ESE_GZ_TX_DESC_CS_INNER_L3_OFF;
		partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
		part_cksum_w = 0;
		l4_offset_w = 0;
	} else {
		outer_l3 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM);
		outer_l4 = (m->ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM);
		inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(m->ol_flags &
							   RTE_MBUF_F_TX_TUNNEL_MASK);

		switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
		case RTE_MBUF_F_TX_TCP_CKSUM:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP;
			part_cksum_w = offsetof(struct rte_tcp_hdr, cksum) >> 1;
			break;
		case RTE_MBUF_F_TX_UDP_CKSUM:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP;
			part_cksum_w = offsetof(struct rte_udp_hdr,
						dgram_cksum) >> 1;
			break;
		default:
			partial_en = ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF;
			part_cksum_w = 0;
			break;
		}

		l4_offset_w = (m->outer_l2_len + m->outer_l3_len +
			       m->l2_len + m->l3_len) >> 1;
	}
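
	/*
	 * Note: the *_W fields and variables above are expressed in 16-bit
	 * words, hence the ">> 1" applied to the byte offsets of the L4
	 * header and of the checksum field inside it.
	 */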

	rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova_default(m),
			      rte_pktmbuf_data_len(m), &dma_addr);
	if (unlikely(rc != 0))
		return rc;

	EFX_POPULATE_OWORD_10(*tx_desc,
			ESF_GZ_TX_SEND_ADDR, dma_addr,
			ESF_GZ_TX_SEND_LEN, rte_pktmbuf_data_len(m),
			ESF_GZ_TX_SEND_NUM_SEGS, m->nb_segs,
			ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, l4_offset_w,
			ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, part_cksum_w,
			ESF_GZ_TX_SEND_CSO_PARTIAL_EN, partial_en,
			ESF_GZ_TX_SEND_CSO_INNER_L3, inner_l3,
			ESF_GZ_TX_SEND_CSO_OUTER_L3, outer_l3,
			ESF_GZ_TX_SEND_CSO_OUTER_L4, outer_l4,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEND);

	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
		efx_oword_t tx_desc_extra_fields;

		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
				ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
				ESF_GZ_TX_SEND_VLAN_INSERT_TCI, m->vlan_tci);

		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
	}

	return 0;
}

static void
sfc_ef100_tx_qdesc_seg_create(rte_iova_t addr, uint16_t len,
			      efx_oword_t *tx_desc)
{
	EFX_POPULATE_OWORD_3(*tx_desc,
			ESF_GZ_TX_SEG_ADDR, addr,
			ESF_GZ_TX_SEG_LEN, len,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG);
}

static void
sfc_ef100_tx_qdesc_tso_create(const struct rte_mbuf *m,
			      uint16_t nb_header_descs,
			      uint16_t nb_payload_descs,
			      size_t header_len, size_t payload_len,
			      size_t outer_iph_off, size_t outer_udph_off,
			      size_t iph_off, size_t tcph_off,
			      efx_oword_t *tx_desc)
{
	efx_oword_t tx_desc_extra_fields;
	int ed_outer_udp_len = (outer_udph_off != 0) ? 1 : 0;
	int ed_outer_ip_len = (outer_iph_off != 0) ? 1 : 0;
	int ed_outer_ip_id = (outer_iph_off != 0) ?
		ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 : 0;
	/*
	 * If no tunnel encapsulation is present, then the ED_INNER
	 * fields should be used.
	 */
	int ed_inner_ip_id = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
	uint8_t inner_l3 = sfc_ef100_tx_qdesc_cso_inner_l3(
					m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);

	EFX_POPULATE_OWORD_10(*tx_desc,
			ESF_GZ_TX_TSO_MSS, m->tso_segsz,
			ESF_GZ_TX_TSO_HDR_NUM_SEGS, nb_header_descs,
			ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, nb_payload_descs,
			ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, ed_outer_ip_id,
			ESF_GZ_TX_TSO_ED_INNER_IP4_ID, ed_inner_ip_id,
			ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, ed_outer_ip_len,
			ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
			ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, ed_outer_udp_len,
			ESF_GZ_TX_TSO_HDR_LEN_W, header_len >> 1,
			ESF_GZ_TX_TSO_PAYLOAD_LEN, payload_len);

	EFX_POPULATE_OWORD_9(tx_desc_extra_fields,
			/*
			 * Outer offsets are required for outer IPv4 ID
			 * and length edits in the case of tunnel TSO.
			 */
			ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_iph_off >> 1,
			ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_udph_off >> 1,
			/*
			 * Inner offsets are required for inner IPv4 ID
			 * and IP length edits and partial checksum
			 * offload in the case of tunnel TSO.
			 */
			ESF_GZ_TX_TSO_INNER_L3_OFF_W, iph_off >> 1,
			ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcph_off >> 1,
			ESF_GZ_TX_TSO_CSO_INNER_L4,
				inner_l3 != ESE_GZ_TX_DESC_CS_INNER_L3_OFF,
			ESF_GZ_TX_TSO_CSO_INNER_L3, inner_l3,
			/*
			 * Use outer full checksum offloads which do
			 * not require any extra information.
			 */
			ESF_GZ_TX_TSO_CSO_OUTER_L3, 1,
			ESF_GZ_TX_TSO_CSO_OUTER_L4, 1,
			ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO);

	EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);

	if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
		EFX_POPULATE_OWORD_2(tx_desc_extra_fields,
				ESF_GZ_TX_TSO_VLAN_INSERT_EN, 1,
				ESF_GZ_TX_TSO_VLAN_INSERT_TCI, m->vlan_tci);

		EFX_OR_OWORD(*tx_desc, tx_desc_extra_fields);
	}
}

static void
sfc_ef100_tx_qpush(struct sfc_ef100_txq *txq, unsigned int added)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, ERF_GZ_TX_RING_PIDX, added & txq->ptr_mask);

	/* DMA sync to device is not required */

	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Tx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. doorbell write).
	 */
	rte_write32(dword.ed_u32[0], txq->doorbell);
	txq->dp.dpq.dbells++;

	sfc_ef100_tx_debug(txq, "TxQ pushed doorbell at pidx %u (added=%u)",
			   EFX_DWORD_FIELD(dword, ERF_GZ_TX_RING_PIDX),
			   added);
}

static unsigned int
sfc_ef100_tx_pkt_descs_max(const struct rte_mbuf *m)
{
	unsigned int extra_descs = 0;

/** Maximum length of an mbuf segment's data */
#define SFC_MBUF_SEG_LEN_MAX	UINT16_MAX
	RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
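
	/*
	 * The build-time check above ties SFC_MBUF_SEG_LEN_MAX to the width
	 * of rte_mbuf::data_len: as long as data_len is a 16-bit field, no
	 * single mbuf segment can exceed UINT16_MAX bytes.
	 */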

	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		/* Tx TSO descriptor */
		extra_descs++;
		/*
		 * An extra Tx segment descriptor may be required if the
		 * header ends in the middle of a segment.
		 */
		extra_descs++;
	} else {
		/*
		 * Without TSO, an mbuf segment cannot be bigger than the
		 * maximum segment length and the maximum packet length.
		 * Make sure that the first segment does not need
		 * fragmentation (split into many Tx descriptors).
		 */
		RTE_BUILD_BUG_ON(SFC_EF100_TX_SEND_DESC_LEN_MAX <
				 RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
					 SFC_MBUF_SEG_LEN_MAX));
	}

	if (m->ol_flags & sfc_dp_mport_override) {
		/* Tx override prefix descriptor will be used */
		extra_descs++;
	}

	/*
	 * Any segment of a scattered packet cannot be bigger than the
	 * maximum segment length. Make sure that subsequent segments do
	 * not need fragmentation (split into many Tx descriptors).
	 */
	RTE_BUILD_BUG_ON(SFC_EF100_TX_SEG_DESC_LEN_MAX < SFC_MBUF_SEG_LEN_MAX);

	return m->nb_segs + extra_descs;
}
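
/*
 * Worst-case example for sfc_ef100_tx_pkt_descs_max() (illustrative only):
 * a 3-segment TSO packet with an m-port override may need up to
 * 3 (segments) + 1 (TSO descriptor) + 1 (header/payload split) +
 * 1 (override prefix) = 6 descriptors.
 */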

static int
sfc_ef100_xmit_tso_pkt(struct sfc_ef100_txq * const txq,
		       struct rte_mbuf **m, unsigned int *added)
{
	struct rte_mbuf *m_seg = *m;
	unsigned int nb_hdr_descs;
	unsigned int nb_pld_descs;
	unsigned int seg_split = 0;
	unsigned int tso_desc_id;
	unsigned int id;
	size_t outer_iph_off;
	size_t outer_udph_off;
	size_t iph_off;
	size_t tcph_off;
	size_t header_len;
	size_t remaining_hdr_len;
	rte_iova_t dma_addr;
	int rc;

	if (m_seg->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
		outer_iph_off = m_seg->outer_l2_len;
		outer_udph_off = outer_iph_off + m_seg->outer_l3_len;
	} else {
		outer_iph_off = 0;
		outer_udph_off = 0;
	}

	iph_off = outer_udph_off + m_seg->l2_len;
	tcph_off = iph_off + m_seg->l3_len;
	header_len = tcph_off + m_seg->l4_len;

	/*
	 * Remember the ID of the TX_TSO descriptor to be filled in.
	 * We can't fill it in right now since we need to calculate the
	 * number of header and payload segments first and don't want
	 * to traverse the mbuf chain twice here.
	 */
	tso_desc_id = (*added)++ & txq->ptr_mask;

	remaining_hdr_len = header_len;
	do {
		rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m_seg),
				      rte_pktmbuf_data_len(m_seg), &dma_addr);
		if (unlikely(rc != 0))
			return rc;

		id = (*added)++ & txq->ptr_mask;
		if (rte_pktmbuf_data_len(m_seg) <= remaining_hdr_len) {
			/* The segment is fully a header segment */
			sfc_ef100_tx_qdesc_seg_create(dma_addr,
				rte_pktmbuf_data_len(m_seg),
				&txq->txq_hw_ring[id]);
			remaining_hdr_len -= rte_pktmbuf_data_len(m_seg);
		} else {
			/*
			 * The segment must be split into header and
			 * payload segments
			 */
			sfc_ef100_tx_qdesc_seg_create(dma_addr,
				remaining_hdr_len, &txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = NULL;

			id = (*added)++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_seg_create(
				dma_addr + remaining_hdr_len,
				rte_pktmbuf_data_len(m_seg) - remaining_hdr_len,
				&txq->txq_hw_ring[id]);
			remaining_hdr_len = 0;
			seg_split++;
		}
		txq->sw_ring[id].mbuf = m_seg;
		m_seg = m_seg->next;
	} while (remaining_hdr_len > 0);

	/*
	 * If a segment is split into header and payload segments, the
	 * added counter counts it twice and we should correct for that.
	 */
	nb_hdr_descs = ((id - tso_desc_id) & txq->ptr_mask) - seg_split;
	nb_pld_descs = (*m)->nb_segs - nb_hdr_descs + seg_split;

	sfc_ef100_tx_qdesc_tso_create(*m, nb_hdr_descs, nb_pld_descs, header_len,
				      rte_pktmbuf_pkt_len(*m) - header_len,
				      outer_iph_off, outer_udph_off,
				      iph_off, tcph_off,
				      &txq->txq_hw_ring[tso_desc_id]);

	*m = m_seg;
	return 0;
}

static uint16_t
sfc_ef100_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef100_txq * const txq = sfc_ef100_txq_by_dp_txq(tx_queue);
	unsigned int added;
	unsigned int dma_desc_space;
	bool reap_done;
	struct rte_mbuf **pktp;
	struct rte_mbuf **pktp_end;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
		return 0;

	added = txq->added;
	dma_desc_space = txq->max_fill_level - (added - txq->completed);

	reap_done = (dma_desc_space < txq->free_thresh);
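	/*
	 * Reap lazily: completed descriptors are collected only when the
	 * free descriptor space has dropped below the Tx free threshold,
	 * keeping the common fast path free of event queue polling.
	 */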
	if (reap_done) {
		sfc_ef100_tx_reap(txq);
		dma_desc_space = txq->max_fill_level - (added - txq->completed);
	}

	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
	     pktp != pktp_end;
	     ++pktp) {
		struct rte_mbuf *m_seg = *pktp;
		unsigned int pkt_start = added;
		unsigned int id;
		int rc;

		if (likely(pktp + 1 != pktp_end))
			rte_mbuf_prefetch_part1(pktp[1]);

		if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space) {
			if (reap_done)
				break;
			/* Push already prepared descriptors before polling */
			if (added != txq->added) {
				sfc_ef100_tx_qpush(txq, added);
				txq->added = added;
			}
			sfc_ef100_tx_reap(txq);
			reap_done = true;
			dma_desc_space = txq->max_fill_level -
					 (added - txq->completed);
			if (sfc_ef100_tx_pkt_descs_max(m_seg) > dma_desc_space)
				break;
		}

		if (m_seg->ol_flags & sfc_dp_mport_override) {
			id = added++ & txq->ptr_mask;
			sfc_ef100_tx_qdesc_prefix_create(m_seg,
							 &txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = NULL;
		}

		if (m_seg->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
			rc = sfc_ef100_xmit_tso_pkt(txq, &m_seg, &added);
		} else {
			id = added++ & txq->ptr_mask;
			rc = sfc_ef100_tx_qdesc_send_create(txq, m_seg,
							&txq->txq_hw_ring[id]);

			/*
			 * rte_pktmbuf_free() is commonly used in DPDK to
			 * recycle packets: it checks every segment's
			 * reference counter and returns the buffer to its
			 * pool whenever possible.
			 * Nevertheless, freeing mbuf segments one by one
			 * may entail some performance decline.
			 * For that reason, sfc_ef100_tx_reap() does the same
			 * job on its own and frees buffers in bulks (all
			 * mbufs within a bulk belong to the same pool).
			 * To make this possible, individual segment pointers
			 * must be associated with the corresponding SW
			 * descriptors independently so that a single loop
			 * on reap is sufficient to inspect all the buffers.
			 */
			txq->sw_ring[id].mbuf = m_seg;
			m_seg = m_seg->next;
		}

		while (likely(rc == 0) && m_seg != NULL) {
			rte_iova_t dma_addr;

			RTE_BUILD_BUG_ON(SFC_MBUF_SEG_LEN_MAX >
					 SFC_EF100_TX_SEG_DESC_LEN_MAX);

			id = added++ & txq->ptr_mask;
			rc = sfc_ef100_tx_map(txq, rte_mbuf_data_iova(m_seg),
					      rte_pktmbuf_data_len(m_seg),
					      &dma_addr);
			sfc_ef100_tx_qdesc_seg_create(dma_addr,
					rte_pktmbuf_data_len(m_seg),
					&txq->txq_hw_ring[id]);
			txq->sw_ring[id].mbuf = m_seg;
			m_seg = m_seg->next;
		}

		if (likely(rc == 0)) {
			dma_desc_space -= (added - pkt_start);
			sfc_pkts_bytes_add(&txq->dp.dpq.stats, 1,
					   rte_pktmbuf_pkt_len(*pktp));
		} else {
			added = pkt_start;
			break;
		}

	if (likely(added != txq->added)) {
		sfc_ef100_tx_qpush(txq, added);
		txq->added = added;
	}

#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
	if (!reap_done)
		sfc_ef100_tx_reap(txq);
#endif

	return pktp - &tx_pkts[0];
}

static sfc_dp_tx_get_dev_info_t sfc_ef100_get_dev_info;
static void
sfc_ef100_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * The number of descriptors just defines the maximum number of
	 * pushed descriptors (fill level).
	 */
	dev_info->tx_desc_lim.nb_min = 1;
	dev_info->tx_desc_lim.nb_align = 1;
}

static sfc_dp_tx_qsize_up_rings_t sfc_ef100_tx_qsize_up_rings;
static int
sfc_ef100_tx_qsize_up_rings(uint16_t nb_tx_desc,
			    struct sfc_dp_tx_hw_limits *limits,
			    unsigned int *txq_entries,
			    unsigned int *evq_entries,
			    unsigned int *txq_max_fill_level)
{
	/*
	 * The rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_tx_desc <= limits->txq_min_entries)
		*txq_entries = limits->txq_min_entries;
	else
		*txq_entries = rte_align32pow2(nb_tx_desc);

	*evq_entries = *txq_entries;

	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
				      SFC_EF100_TXQ_LIMIT(*evq_entries));

	return 0;
}
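
/*
 * Example for sfc_ef100_tx_qsize_up_rings() (illustrative only):
 * nb_tx_desc = 512 with txq_min_entries below 512 gives
 * *txq_entries = *evq_entries = 512 and
 * *txq_max_fill_level = RTE_MIN(512, SFC_EF100_TXQ_LIMIT(512)) = 509.
 */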

static sfc_dp_tx_qcreate_t sfc_ef100_tx_qcreate;
static int
sfc_ef100_tx_qcreate(uint16_t port_id, uint16_t queue_id,
		     const struct rte_pci_addr *pci_addr, int socket_id,
		     const struct sfc_dp_tx_qcreate_info *info,
		     struct sfc_dp_txq **dp_txqp)
{
	struct sfc_ef100_txq *txq;

	if (info->txq_entries != info->evq_entries)

	txq = rte_zmalloc_socket("sfc-ef100-txq", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);

	sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);

	txq->sw_ring = rte_calloc_socket("sfc-ef100-txq-sw_ring",
					 info->txq_entries,
					 sizeof(*txq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL)
		goto fail_sw_ring_alloc;

	txq->flags = SFC_EF100_TXQ_NOT_RUNNING;
	txq->ptr_mask = info->txq_entries - 1;
	txq->max_fill_level = info->max_fill_level;
	txq->free_thresh = info->free_thresh;
	txq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
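	/*
	 * Ring sizes are powers of two, so rte_bsf32() of the EvQ size is
	 * its log2; shifting evq_read_ptr right by this amount yields the
	 * wrap-around count whose lowest bit is the expected phase bit
	 * (see sfc_ef100_tx_get_event()).
	 */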
	txq->txq_hw_ring = info->txq_hw_ring;
	txq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_GZ_TX_RING_DOORBELL_OFST +
			(info->hw_index << info->vi_window_shift);
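	/*
	 * The Tx doorbell register lives in the per-queue VI window of the
	 * memory BAR: ER_GZ_TX_RING_DOORBELL_OFST is the offset within a VI
	 * window, and the window for this queue starts at
	 * hw_index << vi_window_shift.
	 */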
	txq->evq_hw_ring = info->evq_hw_ring;

	txq->tso_tcp_header_offset_limit = info->tso_tcp_header_offset_limit;
	txq->tso_max_nb_header_descs = info->tso_max_nb_header_descs;
	txq->tso_max_header_len = info->tso_max_header_len;
	txq->tso_max_nb_payload_descs = info->tso_max_nb_payload_descs;
	txq->tso_max_payload_len = info->tso_max_payload_len;
	txq->tso_max_nb_outgoing_frames = info->tso_max_nb_outgoing_frames;

	txq->nic_dma_info = info->nic_dma_info;
	if (txq->nic_dma_info->nb_regions > 0)
		txq->flags |= SFC_EF100_TXQ_NIC_DMA_MAP;

	sfc_ef100_tx_debug(txq, "TxQ doorbell is %p", txq->doorbell);

static sfc_dp_tx_qdestroy_t sfc_ef100_tx_qdestroy;
static void
sfc_ef100_tx_qdestroy(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	rte_free(txq->sw_ring);
	rte_free(txq);

static sfc_dp_tx_qstart_t sfc_ef100_tx_qstart;
static int
sfc_ef100_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
		    unsigned int txq_desc_index)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->evq_read_ptr = evq_read_ptr;
	txq->added = txq->completed = txq_desc_index;

	txq->flags |= SFC_EF100_TXQ_STARTED;
	txq->flags &= ~(SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION);

	return 0;
}

static sfc_dp_tx_qstop_t sfc_ef100_tx_qstop;
static void
sfc_ef100_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	txq->flags |= SFC_EF100_TXQ_NOT_RUNNING;

	*evq_read_ptr = txq->evq_read_ptr;
}

static sfc_dp_tx_qtx_ev_t sfc_ef100_tx_qtx_ev;
static bool
sfc_ef100_tx_qtx_ev(struct sfc_dp_txq *dp_txq, unsigned int num_descs)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);

	SFC_ASSERT(txq->flags & SFC_EF100_TXQ_NOT_RUNNING);
	sfc_ef100_tx_reap_num_descs(txq, num_descs);
	return false;
}

static sfc_dp_tx_qreap_t sfc_ef100_tx_qreap;
static void
sfc_ef100_tx_qreap(struct sfc_dp_txq *dp_txq)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int completed;

	for (completed = txq->completed; completed != txq->added; ++completed) {
		struct sfc_ef100_tx_sw_desc *txd;

		txd = &txq->sw_ring[completed & txq->ptr_mask];
		if (txd->mbuf != NULL) {
			rte_pktmbuf_free_seg(txd->mbuf);
			txd->mbuf = NULL;
		}
	}

	txq->flags &= ~SFC_EF100_TXQ_STARTED;
}

static unsigned int
sfc_ef100_tx_qdesc_npending(struct sfc_ef100_txq *txq)
{
	const unsigned int evq_old_read_ptr = txq->evq_read_ptr;
	unsigned int npending = 0;
	efx_qword_t tx_ev;

	if (unlikely(txq->flags &
		     (SFC_EF100_TXQ_NOT_RUNNING | SFC_EF100_TXQ_EXCEPTION)))
		return 0;

	while (sfc_ef100_tx_get_event(txq, &tx_ev))
		npending += EFX_QWORD_FIELD(tx_ev, ESF_GZ_EV_TXCMPL_NUM_DESC);

	/*
	 * The function does not process events, so return the event queue
	 * read pointer to its original position to allow the events that
	 * were read to be processed later.
	 */
	txq->evq_read_ptr = evq_old_read_ptr;
	return npending;
}

static sfc_dp_tx_qdesc_status_t sfc_ef100_tx_qdesc_status;
static int
sfc_ef100_tx_qdesc_status(struct sfc_dp_txq *dp_txq, uint16_t offset)
{
	struct sfc_ef100_txq *txq = sfc_ef100_txq_by_dp_txq(dp_txq);
	unsigned int pushed = txq->added - txq->completed;

	if (unlikely(offset > txq->ptr_mask))
		return -EINVAL;

	if (unlikely(offset >= txq->max_fill_level))
		return RTE_ETH_TX_DESC_UNAVAIL;
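
	/*
	 * Interpretation of the return below: an offset beyond the number of
	 * descriptors currently in flight ("pushed") has already been reaped,
	 * and an offset covered by pending completion events is done as well;
	 * anything else is still owned by the NIC and reported as
	 * RTE_ETH_TX_DESC_FULL.
	 */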
	return (offset >= pushed ||
		offset < sfc_ef100_tx_qdesc_npending(txq)) ?
	       RTE_ETH_TX_DESC_DONE : RTE_ETH_TX_DESC_FULL;
}

struct sfc_dp_tx sfc_ef100_tx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF100,
		.type		= SFC_DP_TX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
	},
	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS |
				  SFC_DP_TX_FEAT_STATS,
	.dev_offload_capa	= 0,
	.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
				  RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
				  RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				  RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
				  RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
				  RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
				  RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
				  RTE_ETH_TX_OFFLOAD_TCP_TSO |
				  RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
				  RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO,
	.get_dev_info		= sfc_ef100_get_dev_info,
	.qsize_up_rings		= sfc_ef100_tx_qsize_up_rings,
	.qcreate		= sfc_ef100_tx_qcreate,
	.qdestroy		= sfc_ef100_tx_qdestroy,
	.qstart			= sfc_ef100_tx_qstart,
	.qtx_ev			= sfc_ef100_tx_qtx_ev,
	.qstop			= sfc_ef100_tx_qstop,
	.qreap			= sfc_ef100_tx_qreap,
	.qdesc_status		= sfc_ef100_tx_qdesc_status,
	.pkt_prepare		= sfc_ef100_tx_prepare_pkts,
	.pkt_burst		= sfc_ef100_xmit_pkts,
};