/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_ip.h>
#include <rte_tcp.h>

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_tx.h"
#include "sfc_tso.h"

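/*
 * Pre-allocate one header linearization buffer (TSO header object) per Tx
 * queue entry so that a header split across mbuf segments can be copied
 * into contiguous memory on the datapath without extra allocations.
 */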
int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			    unsigned int txq_entries, unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
						    SFC_TSOH_STD_LEN,
						    RTE_CACHE_LINE_SIZE,
						    socket_id);
		if (sw_ring[i].tsoh == NULL)
			goto fail_alloc_tsoh_objs;
	}

	return 0;

fail_alloc_tsoh_objs:
	while (i > 0)
		rte_free(sw_ring[--i].tsoh);

	return ENOMEM;
}

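/* Release the per-entry TSO header buffers allocated above */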
void
sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			   unsigned int txq_entries)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		rte_free(sw_ring[i].tsoh);
		sw_ring[i].tsoh = NULL;
	}
}

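/*
 * Copy a packet header that may span several mbuf segments into the
 * contiguous buffer 'tsoh' and advance the (*in_seg, *in_off) cursor past
 * the copied bytes. Returns the number of segments fully consumed.
 */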
unsigned int
sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
		       struct rte_mbuf **in_seg, size_t *in_off)
{
	struct rte_mbuf *m = *in_seg;
	size_t bytes_to_copy = 0;
	size_t bytes_left = header_len;
	unsigned int segments_copied = 0;

	do {
		bytes_to_copy = MIN(bytes_left, m->data_len);

		rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
			   bytes_to_copy);

		bytes_left -= bytes_to_copy;
		tsoh += bytes_to_copy;

		if (bytes_left > 0) {
			m = m->next;
			SFC_ASSERT(m != NULL);
			++segments_copied;
		}
	} while (bytes_left > 0);

	if (bytes_to_copy == m->data_len) {
		/* The last segment was consumed by the header entirely */
		*in_seg = m->next;
		*in_off = 0;
		++segments_copied;
	} else {
		*in_seg = m;
		*in_off = bytes_to_copy;
	}

	return segments_copied;
}

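/*
 * Emit the FATSOv2 option descriptors and the header DMA descriptor for a
 * single TSO packet via *pend, updating *pkt_descs and *pkt_len so that the
 * caller can continue with the payload DMA descriptors.
 */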
int
sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
	       unsigned int *pkt_descs, size_t *pkt_len)
{
	uint8_t *tsoh;
	const struct rte_tcp_hdr *th;
	efsys_dma_addr_t header_paddr;
	uint16_t packet_id = 0;
	uint32_t sent_seq;
	struct rte_mbuf *m = *in_seg;
	size_t nh_off = m->l2_len; /* IP header offset */
	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;

	idx += SFC_EF10_TSO_OPT_DESCS_NUM;

	header_paddr = rte_pktmbuf_iova(m);

	/*
	 * Sometimes headers may be split across multiple mbufs. In such cases
	 * we need to glue those pieces and store them in some temporary place.
	 * Also, packet headers must be contiguous in memory, so that
	 * they can be referred to with a single DMA descriptor. EF10 does not
	 * restrict DMA descriptor data from crossing address boundaries.
	 */
	if (m->data_len < header_len) {
		/*
		 * Discard the packet if header linearization is needed but
		 * the header is too big.
		 * Duplicate the Tx prepare check here to avoid memory
		 * corruption if Tx prepare is skipped.
		 */
		if (unlikely(header_len > SFC_TSOH_STD_LEN))
			return EMSGSIZE;

		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
		sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);

		header_paddr = rte_malloc_virt2iova((void *)tsoh);
	} else {
		if (m->data_len == header_len) {
			*in_off = 0;
			*in_seg = m->next;
		} else {
			*in_off = header_len;
		}

		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
	}

	/*
	 * 8000-series EF10 hardware requires that the innermost IP length
	 * be greater than or equal to the value which each segment is
	 * supposed to have; otherwise, the TCP checksum will be incorrect.
	 */
	sfc_tso_innermost_ip_fix_len(m, tsoh, nh_off);

	/*
	 * Handle the IP header. Tx prepare has debug-only checks that offload
	 * flags are correctly filled in the TSO mbuf. Use zero IPID if there
	 * is no IPv4 flag. If the packet is still IPv4, HW will simply start
	 * from zero IPID.
	 */
	if (m->ol_flags & PKT_TX_IPV4)
		packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off);

	/* Handle the TCP header */
	th = (const struct rte_tcp_hdr *)(tsoh + tcph_off);

	/* Copy out the sequence number to avoid an unaligned 32-bit load */
	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

	efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
				 m->tso_segsz,
				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);

	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;

	/* A single DMA descriptor covers the linearized packet header */
	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
				B_FALSE, (*pend)++);
	(*pkt_descs)++;
	*pkt_len -= header_len;

	return 0;
}