/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_ip.h>
#include <rte_tcp.h>

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_tx.h"
#include "sfc_ev.h"
/** Standard TSO header length */
#define SFC_TSOH_STD_LEN		256

/** The number of TSO option descriptors that precede the packet descriptors */
#define SFC_TSO_OPDESCS_IDX_SHIFT	2

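/*
 * Allocate a standard-size TSO header buffer for every TxQ descriptor so
 * that a packet header spread over several mbufs can be copied into one
 * contiguous, DMA-mapped piece of memory at transmit time.
 */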
int
sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			    unsigned int txq_entries, unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
						    SFC_TSOH_STD_LEN,
						    RTE_CACHE_LINE_SIZE,
						    socket_id);
		if (sw_ring[i].tsoh == NULL)
			goto fail_alloc_tsoh_objs;
	}

	return 0;

fail_alloc_tsoh_objs:
	while (i > 0)
		rte_free(sw_ring[--i].tsoh);

	return ENOMEM;
}

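/* Free the per-descriptor TSO header buffers of a TxQ */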
void
sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
			   unsigned int txq_entries)
{
	unsigned int i;

	for (i = 0; i < txq_entries; ++i) {
		rte_free(sw_ring[i].tsoh);
		sw_ring[i].tsoh = NULL;
	}
}

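/*
 * Copy the first bytes_left bytes of the mbuf chain into the TSO header
 * buffer of the descriptor at idx and advance *in_seg/*in_off so that they
 * point at the first byte which follows the copied header.
 */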
static void
sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
			   size_t *in_off, unsigned int idx, size_t bytes_left)
{
	struct rte_mbuf *m = *in_seg;
	size_t bytes_to_copy = 0;
	uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;

	do {
		bytes_to_copy = MIN(bytes_left, m->data_len);

		rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
			   bytes_to_copy);

		bytes_left -= bytes_to_copy;
		tsoh += bytes_to_copy;

		if (bytes_left > 0) {
			m = m->next;
			SFC_ASSERT(m != NULL);
		}
	} while (bytes_left > 0);

	if (bytes_to_copy == m->data_len) {
		*in_seg = m->next;
		*in_off = 0;
	} else {
		*in_seg = m;
		*in_off = bytes_to_copy;
	}
}

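/*
 * Build the FATSOv2 option descriptors and the DMA descriptor which carries
 * the packet header. On return *in_seg/*in_off refer to the start of the
 * payload and *pkt_len no longer includes the header length, so that the
 * caller can continue with the payload descriptors.
 */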
int
sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
	       unsigned int *pkt_descs, size_t *pkt_len)
{
	uint8_t *tsoh;
	const struct tcp_hdr *th;
	efsys_dma_addr_t header_paddr;
	uint16_t packet_id;
	uint32_t sent_seq;
	struct rte_mbuf *m = *in_seg;
	size_t nh_off = m->l2_len; /* IP header offset */
	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);

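	/*
	 * The TSO option descriptors go in front of the packet descriptors
	 * (see SFC_TSO_OPDESCS_IDX_SHIFT), so the header buffer used for
	 * this packet belongs to the shifted ring index.
	 */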
	idx += SFC_TSO_OPDESCS_IDX_SHIFT;

	/* Packets with headers that are too big must be discarded */
	if (unlikely(header_len > SFC_TSOH_STD_LEN))
		return EMSGSIZE;

	/*
	 * The TCP header must start at most 208 bytes into the frame.
	 * If it starts later than this, the NIC won't realise it's a TCP
	 * packet and TSO edits won't be applied.
	 */
	if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
		return EMSGSIZE;

	header_paddr = rte_pktmbuf_iova(m);

	/*
	 * Sometimes headers may be split across multiple mbufs. In such cases
	 * we need to glue those pieces and store them in some temporary place.
	 * Also, packet headers must be contiguous in memory, so that
	 * they can be referred to with a single DMA descriptor. EF10 has no
	 * restrictions on the address boundaries crossed by DMA descriptor data.
	 */
	if (m->data_len < header_len) {
		sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
					   header_len);
		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;

		header_paddr = rte_malloc_virt2iova((void *)tsoh);
	} else {
		if (m->data_len == header_len) {
			*in_off = 0;
			*in_seg = m->next;
		} else {
			*in_off = header_len;
		}

		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
	}

	/* Handle IP header */
	if (m->ol_flags & PKT_TX_IPV4) {
		const struct ipv4_hdr *iphe4;

		iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
		rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
		packet_id = rte_be_to_cpu_16(packet_id);
	} else if (m->ol_flags & PKT_TX_IPV6) {
		packet_id = 0;
	} else {
		return EINVAL;
	}

	/* Handle TCP header */
	th = (const struct tcp_hdr *)(tsoh + tcph_off);

	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
	sent_seq = rte_be_to_cpu_32(sent_seq);

	efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);

	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;

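	/*
	 * Send the header in a DMA descriptor of its own; the payload
	 * descriptors that follow are added by the caller.
	 */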
	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
				B_FALSE, (*pend)++);
	(*pkt_descs)++;
	*pkt_len -= header_len;

	return 0;
}