2 * Copyright (c) 2016 Solarflare Communications Inc.
5 * This software was jointly developed between OKTET Labs (under contract
6 * for Solarflare) and Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "sfc_debug.h"
38 /** Standard TSO header length */
39 #define SFC_TSOH_STD_LEN 256
41 /** The number of TSO option descriptors that precede the packet descriptors */
42 #define SFC_TSO_OPDESCS_IDX_SHIFT 2
/*
 * Allocate a per-entry TSO header buffer ("tsoh") for every software
 * descriptor of a TX queue, on the requested NUMA socket.
 *
 * NOTE(review): several lines of this function are missing from this
 * view (the tail of the rte_malloc_socket() call, the success return and
 * the failure-path loop header) — presumably each buffer is
 * SFC_TSOH_STD_LEN bytes and the function returns ENOMEM on failure;
 * confirm against the full source.
 */
45 sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
46 			unsigned int txq_entries, unsigned int socket_id)
/* One header buffer per TX queue entry. */
50 	for (i = 0; i < txq_entries; ++i) {
51 		sw_ring[i].tsoh = rte_malloc_socket("sfc-txq-tsoh-obj",
/* Bail out on allocation failure; already-allocated buffers are freed below. */
55 		if (sw_ring[i].tsoh == NULL)
56 			goto fail_alloc_tsoh_objs;
/* Failure path: walk backwards releasing the buffers allocated so far. */
63 		rte_free(sw_ring[--i].tsoh);
/*
 * Free the TSO header buffers of every software descriptor in the ring.
 * Pointers are reset to NULL so a later double-free is harmless
 * (rte_free(NULL) is a no-op).
 *
 * NOTE(review): the declaration of the loop index and the closing braces
 * are elided in this view — confirm against the full source.
 */
69 sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries)
73 	for (i = 0; i < txq_entries; ++i) {
74 		rte_free(sw_ring[i].tsoh);
75 		sw_ring[i].tsoh = NULL;
/*
 * Gather a packet header that is split across several mbuf segments into
 * the contiguous per-descriptor "tsoh" buffer, so that it can later be
 * referenced by a single DMA descriptor.
 *
 * On return, *in_seg points at the mbuf segment holding the first payload
 * byte and *in_off is the payload offset within that segment.
 *
 * NOTE(review): the loop opening (do {), the tail of the rte_memcpy()
 * call, the segment-advance statements and the closing braces are elided
 * in this view — confirm against the full source.
 */
80 sfc_tso_prepare_header(struct sfc_txq *txq, struct rte_mbuf **in_seg,
81 		       size_t *in_off, unsigned int idx, size_t bytes_left)
83 	struct rte_mbuf *m = *in_seg;
84 	size_t bytes_to_copy = 0;
/* Destination: the pre-allocated contiguous header buffer for this entry. */
85 	uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
/* Copy at most one whole segment per iteration. */
88 		bytes_to_copy = MIN(bytes_left, m->data_len);
90 		rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
93 		bytes_left -= bytes_to_copy;
94 		tsoh += bytes_to_copy;
/* The header must not extend past the chain; m == NULL here is a caller bug. */
98 		SFC_ASSERT(m != NULL);
100 	} while (bytes_left > 0);
/* If the header ended exactly on a segment boundary, payload starts in
 * the next segment (elided branch); otherwise it starts mid-segment at
 * the offset recorded below.
 */
102 	if (bytes_to_copy == m->data_len) {
107 	*in_off = bytes_to_copy;
/*
 * Build the EF10 FATSOv2 option descriptors plus the header DMA
 * descriptor for one TSO packet, leaving *in_seg/*in_off positioned at
 * the first payload byte and *pend advanced past the emitted descriptors.
 *
 * NOTE(review): this function is elided in this view (error returns,
 * the IPv6 branch body, the trailing descriptor-count/return code and
 * the function's end are missing) — confirm against the full source.
 */
112 sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
113 	   size_t *in_off, efx_desc_t **pend, unsigned int *pkt_descs,
117 	const struct tcp_hdr *th;
118 	efsys_dma_addr_t header_paddr;
121 	struct rte_mbuf *m = *in_seg;
122 	size_t nh_off = m->l2_len; /* IP header offset */
123 	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
124 	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
125 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
/* Option descriptors precede the packet descriptors in the ring. */
127 	idx += SFC_TSO_OPDESCS_IDX_SHIFT;
129 	/* Packets which have too big headers should be discarded */
/* The glued header must fit in the fixed-size tsoh buffer. */
130 	if (unlikely(header_len > SFC_TSOH_STD_LEN))
134 	 * The TCP header must start at most 208 bytes into the frame.
135 	 * If it starts later than this then the NIC won't realise
136 	 * it's a TCP packet and TSO edits won't be applied
/* NIC-reported limit on the TCP header offset (see comment above). */
138 	if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
/* Default: header is contiguous in the first mbuf segment. */
141 	header_paddr = rte_pktmbuf_mtophys(m);
144 	 * Sometimes headers may be split across multiple mbufs. In such cases
145 	 * we need to glue those pieces and store them in some temporary place.
146 	 * Also, packet headers must be contiguous in memory, so that
147 	 * they can be referred to with a single DMA descriptor. EF10 has no
148 	 * limitations on address boundaries crossing by DMA descriptor data.
/* Header split across segments: glue into the tsoh buffer and DMA from there. */
150 	if (m->data_len < header_len) {
151 		sfc_tso_prepare_header(txq, in_seg, in_off, idx, header_len);
152 		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
154 		header_paddr = rte_malloc_virt2phy((void *)tsoh);
/* Header fills the first segment exactly: payload begins in the next one
 * (elided branch); otherwise payload follows the header in this segment.
 */
156 		if (m->data_len == header_len) {
160 		*in_off = header_len;
163 		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
166 	/* Handle IP header */
/* rte_memcpy avoids unaligned direct loads from the header fields. */
167 	if (m->ol_flags & PKT_TX_IPV4) {
168 		const struct ipv4_hdr *iphe4;
170 		iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
171 		rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
172 		packet_id = rte_be_to_cpu_16(packet_id);
/* IPv6 branch body elided in this view — presumably packet_id = 0;
 * confirm against the full source.
 */
173 	} else if (m->ol_flags & PKT_TX_IPV6) {
179 	/* Handle TCP header */
/* Initial sequence number, needed by the NIC to number the segments. */
180 	th = (const struct tcp_hdr *)(tsoh + tcph_off);
182 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
183 	sent_seq = rte_be_to_cpu_32(sent_seq);
/* Emit the FATSOv2 option descriptors… */
185 	efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
186 				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);
188 	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
189 	*pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;
/* …then one DMA descriptor covering the (contiguous) header. */
191 	efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
/* Remaining bytes to emit are pure payload. */
194 	*pkt_len -= header_len;