X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_tso.c;h=29d0836b65c053335901ccf800b079bb1fc8ab4d;hb=7d5cfaa7508de0fd248b05effbf421a98317006a;hp=271861fcbd322bcd1d7d880bc4217f8eb15dc893;hpb=244cfa79a41c70f8c58692dd2601687a65e928c3;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index 271861fcbd..29d0836b65 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -1,32 +1,10 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2017 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2016-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <rte_ip.h>
@@ -36,21 +14,16 @@
 #include "sfc_debug.h"
 #include "sfc_tx.h"
 #include "sfc_ev.h"
-
-/** Standard TSO header length */
-#define SFC_TSOH_STD_LEN	256
-
-/** The number of TSO option descriptors that precede the packet descriptors */
-#define SFC_TSO_OPDESCS_IDX_SHIFT	2
+#include "sfc_tso.h"
 
 int
-sfc_tso_alloc_tsoh_objs(struct sfc_tx_sw_desc *sw_ring,
-			unsigned int txq_entries, unsigned int socket_id)
+sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+			    unsigned int txq_entries, unsigned int socket_id)
 {
 	unsigned int i;
 
 	for (i = 0; i < txq_entries; ++i) {
-		sw_ring[i].tsoh = rte_malloc_socket("sfc-txq-tsoh-obj",
+		sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
 						    SFC_TSOH_STD_LEN,
 						    RTE_CACHE_LINE_SIZE,
 						    socket_id);
@@ -68,7 +41,8 @@ fail_alloc_tsoh_objs:
 }
 
 void
-sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries)
+sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+			   unsigned int txq_entries)
 {
 	unsigned int i;
 
@@ -78,13 +52,14 @@ sfc_tso_free_tsoh_objs(struct sfc_tx_sw_desc *sw_ring, unsigned int txq_entries)
 	}
 }
 
-static void
-sfc_tso_prepare_header(struct sfc_txq *txq, struct rte_mbuf **in_seg,
-		       size_t *in_off, unsigned int idx, size_t bytes_left)
+unsigned int
+sfc_tso_prepare_header(uint8_t *tsoh, size_t header_len,
+		       struct rte_mbuf **in_seg, size_t *in_off)
 {
 	struct rte_mbuf *m = *in_seg;
 	size_t bytes_to_copy = 0;
-	uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+	size_t bytes_left = header_len;
+	unsigned int segments_copied = 0;
 
 	do {
 		bytes_to_copy = MIN(bytes_left, m->data_len);
@@ -98,49 +73,40 @@ sfc_tso_prepare_header(struct sfc_txq *txq, struct rte_mbuf **in_seg,
 		if (bytes_left > 0) {
 			m = m->next;
 			SFC_ASSERT(m != NULL);
+			segments_copied++;
 		}
 	} while (bytes_left > 0);
 
 	if (bytes_to_copy == m->data_len) {
 		*in_seg = m->next;
 		*in_off = 0;
+		segments_copied++;
 	} else {
 		*in_seg = m;
 		*in_off = bytes_to_copy;
 	}
+
+	return segments_copied;
 }
 
 int
-sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
-	   size_t *in_off, efx_desc_t **pend, unsigned int *pkt_descs,
-	   size_t *pkt_len)
+sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+	       struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+	       unsigned int *pkt_descs, size_t *pkt_len)
 {
 	uint8_t *tsoh;
-	const struct tcp_hdr *th;
+	const struct rte_tcp_hdr *th;
 	efsys_dma_addr_t header_paddr;
-	uint16_t packet_id;
+	uint16_t packet_id = 0;
 	uint32_t sent_seq;
 	struct rte_mbuf *m = *in_seg;
 	size_t nh_off = m->l2_len; /* IP header offset */
 	size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
 	size_t header_len = m->l2_len + m->l3_len + m->l4_len;
-	const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
 
-	idx += SFC_TSO_OPDESCS_IDX_SHIFT;
+	idx += SFC_EF10_TSO_OPT_DESCS_NUM;
 
-	/* Packets which have too big headers should be discarded */
-	if (unlikely(header_len > SFC_TSOH_STD_LEN))
-		return EMSGSIZE;
-
-	/*
-	 * The TCP header must start at most 208 bytes into the frame.
-	 * If it starts later than this then the NIC won't realise
-	 * it's a TCP packet and TSO edits won't be applied
-	 */
-	if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
-		return EMSGSIZE;
-
-	header_paddr = rte_pktmbuf_mtophys(m);
+	header_paddr = rte_pktmbuf_iova(m);
 
 	/*
 	 * Sometimes headers may be split across multiple mbufs. In such cases
@@ -150,10 +116,19 @@ sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
 	 * limitations on address boundaries crossing by DMA descriptor data.
 	 */
 	if (m->data_len < header_len) {
-		sfc_tso_prepare_header(txq, in_seg, in_off, idx, header_len);
+		/*
+		 * Discard a packet if header linearization is needed but
+		 * the header is too big.
+		 * Duplicate Tx prepare check here to avoid spoil of
+		 * memory if Tx prepare is skipped.
+		 */
+		if (unlikely(header_len > SFC_TSOH_STD_LEN))
+			return EMSGSIZE;
+
 		tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+		sfc_tso_prepare_header(tsoh, header_len, in_seg, in_off);
 
-		header_paddr = rte_malloc_virt2phy((void *)tsoh);
+		header_paddr = rte_malloc_virt2iova((void *)tsoh);
 	} else {
 		if (m->data_len == header_len) {
 			*in_off = 0;
@@ -165,26 +140,30 @@ sfc_tso_do(struct sfc_txq *txq, unsigned int idx, struct rte_mbuf **in_seg,
 		tsoh = rte_pktmbuf_mtod(m, uint8_t *);
 	}
 
-	/* Handle IP header */
-	if (m->ol_flags & PKT_TX_IPV4) {
-		const struct ipv4_hdr *iphe4;
+	/*
+	 * 8000-series EF10 hardware requires that innermost IP length
+	 * be greater than or equal to the value which each segment is
+	 * supposed to have; otherwise, TCP checksum will be incorrect.
+	 */
+	sfc_tso_innermost_ip_fix_len(m, tsoh, nh_off);
 
-		iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
-		rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
-		packet_id = rte_be_to_cpu_16(packet_id);
-	} else if (m->ol_flags & PKT_TX_IPV6) {
-		packet_id = 0;
-	} else {
-		return EINVAL;
-	}
+	/*
+	 * Handle IP header. Tx prepare has debug-only checks that offload flags
+	 * are correctly filled in in TSO mbuf. Use zero IPID if there is no
+	 * IPv4 flag. If the packet is still IPv4, HW will simply start from
+	 * zero IPID.
+	 */
+	if (m->ol_flags & PKT_TX_IPV4)
+		packet_id = sfc_tso_ip4_get_ipid(tsoh, nh_off);
 
 	/* Handle TCP header */
-	th = (const struct tcp_hdr *)(tsoh + tcph_off);
+	th = (const struct rte_tcp_hdr *)(tsoh + tcph_off);
 
 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
 	sent_seq = rte_be_to_cpu_32(sent_seq);
 
-	efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
+	efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
+				 m->tso_segsz,
 				 *pend, EFX_TX_FATSOV2_OPT_NDESCS);
 
 	*pend += EFX_TX_FATSOV2_OPT_NDESCS;
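
Note on the reworked sfc_tso_prepare_header(): it now takes the destination buffer and the header length directly, returns how many mbuf segments it fully consumed, and leaves *in_seg/*in_off pointing at the first payload byte. The standalone sketch below mirrors that loop outside of DPDK to illustrate the contract; struct seg is a hypothetical stand-in for rte_mbuf, and memcpy()/assert() replace rte_memcpy()/SFC_ASSERT(). It is an illustration only, not the driver code itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Simplified stand-in for rte_mbuf: data pointer, length and chain link. */
struct seg {
	const uint8_t *data;
	size_t data_len;
	struct seg *next;
};

/*
 * Gather header_len bytes from the segment chain into the contiguous
 * buffer tsoh, leaving *in_seg/*in_off at the first payload byte and
 * returning the number of fully consumed segments, as the reworked
 * sfc_tso_prepare_header() does.
 */
static unsigned int
prepare_header_sketch(uint8_t *tsoh, size_t header_len,
		      struct seg **in_seg, size_t *in_off)
{
	struct seg *m = *in_seg;
	size_t bytes_to_copy = 0;
	size_t bytes_left = header_len;
	unsigned int segments_copied = 0;

	do {
		bytes_to_copy = MIN(bytes_left, m->data_len);
		memcpy(tsoh + header_len - bytes_left, m->data, bytes_to_copy);
		bytes_left -= bytes_to_copy;
		if (bytes_left > 0) {
			m = m->next;
			assert(m != NULL);
			segments_copied++;
		}
	} while (bytes_left > 0);

	if (bytes_to_copy == m->data_len) {
		*in_seg = m->next;
		*in_off = 0;
		segments_copied++;
	} else {
		*in_seg = m;
		*in_off = bytes_to_copy;
	}

	return segments_copied;
}

int
main(void)
{
	/* A 6-byte "header" split 4 + 2; payload starts in the second seg */
	const uint8_t a[] = { 1, 2, 3, 4 };
	const uint8_t b[] = { 5, 6, 7, 8 };
	struct seg s1 = { b, sizeof(b), NULL };
	struct seg s0 = { a, sizeof(a), &s1 };
	struct seg *in_seg = &s0;
	size_t in_off = 0;
	uint8_t tsoh[6];
	unsigned int n;

	n = prepare_header_sketch(tsoh, sizeof(tsoh), &in_seg, &in_off);

	/* Expect: one segment fully consumed, payload at offset 2 of seg 1 */
	printf("segments_copied=%u in_off=%zu first_payload_byte=%u\n",
	       n, in_off, (unsigned int)in_seg->data[in_off]);
	return 0;
}

With the 4 + 2 split above, the sketch reports one fully consumed segment and an offset of 2 into the second one, which is what the caller relies on when it continues building payload descriptors from *in_seg/*in_off.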
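The helpers sfc_tso_innermost_ip_fix_len() and sfc_tso_ip4_get_ipid() are not part of this diff (they presumably come from the new sfc_tso.h). For the IPID case, the removed inline code shows what the helper has to provide: the 16-bit IPv4 Identification field, read from a possibly unaligned header at tsoh + nh_off and converted to host byte order. A minimal standalone sketch of that logic, using memcpy()/ntohs() in place of rte_memcpy()/rte_be_to_cpu_16() and a hard-coded field offset instead of struct rte_ipv4_hdr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/*
 * Read the IPv4 Identification field (byte offset 4 of the IPv4 header)
 * from a header buffer that starts at an L2 offset of nh_off bytes.
 * memcpy() is used because the field may not be naturally aligned
 * inside packet data.
 */
static uint16_t
ip4_get_ipid_sketch(const uint8_t *tsoh, size_t nh_off)
{
	uint16_t ipid_be;

	memcpy(&ipid_be, tsoh + nh_off + 4, sizeof(ipid_be));
	return ntohs(ipid_be); /* big-endian on the wire -> host order */
}

int
main(void)
{
	/* Zeroed Ethernet header (14 bytes) followed by a minimal IPv4 header */
	uint8_t frame[14 + 20] = { 0 };

	frame[14 + 4] = 0x12; /* Identification = 0x1234, big-endian */
	frame[14 + 5] = 0x34;

	printf("IPID = 0x%04x\n", ip4_get_ipid_sketch(frame, 14));
	return 0;
}

The value obtained this way is what gets passed as the packet_id argument of efx_tx_qdesc_tso2_create() above, so that the hardware can increment the IPv4 ID per generated segment.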