X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_ef10_tx.c;h=b91c8068b19ad702dda278e84f0b6a5b27bb3262;hb=25ae7f1a5d9d127a46f8d62d1d689f77a78138fd;hp=b317997caecf175bae32c9fc74f641eab4c05cce;hpb=8c27fa78f12fcfa8620eb11684f6d549614bba22;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index b317997cae..b91c8068b1 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2020 Xilinx, Inc.
+ * Copyright(c) 2016-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -320,9 +320,10 @@ sfc_ef10_try_reap(struct sfc_ef10_txq * const txq, unsigned int added,
 }
 
 static uint16_t
-sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+sfc_ef10_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		      uint16_t nb_pkts)
 {
+	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
 	uint16_t i;
 
 	for (i = 0; i < nb_pkts; i++) {
@@ -347,7 +348,10 @@ sfc_ef10_prepare_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 		}
 #endif
-		ret = sfc_dp_tx_prepare_pkt(m);
+		ret = sfc_dp_tx_prepare_pkt(m,
+				txq->tso_tcp_header_offset_limit,
+				txq->max_fill_level,
+				SFC_EF10_TSO_OPT_DESCS_NUM, 0);
 		if (unlikely(ret != 0)) {
 			rte_errno = ret;
 			break;
@@ -362,13 +366,16 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 		      unsigned int *added, unsigned int *dma_desc_space,
 		      bool *reap_done)
 {
-	size_t iph_off = m_seg->l2_len;
-	size_t tcph_off = m_seg->l2_len + m_seg->l3_len;
-	size_t header_len = m_seg->l2_len + m_seg->l3_len + m_seg->l4_len;
+	size_t iph_off = ((m_seg->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			  m_seg->outer_l2_len + m_seg->outer_l3_len : 0) +
+			 m_seg->l2_len;
+	size_t tcph_off = iph_off + m_seg->l3_len;
+	size_t header_len = tcph_off + m_seg->l4_len;
 	/* Offset of the payload in the last segment that contains the header */
 	size_t in_off = 0;
-	const struct tcp_hdr *th;
+	const struct rte_tcp_hdr *th;
 	uint16_t packet_id = 0;
+	uint16_t outer_packet_id = 0;
 	uint32_t sent_seq;
 	uint8_t *hdr_addr;
 	rte_iova_t hdr_iova;
@@ -378,9 +385,6 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	struct rte_mbuf *m_seg_to_free_up_to = first_m_seg;
 	bool eop;
 
-	if (unlikely(tcph_off > txq->tso_tcp_header_offset_limit))
-		return EMSGSIZE;
-
 	/*
 	 * Preliminary estimation of required DMA descriptors, including extra
 	 * descriptor for TSO header that is needed when the header is
@@ -446,6 +450,8 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	/*
 	 * Discard a packet if header linearization is needed but
 	 * the header is too big.
+	 * Duplicate Tx prepare check here to avoid spoil of
+	 * memory if Tx prepare is skipped.
 	 */
 	if (unlikely(header_len > SFC_TSOH_STD_LEN))
 		return EMSGSIZE;
@@ -476,20 +482,19 @@ sfc_ef10_xmit_tso_pkt(struct sfc_ef10_txq * const txq, struct rte_mbuf *m_seg,
 	 * filled in in TSO mbuf. Use zero IPID if there is no IPv4 flag.
 	 * If the packet is still IPv4, HW will simply start from zero IPID.
 	 */
-	if (first_m_seg->ol_flags & PKT_TX_IPV4) {
-		const struct ipv4_hdr *iphe4;
+	if (first_m_seg->ol_flags & PKT_TX_IPV4)
+		packet_id = sfc_tso_ip4_get_ipid(hdr_addr, iph_off);
 
-		iphe4 = (const struct ipv4_hdr *)(hdr_addr + iph_off);
-		rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
-		packet_id = rte_be_to_cpu_16(packet_id);
-	}
+	if (first_m_seg->ol_flags & PKT_TX_OUTER_IPV4)
+		outer_packet_id = sfc_tso_ip4_get_ipid(hdr_addr,
+						first_m_seg->outer_l2_len);
 
-	th = (const struct tcp_hdr *)(hdr_addr + tcph_off);
+	th = (const struct rte_tcp_hdr *)(hdr_addr + tcph_off);
 	rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
 	sent_seq = rte_be_to_cpu_32(sent_seq);
 
-	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, 0, sent_seq,
-			first_m_seg->tso_segsz);
+	sfc_ef10_tx_qdesc_tso2_create(txq, *added, packet_id, outer_packet_id,
+			sent_seq, first_m_seg->tso_segsz);
 	(*added) += SFC_EF10_TSO_OPT_DESCS_NUM;
 
 	sfc_ef10_tx_qdesc_dma_create(hdr_iova, header_len, false,
@@ -929,7 +934,9 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 	if (txq->sw_ring == NULL)
 		goto fail_sw_ring_alloc;
 
-	if (info->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+	if (info->offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+			      DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+			      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) {
 		txq->tsoh = rte_calloc_socket("sfc-ef10-txq-tsoh",
 					      info->txq_entries,
 					      SFC_TSOH_STD_LEN,
@@ -1091,11 +1098,15 @@ struct sfc_dp_tx sfc_ef10_tx = {
 		.type		= SFC_DP_TX,
 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
 	},
-	.features		= SFC_DP_TX_FEAT_TSO |
-				  SFC_DP_TX_FEAT_MULTI_SEG |
-				  SFC_DP_TX_FEAT_MULTI_POOL |
-				  SFC_DP_TX_FEAT_REFCNT |
-				  SFC_DP_TX_FEAT_MULTI_PROCESS,
+	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
+	.dev_offload_capa	= DEV_TX_OFFLOAD_MULTI_SEGS,
+	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
+				  DEV_TX_OFFLOAD_UDP_CKSUM |
+				  DEV_TX_OFFLOAD_TCP_CKSUM |
+				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+				  DEV_TX_OFFLOAD_TCP_TSO |
+				  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+				  DEV_TX_OFFLOAD_GENEVE_TNL_TSO,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
@@ -1115,6 +1126,11 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
+	.dev_offload_capa	= DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.queue_offload_capa	= DEV_TX_OFFLOAD_IPV4_CKSUM |
+				  DEV_TX_OFFLOAD_UDP_CKSUM |
+				  DEV_TX_OFFLOAD_TCP_CKSUM |
+				  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM,
 	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
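
Note (illustration only, not part of the patch): the tunnel-aware offset
arithmetic introduced in sfc_ef10_xmit_tso_pkt() can be exercised standalone.
The sketch below substitutes a hypothetical struct mbuf_lens for the real
rte_mbuf length fields; per the DPDK convention for tunnel TSO, l2_len covers
the outer L4 header, the tunnel header and the inner Ethernet header. The
values assume a typical VXLAN-in-IPv4 packet.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the rte_mbuf length fields used by the patch. */
struct mbuf_lens {
	bool tunnel;         /* stands in for ol_flags & PKT_TX_TUNNEL_MASK */
	size_t outer_l2_len; /* outer Ethernet */
	size_t outer_l3_len; /* outer IPv4 */
	size_t l2_len;       /* outer L4 + tunnel header + inner Ethernet */
	size_t l3_len;       /* inner IPv4 */
	size_t l4_len;       /* inner TCP */
};

int main(void)
{
	/* 14B outer Ethernet, 20B outer IPv4, 8B UDP + 8B VXLAN + 14B inner Ethernet */
	struct mbuf_lens m = {
		.tunnel = true,
		.outer_l2_len = 14, .outer_l3_len = 20,
		.l2_len = 8 + 8 + 14, .l3_len = 20, .l4_len = 20,
	};

	/* Same arithmetic as the patched sfc_ef10_xmit_tso_pkt() */
	size_t iph_off = (m.tunnel ? m.outer_l2_len + m.outer_l3_len : 0) +
			 m.l2_len;
	size_t tcph_off = iph_off + m.l3_len;
	size_t header_len = tcph_off + m.l4_len;

	/* Prints: inner IP at 64, inner TCP at 84, header 104 bytes */
	printf("inner IP at %zu, inner TCP at %zu, header %zu bytes\n",
	       iph_off, tcph_off, header_len);
	return 0;
}

With these lengths the linearized header is 104 bytes; that is the header_len
value the patched code compares against SFC_TSOH_STD_LEN when deciding whether
a packet whose header needs linearization must be dropped.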