X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_dp_tx.h;h=184711b887d0d900a06506861a146fb45f442fcf;hb=44db08d53be366d69bb7d16bffc3e55ba2d7398a;hp=eda9676c819b92eb48f37b8ff3fad7fe4ea3cae5;hpb=ab3ce1e0c19329c8a2d21480b0db55be259bc168;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index eda9676c81..184711b887 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2016-2018 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2016-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
@@ -10,9 +10,11 @@
 #ifndef _SFC_DP_TX_H
 #define _SFC_DP_TX_H
 
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 
 #include "sfc_dp.h"
+#include "sfc_debug.h"
+#include "sfc_tso.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -27,6 +29,12 @@ struct sfc_dp_txq {
 	struct sfc_dp_queue	dpq;
 };
 
+/** Datapath transmit queue descriptor number limitations */
+struct sfc_dp_tx_hw_limits {
+	unsigned int txq_max_entries;
+	unsigned int txq_min_entries;
+};
+
 /**
  * Datapath transmit queue creation information.
  *
@@ -57,6 +65,21 @@ struct sfc_dp_tx_qcreate_info {
	volatile void		*mem_bar;
 	/** VI window size shift */
 	unsigned int		vi_window_shift;
+	/**
+	 * Maximum number of bytes into the packet the TCP header can start for
+	 * the hardware to apply TSO packet edits.
+	 */
+	uint16_t		tso_tcp_header_offset_limit;
+	/** Maximum number of header DMA descriptors per TSOv3 transaction */
+	uint16_t		tso_max_nb_header_descs;
+	/** Maximum header length acceptable by TSOv3 transaction */
+	uint16_t		tso_max_header_len;
+	/** Maximum number of payload DMA descriptors per TSOv3 transaction */
+	uint16_t		tso_max_nb_payload_descs;
+	/** Maximum payload length per TSOv3 transaction */
+	uint32_t		tso_max_payload_len;
+	/** Maximum number of frames to be generated per TSOv3 transaction */
+	uint32_t		tso_max_nb_outgoing_frames;
 };
 
 /**
@@ -78,6 +101,7 @@ typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
  * @return 0 or positive errno.
  */
 typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
+					 struct sfc_dp_tx_hw_limits *limits,
 					 unsigned int *txq_entries,
 					 unsigned int *evq_entries,
 					 unsigned int *txq_max_fill_level);
@@ -143,12 +167,18 @@ struct sfc_dp_tx {
 	struct sfc_dp			dp;
 
 	unsigned int			features;
-#define SFC_DP_TX_FEAT_VLAN_INSERT	0x1
-#define SFC_DP_TX_FEAT_TSO		0x2
-#define SFC_DP_TX_FEAT_MULTI_SEG	0x4
-#define SFC_DP_TX_FEAT_MULTI_PROCESS	0x8
-#define SFC_DP_TX_FEAT_MULTI_POOL	0x10
-#define SFC_DP_TX_FEAT_REFCNT		0x20
+#define SFC_DP_TX_FEAT_MULTI_PROCESS	0x1
+#define SFC_DP_TX_FEAT_STATS		0x2
+	/**
+	 * Tx offload capabilities supported by the datapath at device
+	 * level only, if HW/FW supports them.
+	 */
+	uint64_t			dev_offload_capa;
+	/**
+	 * Tx offload capabilities supported by the datapath per queue,
+	 * if HW/FW supports them.
+	 */
+	uint64_t			queue_offload_capa;
 	sfc_dp_tx_get_dev_info_t	*get_dev_info;
 	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
 	sfc_dp_tx_qcreate_t		*qcreate;
@@ -158,6 +188,7 @@ struct sfc_dp_tx {
 	sfc_dp_tx_qtx_ev_t		*qtx_ev;
 	sfc_dp_tx_qreap_t		*qreap;
 	sfc_dp_tx_qdesc_status_t	*qdesc_status;
+	eth_tx_prep_t			pkt_prepare;
 	eth_tx_burst_t			pkt_burst;
 };
 
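The feature bitmask above shrinks because most per-datapath features are now expressed as Tx offload capabilities instead. For context, a minimal sketch (not part of the patch; the helper name is hypothetical) of how the two new capability sets would typically be surfaced through ethdev, mirroring sfc_dp_tx_offload_capa() introduced further down:

#include <stdint.h>

#include <rte_ethdev.h>

#include "sfc_dp_tx.h"

/* Hypothetical helper: report the capability split through ethdev. */
static void
example_fill_tx_offload_capa(const struct sfc_dp_tx *dp_tx,
			     struct rte_eth_dev_info *dev_info)
{
	/* Offloads that may be enabled or disabled per Tx queue */
	dev_info->tx_queue_offload_capa = dp_tx->queue_offload_capa;
	/* Device-level capabilities must be a superset of per-queue ones */
	dev_info->tx_offload_capa = dp_tx->dev_offload_capa |
				    dp_tx->queue_offload_capa;
}

Splitting the two sets lets ethdev enforce that per-queue offloads are a subset of the device-level ones while still allowing queue-local configuration.
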
@@ -177,9 +208,150 @@ sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
 	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
 }
 
+/** Get Tx datapath ops by the datapath TxQ handle */
+const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
+static inline uint64_t
+sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
+{
+	return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
+}
+
+static inline unsigned int
+sfc_dp_tx_pkt_extra_hdr_segs(struct rte_mbuf **m_seg,
+			     unsigned int *header_len_remaining)
+{
+	unsigned int nb_extra_header_segs = 0;
+
+	while (rte_pktmbuf_data_len(*m_seg) < *header_len_remaining) {
+		*header_len_remaining -= rte_pktmbuf_data_len(*m_seg);
+		*m_seg = (*m_seg)->next;
+		++nb_extra_header_segs;
+	}
+
+	return nb_extra_header_segs;
+}
+
+static inline int
+sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
+		      unsigned int max_nb_header_segs,
+		      unsigned int tso_bounce_buffer_len,
+		      uint32_t tso_tcp_header_offset_limit,
+		      unsigned int max_fill_level,
+		      unsigned int nb_tso_descs,
+		      unsigned int nb_vlan_descs)
+{
+	unsigned int descs_required = m->nb_segs;
+	unsigned int tcph_off = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ?
+				 m->outer_l2_len + m->outer_l3_len : 0) +
+				m->l2_len + m->l3_len;
+	unsigned int header_len = tcph_off + m->l4_len;
+	unsigned int header_len_remaining = header_len;
+	unsigned int nb_header_segs = 1;
+	struct rte_mbuf *m_seg = m;
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+	int ret;
+
+	ret = rte_validate_tx_offload(m);
+	if (ret != 0) {
+		/*
+		 * Negative error codes are returned by rte_validate_tx_offload(),
+		 * but positive error codes are used inside net/sfc PMD.
+		 */
+		SFC_ASSERT(ret < 0);
+		return -ret;
+	}
+#endif
+
+	if (max_nb_header_segs != 0) {
+		/* There is a limit on the number of header segments. */
+
+		nb_header_segs +=
+		    sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+						 &header_len_remaining);
+
+		if (unlikely(nb_header_segs > max_nb_header_segs)) {
+			/*
+			 * The number of header segments is too large.
+			 *
+			 * If TSO is requested and if the datapath supports
+			 * linearisation of TSO headers, allow the packet
+			 * to proceed with additional checks below.
+			 * Otherwise, throw an error.
+			 */
+			if ((m->ol_flags & PKT_TX_TCP_SEG) == 0 ||
+			    tso_bounce_buffer_len == 0)
+				return EINVAL;
+		}
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+		case 0:
+			break;
+		case PKT_TX_TUNNEL_VXLAN:
+			/* FALLTHROUGH */
+		case PKT_TX_TUNNEL_GENEVE:
+			if (!(m->ol_flags &
+			      (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+				return EINVAL;
+		}
+
+		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
+			return EINVAL;
+
+		descs_required += nb_tso_descs;
+
+		/*
+		 * If the header segments were already counted above, nothing
+		 * is done here since the remaining length is smaller than
+		 * the current segment size.
+		 */
+		nb_header_segs +=
+		    sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+						 &header_len_remaining);
+
+		/*
+		 * Extra descriptor which is required when (a part of) payload
+		 * shares the same segment with (a part of) the header.
+		 */
+		if (rte_pktmbuf_data_len(m_seg) > header_len_remaining)
+			descs_required++;
+
+		if (tso_bounce_buffer_len != 0) {
+			if (nb_header_segs > 1 &&
+			    unlikely(header_len > tso_bounce_buffer_len)) {
+				/*
+				 * Header linearisation is required and
+				 * the header is too big to be linearised.
+				 */
+				return EINVAL;
+			}
+		}
+	}
+
+	/*
+	 * The number of VLAN descriptors is added regardless of the requested
+	 * VLAN offload since VLAN is sticky and sending a packet without VLAN
+	 * insertion may require a VLAN descriptor to reset the sticky to 0.
+	 */
+	descs_required += nb_vlan_descs;
+
+	/*
+	 * Max fill level must be sufficient to hold all required descriptors
+	 * to send the packet entirely.
+	 */
+	if (descs_required > max_fill_level)
+		return ENOBUFS;
+
+	return 0;
+}
+
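A sketch of how a datapath's pkt_prepare callback would drive sfc_dp_tx_prepare_pkt(); the callback name and all limit values below are placeholder assumptions, not the PMD's real per-queue state, which a real datapath would take from its queue and sfc_dp_tx_qcreate_info:

#include <stdint.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

#include "sfc_dp_tx.h"

/*
 * Hypothetical pkt_prepare callback. Zero header segment limit and
 * zero bounce buffer length disable the checks that depend on them.
 */
static uint16_t
example_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t nb_pkts)
{
	uint16_t i;

	RTE_SET_USED(tx_queue);

	for (i = 0; i < nb_pkts; i++) {
		int ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
						0 /* max_nb_header_segs */,
						0 /* tso_bounce_buffer_len */,
						UINT32_MAX /* tcph offset */,
						512 /* max_fill_level */,
						1 /* nb_tso_descs */,
						1 /* nb_vlan_descs */);

		if (unlikely(ret != 0)) {
			/* Positive errno from the helper, as documented */
			rte_errno = ret;
			break;
		}
	}

	return i;
}

Returning the index of the first bad packet follows the eth_tx_prep_t contract: the caller learns how many leading packets are safe to hand to pkt_burst.
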
 extern struct sfc_dp_tx sfc_efx_tx;
 extern struct sfc_dp_tx sfc_ef10_tx;
 extern struct sfc_dp_tx sfc_ef10_simple_tx;
+extern struct sfc_dp_tx sfc_ef100_tx;
 
 #ifdef __cplusplus
 }
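The new hw_limits argument of sfc_dp_tx_qsize_up_rings_t gives implementations the bounds to clamp ring sizes against. A simplified sketch of such an implementation (loosely modelled on the EF10 datapath; the one-descriptor headroom is illustrative only):

#include <errno.h>
#include <stdint.h>

#include <rte_common.h>

#include "sfc_dp_tx.h"

/* Hypothetical qsize_up_rings implementation honouring the HW limits. */
static int
example_qsize_up_rings(uint16_t nb_tx_desc,
		       struct sfc_dp_tx_hw_limits *limits,
		       unsigned int *txq_entries,
		       unsigned int *evq_entries,
		       unsigned int *txq_max_fill_level)
{
	/* Assume Tx ring sizes must be powers of two on this hardware */
	unsigned int entries = rte_align32pow2(nb_tx_desc);

	if (entries < limits->txq_min_entries)
		entries = limits->txq_min_entries;
	if (entries > limits->txq_max_entries)
		return EINVAL;	/* positive errno, per the typedef contract */

	*txq_entries = entries;
	/* Assume at most one Tx completion event per descriptor */
	*evq_entries = entries;
	/* Keep one descriptor of headroom (illustrative margin only) */
	*txq_max_fill_level = entries - 1;

	return 0;
}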