X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fsfc%2Fsfc_dp_tx.h;h=dcad4fe5855c91351e63b533c5aba5d70547d710;hb=4064574535e5f9a4f576432565e8dbac768dd156;hp=885094b67613c4a19186ab1ed129907d31436285;hpb=07685524c94af5068953c2e9e2356b00939ca343;p=dpdk.git diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h index 885094b676..dcad4fe585 100644 --- a/drivers/net/sfc/sfc_dp_tx.h +++ b/drivers/net/sfc/sfc_dp_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * - * Copyright (c) 2016-2018 Solarflare Communications Inc. - * All rights reserved. + * Copyright(c) 2019-2020 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. * * This software was jointly developed between OKTET Labs (under contract * for Solarflare) and Solarflare Communications, Inc. @@ -14,6 +14,7 @@ #include "sfc_dp.h" #include "sfc_debug.h" +#include "sfc_tso.h" #ifdef __cplusplus extern "C" { @@ -156,12 +157,17 @@ struct sfc_dp_tx { struct sfc_dp dp; unsigned int features; -#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1 -#define SFC_DP_TX_FEAT_TSO 0x2 -#define SFC_DP_TX_FEAT_MULTI_SEG 0x4 -#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8 -#define SFC_DP_TX_FEAT_MULTI_POOL 0x10 -#define SFC_DP_TX_FEAT_REFCNT 0x20 +#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x1 + /** + * Tx offload capabilities supported by the datapath on device + * level only if HW/FW supports it. + */ + uint64_t dev_offload_capa; + /** + * Tx offload capabilities supported by the datapath per-queue + * if HW/FW supports it. + */ + uint64_t queue_offload_capa; sfc_dp_tx_get_dev_info_t *get_dev_info; sfc_dp_tx_qsize_up_rings_t *qsize_up_rings; sfc_dp_tx_qcreate_t *qcreate; @@ -194,9 +200,21 @@ sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps) /** Get Tx datapath ops by the datapath TxQ handle */ const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq); +static inline uint64_t +sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx) +{ + return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa; +} + static inline int -sfc_dp_tx_prepare_pkt(struct rte_mbuf *m) +sfc_dp_tx_prepare_pkt(struct rte_mbuf *m, + uint32_t tso_tcp_header_offset_limit, + unsigned int max_fill_level, + unsigned int nb_tso_descs, + unsigned int nb_vlan_descs) { + unsigned int descs_required = m->nb_segs; + #ifdef RTE_LIBRTE_SFC_EFX_DEBUG int ret; @@ -209,10 +227,62 @@ sfc_dp_tx_prepare_pkt(struct rte_mbuf *m) SFC_ASSERT(ret < 0); return -ret; } -#else - RTE_SET_USED(m); #endif + if (m->ol_flags & PKT_TX_TCP_SEG) { + unsigned int tcph_off = m->l2_len + m->l3_len; + unsigned int header_len; + + switch (m->ol_flags & PKT_TX_TUNNEL_MASK) { + case 0: + break; + case PKT_TX_TUNNEL_VXLAN: + /* FALLTHROUGH */ + case PKT_TX_TUNNEL_GENEVE: + if (!(m->ol_flags & + (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6))) + return EINVAL; + + tcph_off += m->outer_l2_len + m->outer_l3_len; + } + + header_len = tcph_off + m->l4_len; + + if (unlikely(tcph_off > tso_tcp_header_offset_limit)) + return EINVAL; + + descs_required += nb_tso_descs; + + /* + * Extra descriptor that is required when a packet header + * is separated from remaining content of the first segment. 
+		 */
+		if (rte_pktmbuf_data_len(m) > header_len) {
+			descs_required++;
+		} else if (rte_pktmbuf_data_len(m) < header_len &&
+			   unlikely(header_len > SFC_TSOH_STD_LEN)) {
+			/*
+			 * Header linearization is required, but the
+			 * header is too big to be linearized.
+			 */
+			return EINVAL;
+		}
+	}
+
+	/*
+	 * The number of VLAN descriptors is added regardless of the requested
+	 * VLAN offload since VLAN is sticky: sending a packet without VLAN
+	 * insertion may still require a VLAN descriptor to reset it to 0.
+	 */
+	descs_required += nb_vlan_descs;
+
+	/*
+	 * The max fill level must be sufficient to hold all descriptors
+	 * required to send the packet in its entirety.
+	 */
+	if (descs_required > max_fill_level)
+		return ENOBUFS;
+
 	return 0;
 }
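
For context: sfc_dp_tx_offload_capa() above simply ORs the device-level and per-queue
capability masks, while sfc_dp_tx_prepare_pkt() is intended to be called per mbuf from a
datapath's tx_pkt_prepare callback with that datapath's own TSO/VLAN descriptor counts and
queue limits. A minimal sketch of such a caller is shown below; the queue type my_txq, its
fields and the descriptor counts passed in are hypothetical placeholders, not part of this
patch.

/*
 * Illustrative sketch only (not from the patch): a hypothetical
 * tx_pkt_prepare callback validating each mbuf with
 * sfc_dp_tx_prepare_pkt() before it reaches the xmit path.
 */
#include <rte_errno.h>
#include <rte_mbuf.h>

#include "sfc_dp_tx.h"

struct my_txq {				/* hypothetical queue context */
	unsigned int	max_fill_level;		/* TxQ ring limit */
	uint32_t	tso_tcp_header_offset_limit;	/* from HW/FW limits */
};

static uint16_t
my_dp_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts)
{
	struct my_txq *txq = tx_queue;
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * Assume one extra option descriptor per TSO packet and
		 * one per VLAN insertion; real counts are datapath-specific.
		 */
		int rc = sfc_dp_tx_prepare_pkt(tx_pkts[i],
				txq->tso_tcp_header_offset_limit,
				txq->max_fill_level,
				1 /* nb_tso_descs */,
				1 /* nb_vlan_descs */);

		if (rc != 0) {
			/* The helper returns a positive errno value. */
			rte_errno = rc;
			break;
		}
	}

	return i;
}

Capability reporting (e.g. in dev_infos_get) would then advertise
sfc_dp_tx_offload_capa(dp_tx), i.e. dev_offload_capa | queue_offload_capa, as the combined
Tx offload mask.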