X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_dp_tx.h;h=777807985b9b69a3d8f7184e46b250eb0310f0e8;hb=7d5cfaa7508de0fd248b05effbf421a98317006a;hp=1f922e597652a69448a4a61996b868b95c7b9864;hpb=dbdc82416b723b1f089bdcea99d5441016aa724d;p=dpdk.git diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h index 1f922e5976..777807985b 100644 --- a/drivers/net/sfc/sfc_dp_tx.h +++ b/drivers/net/sfc/sfc_dp_tx.h @@ -1,40 +1,20 @@ -/*- - * BSD LICENSE +/* SPDX-License-Identifier: BSD-3-Clause * - * Copyright (c) 2016 Solarflare Communications Inc. - * All rights reserved. + * Copyright(c) 2019-2021 Xilinx, Inc. + * Copyright(c) 2016-2019 Solarflare Communications Inc. * * This software was jointly developed between OKTET Labs (under contract * for Solarflare) and Solarflare Communications, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _SFC_DP_TX_H #define _SFC_DP_TX_H -#include +#include #include "sfc_dp.h" +#include "sfc_debug.h" +#include "sfc_tso.h" #ifdef __cplusplus extern "C" { @@ -49,6 +29,12 @@ struct sfc_dp_txq { struct sfc_dp_queue dpq; }; +/** Datapath transmit queue descriptor number limitations */ +struct sfc_dp_tx_hw_limits { + unsigned int txq_max_entries; + unsigned int txq_min_entries; +}; + /** * Datapath transmit queue creation information. * @@ -57,16 +43,69 @@ struct sfc_dp_txq { * readable. 
*/ struct sfc_dp_tx_qcreate_info { + /** Maximum number of pushed Tx descriptors */ + unsigned int max_fill_level; /** Minimum number of unused Tx descriptors to do reap */ unsigned int free_thresh; - /** Transmit queue configuration flags */ - unsigned int flags; + /** Offloads enabled on the transmit queue */ + uint64_t offloads; /** Tx queue size */ unsigned int txq_entries; /** Maximum size of data in the DMA descriptor */ uint16_t dma_desc_size_max; + /** DMA-mapped Tx descriptors ring */ + void *txq_hw_ring; + /** Associated event queue size */ + unsigned int evq_entries; + /** Hardware event ring */ + void *evq_hw_ring; + /** The queue index in hardware (required to push right doorbell) */ + unsigned int hw_index; + /** Virtual address of the memory-mapped BAR to push Tx doorbell */ + volatile void *mem_bar; + /** VI window size shift */ + unsigned int vi_window_shift; + /** + * Maximum number of bytes into the packet the TCP header can start for + * the hardware to apply TSO packet edits. + */ + uint16_t tso_tcp_header_offset_limit; + /** Maximum number of header DMA descriptors per TSOv3 transaction */ + uint16_t tso_max_nb_header_descs; + /** Maximum header length acceptable by TSOv3 transaction */ + uint16_t tso_max_header_len; + /** Maximum number of payload DMA descriptors per TSOv3 transaction */ + uint16_t tso_max_nb_payload_descs; + /** Maximum payload length per TSOv3 transaction */ + uint32_t tso_max_payload_len; + /** Maximum number of frames to be generated per TSOv3 transaction */ + uint32_t tso_max_nb_outgoing_frames; }; +/** + * Get Tx datapath specific device info. + * + * @param dev_info Device info to be adjusted + */ +typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); + +/** + * Get size of transmit and event queue rings by the number of Tx + * descriptors. 
+ * + * @param nb_tx_desc Number of Tx descriptors + * @param txq_entries Location for number of Tx ring entries + * @param evq_entries Location for number of event ring entries + * @param txq_max_fill_level Location for maximum Tx ring fill level + * + * @return 0 or positive errno. + */ +typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc, + struct sfc_dp_tx_hw_limits *limits, + unsigned int *txq_entries, + unsigned int *evq_entries, + unsigned int *txq_max_fill_level); + /** * Allocate and initialize datapath transmit queue. * @@ -107,20 +146,48 @@ typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq, typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr); +/** + * Transmit event handler used during queue flush only. + */ +typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id); + /** * Transmit queue function called after the queue flush. */ typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq); +/** + * Check Tx descriptor status + */ +typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq, + uint16_t offset); + /** Transmit datapath definition */ struct sfc_dp_tx { struct sfc_dp dp; + unsigned int features; +#define SFC_DP_TX_FEAT_MULTI_PROCESS 0x1 + /** + * Tx offload capabilities supported by the datapath on device + * level only if HW/FW supports it. + */ + uint64_t dev_offload_capa; + /** + * Tx offload capabilities supported by the datapath per-queue + * if HW/FW supports it. 
+ */ + uint64_t queue_offload_capa; + sfc_dp_tx_get_dev_info_t *get_dev_info; + sfc_dp_tx_qsize_up_rings_t *qsize_up_rings; sfc_dp_tx_qcreate_t *qcreate; sfc_dp_tx_qdestroy_t *qdestroy; sfc_dp_tx_qstart_t *qstart; sfc_dp_tx_qstop_t *qstop; + sfc_dp_tx_qtx_ev_t *qtx_ev; sfc_dp_tx_qreap_t *qreap; + sfc_dp_tx_qdesc_status_t *qdesc_status; + eth_tx_prep_t pkt_prepare; eth_tx_burst_t pkt_burst; }; @@ -140,7 +207,150 @@ sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps) return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp); } +/** Get Tx datapath ops by the datapath TxQ handle */ +const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq); + +static inline uint64_t +sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx) +{ + return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa; +} + +static inline unsigned int +sfc_dp_tx_pkt_extra_hdr_segs(struct rte_mbuf **m_seg, + unsigned int *header_len_remaining) +{ + unsigned int nb_extra_header_segs = 0; + + while (rte_pktmbuf_data_len(*m_seg) < *header_len_remaining) { + *header_len_remaining -= rte_pktmbuf_data_len(*m_seg); + *m_seg = (*m_seg)->next; + ++nb_extra_header_segs; + } + + return nb_extra_header_segs; +} + +static inline int +sfc_dp_tx_prepare_pkt(struct rte_mbuf *m, + unsigned int max_nb_header_segs, + unsigned int tso_bounce_buffer_len, + uint32_t tso_tcp_header_offset_limit, + unsigned int max_fill_level, + unsigned int nb_tso_descs, + unsigned int nb_vlan_descs) +{ + unsigned int descs_required = m->nb_segs; + unsigned int tcph_off = ((m->ol_flags & PKT_TX_TUNNEL_MASK) ? 
+					 m->outer_l2_len + m->outer_l3_len : 0) +
+			m->l2_len + m->l3_len;
+	unsigned int header_len = tcph_off + m->l4_len;
+	unsigned int header_len_remaining = header_len;
+	unsigned int nb_header_segs = 1;
+	struct rte_mbuf *m_seg = m;
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+	int ret;
+
+	ret = rte_validate_tx_offload(m);
+	if (ret != 0) {
+		/*
+		 * Negative error code is returned by rte_validate_tx_offload(),
+		 * but positive are used inside net/sfc PMD.
+		 */
+		SFC_ASSERT(ret < 0);
+		return -ret;
+	}
+#endif
+
+	if (max_nb_header_segs != 0) {
+		/* There is a limit on the number of header segments. */
+
+		nb_header_segs +=
+			sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+						     &header_len_remaining);
+
+		if (unlikely(nb_header_segs > max_nb_header_segs)) {
+			/*
+			 * The number of header segments is too large.
+			 *
+			 * If TSO is requested and if the datapath supports
+			 * linearisation of TSO headers, allow the packet
+			 * to proceed with additional checks below.
+			 * Otherwise, throw an error.
+			 */
+			if ((m->ol_flags & PKT_TX_TCP_SEG) == 0 ||
+			    tso_bounce_buffer_len == 0)
+				return EINVAL;
+		}
+	}
+
+	if (m->ol_flags & PKT_TX_TCP_SEG) {
+		switch (m->ol_flags & PKT_TX_TUNNEL_MASK) {
+		case 0:
+			break;
+		case PKT_TX_TUNNEL_VXLAN:
+			/* FALLTHROUGH */
+		case PKT_TX_TUNNEL_GENEVE:
+			if (!(m->ol_flags &
+			      (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6)))
+				return EINVAL;
+		}
+
+		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
+			return EINVAL;
+
+		descs_required += nb_tso_descs;
+
+		/*
+		 * If header segments are already counted above, here
+		 * nothing is done since remaining length is smaller
+		 * than current segment size.
+		 */
+		nb_header_segs +=
+			sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
+						     &header_len_remaining);
+
+		/*
+		 * Extra descriptor which is required when (a part of) payload
+		 * shares the same segment with (a part of) the header.
+ */ + if (rte_pktmbuf_data_len(m_seg) > header_len_remaining) + descs_required++; + + if (tso_bounce_buffer_len != 0) { + if (nb_header_segs > 1 && + unlikely(header_len > tso_bounce_buffer_len)) { + /* + * Header linearization is required and + * the header is too big to be linearized + */ + return EINVAL; + } + } + } + + /* + * The number of VLAN descriptors is added regardless of requested + * VLAN offload since VLAN is sticky and sending packet without VLAN + * insertion may require VLAN descriptor to reset the sticky to 0. + */ + descs_required += nb_vlan_descs; + + /* + * Max fill level must be sufficient to hold all required descriptors + * to send the packet entirely. + */ + if (descs_required > max_fill_level) + return ENOBUFS; + + return 0; +} + extern struct sfc_dp_tx sfc_efx_tx; +extern struct sfc_dp_tx sfc_ef10_tx; +extern struct sfc_dp_tx sfc_ef10_simple_tx; +extern struct sfc_dp_tx sfc_ef100_tx; #ifdef __cplusplus }