X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsfc%2Fsfc_dp_rx.h;h=760540ba22505c277d733ed37d4370b3158a6721;hb=refs%2Fheads%2Fdoc-20211126;hp=3f6a604ba520f75248fd6fa9aa395caded0d6cf0;hpb=1d8f3a8028cfb9be1d9e49195030de55afd2e01b;p=dpdk.git

diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index 3f6a604ba5..760540ba22 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -1,41 +1,20 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
- * Copyright (c) 2017 Solarflare Communications Inc.
- * All rights reserved.
+ * Copyright(c) 2019-2021 Xilinx, Inc.
+ * Copyright(c) 2017-2019 Solarflare Communications Inc.
  *
  * This software was jointly developed between OKTET Labs (under contract
  * for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- *    this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- *    this list of conditions and the following disclaimer in the documentation
- *    and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _SFC_DP_RX_H
 #define _SFC_DP_RX_H
 
 #include <rte_mempool.h>
-#include <rte_ethdev.h>
+#include <ethdev_driver.h>
 
 #include "sfc_dp.h"
+#include "sfc_nic_dma_dp.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -50,6 +29,14 @@ struct sfc_dp_rxq {
 	struct sfc_dp_queue	dpq;
 };
 
+/** Datapath receive queue descriptor number limitations */
+struct sfc_dp_rx_hw_limits {
+	unsigned int rxq_max_entries;
+	unsigned int rxq_min_entries;
+	unsigned int evq_max_entries;
+	unsigned int evq_min_entries;
+};
+
 /**
  * Datapath receive queue creation information.
  *
@@ -60,6 +47,8 @@ struct sfc_dp_rxq {
 struct sfc_dp_rx_qcreate_info {
 	/** Memory pool to allocate Rx buffer from */
 	struct rte_mempool	*refill_mb_pool;
+	/** Maximum number of pushed Rx descriptors in the queue */
+	unsigned int		max_fill_level;
 	/** Minimum number of unused Rx descriptors to do refill */
 	unsigned int		refill_threshold;
 	/**
@@ -86,6 +75,8 @@ struct sfc_dp_rx_qcreate_info {
 	/** DMA-mapped Rx descriptors ring */
 	void			*rxq_hw_ring;
 
+	/** Event queue index in hardware */
+	unsigned int		evq_hw_index;
 	/** Associated event queue size */
 	unsigned int		evq_entries;
 	/** Hardware event ring */
@@ -98,8 +89,56 @@ struct sfc_dp_rx_qcreate_info {
 	 * doorbell
 	 */
 	volatile void		*mem_bar;
+	/** Function control window offset */
+	efsys_dma_addr_t	fcw_offset;
+	/** VI window size shift */
+	unsigned int		vi_window_shift;
+
+	/** Mask to extract user bits from Rx prefix mark field */
+	uint32_t		user_mark_mask;
+
+	/** NIC's DMA mapping information */
+	const struct sfc_nic_dma_info	*nic_dma_info;
 };
 
+/**
+ * Get Rx datapath specific device info.
+ *
+ * @param dev_info		Device info to be adjusted
+ */
+typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
+/**
+ * Test if an Rx datapath supports specific mempool ops.
+ *
+ * @param pool			The name of the pool operations to test.
+ *
+ * @return Check status.
+ * @retval	0		Best mempool ops choice.
+ * @retval	1		Mempool ops are supported.
+ * @retval	-ENOTSUP	Mempool ops not supported.
+ */
+typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
+
+/**
+ * Get size of receive and event queue rings by the number of Rx
+ * descriptors and mempool configuration.
+ *
+ * @param nb_rx_desc		Number of Rx descriptors
+ * @param mb_pool		mbuf pool with Rx buffers
+ * @param rxq_entries		Location for number of Rx ring entries
+ * @param evq_entries		Location for number of event ring entries
+ * @param rxq_max_fill_level	Location for maximum Rx ring fill level
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+					 struct sfc_dp_rx_hw_limits *limits,
+					 struct rte_mempool *mb_pool,
+					 unsigned int *rxq_entries,
+					 unsigned int *evq_entries,
+					 unsigned int *rxq_max_fill_level);
+
 /**
  * Allocate and initialize datapath receive queue.
  *
@@ -129,7 +168,8 @@ typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
  * It handovers EvQ to the datapath.
  */
 typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
-				 unsigned int evq_read_ptr);
+				 unsigned int evq_read_ptr,
+				 const efx_rx_prefix_layout_t *pinfo);
 
 /**
  * Receive queue stop function called before flush.
  *
@@ -142,6 +182,12 @@ typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
  */
 typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
 
+/**
+ * Packed stream receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
+				      unsigned int id);
+
 /**
  * Receive queue purge function called after queue flush.
  *
@@ -150,7 +196,8 @@ typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
 typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
 
 /** Get packet types recognized/classified */
-typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(
+	uint32_t tunnel_encaps);
 
 /** Get number of pending Rx descriptors */
 typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
@@ -158,23 +205,51 @@ typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
 /** Check Rx descriptor status */
 typedef int (sfc_dp_rx_qdesc_status_t)(struct sfc_dp_rxq *dp_rxq,
 					uint16_t offset);
+/** Enable Rx interrupts */
+typedef int (sfc_dp_rx_intr_enable_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Disable Rx interrupts */
+typedef int (sfc_dp_rx_intr_disable_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Get number of pushed Rx buffers */
+typedef unsigned int (sfc_dp_rx_get_pushed_t)(struct sfc_dp_rxq *dp_rxq);
 
 /** Receive datapath definition */
 struct sfc_dp_rx {
 	struct sfc_dp		dp;
 
 	unsigned int		features;
-#define SFC_DP_RX_FEAT_SCATTER		0x1
-#define SFC_DP_RX_FEAT_MULTI_PROCESS	0x2
+#define SFC_DP_RX_FEAT_MULTI_PROCESS	0x1
+#define SFC_DP_RX_FEAT_FLOW_FLAG	0x2
+#define SFC_DP_RX_FEAT_FLOW_MARK	0x4
+#define SFC_DP_RX_FEAT_INTR		0x8
+#define SFC_DP_RX_FEAT_STATS		0x10
+	/**
+	 * Rx offload capabilities supported by the datapath on device
+	 * level only if HW/FW supports it.
+	 */
+	uint64_t			dev_offload_capa;
+	/**
+	 * Rx offload capabilities supported by the datapath per-queue
+	 * if HW/FW supports it.
+	 */
+	uint64_t			queue_offload_capa;
+	sfc_dp_rx_get_dev_info_t	*get_dev_info;
+	sfc_dp_rx_pool_ops_supported_t	*pool_ops_supported;
+	sfc_dp_rx_qsize_up_rings_t	*qsize_up_rings;
 	sfc_dp_rx_qcreate_t		*qcreate;
 	sfc_dp_rx_qdestroy_t		*qdestroy;
 	sfc_dp_rx_qstart_t		*qstart;
 	sfc_dp_rx_qstop_t		*qstop;
 	sfc_dp_rx_qrx_ev_t		*qrx_ev;
+	sfc_dp_rx_qrx_ps_ev_t		*qrx_ps_ev;
 	sfc_dp_rx_qpurge_t		*qpurge;
 	sfc_dp_rx_supported_ptypes_get_t	*supported_ptypes_get;
 	sfc_dp_rx_qdesc_npending_t	*qdesc_npending;
 	sfc_dp_rx_qdesc_status_t	*qdesc_status;
+	sfc_dp_rx_intr_enable_t		*intr_enable;
+	sfc_dp_rx_intr_disable_t	*intr_disable;
+	sfc_dp_rx_get_pushed_t		*get_pushed;
 
 	eth_rx_burst_t			pkt_burst;
 };
@@ -194,8 +269,19 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
 	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
 }
 
+static inline uint64_t
+sfc_dp_rx_offload_capa(const struct sfc_dp_rx *dp_rx)
+{
+	return dp_rx->dev_offload_capa | dp_rx->queue_offload_capa;
+}
+
+/** Get Rx datapath ops by the datapath RxQ handle */
+const struct sfc_dp_rx *sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+
 extern struct sfc_dp_rx sfc_efx_rx;
 extern struct sfc_dp_rx sfc_ef10_rx;
+extern struct sfc_dp_rx sfc_ef10_essb_rx;
+extern struct sfc_dp_rx sfc_ef100_rx;
 
 #ifdef __cplusplus
 }
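
Note (not part of the patch): the hunks above extend the Rx datapath ops table, struct sfc_dp_rx, with ring sizing, interrupt and get_pushed callbacks plus separate device-level and per-queue offload capability masks. The sketch below shows how a hypothetical backend could wire a few of them up; the sfc_dummy_rx name, the callback bodies and the feature/offload choices are illustrative assumptions, not code from the driver.

/* Minimal sketch of a hypothetical Rx datapath backend (illustrative only). */
#include <rte_common.h>

#include "sfc_dp_rx.h"

static sfc_dp_rx_qsize_up_rings_t sfc_dummy_rx_qsize_up_rings;
static int
sfc_dummy_rx_qsize_up_rings(uint16_t nb_rx_desc,
			    struct sfc_dp_rx_hw_limits *limits,
			    struct rte_mempool *mb_pool,
			    unsigned int *rxq_entries,
			    unsigned int *evq_entries,
			    unsigned int *rxq_max_fill_level)
{
	RTE_SET_USED(mb_pool);

	/* Clamp the requested ring size to the hardware limits. */
	*rxq_entries = RTE_MAX(RTE_MIN((unsigned int)nb_rx_desc,
				       limits->rxq_max_entries),
			       limits->rxq_min_entries);
	*evq_entries = RTE_MIN(*rxq_entries, limits->evq_max_entries);
	/* Keep one descriptor unused so a full ring is distinguishable from empty. */
	*rxq_max_fill_level = *rxq_entries - 1;
	return 0;
}

static sfc_dp_rx_intr_enable_t sfc_dummy_rx_intr_enable;
static int
sfc_dummy_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	RTE_SET_USED(dp_rxq);
	return 0;	/* nothing to arm in this sketch */
}

struct sfc_dp_rx sfc_dummy_rx = {
	/* .dp (name, type, HW/FW capabilities) omitted; see struct sfc_dp in sfc_dp.h */
	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
				  SFC_DP_RX_FEAT_INTR,
	.dev_offload_capa	= 0,
	.queue_offload_capa	= 0,
	.qsize_up_rings		= sfc_dummy_rx_qsize_up_rings,
	.intr_enable		= sfc_dummy_rx_intr_enable,
	/* qcreate/qstart/pkt_burst and the remaining callbacks are omitted */
};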
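
A second sketch (also not from the patch): the new sfc_dp_rx_offload_capa() helper combines the device-level and per-queue offload masks, and sfc_dp_find_rx_by_caps() picks a datapath whose requirements are satisfied by the available HW/FW capabilities. The function name below, the sfc_dp_head list and avail_caps are assumed to be provided by the caller.

#include <errno.h>

#include <ethdev_driver.h>

#include "sfc_dp_rx.h"

/* Illustrative helper: report Rx offload capabilities for the chosen datapath. */
static int
sfc_example_fill_rx_dev_info(struct sfc_dp_list *sfc_dp_head,
			     unsigned int avail_caps,
			     struct rte_eth_dev_info *dev_info)
{
	const struct sfc_dp_rx *dp_rx;

	/* Pick an Rx datapath that the available capabilities allow. */
	dp_rx = sfc_dp_find_rx_by_caps(sfc_dp_head, avail_caps);
	if (dp_rx == NULL)
		return -ENOENT;

	/* Device-level and per-queue offloads are advertised together. */
	dev_info->rx_offload_capa = sfc_dp_rx_offload_capa(dp_rx);

	/* Let the datapath adjust other device info fields if it implements it. */
	if (dp_rx->get_dev_info != NULL)
		dp_rx->get_dev_info(dev_info);

	return 0;
}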