/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
#ifndef _SFC_DP_TX_H
#define _SFC_DP_TX_H

#include <ethdev_driver.h>

#include "sfc_dp.h"
#include "sfc_debug.h"
#include "sfc_nic_dma_dp.h"
/**
 * Generic transmit queue information used on data path.
 * It must be kept as small as possible since it is built into
 * the structure used on the datapath.
 */
struct sfc_dp_txq {
	struct sfc_dp_queue	dpq;
};
/** Datapath transmit queue descriptor number limitations */
struct sfc_dp_tx_hw_limits {
	unsigned int		txq_max_entries;
	unsigned int		txq_min_entries;
};
/**
 * Datapath transmit queue creation information.
 *
 * The structure is used just to pass information from the control path
 * to the datapath. It could be just function arguments, but that would
 * be hardly readable.
 */
struct sfc_dp_tx_qcreate_info {
	/** Maximum number of pushed Tx descriptors */
	unsigned int		max_fill_level;
	/** Minimum number of unused Tx descriptors to do reap */
	unsigned int		free_thresh;
	/** Offloads enabled on the transmit queue */
	uint64_t		offloads;
	/** Tx queue size */
	unsigned int		txq_entries;
	/** Maximum size of data in the DMA descriptor */
	uint16_t		dma_desc_size_max;
	/** DMA-mapped Tx descriptors ring */
	void			*txq_hw_ring;
	/** Associated event queue size */
	unsigned int		evq_entries;
	/** Hardware event ring */
	void			*evq_hw_ring;
	/** The queue index in hardware (required to push the right doorbell) */
	unsigned int		hw_index;
	/** Virtual address of the memory-mapped BAR to push Tx doorbell */
	volatile void		*mem_bar;
	/** VI window size shift */
	unsigned int		vi_window_shift;
	/**
	 * Maximum number of bytes into the packet the TCP header can start for
	 * the hardware to apply TSO packet edits.
	 */
	uint16_t		tso_tcp_header_offset_limit;
	/** Maximum number of header DMA descriptors per TSOv3 transaction */
	uint16_t		tso_max_nb_header_descs;
	/** Maximum header length acceptable by TSOv3 transaction */
	uint16_t		tso_max_header_len;
	/** Maximum number of payload DMA descriptors per TSOv3 transaction */
	uint16_t		tso_max_nb_payload_descs;
	/** Maximum payload length per TSOv3 transaction */
	uint32_t		tso_max_payload_len;
	/** Maximum number of frames to be generated per TSOv3 transaction */
	uint32_t		tso_max_nb_outgoing_frames;

	/** NIC's DMA mapping information */
	const struct sfc_nic_dma_info	*nic_dma_info;
};
/**
 * Get Tx datapath specific device info.
 *
 * @param dev_info		Device info to be adjusted
 */
typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
/**
 * Get size of transmit and event queue rings by the number of Tx
 * descriptors.
 *
 * @param nb_tx_desc		Number of Tx descriptors
 * @param limits		Tx queue HW entries limits
 * @param txq_entries		Location for number of Tx ring entries
 * @param evq_entries		Location for number of event ring entries
 * @param txq_max_fill_level	Location for maximum Tx ring fill level
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qsize_up_rings_t)(uint16_t nb_tx_desc,
					 struct sfc_dp_tx_hw_limits *limits,
					 unsigned int *txq_entries,
					 unsigned int *evq_entries,
					 unsigned int *txq_max_fill_level);
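
/*
 * Illustrative sketch of a qsize_up_rings callback (not part of this
 * header; the xyz_* names are hypothetical): such a callback might clamp
 * the requested descriptor count to the HW limits and size the event
 * ring to match, keeping one Tx entry spare as the maximum fill level.
 *
 *	static int
 *	xyz_tx_qsize_up_rings(uint16_t nb_tx_desc,
 *			      struct sfc_dp_tx_hw_limits *limits,
 *			      unsigned int *txq_entries,
 *			      unsigned int *evq_entries,
 *			      unsigned int *txq_max_fill_level)
 *	{
 *		unsigned int entries = RTE_MAX(rte_align32pow2(nb_tx_desc),
 *					       limits->txq_min_entries);
 *
 *		if (entries > limits->txq_max_entries)
 *			return EINVAL;
 *
 *		*txq_entries = entries;
 *		*evq_entries = entries;
 *		*txq_max_fill_level = entries - 1;
 *		return 0;
 *	}
 */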
/**
 * Allocate and initialize datapath transmit queue.
 *
 * @param port_id	The port identifier
 * @param queue_id	The queue identifier
 * @param pci_addr	PCI function address
 * @param socket_id	Socket identifier to allocate memory
 * @param info		Tx queue details wrapped in structure
 * @param dp_txqp	Location for generic datapath transmit queue pointer
 *
 * @return 0 or positive errno.
 */
typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
				  const struct rte_pci_addr *pci_addr,
				  int socket_id,
				  const struct sfc_dp_tx_qcreate_info *info,
				  struct sfc_dp_txq **dp_txqp);
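
/*
 * Illustrative qcreate sketch (hypothetical xyz_* names, assuming the
 * datapath-private queue embeds struct sfc_dp_txq as its "dp" member):
 * allocate the queue on the requested socket, record the creation info
 * needed on the data path, and hand back the generic handle.
 *
 *	static int
 *	xyz_tx_qcreate(uint16_t port_id, uint16_t queue_id,
 *		       const struct rte_pci_addr *pci_addr, int socket_id,
 *		       const struct sfc_dp_tx_qcreate_info *info,
 *		       struct sfc_dp_txq **dp_txqp)
 *	{
 *		struct xyz_txq *txq;
 *
 *		txq = rte_zmalloc_socket("xyz-txq", sizeof(*txq),
 *					 RTE_CACHE_LINE_SIZE, socket_id);
 *		if (txq == NULL)
 *			return ENOMEM;
 *
 *		sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
 *		txq->max_fill_level = info->max_fill_level;
 *		txq->free_thresh = info->free_thresh;
 *
 *		*dp_txqp = &txq->dp;
 *		return 0;
 *	}
 */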
/**
 * Free resources allocated for datapath transmit queue.
 */
typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);
/**
 * Transmit queue start callback.
 *
 * It hands the EvQ over to the datapath.
 */
typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int evq_read_ptr,
				 unsigned int txq_desc_index);
/**
 * Transmit queue stop function called before the queue flush.
 *
 * It returns the EvQ to the control path.
 */
typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
				 unsigned int *evq_read_ptr);
/**
 * Transmit event handler used during queue flush only.
 */
typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
/**
 * Transmit queue function called after the queue flush.
 */
typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
/**
 * Check Tx descriptor status
 */
typedef int (sfc_dp_tx_qdesc_status_t)(struct sfc_dp_txq *dp_txq,
				       uint16_t offset);
/** Transmit datapath definition */
struct sfc_dp_tx {
	struct sfc_dp			dp;

	unsigned int			features;
#define SFC_DP_TX_FEAT_MULTI_PROCESS	0x1
#define SFC_DP_TX_FEAT_STATS		0x2
	/**
	 * Tx offload capabilities supported by the datapath on device
	 * level only if HW/FW supports it.
	 */
	uint64_t			dev_offload_capa;
	/**
	 * Tx offload capabilities supported by the datapath per-queue
	 * if HW/FW supports it.
	 */
	uint64_t			queue_offload_capa;
	sfc_dp_tx_get_dev_info_t	*get_dev_info;
	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
	sfc_dp_tx_qcreate_t		*qcreate;
	sfc_dp_tx_qdestroy_t		*qdestroy;
	sfc_dp_tx_qstart_t		*qstart;
	sfc_dp_tx_qstop_t		*qstop;
	sfc_dp_tx_qtx_ev_t		*qtx_ev;
	sfc_dp_tx_qreap_t		*qreap;
	sfc_dp_tx_qdesc_status_t	*qdesc_status;
	eth_tx_prep_t			pkt_prepare;
	eth_tx_burst_t			pkt_burst;
};
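
/*
 * Illustrative sketch (not part of this header; all xyz_* names are
 * hypothetical): a concrete Tx datapath fills in this ops structure and
 * registers it on the global datapath list at startup, wiring each
 * callback typedef above to its implementation.
 *
 *	struct sfc_dp_tx xyz_tx = {
 *		.dp = {
 *			.name	= "xyz",
 *			.type	= SFC_DP_TX,
 *		},
 *		.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
 *		.dev_offload_capa	= RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
 *		.queue_offload_capa	= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 *					  RTE_ETH_TX_OFFLOAD_TCP_CKSUM,
 *		.qsize_up_rings		= xyz_tx_qsize_up_rings,
 *		.qcreate		= xyz_tx_qcreate,
 *		.qdestroy		= xyz_tx_qdestroy,
 *		.qstart			= xyz_tx_qstart,
 *		.qstop			= xyz_tx_qstop,
 *		.qtx_ev			= xyz_tx_qtx_ev,
 *		.qreap			= xyz_tx_qreap,
 *		.qdesc_status		= xyz_tx_qdesc_status,
 *		.pkt_prepare		= xyz_tx_prepare_pkts,
 *		.pkt_burst		= xyz_tx_xmit_pkts,
 *	};
 */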
static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
{
	struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}
static inline struct sfc_dp_tx *
sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
{
	struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);

	return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
}
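
/*
 * Usage sketch (illustrative; dp_head stands in for the PMD's global
 * datapath registry list): the control path tries a user-requested
 * datapath by name first and falls back to capability matching.
 *
 *	struct sfc_dp_tx *dp_tx;
 *
 *	dp_tx = sfc_dp_find_tx_by_name(&dp_head, dp_name);
 *	if (dp_tx == NULL)
 *		dp_tx = sfc_dp_find_tx_by_caps(&dp_head, avail_caps);
 */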
/** Get Tx datapath ops by the datapath TxQ handle */
const struct sfc_dp_tx *sfc_dp_tx_by_dp_txq(const struct sfc_dp_txq *dp_txq);
static inline uint64_t
sfc_dp_tx_offload_capa(const struct sfc_dp_tx *dp_tx)
{
	return dp_tx->dev_offload_capa | dp_tx->queue_offload_capa;
}
static inline unsigned int
sfc_dp_tx_pkt_extra_hdr_segs(struct rte_mbuf **m_seg,
			     unsigned int *header_len_remaining)
{
	unsigned int nb_extra_header_segs = 0;

	while (rte_pktmbuf_data_len(*m_seg) < *header_len_remaining) {
		*header_len_remaining -= rte_pktmbuf_data_len(*m_seg);
		*m_seg = (*m_seg)->next;
		++nb_extra_header_segs;
	}

	return nb_extra_header_segs;
}
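
/*
 * Worked example: for a 60-byte header spread over segments of 20, 20
 * and 100 bytes, the loop above consumes the first two segments, leaves
 * *m_seg pointing at the 100-byte segment with *header_len_remaining
 * equal to 20, and returns 2 extra header segments.
 */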
static inline int
sfc_dp_tx_prepare_pkt(struct rte_mbuf *m,
		      unsigned int max_nb_header_segs,
		      unsigned int tso_bounce_buffer_len,
		      uint32_t tso_tcp_header_offset_limit,
		      unsigned int max_fill_level,
		      unsigned int nb_tso_descs,
		      unsigned int nb_vlan_descs)
{
	unsigned int descs_required = m->nb_segs;
	unsigned int tcph_off = ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
				 m->outer_l2_len + m->outer_l3_len : 0) +
				m->l2_len + m->l3_len;
	unsigned int header_len = tcph_off + m->l4_len;
	unsigned int header_len_remaining = header_len;
	unsigned int nb_header_segs = 1;
	struct rte_mbuf *m_seg = m;

#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
	int ret;

	ret = rte_validate_tx_offload(m);
	if (ret != 0) {
		/*
		 * A negative error code is returned by
		 * rte_validate_tx_offload(), but only positive error codes
		 * are used inside the net/sfc PMD.
		 */
		SFC_ASSERT(ret < 0);
		return -ret;
	}
#endif

	if (max_nb_header_segs != 0) {
		/* There is a limit on the number of header segments. */

		nb_header_segs +=
		    sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
						 &header_len_remaining);

		if (unlikely(nb_header_segs > max_nb_header_segs)) {
			/*
			 * The number of header segments is too large.
			 *
			 * If TSO is requested and if the datapath supports
			 * linearisation of TSO headers, allow the packet
			 * to proceed with additional checks below.
			 * Otherwise, throw an error.
			 */
			if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 ||
			    tso_bounce_buffer_len == 0)
				return EINVAL;
		}
	}

	if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
		case 0:
			break;
		case RTE_MBUF_F_TX_TUNNEL_VXLAN:
			/* FALLTHROUGH */
		case RTE_MBUF_F_TX_TUNNEL_GENEVE:
			if (!(m->ol_flags &
			      (RTE_MBUF_F_TX_OUTER_IPV4 |
			       RTE_MBUF_F_TX_OUTER_IPV6)))
				return EINVAL;
		}

		if (unlikely(tcph_off > tso_tcp_header_offset_limit))
			return EINVAL;

		descs_required += nb_tso_descs;

		/*
		 * If header segments were already counted above, nothing
		 * more is done here since the remaining header length is
		 * smaller than the current segment size.
		 */
		nb_header_segs +=
		    sfc_dp_tx_pkt_extra_hdr_segs(&m_seg,
						 &header_len_remaining);
	}

	/*
	 * An extra descriptor is required when (a part of) the payload
	 * shares a segment with (a part of) the header.
	 */
	if (rte_pktmbuf_data_len(m_seg) > header_len_remaining)
		descs_required++;

	if (tso_bounce_buffer_len != 0) {
		if (nb_header_segs > 1 &&
		    unlikely(header_len > tso_bounce_buffer_len)) {
			/*
			 * Header linearization is required, but the header
			 * is too big to fit the bounce buffer.
			 */
			return EINVAL;
		}
	}

	/*
	 * The number of VLAN descriptors is added regardless of the
	 * requested VLAN offload since the VLAN tag is sticky and sending
	 * a packet without VLAN insertion may require a VLAN descriptor
	 * to reset the sticky tag to 0.
	 */
	descs_required += nb_vlan_descs;

	/*
	 * The max fill level must be sufficient to hold all descriptors
	 * required to send the packet entirely.
	 */
	if (descs_required > max_fill_level)
		return ENOBUFS;

	return 0;
}
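
/*
 * Usage sketch (illustrative; xyz_* names and the txq->* limit fields
 * are hypothetical): a datapath's pkt_prepare callback validates each
 * packet of a burst with sfc_dp_tx_prepare_pkt() and reports the first
 * failure via rte_errno, returning the number of packets accepted.
 *
 *	static uint16_t
 *	xyz_tx_prepare_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 *			    uint16_t nb_pkts)
 *	{
 *		struct xyz_txq *txq = tx_queue;
 *		uint16_t i;
 *
 *		for (i = 0; i < nb_pkts; i++) {
 *			int ret;
 *
 *			ret = sfc_dp_tx_prepare_pkt(tx_pkts[i],
 *					txq->max_nb_header_segs,
 *					txq->tso_bounce_buffer_len,
 *					txq->tso_tcp_header_offset_limit,
 *					txq->max_fill_level,
 *					txq->nb_tso_descs,
 *					txq->nb_vlan_descs);
 *			if (unlikely(ret != 0)) {
 *				rte_errno = ret;
 *				break;
 *			}
 *		}
 *
 *		return i;
 *	}
 */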

extern struct sfc_dp_tx sfc_efx_tx;
extern struct sfc_dp_tx sfc_ef10_tx;
extern struct sfc_dp_tx sfc_ef10_simple_tx;
extern struct sfc_dp_tx sfc_ef100_tx;

#endif /* _SFC_DP_TX_H */