/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _IXGBE_RXTX_H_
#define _IXGBE_RXTX_H_
/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary, which
 * also helps cache line utilization. H/W supports a cache line size of up
 * to 128 bytes.
 */
#define IXGBE_ALIGN	128

#define IXGBE_RXD_ALIGN	(IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
#define IXGBE_TXD_ALIGN	(IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *	(num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
 */
#define IXGBE_MIN_RING_DESC	32
#define IXGBE_MAX_RING_DESC	4096
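
/*
 * A minimal sketch (not part of this header) of how queue setup is
 * expected to validate a requested descriptor count against the
 * constraints above; the authoritative checks live in ixgbe_rxtx.c:
 *
 *	if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
 *	    nb_desc > IXGBE_MAX_RING_DESC ||
 *	    nb_desc < IXGBE_MIN_RING_DESC)
 *		return -EINVAL;
 */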
#define RTE_PMD_IXGBE_TX_MAX_BURST	32
#define RTE_PMD_IXGBE_RX_MAX_BURST	32
#define RTE_IXGBE_TX_MAX_FREE_BUF_SZ	64

#define RTE_IXGBE_DESCS_PER_LOOP	4
#ifdef RTE_IXGBE_INC_VECTOR
#define RTE_IXGBE_RXQ_REARM_THRESH	32
#define RTE_IXGBE_MAX_RX_BURST		RTE_IXGBE_RXQ_REARM_THRESH
#endif
#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
		sizeof(union ixgbe_adv_rx_desc))
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while (0)
#endif
#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS	10
#define RTE_IXGBE_WAIT_100_US			100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT		2
/**
 * Structure associated with each descriptor of the RX ring of an RX queue.
 */
struct ixgbe_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
struct ixgbe_scattered_rx_entry {
	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};
/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct ixgbe_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};
/**
 * Structure associated with each descriptor of the TX ring of a TX queue
 * (vector PMD variant: mbuf pointer only).
 */
struct ixgbe_tx_entry_v {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};
/**
 * Structure associated with each RX queue.
 */
struct ixgbe_rx_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
	struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
	struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered RX software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
	uint64_t mbuf_initializer; /**< value to init mbufs */
	uint16_t nb_rx_desc; /**< number of RX descriptors. */
	uint16_t rx_tail; /**< current value of RDT register. */
	uint16_t nb_rx_hold; /**< number of held free RX desc. */
	uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
	uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
	uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
	uint16_t rx_using_sse; /**< indicates that vector RX is in use */
#ifdef RTE_IXGBE_INC_VECTOR
	uint16_t rxrearm_nb; /**< number of remaining to be re-armed */
	uint16_t rxrearm_start; /**< the idx we start the re-arming from */
#endif
	uint16_t rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t queue_id; /**< RX queue index. */
	uint16_t reg_idx; /**< RX queue register index. */
	uint8_t port_id; /**< Device port identifier. */
	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
	uint8_t rx_deferred_start; /**< not in global dev start. */
	/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
	struct rte_mbuf fake_mbuf;
	/** hold packets to return to application */
	struct rte_mbuf *rx_stage[RTE_PMD_IXGBE_RX_MAX_BURST * 2];
};
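
/*
 * A minimal sketch (not part of this header) of how the bulk-allocation
 * RX path consumes the staging area above: packets already scanned from
 * the HW ring sit in rx_stage[] and are handed to the application on
 * subsequent receive calls. The helper name is illustrative only:
 *
 *	static inline uint16_t
 *	rx_fill_from_stage(struct ixgbe_rx_queue *rxq,
 *			   struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 *	{
 *		struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
 *		int i;
 *
 *		nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
 *		for (i = 0; i < nb_pkts; ++i)
 *			rx_pkts[i] = stage[i];
 *		rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
 *		rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
 *		return nb_pkts;
 *	}
 */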
/**
 * IXGBE CTX Constants
 */
enum ixgbe_advctx_num {
	IXGBE_CTX_0   = 0, /**< CTX0 */
	IXGBE_CTX_1   = 1, /**< CTX1 */
	IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
};
/** Offload features */
union ixgbe_tx_offload {
	uint64_t data[2];
	struct {
		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
		uint64_t tso_segsz:16; /**< TCP TSO segment size */
		uint64_t vlan_tci:16;
		/**< VLAN Tag Control Identifier (CPU order). */

		/* fields for TX offloading of tunnels */
		uint64_t outer_l3_len:8; /**< Outer L3 (IP) Hdr Length. */
		uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
	};
};
/*
 * Compare mask for vlan_macip_len.data,
 * should be in sync with ixgbe_vlan_macip.f layout.
 */
#define TX_VLAN_CMP_MASK	0xFFFF0000  /**< VLAN length - 16-bits. */
#define TX_MAC_LEN_CMP_MASK	0x0000FE00  /**< MAC length - 7-bits. */
#define TX_IP_LEN_CMP_MASK	0x000001FF  /**< IP length - 9-bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK	(TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
/**
 * Structure to check if a new context needs to be built.
 */
struct ixgbe_advctx_info {
	uint64_t flags; /**< ol_flags for context build. */
	/**< tx offload: vlan, tso, l2-l3-l4 lengths. */
	union ixgbe_tx_offload tx_offload;
	/** compare mask for tx offload. */
	union ixgbe_tx_offload tx_offload_mask;
};
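
/*
 * A minimal sketch (not part of this header) of how a cached context is
 * matched: the offload flags must agree and the masked offload fields
 * must compare equal, which the data[2] view of the union reduces to two
 * 64-bit compares. ctx_idx, ol_flags and tx_offload are the candidate
 * inputs; on a match the cached context is reused and no new context
 * descriptor is written:
 *
 *	if (txq->ctx_cache[ctx_idx].flags == ol_flags &&
 *	    txq->ctx_cache[ctx_idx].tx_offload.data[0] ==
 *	    (txq->ctx_cache[ctx_idx].tx_offload_mask.data[0] &
 *	     tx_offload.data[0]) &&
 *	    txq->ctx_cache[ctx_idx].tx_offload.data[1] ==
 *	    (txq->ctx_cache[ctx_idx].tx_offload_mask.data[1] &
 *	     tx_offload.data[1]))
 *		return ctx_idx;
 */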
/**
 * Structure associated with each TX queue.
 */
struct ixgbe_tx_queue {
	/** TX ring virtual address. */
	volatile union ixgbe_adv_tx_desc *tx_ring;
	uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
	struct ixgbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
	struct ixgbe_tx_entry_v *sw_ring_v; /**< address of SW ring for vector PMD */
	volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
	uint16_t nb_tx_desc; /**< number of TX descriptors. */
	uint16_t tx_tail; /**< current value of TDT reg. */
	/**< Start freeing TX buffers if there are fewer free descriptors than
	     this value. */
	uint16_t tx_free_thresh;
	/** Number of TX descriptors to use before RS bit is set. */
	uint16_t tx_rs_thresh;
	/** Number of TX descriptors used since RS bit was set. */
	uint16_t nb_tx_used;
	/** Index of last TX descriptor to have been cleaned. */
	uint16_t last_desc_cleaned;
	/** Total number of TX descriptors ready to be allocated. */
	uint16_t nb_tx_free;
	uint16_t tx_next_dd; /**< next desc to scan for DD bit */
	uint16_t tx_next_rs; /**< next desc to set RS bit */
	uint16_t queue_id; /**< TX queue index. */
	uint16_t reg_idx; /**< TX queue register index. */
	uint8_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold reg. */
	uint32_t txq_flags; /**< Holds flags for this TXq */
	uint32_t ctx_curr; /**< Hardware context states. */
	/** Hardware context history. */
	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
	const struct ixgbe_txq_ops *ops; /**< txq ops */
	uint8_t tx_deferred_start; /**< not in global dev start. */
};
struct ixgbe_txq_ops {
	void (*release_mbufs)(struct ixgbe_tx_queue *txq);
	void (*free_swring)(struct ixgbe_tx_queue *txq);
	void (*reset)(struct ixgbe_tx_queue *txq);
};
/*
 * The "simple" TX queue functions require that the following
 * flags are set when the TX queue is configured:
 *  - ETH_TXQ_FLAGS_NOMULTSEGS
 *  - ETH_TXQ_FLAGS_NOVLANOFFL
 *  - ETH_TXQ_FLAGS_NOXSUMSCTP
 *  - ETH_TXQ_FLAGS_NOXSUMUDP
 *  - ETH_TXQ_FLAGS_NOXSUMTCP
 * and that the RS bit threshold (tx_rs_thresh) is at least equal to
 * RTE_PMD_IXGBE_TX_MAX_BURST.
 */
#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
			    ETH_TXQ_FLAGS_NOOFFLOADS)
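
/*
 * A minimal sketch (not part of this header) of how
 * ixgbe_set_tx_function() picks the fast path based on these flags;
 * the authoritative selection logic is in ixgbe_rxtx.c:
 *
 *	if ((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS &&
 *	    txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)
 *		dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
 *	else
 *		dev->tx_pkt_burst = ixgbe_xmit_pkts;
 */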
/*
 * Populate descriptors with the following info:
 * 1.) buffer_addr = phys_addr + headroom
 * 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
 * 3.) olinfo_status = pkt_len << PAYLEN_SHIFT
 */

/* Defines for Tx descriptor */
#define DCMD_DTYP_FLAGS (IXGBE_ADVTXD_DTYP_DATA |\
			 IXGBE_ADVTXD_DCMD_IFCS |\
			 IXGBE_ADVTXD_DCMD_DEXT |\
			 IXGBE_ADVTXD_DCMD_EOP)
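
/*
 * A minimal sketch (not part of this header) of filling one descriptor
 * per the recipe above; txdp points into tx_ring, pkt is a
 * single-segment mbuf, and IXGBE_ADVTXD_PAYLEN_SHIFT comes from the
 * base driver headers:
 *
 *	txdp->read.buffer_addr =
 *		rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(pkt));
 *	txdp->read.cmd_type_len =
 *		rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt->data_len);
 *	txdp->read.olinfo_status =
 *		rte_cpu_to_le_32(pkt->data_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 */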
/* Takes an ethdev and a queue and sets up the TX function to be used based on
 * the queue parameters. Used in tx_queue_setup by the primary process and then
 * in dev_init by the secondary process when attaching to an existing ethdev.
 */
void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
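
/*
 * Illustrative call sites (not part of this header):
 *
 *	ixgbe_set_tx_function(dev, txq);    // primary: at TX queue setup
 *
 *	txq = dev->data->tx_queues[queue];  // secondary: reuse existing queue
 *	ixgbe_set_tx_function(dev, txq);
 */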
/**
 * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
 *
 * Sets the callback based on the device parameters:
 *  - ixgbe_hw.rx_bulk_alloc_allowed
 *  - rte_eth_dev_data.scattered_rx
 *  - rte_eth_dev_data.lro
 *  - conditions checked in ixgbe_rx_vec_condition_check()
 *
 * This means that the parameters above have to be configured prior to
 * calling this function.
 *
 * @dev rte_eth_dev handle
 */
void ixgbe_set_rx_function(struct rte_eth_dev *dev);
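
/*
 * A minimal usage sketch (not part of this header): the fields listed
 * above must be final before the callback is chosen, so the call belongs
 * at the end of RX initialization, e.g.:
 *
 *	dev->data->scattered_rx = 1;    // decided during RX configuration
 *	ixgbe_set_rx_function(dev);     // now picks the scattered RX path
 */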
uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts);
uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
#ifdef RTE_IXGBE_INC_VECTOR

uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);

#endif /* RTE_IXGBE_INC_VECTOR */

#endif /* _IXGBE_RXTX_H_ */