4 * Copyright 2017 6WIND S.A.
5 * Copyright 2017 Mellanox
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Data plane functions for mlx4 driver.
43 /* Verbs headers do not support -pedantic. */
45 #pragma GCC diagnostic ignored "-Wpedantic"
47 #include <infiniband/verbs.h>
49 #pragma GCC diagnostic error "-Wpedantic"
52 #include <rte_branch_prediction.h>
53 #include <rte_common.h>
56 #include <rte_mempool.h>
57 #include <rte_prefetch.h>
61 #include "mlx4_rxtx.h"
62 #include "mlx4_utils.h"
65 * Pointer-value pair structure used in mlx4_tx_burst_segs() for saving the first
66 * DWORD (32 bits) of a TXBB.
69 volatile struct mlx4_wqe_data_seg *dseg;
73 /** A table to translate Rx completion flags to packet type. */
74 uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
76 * The index to the array should have:
77 * bit[7] - MLX4_CQE_L2_TUNNEL
78 * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
79 * bit[5] - MLX4_CQE_STATUS_UDP
80 * bit[4] - MLX4_CQE_STATUS_TCP
81 * bit[3] - MLX4_CQE_STATUS_IPV4OPT
82 * bit[2] - MLX4_CQE_STATUS_IPV6
83 * bit[1] - MLX4_CQE_STATUS_IPV4F
84 * bit[0] - MLX4_CQE_STATUS_IPV4
85 * giving a total of up to 256 entries.
87 [0x00] = RTE_PTYPE_L2_ETHER,
88 [0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
89 [0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
91 [0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
93 [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
94 [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT,
95 [0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
97 [0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
99 [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
101 [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
103 [0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
105 [0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
107 [0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
109 [0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
111 [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
113 [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
115 [0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
117 [0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
119 [0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
121 /* Tunneled - L3 IPV6 */
122 [0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
123 [0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
124 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
125 [0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
126 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
127 RTE_PTYPE_INNER_L4_FRAG,
128 [0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
129 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
130 RTE_PTYPE_INNER_L4_FRAG,
131 [0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
132 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
133 [0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
134 RTE_PTYPE_INNER_L3_IPV4_EXT,
135 [0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
136 RTE_PTYPE_INNER_L3_IPV4_EXT,
137 [0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
138 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG,
139 /* Tunneled - L3 IPV6, TCP */
140 [0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
141 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
142 RTE_PTYPE_INNER_L4_TCP,
143 [0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
144 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
145 RTE_PTYPE_INNER_L4_FRAG |
146 RTE_PTYPE_INNER_L4_TCP,
147 [0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
148 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
149 RTE_PTYPE_INNER_L4_FRAG |
150 RTE_PTYPE_INNER_L4_TCP,
151 [0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
152 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
153 RTE_PTYPE_INNER_L4_TCP,
154 [0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
155 RTE_PTYPE_INNER_L3_IPV4_EXT |
156 RTE_PTYPE_INNER_L4_TCP,
157 [0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
158 RTE_PTYPE_INNER_L3_IPV4_EXT |
159 RTE_PTYPE_INNER_L4_TCP,
160 [0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
161 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
162 RTE_PTYPE_INNER_L4_TCP,
163 /* Tunneled - L3 IPV6, UDP */
164 [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
165 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
166 RTE_PTYPE_INNER_L4_UDP,
167 [0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
168 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
169 RTE_PTYPE_INNER_L4_FRAG |
170 RTE_PTYPE_INNER_L4_UDP,
171 [0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
172 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
173 RTE_PTYPE_INNER_L4_FRAG |
174 RTE_PTYPE_INNER_L4_UDP,
175 [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
176 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
177 RTE_PTYPE_INNER_L4_UDP,
178 [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
179 RTE_PTYPE_INNER_L3_IPV4_EXT |
180 RTE_PTYPE_INNER_L4_UDP,
181 [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
182 RTE_PTYPE_INNER_L3_IPV4_EXT |
183 RTE_PTYPE_INNER_L4_UDP,
184 [0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
185 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
186 RTE_PTYPE_INNER_L4_UDP,
187 /* Tunneled - L3 IPV4 */
188 [0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
189 [0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
190 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
191 [0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
192 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
193 RTE_PTYPE_INNER_L4_FRAG,
194 [0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
195 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
196 RTE_PTYPE_INNER_L4_FRAG,
197 [0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
198 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
199 [0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
200 RTE_PTYPE_INNER_L3_IPV4_EXT,
201 [0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
202 RTE_PTYPE_INNER_L3_IPV4_EXT,
203 [0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
204 RTE_PTYPE_INNER_L3_IPV4_EXT |
205 RTE_PTYPE_INNER_L4_FRAG,
206 /* Tunneled - L3 IPV4, TCP */
207 [0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
208 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
209 RTE_PTYPE_INNER_L4_TCP,
210 [0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
211 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
212 RTE_PTYPE_INNER_L4_TCP,
213 [0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
214 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
215 RTE_PTYPE_INNER_L4_FRAG |
216 RTE_PTYPE_INNER_L4_TCP,
217 [0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
218 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
219 RTE_PTYPE_INNER_L4_FRAG |
220 RTE_PTYPE_INNER_L4_TCP,
221 [0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
222 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
223 RTE_PTYPE_INNER_L4_TCP,
224 [0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
225 RTE_PTYPE_INNER_L3_IPV4_EXT |
226 RTE_PTYPE_INNER_L4_TCP,
227 [0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
228 RTE_PTYPE_INNER_L3_IPV4_EXT |
229 RTE_PTYPE_INNER_L4_TCP,
230 [0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
231 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
232 RTE_PTYPE_INNER_L4_TCP,
233 /* Tunneled - L3 IPV4, UDP */
234 [0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
235 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
236 RTE_PTYPE_INNER_L4_UDP,
237 [0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
238 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
239 RTE_PTYPE_INNER_L4_UDP,
240 [0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
241 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
242 RTE_PTYPE_INNER_L4_FRAG |
243 RTE_PTYPE_INNER_L4_UDP,
244 [0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
245 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
246 RTE_PTYPE_INNER_L4_FRAG |
247 RTE_PTYPE_INNER_L4_UDP,
248 [0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
249 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
250 RTE_PTYPE_INNER_L4_UDP,
251 [0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
252 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
253 [0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
254 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
255 [0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
256 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
257 RTE_PTYPE_INNER_L4_UDP,
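/*
 * Illustrative sketch, not part of the original driver: with the bit layout
 * documented at the top of this table, a plain TCP over IPv4 completion sets
 * MLX4_CQE_STATUS_TCP (bit 4) and MLX4_CQE_STATUS_IPV4 (bit 0), giving index
 * 0x11. The "example_" helper below is hypothetical.
 */
static inline uint32_t
example_ptype_tcp_ipv4(void)
{
	uint32_t idx = (1u << 4) | (1u << 0); /* bit[4] TCP, bit[0] IPv4 */

	return mlx4_ptype_table[idx]; /* entry [0x11] above */
}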
261 * Stamp a WQE so it won't be reused by the HW.
263 * This routine is used when freeing a WQE that was used by the chip, or when
264 * building a WQ entry has failed, leaving partial information on the queue.
267 * Pointer to the SQ structure.
268 * @param[in, out] wqe
269 * Pointer to the WQE address to stamp. This value is modified on return to
270 * store the address of the next WQE.
276 mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t **wqe)
278 uint32_t stamp = sq->stamp;
279 volatile uint32_t *next_txbb = *wqe;
280 /* Extract the size from the control segment of the WQE. */
281 uint32_t size = RTE_ALIGN((uint32_t)
282 ((((volatile struct mlx4_wqe_ctrl_seg *)
283 next_txbb)->fence_size & 0x3f) << 4),
285 uint32_t size_cd = size;
287 /* Optimize the common case when there is no wrap-around. */
288 if ((uintptr_t)next_txbb + size < (uintptr_t)sq->eob) {
289 /* Stamp the freed descriptor. */
292 next_txbb += MLX4_SQ_STAMP_DWORDS;
293 size_cd -= MLX4_TXBB_SIZE;
296 /* Stamp the freed descriptor. */
299 next_txbb += MLX4_SQ_STAMP_DWORDS;
300 if ((volatile uint8_t *)next_txbb >= sq->eob) {
301 next_txbb = (volatile uint32_t *)sq->buf;
302 /* Flip invalid stamping ownership. */
303 stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT);
306 size_cd -= MLX4_TXBB_SIZE;
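/*
 * Illustrative sketch, not part of the original driver: stamping writes a
 * known value into the first 32 bits of every TXBB covered by the freed WQE,
 * so stale descriptor data is never mistaken for a valid WQE. The "example_"
 * helper is hypothetical and omits the wrap-around handling done above.
 */
static inline void
example_stamp_txbbs(volatile uint32_t *txbb, unsigned int nb_txbbs,
		    uint32_t stamp)
{
	while (nb_txbbs--) {
		*txbb = stamp;                /* first DWORD of the TXBB */
		txbb += MLX4_SQ_STAMP_DWORDS; /* advance to the next TXBB */
	}
}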
314 * Manage Tx completions.
316 * When sending a burst, mlx4_tx_burst() posts several WRs.
317 * To improve performance, a completion event is only required once every
318 * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
319 * for other WRs, but this information would not be used anyway.
322 * Pointer to Tx queue structure.
325 mlx4_txq_complete(struct txq *txq, const unsigned int elts_n,
328 unsigned int elts_tail = txq->elts_tail;
329 struct mlx4_cq *cq = &txq->mcq;
330 volatile struct mlx4_cqe *cqe;
331 uint32_t cons_index = cq->cons_index;
332 volatile uint32_t *first_wqe;
333 volatile uint32_t *next_wqe = (volatile uint32_t *)
334 ((&(*txq->elts)[elts_tail])->wqe);
335 volatile uint32_t *last_wqe;
336 uint16_t mask = (((uintptr_t)sq->eob - (uintptr_t)sq->buf) >>
337 MLX4_TXBB_SHIFT) - 1;
340 * Traverse over all CQ entries reported and handle each WQ entry
344 cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
345 if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
346 !!(cons_index & cq->cqe_cnt)))
350 * Make sure we read the CQE after we read the ownership bit.
353 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
354 MLX4_CQE_OPCODE_ERROR)) {
355 volatile struct mlx4_err_cqe *cqe_err =
356 (volatile struct mlx4_err_cqe *)cqe;
357 ERROR("%p CQE error - vendor syndrome: 0x%x"
359 (void *)txq, cqe_err->vendor_err,
364 /* Get WQE address by index from the CQE. */
365 last_wqe = (volatile uint32_t *)((uintptr_t)sq->buf +
366 ((rte_be_to_cpu_16(cqe->wqe_index) & mask) <<
369 /* Free next descriptor. */
370 first_wqe = next_wqe;
372 mlx4_txq_stamp_freed_wqe(sq, &next_wqe);
374 } while (first_wqe != last_wqe);
377 if (unlikely(pkts == 0))
379 /* Update CQ consumer index. */
380 cq->cons_index = cons_index;
381 *cq->set_ci_db = rte_cpu_to_be_32(cons_index & MLX4_CQ_DB_CI_MASK);
382 txq->elts_comp -= pkts;
384 if (elts_tail >= elts_n)
386 txq->elts_tail = elts_tail;
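/*
 * Illustrative sketch, not part of the original driver: the completion loop
 * above stops when the CQE owner bit no longer matches the parity of the
 * consumer index, i.e. when the entry at cons_index has not been written by
 * HW yet. The "example_" helper is hypothetical.
 */
static inline int
example_cqe_pending(volatile struct mlx4_cqe *cqe, uint32_t cons_index,
		    uint32_t cqe_cnt)
{
	/* Nonzero while the CQE still belongs to HW (no new completion). */
	return !!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
	       !!(cons_index & cqe_cnt);
}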
390 * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
391 * the cloned mbuf is allocated is returned instead.
397 * Memory pool where data is located for given mbuf.
399 static struct rte_mempool *
400 mlx4_txq_mb2mp(struct rte_mbuf *buf)
402 if (unlikely(RTE_MBUF_INDIRECT(buf)))
403 return rte_mbuf_from_indirect(buf)->pool;
408 * Write Tx data segment to the SQ.
411 * Pointer to data segment in SQ.
413 * Memory region lkey.
417 * Big-endian byte count of the data to send.
420 mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
421 uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
423 dseg->addr = rte_cpu_to_be_64(addr);
424 dseg->lkey = rte_cpu_to_be_32(lkey);
425 #if RTE_CACHE_LINE_SIZE < 64
427 * Need a barrier here before writing the byte_count
428 * fields to make sure that all the data is visible
429 * before the byte_count field is set.
430 * Otherwise, if the segment begins a new cacheline,
431 * the HCA prefetcher could grab the 64-byte chunk and
432 * get a valid (!= 0xffffffff) byte count but stale
433 * data, and end up sending the wrong data.
436 #endif /* RTE_CACHE_LINE_SIZE */
437 dseg->byte_count = byte_count;
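/*
 * Illustrative usage sketch, not part of the original driver: filling a
 * single data segment for a one-segment mbuf. The "example_" helper is
 * hypothetical; the lkey would normally come from mlx4_txq_mp2mr() as done
 * in the burst functions below.
 */
static inline void
example_fill_one_seg(volatile struct mlx4_wqe_data_seg *dseg,
		     struct rte_mbuf *buf, uint32_t lkey)
{
	mlx4_fill_tx_data_seg(dseg, lkey,
			      rte_pktmbuf_mtod(buf, uintptr_t),
			      rte_cpu_to_be_32(buf->data_len));
}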
441 * Write data segments of a multi-segment packet.
444 * Pointer to the first packet mbuf.
446 * Pointer to Tx queue structure.
448 * Pointer to the WQE control segment.
451 * Pointer to the next WQE control segment on success, NULL otherwise.
453 static volatile struct mlx4_wqe_ctrl_seg *
454 mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
455 volatile struct mlx4_wqe_ctrl_seg *ctrl)
457 struct pv *pv = (struct pv *)txq->bounce_buf;
458 struct mlx4_sq *sq = &txq->msq;
459 struct rte_mbuf *sbuf = buf;
462 int nb_segs = buf->nb_segs;
464 volatile struct mlx4_wqe_data_seg *dseg =
465 (volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
467 ctrl->fence_size = 1 + nb_segs;
468 wqe_size = RTE_ALIGN((uint32_t)(ctrl->fence_size << MLX4_SEG_SHIFT),
470 /* Validate WQE size and WQE space in the send queue. */
471 if (sq->remain_size < wqe_size ||
472 wqe_size > MLX4_MAX_WQE_SIZE)
475 * Fill the data segments with buffer information.
476 * The first TXBB of the WQE always starts with the control segment,
477 * so jump to the tail TXBB data segment code to fill the first
478 * WQE's data segments.
482 /* Memory region key (big endian) for this memory pool. */
483 lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
484 if (unlikely(lkey == (uint32_t)-1)) {
485 DEBUG("%p: unable to get MP <-> MR association",
489 /* Handle WQE wraparound. */
491 (volatile struct mlx4_wqe_data_seg *)sq->eob)
492 dseg = (volatile struct mlx4_wqe_data_seg *)
494 dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
495 dseg->lkey = rte_cpu_to_be_32(lkey);
497 * This data segment starts at the beginning of a new
498 * TXBB, so we need to postpone its byte_count writing
501 pv[pv_counter].dseg = dseg;
503 * Zero length segment is treated as inline segment
506 pv[pv_counter++].val = rte_cpu_to_be_32(sbuf->data_len ?
507 sbuf->data_len : 0x80000000);
512 /* Jump to default if there are more than two segments remaining. */
515 lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
516 if (unlikely(lkey == (uint32_t)-1)) {
517 DEBUG("%p: unable to get MP <-> MR association",
521 mlx4_fill_tx_data_seg(dseg, lkey,
522 rte_pktmbuf_mtod(sbuf, uintptr_t),
523 rte_cpu_to_be_32(sbuf->data_len ?
531 lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
532 if (unlikely(lkey == (uint32_t)-1)) {
533 DEBUG("%p: unable to get MP <-> MR association",
537 mlx4_fill_tx_data_seg(dseg, lkey,
538 rte_pktmbuf_mtod(sbuf, uintptr_t),
539 rte_cpu_to_be_32(sbuf->data_len ?
547 lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
548 if (unlikely(lkey == (uint32_t)-1)) {
549 DEBUG("%p: unable to get MP <-> MR association",
553 mlx4_fill_tx_data_seg(dseg, lkey,
554 rte_pktmbuf_mtod(sbuf, uintptr_t),
555 rte_cpu_to_be_32(sbuf->data_len ?
568 /* Write the first DWORD of each TXBB saved earlier. */
570 /* Need a barrier here before writing the byte_count. */
572 for (--pv_counter; pv_counter >= 0; pv_counter--)
573 pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
575 sq->remain_size -= wqe_size;
576 /* Align next WQE address to the next TXBB. */
577 return (volatile struct mlx4_wqe_ctrl_seg *)
578 ((volatile uint8_t *)ctrl + wqe_size);
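/*
 * Illustrative sketch, not part of the original driver: fence_size counts
 * 16-byte units (the control segment plus one per data segment), so the WQE
 * size in bytes is that count shifted by MLX4_SEG_SHIFT and rounded up to a
 * whole TXBB. The "example_" helper is hypothetical and assumes the
 * alignment target is MLX4_TXBB_SIZE.
 */
static inline uint32_t
example_wqe_size_bytes(uint8_t fence_size)
{
	return RTE_ALIGN((uint32_t)fence_size << MLX4_SEG_SHIFT,
			 MLX4_TXBB_SIZE);
}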
582 * DPDK callback for Tx.
585 * Generic pointer to Tx queue structure.
587 * Packets to transmit.
589 * Number of packets in array.
592 * Number of packets successfully transmitted (<= pkts_n).
595 mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
597 struct txq *txq = (struct txq *)dpdk_txq;
598 unsigned int elts_head = txq->elts_head;
599 const unsigned int elts_n = txq->elts_n;
600 unsigned int bytes_sent = 0;
603 struct mlx4_sq *sq = &txq->msq;
604 volatile struct mlx4_wqe_ctrl_seg *ctrl;
607 assert(txq->elts_comp_cd != 0);
608 if (likely(txq->elts_comp != 0))
609 mlx4_txq_complete(txq, elts_n, sq);
610 max = (elts_n - (elts_head - txq->elts_tail));
614 assert(max <= elts_n);
615 /* Always leave one free entry in the ring. */
619 elt = &(*txq->elts)[elts_head];
620 /* Each element saves its appropriate work queue. */
622 for (i = 0; (i != max); ++i) {
623 struct rte_mbuf *buf = pkts[i];
624 unsigned int elts_head_next =
625 (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
626 struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
627 uint32_t owner_opcode = sq->owner_opcode;
628 volatile struct mlx4_wqe_data_seg *dseg =
629 (volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
630 volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
637 /* Clean up old buffer. */
638 if (likely(elt->buf != NULL)) {
639 struct rte_mbuf *tmp = elt->buf;
643 memset(&elt->buf, 0x66, sizeof(struct rte_mbuf *));
645 /* Faster than rte_pktmbuf_free(). */
647 struct rte_mbuf *next = tmp->next;
649 rte_pktmbuf_free_seg(tmp);
651 } while (tmp != NULL);
653 RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
654 if (buf->nb_segs == 1) {
655 /* Validate WQE space in the send queue. */
656 if (sq->remain_size < MLX4_TXBB_SIZE) {
660 lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
661 if (unlikely(lkey == (uint32_t)-1)) {
662 /* MR does not exist. */
663 DEBUG("%p: unable to get MP <-> MR association",
668 mlx4_fill_tx_data_seg(dseg++, lkey,
669 rte_pktmbuf_mtod(buf, uintptr_t),
670 rte_cpu_to_be_32(buf->data_len));
671 /* Set WQE size in 16-byte units. */
672 ctrl->fence_size = 0x2;
673 sq->remain_size -= MLX4_TXBB_SIZE;
674 /* Align next WQE address to the next TXBB. */
675 ctrl_next = ctrl + 0x4;
677 ctrl_next = mlx4_tx_burst_segs(buf, txq, ctrl);
683 /* Handle SQ ring wrap-around. */
684 if ((volatile uint8_t *)ctrl_next >= sq->eob) {
685 ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
686 ((volatile uint8_t *)ctrl_next - sq->size);
687 /* Flip HW valid ownership. */
688 sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT;
691 * For raw Ethernet, the SOLICIT flag is used to indicate
692 * that no ICRC should be calculated.
694 if (--txq->elts_comp_cd == 0) {
695 txq->elts_comp_cd = txq->elts_comp_cd_init;
696 srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
697 MLX4_WQE_CTRL_CQ_UPDATE);
699 srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
701 /* Enable HW checksum offload if requested */
704 (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
705 const uint64_t is_tunneled = (buf->ol_flags &
707 PKT_TX_TUNNEL_VXLAN));
709 if (is_tunneled && txq->csum_l2tun) {
710 owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
711 MLX4_WQE_CTRL_IL4_HDR_CSUM;
712 if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
714 RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
717 RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
718 MLX4_WQE_CTRL_TCP_UDP_CSUM);
723 * Copy the destination MAC address to the WQE; this allows
724 * loopback in eSwitch, so that VFs and the PF can
725 * communicate with each other.
727 srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
728 ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
733 ctrl->srcrb_flags = srcrb.flags;
735 * Make sure descriptor is fully written before
736 * setting ownership bit (because HW can start
737 * executing as soon as we do).
740 ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode);
742 bytes_sent += buf->pkt_len;
743 elts_head = elts_head_next;
744 elt_next->wqe = ctrl_next;
748 /* Take a shortcut if nothing must be sent. */
749 if (unlikely(i == 0))
751 /* Increment send statistics counters. */
752 txq->stats.opackets += i;
753 txq->stats.obytes += bytes_sent;
754 /* Make sure that descriptors are written before doorbell record. */
756 /* Ring QP doorbell. */
757 rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
758 txq->elts_head = elts_head;
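/*
 * Illustrative sketch, not part of the original driver: completion requests
 * are rate limited as described above; only one WQE out of every
 * txq->elts_comp_cd_init posts MLX4_WQE_CTRL_CQ_UPDATE, keeping CQE
 * processing cheap at the price of freeing sent mbufs in batches. The
 * "example_" helper is hypothetical.
 */
static inline rte_be32_t
example_srcrb_flags(struct txq *txq)
{
	if (--txq->elts_comp_cd == 0) {
		txq->elts_comp_cd = txq->elts_comp_cd_init;
		return RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
				MLX4_WQE_CTRL_CQ_UPDATE);
	}
	return RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
}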
764 * Translate Rx completion flags to packet type.
770 * Packet type for struct rte_mbuf.
772 static inline uint32_t
773 rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
774 uint32_t l2tun_offload)
777 uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
778 uint32_t status = rte_be_to_cpu_32(cqe->status);
781 * The index to the array should have:
782 * bit[7] - MLX4_CQE_L2_TUNNEL
783 * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
785 if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
786 idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
787 ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
789 * The index to the array should have:
790 * bit[5] - MLX4_CQE_STATUS_UDP
791 * bit[4] - MLX4_CQE_STATUS_TCP
792 * bit[3] - MLX4_CQE_STATUS_IPV4OPT
793 * bit[2] - MLX4_CQE_STATUS_IPV6
794 * bit[1] - MLX4_CQE_STATUS_IPV4F
795 * bit[0] - MLX4_CQE_STATUS_IPV4
796 * giving a total of up to 256 entries.
798 idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
799 return mlx4_ptype_table[idx];
803 * Translate Rx completion flags to offload flags.
806 * Rx completion flags returned by mlx4_cqe_flags().
808 * Whether Rx checksums are enabled.
810 * Whether Rx L2 tunnel checksums are enabled.
813 * Offload flags (ol_flags) in mbuf format.
815 static inline uint32_t
816 rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
818 uint32_t ol_flags = 0;
822 mlx4_transpose(flags,
823 MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
824 PKT_RX_IP_CKSUM_GOOD) |
825 mlx4_transpose(flags,
826 MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
827 PKT_RX_L4_CKSUM_GOOD);
828 if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
830 mlx4_transpose(flags,
831 MLX4_CQE_L2_TUNNEL_IPOK,
832 PKT_RX_IP_CKSUM_GOOD) |
833 mlx4_transpose(flags,
834 MLX4_CQE_L2_TUNNEL_L4_CSUM,
835 PKT_RX_L4_CKSUM_GOOD);
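/*
 * Illustrative sketch, not part of the original driver and not the
 * definition of mlx4_transpose(): moving a single flag from a HW bit
 * position to a mbuf ol_flags position is the kind of translation performed
 * above. The "example_" helper is hypothetical.
 */
static inline uint64_t
example_move_flag(uint32_t flags, uint32_t hw_bit, uint64_t mbuf_flag)
{
	return (flags & hw_bit) ? mbuf_flag : 0;
}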
840 * Extract checksum information from CQE flags.
843 * Pointer to CQE structure.
845 * Whether Rx checksums are enabled.
847 * Whether Rx L2 tunnel checksums are enabled.
850 * CQE checksum information.
852 static inline uint32_t
853 mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
858 * The relevant bits are in different locations in their
859 * CQE fields, therefore we can join them in one 32-bit
863 flags = (rte_be_to_cpu_32(cqe->status) &
864 MLX4_CQE_STATUS_IPV4_CSUM_OK);
866 flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
867 (MLX4_CQE_L2_TUNNEL |
868 MLX4_CQE_L2_TUNNEL_IPOK |
869 MLX4_CQE_L2_TUNNEL_L4_CSUM |
870 MLX4_CQE_L2_TUNNEL_IPV4));
875 * Poll one CQE from CQ.
878 * Pointer to the receive queue structure.
883 * Number of bytes of the CQE, 0 in case there is no completion.
886 mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
889 volatile struct mlx4_cqe *cqe = NULL;
890 struct mlx4_cq *cq = &rxq->mcq;
892 cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
893 if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
894 !!(cq->cons_index & cq->cqe_cnt))
897 * Make sure we read CQ entry contents after we've checked the
901 assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
902 assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
903 MLX4_CQE_OPCODE_ERROR);
904 ret = rte_be_to_cpu_32(cqe->byte_cnt);
912 * DPDK callback for Rx with scattered packets support.
915 * Generic pointer to Rx queue structure.
917 * Array to store received packets.
919 * Maximum number of packets in array.
922 * Number of packets successfully received (<= pkts_n).
925 mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
927 struct rxq *rxq = dpdk_rxq;
928 const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
929 const uint16_t sges_n = rxq->sges_n;
930 struct rte_mbuf *pkt = NULL;
931 struct rte_mbuf *seg = NULL;
933 uint32_t rq_ci = rxq->rq_ci << sges_n;
937 volatile struct mlx4_cqe *cqe;
938 uint32_t idx = rq_ci & wr_cnt;
939 struct rte_mbuf *rep = (*rxq->elts)[idx];
940 volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];
942 /* Update the 'next' pointer of the previous segment. */
948 rep = rte_mbuf_raw_alloc(rxq->mp);
949 if (unlikely(rep == NULL)) {
950 ++rxq->stats.rx_nombuf;
953 * No buffers before we even started,
959 assert(pkt != (*rxq->elts)[idx]);
963 rte_mbuf_raw_free(pkt);
969 /* Looking for the new packet. */
970 len = mlx4_cq_poll_one(rxq, &cqe);
972 rte_mbuf_raw_free(rep);
975 if (unlikely(len < 0)) {
976 /* Rx error, packet is likely too large. */
977 rte_mbuf_raw_free(rep);
978 ++rxq->stats.idropped;
982 /* Update packet information. */
984 rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
985 pkt->ol_flags = PKT_RX_RSS_HASH;
986 pkt->hash.rss = cqe->immed_rss_invalid;
988 if (rxq->csum | rxq->csum_l2tun) {
995 rxq_cq_to_ol_flags(flags,
1001 rep->port = rxq->port_id;
1002 rep->data_len = seg->data_len;
1003 rep->data_off = seg->data_off;
1004 (*rxq->elts)[idx] = rep;
1006 * Fill NIC descriptor with the new buffer. The lkey and size
1007 * of the buffers are already known, only the buffer address changes.
1010 scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
1011 if (len > seg->data_len) {
1012 len -= seg->data_len;
1017 /* The last segment. */
1018 seg->data_len = len;
1019 /* Increment bytes counter. */
1020 rxq->stats.ibytes += pkt->pkt_len;
1021 /* Return packet. */
1027 /* Align consumer index to the next stride. */
1032 if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
1034 /* Update the consumer index. */
1035 rxq->rq_ci = rq_ci >> sges_n;
1037 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1038 *rxq->mcq.set_ci_db =
1039 rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
1040 /* Increment packets counter. */
1041 rxq->stats.ipackets += i;
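/*
 * Illustrative sketch, not part of the original driver: the consumer index
 * updates above must only reach the device after all descriptor writes,
 * hence a write memory barrier is assumed right before the doorbell records
 * are written. The "example_" helper is hypothetical.
 */
static inline void
example_rx_update_doorbells(struct rxq *rxq)
{
	rte_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	*rxq->mcq.set_ci_db =
		rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
}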
1046 * Dummy DPDK callback for Tx.
1048 * This function is used to temporarily replace the real callback during
1049 * unsafe control operations on the queue, or in case of error.
1052 * Generic pointer to Tx queue structure.
1054 * Packets to transmit.
1056 * Number of packets in array.
1059 * Number of packets successfully transmitted (<= pkts_n).
1062 mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
1071 * Dummy DPDK callback for Rx.
1073 * This function is used to temporarily replace the real callback during
1074 * unsafe control operations on the queue, or in case of error.
1077 * Generic pointer to Rx queue structure.
1079 * Array to store received packets.
1081 * Maximum number of packets in array.
1084 * Number of packets successfully received (<= pkts_n).
1087 mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)