/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox Technologies, Ltd
 */

/**
 * @file
 * Data plane functions for mlx4 driver.
 */

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "mlx4.h"
#include "mlx4_prm.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/**
 * Pointer-value pair structure used in tx_post_send for saving the first
 * DWORD (32-bit) of a TXBB.
 */
struct pv {
	volatile struct mlx4_wqe_data_seg *dseg;
	uint32_t val;
};

/** A table to translate Rx completion flags to packet type. */
uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
	/*
	 * The index to the array should have:
	 * bit[7] - MLX4_CQE_L2_TUNNEL
	 * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
	 * bit[5] - MLX4_CQE_STATUS_UDP
	 * bit[4] - MLX4_CQE_STATUS_TCP
	 * bit[3] - MLX4_CQE_STATUS_IPV4OPT
	 * bit[2] - MLX4_CQE_STATUS_IPV6
	 * bit[1] - MLX4_CQE_STATUS_IPV4F
	 * bit[0] - MLX4_CQE_STATUS_IPV4
	 * giving a total of up to 256 entries.
	 */
	[0x00] = RTE_PTYPE_L2_ETHER,
	[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_NONFRAG,
	[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_FRAG,
	[0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
	[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT,
	[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_FRAG,
	[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_TCP,
	[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_TCP,
	[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_TCP,
	[0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_TCP,
	[0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_TCP,
	[0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_TCP,
	[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_UDP,
	[0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_L4_UDP,
	[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_L4_UDP,
	[0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_UDP,
	[0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_UDP,
	[0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
		 RTE_PTYPE_L4_UDP,
	/* Tunneled - L3 IPV6 */
	[0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
	[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
	[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
	[0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT,
	[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT,
	[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG,
	/* Tunneled - L3 IPV6, TCP */
	[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_TCP,
	[0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_TCP,
	/* Tunneled - L3 IPV6, UDP */
	[0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_UDP,
	/* Tunneled - L3 IPV4 */
	[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
	[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
	[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xc3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG,
	[0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
	[0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT,
	[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT,
	[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_FRAG,
	/* Tunneled - L3 IPV4, TCP */
	[0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT |
		 RTE_PTYPE_INNER_L4_TCP,
	[0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_TCP,
	/* Tunneled - L3 IPV4, UDP */
	[0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L4_UDP,
	[0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
	[0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
	[0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		 RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
		 RTE_PTYPE_INNER_L4_UDP,
};

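/*
 * Usage sketch for the table above (illustrative only; the guard macro is
 * hypothetical and never defined, so this is compiled out). A plain TCP
 * over IPv4 completion sets bit[4] (MLX4_CQE_STATUS_TCP) and bit[0]
 * (MLX4_CQE_STATUS_IPV4), giving index 0x11.
 */
#ifdef MLX4_PTYPE_TABLE_EXAMPLE
static void
mlx4_ptype_table_example(void)
{
	uint32_t idx = (1 << 4) | (1 << 0); /* TCP + IPv4 status bits. */

	assert(mlx4_ptype_table[idx] ==
	       (RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
		RTE_PTYPE_L4_TCP));
}
#endif
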
/**
 * Stamp a TXBB burst so it won't be reused by the HW.
 *
 * This routine is used when freeing a WQE that was used by the chip, or
 * when building a WQ entry has failed, leaving partial information on the
 * queue.
 *
 * @param sq
 *   Pointer to the SQ structure.
 * @param start
 *   Pointer to the first TXBB to stamp.
 * @param end
 *   Pointer to the TXBB following the last one to stamp.
 *
 * @return
 *   Stamping burst size in byte units.
 */
static uint32_t
mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start,
			 volatile uint32_t *end)
{
	uint32_t stamp = sq->stamp;
	int32_t size = (intptr_t)end - (intptr_t)start;

	assert(start != end);
	/* Hold SQ ring wrap around. */
	if (size < 0) {
		size = (int32_t)sq->size + size;
		do {
			*start = stamp;
			start += MLX4_SQ_STAMP_DWORDS;
		} while (start != (volatile uint32_t *)sq->eob);
		start = (volatile uint32_t *)sq->buf;
		/* Flip invalid stamping ownership. */
		stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT);
		sq->stamp = stamp;
		if (start == end)
			return size;
	}
	do {
		*start = stamp;
		start += MLX4_SQ_STAMP_DWORDS;
	} while (start != end);
	return (uint32_t)size;
}

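/*
 * Worked example of the wrap-around size math above (illustrative only;
 * the guard macro is hypothetical, so this never compiles in). With a
 * 4096-byte SQ, a burst starting 128 bytes before the end of the ring
 * and ending 64 bytes past its beginning yields a raw difference of
 * 64 - 3968 = -3904 bytes; adding sq->size recovers the 192 bytes
 * actually stamped.
 */
#ifdef MLX4_STAMP_SIZE_EXAMPLE
static void
mlx4_stamp_size_example(void)
{
	const int32_t sq_size = 4096;
	int32_t size = 64 - (4096 - 128); /* End offset - start offset. */

	if (size < 0)
		size += sq_size;
	assert(size == 192);
}
#endif
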
/**
 * Manage Tx completions.
 *
 * When sending a burst, mlx4_tx_burst() posts several WRs.
 * To improve performance, a completion event is only required once every
 * MLX4_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information
 * for other WRs, but this information would not be used anyway.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param elts_m
 *   Tx elements number mask.
 * @param sq
 *   Pointer to the SQ structure.
 */
static void
mlx4_txq_complete(struct txq *txq, const unsigned int elts_m,
		  struct mlx4_sq *sq)
{
	unsigned int elts_tail = txq->elts_tail;
	struct mlx4_cq *cq = &txq->mcq;
	volatile struct mlx4_cqe *cqe;
	uint32_t completed;
	uint32_t cons_index = cq->cons_index;
	volatile uint32_t *first_txbb;

	/*
	 * Traverse over all CQ entries reported and handle each WQ entry
	 * reported by them.
	 */
	do {
		cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
		if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		    !!(cons_index & cq->cqe_cnt)))
			break;
#ifndef NDEBUG
		/*
		 * Make sure we read the CQE after we read the ownership bit.
		 */
		rte_io_rmb();
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
			     MLX4_CQE_OPCODE_ERROR)) {
			volatile struct mlx4_err_cqe *cqe_err =
				(volatile struct mlx4_err_cqe *)cqe;
			ERROR("%p CQE error - vendor syndrome: 0x%x"
			      " syndrome: 0x%x\n",
			      (void *)txq, cqe_err->vendor_err,
			      cqe_err->syndrome);
			break;
		}
#endif /* NDEBUG */
		cons_index++;
	} while (1);
	completed = (cons_index - cq->cons_index) * txq->elts_comp_cd_init;
	if (unlikely(!completed))
		return;
	/* First stamping address is the end of the last one. */
	first_txbb = (&(*txq->elts)[elts_tail & elts_m])->eocb;
	elts_tail += completed;
	/* The new tail element holds the end address. */
	sq->remain_size += mlx4_txq_stamp_freed_wqe(sq, first_txbb,
			(&(*txq->elts)[elts_tail & elts_m])->eocb);
	/* Update CQ consumer index. */
	cq->cons_index = cons_index;
	*cq->set_ci_db = rte_cpu_to_be_32(cons_index & MLX4_CQ_DB_CI_MASK);
	txq->elts_tail = elts_tail;
}

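/*
 * Illustrative sketch of the batched completion accounting above
 * (hypothetical guard macro, compiled out): each CQE stands for
 * elts_comp_cd_init packets, so consuming two new CQEs with an init
 * value of 8 releases 16 Tx elements at once.
 */
#ifdef MLX4_TXQ_COMPLETE_EXAMPLE
static void
mlx4_txq_complete_example(void)
{
	const uint32_t elts_comp_cd_init = 8; /* One CQE per 8 packets. */
	uint32_t cons_index = 2; /* Two new CQEs consumed... */
	uint32_t completed = (cons_index - 0) * elts_comp_cd_init;

	assert(completed == 16); /* ...account for 16 Tx elements. */
}
#endif
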
/**
 * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
 * the cloned mbuf is allocated is returned instead.
 *
 * @param buf
 *   Pointer to mbuf.
 *
 * @return
 *   Memory pool where data is located for given mbuf.
 */
static struct rte_mempool *
mlx4_txq_mb2mp(struct rte_mbuf *buf)
{
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

/**
 * Write Tx data segment to the SQ.
 *
 * @param dseg
 *   Pointer to data segment in SQ.
 * @param lkey
 *   Memory region lkey.
 * @param addr
 *   Data address.
 * @param byte_count
 *   Big endian bytes count of the data to send.
 */
static inline void
mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
		      uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
{
	dseg->addr = rte_cpu_to_be_64(addr);
	dseg->lkey = rte_cpu_to_be_32(lkey);
#if RTE_CACHE_LINE_SIZE < 64
	/*
	 * Need a barrier here before writing the byte_count
	 * fields to make sure that all the data is visible
	 * before the byte_count field is set.
	 * Otherwise, if the segment begins a new cacheline,
	 * the HCA prefetcher could grab the 64-byte chunk and
	 * get a valid (!= 0xffffffff) byte count but stale
	 * data, and end up sending the wrong data.
	 */
	rte_io_wmb();
#endif /* RTE_CACHE_LINE_SIZE */
	dseg->byte_count = byte_count;
}

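/*
 * Typical call for a single-segment mbuf (a minimal sketch; "txq", "buf"
 * and "dseg" must come from the caller's context, as in mlx4_tx_burst()
 * further below):
 *
 *	lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
 *	mlx4_fill_tx_data_seg(dseg, lkey,
 *			      rte_pktmbuf_mtod(buf, uintptr_t),
 *			      rte_cpu_to_be_32(buf->data_len));
 */
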
/**
 * Write data segments of multi-segment packet.
 *
 * @param buf
 *   Pointer to the first packet mbuf.
 * @param txq
 *   Pointer to Tx queue structure.
 * @param ctrl
 *   Pointer to the WQE control segment.
 *
 * @return
 *   Pointer to the next WQE control segment on success, NULL otherwise.
 */
static volatile struct mlx4_wqe_ctrl_seg *
mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
		   volatile struct mlx4_wqe_ctrl_seg *ctrl)
{
	struct pv *pv = (struct pv *)txq->bounce_buf;
	struct mlx4_sq *sq = &txq->msq;
	struct rte_mbuf *sbuf = buf;
	uint32_t lkey;
	int pv_counter = 0;
	int nb_segs = buf->nb_segs;
	uint32_t wqe_size;
	volatile struct mlx4_wqe_data_seg *dseg =
		(volatile struct mlx4_wqe_data_seg *)(ctrl + 1);

	ctrl->fence_size = 1 + nb_segs;
	wqe_size = RTE_ALIGN((uint32_t)(ctrl->fence_size << MLX4_SEG_SHIFT),
			     MLX4_TXBB_SIZE);
	/* Validate WQE size and WQE space in the send queue. */
	if (sq->remain_size < wqe_size ||
	    wqe_size > MLX4_MAX_WQE_SIZE)
		return NULL;
	/*
	 * Fill the data segments with buffer information.
	 * First WQE TXBB head segment is always control segment,
	 * so jump to tail TXBB data segments code for the first
	 * WQE data segments filling.
	 */
	goto txbb_tail_segs;
txbb_head_seg:
	/* Memory region key (big endian) for this memory pool. */
	lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
	if (unlikely(lkey == (uint32_t)-1)) {
		DEBUG("%p: unable to get MP <-> MR association",
		      (void *)txq);
		return NULL;
	}
	/* Handle WQE wraparound. */
	if (dseg >=
	    (volatile struct mlx4_wqe_data_seg *)sq->eob)
		dseg = (volatile struct mlx4_wqe_data_seg *)
			sq->buf;
	dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
	dseg->lkey = rte_cpu_to_be_32(lkey);
	/*
	 * This data segment starts at the beginning of a new
	 * TXBB, so we need to postpone its byte_count writing
	 * for later.
	 */
	pv[pv_counter].dseg = dseg;
	/*
	 * Zero length segment is treated as inline segment
	 * with zero data.
	 */
	pv[pv_counter++].val = rte_cpu_to_be_32(sbuf->data_len ?
						sbuf->data_len : 0x80000000);
	sbuf = sbuf->next;
	dseg++;
	nb_segs--;
txbb_tail_segs:
	/* Jump to default if there are more than two segments remaining. */
	switch (nb_segs) {
	default:
		lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
		if (unlikely(lkey == (uint32_t)-1)) {
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			return NULL;
		}
		mlx4_fill_tx_data_seg(dseg, lkey,
				      rte_pktmbuf_mtod(sbuf, uintptr_t),
				      rte_cpu_to_be_32(sbuf->data_len ?
						       sbuf->data_len :
						       0x80000000));
		sbuf = sbuf->next;
		dseg++;
		nb_segs--;
		/* fallthrough */
	case 2:
		lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
		if (unlikely(lkey == (uint32_t)-1)) {
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			return NULL;
		}
		mlx4_fill_tx_data_seg(dseg, lkey,
				      rte_pktmbuf_mtod(sbuf, uintptr_t),
				      rte_cpu_to_be_32(sbuf->data_len ?
						       sbuf->data_len :
						       0x80000000));
		sbuf = sbuf->next;
		dseg++;
		nb_segs--;
		/* fallthrough */
	case 1:
		lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
		if (unlikely(lkey == (uint32_t)-1)) {
			DEBUG("%p: unable to get MP <-> MR association",
			      (void *)txq);
			return NULL;
		}
		mlx4_fill_tx_data_seg(dseg, lkey,
				      rte_pktmbuf_mtod(sbuf, uintptr_t),
				      rte_cpu_to_be_32(sbuf->data_len ?
						       sbuf->data_len :
						       0x80000000));
		nb_segs--;
		if (nb_segs) {
			sbuf = sbuf->next;
			dseg++;
			goto txbb_head_seg;
		}
		/* fallthrough */
	case 0:
		break;
	}
	/* Write the first DWORD of each TXBB saved earlier. */
	if (pv_counter) {
		/* Need a barrier here before writing the byte_count. */
		rte_io_wmb();
		for (--pv_counter; pv_counter >= 0; pv_counter--)
			pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
	}
	sq->remain_size -= wqe_size;
	/* Align next WQE address to the next TXBB. */
	return (volatile struct mlx4_wqe_ctrl_seg *)
		((volatile uint8_t *)ctrl + wqe_size);
}

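/*
 * Worked example of the WQE sizing above (illustrative only, assuming
 * 16-byte fence_size units via MLX4_SEG_SHIFT and 64-byte TXBBs): a
 * 3-segment packet yields fence_size = 1 + 3 = 4 units, i.e. 64 bytes,
 * which RTE_ALIGN() keeps at exactly one TXBB; a 5-segment packet needs
 * 96 bytes and is therefore rounded up to two TXBBs (128 bytes).
 */
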
/**
 * DPDK callback for Tx.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	unsigned int elts_head = txq->elts_head;
	const unsigned int elts_n = txq->elts_n;
	const unsigned int elts_m = elts_n - 1;
	unsigned int bytes_sent = 0;
	unsigned int i;
	unsigned int max = elts_head - txq->elts_tail;
	struct mlx4_sq *sq = &txq->msq;
	volatile struct mlx4_wqe_ctrl_seg *ctrl;
	struct txq_elt *elt;

	assert(txq->elts_comp_cd != 0);
	if (likely(max >= txq->elts_comp_cd_init))
		mlx4_txq_complete(txq, elts_m, sq);
	max = elts_n - max;
	assert(max >= 1);
	assert(max <= elts_n);
	/* Always leave one free entry in the ring. */
	--max;
	if (max > pkts_n)
		max = pkts_n;
	elt = &(*txq->elts)[elts_head & elts_m];
	/* First Tx burst element saves the next WQE control segment. */
	ctrl = elt->wqe;
	for (i = 0; (i != max); ++i) {
		struct rte_mbuf *buf = pkts[i];
		struct txq_elt *elt_next = &(*txq->elts)[++elts_head & elts_m];
		uint32_t owner_opcode = sq->owner_opcode;
		volatile struct mlx4_wqe_data_seg *dseg =
				(volatile struct mlx4_wqe_data_seg *)(ctrl + 1);
		volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
		union {
			uint32_t flags;
			uint16_t flags16[2];
		} srcrb;
		uint32_t lkey;

		/* Clean up old buffer. */
		if (likely(elt->buf != NULL)) {
			struct rte_mbuf *tmp = elt->buf;

#ifndef NDEBUG
			/* Poisoning. */
			memset(&elt->buf, 0x66, sizeof(struct rte_mbuf *));
#endif
			/* Faster than rte_pktmbuf_free(). */
			do {
				struct rte_mbuf *next = tmp->next;

				rte_pktmbuf_free_seg(tmp);
				tmp = next;
			} while (tmp != NULL);
		}
		RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
		if (buf->nb_segs == 1) {
			/* Validate WQE space in the send queue. */
			if (sq->remain_size < MLX4_TXBB_SIZE) {
				elt->buf = NULL;
				break;
			}
			lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
			if (unlikely(lkey == (uint32_t)-1)) {
				/* MR does not exist. */
				DEBUG("%p: unable to get MP <-> MR association",
				      (void *)txq);
				elt->buf = NULL;
				break;
			}
			mlx4_fill_tx_data_seg(dseg++, lkey,
					      rte_pktmbuf_mtod(buf, uintptr_t),
					      rte_cpu_to_be_32(buf->data_len));
			/* Set WQE size in 16-byte units. */
			ctrl->fence_size = 0x2;
			sq->remain_size -= MLX4_TXBB_SIZE;
			/* Align next WQE address to the next TXBB. */
			ctrl_next = ctrl + 0x4;
		} else {
			ctrl_next = mlx4_tx_burst_segs(buf, txq, ctrl);
			if (!ctrl_next) {
				elt->buf = NULL;
				break;
			}
		}
		/* Hold SQ ring wrap around. */
		if ((volatile uint8_t *)ctrl_next >= sq->eob) {
			ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
				((volatile uint8_t *)ctrl_next - sq->size);
			/* Flip HW valid ownership. */
			sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT;
		}
		/*
		 * For raw Ethernet, the SOLICIT flag is used to indicate
		 * that no ICRC should be calculated.
		 */
		if (--txq->elts_comp_cd == 0) {
			/* Save the completion burst end address. */
			elt_next->eocb = (volatile uint32_t *)ctrl_next;
			txq->elts_comp_cd = txq->elts_comp_cd_init;
			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
					       MLX4_WQE_CTRL_CQ_UPDATE);
		} else {
			srcrb.flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
		}
		/* Enable HW checksum offload if requested. */
		if (txq->csum &&
		    (buf->ol_flags &
		     (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))) {
			const uint64_t is_tunneled = (buf->ol_flags &
						      (PKT_TX_TUNNEL_GRE |
						       PKT_TX_TUNNEL_VXLAN));

			if (is_tunneled && txq->csum_l2tun) {
				owner_opcode |= MLX4_WQE_CTRL_IIP_HDR_CSUM |
						MLX4_WQE_CTRL_IL4_HDR_CSUM;
				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
					srcrb.flags |=
					    RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM);
			} else {
				srcrb.flags |=
					RTE_BE32(MLX4_WQE_CTRL_IP_HDR_CSUM |
						 MLX4_WQE_CTRL_TCP_UDP_CSUM);
			}
		}
		if (txq->lb) {
			/*
			 * Copy destination MAC address to the WQE, this allows
			 * loopback in eSwitch, so that VFs and PF can
			 * communicate with each other.
			 */
			srcrb.flags16[0] = *(rte_pktmbuf_mtod(buf, uint16_t *));
			ctrl->imm = *(rte_pktmbuf_mtod_offset(buf, uint32_t *,
					      sizeof(uint16_t)));
		} else {
			ctrl->imm = 0;
		}
		ctrl->srcrb_flags = srcrb.flags;
		/*
		 * Make sure descriptor is fully written before
		 * setting ownership bit (because HW can start
		 * executing as soon as we do).
		 */
		rte_io_wmb();
		ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode);
		elt->buf = buf;
		bytes_sent += buf->pkt_len;
		ctrl = ctrl_next;
		elt = elt_next;
	}
	/* Take a shortcut if nothing must be sent. */
	if (unlikely(i == 0))
		return 0;
	/* Save WQE address of the next Tx burst element. */
	elt->wqe = ctrl;
	/* Increment send statistics counters. */
	txq->stats.opackets += i;
	txq->stats.obytes += bytes_sent;
	/* Make sure that descriptors are written before doorbell record. */
	rte_wmb();
	/* Ring QP doorbell. */
	rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
	txq->elts_head += i;
	return i;
}

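/*
 * A minimal sketch of how this callback is reached (illustrative only;
 * the actual assignment happens in the control path when the device is
 * started, and "dev" is hypothetical here):
 *
 *	dev->tx_pkt_burst = mlx4_tx_burst;
 *	dev->rx_pkt_burst = mlx4_rx_burst;
 */
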
/**
 * Translate Rx completion flags to packet type.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 * @param l2tun_offload
 *   Whether L2 tunnel offload is enabled.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
		   uint32_t l2tun_offload)
{
	uint32_t idx = 0;
	uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
	uint32_t status = rte_be_to_cpu_32(cqe->status);

	/*
	 * The index to the array should have:
	 * bit[7] - MLX4_CQE_L2_TUNNEL
	 * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
	 */
	if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
		idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
		       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
	/*
	 * The index to the array should have:
	 * bit[5] - MLX4_CQE_STATUS_UDP
	 * bit[4] - MLX4_CQE_STATUS_TCP
	 * bit[3] - MLX4_CQE_STATUS_IPV4OPT
	 * bit[2] - MLX4_CQE_STATUS_IPV6
	 * bit[1] - MLX4_CQE_STATUS_IPV4F
	 * bit[0] - MLX4_CQE_STATUS_IPV4
	 * giving a total of up to 256 entries.
	 */
	idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
	return mlx4_ptype_table[idx];
}

/**
 * Translate Rx completion flags to offload flags.
 *
 * @param flags
 *   Rx completion flags returned by mlx4_cqe_flags().
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   Offload flags (ol_flags) in mbuf format.
 */
static inline uint32_t
rxq_cq_to_ol_flags(uint32_t flags, int csum, int csum_l2tun)
{
	uint32_t ol_flags = 0;

	if (csum)
		ol_flags |=
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_IP_HDR_CSUM_OK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_STATUS_TCP_UDP_CSUM_OK,
				       PKT_RX_L4_CKSUM_GOOD);
	if ((flags & MLX4_CQE_L2_TUNNEL) && csum_l2tun)
		ol_flags |=
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_IPOK,
				       PKT_RX_IP_CKSUM_GOOD) |
			mlx4_transpose(flags,
				       MLX4_CQE_L2_TUNNEL_L4_CSUM,
				       PKT_RX_L4_CKSUM_GOOD);
	return ol_flags;
}

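/*
 * For reference (an assumption about the helper, see mlx4_utils.h for the
 * authoritative definition): mlx4_transpose() moves a masked bit from one
 * flag layout to another by multiplying or dividing by a power of two, so
 * a set MLX4_CQE_* bit lands exactly on the matching PKT_RX_* bit without
 * any conditional branch.
 */
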
/**
 * Extract checksum information from CQE flags.
 *
 * @param cqe
 *   Pointer to CQE structure.
 * @param csum
 *   Whether Rx checksums are enabled.
 * @param csum_l2tun
 *   Whether Rx L2 tunnel checksums are enabled.
 *
 * @return
 *   CQE checksum information.
 */
static inline uint32_t
mlx4_cqe_flags(volatile struct mlx4_cqe *cqe, int csum, int csum_l2tun)
{
	uint32_t flags = 0;

	/*
	 * The relevant bits are in different locations within the CQE,
	 * therefore we can join them in one 32-bit variable.
	 */
	if (csum)
		flags = (rte_be_to_cpu_32(cqe->status) &
			 MLX4_CQE_STATUS_IPV4_CSUM_OK);
	if (csum_l2tun)
		flags |= (rte_be_to_cpu_32(cqe->vlan_my_qpn) &
			  (MLX4_CQE_L2_TUNNEL |
			   MLX4_CQE_L2_TUNNEL_IPOK |
			   MLX4_CQE_L2_TUNNEL_L4_CSUM |
			   MLX4_CQE_L2_TUNNEL_IPV4));
	return flags;
}

/**
 * Poll one CQE from CQ.
 *
 * @param rxq
 *   Pointer to the receive queue structure.
 * @param[out] out
 *   Just polled CQE.
 *
 * @return
 *   Number of bytes of the CQE, 0 in case there is no completion.
 */
static int
mlx4_cq_poll_one(struct rxq *rxq, volatile struct mlx4_cqe **out)
{
	int ret = 0;
	volatile struct mlx4_cqe *cqe = NULL;
	struct mlx4_cq *cq = &rxq->mcq;

	cqe = (volatile struct mlx4_cqe *)mlx4_get_cqe(cq, cq->cons_index);
	if (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
	    !!(cq->cons_index & cq->cqe_cnt))
		goto out;
	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rte_rmb();
	assert(!(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK));
	assert((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) !=
	       MLX4_CQE_OPCODE_ERROR);
	ret = rte_be_to_cpu_32(cqe->byte_cnt);
	++cq->cons_index;
out:
	*out = cqe;
	return ret;
}

/**
 * DPDK callback for Rx with scattered packets support.
 *
 * @param dpdk_rxq
 *   Generic pointer to Rx queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct rxq *rxq = dpdk_rxq;
	const uint32_t wr_cnt = (1 << rxq->elts_n) - 1;
	const uint16_t sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	unsigned int i = 0;
	uint32_t rq_ci = rxq->rq_ci << sges_n;
	int len = 0;

	while (pkts_n) {
		volatile struct mlx4_cqe *cqe;
		uint32_t idx = rq_ci & wr_cnt;
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx4_wqe_data_seg *scat = &(*rxq->wqes)[idx];

		/* Update the 'next' pointer of the previous segment. */
		if (pkt)
			seg->next = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(scat);
		rep = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * No buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			while (pkt != seg) {
				assert(pkt != (*rxq->elts)[idx]);
				rep = pkt->next;
				pkt->next = NULL;
				pkt->nb_segs = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
			break;
		}
		if (!pkt) {
			/* Looking for the new packet. */
			len = mlx4_cq_poll_one(rxq, &cqe);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			if (unlikely(len < 0)) {
				/* Rx error, packet is likely too large. */
				rte_mbuf_raw_free(rep);
				++rxq->stats.idropped;
				goto skip;
			}
			pkt = seg;
			assert(len >= (rxq->crc_present << 2));
			/* Update packet information. */
			pkt->packet_type =
				rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
			pkt->ol_flags = PKT_RX_RSS_HASH;
			pkt->hash.rss = cqe->immed_rss_invalid;
			if (rxq->crc_present)
				len -= ETHER_CRC_LEN;
			pkt->pkt_len = len;
			if (rxq->csum | rxq->csum_l2tun) {
				uint32_t flags =
					mlx4_cqe_flags(cqe,
						       rxq->csum,
						       rxq->csum_l2tun);

				pkt->ol_flags |=
					rxq_cq_to_ol_flags(flags,
							   rxq->csum,
							   rxq->csum_l2tun);
			}
		}
		rep->nb_segs = 1;
		rep->port = rxq->port_id;
		rep->data_len = seg->data_len;
		rep->data_off = seg->data_off;
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		if (len > seg->data_len) {
			len -= seg->data_len;
			++pkt->nb_segs;
			++rq_ci;
			continue;
		}
		/* The last segment. */
		seg->data_len = len;
		/* Increment bytes counter. */
		rxq->stats.ibytes += pkt->pkt_len;
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
skip:
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely(i == 0 && (rq_ci >> sges_n) == rxq->rq_ci))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	rte_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	*rxq->mcq.set_ci_db =
		rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
	return i;
}

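/*
 * A minimal usage sketch (illustrative only; "port_id", "queue_id" and
 * the burst size are hypothetical): applications reach this callback
 * through rte_eth_rx_burst():
 *
 *	struct rte_mbuf *mbufs[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, mbufs, 32);
 */
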
/**
 * Dummy DPDK callback for Tx.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to Tx queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx4_tx_burst_removed(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	(void)dpdk_txq;
	(void)pkts;
	(void)pkts_n;
	return 0;
}

1029 * Dummy DPDK callback for Rx.
1031 * This function is used to temporarily replace the real callback during
1032 * unsafe control operations on the queue, or in case of error.
1035 * Generic pointer to Rx queue structure.
1037 * Array to store received packets.
1039 * Maximum number of packets in array.
1042 * Number of packets successfully received (<= pkts_n).
1045 mlx4_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)