/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_vect.h>

#include "otx2_ethdev.h"
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) do {				\
	/* Cached value is low, update fc_cache_pkts from fc_mem */	\
	if (unlikely((txq)->fc_cache_pkts < (pkts))) {			\
		/* Multiply with sqes_per_sqb to express in pkts */	\
		(txq)->fc_cache_pkts =					\
			((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) <<	\
				(txq)->sqes_per_sqb_log2;		\
		/* Check again whether there is room */			\
		if (unlikely((txq)->fc_cache_pkts < (pkts)))		\
			return 0;					\
	}								\
} while (0)
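
/*
 * Note on the flow-control math above: *fc_mem reflects the number of
 * SQBs consumed, so (nb_sqb_bufs_adj - *fc_mem) is the free SQB count;
 * shifting by sqes_per_sqb_log2 converts SQBs to SQEs, i.e. packets.
 * The cached count lets most bursts avoid re-reading the DMA-updated
 * fc_mem location.
 */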

static __rte_always_inline uint16_t
nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	      uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	struct otx2_eth_txq *txq = tx_queue; uint16_t i;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));
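
	/*
	 * cmd[] now holds this queue's fixed command template (SEND_HDR
	 * plus any extension subdescriptors selected by flags); per-packet
	 * fields are patched into it in the loop below.
	 */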

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		for (i = 0; i < pkts; i++)
			otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Commit any changes to the packets here, as no further
	 * changes will be made unless fast free is disabled.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	for (i = 0; i < pkts; i++) {
		otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
		/* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     tx_pkts[i]->ol_flags, 4, flags);
		otx2_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}

static __rte_always_inline uint16_t
nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	struct otx2_eth_txq *txq = tx_queue; uint64_t i;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;
	uint16_t segdw;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		for (i = 0; i < pkts; i++)
			otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	for (i = 0; i < pkts; i++) {
		otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
		segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
		/* Commit any changes in the packet */
		rte_io_wmb();
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     tx_pkts[i]->ol_flags, segdw,
					     flags);
		otx2_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}

#if defined(RTE_ARCH_ARM64)

#define NIX_DESCS_PER_LOOP	4
static __rte_always_inline uint16_t
nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
	uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
	uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint64x2_t senddesc01_w0, senddesc23_w0;
	uint64x2_t senddesc01_w1, senddesc23_w1;
	uint64x2_t sgdesc01_w0, sgdesc23_w0;
	uint64x2_t sgdesc01_w1, sgdesc23_w1;
	struct otx2_eth_txq *txq = tx_queue;
	uint64_t *lmt_addr = txq->lmt_addr;
	rte_iova_t io_addr = txq->io_addr;
	uint64x2_t ltypes01, ltypes23;
	uint64x2_t xtmp128, ytmp128;
	uint64x2_t xmask01, xmask23;
	uint64x2_t cmd00, cmd01;
	uint64x2_t cmd10, cmd11;
	uint64x2_t cmd20, cmd21;
	uint64x2_t cmd30, cmd31;
	uint64_t lmt_status, i;
	uint16_t pkts_left;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
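
	/* The loop below handles multiples of 4 packets; the remainder
	 * (pkts_left) is sent through the scalar path after the loop.
	 */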

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	/* Commit any changes to the packets here, as no further
	 * changes will be made unless fast free is disabled.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
		rte_io_wmb();

	senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
	senddesc23_w0 = senddesc01_w0;
	senddesc01_w1 = vdupq_n_u64(0);
	senddesc23_w1 = senddesc01_w1;
	sgdesc01_w0 = vld1q_dup_u64(&txq->cmd[2]);
	sgdesc23_w0 = sgdesc01_w0;
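
	/*
	 * txq->cmd[0] holds the SEND_HDR_S word0 template and txq->cmd[2]
	 * the SEND_SG_S word0 template; word1 of each is built per packet.
	 */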
	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
		senddesc01_w0 = vbicq_u64(senddesc01_w0,
					  vdupq_n_u64(0xFFFFFFFF));
		sgdesc01_w0 = vbicq_u64(sgdesc01_w0,
					vdupq_n_u64(0xFFFFFFFF));

		senddesc23_w0 = senddesc01_w0;
		sgdesc23_w0 = sgdesc01_w0;

		/* Move mbufs to iova */
		mbuf0 = (uint64_t *)tx_pkts[0];
		mbuf1 = (uint64_t *)tx_pkts[1];
		mbuf2 = (uint64_t *)tx_pkts[2];
		mbuf3 = (uint64_t *)tx_pkts[3];

		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mbuf, buf_iova));

		/*
		 * Get each mbuf's olflags, iova, pktlen, dataoff:
		 * dataoff_iovaX.D[0] = iova,
		 * dataoff_iovaX.D[1](15:0) = mbuf->dataoff
		 * len_olflagsX.D[0] = ol_flags,
		 * len_olflagsX.D[1](63:32) = mbuf->pkt_len
		 */
		dataoff_iova0 = vld1q_u64(mbuf0);
		len_olflags0 = vld1q_u64(mbuf0 + 2);
		dataoff_iova1 = vld1q_u64(mbuf1);
		len_olflags1 = vld1q_u64(mbuf1 + 2);
		dataoff_iova2 = vld1q_u64(mbuf2);
		len_olflags2 = vld1q_u64(mbuf2 + 2);
		dataoff_iova3 = vld1q_u64(mbuf3);
		len_olflags3 = vld1q_u64(mbuf3 + 2);

		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			struct rte_mbuf *mbuf;
			/* Set don't free bit if reference count > 1 */
			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
				offsetof(struct rte_mbuf, buf_iova));

			/* vsetq_lane_u64() returns the updated vector, so
			 * the result must be assigned back.
			 */
			if (otx2_nix_prefree_seg(mbuf))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 0);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
				offsetof(struct rte_mbuf, buf_iova));
			if (otx2_nix_prefree_seg(mbuf))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 1);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
				offsetof(struct rte_mbuf, buf_iova));
			if (otx2_nix_prefree_seg(mbuf))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 0);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
				offsetof(struct rte_mbuf, buf_iova));
			if (otx2_nix_prefree_seg(mbuf))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 1);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Ensure mbuf fields updated in
			 * otx2_nix_prefree_seg() are written before LMTST.
			 */
			rte_io_wmb();
		} else {
			struct rte_mbuf *mbuf;
			/* Mark mempool object as "put" since
			 * it is freed by NIX
			 */
			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);
			RTE_SET_USED(mbuf);
		}

		/* Move mbufs to point to pool */
		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));

		if (flags &
		    (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
		     NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
			/* Get tx_offload for ol2, ol3, l2, l3 lengths */
			/*
			 * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
			 * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
			 */
			asm volatile ("LD1 {%[a].D}[0],[%[in]]\n\t" :
				      [a]"+w"(senddesc01_w1) :
				      [in]"r"(mbuf0 + 2) : "memory");

			asm volatile ("LD1 {%[a].D}[1],[%[in]]\n\t" :
				      [a]"+w"(senddesc01_w1) :
				      [in]"r"(mbuf1 + 2) : "memory");

			asm volatile ("LD1 {%[b].D}[0],[%[in]]\n\t" :
				      [b]"+w"(senddesc23_w1) :
				      [in]"r"(mbuf2 + 2) : "memory");

			asm volatile ("LD1 {%[b].D}[1],[%[in]]\n\t" :
				      [b]"+w"(senddesc23_w1) :
				      [in]"r"(mbuf3 + 2) : "memory");

			/* Get pool pointer alone */
			mbuf0 = (uint64_t *)*mbuf0;
			mbuf1 = (uint64_t *)*mbuf1;
			mbuf2 = (uint64_t *)*mbuf2;
			mbuf3 = (uint64_t *)*mbuf3;
		} else {
			/* Get pool pointer alone */
			mbuf0 = (uint64_t *)*mbuf0;
			mbuf1 = (uint64_t *)*mbuf1;
			mbuf2 = (uint64_t *)*mbuf2;
			mbuf3 = (uint64_t *)*mbuf3;
		}

		const uint8x16_t shuf_mask2 = {
			0x4, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xc, 0xd, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		};
		xtmp128 = vzip2q_u64(len_olflags0, len_olflags1);
		ytmp128 = vzip2q_u64(len_olflags2, len_olflags3);

		/* Clear dataoff_iovaX.D[1] bits other than dataoff(15:0) */
		const uint64x2_t and_mask0 = {
			0xFFFFFFFFFFFFFFFF,
			0x000000000000FFFF,
		};

		dataoff_iova0 = vandq_u64(dataoff_iova0, and_mask0);
		dataoff_iova1 = vandq_u64(dataoff_iova1, and_mask0);
		dataoff_iova2 = vandq_u64(dataoff_iova2, and_mask0);
		dataoff_iova3 = vandq_u64(dataoff_iova3, and_mask0);

		/*
		 * Pick only 16 bits of pktlen present at bits 63:32
		 * and place them at bits 15:0.
		 */
		xtmp128 = vqtbl1q_u8(xtmp128, shuf_mask2);
		ytmp128 = vqtbl1q_u8(ytmp128, shuf_mask2);

		/* Add pairwise to get dataoff + iova in sgdesc_w1 */
		sgdesc01_w1 = vpaddq_u64(dataoff_iova0, dataoff_iova1);
		sgdesc23_w1 = vpaddq_u64(dataoff_iova2, dataoff_iova3);
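
		/* Each lane pair is (iova, data_off), so the pairwise add
		 * yields the segment's DMA address, iova + data_off.
		 */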

		/* OR both sgdesc_w0 and senddesc_w0 with the 16 bits of
		 * pktlen at position 15:0.
		 */
		sgdesc01_w0 = vorrq_u64(sgdesc01_w0, xtmp128);
		sgdesc23_w0 = vorrq_u64(sgdesc23_w0, ytmp128);
		senddesc01_w0 = vorrq_u64(senddesc01_w0, xtmp128);
		senddesc23_w0 = vorrq_u64(senddesc23_w0, ytmp128);

		if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
		    !(flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/*
			 * Lookup table to translate ol_flags to
			 * il3/il4 types. We still use the ol3/ol4 fields in
			 * senddesc_w1, as only one level of header
			 * processing is enabled.
			 */
			const uint8x16_t tbl = {
				/* [0-15] = il4type:il3type */
				0x04, /* none (IPv6 assumed) */
				0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
				0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
				0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
				0x03, /* PKT_TX_IP_CKSUM */
				0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
				0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
				0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
				0x02, /* PKT_TX_IPV4 */
				0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
				0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
				0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
				0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
				0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_TCP_CKSUM
				       */
				0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_SCTP_CKSUM
				       */
				0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_UDP_CKSUM
				       */
			};
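
			/* The nibble used to index the table is ol_flags
			 * bits 55:52: bit3 = IPV4, bit2 = IP_CKSUM,
			 * bits1:0 = L4 checksum type.
			 */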

			/* Extract olflags to translate to iltypes */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(47):L3_LEN(9):L2_LEN(7+z)
			 * E(47):L3_LEN(9):L2_LEN(7+z)
			 */
			senddesc01_w1 = vshlq_n_u64(senddesc01_w1, 1);
			senddesc23_w1 = vshlq_n_u64(senddesc23_w1, 1);

			/* Move OLFLAGS bits 55:52 to 51:48
			 * with zeros prepended on the byte and rest
			 * of the bits zeroed.
			 */
			xtmp128 = vshrq_n_u8(xtmp128, 4);
			ytmp128 = vshrq_n_u8(ytmp128, 4);
			/*
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 */
			const int8x16_t tshft3 = {
				-1, 0, 8, 8, 8, 8, 8, 8,
				-1, 0, 8, 8, 8, 8, 8, 8,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Do the lookup */
			ltypes01 = vqtbl1q_u8(tbl, xtmp128);
			ltypes23 = vqtbl1q_u8(tbl, ytmp128);

			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));

			/* Pick only the relevant fields, i.e. bits 48:55 of
			 * iltype, and place them in ol3/ol4type of senddesc_w1
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x6, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xE, 0xFF, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
			 * a [E(32):E(16):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E(32):E(16):(OL3+OL2):OL2]
			 * => E(32):E(16):OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u16(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u16(senddesc23_w1, 8));

			/* Create second half of 4W cmd for 4 mbufs (sgdesc) */
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);

			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);
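			/* The aura handle (low 16 bits of the mempool's
			 * pool_id) now sits at bit 20 of SEND_HDR_W0, just
			 * above the don't-free bit (19).
			 */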

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);

			/* Create first half of 4W cmd for 4 mbufs (sendhdr) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);

		} else if (!(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
			   (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/*
			 * Lookup table to translate ol_flags to
			 * ol3/ol4 types.
			 */
			const uint8x16_t tbl = {
				/* [0-15] = ol4type:ol3type */
				0x00, /* none */
				0x03, /* OUTER_IP_CKSUM */
				0x02, /* OUTER_IPV4 */
				0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
				0x04, /* OUTER_IPV6 */
				0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
				0x00, /* OUTER_IPV6 | OUTER_IPV4 */
				0x00, /* OUTER_IPV6 | OUTER_IPV4 |
				       * OUTER_IP_CKSUM
				       */
				0x00, /* OUTER_UDP_CKSUM */
				0x33, /* OUTER_UDP_CKSUM | OUTER_IP_CKSUM */
				0x32, /* OUTER_UDP_CKSUM | OUTER_IPV4 */
				0x33, /* OUTER_UDP_CKSUM | OUTER_IPV4 |
				       * OUTER_IP_CKSUM
				       */
				0x34, /* OUTER_UDP_CKSUM | OUTER_IPV6 */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IP_CKSUM
				       */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IPV4
				       */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IPV4 | OUTER_IP_CKSUM
				       */
			};

			/* Extract olflags to translate to oltypes */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(47):OL3_LEN(9):OL2_LEN(7+z)
			 * E(47):OL3_LEN(9):OL2_LEN(7+z)
			 */
			const uint8x16_t shuf_mask5 = {
				0x6, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
				0xE, 0xD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			};
			senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
			senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);

			/* Extract outer ol flags only */
			const uint64x2_t o_cksum_mask = {
				0x1C00020000000000,
				0x1C00020000000000,
			};

			xtmp128 = vandq_u64(xtmp128, o_cksum_mask);
			ytmp128 = vandq_u64(ytmp128, o_cksum_mask);

			/* Extract OUTER_UDP_CKSUM bit 41 and
			 * move it to bit 61
			 */
			xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
			ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);

			/* Shift oltype by 2 to start nibble from BIT(56)
			 * instead of BIT(58)
			 */
			xtmp128 = vshrq_n_u8(xtmp128, 2);
			ytmp128 = vshrq_n_u8(ytmp128, 2);
			/*
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 */
			const int8x16_t tshft3 = {
				-1, 0, 8, 8, 8, 8, 8, 8,
				-1, 0, 8, 8, 8, 8, 8, 8,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Do the lookup */
			ltypes01 = vqtbl1q_u8(tbl, xtmp128);
			ltypes23 = vqtbl1q_u8(tbl, ytmp128);

			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));

			/* Pick only the relevant fields, i.e. bits 56:63 of
			 * oltype, and place them in ol3/ol4type of senddesc_w1
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xFF, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
			 * a [E(32):E(16):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E(32):E(16):(OL3+OL2):OL2]
			 * => E(32):E(16):OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u16(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u16(senddesc23_w1, 8));

			/* Create second half of 4W cmd for 4 mbufs (sgdesc) */
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);

			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);

			/* Create first half of 4W cmd for 4 mbufs (sendhdr) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);

		} else if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
			   (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/* Lookup table to translate ol_flags to
			 * ol4type, ol3type, il4type, il3type of senddesc_w1
			 */
			const uint8x16x2_t tbl = {
			{
				{
					/* [0-15] = il4type:il3type */
					0x04, /* none (IPv6) */
					0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
					0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
					0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
					0x03, /* PKT_TX_IP_CKSUM */
					0x13, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_TCP_CKSUM
					       */
					0x23, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x33, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_UDP_CKSUM
					       */
					0x02, /* PKT_TX_IPV4 */
					0x12, /* PKT_TX_IPV4 |
					       * PKT_TX_TCP_CKSUM
					       */
					0x22, /* PKT_TX_IPV4 |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x32, /* PKT_TX_IPV4 |
					       * PKT_TX_UDP_CKSUM
					       */
					0x03, /* PKT_TX_IPV4 |
					       * PKT_TX_IP_CKSUM
					       */
					0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_TCP_CKSUM
					       */
					0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_UDP_CKSUM
					       */
				},

				{
					/* [16-31] = ol4type:ol3type */
					0x00, /* none */
					0x03, /* OUTER_IP_CKSUM */
					0x02, /* OUTER_IPV4 */
					0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
					0x04, /* OUTER_IPV6 */
					0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
					0x00, /* OUTER_IPV6 | OUTER_IPV4 */
					0x00, /* OUTER_IPV6 | OUTER_IPV4 |
					       * OUTER_IP_CKSUM
					       */
					0x00, /* OUTER_UDP_CKSUM */
					0x33, /* OUTER_UDP_CKSUM |
					       * OUTER_IP_CKSUM
					       */
					0x32, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV4
					       */
					0x33, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV4 | OUTER_IP_CKSUM
					       */
					0x34, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV6
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IP_CKSUM
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IPV4
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IPV4 | OUTER_IP_CKSUM
					       */
				},
			}
			};

			/* Extract olflags to translate to oltype & iltype */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
			 * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
			 */
			const uint32x4_t tshft_4 = {
				1, 0,
				1, 0,
			};
			senddesc01_w1 = vshlq_u32(senddesc01_w1, tshft_4);
			senddesc23_w1 = vshlq_u32(senddesc23_w1, tshft_4);

			/*
			 * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
			 * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
			 */
			const uint8x16_t shuf_mask5 = {
				0x6, 0x5, 0x0, 0x1, 0xFF, 0xFF, 0xFF, 0xFF,
				0xE, 0xD, 0x8, 0x9, 0xFF, 0xFF, 0xFF, 0xFF,
			};
			senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
			senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);

			/* Extract outer and inner header ol_flags */
			const uint64x2_t oi_cksum_mask = {
				0x1CF0020000000000,
				0x1CF0020000000000,
			};

			xtmp128 = vandq_u64(xtmp128, oi_cksum_mask);
			ytmp128 = vandq_u64(ytmp128, oi_cksum_mask);

			/* Extract OUTER_UDP_CKSUM bit 41 and
			 * move it to bit 61
			 */
			xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
			ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);

			/* Shift right oltype by 2 and iltype by 4
			 * to start the oltype nibble from BIT(56)
			 * instead of BIT(58) and the iltype nibble from
			 * BIT(48) instead of BIT(52).
			 */
			const int8x16_t tshft5 = {
				8, 8, 8, 8, 8, 8, -4, -2,
				8, 8, 8, 8, 8, 8, -4, -2,
			};

			xtmp128 = vshlq_u8(xtmp128, tshft5);
			ytmp128 = vshlq_u8(ytmp128, tshft5);
			/*
			 * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
			 * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
			 */
			const int8x16_t tshft3 = {
				-1, 0, -1, 0, 0, 0, 0, 0,
				-1, 0, -1, 0, 0, 0, 0, 0,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Mark bit 4 of oltype so the index selects
			 * entries [16-31] of the two-table lookup.
			 */
			const uint64x2_t oi_cksum_mask2 = {
				0x1000000000000000,
				0x1000000000000000,
			};

			xtmp128 = vorrq_u64(xtmp128, oi_cksum_mask2);
			ytmp128 = vorrq_u64(ytmp128, oi_cksum_mask2);

			/* Do the lookup */
			ltypes01 = vqtbl2q_u8(tbl, xtmp128);
			ltypes23 = vqtbl2q_u8(tbl, ytmp128);

			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));

			/* Pick only the relevant fields, i.e. bits 48:55 of
			 * iltype and bits 56:63 of oltype, and place them in
			 * their corresponding places in senddesc_w1.
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0x6, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xE, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare l4ptr, l3ptr, ol4ptr, ol3ptr from
			 * l3len, l2len, ol3len, ol2len.
			 * a [E(32):L3(8):L2(8):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E:(L3+L2):(L2+OL3):(OL3+OL2):OL2]
			 * a = a + (a << 16)
			 * a [E:(L3+L2+OL3+OL2):(L2+OL3+OL2):(OL3+OL2):OL2]
			 * => E(32):IL4PTR(8):IL3PTR(8):OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u32(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u32(senddesc23_w1, 8));

			/* Create second half of 4W cmd for 4 mbufs (sgdesc) */
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);

			/* Continue preparing l4ptr, l3ptr, ol4ptr, ol3ptr */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u32(senddesc01_w1, 16));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u32(senddesc23_w1, 16));
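			/* The two add+shift rounds form a byte-wise prefix
			 * sum over the four length bytes, converting
			 * cumulative lengths into header pointer offsets.
			 */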

			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);

			/* Create first half of 4W cmd for 4 mbufs (sendhdr) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
		} else {
			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));
			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				      [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				      [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);

			/* Create 4W cmd for 4 mbufs (sendhdr, sgdesc) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
		}

		do {
			vst1q_u64(lmt_addr, cmd00);
			vst1q_u64(lmt_addr + 2, cmd01);
			vst1q_u64(lmt_addr + 4, cmd10);
			vst1q_u64(lmt_addr + 6, cmd11);
			vst1q_u64(lmt_addr + 8, cmd20);
			vst1q_u64(lmt_addr + 10, cmd21);
			vst1q_u64(lmt_addr + 12, cmd30);
			vst1q_u64(lmt_addr + 14, cmd31);
			lmt_status = otx2_lmt_submit(io_addr);
		} while (lmt_status == 0);
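		/* otx2_lmt_submit() returns 0 if the LMTST transaction was
		 * interrupted (e.g. by a context switch) before committing;
		 * the loop above simply replays the stores and resubmits.
		 */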
		tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
	}

	if (unlikely(pkts_left))
		pkts += nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd, flags);

	return pkts;
}

#else
static __rte_always_inline uint16_t
nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	/* Vector Tx is only implemented for ARM64; this stub keeps the
	 * build working on other architectures.
	 */
	RTE_SET_USED(tx_queue);
	RTE_SET_USED(tx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(cmd);
	RTE_SET_USED(flags);
	return 0;
}
#endif

#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
static uint16_t __rte_noinline __rte_hot				\
otx2_nix_xmit_pkts_ ## name(void *tx_queue,				\
			struct rte_mbuf **tx_pkts, uint16_t pkts)	\
{									\
	uint64_t cmd[sz];						\
									\
	/* For TSO, inner checksum offload is mandatory */		\
	if (((flags) & NIX_TX_OFFLOAD_TSO_F) &&				\
	    !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))			\
		return 0;						\
	return nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, flags);	\
}

NIX_TX_FASTPATH_MODES
#undef T
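
/*
 * NIX_TX_FASTPATH_MODES expands T once per supported offload-flag
 * combination, generating a dedicated burst function for each; because
 * `flags` is a compile-time constant inside the generated body, the
 * compiler removes every branch for offloads that are not selected.
 */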

#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
static uint16_t __rte_noinline __rte_hot				\
otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue,			\
			struct rte_mbuf **tx_pkts, uint16_t pkts)	\
{									\
	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
									\
	/* For TSO, inner checksum offload is mandatory */		\
	if (((flags) & NIX_TX_OFFLOAD_TSO_F) &&				\
	    !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))			\
		return 0;						\
	return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd,	\
				  (flags) | NIX_TX_MULTI_SEG_F);	\
}

NIX_TX_FASTPATH_MODES
#undef T

#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
static uint16_t __rte_noinline __rte_hot				\
otx2_nix_xmit_pkts_vec_ ## name(void *tx_queue,				\
			struct rte_mbuf **tx_pkts, uint16_t pkts)	\
{									\
	uint64_t cmd[sz];						\
									\
	/* VLAN, TSTAMP and TSO are not supported by the vector path */	\
	if ((flags) & NIX_TX_OFFLOAD_VLAN_QINQ_F ||			\
	    (flags) & NIX_TX_OFFLOAD_TSTAMP_F ||			\
	    (flags) & NIX_TX_OFFLOAD_TSO_F)				\
		return 0;						\
	return nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd, (flags)); \
}

NIX_TX_FASTPATH_MODES
#undef T

static inline void
pick_tx_func(struct rte_eth_dev *eth_dev,
	     const eth_tx_burst_t tx_burst[2][2][2][2][2][2][2])
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* [SEC] [TSO] [TSTMP] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
	eth_dev->tx_pkt_burst = tx_burst
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_SECURITY_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}
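
/*
 * For example, a queue with only the outer and inner checksum offloads
 * enabled resolves to tx_burst[0][0][0][0][0][1][1].
 */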

void
otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
	[f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_ ## name,

NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
	[f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_mseg_ ## name,

NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2][2] = {
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
	[f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_vec_ ## name,

NIX_TX_FASTPATH_MODES
#undef T
	};

	if (dev->scalar_ena ||
	    (dev->tx_offload_flags &
	     (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F |
	      NIX_TX_OFFLOAD_TSO_F)))
		pick_tx_func(eth_dev, nix_eth_tx_burst);
	else
		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
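
	/* Multi-segment burst functions override the choice above when
	 * MULTI_SEGS is requested, since chained mbufs need per-segment
	 * SG descriptor preparation.
	 */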
	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
}