/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_vect.h>

#include "otx2_ethdev.h"
#define NIX_XMIT_FC_OR_RETURN(txq, pkts) do {			\
	/* Cached value is low, update the fc_cache_pkts */	\
	if (unlikely((txq)->fc_cache_pkts < (pkts))) {		\
		/* Multiply with sqe_per_sqb to express in pkts */ \
		(txq)->fc_cache_pkts =				\
			((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) << \
				(txq)->sqes_per_sqb_log2;	\
		/* Check it again for the room */		\
		if (unlikely((txq)->fc_cache_pkts < (pkts)))	\
			return 0;				\
	}							\
} while (0)
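
/* The SQ free-descriptor count is tracked in a software cache
 * (fc_cache_pkts) so that the HW-updated fc_mem location is only
 * read when the cache runs low.
 */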

static __rte_always_inline uint16_t
nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	      uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	struct otx2_eth_txq *txq = tx_queue; uint16_t i;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);
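
	/* Copy the per-queue command template into the local command
	 * buffer; otx2_nix_tx_ext_subs() returns how many extension
	 * sub-descriptors the enabled offloads need.
	 */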
	otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		for (i = 0; i < pkts; i++)
			otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Let's commit any changes in the packet */
	rte_cio_wmb();
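
	/* Each iteration below builds one SQE in cmd[] and submits it
	 * through the LMT line; segdw is fixed at 4 dwords here since
	 * the scalar path posts HDR + EXT + SG + SMEM.
	 */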
	for (i = 0; i < pkts; i++) {
		otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
		/* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     tx_pkts[i]->ol_flags, 4, flags);
		otx2_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}

static __rte_always_inline uint16_t
nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	struct otx2_eth_txq *txq = tx_queue; uint64_t i;
	const rte_iova_t io_addr = txq->io_addr;
	void *lmt_addr = txq->lmt_addr;
	uint16_t segdw;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);

	otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));

	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F) {
		for (i = 0; i < pkts; i++)
			otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
	}

	/* Let's commit any changes in the packet */
	rte_cio_wmb();

	for (i = 0; i < pkts; i++) {
		otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
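		/* Build the scatter/gather list from the mbuf chain;
		 * segdw is the resulting command size in dwords.
		 */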
		segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
					     tx_pkts[i]->ol_flags, segdw,
					     flags);
		otx2_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
	}

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	return pkts;
}
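
/* The NEON path below prepares four packets per iteration, building the
 * four-word send commands in vector registers and submitting all four
 * through a single LMTST burst.
 */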

#if defined(RTE_ARCH_ARM64)

#define NIX_DESCS_PER_LOOP	4
static __rte_always_inline uint16_t
nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
	uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
	uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint64x2_t senddesc01_w0, senddesc23_w0;
	uint64x2_t senddesc01_w1, senddesc23_w1;
	uint64x2_t sgdesc01_w0, sgdesc23_w0;
	uint64x2_t sgdesc01_w1, sgdesc23_w1;
	struct otx2_eth_txq *txq = tx_queue;
	uint64_t *lmt_addr = txq->lmt_addr;
	rte_iova_t io_addr = txq->io_addr;
	uint64x2_t ltypes01, ltypes23;
	uint64x2_t xtmp128, ytmp128;
	uint64x2_t xmask01, xmask23;
	uint64x2_t mbuf01, mbuf23;
	uint64x2_t cmd00, cmd01;
	uint64x2_t cmd10, cmd11;
	uint64x2_t cmd20, cmd21;
	uint64x2_t cmd30, cmd31;
	uint64_t lmt_status, i;
	uint16_t pkts_left;

	NIX_XMIT_FC_OR_RETURN(txq, pkts);
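
	/* The vector loop works on NIX_DESCS_PER_LOOP packets at a time;
	 * any remainder is handed to the scalar routine at the end.
	 */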
	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);

	/* Reduce the cached count */
	txq->fc_cache_pkts -= pkts;

	/* Let's commit any changes in the packet */
	rte_cio_wmb();

	senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
	senddesc23_w0 = senddesc01_w0;
	senddesc01_w1 = vdupq_n_u64(0);
	senddesc23_w1 = senddesc01_w1;
	sgdesc01_w0 = vld1q_dup_u64(&txq->cmd[2]);
	sgdesc23_w0 = sgdesc01_w0;

	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
		mbuf01 = vld1q_u64((uint64_t *)tx_pkts);
		mbuf23 = vld1q_u64((uint64_t *)(tx_pkts + 2));

		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
		senddesc01_w0 = vbicq_u64(senddesc01_w0,
					  vdupq_n_u64(0xFFFFFFFF));
		sgdesc01_w0 = vbicq_u64(sgdesc01_w0,
					vdupq_n_u64(0xFFFFFFFF));

		senddesc23_w0 = senddesc01_w0;
		sgdesc23_w0 = sgdesc01_w0;

		tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;

		/* Move mbufs to iova */
		mbuf0 = (uint64_t *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (uint64_t *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (uint64_t *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (uint64_t *)vgetq_lane_u64(mbuf23, 1);

		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mbuf, buf_iova));
		/*
		 * Get mbuf's, olflags, iova, pktlen, dataoff
		 * dataoff_iovaX.D[0] = iova,
		 * dataoff_iovaX.D[1](15:0) = mbuf->dataoff
		 * len_olflagsX.D[0] = ol_flags,
		 * len_olflagsX.D[1](63:32) = mbuf->pkt_len
		 */
		dataoff_iova0 = vld1q_u64(mbuf0);
		len_olflags0 = vld1q_u64(mbuf0 + 2);
		dataoff_iova1 = vld1q_u64(mbuf1);
		len_olflags1 = vld1q_u64(mbuf1 + 2);
		dataoff_iova2 = vld1q_u64(mbuf2);
		len_olflags2 = vld1q_u64(mbuf2 + 2);
		dataoff_iova3 = vld1q_u64(mbuf3);
		len_olflags3 = vld1q_u64(mbuf3 + 2);

		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
			struct rte_mbuf *mbuf;
			/* Set don't free bit if reference count > 1 */
			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
				offsetof(struct rte_mbuf, buf_iova));

			if (otx2_nix_prefree_seg(mbuf))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 0);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
				offsetof(struct rte_mbuf, buf_iova));
			if (otx2_nix_prefree_seg(mbuf))
				xmask01 = vsetq_lane_u64(0x80000, xmask01, 1);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
				offsetof(struct rte_mbuf, buf_iova));
			if (otx2_nix_prefree_seg(mbuf))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 0);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
				offsetof(struct rte_mbuf, buf_iova));
			if (otx2_nix_prefree_seg(mbuf))
				xmask23 = vsetq_lane_u64(0x80000, xmask23, 1);
			else
				__mempool_check_cookies(mbuf->pool,
							(void **)&mbuf,
							1, 0);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
		} else {
			struct rte_mbuf *mbuf;
			/* Mark mempool object as "put" since
			 * it is freed by NIX
			 */
			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);

			mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
				offsetof(struct rte_mbuf, buf_iova));
			__mempool_check_cookies(mbuf->pool, (void **)&mbuf,
						1, 0);
		}

		/* Move mbuf pointers to the pool field */
		mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));
		mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
				     offsetof(struct rte_mbuf, pool) -
				     offsetof(struct rte_mbuf, buf_iova));

		if (flags &
		    (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
		     NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
			/* Get tx_offload for ol2, ol3, l2, l3 lengths */
			/*
			 * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
			 * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
			 */
			asm volatile ("LD1 {%[a].D}[0],[%[in]]\n\t" :
				      [a]"+w"(senddesc01_w1) :
				      [in]"r"(mbuf0 + 2) : "memory");

			asm volatile ("LD1 {%[a].D}[1],[%[in]]\n\t" :
				      [a]"+w"(senddesc01_w1) :
				      [in]"r"(mbuf1 + 2) : "memory");

			asm volatile ("LD1 {%[b].D}[0],[%[in]]\n\t" :
				      [b]"+w"(senddesc23_w1) :
				      [in]"r"(mbuf2 + 2) : "memory");

			asm volatile ("LD1 {%[b].D}[1],[%[in]]\n\t" :
				      [b]"+w"(senddesc23_w1) :
				      [in]"r"(mbuf3 + 2) : "memory");

			/* Get pool pointer alone */
			mbuf0 = (uint64_t *)*mbuf0;
			mbuf1 = (uint64_t *)*mbuf1;
			mbuf2 = (uint64_t *)*mbuf2;
			mbuf3 = (uint64_t *)*mbuf3;
		} else {
			/* Get pool pointer alone */
			mbuf0 = (uint64_t *)*mbuf0;
			mbuf1 = (uint64_t *)*mbuf1;
			mbuf2 = (uint64_t *)*mbuf2;
			mbuf3 = (uint64_t *)*mbuf3;
		}
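
		/* At this point senddesc*_w1 hold the mbuf tx_offload words
		 * (when a checksum offload is enabled) and mbuf0-3 point at
		 * each mbuf's mempool, from which the aura is read later.
		 */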
		const uint8x16_t shuf_mask2 = {
			0x4, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xc, 0xd, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		};
		xtmp128 = vzip2q_u64(len_olflags0, len_olflags1);
		ytmp128 = vzip2q_u64(len_olflags2, len_olflags3);

		/* Clear dataoff_iovaX.D[1] bits other than dataoff(15:0) */
		const uint64x2_t and_mask0 = {
			0xFFFFFFFFFFFFFFFF,
			0x000000000000FFFF,
		};

		dataoff_iova0 = vandq_u64(dataoff_iova0, and_mask0);
		dataoff_iova1 = vandq_u64(dataoff_iova1, and_mask0);
		dataoff_iova2 = vandq_u64(dataoff_iova2, and_mask0);
		dataoff_iova3 = vandq_u64(dataoff_iova3, and_mask0);

		/*
		 * Pick only 16 bits of pktlen present at bits 63:32
		 * and place them at bits 15:0.
		 */
		xtmp128 = vqtbl1q_u8(xtmp128, shuf_mask2);
		ytmp128 = vqtbl1q_u8(ytmp128, shuf_mask2);

		/* Add pairwise to get dataoff + iova in sgdesc_w1 */
		sgdesc01_w1 = vpaddq_u64(dataoff_iova0, dataoff_iova1);
		sgdesc23_w1 = vpaddq_u64(dataoff_iova2, dataoff_iova3);

		/* Orr both sgdesc_w0 and senddesc_w0 with 16 bits of
		 * pktlen at 15:0 position.
		 */
		sgdesc01_w0 = vorrq_u64(sgdesc01_w0, xtmp128);
		sgdesc23_w0 = vorrq_u64(sgdesc23_w0, ytmp128);
		senddesc01_w0 = vorrq_u64(senddesc01_w0, xtmp128);
		senddesc23_w0 = vorrq_u64(senddesc23_w0, ytmp128);
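
		/* Three offload combinations follow: inner checksum only,
		 * outer checksum only, and both; each translates ol_flags
		 * into the descriptor's l3/l4 type fields via table lookup.
		 */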
		if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
		    !(flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/*
			 * Lookup table to translate ol_flags to
			 * il3/il4 types. But we still use ol3/ol4 types in
			 * senddesc_w1 as only one header processing is enabled.
			 */
			const uint8x16_t tbl = {
				/* [0-15] = il4type:il3type */
				0x04, /* none (IPv6 assumed) */
				0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
				0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
				0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
				0x03, /* PKT_TX_IP_CKSUM */
				0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
				0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
				0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
				0x02, /* PKT_TX_IPV4 */
				0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
				0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
				0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
				0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
				0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_TCP_CKSUM
				       */
				0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_SCTP_CKSUM
				       */
				0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
				       * PKT_TX_UDP_CKSUM
				       */
			};

			/* Extract olflags to translate to iltypes */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(47):L3_LEN(9):L2_LEN(7+z)
			 * E(47):L3_LEN(9):L2_LEN(7+z)
			 */
			senddesc01_w1 = vshlq_n_u64(senddesc01_w1, 1);
			senddesc23_w1 = vshlq_n_u64(senddesc23_w1, 1);

			/* Move OLFLAGS bits 55:52 to 51:48
			 * with zeros prepended on the byte and rest
			 * of the bits zeroed.
			 */
			xtmp128 = vshrq_n_u8(xtmp128, 4);
			ytmp128 = vshrq_n_u8(ytmp128, 4);
			/*
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 */
			const int8x16_t tshft3 = {
				-1, 0, 8, 8, 8, 8, 8, 8,
				-1, 0, 8, 8, 8, 8, 8, 8,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Do the lookup */
			ltypes01 = vqtbl1q_u8(tbl, xtmp128);
			ltypes23 = vqtbl1q_u8(tbl, ytmp128);

			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));

			/* Pick only relevant fields i.e Bit 48:55 of iltype
			 * and place it in ol3/ol4type of senddesc_w1
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x6, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xE, 0xFF, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
			 * a [E(32):E(16):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E(32):E(16):(OL3+OL2):OL2]
			 * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u16(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u16(senddesc23_w1, 8));

			/* Create second half of 4W cmd for 4 mbufs (sgdesc) */
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
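
			/* Load the low 16 bits of each mbuf pool's aura
			 * handle and shift them into the aura field of
			 * SEND_HDR_W0 (starting at bit 20).
			 */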
			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);

			/* Create first half of 4W cmd for 4 mbufs (sendhdr) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);

		} else if (!(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
			   (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/*
			 * Lookup table to translate ol_flags to
			 * ol3/ol4 types.
			 */

			const uint8x16_t tbl = {
				/* [0-15] = ol4type:ol3type */
				0x00, /* none */
				0x03, /* OUTER_IP_CKSUM */
				0x02, /* OUTER_IPV4 */
				0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
				0x04, /* OUTER_IPV6 */
				0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
				0x00, /* OUTER_IPV6 | OUTER_IPV4 */
				0x00, /* OUTER_IPV6 | OUTER_IPV4 |
				       * OUTER_IP_CKSUM
				       */
				0x00, /* OUTER_UDP_CKSUM */
				0x33, /* OUTER_UDP_CKSUM | OUTER_IP_CKSUM */
				0x32, /* OUTER_UDP_CKSUM | OUTER_IPV4 */
				0x33, /* OUTER_UDP_CKSUM | OUTER_IPV4 |
				       * OUTER_IP_CKSUM
				       */
				0x34, /* OUTER_UDP_CKSUM | OUTER_IPV6 */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IP_CKSUM
				       */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IPV4
				       */
				0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
				       * OUTER_IPV4 | OUTER_IP_CKSUM
				       */
			};

			/* Extract olflags to translate to oltypes */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(47):OL3_LEN(9):OL2_LEN(7+z)
			 * E(47):OL3_LEN(9):OL2_LEN(7+z)
			 */
			const uint8x16_t shuf_mask5 = {
				0x6, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
				0xE, 0xD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			};
			senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
			senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);

			/* Extract outer ol flags only */
			const uint64x2_t o_cksum_mask = {
				0x1C00020000000000,
				0x1C00020000000000,
			};

			xtmp128 = vandq_u64(xtmp128, o_cksum_mask);
			ytmp128 = vandq_u64(ytmp128, o_cksum_mask);

			/* Extract OUTER_UDP_CKSUM bit 41 and
			 * move it to bit 61
			 */

			xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
			ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);

			/* Shift oltype by 2 to start nibble from BIT(56)
			 * instead of BIT(58)
			 */
			xtmp128 = vshrq_n_u8(xtmp128, 2);
			ytmp128 = vshrq_n_u8(ytmp128, 2);
			/*
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 * E(48):L3_LEN(8):L2_LEN(z+7)
			 */
			const int8x16_t tshft3 = {
				-1, 0, 8, 8, 8, 8, 8, 8,
				-1, 0, 8, 8, 8, 8, 8, 8,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Do the lookup */
			ltypes01 = vqtbl1q_u8(tbl, xtmp128);
			ltypes23 = vqtbl1q_u8(tbl, ytmp128);

			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));

			/* Pick only relevant fields i.e Bit 56:63 of oltype
			 * and place it in ol3/ol4type of senddesc_w1
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0xFF, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xFF, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
			 * a [E(32):E(16):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E(32):E(16):(OL3+OL2):OL2]
			 * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u16(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u16(senddesc23_w1, 8));

			/* Create second half of 4W cmd for 4 mbufs (sgdesc) */
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);

			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);

			/* Create first half of 4W cmd for 4 mbufs (sendhdr) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);

		} else if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
			   (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
			/* Lookup table to translate ol_flags to
			 * ol4type, ol3type, il4type, il3type of senddesc_w1
			 */
			const uint8x16x2_t tbl = {
			{
				{
					/* [0-15] = il4type:il3type */
					0x04, /* none (IPv6) */
					0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
					0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
					0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
					0x03, /* PKT_TX_IP_CKSUM */
					0x13, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_TCP_CKSUM
					       */
					0x23, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x33, /* PKT_TX_IP_CKSUM |
					       * PKT_TX_UDP_CKSUM
					       */
					0x02, /* PKT_TX_IPV4 */
					0x12, /* PKT_TX_IPV4 |
					       * PKT_TX_TCP_CKSUM
					       */
					0x22, /* PKT_TX_IPV4 |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x32, /* PKT_TX_IPV4 |
					       * PKT_TX_UDP_CKSUM
					       */
					0x03, /* PKT_TX_IPV4 |
					       * PKT_TX_IP_CKSUM
					       */
					0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_TCP_CKSUM
					       */
					0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_SCTP_CKSUM
					       */
					0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
					       * PKT_TX_UDP_CKSUM
					       */
				},

				{
					/* [16-31] = ol4type:ol3type */
					0x00, /* none */
					0x03, /* OUTER_IP_CKSUM */
					0x02, /* OUTER_IPV4 */
					0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
					0x04, /* OUTER_IPV6 */
					0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
					0x00, /* OUTER_IPV6 | OUTER_IPV4 */
					0x00, /* OUTER_IPV6 | OUTER_IPV4 |
					       * OUTER_IP_CKSUM
					       */
					0x00, /* OUTER_UDP_CKSUM */
					0x33, /* OUTER_UDP_CKSUM |
					       * OUTER_IP_CKSUM
					       */
					0x32, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV4
					       */
					0x33, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV4 | OUTER_IP_CKSUM
					       */
					0x34, /* OUTER_UDP_CKSUM |
					       * OUTER_IPV6
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IP_CKSUM
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IPV4
					       */
					0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
					       * OUTER_IPV4 | OUTER_IP_CKSUM
					       */
				},
			}
			};

			/* Extract olflags to translate to oltype & iltype */
			xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
			ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);

			/*
			 * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
			 * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
			 */
			const uint32x4_t tshft_4 = {
				1, 0,
				1, 0,
			};
			senddesc01_w1 = vshlq_u32(senddesc01_w1, tshft_4);
			senddesc23_w1 = vshlq_u32(senddesc23_w1, tshft_4);

			/*
			 * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
			 * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
			 */
			const uint8x16_t shuf_mask5 = {
				0x6, 0x5, 0x0, 0x1, 0xFF, 0xFF, 0xFF, 0xFF,
				0xE, 0xD, 0x8, 0x9, 0xFF, 0xFF, 0xFF, 0xFF,
			};
			senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
			senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);

			/* Extract outer and inner header ol_flags */
			const uint64x2_t oi_cksum_mask = {
				0x1CF0020000000000,
				0x1CF0020000000000,
			};

			xtmp128 = vandq_u64(xtmp128, oi_cksum_mask);
			ytmp128 = vandq_u64(ytmp128, oi_cksum_mask);

			/* Extract OUTER_UDP_CKSUM bit 41 and
			 * move it to bit 61
			 */

			xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
			ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);

			/* Shift right oltype by 2 and iltype by 4
			 * to start oltype nibble from BIT(58)
			 * instead of BIT(56) and iltype nibble from BIT(48)
			 * instead of BIT(52).
			 */
			const int8x16_t tshft5 = {
				8, 8, 8, 8, 8, 8, -4, -2,
				8, 8, 8, 8, 8, 8, -4, -2,
			};

			xtmp128 = vshlq_u8(xtmp128, tshft5);
			ytmp128 = vshlq_u8(ytmp128, tshft5);
			/*
			 * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
			 * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
			 */
			const int8x16_t tshft3 = {
				-1, 0, -1, 0, 0, 0, 0, 0,
				-1, 0, -1, 0, 0, 0, 0, 0,
			};

			senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
			senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);

			/* Mark Bit(4) of oltype */
			const uint64x2_t oi_cksum_mask2 = {
				0x1000000000000000,
				0x1000000000000000,
			};

			xtmp128 = vorrq_u64(xtmp128, oi_cksum_mask2);
			ytmp128 = vorrq_u64(ytmp128, oi_cksum_mask2);

			/* Do the lookup */
			ltypes01 = vqtbl2q_u8(tbl, xtmp128);
			ltypes23 = vqtbl2q_u8(tbl, ytmp128);

			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));

			/* Pick only relevant fields i.e Bit 48:55 of iltype and
			 * Bit 56:63 of oltype and place it in corresponding
			 * place in senddesc_w1.
			 */
			const uint8x16_t shuf_mask0 = {
				0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0x6, 0xFF, 0xFF,
				0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xE, 0xFF, 0xFF,
			};

			ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
			ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);

			/* Prepare l4ptr, l3ptr, ol4ptr, ol3ptr from
			 * l3len, l2len, ol3len, ol2len.
			 * a [E(32):L3(8):L2(8):OL3(8):OL2(8)]
			 * a = a + (a << 8)
			 * a [E:(L3+L2):(L2+OL3):(OL3+OL2):OL2]
			 * a = a + (a << 16)
			 * a [E:(L3+L2+OL3+OL2):(L2+OL3+OL2):(OL3+OL2):OL2]
			 * => E(32):IL4PTR(8):IL3PTR(8):OL4PTR(8):OL3PTR(8)
			 */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u32(senddesc01_w1, 8));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u32(senddesc23_w1, 8));

			/* Create second half of 4W cmd for 4 mbufs (sgdesc) */
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);

			/* Continue preparing l4ptr, l3ptr, ol4ptr, ol3ptr */
			senddesc01_w1 = vaddq_u8(senddesc01_w1,
						 vshlq_n_u32(senddesc01_w1, 16));
			senddesc23_w1 = vaddq_u8(senddesc23_w1,
						 vshlq_n_u32(senddesc23_w1, 16));

			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
			/* Move ltypes to senddesc*_w1 */
			senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
			senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);

			/* Create first half of 4W cmd for 4 mbufs (sendhdr) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);

		} else {
			/* Just use ld1q to retrieve aura
			 * when we don't need tx_offload
			 */
			mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
					offsetof(struct rte_mempool, pool_id));
			mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
					offsetof(struct rte_mempool, pool_id));
			mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
					offsetof(struct rte_mempool, pool_id));
			mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
					offsetof(struct rte_mempool, pool_id));
			xmask01 = vdupq_n_u64(0);
			xmask23 = xmask01;
			asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");

			asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
				[a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");

			asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");

			asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
				[b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
			xmask01 = vshlq_n_u64(xmask01, 20);
			xmask23 = vshlq_n_u64(xmask23, 20);

			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);

			/* Create 4W cmd for 4 mbufs (sendhdr, sgdesc) */
			cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
			cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
			cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
			cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
			cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
			cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
			cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
		}
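
		/* Write the four 4-word commands to the LMT line and ring
		 * the doorbell; a zero status means the LMTST did not
		 * complete and the stores must be replayed.
		 */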
		do {
			vst1q_u64(lmt_addr, cmd00);
			vst1q_u64(lmt_addr + 2, cmd01);
			vst1q_u64(lmt_addr + 4, cmd10);
			vst1q_u64(lmt_addr + 6, cmd11);
			vst1q_u64(lmt_addr + 8, cmd20);
			vst1q_u64(lmt_addr + 10, cmd21);
			vst1q_u64(lmt_addr + 12, cmd30);
			vst1q_u64(lmt_addr + 14, cmd31);
			lmt_status = otx2_lmt_submit(io_addr);

		} while (lmt_status == 0);
	}

	if (unlikely(pkts_left))
		pkts += nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd, flags);

	return pkts;
}

#else
static __rte_always_inline uint16_t
nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
		     uint16_t pkts, uint64_t *cmd, const uint16_t flags)
{
	RTE_SET_USED(tx_queue);
	RTE_SET_USED(tx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(cmd);
	RTE_SET_USED(flags);
	return 0;
}

#endif
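
/* Generate one transmit function per offload-flag combination; with
 * "flags" a compile-time constant, the compiler drops the unused paths
 * from each variant.
 */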
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
static uint16_t __rte_noinline __hot					\
otx2_nix_xmit_pkts_ ## name(void *tx_queue,				\
			struct rte_mbuf **tx_pkts, uint16_t pkts)	\
{									\
	uint64_t cmd[sz];						\
									\
	/* For TSO inner checksum is a must */				\
	if (((flags) & NIX_TX_OFFLOAD_TSO_F) &&				\
	    !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))			\
		return 0;						\
	return nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, flags);	\
}

NIX_TX_FASTPATH_MODES
#undef T

#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
static uint16_t __rte_noinline __hot					\
otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue,			\
			struct rte_mbuf **tx_pkts, uint16_t pkts)	\
{									\
	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
									\
	/* For TSO inner checksum is a must */				\
	if (((flags) & NIX_TX_OFFLOAD_TSO_F) &&				\
	    !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F))			\
		return 0;						\
	return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd,	\
				  (flags) | NIX_TX_MULTI_SEG_F);	\
}

NIX_TX_FASTPATH_MODES
#undef T

#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
static uint16_t __rte_noinline __hot					\
otx2_nix_xmit_pkts_vec_ ## name(void *tx_queue,				\
			struct rte_mbuf **tx_pkts, uint16_t pkts)	\
{									\
	uint64_t cmd[sz];						\
									\
	/* VLAN, TSTAMP and TSO are not supported by vec */		\
	if ((flags) & NIX_TX_OFFLOAD_VLAN_QINQ_F ||			\
	    (flags) & NIX_TX_OFFLOAD_TSTAMP_F ||			\
	    (flags) & NIX_TX_OFFLOAD_TSO_F)				\
		return 0;						\
	return nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd, (flags)); \
}

NIX_TX_FASTPATH_MODES
#undef T

static inline void
pick_tx_func(struct rte_eth_dev *eth_dev,
	     const eth_tx_burst_t tx_burst[2][2][2][2][2][2])
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* [TSO] [TSTMP] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
	eth_dev->tx_pkt_burst = tx_burst
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
		[!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
}

void
otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
	[f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_ ## name,

NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
	[f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_mseg_ ## name,

NIX_TX_FASTPATH_MODES
#undef T
	};

	const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2] = {
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)			\
	[f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_vec_ ## name,

NIX_TX_FASTPATH_MODES
#undef T
	};
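
	/* Use the scalar path when explicitly requested, or when an
	 * offload the vector path cannot handle (VLAN, timestamp, TSO)
	 * is enabled.
	 */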
	if (dev->scalar_ena ||
	    (dev->tx_offload_flags &
	     (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F |
	      NIX_TX_OFFLOAD_TSO_F)))
		pick_tx_func(eth_dev, nix_eth_tx_burst);
	else
		pick_tx_func(eth_dev, nix_eth_tx_vec_burst);

	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
}