/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN10K_RX_H__
#define __CN10K_RX_H__

#include <rte_ether.h>
#include <rte_vect.h>

#include <cnxk_ethdev.h>
#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)
#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)
#define NIX_RX_OFFLOAD_MAX	     (NIX_RX_OFFLOAD_SECURITY_F << 1)
/* Flags to control cqe_to_mbuf conversion function.
 * Defined from the MSB end so that they are not mistaken
 * for the offload flags used to pick the Rx function.
 */
#define NIX_RX_REAS_F	   BIT(12)
#define NIX_RX_VWQE_F	   BIT(13)
#define NIX_RX_MULTI_SEG_F BIT(14)
#define CPT_RX_WQE_F	   BIT(15)
#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)

#define CQE_PTR_OFF(b, i, o, f)                                                \
	(((f) & NIX_RX_VWQE_F) ?                                               \
		 (uint64_t *)(((uintptr_t)((uint64_t *)(b))[i]) + (o)) :       \
		 (uint64_t *)(((uintptr_t)(b)) + CQE_SZ(i) + (o)))
#define CQE_PTR_DIFF(b, i, o, f)                                               \
	(((f) & NIX_RX_VWQE_F) ?                                               \
		 (uint64_t *)(((uintptr_t)((uint64_t *)(b))[i]) - (o)) :       \
		 (uint64_t *)(((uintptr_t)(b)) + CQE_SZ(i) - (o)))
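/* Addressing sketch for the two macros above (illustrative only): in CQ
 * mode entry `i` lives at a fixed 128B stride, so
 * CQE_PTR_OFF(b, 2, 64, 0) == (uint64_t *)((uintptr_t)(b) + 2 * 128 + 64),
 * i.e. the second cacheline of the third CQE. With NIX_RX_VWQE_F set, `b`
 * is instead an array of CQE/mbuf pointers and the same macro dereferences
 * ((uint64_t *)(b))[2] before applying the byte offset.
 */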
#define NIX_RX_SEC_UCC_CONST                                                   \
	((RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1) << 8 |                              \
	 ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1)    \
		 << 24 |                                                       \
	 ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1)     \
		 << 32 |                                                       \
	 ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1)    \
		 << 40 |                                                       \
	 ((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1)    \
		 << 48 |                                                       \
	 (RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1) << 56)
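/* NIX_RX_SEC_UCC_CONST packs a tiny byte-indexed table: byte N holds the
 * Rx checksum ol_flags (pre-shifted right by 1) for inline IPsec UCC
 * completion code 0xF0 + N. Consumer-side sketch (mirrors the use in
 * nix_sec_meta_to_mbuf_sc() below):
 *
 *	if ((ucc & 0xF0) == 0xF0)
 *		ol_flags |= ((NIX_RX_SEC_UCC_CONST >>
 *			      ((ucc & 0xF) << 3)) & 0xFF) << 1;
 *
 * e.g. ucc == 0xF1 selects byte 1 and yields RTE_MBUF_F_RX_IP_CKSUM_BAD.
 */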
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
static inline void
nix_mbuf_validate_next(struct rte_mbuf *m)
{
	if (m->nb_segs == 1 && m->next) {
		rte_panic("mbuf->next[%p] valid when mbuf->nb_segs is %d",
			  m->next, m->nb_segs);
	}
}
#else
static inline void
nix_mbuf_validate_next(struct rte_mbuf *m)
{
	RTE_SET_USED(m);
}
#endif
#define NIX_RX_SEC_REASSEMBLY_F \
	(NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F)

static inline rte_eth_ip_reassembly_dynfield_t *
cnxk_ip_reassembly_dynfield(struct rte_mbuf *mbuf,
			    int ip_reassembly_dynfield_offset)
{
	return RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
				 rte_eth_ip_reassembly_dynfield_t *);
}
union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};
static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}
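/* Illustrative usage of union mbuf_initializer: it lets the hot path patch
 * a single field of the 8B rearm word without shift/mask arithmetic, e.g.
 *
 *	union mbuf_initializer mi = { .value = rxq->mbuf_initializer };
 *	mi.fields.data_off = RTE_PKTMBUF_HEADROOM;
 *	uint64_t rearm = mi.value;
 *
 * nix_clear_data_off() above is exactly this pattern with data_off = 0.
 */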
static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}
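/* Layout assumed by nix_get_mbuf_from_cqe() (per the in-body comment): a
 * 128B CQE holds the CQE header, NIX_RX_PARSE_S and the SG header in its
 * first 9 dwords, so dword 9 is the first buffer IOVA. `data_off` is the
 * fixed distance from the mbuf start to that buffer address, hence the
 * subtraction recovers the mbuf pointer.
 */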
static __rte_always_inline void
nix_sec_flush_meta_burst(uint16_t lmt_id, uint64_t data, uint16_t lnum,
			 uintptr_t aura_handle)
{
	uint64_t pa;

	/* Prepare PA and Data */
	pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;
	pa |= ((data & 0x7) << 4);

	data >>= 3;
	data <<= 19;
	data |= (uint64_t)lmt_id;
	data |= (uint64_t)(lnum - 1) << 12;

	roc_lmt_submit_steorl(data, pa);
}
static __rte_always_inline void
nix_sec_flush_meta(uintptr_t laddr, uint16_t lmt_id, uint8_t loff,
		   uintptr_t aura_handle)
{
	uint64_t pa;

	/* laddr is pointing to first pointer */
	laddr -= 8;

	/* Trigger free either on lmtline full or different aura handle */
	pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;

	/* Update aura handle */
	*(uint64_t *)laddr = (((uint64_t)(loff & 0x1) << 32) |
			     roc_npa_aura_handle_to_aura(aura_handle));

	pa |= ((uint64_t)(loff >> 1) << 4);
	roc_lmt_submit_steorl(lmt_id, pa);
}
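/* Staging convention for the meta-free helpers above (illustrative only):
 * `laddr` points just past the 8B aura word of an LMT line, callers append
 * meta pointers at laddr + (loff << 3) and flush once per burst, e.g.
 *
 *	*(uint64_t *)(laddr + ((uint64_t)loff << 3)) = (uint64_t)meta;
 *	loff++;
 *	...
 *	nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
 *
 * which is the pattern cn10k_nix_recv_pkts() follows below.
 */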
static struct rte_mbuf *
nix_sec_attach_frags(const struct cpt_parse_hdr_s *hdr,
		     struct cn10k_inb_priv_data *inb_priv,
		     const uint64_t mbuf_init)
{
	struct rte_mbuf *head, *mbuf, *mbuf_prev;
	uint32_t offset = hdr->w2.fi_offset;
	union nix_rx_parse_u *frag_rx;
	struct cpt_frag_info_s *finfo;
	uint64_t *frag_ptr = NULL;
	uint64_t ol_flags;
	uint16_t frag_size;
	uint16_t rlen;
	uint64_t *wqe;
	int off;

	off = inb_priv->reass_dynfield_off;
	ol_flags = BIT_ULL(inb_priv->reass_dynflag_bit);
	ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;

	/* offset of 0 implies 256B, otherwise it implies offset*8B */
	offset = (((offset - 1) & 0x1f) + 1) * 8;
	finfo = RTE_PTR_ADD(hdr, offset + hdr->w2.fi_pad);

	wqe = (uint64_t *)(rte_be_to_cpu_64(hdr->wqe_ptr));
	rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

	frag_rx = (union nix_rx_parse_u *)(wqe + 1);
	frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
	frag_rx->pkt_lenm1 = frag_size - 1;

	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
	*(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
	mbuf->data_len = frag_size;
	mbuf->pkt_len = frag_size;
	mbuf->ol_flags = ol_flags;
	mbuf->next = NULL;
	head = mbuf;
	mbuf_prev = mbuf;
	/* Update dynamic field with userdata */
	*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

	cnxk_ip_reassembly_dynfield(head, off)->nb_frags = hdr->w0.num_frags - 1;
	cnxk_ip_reassembly_dynfield(head, off)->next_frag = NULL;

	if (hdr->w0.num_frags > 1) {
		wqe = (uint64_t *)(rte_be_to_cpu_64(hdr->frag1_wqe_ptr));
		rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

		frag_rx = (union nix_rx_parse_u *)(wqe + 1);
		frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
		frag_rx->pkt_lenm1 = frag_size - 1;

		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
					   sizeof(struct rte_mbuf));
		*(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
		mbuf->data_len = frag_size;
		mbuf->pkt_len = frag_size;
		mbuf->ol_flags = ol_flags;
		mbuf->next = NULL;

		/* Update dynamic field with userdata */
		*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

		cnxk_ip_reassembly_dynfield(mbuf, off)->nb_frags =
			hdr->w0.num_frags - 2;
		cnxk_ip_reassembly_dynfield(mbuf, off)->next_frag = NULL;
		cnxk_ip_reassembly_dynfield(mbuf_prev, off)->next_frag = mbuf;
		mbuf_prev = mbuf;
	}

	if (hdr->w0.num_frags > 2) {
		frag_ptr = (uint64_t *)(finfo + 1);
		wqe = (uint64_t *)(rte_be_to_cpu_64(*frag_ptr));
		rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

		frag_rx = (union nix_rx_parse_u *)(wqe + 1);
		frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
		frag_rx->pkt_lenm1 = frag_size - 1;

		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
					   sizeof(struct rte_mbuf));
		*(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
		mbuf->data_len = frag_size;
		mbuf->pkt_len = frag_size;
		mbuf->ol_flags = ol_flags;
		mbuf->next = NULL;

		/* Update dynamic field with userdata */
		*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

		cnxk_ip_reassembly_dynfield(mbuf, off)->nb_frags =
			hdr->w0.num_frags - 3;
		cnxk_ip_reassembly_dynfield(mbuf, off)->next_frag = NULL;
		cnxk_ip_reassembly_dynfield(mbuf_prev, off)->next_frag = mbuf;
		mbuf_prev = mbuf;
	}

	if (hdr->w0.num_frags > 3) {
		wqe = (uint64_t *)(rte_be_to_cpu_64(*(frag_ptr + 1)));
		rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

		frag_rx = (union nix_rx_parse_u *)(wqe + 1);
		frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
		frag_rx->pkt_lenm1 = frag_size - 1;

		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
					   sizeof(struct rte_mbuf));
		*(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
		mbuf->data_len = frag_size;
		mbuf->pkt_len = frag_size;
		mbuf->ol_flags = ol_flags;
		mbuf->next = NULL;

		/* Update dynamic field with userdata */
		*rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

		cnxk_ip_reassembly_dynfield(mbuf, off)->nb_frags =
			hdr->w0.num_frags - 4;
		cnxk_ip_reassembly_dynfield(mbuf, off)->next_frag = NULL;
		cnxk_ip_reassembly_dynfield(mbuf_prev, off)->next_frag = mbuf;
	}

	return head;
}
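/* Illustrative only: on reassembly failure the fragments above are handed
 * up as separate mbufs linked through the IP reassembly dynfield rather
 * than mbuf->next; an application would walk them roughly as:
 *
 *	rte_eth_ip_reassembly_dynfield_t *df =
 *		cnxk_ip_reassembly_dynfield(head, off);
 *	while (df->next_frag) {
 *		struct rte_mbuf *frag = df->next_frag;
 *		df = cnxk_ip_reassembly_dynfield(frag, off);
 *	}
 */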
static struct rte_mbuf *
nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, uint64_t cq_w1,
			 uint64_t cq_w5, uint64_t mbuf_init)
{
	uint32_t fragx_sum, pkt_hdr_len, l3_hdr_size;
	uint32_t offset = hdr->w2.fi_offset;
	union nix_rx_parse_u *inner_rx;
	uint16_t rlen, data_off, b_off;
	union nix_rx_parse_u *frag_rx;
	struct cpt_frag_info_s *finfo;
	struct rte_mbuf *head, *mbuf;
	uint64_t *frag_ptr = NULL;
	rte_iova_t *inner_iova;
	uint16_t frag_size;
	uint64_t *wqe;

	/* Base data offset */
	b_off = mbuf_init & 0xFFFFUL;
	mbuf_init &= ~0xFFFFUL;

	/* offset of 0 implies 256B, otherwise it implies offset*8B */
	offset = (((offset - 1) & 0x1f) + 1) * 8;
	finfo = RTE_PTR_ADD(hdr, offset + hdr->w2.fi_pad);

	wqe = (uint64_t *)rte_be_to_cpu_64(hdr->wqe_ptr);
	inner_rx = (union nix_rx_parse_u *)(wqe + 1);
	inner_iova = (rte_iova_t *)*(wqe + 9);

	/* Update only the upper 28-bits from meta pkt parse info */
	*((uint64_t *)inner_rx) = ((*((uint64_t *)inner_rx) & ((1ULL << 36) - 1)) |
				   (cq_w1 & ~((1ULL << 36) - 1)));

	rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;
	frag_size = rlen + ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
	fragx_sum = rte_be_to_cpu_16(finfo->w1.frag_size0);
	pkt_hdr_len = frag_size - fragx_sum;

	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
	*(uint64_t *)(&mbuf->rearm_data) = mbuf_init | b_off;
	mbuf->data_len = frag_size;
	head = mbuf;

	if (inner_rx->lctype == NPC_LT_LC_IP) {
		struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)
				RTE_PTR_ADD(inner_iova, inner_rx->lcptr);

		l3_hdr_size = (hdr->version_ihl & 0xf) << 2;
	} else {
		struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)
				RTE_PTR_ADD(inner_iova, inner_rx->lcptr);
		size_t ext_len = sizeof(struct rte_ipv6_hdr);
		uint8_t *nxt_hdr = (uint8_t *)hdr;
		int nh = hdr->proto;

		l3_hdr_size = 0;
		while (nh != -EINVAL) {
			nxt_hdr += ext_len;
			l3_hdr_size += ext_len;
			nh = rte_ipv6_get_next_ext(nxt_hdr, nh, &ext_len);
		}
	}

	wqe = (uint64_t *)(rte_be_to_cpu_64(hdr->frag1_wqe_ptr));
	frag_size = rte_be_to_cpu_16(finfo->w1.frag_size1);
	frag_rx = (union nix_rx_parse_u *)(wqe + 1);

	mbuf->next = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
	mbuf = mbuf->next;
	data_off = b_off + frag_rx->lcptr + l3_hdr_size;
	*(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;
	mbuf->data_len = frag_size;
	fragx_sum += frag_size;

	/* Mark frag as get */
	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

	if (hdr->w0.num_frags > 2) {
		frag_ptr = (uint64_t *)(finfo + 1);
		wqe = (uint64_t *)(rte_be_to_cpu_64(*frag_ptr));
		frag_size = rte_be_to_cpu_16(finfo->w1.frag_size2);
		frag_rx = (union nix_rx_parse_u *)(wqe + 1);

		mbuf->next = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
		mbuf = mbuf->next;
		data_off = b_off + frag_rx->lcptr + l3_hdr_size;
		*(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;
		mbuf->data_len = frag_size;
		fragx_sum += frag_size;

		/* Mark frag as get */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
	}

	if (hdr->w0.num_frags > 3) {
		wqe = (uint64_t *)(rte_be_to_cpu_64(*(frag_ptr + 1)));
		frag_size = rte_be_to_cpu_16(finfo->w1.frag_size3);
		frag_rx = (union nix_rx_parse_u *)(wqe + 1);

		mbuf->next = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
		mbuf = mbuf->next;
		data_off = b_off + frag_rx->lcptr + l3_hdr_size;
		*(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;
		mbuf->data_len = frag_size;
		fragx_sum += frag_size;

		/* Mark frag as get */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
	}

	if (inner_rx->lctype == NPC_LT_LC_IP) {
		/* Update the inner IPv4 header length and checksum */
		struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)
				RTE_PTR_ADD(inner_iova, inner_rx->lcptr);

		hdr->fragment_offset = 0;
		hdr->total_length = rte_cpu_to_be_16(fragx_sum + l3_hdr_size);
		hdr->hdr_checksum = 0;
		hdr->hdr_checksum = rte_ipv4_cksum(hdr);

		inner_rx->pkt_lenm1 = pkt_hdr_len + fragx_sum - 1;
	} else {
		/* Remove the frag header by moving header 8 bytes forward */
		struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)
				RTE_PTR_ADD(inner_iova, inner_rx->lcptr);

		hdr->payload_len = rte_cpu_to_be_16(fragx_sum + l3_hdr_size -
					8 - sizeof(struct rte_ipv6_hdr));

		rte_memcpy(rte_pktmbuf_mtod_offset(head, void *, 8),
			   rte_pktmbuf_mtod(head, void *),
			   inner_rx->lcptr + sizeof(struct rte_ipv6_hdr));

		inner_rx->pkt_lenm1 = pkt_hdr_len + fragx_sum - 8 - 1;
		head->data_len -= 8;
		head->data_off += 8;
	}
	mbuf->next = NULL;
	head->pkt_len = inner_rx->pkt_lenm1 + 1;
	head->nb_segs = hdr->w0.num_frags;

	return head;
}
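/* Length bookkeeping in nix_sec_reassemble_frags(): frag-0 is the only
 * fragment still carrying the packet headers, so
 * pkt_hdr_len = frag0_total_len - frag_size0. Per-fragment payload sizes
 * accumulate into fragx_sum, giving a final length of
 * pkt_hdr_len + fragx_sum (8B less for IPv6, where the fragment extension
 * header is stripped), stored as pkt_lenm1 (length minus one).
 */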
static __rte_always_inline struct rte_mbuf *
nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
			uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf,
			uint16_t data_off, const uint16_t flags,
			const uint64_t mbuf_init)
{
	const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
	const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
	struct cn10k_inb_priv_data *inb_priv;
	struct rte_mbuf *inner = NULL;
	uint32_t sa_idx;
	uint16_t ucc;
	uint32_t len;
	uintptr_t ip;
	void *inb_sa;
	uint64_t w0;

	if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11))) {
		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
		w0 = hdr->w0.u64;
		sa_idx = w0 >> 32;

		inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

		if (!hdr->w0.num_frags) {
			/* No Reassembly or inbound error */
			inner = (struct rte_mbuf *)
				(rte_be_to_cpu_64(hdr->wqe_ptr) -
				 sizeof(struct rte_mbuf));

			/* Update dynamic field with userdata */
			*rte_security_dynfield(inner) =
				(uint64_t)inb_priv->userdata;

			/* Get ucc from cpt parse header */
			ucc = hdr->w3.hw_ccode;

			/* Calculate inner packet length as
			 * IP total len + l2 len
			 */
			ip = (uintptr_t)hdr + ((cq_w5 >> 16) & 0xFF);
			ip += ((cq_w1 >> 40) & 0x6);
			len = rte_be_to_cpu_16(*(uint16_t *)ip);
			len += ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
			len += (cq_w1 & BIT(42)) ? 40 : 0;

			inner->pkt_len = len;
			inner->data_len = len;
			*(uint64_t *)(&inner->rearm_data) = mbuf_init;

			inner->ol_flags = ((ucc == CPT_COMP_WARN) ?
					   RTE_MBUF_F_RX_SEC_OFFLOAD :
					   (RTE_MBUF_F_RX_SEC_OFFLOAD |
					    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));

			ucc = hdr->w3.uc_ccode;
			inner->ol_flags |= ((ucc & 0xF0) == 0xF0) ?
				((NIX_RX_SEC_UCC_CONST >> ((ucc & 0xF) << 3))
				 & 0xFF) << 1 : 0;
		} else if (!(hdr->w0.err_sum) && !(hdr->w0.reas_sts)) {
			/* Reassembly success */
			inner = nix_sec_reassemble_frags(hdr, cq_w1, cq_w5,
							 mbuf_init);

			/* Update dynamic field with userdata */
			*rte_security_dynfield(inner) =
				(uint64_t)inb_priv->userdata;

			/* Assume success */
			inner->ol_flags = RTE_MBUF_F_RX_SEC_OFFLOAD;
		} else {
			/* Reassembly failure */
			inner = nix_sec_attach_frags(hdr, inb_priv, mbuf_init);
		}

		/* Store meta in lmtline to free
		 * Assume all meta's from same aura.
		 */
		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
		*loff = *loff + 1;

		/* Mark meta mbuf as put */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);

		/* Mark inner mbuf as get */
		RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);

		return inner;
	} else if (cq_w1 & BIT(11)) {
		inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
					    sizeof(struct rte_mbuf));

		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
		w0 = hdr->w0.u64;
		sa_idx = w0 >> 32;

		inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

		/* Update dynamic field with userdata */
		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;

		/* Get ucc from cpt parse header */
		ucc = hdr->w3.hw_ccode;

		/* Calculate inner packet length as IP total len + l2 len */
		ip = (uintptr_t)hdr + ((cq_w5 >> 16) & 0xFF);
		ip += ((cq_w1 >> 40) & 0x6);
		len = rte_be_to_cpu_16(*(uint16_t *)ip);
		len += ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
		len += (cq_w1 & BIT(42)) ? 40 : 0;

		inner->pkt_len = len;
		inner->data_len = len;
		*(uint64_t *)(&inner->rearm_data) = mbuf_init;

		inner->ol_flags = ((ucc == CPT_COMP_WARN) ?
				   RTE_MBUF_F_RX_SEC_OFFLOAD :
				   (RTE_MBUF_F_RX_SEC_OFFLOAD |
				    RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));

		ucc = hdr->w3.uc_ccode;
		inner->ol_flags |= ((ucc & 0xF0) == 0xF0) ?
			((NIX_RX_SEC_UCC_CONST >> ((ucc & 0xF) << 3))
			 & 0xFF) << 1 : 0;

		/* Store meta in lmtline to free
		 * Assume all meta's from same aura.
		 */
		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
		*loff = *loff + 1;

		/* Mark meta mbuf as put */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);

		/* Mark inner mbuf as get */
		RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);

		return inner;
	}

	return mbuf;
}
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline void
nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
		     uintptr_t cpth, struct rte_mbuf *inner,
		     uint8x16_t *rx_desc_field1, uint64_t *ol_flags,
		     const uint16_t flags, uint64x2_t *rearm)
{
	const struct cpt_parse_hdr_s *hdr =
		(const struct cpt_parse_hdr_s *)cpth;
	uint64_t mbuf_init = vgetq_lane_u64(*rearm, 0);
	struct cn10k_inb_priv_data *inb_priv;

	/* Clear checksum flags */
	*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |
		       RTE_MBUF_F_RX_IP_CKSUM_MASK);

	/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
	inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);

	/* Update dynamic field with userdata */
	*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;

	/* Mark inner mbuf as get */
	RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);

	if (flags & NIX_RX_REAS_F && hdr->w0.num_frags) {
		if (!(hdr->w0.err_sum) && !(hdr->w0.reas_sts)) {
			/* Reassembly success */
			nix_sec_reassemble_frags(hdr, cq_w1, cq_w5, mbuf_init);

			/* Assume success */
			*ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;

			/* Update pkt_len and data_len */
			*rx_desc_field1 = vsetq_lane_u16(inner->pkt_len,
							 *rx_desc_field1, 2);
			*rx_desc_field1 = vsetq_lane_u16(inner->data_len,
							 *rx_desc_field1, 4);

			/* Data offset might be updated */
			mbuf_init = *(uint64_t *)(&inner->rearm_data);
			*rearm = vsetq_lane_u64(mbuf_init, *rearm, 0);
		} else {
			/* Reassembly failure */
			nix_sec_attach_frags(hdr, inb_priv, mbuf_init);
			*ol_flags |= inner->ol_flags;

			/* Update pkt_len and data_len */
			*rx_desc_field1 = vsetq_lane_u16(inner->pkt_len,
							 *rx_desc_field1, 2);
			*rx_desc_field1 = vsetq_lane_u16(inner->data_len,
							 *rx_desc_field1, 4);
		}
	}
}
#endif
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
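/* Illustrative only: packet type resolution is two dependent lookups on
 * parse word w1, e.g.
 *
 *	const uint64_t w1 = *((const uint64_t *)cq + 1);
 *	uint32_t ptype = nix_ptype_get(rxq->lookup_mem, w1);
 *
 * where w1 bits [51:36] index the non-tunnel ltype table and bits [63:52]
 * index the tunnel ltype table.
 */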
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}
static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id is valid,
	 * nor a flag to tell an RTE_FLOW_ACTION_TYPE_FLAG action apart
	 * from an RTE_FLOW_ACTION_TYPE_MARK action. The former case is
	 * addressed by treating 0 as the invalid value and using an
	 * inc/dec match_id pair when MARK is activated. The latter case
	 * is addressed by defining CNXK_FLOW_MARK_DEFAULT as the value
	 * for RTE_FLOW_ACTION_TYPE_MARK. This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id, i.e. valid
	 * mark_id's are from 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= RTE_MBUF_F_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}
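/* Worked example (illustrative): a flow rule with action MARK id 5 is
 * programmed with match_id 6, so this helper reports
 * mbuf->hash.fdir.hi = 5 with RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID.
 * A match_id of CNXK_FLOW_ACTION_FLAG_DEFAULT (a FLAG action) sets only
 * RTE_MBUF_F_RX_FDIR.
 */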
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm, const uint16_t flags)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;

	if (nb_segs == 1 && !(flags & NIX_RX_SEC_REASSEMBLY_F)) {
		mbuf->next = NULL;
		return;
	}

	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->nb_segs = nb_segs;
	sg = sg >> 16;

	eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
	nb_segs--;

	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}
	mbuf->next = NULL;
}
static __rte_always_inline void
cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		      struct rte_mbuf *mbuf, const void *lookup_mem,
		      const uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	const uint64_t w1 = *(const uint64_t *)rx;
	uint16_t len = rx->pkt_lenm1 + 1;
	uint64_t ol_flags = 0;

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
	else
		mbuf->packet_type = 0;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}

	/* Skip rx ol flags extraction for Security packets */
	if ((!(flag & NIX_RX_SEC_REASSEMBLY_F) || !(w1 & BIT(11))) &&
	    flag & NIX_RX_OFFLOAD_CHECKSUM_F)
		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
		if (rx->vtag0_gone) {
			ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = rx->vtag0_tci;
		}
		if (rx->vtag1_gone) {
			ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = rx->vtag1_tci;
		}
	}

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);

	/* Packet data length and ol flags are already updated for sec */
	if (flag & NIX_RX_SEC_REASSEMBLY_F && w1 & BIT_ULL(11)) {
		mbuf->ol_flags |= ol_flags;
	} else {
		mbuf->ol_flags = ol_flags;
		mbuf->pkt_len = len;
		mbuf->data_len = len;
		*(uint64_t *)(&mbuf->rearm_data) = val;
	}

	if (flag & NIX_RX_MULTI_SEG_F)
		/*
		 * For multi segment packets, mbuf length correction according
		 * to Rx timestamp length will be handled later during
		 * timestamp data process.
		 * Hence, flag argument is not required.
		 */
		nix_cqe_xtract_mseg(rx, mbuf, val, 0);
}
static inline uint16_t
nix_rx_nb_pkts(struct cn10k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}
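/* Wrap-around example for the arithmetic above (illustrative): head/tail
 * are 20-bit ring indices from CQ_OP_STATUS. With qmask = 0xFFFFF,
 * head = 0xFFFFE and tail = 0x2, tail - head underflows, and
 * tail - head + qmask + 1 = 4 correctly counts the pending CQEs.
 */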
static __rte_always_inline void
cn10k_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
			 struct cnxk_timesync_info *tstamp,
			 const uint8_t ts_enable, uint64_t *tstamp_ptr)
{
	if (ts_enable) {
		mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
		mbuf->data_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;

		/* Reading the rx timestamp inserted by CGX, viz at
		 * starting of the packet data.
		 */
		*tstamp_ptr = ((*tstamp_ptr >> 32) * NSEC_PER_SEC) +
			      (*tstamp_ptr & 0xFFFFFFFFUL);
		*cnxk_nix_timestamp_dynfield(mbuf, tstamp) =
			rte_be_to_cpu_64(*tstamp_ptr);
		/* RTE_MBUF_F_RX_IEEE1588_TMST flag needs to be set only in case
		 * PTP packets are received.
		 */
		if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
			tstamp->rx_tstamp =
				*cnxk_nix_timestamp_dynfield(mbuf, tstamp);
			tstamp->rx_ready = 1;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
					  RTE_MBUF_F_RX_IEEE1588_TMST |
					  tstamp->rx_tstamp_dynflag;
		}
	}
}
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		    const uint16_t flags)
{
	struct cn10k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint64_t lbase = rxq->lmt_base;
	uint16_t packets = 0, nb_pkts;
	uint8_t loff = 0, lnum = 0;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;
	uint64_t aura_handle;
	uint64_t sa_base;
	uint16_t lmt_id;
	uint64_t laddr;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
		aura_handle = rxq->aura_handle;
		sa_base = rxq->sa_base;
		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
		laddr = lbase;
		laddr += 8;
	}

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

		/* Translate meta to mbuf */
		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
			const uint64_t cq_w5 = *((const uint64_t *)cq + 5);

			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr,
						       &loff, mbuf, data_off,
						       flags, mbuf_init);
		}

		cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				      flags);
		cn10k_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
					 (flags & NIX_RX_OFFLOAD_TSTAMP_F),
					 (uint64_t *)((uint8_t *)mbuf + data_off));
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;

		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			/* Flush when we don't have space for one more meta */
			if ((15 - loff) < 1) {
				nix_sec_flush_meta(laddr, lmt_id + lnum, loff,
						   aura_handle);
				lnum++;
				lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) -
					1;
				/* First pointer starts at 8B offset */
				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
				loff = 0;
			}
		}
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	/* Free remaining meta buffers if any */
	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff)
		nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);

	if (flags & NIX_RX_OFFLOAD_SECURITY_F)
		rte_io_wmb();

	return nb_pkts;
}
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
	if (w2 & BIT_ULL(21) /* vtag0_gone */) {
		ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
	}

	return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
	if (w2 & BIT_ULL(23) /* vtag1_gone */) {
		ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
		mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
	}

	return ol_flags;
}

#define NIX_PUSH_META_TO_FREE(_mbuf, _laddr, _loff_p)                          \
	do {                                                                   \
		*(uint64_t *)((_laddr) + (*(_loff_p) << 3)) = (uint64_t)_mbuf; \
		*(_loff_p) = *(_loff_p) + 1;                                   \
		/* Mark meta mbuf as put */                                    \
		RTE_MEMPOOL_CHECK_COOKIES(_mbuf->pool, (void **)&_mbuf, 1, 0); \
	} while (0)
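/* Usage sketch (illustrative only): the vector Rx path below stages one
 * meta pointer per decrypted packet and frees them to the aura in bulk:
 *
 *	if (cq0_w1 & BIT(11))
 *		NIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff);
 */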
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
			   const uint16_t flags, void *lookup_mem,
			   struct cnxk_timesync_info *tstamp,
			   uintptr_t lmt_base)
{
	struct cn10k_eth_rxq *rxq = args;
	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
						  *(uint64_t *)args :
						  rxq->mbuf_initializer;
	const uint64x2_t data_off = flags & NIX_RX_VWQE_F ?
					    vdupq_n_u64(RTE_PKTMBUF_HEADROOM) :
					    vdupq_n_u64(rxq->data_off);
	const uint32_t qmask = flags & NIX_RX_VWQE_F ? 0 : rxq->qmask;
	const uint64_t wdata = flags & NIX_RX_VWQE_F ? 0 : rxq->wdata;
	const uintptr_t desc = flags & NIX_RX_VWQE_F ? 0 : rxq->desc;
	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint64_t aura_handle, lbase, laddr;
	uint8_t loff = 0, lnum = 0, shft = 0;
	uint8x16_t f0, f1, f2, f3;
	uint16_t lmt_id, d_off;
	uint16_t packets = 0;
	uint16_t pkts_left;
	uintptr_t sa_base;
	uint32_t head;
	uintptr_t cq0;

	if (!(flags & NIX_RX_VWQE_F)) {
		lookup_mem = rxq->lookup_mem;
		head = rxq->head;

		pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
		pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
		/* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
		pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
			tstamp = rxq->tstamp;

		cq0 = desc + CQE_SZ(head);
		rte_prefetch0(CQE_PTR_OFF(cq0, 0, 64, flags));
		rte_prefetch0(CQE_PTR_OFF(cq0, 1, 64, flags));
		rte_prefetch0(CQE_PTR_OFF(cq0, 2, 64, flags));
		rte_prefetch0(CQE_PTR_OFF(cq0, 3, 64, flags));
	}

	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
		if (flags & NIX_RX_VWQE_F) {
			uint16_t port;

			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
						    sizeof(struct rte_mbuf));
			/* Pick first mbuf's aura handle assuming all
			 * mbufs are from a vec and are from same RQ.
			 */
			aura_handle = mbuf0->pool->pool_id;
			/* Calculate offset from mbuf to actual data area */
			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
			d_off += (mbuf_initializer & 0xFFFF);

			/* Get SA Base from lookup tbl using port_id */
			port = mbuf_initializer >> 48;
			sa_base = cnxk_nix_sa_base_get(port, lookup_mem);

			lbase = lmt_base;
		} else {
			aura_handle = rxq->aura_handle;
			d_off = rxq->data_off;
			sa_base = rxq->sa_base;
			lbase = rxq->lmt_base;
		}
		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
		lnum = 0;
		laddr = lbase;
		laddr += 8;
	}
	while (packets < pkts) {
		if (!(flags & NIX_RX_VWQE_F)) {
			/* Exit loop if head is about to wrap and become
			 * unaligned.
			 */
			if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
			    NIX_DESCS_PER_LOOP) {
				pkts_left += (pkts - packets);
				break;
			}

			cq0 = desc + CQE_SZ(head);
		} else {
			cq0 = (uintptr_t)&mbufs[packets];
		}

		if (flags & NIX_RX_VWQE_F) {
			if (pkts - packets > 4) {
				rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
					4, 0, flags));
				rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
					5, 0, flags));
				rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
					6, 0, flags));
				rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
					7, 0, flags));

				if (likely(pkts - packets > 8)) {
					rte_prefetch1(CQE_PTR_OFF(cq0,
						8, 0, flags));
					rte_prefetch1(CQE_PTR_OFF(cq0,
						9, 0, flags));
					rte_prefetch1(CQE_PTR_OFF(cq0,
						10, 0, flags));
					rte_prefetch1(CQE_PTR_OFF(cq0,
						11, 0, flags));
					if (pkts - packets > 12) {
						rte_prefetch1(CQE_PTR_OFF(cq0,
							12, 0, flags));
						rte_prefetch1(CQE_PTR_OFF(cq0,
							13, 0, flags));
						rte_prefetch1(CQE_PTR_OFF(cq0,
							14, 0, flags));
						rte_prefetch1(CQE_PTR_OFF(cq0,
							15, 0, flags));
					}
				}

				rte_prefetch0(CQE_PTR_DIFF(cq0,
					4, RTE_PKTMBUF_HEADROOM, flags));
				rte_prefetch0(CQE_PTR_DIFF(cq0,
					5, RTE_PKTMBUF_HEADROOM, flags));
				rte_prefetch0(CQE_PTR_DIFF(cq0,
					6, RTE_PKTMBUF_HEADROOM, flags));
				rte_prefetch0(CQE_PTR_DIFF(cq0,
					7, RTE_PKTMBUF_HEADROOM, flags));

				if (likely(pkts - packets > 8)) {
					rte_prefetch0(CQE_PTR_DIFF(cq0,
						8, RTE_PKTMBUF_HEADROOM, flags));
					rte_prefetch0(CQE_PTR_DIFF(cq0,
						9, RTE_PKTMBUF_HEADROOM, flags));
					rte_prefetch0(CQE_PTR_DIFF(cq0,
						10, RTE_PKTMBUF_HEADROOM, flags));
					rte_prefetch0(CQE_PTR_DIFF(cq0,
						11, RTE_PKTMBUF_HEADROOM, flags));
				}
			}
		} else {
			if (flags & NIX_RX_OFFLOAD_SECURITY_F &&
			    pkts - packets > 4) {
				/* Fetch cpt parse header */
				void *p0 =
					(void *)*CQE_PTR_OFF(cq0, 4, 72, flags);
				void *p1 =
					(void *)*CQE_PTR_OFF(cq0, 5, 72, flags);
				void *p2 =
					(void *)*CQE_PTR_OFF(cq0, 6, 72, flags);
				void *p3 =
					(void *)*CQE_PTR_OFF(cq0, 7, 72, flags);
				rte_prefetch0(p0);
				rte_prefetch0(p1);
				rte_prefetch0(p2);
				rte_prefetch0(p3);
			}

			if (pkts - packets > 8) {
				if (flags) {
					rte_prefetch0(CQE_PTR_OFF(cq0, 8, 0, flags));
					rte_prefetch0(CQE_PTR_OFF(cq0, 9, 0, flags));
					rte_prefetch0(CQE_PTR_OFF(cq0, 10, 0, flags));
					rte_prefetch0(CQE_PTR_OFF(cq0, 11, 0, flags));
				}
				rte_prefetch0(CQE_PTR_OFF(cq0, 8, 64, flags));
				rte_prefetch0(CQE_PTR_OFF(cq0, 9, 64, flags));
				rte_prefetch0(CQE_PTR_OFF(cq0, 10, 64, flags));
				rte_prefetch0(CQE_PTR_OFF(cq0, 11, 64, flags));
			}
		}
		if (!(flags & NIX_RX_VWQE_F)) {
			/* Get NIX_RX_SG_S for size and buffer pointer */
			cq0_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 0, 64, flags));
			cq1_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 1, 64, flags));
			cq2_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 2, 64, flags));
			cq3_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 3, 64, flags));

			/* Extract mbuf from NIX_RX_SG_S */
			mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
			mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
			mbuf01 = vqsubq_u64(mbuf01, data_off);
			mbuf23 = vqsubq_u64(mbuf23, data_off);
		} else {
			mbuf01 =
				vsubq_u64(vld1q_u64((uint64_t *)cq0), data_off);
			mbuf23 = vsubq_u64(vld1q_u64((uint64_t *)(cq0 + 16)),
					   data_off);
		}

		/* Move mbufs to scalar registers for future use */
		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

		if (!(flags & NIX_RX_VWQE_F)) {
			/* Mask to get packet len from NIX_RX_SG_S */
			const uint8x16_t shuf_msk = {
				0xFF, 0xFF, /* pkt_type set as unknown */
				0xFF, 0xFF, /* pkt_type set as unknown */
				0, 1,	    /* octet 1~0, low 16 bits pkt_len */
				0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
				0, 1,	    /* octet 1~0, 16 bits data_len */
				0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

			/* Form the rx_descriptor_fields1 with pkt_len and data_len */
			f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
			f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
			f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
			f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
		}

		/* Load CQE word0 and word 1 */
		const uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);
		const uint64_t cq0_w1 = *CQE_PTR_OFF(cq0, 0, 8, flags);
		const uint64_t cq0_w2 = *CQE_PTR_OFF(cq0, 0, 16, flags);
		const uint64_t cq1_w0 = *CQE_PTR_OFF(cq0, 1, 0, flags);
		const uint64_t cq1_w1 = *CQE_PTR_OFF(cq0, 1, 8, flags);
		const uint64_t cq1_w2 = *CQE_PTR_OFF(cq0, 1, 16, flags);
		const uint64_t cq2_w0 = *CQE_PTR_OFF(cq0, 2, 0, flags);
		const uint64_t cq2_w1 = *CQE_PTR_OFF(cq0, 2, 8, flags);
		const uint64_t cq2_w2 = *CQE_PTR_OFF(cq0, 2, 16, flags);
		const uint64_t cq3_w0 = *CQE_PTR_OFF(cq0, 3, 0, flags);
		const uint64_t cq3_w1 = *CQE_PTR_OFF(cq0, 3, 8, flags);
		const uint64_t cq3_w2 = *CQE_PTR_OFF(cq0, 3, 16, flags);

		if (flags & NIX_RX_VWQE_F) {
			uint16_t psize0, psize1, psize2, psize3;

			psize0 = (cq0_w2 & 0xFFFF) + 1;
			psize1 = (cq1_w2 & 0xFFFF) + 1;
			psize2 = (cq2_w2 & 0xFFFF) + 1;
			psize3 = (cq3_w2 & 0xFFFF) + 1;

			f0 = vdupq_n_u64(0);
			f1 = vdupq_n_u64(0);
			f2 = vdupq_n_u64(0);
			f3 = vdupq_n_u64(0);

			f0 = vsetq_lane_u16(psize0, f0, 2);
			f0 = vsetq_lane_u16(psize0, f0, 4);

			f1 = vsetq_lane_u16(psize1, f1, 2);
			f1 = vsetq_lane_u16(psize1, f1, 4);

			f2 = vsetq_lane_u16(psize2, f2, 2);
			f2 = vsetq_lane_u16(psize2, f2, 4);

			f3 = vsetq_lane_u16(psize3, f3, 2);
			f3 = vsetq_lane_u16(psize3, f3, 4);
		}
		if (flags & NIX_RX_OFFLOAD_RSS_F) {
			/* Fill rss in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
			ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
		} else {
			ol_flags0 = 0;
			ol_flags1 = 0;
			ol_flags2 = 0;
			ol_flags3 = 0;
		}

		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
			/* Fill packet_type in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
					    f0, 0);
			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
					    f1, 0);
			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
					    f2, 0);
			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
					    f3, 0);
		}

		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
		}

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
		/* Translate meta to mbuf */
		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			uint64_t cq0_w5 = *CQE_PTR_OFF(cq0, 0, 40, flags);
			uint64_t cq1_w5 = *CQE_PTR_OFF(cq0, 1, 40, flags);
			uint64_t cq2_w5 = *CQE_PTR_OFF(cq0, 2, 40, flags);
			uint64_t cq3_w5 = *CQE_PTR_OFF(cq0, 3, 40, flags);
			uintptr_t cpth0 = (uintptr_t)mbuf0 + d_off;
			uintptr_t cpth1 = (uintptr_t)mbuf1 + d_off;
			uintptr_t cpth2 = (uintptr_t)mbuf2 + d_off;
			uintptr_t cpth3 = (uintptr_t)mbuf3 + d_off;

			uint64x2_t inner0, inner1, inner2, inner3;
			uint64x2_t wqe01, wqe23, sa01, sa23;
			uint16x4_t lens, l2lens, ltypes;
			uint8x8_t ucc;

			inner0 = vld1q_u64((const uint64_t *)cpth0);
			inner1 = vld1q_u64((const uint64_t *)cpth1);
			inner2 = vld1q_u64((const uint64_t *)cpth2);
			inner3 = vld1q_u64((const uint64_t *)cpth3);

			/* Extract and reverse wqe pointers */
			wqe01 = vzip2q_u64(inner0, inner1);
			wqe23 = vzip2q_u64(inner2, inner3);
			wqe01 = vrev64q_u8(wqe01);
			wqe23 = vrev64q_u8(wqe23);
			/* Adjust wqe pointers to point to mbuf */
			wqe01 = vsubq_u64(wqe01,
					  vdupq_n_u64(sizeof(struct rte_mbuf)));
			wqe23 = vsubq_u64(wqe23,
					  vdupq_n_u64(sizeof(struct rte_mbuf)));

			/* Extract sa idx from cookie area and add to sa_base */
			sa01 = vzip1q_u64(inner0, inner1);
			sa23 = vzip1q_u64(inner2, inner3);

			sa01 = vshrq_n_u64(sa01, 32);
			sa23 = vshrq_n_u64(sa23, 32);
			sa01 = vshlq_n_u64(sa01,
					   ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
			sa23 = vshlq_n_u64(sa23,
					   ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
			sa01 = vaddq_u64(sa01, vdupq_n_u64(sa_base));
			sa23 = vaddq_u64(sa23, vdupq_n_u64(sa_base));

			const uint8x16_t tbl = {
				/* ROC_IE_OT_UCC_SUCCESS_SA_SOFTEXP_FIRST */
				0,
				/* ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM */
				RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
				/* ROC_IE_OT_UCC_SUCCESS_SA_SOFTEXP_AGAIN */
				0,
				/* ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM */
				(RTE_MBUF_F_RX_IP_CKSUM_GOOD |
				 RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
				/* ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM */
				(RTE_MBUF_F_RX_IP_CKSUM_GOOD |
				 RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
				/* ROC_IE_OT_UCC_SUCCESS_PKT_UDPESP_NZCSUM */
				(RTE_MBUF_F_RX_IP_CKSUM_GOOD |
				 RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
				/* ROC_IE_OT_UCC_SUCCESS_PKT_UDP_ZEROCSUM */
				(RTE_MBUF_F_RX_IP_CKSUM_GOOD |
				 RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
				/* ROC_IE_OT_UCC_SUCCESS_PKT_IP_GOODCSUM */
				RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1,
				/* HW_CCODE -> RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED */
				1, 0, 1, 1, 1, 1, 0, 1,
			};

			const int8x8_t err_off = {
				/* UCC of significance starts from 0xF0 */
				0xF0,
				/* Move HW_CCODE from 0:6 -> 8:14 */
				-8,
				0xF0,
				-8,
				0xF0,
				-8,
				0xF0,
				-8,
			};

			ucc = vset_lane_u16(*(uint16_t *)(cpth0 + 30), ucc, 0);
			ucc = vset_lane_u16(*(uint16_t *)(cpth1 + 30), ucc, 1);
			ucc = vset_lane_u16(*(uint16_t *)(cpth2 + 30), ucc, 2);
			ucc = vset_lane_u16(*(uint16_t *)(cpth3 + 30), ucc, 3);
			ucc = vsub_s8(ucc, err_off);
			ucc = vqtbl1_u8(tbl, ucc);

			RTE_BUILD_BUG_ON(NPC_LT_LC_IP != 2);
			RTE_BUILD_BUG_ON(NPC_LT_LC_IP_OPT != 3);
			RTE_BUILD_BUG_ON(NPC_LT_LC_IP6 != 4);
			RTE_BUILD_BUG_ON(NPC_LT_LC_IP6_EXT != 5);

			ltypes = vdup_n_u16(0);
			ltypes = vset_lane_u16((cq0_w1 >> 40) & 0x6, ltypes, 0);
			ltypes = vset_lane_u16((cq1_w1 >> 40) & 0x6, ltypes, 1);
			ltypes = vset_lane_u16((cq2_w1 >> 40) & 0x6, ltypes, 2);
			ltypes = vset_lane_u16((cq3_w1 >> 40) & 0x6, ltypes, 3);

			/* Extract and reverse l3 length from IPv4/IPv6 hdr
			 * that is in same cacheline most probably as cpth.
			 */
			cpth0 += ((cq0_w5 >> 16) & 0xFF) +
				 vget_lane_u16(ltypes, 0);
			cpth1 += ((cq1_w5 >> 16) & 0xFF) +
				 vget_lane_u16(ltypes, 1);
			cpth2 += ((cq2_w5 >> 16) & 0xFF) +
				 vget_lane_u16(ltypes, 2);
			cpth3 += ((cq3_w5 >> 16) & 0xFF) +
				 vget_lane_u16(ltypes, 3);
			lens = vdup_n_u16(0);
			lens = vset_lane_u16(*(uint16_t *)cpth0, lens, 0);
			lens = vset_lane_u16(*(uint16_t *)cpth1, lens, 1);
			lens = vset_lane_u16(*(uint16_t *)cpth2, lens, 2);
			lens = vset_lane_u16(*(uint16_t *)cpth3, lens, 3);
			lens = vrev16_u8(lens);

			/* Add l2 length to l3 lengths */
			l2lens = vdup_n_u16(0);
			l2lens = vset_lane_u16(((cq0_w5 >> 16) & 0xFF) -
						       (cq0_w5 & 0xFF),
					       l2lens, 0);
			l2lens = vset_lane_u16(((cq1_w5 >> 16) & 0xFF) -
						       (cq1_w5 & 0xFF),
					       l2lens, 1);
			l2lens = vset_lane_u16(((cq2_w5 >> 16) & 0xFF) -
						       (cq2_w5 & 0xFF),
					       l2lens, 2);
			l2lens = vset_lane_u16(((cq3_w5 >> 16) & 0xFF) -
						       (cq3_w5 & 0xFF),
					       l2lens, 3);
			lens = vadd_u16(lens, l2lens);

			/* L3 header adjust */
			const int8x8_t l3adj = {
				0, 0, 0, 0, 40, 0, 0, 0,
			};
			lens = vadd_u16(lens, vtbl1_u8(l3adj, ltypes));

			/* Initialize rearm data when reassembly is enabled as
			 * data offset might change.
			 */
			if (flags & NIX_RX_REAS_F) {
				rearm0 = vdupq_n_u64(mbuf_initializer);
				rearm1 = vdupq_n_u64(mbuf_initializer);
				rearm2 = vdupq_n_u64(mbuf_initializer);
				rearm3 = vdupq_n_u64(mbuf_initializer);
			}

			/* Checksum ol_flags will be cleared if mbuf is meta */
			if (cq0_w1 & BIT(11)) {
				uintptr_t wqe = vgetq_lane_u64(wqe01, 0);
				uintptr_t sa = vgetq_lane_u64(sa01, 0);
				uint16_t len = vget_lane_u16(lens, 0);

				cpth0 = (uintptr_t)mbuf0 + d_off;
				/* Free meta to aura */
				NIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff);
				mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0);
				mbuf0 = (struct rte_mbuf *)wqe;

				/* Update pkt_len and data_len */
				f0 = vsetq_lane_u16(len, f0, 2);
				f0 = vsetq_lane_u16(len, f0, 4);

				nix_sec_meta_to_mbuf(cq0_w1, cq0_w5, sa, cpth0,
						     mbuf0, &f0, &ol_flags0,
						     flags, &rearm0);
				ol_flags0 |= ((uint64_t)vget_lane_u8(ucc, 0))
					     << 1;
				ol_flags0 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |
					(uint64_t)vget_lane_u8(ucc, 1) << 19);
			}

			if (cq1_w1 & BIT(11)) {
				uintptr_t wqe = vgetq_lane_u64(wqe01, 1);
				uintptr_t sa = vgetq_lane_u64(sa01, 1);
				uint16_t len = vget_lane_u16(lens, 1);

				cpth1 = (uintptr_t)mbuf1 + d_off;
				/* Free meta to aura */
				NIX_PUSH_META_TO_FREE(mbuf1, laddr, &loff);
				mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1);
				mbuf1 = (struct rte_mbuf *)wqe;

				/* Update pkt_len and data_len */
				f1 = vsetq_lane_u16(len, f1, 2);
				f1 = vsetq_lane_u16(len, f1, 4);

				nix_sec_meta_to_mbuf(cq1_w1, cq1_w5, sa, cpth1,
						     mbuf1, &f1, &ol_flags1,
						     flags, &rearm1);
				ol_flags1 |= ((uint64_t)vget_lane_u8(ucc, 2))
					     << 1;
				ol_flags1 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |
					(uint64_t)vget_lane_u8(ucc, 3) << 19);
			}

			if (cq2_w1 & BIT(11)) {
				uintptr_t wqe = vgetq_lane_u64(wqe23, 0);
				uintptr_t sa = vgetq_lane_u64(sa23, 0);
				uint16_t len = vget_lane_u16(lens, 2);

				cpth2 = (uintptr_t)mbuf2 + d_off;
				/* Free meta to aura */
				NIX_PUSH_META_TO_FREE(mbuf2, laddr, &loff);
				mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0);
				mbuf2 = (struct rte_mbuf *)wqe;

				/* Update pkt_len and data_len */
				f2 = vsetq_lane_u16(len, f2, 2);
				f2 = vsetq_lane_u16(len, f2, 4);

				nix_sec_meta_to_mbuf(cq2_w1, cq2_w5, sa, cpth2,
						     mbuf2, &f2, &ol_flags2,
						     flags, &rearm2);
				ol_flags2 |= ((uint64_t)vget_lane_u8(ucc, 4))
					     << 1;
				ol_flags2 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |
					(uint64_t)vget_lane_u8(ucc, 5) << 19);
			}

			if (cq3_w1 & BIT(11)) {
				uintptr_t wqe = vgetq_lane_u64(wqe23, 1);
				uintptr_t sa = vgetq_lane_u64(sa23, 1);
				uint16_t len = vget_lane_u16(lens, 3);

				cpth3 = (uintptr_t)mbuf3 + d_off;
				/* Free meta to aura */
				NIX_PUSH_META_TO_FREE(mbuf3, laddr, &loff);
				mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1);
				mbuf3 = (struct rte_mbuf *)wqe;

				/* Update pkt_len and data_len */
				f3 = vsetq_lane_u16(len, f3, 2);
				f3 = vsetq_lane_u16(len, f3, 4);

				nix_sec_meta_to_mbuf(cq3_w1, cq3_w5, sa, cpth3,
						     mbuf3, &f3, &ol_flags3,
						     flags, &rearm3);
				ol_flags3 |= ((uint64_t)vget_lane_u8(ucc, 6))
					     << 1;
				ol_flags3 |= (RTE_MBUF_F_RX_SEC_OFFLOAD |
					(uint64_t)vget_lane_u8(ucc, 7) << 19);
			}
		}
		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
			ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
			ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

			ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
			ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
			ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
			ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
			ol_flags0 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 0, 38, flags),
				ol_flags0, mbuf0);
			ol_flags1 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 1, 38, flags),
				ol_flags1, mbuf1);
			ol_flags2 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 2, 38, flags),
				ol_flags2, mbuf2);
			ol_flags3 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 3, 38, flags),
				ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
			const uint16x8_t len_off = {
				0,			     /* ptype   0:15 */
				0,			     /* ptype  16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15 */
				0,			     /* pktlen 16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
				0,
				0,
				0};
			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC};
			const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
						RTE_MBUF_F_RX_IEEE1588_TMST |
						tstamp->rx_tstamp_dynflag;
			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
			uint64x2_t ts01, ts23, mask;
			uint64_t ts[4];
			uint8_t res;

			/* Subtract timesync length from total pkt length. */
			f0 = vsubq_u16(f0, len_off);
			f1 = vsubq_u16(f1, len_off);
			f2 = vsubq_u16(f2, len_off);
			f3 = vsubq_u16(f3, len_off);

			/* Get the address of actual timestamp. */
			ts01 = vaddq_u64(mbuf01, data_off);
			ts23 = vaddq_u64(mbuf23, data_off);
			/* Load timestamp from address. */
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  0),
					      ts01, 0);
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  1),
					      ts01, 1);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  0),
					      ts23, 0);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  1),
					      ts23, 1);
			/* Convert from be to cpu byteorder. */
			ts01 = vrev64q_u8(ts01);
			ts23 = vrev64q_u8(ts23);
			/* Store timestamp into scalar for later use. */
			ts[0] = vgetq_lane_u64(ts01, 0);
			ts[1] = vgetq_lane_u64(ts01, 1);
			ts[2] = vgetq_lane_u64(ts23, 0);
			ts[3] = vgetq_lane_u64(ts23, 1);

			/* Store timestamp into dynfield. */
			*cnxk_nix_timestamp_dynfield(mbuf0, tstamp) = ts[0];
			*cnxk_nix_timestamp_dynfield(mbuf1, tstamp) = ts[1];
			*cnxk_nix_timestamp_dynfield(mbuf2, tstamp) = ts[2];
			*cnxk_nix_timestamp_dynfield(mbuf3, tstamp) = ts[3];

			/* Generate ptype mask to filter L2 ether timesync */
			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

			/* Match against L2 ether timesync. */
			mask = vceqq_u32(mask, ptype);
			/* Convert from vector to scalar mask */
			res = vaddvq_u32(vandq_u32(mask, and_mask));
			res &= 0xF;

			if (res) {
				/* Fill in the ol_flags for any packets that
				 * matched.
				 */
				ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
				ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
				ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
				ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

				/* Update Rxq timestamp with the latest
				 * timestamp.
				 */
				tstamp->rx_ready = 1;
				tstamp->rx_tstamp = ts[31 - __builtin_clz(res)];
			}
		}
		/* Form rearm_data with ol_flags */
		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

		/* Update rx_descriptor_fields1 */
		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

		/* Update rearm_data */
		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

		if (flags & NIX_RX_MULTI_SEG_F) {
			/* Multi segment is enabled, build mseg list for
			 * individual mbufs in scalar mode.
			 */
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 0, 8, flags)),
					    mbuf0, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 1, 8, flags)),
					    mbuf1, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 2, 8, flags)),
					    mbuf2, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 3, 8, flags)),
					    mbuf3, mbuf_initializer, flags);
		}

		/* Store the mbufs to rx_pkts */
		vst1q_u64((uint64_t *)&mbufs[packets], mbuf01);
		vst1q_u64((uint64_t *)&mbufs[packets + 2], mbuf23);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);

		nix_mbuf_validate_next(mbuf0);
		nix_mbuf_validate_next(mbuf1);
		nix_mbuf_validate_next(mbuf2);
		nix_mbuf_validate_next(mbuf3);

		packets += NIX_DESCS_PER_LOOP;
		if (!(flags & NIX_RX_VWQE_F)) {
			/* Advance head pointer and packets */
			head += NIX_DESCS_PER_LOOP;
			head &= qmask;
		}

		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			/* Check if lmtline border is crossed and adjust lnum */
			if (loff > 15) {
				/* Update aura handle */
				*(uint64_t *)(laddr - 8) =
					(((uint64_t)(15 & 0x1) << 32) |
					 roc_npa_aura_handle_to_aura(aura_handle));
				loff = loff - 15;
				shft += 3;

				lnum++;
				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
				/* Pick the pointer from 16th index and put it
				 * at end of this new line.
				 */
				*(uint64_t *)(laddr + (loff << 3) - 8) =
					*(uint64_t *)(laddr - 8);
			}

			/* Flush it when we are in 16th line and might
			 * overflow it.
			 */
			if (lnum >= 15 && loff >= 12) {
				/* 16 LMT Line size m1 */
				uint64_t data = BIT_ULL(48) - 1;

				/* Update aura handle */
				*(uint64_t *)(laddr - 8) =
					(((uint64_t)(loff & 0x1) << 32) |
					 roc_npa_aura_handle_to_aura(aura_handle));

				data = (data & ~(0x7UL << shft)) |
				       (((uint64_t)loff >> 1) << shft);

				/* Send up to 16 lmt lines of pointers */
				nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
							 aura_handle);
				rte_io_wmb();
				lnum = 0;
				loff = 0;
				shft = 0;
				/* First pointer starts at 8B offset */
				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
			}
		}
	}

	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
		/* 16 LMT Line size m1 */
		uint64_t data = BIT_ULL(48) - 1;

		/* Update aura handle */
		*(uint64_t *)(laddr - 8) =
			(((uint64_t)(loff & 0x1) << 32) |
			 roc_npa_aura_handle_to_aura(aura_handle));

		data = (data & ~(0x7UL << shft)) |
		       (((uint64_t)loff >> 1) << shft);

		/* Send up to 16 lmt lines of pointers */
		nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
		if (flags & NIX_RX_VWQE_F)
			plt_io_wmb();
	}

	if (flags & NIX_RX_VWQE_F)
		return packets;

	rxq->head = head;
	rxq->available -= packets;

	rte_io_wmb();
	/* Free all the CQs that we've processed */
	plt_write64((rxq->wdata | packets), rxq->cq_door);

	if (unlikely(pkts_left))
		packets += cn10k_nix_recv_pkts(args, &mbufs[packets], pkts_left,
					       flags);

	return packets;
}

#else
static inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
			   const uint16_t flags, void *lookup_mem,
			   struct cnxk_timesync_info *tstamp,
			   uintptr_t lmt_base)
{
	RTE_SET_USED(args);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);
	RTE_SET_USED(flags);
	RTE_SET_USED(lookup_mem);
	RTE_SET_USED(tstamp);
	RTE_SET_USED(lmt_base);

	return 0;
}

#endif
#define RSS_F	  NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F	  NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
#define R_SEC_F	  NIX_RX_OFFLOAD_SECURITY_F
/* [R_SEC_F] [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES_0_15                                             \
	R(no_offload, NIX_RX_OFFLOAD_NONE)                                     \
	R(rss, RSS_F)                                                          \
	R(ptype, PTYPE_F)                                                      \
	R(ptype_rss, PTYPE_F | RSS_F)                                          \
	R(cksum, CKSUM_F)                                                      \
	R(cksum_rss, CKSUM_F | RSS_F)                                          \
	R(cksum_ptype, CKSUM_F | PTYPE_F)                                      \
	R(cksum_ptype_rss, CKSUM_F | PTYPE_F | RSS_F)                          \
	R(mark, MARK_F)                                                        \
	R(mark_rss, MARK_F | RSS_F)                                            \
	R(mark_ptype, MARK_F | PTYPE_F)                                        \
	R(mark_ptype_rss, MARK_F | PTYPE_F | RSS_F)                            \
	R(mark_cksum, MARK_F | CKSUM_F)                                        \
	R(mark_cksum_rss, MARK_F | CKSUM_F | RSS_F)                            \
	R(mark_cksum_ptype, MARK_F | CKSUM_F | PTYPE_F)                        \
	R(mark_cksum_ptype_rss, MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_16_31                                            \
	R(ts, TS_F)                                                            \
	R(ts_rss, TS_F | RSS_F)                                                \
	R(ts_ptype, TS_F | PTYPE_F)                                            \
	R(ts_ptype_rss, TS_F | PTYPE_F | RSS_F)                                \
	R(ts_cksum, TS_F | CKSUM_F)                                            \
	R(ts_cksum_rss, TS_F | CKSUM_F | RSS_F)                                \
	R(ts_cksum_ptype, TS_F | CKSUM_F | PTYPE_F)                            \
	R(ts_cksum_ptype_rss, TS_F | CKSUM_F | PTYPE_F | RSS_F)                \
	R(ts_mark, TS_F | MARK_F)                                              \
	R(ts_mark_rss, TS_F | MARK_F | RSS_F)                                  \
	R(ts_mark_ptype, TS_F | MARK_F | PTYPE_F)                              \
	R(ts_mark_ptype_rss, TS_F | MARK_F | PTYPE_F | RSS_F)                  \
	R(ts_mark_cksum, TS_F | MARK_F | CKSUM_F)                              \
	R(ts_mark_cksum_rss, TS_F | MARK_F | CKSUM_F | RSS_F)                  \
	R(ts_mark_cksum_ptype, TS_F | MARK_F | CKSUM_F | PTYPE_F)              \
	R(ts_mark_cksum_ptype_rss, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_32_47                                            \
	R(vlan, RX_VLAN_F)                                                     \
	R(vlan_rss, RX_VLAN_F | RSS_F)                                         \
	R(vlan_ptype, RX_VLAN_F | PTYPE_F)                                     \
	R(vlan_ptype_rss, RX_VLAN_F | PTYPE_F | RSS_F)                         \
	R(vlan_cksum, RX_VLAN_F | CKSUM_F)                                     \
	R(vlan_cksum_rss, RX_VLAN_F | CKSUM_F | RSS_F)                         \
	R(vlan_cksum_ptype, RX_VLAN_F | CKSUM_F | PTYPE_F)                     \
	R(vlan_cksum_ptype_rss, RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)         \
	R(vlan_mark, RX_VLAN_F | MARK_F)                                       \
	R(vlan_mark_rss, RX_VLAN_F | MARK_F | RSS_F)                           \
	R(vlan_mark_ptype, RX_VLAN_F | MARK_F | PTYPE_F)                       \
	R(vlan_mark_ptype_rss, RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)           \
	R(vlan_mark_cksum, RX_VLAN_F | MARK_F | CKSUM_F)                       \
	R(vlan_mark_cksum_rss, RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)           \
	R(vlan_mark_cksum_ptype, RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)       \
	R(vlan_mark_cksum_ptype_rss,                                           \
	  RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_48_63                                            \
	R(vlan_ts, RX_VLAN_F | TS_F)                                           \
	R(vlan_ts_rss, RX_VLAN_F | TS_F | RSS_F)                               \
	R(vlan_ts_ptype, RX_VLAN_F | TS_F | PTYPE_F)                           \
	R(vlan_ts_ptype_rss, RX_VLAN_F | TS_F | PTYPE_F | RSS_F)               \
	R(vlan_ts_cksum, RX_VLAN_F | TS_F | CKSUM_F)                           \
	R(vlan_ts_cksum_rss, RX_VLAN_F | TS_F | CKSUM_F | RSS_F)               \
	R(vlan_ts_cksum_ptype, RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)           \
	R(vlan_ts_cksum_ptype_rss,                                             \
	  RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)                        \
	R(vlan_ts_mark, RX_VLAN_F | TS_F | MARK_F)                             \
	R(vlan_ts_mark_rss, RX_VLAN_F | TS_F | MARK_F | RSS_F)                 \
	R(vlan_ts_mark_ptype, RX_VLAN_F | TS_F | MARK_F | PTYPE_F)             \
	R(vlan_ts_mark_ptype_rss, RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
	R(vlan_ts_mark_cksum, RX_VLAN_F | TS_F | MARK_F | CKSUM_F)             \
	R(vlan_ts_mark_cksum_rss, RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
	R(vlan_ts_mark_cksum_ptype,                                            \
	  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)                       \
	R(vlan_ts_mark_cksum_ptype_rss,                                        \
	  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_64_79                                            \
	R(sec, R_SEC_F)                                                        \
	R(sec_rss, R_SEC_F | RSS_F)                                            \
	R(sec_ptype, R_SEC_F | PTYPE_F)                                        \
	R(sec_ptype_rss, R_SEC_F | PTYPE_F | RSS_F)                            \
	R(sec_cksum, R_SEC_F | CKSUM_F)                                        \
	R(sec_cksum_rss, R_SEC_F | CKSUM_F | RSS_F)                            \
	R(sec_cksum_ptype, R_SEC_F | CKSUM_F | PTYPE_F)                        \
	R(sec_cksum_ptype_rss, R_SEC_F | CKSUM_F | PTYPE_F | RSS_F)            \
	R(sec_mark, R_SEC_F | MARK_F)                                          \
	R(sec_mark_rss, R_SEC_F | MARK_F | RSS_F)                              \
	R(sec_mark_ptype, R_SEC_F | MARK_F | PTYPE_F)                          \
	R(sec_mark_ptype_rss, R_SEC_F | MARK_F | PTYPE_F | RSS_F)              \
	R(sec_mark_cksum, R_SEC_F | MARK_F | CKSUM_F)                          \
	R(sec_mark_cksum_rss, R_SEC_F | MARK_F | CKSUM_F | RSS_F)              \
	R(sec_mark_cksum_ptype, R_SEC_F | MARK_F | CKSUM_F | PTYPE_F)          \
	R(sec_mark_cksum_ptype_rss,                                            \
	  R_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_80_95                                            \
	R(sec_ts, R_SEC_F | TS_F)                                              \
	R(sec_ts_rss, R_SEC_F | TS_F | RSS_F)                                  \
	R(sec_ts_ptype, R_SEC_F | TS_F | PTYPE_F)                              \
	R(sec_ts_ptype_rss, R_SEC_F | TS_F | PTYPE_F | RSS_F)                  \
	R(sec_ts_cksum, R_SEC_F | TS_F | CKSUM_F)                              \
	R(sec_ts_cksum_rss, R_SEC_F | TS_F | CKSUM_F | RSS_F)                  \
	R(sec_ts_cksum_ptype, R_SEC_F | TS_F | CKSUM_F | PTYPE_F)              \
	R(sec_ts_cksum_ptype_rss, R_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)  \
	R(sec_ts_mark, R_SEC_F | TS_F | MARK_F)                                \
	R(sec_ts_mark_rss, R_SEC_F | TS_F | MARK_F | RSS_F)                    \
	R(sec_ts_mark_ptype, R_SEC_F | TS_F | MARK_F | PTYPE_F)                \
	R(sec_ts_mark_ptype_rss, R_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F)    \
	R(sec_ts_mark_cksum, R_SEC_F | TS_F | MARK_F | CKSUM_F)                \
	R(sec_ts_mark_cksum_rss, R_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F)    \
	R(sec_ts_mark_cksum_ptype,                                             \
	  R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)                         \
	R(sec_ts_mark_cksum_ptype_rss,                                         \
	  R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_96_111                                           \
	R(sec_vlan, R_SEC_F | RX_VLAN_F)                                       \
	R(sec_vlan_rss, R_SEC_F | RX_VLAN_F | RSS_F)                           \
	R(sec_vlan_ptype, R_SEC_F | RX_VLAN_F | PTYPE_F)                       \
	R(sec_vlan_ptype_rss, R_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F)           \
	R(sec_vlan_cksum, R_SEC_F | RX_VLAN_F | CKSUM_F)                       \
	R(sec_vlan_cksum_rss, R_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F)           \
	R(sec_vlan_cksum_ptype, R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F)       \
	R(sec_vlan_cksum_ptype_rss,                                            \
	  R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)                     \
	R(sec_vlan_mark, R_SEC_F | RX_VLAN_F | MARK_F)                         \
	R(sec_vlan_mark_rss, R_SEC_F | RX_VLAN_F | MARK_F | RSS_F)             \
	R(sec_vlan_mark_ptype, R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F)         \
	R(sec_vlan_mark_ptype_rss,                                             \
	  R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)                      \
	R(sec_vlan_mark_cksum, R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F)         \
	R(sec_vlan_mark_cksum_rss,                                             \
	  R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)                      \
	R(sec_vlan_mark_cksum_ptype,                                           \
	  R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)                    \
	R(sec_vlan_mark_cksum_ptype_rss,                                       \
	  R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_112_127                                          \
	R(sec_vlan_ts, R_SEC_F | RX_VLAN_F | TS_F)                             \
	R(sec_vlan_ts_rss, R_SEC_F | RX_VLAN_F | TS_F | RSS_F)                 \
	R(sec_vlan_ts_ptype, R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F)             \
	R(sec_vlan_ts_ptype_rss, R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
	R(sec_vlan_ts_cksum, R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F)             \
	R(sec_vlan_ts_cksum_rss, R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
	R(sec_vlan_ts_cksum_ptype,                                             \
	  R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)                      \
	R(sec_vlan_ts_cksum_ptype_rss,                                         \
	  R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)              \
	R(sec_vlan_ts_mark, R_SEC_F | RX_VLAN_F | TS_F | MARK_F)               \
	R(sec_vlan_ts_mark_rss, R_SEC_F | RX_VLAN_F | TS_F | MARK_F | RSS_F)   \
	R(sec_vlan_ts_mark_ptype,                                              \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F)                       \
	R(sec_vlan_ts_mark_ptype_rss,                                          \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)               \
	R(sec_vlan_ts_mark_cksum,                                              \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F)                       \
	R(sec_vlan_ts_mark_cksum_rss,                                          \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)               \
	R(sec_vlan_ts_mark_cksum_ptype,                                        \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)             \
	R(sec_vlan_ts_mark_cksum_ptype_rss,                                    \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES                                                  \
	NIX_RX_FASTPATH_MODES_0_15                                             \
	NIX_RX_FASTPATH_MODES_16_31                                            \
	NIX_RX_FASTPATH_MODES_32_47                                            \
	NIX_RX_FASTPATH_MODES_48_63                                            \
	NIX_RX_FASTPATH_MODES_64_79                                            \
	NIX_RX_FASTPATH_MODES_80_95                                            \
	NIX_RX_FASTPATH_MODES_96_111                                           \
	NIX_RX_FASTPATH_MODES_112_127
#define R(name, flags)                                                         \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name(          \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name(     \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_##name(      \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_##name(     \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_mseg_##name(\
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_vec_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_vec_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R
#define NIX_RX_RECV(fn, flags)                                                 \
	uint16_t __rte_noinline __rte_hot fn(                                  \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
	{                                                                      \
		return cn10k_nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));  \
	}

#define NIX_RX_RECV_MSEG(fn, flags) NIX_RX_RECV(fn, flags | NIX_RX_MULTI_SEG_F)

#define NIX_RX_RECV_VEC(fn, flags)                                             \
	uint16_t __rte_noinline __rte_hot fn(                                  \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)      \
	{                                                                      \
		return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
						  (flags), NULL, NULL, 0);     \
	}

#define NIX_RX_RECV_VEC_MSEG(fn, flags)                                        \
	NIX_RX_RECV_VEC(fn, flags | NIX_RX_MULTI_SEG_F)
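/* Illustrative only (hypothetical fast-path source file): one burst
 * function per offload combination is materialized by redefining R() in
 * terms of these wrappers, e.g.
 *
 *	#define R(name, flags) NIX_RX_RECV(cn10k_nix_recv_pkts_##name, flags)
 *	NIX_RX_FASTPATH_MODES_0_15
 *	#undef R
 *
 * which expands to cn10k_nix_recv_pkts_no_offload() and friends.
 */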
#endif /* __CN10K_RX_H__ */