/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN10K_RX_H__
#define __CN10K_RX_H__

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE          (0)
#define NIX_RX_OFFLOAD_RSS_F         BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F       BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F      BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)
#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)
#define NIX_RX_OFFLOAD_MAX           (NIX_RX_OFFLOAD_SECURITY_F << 1)
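/*
 * Example (illustrative, not part of the driver): each Rx fast-path variant
 * is described by OR-ing the offload bits above into a compile-time constant
 * that is passed as the 'flags' argument of the receive functions below:
 *
 *   const uint16_t flags = NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_PTYPE_F |
 *                          NIX_RX_OFFLOAD_CHECKSUM_F;
 *
 * This combination matches the "cksum_ptype_rss" entry of
 * NIX_RX_FASTPATH_MODES at the end of this file.
 */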
/* Flags to control the cqe_to_mbuf conversion function.
 * Defined from the MSB end so that they are never used
 * as offload flags when picking the Rx function.
 */
#define NIX_RX_REAS_F      BIT(12)
#define NIX_RX_VWQE_F      BIT(13)
#define NIX_RX_MULTI_SEG_F BIT(14)
#define CPT_RX_WQE_F       BIT(15)
#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)          ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)

#define CQE_PTR_OFF(b, i, o, f)                                          \
    (((f) & NIX_RX_VWQE_F) ?                                             \
         (uint64_t *)(((uintptr_t)((uint64_t *)(b))[i]) + (o)) :         \
         (uint64_t *)(((uintptr_t)(b)) + CQE_SZ(i) + (o)))
#define CQE_PTR_DIFF(b, i, o, f)                                         \
    (((f) & NIX_RX_VWQE_F) ?                                             \
         (uint64_t *)(((uintptr_t)((uint64_t *)(b))[i]) - (o)) :         \
         (uint64_t *)(((uintptr_t)(b)) + CQE_SZ(i) - (o)))
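/*
 * Example (illustrative): CQE_PTR_OFF() hides the two addressing modes.
 * Without NIX_RX_VWQE_F, 'b' is the base of a contiguous CQ ring and entry
 * 'i' starts at byte offset CQE_SZ(i); with NIX_RX_VWQE_F, 'b' is an array
 * of packet pointers and entry 'i' is dereferenced instead:
 *
 *   // CQ ring: 64-bit word at byte 8 of the 3rd CQE
 *   uint64_t w1 = *CQE_PTR_OFF(ring_base, 2, 8, 0);
 *   // Vector WQE: same word, reached through the pointer array
 *   uint64_t v1 = *CQE_PTR_OFF(ptr_array, 2, 8, NIX_RX_VWQE_F);
 *
 * 'ring_base' and 'ptr_array' are hypothetical names.
 */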
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
static inline void
nix_mbuf_validate_next(struct rte_mbuf *m)
{
    if (m->nb_segs == 1 && m->next)
        rte_panic("mbuf->next[%p] valid when mbuf->nb_segs is %d",
                  m->next, m->nb_segs);
}
#else
static inline void
nix_mbuf_validate_next(struct rte_mbuf *m)
{
    RTE_SET_USED(m);
}
#endif

#define NIX_RX_SEC_REASSEMBLY_F (NIX_RX_REAS_F | NIX_RX_OFFLOAD_SECURITY_F)

static inline rte_eth_ip_reassembly_dynfield_t *
cnxk_ip_reassembly_dynfield(struct rte_mbuf *mbuf,
                            int ip_reassembly_dynfield_offset)
{
    return RTE_MBUF_DYNFIELD(mbuf, ip_reassembly_dynfield_offset,
                             rte_eth_ip_reassembly_dynfield_t *);
}
union mbuf_initializer {
    struct {
        uint16_t data_off;
        uint16_t refcnt;
        uint16_t nb_segs;
        uint16_t port;
    } fields;
    uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
    union mbuf_initializer mbuf_init = {.value = oldval};

    mbuf_init.fields.data_off = 0;
    return mbuf_init.value;
}
static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
    rte_iova_t buff;

    /* Skip CQE, NIX_RX_PARSE_S and SG HDR (9 DWORDs) and peek buff addr */
    buff = *((rte_iova_t *)((uint64_t *)cq + 9));
    return (struct rte_mbuf *)(buff - data_off);
}
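/*
 * Example (illustrative): NIX places the packet 'data_off' bytes past the
 * struct rte_mbuf, so subtracting 'data_off' from the buffer IOVA found in
 * SG word 9 recovers the mbuf pointer:
 *
 *   struct rte_mbuf *m = nix_get_mbuf_from_cqe(cq, rxq->data_off);
 *
 * where 'rxq' is the receive queue the CQE was taken from.
 */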
static __rte_always_inline void
nix_sec_flush_meta_burst(uint16_t lmt_id, uint64_t data, uint16_t lnum,
                         uintptr_t aura_handle)
{
    uint64_t pa;

    /* Prepare PA and Data */
    pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;
    pa |= ((data & 0x7) << 4);

    data |= (uint64_t)lmt_id;
    data |= (uint64_t)(lnum - 1) << 12;

    roc_lmt_submit_steorl(data, pa);
}

static __rte_always_inline void
nix_sec_flush_meta(uintptr_t laddr, uint16_t lmt_id, uint8_t loff,
                   uintptr_t aura_handle)
{
    uint64_t pa;

    /* laddr is pointing to first pointer */
    laddr -= 8;

    /* Trigger free either on lmtline full or different aura handle */
    pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;

    /* Update aura handle */
    *(uint64_t *)laddr = (((uint64_t)(loff & 0x1) << 32) |
                          roc_npa_aura_handle_to_aura(aura_handle));

    pa |= ((uint64_t)(loff >> 1) << 4);
    roc_lmt_submit_steorl(lmt_id, pa);
}
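/*
 * Example (illustrative, assuming the batch-free layout used above): one
 * LMT flush line begins with a control word (count parity in the upper
 * half, aura in the low bits) written at laddr - 8, followed by the meta
 * buffer pointers accumulated from laddr onwards:
 *
 *   *(uint64_t *)(laddr + ((uint64_t)loff << 3)) = (uint64_t)meta_mbuf;
 *   loff++;
 *   ...
 *   nix_sec_flush_meta(laddr, lmt_id, loff, aura_handle);
 *
 * 'meta_mbuf' is a hypothetical pointer taken from a processed CQE.
 */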
static struct rte_mbuf *
nix_sec_attach_frags(const struct cpt_parse_hdr_s *hdr,
                     struct cn10k_inb_priv_data *inb_priv,
                     const uint64_t mbuf_init)
{
    struct rte_mbuf *head, *mbuf, *mbuf_prev;
    uint32_t offset = hdr->w2.fi_offset;
    union nix_rx_parse_u *frag_rx;
    struct cpt_frag_info_s *finfo;
    uint64_t *frag_ptr = NULL;
    uint16_t frag_size;
    uint64_t ol_flags;
    uint16_t rlen;
    uint64_t *wqe;
    int off;

    off = inb_priv->reass_dynfield_off;
    ol_flags = BIT_ULL(inb_priv->reass_dynflag_bit);
    ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;

    /* offset of 0 implies 256B, otherwise it implies offset*8B */
    offset = (((offset - 1) & 0x1f) + 1) * 8;
    finfo = RTE_PTR_ADD(hdr, offset + hdr->w2.fi_pad);

    /* Frag-0 */
    wqe = (uint64_t *)(rte_be_to_cpu_64(hdr->wqe_ptr));
    rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

    frag_rx = (union nix_rx_parse_u *)(wqe + 1);
    frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
    frag_rx->pkt_lenm1 = frag_size - 1;

    mbuf = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
    *(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
    mbuf->data_len = frag_size;
    mbuf->pkt_len = frag_size;
    mbuf->ol_flags = ol_flags;
    head = mbuf;
    mbuf_prev = mbuf;

    /* Update dynamic field with userdata */
    *rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

    cnxk_ip_reassembly_dynfield(head, off)->nb_frags = hdr->w0.num_frags - 1;
    cnxk_ip_reassembly_dynfield(head, off)->next_frag = NULL;

    /* Frag-1 */
    if (hdr->w0.num_frags > 1) {
        wqe = (uint64_t *)(rte_be_to_cpu_64(hdr->frag1_wqe_ptr));
        rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

        frag_rx = (union nix_rx_parse_u *)(wqe + 1);
        frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
        frag_rx->pkt_lenm1 = frag_size - 1;

        mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
                                   sizeof(struct rte_mbuf));
        *(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
        mbuf->data_len = frag_size;
        mbuf->pkt_len = frag_size;
        mbuf->ol_flags = ol_flags;

        /* Update dynamic field with userdata */
        *rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

        cnxk_ip_reassembly_dynfield(mbuf, off)->nb_frags =
            hdr->w0.num_frags - 2;
        cnxk_ip_reassembly_dynfield(mbuf, off)->next_frag = NULL;
        cnxk_ip_reassembly_dynfield(mbuf_prev, off)->next_frag = mbuf;
        mbuf_prev = mbuf;
    }

    /* Frag-2 */
    if (hdr->w0.num_frags > 2) {
        frag_ptr = (uint64_t *)(finfo + 1);
        wqe = (uint64_t *)(rte_be_to_cpu_64(*frag_ptr));
        rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

        frag_rx = (union nix_rx_parse_u *)(wqe + 1);
        frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
        frag_rx->pkt_lenm1 = frag_size - 1;

        mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
                                   sizeof(struct rte_mbuf));
        *(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
        mbuf->data_len = frag_size;
        mbuf->pkt_len = frag_size;
        mbuf->ol_flags = ol_flags;

        /* Update dynamic field with userdata */
        *rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

        cnxk_ip_reassembly_dynfield(mbuf, off)->nb_frags =
            hdr->w0.num_frags - 3;
        cnxk_ip_reassembly_dynfield(mbuf, off)->next_frag = NULL;
        cnxk_ip_reassembly_dynfield(mbuf_prev, off)->next_frag = mbuf;
        mbuf_prev = mbuf;
    }

    /* Frag-3 */
    if (hdr->w0.num_frags > 3) {
        wqe = (uint64_t *)(rte_be_to_cpu_64(*(frag_ptr + 1)));
        rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;

        frag_rx = (union nix_rx_parse_u *)(wqe + 1);
        frag_size = rlen + frag_rx->lcptr - frag_rx->laptr;
        frag_rx->pkt_lenm1 = frag_size - 1;

        mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
                                   sizeof(struct rte_mbuf));
        *(uint64_t *)(&mbuf->rearm_data) = mbuf_init;
        mbuf->data_len = frag_size;
        mbuf->pkt_len = frag_size;
        mbuf->ol_flags = ol_flags;

        /* Update dynamic field with userdata */
        *rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;

        cnxk_ip_reassembly_dynfield(mbuf, off)->nb_frags =
            hdr->w0.num_frags - 4;
        cnxk_ip_reassembly_dynfield(mbuf, off)->next_frag = NULL;
        cnxk_ip_reassembly_dynfield(mbuf_prev, off)->next_frag = mbuf;
    }

    return head;
}
static struct rte_mbuf *
nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, uint64_t cq_w1,
                         uint64_t cq_w5, uint64_t mbuf_init)
{
    uint32_t fragx_sum, pkt_hdr_len, l3_hdr_size;
    uint32_t offset = hdr->w2.fi_offset;
    union nix_rx_parse_u *inner_rx;
    uint16_t rlen, data_off, b_off;
    union nix_rx_parse_u *frag_rx;
    struct cpt_frag_info_s *finfo;
    struct rte_mbuf *head, *mbuf;
    uint64_t *frag_ptr = NULL;
    rte_iova_t *inner_iova;
    uint16_t frag_size;
    uint64_t *wqe;

    /* Base data offset */
    b_off = mbuf_init & 0xFFFFUL;
    mbuf_init &= ~0xFFFFUL;

    /* offset of 0 implies 256B, otherwise it implies offset*8B */
    offset = (((offset - 1) & 0x1f) + 1) * 8;
    finfo = RTE_PTR_ADD(hdr, offset + hdr->w2.fi_pad);

    /* Frag-0 */
    wqe = (uint64_t *)rte_be_to_cpu_64(hdr->wqe_ptr);
    inner_rx = (union nix_rx_parse_u *)(wqe + 1);
    inner_iova = (rte_iova_t *)*(wqe + 9);

    /* Update only the upper 28 bits from meta pkt parse info */
    *((uint64_t *)inner_rx) = ((*((uint64_t *)inner_rx) & ((1ULL << 36) - 1)) |
                               (cq_w1 & ~((1ULL << 36) - 1)));

    rlen = ((*(wqe + 10)) >> 16) & 0xFFFF;
    frag_size = rlen + ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
    fragx_sum = rte_be_to_cpu_16(finfo->w1.frag_size0);
    pkt_hdr_len = frag_size - fragx_sum;

    mbuf = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
    *(uint64_t *)(&mbuf->rearm_data) = mbuf_init | b_off;
    mbuf->data_len = frag_size;
    head = mbuf;

    /* Get the L3 header length */
    if (inner_rx->lctype == NPC_LT_LC_IP) {
        struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)
            RTE_PTR_ADD(inner_iova, inner_rx->lcptr);

        l3_hdr_size = (hdr->version_ihl & 0xf) << 2;
    } else {
        struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)
            RTE_PTR_ADD(inner_iova, inner_rx->lcptr);
        size_t ext_len = sizeof(struct rte_ipv6_hdr);
        uint8_t *nxt_hdr = (uint8_t *)hdr;
        int nh = hdr->proto;

        l3_hdr_size = 0;
        while (nh != -EINVAL) {
            nxt_hdr += ext_len;
            l3_hdr_size += ext_len;
            nh = rte_ipv6_get_next_ext(nxt_hdr, nh, &ext_len);
        }
    }

    /* Frag-1 */
    wqe = (uint64_t *)(rte_be_to_cpu_64(hdr->frag1_wqe_ptr));
    frag_size = rte_be_to_cpu_16(finfo->w1.frag_size1);
    frag_rx = (union nix_rx_parse_u *)(wqe + 1);

    mbuf->next = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
    mbuf = mbuf->next;
    data_off = b_off + frag_rx->lcptr + l3_hdr_size;
    *(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;
    mbuf->data_len = frag_size;
    fragx_sum += frag_size;

    /* Frag-2 */
    if (hdr->w0.num_frags > 2) {
        frag_ptr = (uint64_t *)(finfo + 1);
        wqe = (uint64_t *)(rte_be_to_cpu_64(*frag_ptr));
        frag_size = rte_be_to_cpu_16(finfo->w1.frag_size2);
        frag_rx = (union nix_rx_parse_u *)(wqe + 1);

        mbuf->next = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
        mbuf = mbuf->next;
        data_off = b_off + frag_rx->lcptr + l3_hdr_size;
        *(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;
        mbuf->data_len = frag_size;
        fragx_sum += frag_size;
    }

    /* Frag-3 */
    if (hdr->w0.num_frags > 3) {
        wqe = (uint64_t *)(rte_be_to_cpu_64(*(frag_ptr + 1)));
        frag_size = rte_be_to_cpu_16(finfo->w1.frag_size3);
        frag_rx = (union nix_rx_parse_u *)(wqe + 1);

        mbuf->next = (struct rte_mbuf *)((uintptr_t)wqe - sizeof(struct rte_mbuf));
        mbuf = mbuf->next;
        data_off = b_off + frag_rx->lcptr + l3_hdr_size;
        *(uint64_t *)(&mbuf->rearm_data) = mbuf_init | data_off;
        mbuf->data_len = frag_size;
        fragx_sum += frag_size;
    }

    /* Fix up the inner IP header of the reassembled packet */
    if (inner_rx->lctype == NPC_LT_LC_IP) {
        struct rte_ipv4_hdr *hdr = (struct rte_ipv4_hdr *)
            RTE_PTR_ADD(inner_iova, inner_rx->lcptr);

        hdr->fragment_offset = 0;
        hdr->total_length = rte_cpu_to_be_16(fragx_sum + l3_hdr_size);
        hdr->hdr_checksum = 0;
        hdr->hdr_checksum = rte_ipv4_cksum(hdr);

        inner_rx->pkt_lenm1 = pkt_hdr_len + fragx_sum - 1;
    } else {
        /* Remove the frag header by moving header 8 bytes forward */
        struct rte_ipv6_hdr *hdr = (struct rte_ipv6_hdr *)
            RTE_PTR_ADD(inner_iova, inner_rx->lcptr);

        hdr->payload_len = rte_cpu_to_be_16(fragx_sum + l3_hdr_size -
                                            8 - sizeof(struct rte_ipv6_hdr));

        rte_memcpy(rte_pktmbuf_mtod_offset(head, void *, 8),
                   rte_pktmbuf_mtod(head, void *),
                   inner_rx->lcptr + sizeof(struct rte_ipv6_hdr));

        inner_rx->pkt_lenm1 = pkt_hdr_len + fragx_sum - 8 - 1;
    }

    head->pkt_len = inner_rx->pkt_lenm1 + 1;
    head->nb_segs = hdr->w0.num_frags;

    return head;
}
static __rte_always_inline struct rte_mbuf *
nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
                        uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf,
                        uint16_t data_off, const uint16_t flags,
                        const uint64_t mbuf_init)
{
    const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
    const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
    struct cn10k_inb_priv_data *inb_priv;
    struct rte_mbuf *inner = NULL;
    uintptr_t inb_sa;
    uint64_t res_w1;
    uint32_t sa_idx;
    uint16_t uc_cc;
    uint32_t len;
    uint64_t w0;

    if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11))) {
        /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
        w0 = hdr->w0.u64;
        sa_idx = w0 >> 32;

        inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
        inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

        if (!hdr->w0.num_frags) {
            /* No Reassembly or inbound error */
            inner = (struct rte_mbuf *)
                (rte_be_to_cpu_64(hdr->wqe_ptr) -
                 sizeof(struct rte_mbuf));

            /* Update dynamic field with userdata */
            *rte_security_dynfield(inner) =
                (uint64_t)inb_priv->userdata;

            /* CPT result (struct cpt_cn10k_res_s) is after
             * the first IOVA in meta
             */
            res_w1 = *((uint64_t *)(&inner[1]) + 10);
            uc_cc = res_w1 & 0xFF;

            /* Calculate inner packet length */
            len = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -
                sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);
            inner->pkt_len = len;
            inner->data_len = len;
            *(uint64_t *)(&inner->rearm_data) = mbuf_init;

            inner->ol_flags = ((uc_cc == CPT_COMP_WARN) ?
                               RTE_MBUF_F_RX_SEC_OFFLOAD :
                               (RTE_MBUF_F_RX_SEC_OFFLOAD |
                                RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
        } else if (!(hdr->w0.err_sum) && !(hdr->w0.reas_sts)) {
            /* Reassembly success */
            inner = nix_sec_reassemble_frags(hdr, cq_w1, cq_w5,
                                             mbuf_init);

            /* Update dynamic field with userdata */
            *rte_security_dynfield(inner) =
                (uint64_t)inb_priv->userdata;

            inner->ol_flags = RTE_MBUF_F_RX_SEC_OFFLOAD;
        } else {
            /* Reassembly failure */
            inner = nix_sec_attach_frags(hdr, inb_priv, mbuf_init);
        }

        /* Store meta in lmtline to free
         * Assume all metas are from the same aura.
         */
        *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
        *loff = *loff + 1;

        return inner;
    } else if (cq_w1 & BIT(11)) {
        inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
                                    sizeof(struct rte_mbuf));

        /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
        w0 = hdr->w0.u64;
        sa_idx = w0 >> 32;

        inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
        inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

        /* Update dynamic field with userdata */
        *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;

        /* CPT result (struct cpt_cn10k_res_s) is after
         * the first IOVA in meta
         */
        res_w1 = *((uint64_t *)(&inner[1]) + 10);
        uc_cc = res_w1 & 0xFF;

        /* Calculate inner packet length */
        len = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -
            sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);
        inner->pkt_len = len;
        inner->data_len = len;
        *(uint64_t *)(&inner->rearm_data) = mbuf_init;

        inner->ol_flags = ((uc_cc == CPT_COMP_WARN) ?
                           RTE_MBUF_F_RX_SEC_OFFLOAD :
                           (RTE_MBUF_F_RX_SEC_OFFLOAD |
                            RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));

        /* Store meta in lmtline to free
         * Assume all metas are from the same aura.
         */
        *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
        *loff = *loff + 1;

        /* Mark meta mbuf as put */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);

        /* Mark inner mbuf as get */
        RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);

        return inner;
    }

    return mbuf;
}
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline struct rte_mbuf *
nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t sa_base,
                     uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf,
                     uint16_t data_off, uint8x16_t *rx_desc_field1,
                     uint64_t *ol_flags, const uint16_t flags,
                     uint64x2_t *rearm)
{
    const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
    const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
    uint64_t mbuf_init = vgetq_lane_u64(*rearm, 0);
    struct cn10k_inb_priv_data *inb_priv;
    struct rte_mbuf *inner;
    uint64_t *sg, res_w1;
    uintptr_t inb_sa;
    uint32_t sa_idx;
    uint32_t len;
    uint64_t w0;

    if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11))) {
        /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
        w0 = hdr->w0.u64;
        sa_idx = w0 >> 32;

        inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
        inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

        /* Clear checksum flags */
        *ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |
                       RTE_MBUF_F_RX_IP_CKSUM_MASK);

        if (!hdr->w0.num_frags) {
            /* No Reassembly or inbound error */
            inner = (struct rte_mbuf *)
                (rte_be_to_cpu_64(hdr->wqe_ptr) -
                 sizeof(struct rte_mbuf));
            /* Update dynamic field with userdata */
            *rte_security_dynfield(inner) =
                (uint64_t)inb_priv->userdata;

            /* CPT result (struct cpt_cn10k_res_s) is after
             * the first IOVA in meta
             */
            sg = (uint64_t *)(inner + 1);
            res_w1 = sg[10];

            /* Clear checksum flags and update security flag */
            *ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |
                           RTE_MBUF_F_RX_IP_CKSUM_MASK);
            *ol_flags |=
                (((res_w1 & 0xFF) == CPT_COMP_WARN) ?
                 RTE_MBUF_F_RX_SEC_OFFLOAD :
                 (RTE_MBUF_F_RX_SEC_OFFLOAD |
                  RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
            /* Calculate inner packet length */
            len = ((res_w1 >> 16) & 0xFFFF) +
                hdr->w2.il3_off -
                sizeof(struct cpt_parse_hdr_s) -
                (w0 & 0x7);
            /* Update pkt_len and data_len */
            *rx_desc_field1 =
                vsetq_lane_u16(len, *rx_desc_field1, 2);
            *rx_desc_field1 =
                vsetq_lane_u16(len, *rx_desc_field1, 4);
        } else if (!(hdr->w0.err_sum) && !(hdr->w0.reas_sts)) {
            /* Reassembly success */
            inner = nix_sec_reassemble_frags(hdr, cq_w1, cq_w5,
                                             mbuf_init);
            sg = (uint64_t *)(inner + 1);

            /* Update dynamic field with userdata */
            *rte_security_dynfield(inner) =
                (uint64_t)inb_priv->userdata;

            *ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;

            /* Update pkt_len and data_len */
            *rx_desc_field1 = vsetq_lane_u16(inner->pkt_len,
                                             *rx_desc_field1, 2);
            *rx_desc_field1 = vsetq_lane_u16(inner->data_len,
                                             *rx_desc_field1, 4);

            /* Data offset might be updated */
            mbuf_init = *(uint64_t *)(&inner->rearm_data);
            *rearm = vsetq_lane_u64(mbuf_init, *rearm, 0);
        } else {
            /* Reassembly failure */
            inner = nix_sec_attach_frags(hdr, inb_priv, mbuf_init);
            *ol_flags |= inner->ol_flags;

            /* Update pkt_len and data_len */
            *rx_desc_field1 = vsetq_lane_u16(inner->pkt_len,
                                             *rx_desc_field1, 2);
            *rx_desc_field1 = vsetq_lane_u16(inner->data_len,
                                             *rx_desc_field1, 4);
        }

        /* Store meta in lmtline to free
         * Assume all metas are from the same aura.
         */
        *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
        *loff = *loff + 1;

        /* Return inner mbuf */
        return inner;
    } else if (cq_w1 & BIT(11)) {
        inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
                                    sizeof(struct rte_mbuf));
        /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
        w0 = hdr->w0.u64;
        sa_idx = w0 >> 32;

        inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
        inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

        /* Update dynamic field with userdata */
        *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;

        /* CPT result (struct cpt_cn10k_res_s) is after
         * the first IOVA in meta
         */
        sg = (uint64_t *)(inner + 1);
        res_w1 = sg[10];

        /* Clear checksum flags and update security flag */
        *ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK | RTE_MBUF_F_RX_IP_CKSUM_MASK);
        *ol_flags |= (((res_w1 & 0xFF) == CPT_COMP_WARN) ?
                      RTE_MBUF_F_RX_SEC_OFFLOAD :
                      (RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
        /* Calculate inner packet length */
        len = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -
            sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);
        /* Update pkt_len and data_len */
        *rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 2);
        *rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 4);

        /* Store meta in lmtline to free
         * Assume all metas are from the same aura.
         */
        *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
        *loff = *loff + 1;

        /* Mark meta mbuf as put */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);

        /* Mark inner mbuf as get */
        RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);

        /* Return inner mbuf */
        return inner;
    }

    /* Return same mbuf as it is not a decrypted pkt */
    return mbuf;
}
#endif
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
    const uint16_t *const ptype = lookup_mem;
    const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
    const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
    const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

    return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
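/*
 * Example (illustrative): the ptype lookup splits NIX_RX_PARSE_S word 1
 * into two indices: bits [51:36] select the non-tunnel (inner layer) entry
 * and bits [63:52] select the tunnel entry, which are then merged:
 *
 *   mbuf->packet_type = nix_ptype_get(rxq->lookup_mem, cq_w1);
 *
 * 'cq_w1' here stands for the second 64-bit word of the CQE, loaded the
 * same way as elsewhere in this file.
 */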
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
    const uint32_t *const ol_flags =
        (const uint32_t *)((const uint8_t *)lookup_mem +
                           PTYPE_ARRAY_SZ);

    return ol_flags[(in & 0xfff00000) >> 20];
}
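/*
 * Example (illustrative, assuming bits [31:20] of parse word 1 carry the
 * error level/code used as the table index): the ol_flags table sits right
 * after the ptype arrays in lookup_mem, so one load yields the precomputed
 * RTE_MBUF_F_RX_*_CKSUM_* combination:
 *
 *   ol_flags |= nix_rx_olflags_get(rxq->lookup_mem, cq_w1);
 */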
static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
                    struct rte_mbuf *mbuf)
{
    /* There is no separate bit to check whether match_id is
     * valid, and no flag to identify whether it is an
     * RTE_FLOW_ACTION_TYPE_FLAG or an RTE_FLOW_ACTION_TYPE_MARK
     * action. The former case is addressed by treating 0 as an
     * invalid value and incrementing/decrementing the match_id
     * pair when MARK is active. The latter case is addressed by
     * defining CNXK_FLOW_MARK_DEFAULT as the value for
     * RTE_FLOW_ACTION_TYPE_MARK.
     * This translates to not using
     * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
     * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
     * i.e. valid mark_ids range from
     * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
     */
    if (likely(match_id)) {
        ol_flags |= RTE_MBUF_F_RX_FDIR;
        if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
            ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
            mbuf->hash.fdir.hi = match_id - 1;
        }
    }

    return ol_flags;
}
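/*
 * Example (illustrative): with the inc/dec convention above, an
 * RTE_FLOW_ACTION_TYPE_MARK with id N is delivered as match_id N + 1, so
 * the application reads back its original mark:
 *
 *   if (mbuf->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
 *       mark = mbuf->hash.fdir.hi;   // == N
 *
 * while a FLAG action (match_id == CNXK_FLOW_ACTION_FLAG_DEFAULT) sets
 * only RTE_MBUF_F_RX_FDIR.
 */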
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
                    uint64_t rearm, const uint16_t flags)
{
    const rte_iova_t *iova_list;
    struct rte_mbuf *head;
    const rte_iova_t *eol;
    uint8_t nb_segs;
    uint64_t sg;

    sg = *(const uint64_t *)(rx + 1);
    nb_segs = (sg >> 48) & 0x3;

    if (nb_segs == 1 && !(flags & NIX_RX_SEC_REASSEMBLY_F)) {
        mbuf->next = NULL;
        return;
    }

    mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
                                           CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
    mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
                                      CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
    mbuf->nb_segs = nb_segs;
    sg = sg >> 16;

    eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
    /* Skip SG_S and first IOVA */
    iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
    nb_segs--;

    rearm = rearm & ~0xFFFF;

    head = mbuf;
    while (nb_segs) {
        mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
        mbuf = mbuf->next;

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

        mbuf->data_len = sg & 0xFFFF;
        sg = sg >> 16;

        *(uint64_t *)(&mbuf->rearm_data) = rearm;
        nb_segs--;
        iova_list++;

        if (!nb_segs && (iova_list + 1 < eol)) {
            sg = *(const uint64_t *)(iova_list);
            nb_segs = (sg >> 48) & 0x3;
            head->nb_segs += nb_segs;
            iova_list = (const rte_iova_t *)(iova_list + 1);
        }
    }
}
static __rte_always_inline void
cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
                      struct rte_mbuf *mbuf, const void *lookup_mem,
                      const uint64_t val, const uint16_t flag)
{
    const union nix_rx_parse_u *rx =
        (const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
    const uint64_t w1 = *(const uint64_t *)rx;
    uint16_t len = rx->pkt_lenm1 + 1;
    uint64_t ol_flags = 0;

    if (flag & NIX_RX_OFFLOAD_PTYPE_F)
        mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
    else
        mbuf->packet_type = 0;

    if (flag & NIX_RX_OFFLOAD_RSS_F) {
        mbuf->hash.rss = tag;
        ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
    }

    /* Skip rx ol flags extraction for Security packets */
    if ((!(flag & NIX_RX_SEC_REASSEMBLY_F) || !(w1 & BIT(11))) &&
        flag & NIX_RX_OFFLOAD_CHECKSUM_F)
        ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

    if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
        if (rx->vtag0_gone) {
            ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
            mbuf->vlan_tci = rx->vtag0_tci;
        }
        if (rx->vtag1_gone) {
            ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
            mbuf->vlan_tci_outer = rx->vtag1_tci;
        }
    }

    if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
        ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);

    /* Packet data length and ol flags are already updated for sec */
    if (flag & NIX_RX_SEC_REASSEMBLY_F && w1 & BIT_ULL(11)) {
        mbuf->ol_flags |= ol_flags;
    } else {
        mbuf->ol_flags = ol_flags;
        mbuf->pkt_len = len;
        mbuf->data_len = len;
        *(uint64_t *)(&mbuf->rearm_data) = val;
    }

    if (flag & NIX_RX_MULTI_SEG_F)
        /*
         * For multi-segment packets, mbuf length correction according
         * to the Rx timestamp length is handled later during timestamp
         * data processing; hence, the flag argument is not required.
         */
        nix_cqe_xtract_mseg(rx, mbuf, val, 0);
}
static inline uint16_t
nix_rx_nb_pkts(struct cn10k_eth_rxq *rxq, const uint64_t wdata,
               const uint16_t pkts, const uint32_t qmask)
{
    uint32_t available = rxq->available;

    /* Update the available count if the cached value is not enough */
    if (unlikely(available < pkts)) {
        uint64_t reg, head, tail;

        /* Use LDADDA version to avoid reorder */
        reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
        /* CQ_OP_STATUS operation error */
        if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
            reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
            return 0;

        tail = reg & 0xFFFFF;
        head = (reg >> 20) & 0xFFFFF;
        if (head > tail)
            available = tail - head + qmask + 1;
        else
            available = tail - head;

        rxq->available = available;
    }

    return RTE_MIN(pkts, available);
}
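/*
 * Worked example (illustrative): CQ_OP_STATUS packs tail in bits [19:0]
 * and head in bits [39:20]. With qmask = 1023, head = 1020 and tail = 4,
 * head > tail, so available = 4 - 1020 + 1023 + 1 = 8 CQEs pending across
 * the ring wrap.
 */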
static __rte_always_inline void
cn10k_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
                         struct cnxk_timesync_info *tstamp,
                         const uint8_t ts_enable, uint64_t *tstamp_ptr)
{
    if (ts_enable) {
        mbuf->pkt_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;
        mbuf->data_len -= CNXK_NIX_TIMESYNC_RX_OFFSET;

        /* Read the Rx timestamp inserted by CGX, viz. at the
         * start of the packet data.
         */
        *tstamp_ptr = ((*tstamp_ptr >> 32) * NSEC_PER_SEC) +
                      (*tstamp_ptr & 0xFFFFFFFFUL);
        *cnxk_nix_timestamp_dynfield(mbuf, tstamp) =
            rte_be_to_cpu_64(*tstamp_ptr);
        /* RTE_MBUF_F_RX_IEEE1588_TMST flag needs to be set only in
         * case PTP packets are received.
         */
        if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
            tstamp->rx_tstamp =
                *cnxk_nix_timestamp_dynfield(mbuf, tstamp);
            tstamp->rx_ready = 1;
            mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP |
                              RTE_MBUF_F_RX_IEEE1588_TMST |
                              tstamp->rx_tstamp_dynflag;
        }
    }
}
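/*
 * Example (illustrative, following the conversion above): the 8-byte
 * timestamp is prepended to the packet data, with seconds in the upper
 * 32 bits and nanoseconds in the lower 32 bits, and is folded into a
 * single nanosecond count:
 *
 *   ns = (ts >> 32) * NSEC_PER_SEC + (ts & 0xFFFFFFFFUL);
 *
 * For a caller, the timestamp pointer is simply the start of the mbuf
 * data area before the CNXK_NIX_TIMESYNC_RX_OFFSET adjustment.
 */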
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
                    const uint16_t flags)
{
    struct cn10k_eth_rxq *rxq = rx_queue;
    const uint64_t mbuf_init = rxq->mbuf_initializer;
    const void *lookup_mem = rxq->lookup_mem;
    const uint64_t data_off = rxq->data_off;
    const uintptr_t desc = rxq->desc;
    const uint64_t wdata = rxq->wdata;
    const uint32_t qmask = rxq->qmask;
    uint64_t lbase = rxq->lmt_base;
    uint16_t packets = 0, nb_pkts;
    uint8_t loff = 0, lnum = 0;
    uint32_t head = rxq->head;
    struct nix_cqe_hdr_s *cq;
    struct rte_mbuf *mbuf;
    uint64_t aura_handle;
    uint64_t sa_base;
    uint16_t lmt_id;
    uint64_t laddr;

    nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

    if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
        aura_handle = rxq->aura_handle;
        sa_base = rxq->sa_base;
        sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
        ROC_LMT_BASE_ID_GET(lbase, lmt_id);
        laddr = lbase;
        laddr += 8;
    }

    while (packets < nb_pkts) {
        /* Prefetch N desc ahead */
        rte_prefetch_non_temporal(
            (void *)(desc + (CQE_SZ((head + 2) & qmask))));
        cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

        mbuf = nix_get_mbuf_from_cqe(cq, data_off);

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

        /* Translate meta to mbuf */
        if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
            const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
            const uint64_t cq_w5 = *((const uint64_t *)cq + 5);

            mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr,
                                           &loff, mbuf, data_off,
                                           flags, mbuf_init);
        }

        cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
                              flags);
        cn10k_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
                                 (flags & NIX_RX_OFFLOAD_TSTAMP_F),
                                 (uint64_t *)((uint8_t *)mbuf
                                              + data_off));
        rx_pkts[packets++] = mbuf;
        roc_prefetch_store_keep(mbuf);
        head++;
        head &= qmask;

        if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
            /* Flush when no space is left for another meta */
            if ((15 - loff) < 1) {
                nix_sec_flush_meta(laddr, lmt_id + lnum, loff,
                                   aura_handle);
                lnum++;
                lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) - 1;
                /* First pointer starts at 8B offset */
                laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
                loff = 0;
            }
        }
    }

    rxq->head = head;
    rxq->available -= nb_pkts;

    /* Free all the CQs that we've processed */
    plt_write64((wdata | nb_pkts), rxq->cq_door);

    /* Free remaining meta buffers if any */
    if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff)
        nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);

    return nb_pkts;
}
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
    if (w2 & BIT_ULL(21) /* vtag0_gone */) {
        ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
        *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
    }

    return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
    if (w2 & BIT_ULL(23) /* vtag1_gone */) {
        ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
        mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
    }

    return ol_flags;
}
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                           const uint16_t flags, void *lookup_mem,
                           struct cnxk_timesync_info *tstamp,
                           uintptr_t lmt_base)
{
    struct cn10k_eth_rxq *rxq = args;
    const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
                                      *(uint64_t *)args :
                                      rxq->mbuf_initializer;
    const uint64x2_t data_off = flags & NIX_RX_VWQE_F ?
                                vdupq_n_u64(RTE_PKTMBUF_HEADROOM) :
                                vdupq_n_u64(rxq->data_off);
    const uint32_t qmask = flags & NIX_RX_VWQE_F ? 0 : rxq->qmask;
    const uint64_t wdata = flags & NIX_RX_VWQE_F ? 0 : rxq->wdata;
    const uintptr_t desc = flags & NIX_RX_VWQE_F ? 0 : rxq->desc;
    uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
    uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
    uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
    uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
    uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
    uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
    struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
    uint64_t aura_handle, lbase, laddr;
    uint8_t loff = 0, lnum = 0, shft = 0;
    uint8x16_t f0, f1, f2, f3;
    uint16_t lmt_id, d_off;
    uint16_t packets = 0;
    uint16_t pkts_left;
    uintptr_t sa_base;
    uint32_t head;
    uintptr_t cq0;

    if (!(flags & NIX_RX_VWQE_F)) {
        lookup_mem = rxq->lookup_mem;
        head = rxq->head;

        pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
        pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
        /* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
        pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
        if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
            tstamp = rxq->tstamp;
    } else {
        pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
        pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
    }

    if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
        if (flags & NIX_RX_VWQE_F) {
            uint16_t port;

            mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
                                        sizeof(struct rte_mbuf));
            /* Pick first mbuf's aura handle assuming all
             * mbufs are from a vec and are from same RQ.
             */
            aura_handle = mbuf0->pool->pool_id;
            /* Calculate offset from mbuf to actual data area */
            d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
            d_off += (mbuf_initializer & 0xFFFF);

            /* Get SA Base from lookup tbl using port_id */
            port = mbuf_initializer >> 48;
            sa_base = cnxk_nix_sa_base_get(port, lookup_mem);
            lbase = lmt_base;
        } else {
            aura_handle = rxq->aura_handle;
            d_off = rxq->data_off;
            sa_base = rxq->sa_base;
            lbase = rxq->lmt_base;
        }
        sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
        ROC_LMT_BASE_ID_GET(lbase, lmt_id);
        lnum = 0;
        laddr = lbase;
        laddr += 8;
    }
    while (packets < pkts) {
        if (!(flags & NIX_RX_VWQE_F)) {
            /* Exit loop if head is about to wrap and become
             * unaligned.
             */
            if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
                NIX_DESCS_PER_LOOP) {
                pkts_left += (pkts - packets);
                break;
            }

            cq0 = desc + CQE_SZ(head);
        } else {
            cq0 = (uintptr_t)&mbufs[packets];
        }

        if (flags & NIX_RX_VWQE_F) {
            if (pkts - packets > 4) {
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
                    4, 0, flags));
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
                    5, 0, flags));
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
                    6, 0, flags));
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0,
                    7, 0, flags));

                if (likely(pkts - packets > 8)) {
                    rte_prefetch1(CQE_PTR_OFF(cq0,
                        8, 0, flags));
                    rte_prefetch1(CQE_PTR_OFF(cq0,
                        9, 0, flags));
                    rte_prefetch1(CQE_PTR_OFF(cq0,
                        10, 0, flags));
                    rte_prefetch1(CQE_PTR_OFF(cq0,
                        11, 0, flags));
                    if (pkts - packets > 12) {
                        rte_prefetch1(CQE_PTR_OFF(cq0,
                            12, 0, flags));
                        rte_prefetch1(CQE_PTR_OFF(cq0,
                            13, 0, flags));
                        rte_prefetch1(CQE_PTR_OFF(cq0,
                            14, 0, flags));
                        rte_prefetch1(CQE_PTR_OFF(cq0,
                            15, 0, flags));
                    }
                }

                rte_prefetch0(CQE_PTR_DIFF(cq0,
                    4, RTE_PKTMBUF_HEADROOM, flags));
                rte_prefetch0(CQE_PTR_DIFF(cq0,
                    5, RTE_PKTMBUF_HEADROOM, flags));
                rte_prefetch0(CQE_PTR_DIFF(cq0,
                    6, RTE_PKTMBUF_HEADROOM, flags));
                rte_prefetch0(CQE_PTR_DIFF(cq0,
                    7, RTE_PKTMBUF_HEADROOM, flags));

                if (likely(pkts - packets > 8)) {
                    rte_prefetch0(CQE_PTR_DIFF(cq0,
                        8, RTE_PKTMBUF_HEADROOM, flags));
                    rte_prefetch0(CQE_PTR_DIFF(cq0,
                        9, RTE_PKTMBUF_HEADROOM, flags));
                    rte_prefetch0(CQE_PTR_DIFF(cq0,
                        10, RTE_PKTMBUF_HEADROOM, flags));
                    rte_prefetch0(CQE_PTR_DIFF(cq0,
                        11, RTE_PKTMBUF_HEADROOM, flags));
                }
            }
        } else {
            if (pkts - packets > 4) {
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 4, 64, flags));
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 5, 64, flags));
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 6, 64, flags));
                rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 7, 64, flags));
            }
        }

        if (!(flags & NIX_RX_VWQE_F)) {
            /* Get NIX_RX_SG_S for size and buffer pointer */
            cq0_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 0, 64, flags));
            cq1_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 1, 64, flags));
            cq2_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 2, 64, flags));
            cq3_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 3, 64, flags));

            /* Extract mbuf from NIX_RX_SG_S */
            mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
            mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
            mbuf01 = vqsubq_u64(mbuf01, data_off);
            mbuf23 = vqsubq_u64(mbuf23, data_off);
        } else {
            mbuf01 =
                vsubq_u64(vld1q_u64((uint64_t *)cq0), data_off);
            mbuf23 = vsubq_u64(vld1q_u64((uint64_t *)(cq0 + 16)),
                               data_off);
        }

        /* Move mbufs to scalar registers for future use */
        mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
        mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
        mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
        mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);
        if (!(flags & NIX_RX_VWQE_F)) {
            /* Mask to get packet len from NIX_RX_SG_S */
            const uint8x16_t shuf_msk = {
                0xFF, 0xFF, /* pkt_type set as unknown */
                0xFF, 0xFF, /* pkt_type set as unknown */
                0,    1,    /* octet 1~0, low 16 bits pkt_len */
                0xFF, 0xFF, /* skip high 16 bits of pkt_len, zero out */
                0,    1,    /* octet 1~0, 16 bits data_len */
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

            /* Form the rx_descriptor_fields1 with pkt_len and data_len */
            f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
            f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
            f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
            f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
        }

        if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
            /* Prefetch probable CPT parse header area */
            rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf0, d_off));
            rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf1, d_off));
            rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf2, d_off));
            rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf3, d_off));
        }

        /* Load CQE word0 and word 1 */
        const uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);
        const uint64_t cq0_w1 = *CQE_PTR_OFF(cq0, 0, 8, flags);
        const uint64_t cq0_w2 = *CQE_PTR_OFF(cq0, 0, 16, flags);
        const uint64_t cq1_w0 = *CQE_PTR_OFF(cq0, 1, 0, flags);
        const uint64_t cq1_w1 = *CQE_PTR_OFF(cq0, 1, 8, flags);
        const uint64_t cq1_w2 = *CQE_PTR_OFF(cq0, 1, 16, flags);
        const uint64_t cq2_w0 = *CQE_PTR_OFF(cq0, 2, 0, flags);
        const uint64_t cq2_w1 = *CQE_PTR_OFF(cq0, 2, 8, flags);
        const uint64_t cq2_w2 = *CQE_PTR_OFF(cq0, 2, 16, flags);
        const uint64_t cq3_w0 = *CQE_PTR_OFF(cq0, 3, 0, flags);
        const uint64_t cq3_w1 = *CQE_PTR_OFF(cq0, 3, 8, flags);
        const uint64_t cq3_w2 = *CQE_PTR_OFF(cq0, 3, 16, flags);

        if (flags & NIX_RX_VWQE_F) {
            uint16_t psize0, psize1, psize2, psize3;

            psize0 = (cq0_w2 & 0xFFFF) + 1;
            psize1 = (cq1_w2 & 0xFFFF) + 1;
            psize2 = (cq2_w2 & 0xFFFF) + 1;
            psize3 = (cq3_w2 & 0xFFFF) + 1;

            f0 = vdupq_n_u64(0);
            f1 = vdupq_n_u64(0);
            f2 = vdupq_n_u64(0);
            f3 = vdupq_n_u64(0);

            f0 = vsetq_lane_u16(psize0, f0, 2);
            f0 = vsetq_lane_u16(psize0, f0, 4);

            f1 = vsetq_lane_u16(psize1, f1, 2);
            f1 = vsetq_lane_u16(psize1, f1, 4);

            f2 = vsetq_lane_u16(psize2, f2, 2);
            f2 = vsetq_lane_u16(psize2, f2, 4);

            f3 = vsetq_lane_u16(psize3, f3, 2);
            f3 = vsetq_lane_u16(psize3, f3, 4);
        }

        if (flags & NIX_RX_OFFLOAD_RSS_F) {
            /* Fill rss in the rx_descriptor_fields1 */
            f0 = vsetq_lane_u32(cq0_w0, f0, 3);
            f1 = vsetq_lane_u32(cq1_w0, f1, 3);
            f2 = vsetq_lane_u32(cq2_w0, f2, 3);
            f3 = vsetq_lane_u32(cq3_w0, f3, 3);
            ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
            ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
            ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
            ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
        } else {
            ol_flags0 = 0;
            ol_flags1 = 0;
            ol_flags2 = 0;
            ol_flags3 = 0;
        }

        if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
            /* Fill packet_type in the rx_descriptor_fields1 */
            f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
                                f0, 0);
            f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
                                f1, 0);
            f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
                                f2, 0);
            f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
                                f3, 0);
        }

        if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
            ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
            ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
            ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
            ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
        }

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
        RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
        RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
        RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);
        /* Translate meta to mbuf */
        if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
            uint64_t cq0_w5 = *(uint64_t *)(cq0 + CQE_SZ(0) + 40);
            uint64_t cq1_w5 = *(uint64_t *)(cq0 + CQE_SZ(1) + 40);
            uint64_t cq2_w5 = *(uint64_t *)(cq0 + CQE_SZ(2) + 40);
            uint64_t cq3_w5 = *(uint64_t *)(cq0 + CQE_SZ(3) + 40);

            /* Initialize rearm data when reassembly is enabled as
             * data offset might change.
             */
            if (flags & NIX_RX_REAS_F) {
                rearm0 = vdupq_n_u64(mbuf_initializer);
                rearm1 = vdupq_n_u64(mbuf_initializer);
                rearm2 = vdupq_n_u64(mbuf_initializer);
                rearm3 = vdupq_n_u64(mbuf_initializer);
            }

            /* Checksum ol_flags will be cleared if mbuf is meta */
            mbuf0 = nix_sec_meta_to_mbuf(cq0_w1, cq0_w5, sa_base, laddr,
                                         &loff, mbuf0, d_off, &f0,
                                         &ol_flags0, flags, &rearm0);
            mbuf01 = vsetq_lane_u64((uint64_t)mbuf0, mbuf01, 0);

            mbuf1 = nix_sec_meta_to_mbuf(cq1_w1, cq1_w5, sa_base, laddr,
                                         &loff, mbuf1, d_off, &f1,
                                         &ol_flags1, flags, &rearm1);
            mbuf01 = vsetq_lane_u64((uint64_t)mbuf1, mbuf01, 1);

            mbuf2 = nix_sec_meta_to_mbuf(cq2_w1, cq2_w5, sa_base, laddr,
                                         &loff, mbuf2, d_off, &f2,
                                         &ol_flags2, flags, &rearm2);
            mbuf23 = vsetq_lane_u64((uint64_t)mbuf2, mbuf23, 0);

            mbuf3 = nix_sec_meta_to_mbuf(cq3_w1, cq3_w5, sa_base, laddr,
                                         &loff, mbuf3, d_off, &f3,
                                         &ol_flags3, flags, &rearm3);
            mbuf23 = vsetq_lane_u64((uint64_t)mbuf3, mbuf23, 1);
        }

        if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
            ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
            ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
            ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
            ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

            ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
            ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
            ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
            ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
        }

        if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
            ol_flags0 = nix_update_match_id(
                *(uint16_t *)CQE_PTR_OFF(cq0, 0, 38, flags),
                ol_flags0, mbuf0);
            ol_flags1 = nix_update_match_id(
                *(uint16_t *)CQE_PTR_OFF(cq0, 1, 38, flags),
                ol_flags1, mbuf1);
            ol_flags2 = nix_update_match_id(
                *(uint16_t *)CQE_PTR_OFF(cq0, 2, 38, flags),
                ol_flags2, mbuf2);
            ol_flags3 = nix_update_match_id(
                *(uint16_t *)CQE_PTR_OFF(cq0, 3, 38, flags),
                ol_flags3, mbuf3);
        }
        if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
            const uint16x8_t len_off = {
                0,                           /* ptype    0:15 */
                0,                           /* ptype   16:32 */
                CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen   0:15 */
                0,                           /* pktlen  16:32 */
                CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen  0:15 */
                0,
                0,
                0};
            const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
                                      RTE_PTYPE_L2_ETHER_TIMESYNC,
                                      RTE_PTYPE_L2_ETHER_TIMESYNC,
                                      RTE_PTYPE_L2_ETHER_TIMESYNC};
            const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
                                    RTE_MBUF_F_RX_IEEE1588_TMST |
                                    tstamp->rx_tstamp_dynflag;
            const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
            uint64x2_t ts01, ts23, mask;
            uint64_t ts[4];
            uint8_t res;

            /* Subtract timesync length from total pkt length. */
            f0 = vsubq_u16(f0, len_off);
            f1 = vsubq_u16(f1, len_off);
            f2 = vsubq_u16(f2, len_off);
            f3 = vsubq_u16(f3, len_off);

            /* Get the address of actual timestamp. */
            ts01 = vaddq_u64(mbuf01, data_off);
            ts23 = vaddq_u64(mbuf23, data_off);
            /* Load timestamp from address. */
            ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
                                                              0),
                                  ts01, 0);
            ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
                                                              1),
                                  ts01, 1);
            ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
                                                              0),
                                  ts23, 0);
            ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
                                                              1),
                                  ts23, 1);
            /* Convert from be to cpu byteorder. */
            ts01 = vrev64q_u8(ts01);
            ts23 = vrev64q_u8(ts23);
            /* Store timestamp into scalar for later use. */
            ts[0] = vgetq_lane_u64(ts01, 0);
            ts[1] = vgetq_lane_u64(ts01, 1);
            ts[2] = vgetq_lane_u64(ts23, 0);
            ts[3] = vgetq_lane_u64(ts23, 1);

            /* Store timestamp into dynfield. */
            *cnxk_nix_timestamp_dynfield(mbuf0, tstamp) = ts[0];
            *cnxk_nix_timestamp_dynfield(mbuf1, tstamp) = ts[1];
            *cnxk_nix_timestamp_dynfield(mbuf2, tstamp) = ts[2];
            *cnxk_nix_timestamp_dynfield(mbuf3, tstamp) = ts[3];

            /* Generate ptype mask to filter L2 ether timesync */
            mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
            mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
            mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
            mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

            /* Match against L2 ether timesync. */
            mask = vceqq_u32(mask, ptype);
            /* Convert from vector to scalar mask */
            res = vaddvq_u32(vandq_u32(mask, and_mask));
            res &= 0xF;

            if (res) {
                /* Fill in the ol_flags for any packets that
                 * matched the ptype.
                 */
                ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
                ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
                ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
                ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

                /* Update Rxq timestamp with the latest
                 * timestamp.
                 */
                tstamp->rx_ready = 1;
                tstamp->rx_tstamp = ts[31 - __builtin_clz(res)];
            }
        }
        /* Form rearm_data with ol_flags */
        rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
        rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
        rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
        rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

        /* Update rx_descriptor_fields1 */
        vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
        vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
        vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
        vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

        /* Update rearm_data */
        vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
        vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
        vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
        vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

        /* Store the mbufs to rx_pkts */
        vst1q_u64((uint64_t *)&mbufs[packets], mbuf01);
        vst1q_u64((uint64_t *)&mbufs[packets + 2], mbuf23);

        if (flags & NIX_RX_MULTI_SEG_F) {
            /* Multi-segment is enabled; build the mseg list for
             * individual mbufs in scalar mode.
             */
            nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                (CQE_PTR_OFF(cq0, 0, 8, flags)),
                                mbuf0, mbuf_initializer, flags);
            nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                (CQE_PTR_OFF(cq0, 1, 8, flags)),
                                mbuf1, mbuf_initializer, flags);
            nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                (CQE_PTR_OFF(cq0, 2, 8, flags)),
                                mbuf2, mbuf_initializer, flags);
            nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                (CQE_PTR_OFF(cq0, 3, 8, flags)),
                                mbuf3, mbuf_initializer, flags);
        }

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
        RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
        RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
        RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);

        nix_mbuf_validate_next(mbuf0);
        nix_mbuf_validate_next(mbuf1);
        nix_mbuf_validate_next(mbuf2);
        nix_mbuf_validate_next(mbuf3);

        packets += NIX_DESCS_PER_LOOP;

        if (!(flags & NIX_RX_VWQE_F)) {
            /* Advance head pointer and packets */
            head += NIX_DESCS_PER_LOOP;
            head &= qmask;
        }

        if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
            /* Check if lmtline border is crossed and adjust lnum */
            if (loff > 15) {
                /* Update aura handle */
                *(uint64_t *)(laddr - 8) =
                    (((uint64_t)(15 & 0x1) << 32) |
                     roc_npa_aura_handle_to_aura(aura_handle));
                loff = loff - 15;
                shft += 3;

                lnum++;
                laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
                /* Pick the pointer from 16th index and put it
                 * at end of this new line.
                 */
                *(uint64_t *)(laddr + (loff << 3) - 8) =
                    *(uint64_t *)(laddr - 8);
            }

            /* Flush it when we are in 16th line and might
             * overflow it.
             */
            if (lnum >= 15 && loff >= 12) {
                /* 16 LMT Line size m1 */
                uint64_t data = BIT_ULL(48) - 1;

                /* Update aura handle */
                *(uint64_t *)(laddr - 8) =
                    (((uint64_t)(loff & 0x1) << 32) |
                     roc_npa_aura_handle_to_aura(aura_handle));

                data = (data & ~(0x7UL << shft)) |
                       (((uint64_t)loff >> 1) << shft);

                /* Send up to 16 lmt lines of pointers */
                nix_sec_flush_meta_burst(lmt_id, data, lnum + 1,
                                         aura_handle);
                lnum = 0;
                loff = 0;
                shft = 0;
                /* First pointer starts at 8B offset */
                laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
            }
        }
    }

    if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
        /* 16 LMT Line size m1 */
        uint64_t data = BIT_ULL(48) - 1;

        /* Update aura handle */
        *(uint64_t *)(laddr - 8) =
            (((uint64_t)(loff & 0x1) << 32) |
             roc_npa_aura_handle_to_aura(aura_handle));

        data = (data & ~(0x7UL << shft)) |
               (((uint64_t)loff >> 1) << shft);

        /* Send up to 16 lmt lines of pointers */
        nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, aura_handle);
        if (flags & NIX_RX_VWQE_F)
            plt_io_wmb();
    }

    if (flags & NIX_RX_VWQE_F)
        return packets;

    rxq->head = head;
    rxq->available -= packets;

    /* Free all the CQs that we've processed */
    plt_write64((rxq->wdata | packets), rxq->cq_door);

    if (unlikely(pkts_left))
        packets += cn10k_nix_recv_pkts(args, &mbufs[packets],
                                       pkts_left, flags);

    return packets;
}

#else
static inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                           const uint16_t flags, void *lookup_mem,
                           struct cnxk_timesync_info *tstamp,
                           uintptr_t lmt_base)
{
    RTE_SET_USED(args);
    RTE_SET_USED(mbufs);
    RTE_SET_USED(pkts);
    RTE_SET_USED(flags);
    RTE_SET_USED(lookup_mem);
    RTE_SET_USED(tstamp);
    RTE_SET_USED(lmt_base);

    return 0;
}

#endif
#define RSS_F     NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F   NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F   NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F    NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F      NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
#define R_SEC_F   NIX_RX_OFFLOAD_SECURITY_F
/* [R_SEC_F] [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES_0_15                                       \
    R(no_offload, NIX_RX_OFFLOAD_NONE)                                   \
    R(rss, RSS_F)                                                        \
    R(ptype, PTYPE_F)                                                    \
    R(ptype_rss, PTYPE_F | RSS_F)                                        \
    R(cksum, CKSUM_F)                                                    \
    R(cksum_rss, CKSUM_F | RSS_F)                                        \
    R(cksum_ptype, CKSUM_F | PTYPE_F)                                    \
    R(cksum_ptype_rss, CKSUM_F | PTYPE_F | RSS_F)                        \
    R(mark, MARK_F)                                                      \
    R(mark_rss, MARK_F | RSS_F)                                          \
    R(mark_ptype, MARK_F | PTYPE_F)                                      \
    R(mark_ptype_rss, MARK_F | PTYPE_F | RSS_F)                          \
    R(mark_cksum, MARK_F | CKSUM_F)                                      \
    R(mark_cksum_rss, MARK_F | CKSUM_F | RSS_F)                          \
    R(mark_cksum_ptype, MARK_F | CKSUM_F | PTYPE_F)                      \
    R(mark_cksum_ptype_rss, MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_16_31                                      \
    R(ts, TS_F)                                                          \
    R(ts_rss, TS_F | RSS_F)                                              \
    R(ts_ptype, TS_F | PTYPE_F)                                          \
    R(ts_ptype_rss, TS_F | PTYPE_F | RSS_F)                              \
    R(ts_cksum, TS_F | CKSUM_F)                                          \
    R(ts_cksum_rss, TS_F | CKSUM_F | RSS_F)                              \
    R(ts_cksum_ptype, TS_F | CKSUM_F | PTYPE_F)                          \
    R(ts_cksum_ptype_rss, TS_F | CKSUM_F | PTYPE_F | RSS_F)              \
    R(ts_mark, TS_F | MARK_F)                                            \
    R(ts_mark_rss, TS_F | MARK_F | RSS_F)                                \
    R(ts_mark_ptype, TS_F | MARK_F | PTYPE_F)                            \
    R(ts_mark_ptype_rss, TS_F | MARK_F | PTYPE_F | RSS_F)                \
    R(ts_mark_cksum, TS_F | MARK_F | CKSUM_F)                            \
    R(ts_mark_cksum_rss, TS_F | MARK_F | CKSUM_F | RSS_F)                \
    R(ts_mark_cksum_ptype, TS_F | MARK_F | CKSUM_F | PTYPE_F)            \
    R(ts_mark_cksum_ptype_rss, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_32_47                                      \
    R(vlan, RX_VLAN_F)                                                   \
    R(vlan_rss, RX_VLAN_F | RSS_F)                                       \
    R(vlan_ptype, RX_VLAN_F | PTYPE_F)                                   \
    R(vlan_ptype_rss, RX_VLAN_F | PTYPE_F | RSS_F)                       \
    R(vlan_cksum, RX_VLAN_F | CKSUM_F)                                   \
    R(vlan_cksum_rss, RX_VLAN_F | CKSUM_F | RSS_F)                       \
    R(vlan_cksum_ptype, RX_VLAN_F | CKSUM_F | PTYPE_F)                   \
    R(vlan_cksum_ptype_rss, RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)       \
    R(vlan_mark, RX_VLAN_F | MARK_F)                                     \
    R(vlan_mark_rss, RX_VLAN_F | MARK_F | RSS_F)                         \
    R(vlan_mark_ptype, RX_VLAN_F | MARK_F | PTYPE_F)                     \
    R(vlan_mark_ptype_rss, RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)         \
    R(vlan_mark_cksum, RX_VLAN_F | MARK_F | CKSUM_F)                     \
    R(vlan_mark_cksum_rss, RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)         \
    R(vlan_mark_cksum_ptype, RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)     \
    R(vlan_mark_cksum_ptype_rss,                                         \
      RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_48_63                                      \
    R(vlan_ts, RX_VLAN_F | TS_F)                                         \
    R(vlan_ts_rss, RX_VLAN_F | TS_F | RSS_F)                             \
    R(vlan_ts_ptype, RX_VLAN_F | TS_F | PTYPE_F)                         \
    R(vlan_ts_ptype_rss, RX_VLAN_F | TS_F | PTYPE_F | RSS_F)             \
    R(vlan_ts_cksum, RX_VLAN_F | TS_F | CKSUM_F)                         \
    R(vlan_ts_cksum_rss, RX_VLAN_F | TS_F | CKSUM_F | RSS_F)             \
    R(vlan_ts_cksum_ptype, RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)         \
    R(vlan_ts_cksum_ptype_rss,                                           \
      RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)                      \
    R(vlan_ts_mark, RX_VLAN_F | TS_F | MARK_F)                           \
    R(vlan_ts_mark_rss, RX_VLAN_F | TS_F | MARK_F | RSS_F)               \
    R(vlan_ts_mark_ptype, RX_VLAN_F | TS_F | MARK_F | PTYPE_F)           \
    R(vlan_ts_mark_ptype_rss, RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
    R(vlan_ts_mark_cksum, RX_VLAN_F | TS_F | MARK_F | CKSUM_F)           \
    R(vlan_ts_mark_cksum_rss, RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
    R(vlan_ts_mark_cksum_ptype,                                          \
      RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)                     \
    R(vlan_ts_mark_cksum_ptype_rss,                                      \
      RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_64_79                                      \
    R(sec, R_SEC_F)                                                      \
    R(sec_rss, R_SEC_F | RSS_F)                                          \
    R(sec_ptype, R_SEC_F | PTYPE_F)                                      \
    R(sec_ptype_rss, R_SEC_F | PTYPE_F | RSS_F)                          \
    R(sec_cksum, R_SEC_F | CKSUM_F)                                      \
    R(sec_cksum_rss, R_SEC_F | CKSUM_F | RSS_F)                          \
    R(sec_cksum_ptype, R_SEC_F | CKSUM_F | PTYPE_F)                      \
    R(sec_cksum_ptype_rss, R_SEC_F | CKSUM_F | PTYPE_F | RSS_F)          \
    R(sec_mark, R_SEC_F | MARK_F)                                        \
    R(sec_mark_rss, R_SEC_F | MARK_F | RSS_F)                            \
    R(sec_mark_ptype, R_SEC_F | MARK_F | PTYPE_F)                        \
    R(sec_mark_ptype_rss, R_SEC_F | MARK_F | PTYPE_F | RSS_F)            \
    R(sec_mark_cksum, R_SEC_F | MARK_F | CKSUM_F)                        \
    R(sec_mark_cksum_rss, R_SEC_F | MARK_F | CKSUM_F | RSS_F)            \
    R(sec_mark_cksum_ptype, R_SEC_F | MARK_F | CKSUM_F | PTYPE_F)        \
    R(sec_mark_cksum_ptype_rss,                                          \
      R_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_80_95                                      \
    R(sec_ts, R_SEC_F | TS_F)                                            \
    R(sec_ts_rss, R_SEC_F | TS_F | RSS_F)                                \
    R(sec_ts_ptype, R_SEC_F | TS_F | PTYPE_F)                            \
    R(sec_ts_ptype_rss, R_SEC_F | TS_F | PTYPE_F | RSS_F)                \
    R(sec_ts_cksum, R_SEC_F | TS_F | CKSUM_F)                            \
    R(sec_ts_cksum_rss, R_SEC_F | TS_F | CKSUM_F | RSS_F)                \
    R(sec_ts_cksum_ptype, R_SEC_F | TS_F | CKSUM_F | PTYPE_F)            \
    R(sec_ts_cksum_ptype_rss, R_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
    R(sec_ts_mark, R_SEC_F | TS_F | MARK_F)                              \
    R(sec_ts_mark_rss, R_SEC_F | TS_F | MARK_F | RSS_F)                  \
    R(sec_ts_mark_ptype, R_SEC_F | TS_F | MARK_F | PTYPE_F)              \
    R(sec_ts_mark_ptype_rss, R_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F)  \
    R(sec_ts_mark_cksum, R_SEC_F | TS_F | MARK_F | CKSUM_F)              \
    R(sec_ts_mark_cksum_rss, R_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F)  \
    R(sec_ts_mark_cksum_ptype,                                           \
      R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)                       \
    R(sec_ts_mark_cksum_ptype_rss,                                       \
      R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_96_111                                     \
    R(sec_vlan, R_SEC_F | RX_VLAN_F)                                     \
    R(sec_vlan_rss, R_SEC_F | RX_VLAN_F | RSS_F)                         \
    R(sec_vlan_ptype, R_SEC_F | RX_VLAN_F | PTYPE_F)                     \
    R(sec_vlan_ptype_rss, R_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F)         \
    R(sec_vlan_cksum, R_SEC_F | RX_VLAN_F | CKSUM_F)                     \
    R(sec_vlan_cksum_rss, R_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F)         \
    R(sec_vlan_cksum_ptype, R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F)     \
    R(sec_vlan_cksum_ptype_rss,                                          \
      R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)                   \
    R(sec_vlan_mark, R_SEC_F | RX_VLAN_F | MARK_F)                       \
    R(sec_vlan_mark_rss, R_SEC_F | RX_VLAN_F | MARK_F | RSS_F)           \
    R(sec_vlan_mark_ptype, R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F)       \
    R(sec_vlan_mark_ptype_rss,                                           \
      R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)                    \
    R(sec_vlan_mark_cksum, R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F)       \
    R(sec_vlan_mark_cksum_rss,                                           \
      R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)                    \
    R(sec_vlan_mark_cksum_ptype,                                         \
      R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)                  \
    R(sec_vlan_mark_cksum_ptype_rss,                                     \
      R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES_112_127                                    \
    R(sec_vlan_ts, R_SEC_F | RX_VLAN_F | TS_F)                           \
    R(sec_vlan_ts_rss, R_SEC_F | RX_VLAN_F | TS_F | RSS_F)               \
    R(sec_vlan_ts_ptype, R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F)           \
    R(sec_vlan_ts_ptype_rss, R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
    R(sec_vlan_ts_cksum, R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F)           \
    R(sec_vlan_ts_cksum_rss, R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
    R(sec_vlan_ts_cksum_ptype,                                           \
      R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)                    \
    R(sec_vlan_ts_cksum_ptype_rss,                                       \
      R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)            \
    R(sec_vlan_ts_mark, R_SEC_F | RX_VLAN_F | TS_F | MARK_F)             \
    R(sec_vlan_ts_mark_rss, R_SEC_F | RX_VLAN_F | TS_F | MARK_F | RSS_F) \
    R(sec_vlan_ts_mark_ptype,                                            \
      R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F)                     \
    R(sec_vlan_ts_mark_ptype_rss,                                        \
      R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)             \
    R(sec_vlan_ts_mark_cksum,                                            \
      R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F)                     \
    R(sec_vlan_ts_mark_cksum_rss,                                        \
      R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)             \
    R(sec_vlan_ts_mark_cksum_ptype,                                      \
      R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)           \
    R(sec_vlan_ts_mark_cksum_ptype_rss,                                  \
      R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
#define NIX_RX_FASTPATH_MODES                                            \
    NIX_RX_FASTPATH_MODES_0_15                                           \
    NIX_RX_FASTPATH_MODES_16_31                                          \
    NIX_RX_FASTPATH_MODES_32_47                                          \
    NIX_RX_FASTPATH_MODES_48_63                                          \
    NIX_RX_FASTPATH_MODES_64_79                                          \
    NIX_RX_FASTPATH_MODES_80_95                                          \
    NIX_RX_FASTPATH_MODES_96_111                                         \
    NIX_RX_FASTPATH_MODES_112_127
#define R(name, flags)                                                   \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name(        \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name(   \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_##name(    \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_mseg_##name( \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_##name(   \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_mseg_##name( \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_vec_##name( \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);       \
    uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_reas_vec_mseg_##name( \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R
#define NIX_RX_RECV(fn, flags)                                           \
    uint16_t __rte_noinline __rte_hot fn(                                \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)        \
    {                                                                    \
        return cn10k_nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));    \
    }

#define NIX_RX_RECV_MSEG(fn, flags) NIX_RX_RECV(fn, flags | NIX_RX_MULTI_SEG_F)

#define NIX_RX_RECV_VEC(fn, flags)                                       \
    uint16_t __rte_noinline __rte_hot fn(                                \
        void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)        \
    {                                                                    \
        return cn10k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,       \
                                          (flags), NULL, NULL, 0);       \
    }

#define NIX_RX_RECV_VEC_MSEG(fn, flags)                                  \
    NIX_RX_RECV_VEC(fn, flags | NIX_RX_MULTI_SEG_F)
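/*
 * Example (illustrative): a .c file instantiates one fast-path symbol per
 * NIX_RX_FASTPATH_MODES entry by expanding the NIX_RX_RECV* helpers, e.g.
 * for the scalar "cksum_ptype_rss" variant:
 *
 *   NIX_RX_RECV(cn10k_nix_recv_pkts_cksum_ptype_rss,
 *               CKSUM_F | PTYPE_F | RSS_F)
 *
 * which expands to a cn10k_nix_recv_pkts() wrapper specialized at compile
 * time for that flag combination.
 */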
#endif /* __CN10K_RX_H__ */