/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN10K_RX_H__
#define __CN10K_RX_H__

#include <rte_ether.h>
#include <rte_security.h>
#include <rte_vect.h>

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)
#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)

/* Flags to control cqe_to_mbuf conversion function.
 * Defined from the MSB end so that they are not
 * mistaken for the offload flags used to pick the Rx function.
 */
#define NIX_RX_VWQE_F	   BIT(13)
#define NIX_RX_MULTI_SEG_F BIT(14)
#define CPT_RX_WQE_F	   BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)

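/* CQE_PTR_OFF resolves the address of offset 'o' inside CQE 'i': with
 * NIX_RX_VWQE_F the base 'b' is an array of per-packet CQE pointers
 * (one per event vector entry), otherwise 'b' is the CQ ring base and
 * the CQE is found by its 128B index.
 */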
#define CQE_PTR_OFF(b, i, o, f)                                                \
	(((f) & NIX_RX_VWQE_F) ?                                               \
		 (uint64_t *)(((uintptr_t)((uint64_t *)(b))[i]) + (o)) :       \
		 (uint64_t *)(((uintptr_t)(b)) + CQE_SZ(i) + (o)))

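/* Overlay of the mbuf's 8B rearm_data word (data_off, refcnt, nb_segs,
 * port), so a single field can be patched without shifts and masks.
 */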
union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}

static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}

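/* An LMT line used for meta frees holds one 8B NPA batch-free descriptor
 * followed by buffer pointers. Bit 32 of the descriptor flags an odd
 * pointer count, and bits [7:4] of the submitted I/O address carry the
 * payload size in 16B blocks.
 */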
static __rte_always_inline void
nix_sec_flush_meta(uintptr_t laddr, uint16_t lmt_id, uint8_t loff,
		   uintptr_t aura_handle)
{
	uint64_t pa;

	/* laddr is pointing to first pointer */
	laddr -= 8;

	/* Trigger free either on lmtline full or different aura handle */
	pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;

	/* Update aura handle */
	*(uint64_t *)laddr = (((uint64_t)(loff & 0x1) << 32) |
			      roc_npa_aura_handle_to_aura(aura_handle));

	pa |= ((loff >> 1) << 4);
	roc_lmt_submit_steorl(lmt_id, pa);
}

static __rte_always_inline struct rte_mbuf *
nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, const uint64_t sa_base, uintptr_t laddr,
			uint8_t *loff, struct rte_mbuf *mbuf, uint16_t data_off)
{
	const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
	const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
	struct cn10k_inb_priv_data *inb_priv;
	struct rte_mbuf *inner;
	uint32_t sa_idx;
	void *inb_sa;
	uint64_t w0;

	if (cq_w1 & BIT(11)) {
		inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
					    sizeof(struct rte_mbuf));

		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
		w0 = hdr->w0.u64;
		sa_idx = w0 >> 32;

		inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

		/* Update dynamic field with userdata */
		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;

		/* Update l2 hdr length first */
		inner->pkt_len = (hdr->w2.il3_off -
				  sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7));

		/* Store meta in lmtline to free
		 * Assume all metas are from the same aura.
		 */
		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
		*loff = *loff + 1;

		/* Return inner mbuf */
		return inner;
	}

	/* Return same mbuf as it is not a decrypted pkt */
	return mbuf;
}

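/* nix_sec_meta_to_mbuf_sc() above and its vector counterpart below both
 * key off BIT(11) of CQE W1: only packets that went through inline IPsec
 * carry a CPT_PARSE_S header in the meta buffer and an inner (decrypted)
 * mbuf behind wqe_ptr.
 */
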
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline struct rte_mbuf *
nix_sec_meta_to_mbuf(uint64_t cq_w1, uintptr_t sa_base, uintptr_t laddr,
		     uint8_t *loff, struct rte_mbuf *mbuf, uint16_t data_off,
		     uint8x16_t *rx_desc_field1, uint64_t *ol_flags)
{
	const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
	const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
	struct cn10k_inb_priv_data *inb_priv;
	struct rte_mbuf *inner;
	uint64_t *sg, res_w1;
	uint32_t sa_idx;
	void *inb_sa;
	uint16_t len;
	uint64_t w0;

	if (cq_w1 & BIT(11)) {
		inner = (struct rte_mbuf *)(rte_be_to_cpu_64(hdr->wqe_ptr) -
					    sizeof(struct rte_mbuf));
		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
		w0 = hdr->w0.u64;
		sa_idx = w0 >> 32;

		inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

		/* Update dynamic field with userdata */
		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;

		/* CPT result (struct cpt_cn10k_res_s) is after
		 * the first IOVA in meta
		 */
		sg = (uint64_t *)(inner + 1);
		res_w1 = sg[10];

		/* Clear checksum flags and update security flag */
		*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK | RTE_MBUF_F_RX_IP_CKSUM_MASK);
		*ol_flags |= (((res_w1 & 0xFF) == CPT_COMP_WARN) ?
			      RTE_MBUF_F_RX_SEC_OFFLOAD :
			      (RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
		/* Calculate inner packet length */
		len = ((res_w1 >> 16) & 0xFFFF) + hdr->w2.il3_off -
		      sizeof(struct cpt_parse_hdr_s) - (w0 & 0x7);
		/* Update pkt_len and data_len */
		*rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 2);
		*rx_desc_field1 = vsetq_lane_u16(len, *rx_desc_field1, 4);

		/* Store meta in lmtline to free
		 * Assume all metas are from the same aura.
		 */
		*(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
		*loff = *loff + 1;

		/* Return inner mbuf */
		return inner;
	}

	/* Return same mbuf as it is not a decrypted pkt */
	return mbuf;
}
#endif

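/* nix_ptype_get() composes the packet type from two halves of the lookup
 * table: bits [51:36] of CQE W1 (outer layer types) index the first
 * PTYPE_NON_TUNNEL_ARRAY_SZ entries, while the LF/LG/LH bits [63:52]
 * index the tunnel half appended after them.
 */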
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}

static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}

static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id
	 * is valid, and no flag to distinguish an
	 * RTE_FLOW_ACTION_TYPE_FLAG action from an
	 * RTE_FLOW_ACTION_TYPE_MARK action.
	 * The former case is addressed by 0 being an invalid
	 * value and by the inc/dec of the match_id pair when MARK
	 * is activated. The latter case is addressed by defining
	 * CNXK_FLOW_MARK_DEFAULT as the value for
	 * RTE_FLOW_ACTION_TYPE_MARK.
	 * This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
	 * i.e. valid mark_ids range from
	 * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= RTE_MBUF_F_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}

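/* NIX_RX_SG_S packs up to three 16-bit segment sizes per descriptor word,
 * each followed by one IOVA per segment; chains longer than three segments
 * continue with a further SG word inside the same CQE. The walk below
 * stitches the mbuf chain from that layout.
 */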
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm, const uint16_t flags)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;

	if (nb_segs == 1) {
		mbuf->next = NULL;
		return;
	}

	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->nb_segs = nb_segs;
	sg = sg >> 16;

	eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
	nb_segs--;

	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}
	mbuf->next = NULL;
}

static __rte_always_inline void
cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		      struct rte_mbuf *mbuf, const void *lookup_mem,
		      const uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	const uint64_t w1 = *(const uint64_t *)rx;
	uint16_t len = rx->pkt_lenm1 + 1;
	uint64_t ol_flags = 0;

	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
	else
		mbuf->packet_type = 0;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}

	/* Process Security packets */
	if (flag & NIX_RX_OFFLOAD_SECURITY_F) {
		if (w1 & BIT(11)) {
			/* CPT result (struct cpt_cn10k_res_s) is after
			 * the first IOVA in meta
			 */
			const uint64_t *sg = (const uint64_t *)(mbuf + 1);
			const uint64_t res_w1 = sg[10];
			const uint16_t uc_cc = res_w1 & 0xFF;

			len = ((res_w1 >> 16) & 0xFFFF) + mbuf->pkt_len;
			ol_flags |= ((uc_cc == CPT_COMP_WARN) ?
				     RTE_MBUF_F_RX_SEC_OFFLOAD :
				     (RTE_MBUF_F_RX_SEC_OFFLOAD |
				      RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED));
		} else {
			if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
				ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
		}
	} else {
		if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
			ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
	}

	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
		if (rx->vtag0_gone) {
			ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = rx->vtag0_tci;
		}
		if (rx->vtag1_gone) {
			ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = rx->vtag1_tci;
		}
	}

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);

	mbuf->ol_flags = ol_flags;
	mbuf->pkt_len = len;
	mbuf->data_len = len;
	*(uint64_t *)(&mbuf->rearm_data) = val;

	if (flag & NIX_RX_MULTI_SEG_F)
		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
	else
		mbuf->next = NULL;
}

static inline uint16_t
nix_rx_nb_pkts(struct cn10k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}

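/* Example: with qmask = 0xFFFFF, head = 0xFFFFE and tail = 0x1 make
 * tail - head underflow, and adding qmask + 1 recovers the 3 CQEs
 * pending across the ring wrap.
 */
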
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		    const uint16_t flags)
{
	struct cn10k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint64_t lbase = rxq->lmt_base;
	uint16_t packets = 0, nb_pkts;
	uint8_t loff = 0, lnum = 0;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;
	uint64_t aura_handle;
	uint64_t sa_base;
	uint16_t lmt_id;
	uint64_t laddr;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
		aura_handle = rxq->aura_handle;
		sa_base = rxq->sa_base;
		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
		laddr = lbase;
		laddr += 8;
	}

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		/* Translate meta to mbuf */
		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			const uint64_t cq_w1 = *((const uint64_t *)cq + 1);

			mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, sa_base, laddr,
						       &loff, mbuf, data_off);
		}

		cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				      flags);
		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
					(flags & NIX_RX_MULTI_SEG_F),
					(uint64_t *)((uint8_t *)mbuf
						     + data_off));
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;

		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			/* Flush when we don't have space for one more meta */
			if ((15 - loff) < 1) {
				nix_sec_flush_meta(laddr, lmt_id + lnum, loff,
						   aura_handle);
				lnum++;
				lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) -
					1;
				/* First pointer starts at 8B offset */
				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
				loff = 0;
			}
		}
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	/* Free remaining meta buffers if any */
	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
		nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
		plt_io_wmb();
	}

	return nb_pkts;
}

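/* Note: an LMT line is 128B, i.e. one 8B batch-free descriptor plus at
 * most 15 meta pointers, which is what the "15 - loff" checks guard.
 */
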
#if defined(RTE_ARCH_ARM64)

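/* CQE W2 flags stripped VLAN state: bit 21 (vtag0_gone) and bit 23
 * (vtag1_gone) report a stripped tag, with the TCIs at bits [47:32]
 * and [63:48] respectively.
 */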
static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
	if (w2 & BIT_ULL(21) /* vtag0_gone */) {
		ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
	}

	return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
	if (w2 & BIT_ULL(23) /* vtag1_gone */) {
		ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
		mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
	}

	return ol_flags;
}

static __rte_always_inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
			   const uint16_t flags, void *lookup_mem,
			   struct cnxk_timesync_info *tstamp,
			   uintptr_t lmt_base)
{
	struct cn10k_eth_rxq *rxq = args;
	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
						  *(uint64_t *)args :
						  rxq->mbuf_initializer;
	const uint64x2_t data_off = flags & NIX_RX_VWQE_F ?
					    vdupq_n_u64(0x80ULL) :
					    vdupq_n_u64(rxq->data_off);
	const uint32_t qmask = flags & NIX_RX_VWQE_F ? 0 : rxq->qmask;
	const uint64_t wdata = flags & NIX_RX_VWQE_F ? 0 : rxq->wdata;
	const uintptr_t desc = flags & NIX_RX_VWQE_F ? 0 : rxq->desc;
	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint64_t aura_handle, lbase, laddr;
	uint8_t loff = 0, lnum = 0;
	uint8x16_t f0, f1, f2, f3;
	uint16_t lmt_id, d_off;
	uint16_t packets = 0;
	uint16_t pkts_left;
	uintptr_t sa_base;
	uint32_t head;
	uintptr_t cq0;

	if (!(flags & NIX_RX_VWQE_F)) {
		lookup_mem = rxq->lookup_mem;
		head = rxq->head;

		pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
		pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
		/* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
		pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
			tstamp = rxq->tstamp;
	} else {
		pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
		pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
	}

	if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
		if (flags & NIX_RX_VWQE_F) {
			uint16_t port;

			mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] -
						    sizeof(struct rte_mbuf));
			/* Pick first mbuf's aura handle assuming all
			 * mbufs are from a vec and are from same RQ.
			 */
			aura_handle = mbuf0->pool->pool_id;
			/* Calculate offset from mbuf to actual data area */
			d_off = ((uintptr_t)mbuf0->buf_addr - (uintptr_t)mbuf0);
			d_off += (mbuf_initializer & 0xFFFF);

			/* Get SA Base from lookup tbl using port_id */
			port = mbuf_initializer >> 48;
			sa_base = cnxk_nix_sa_base_get(port, lookup_mem);

			lbase = lmt_base;
		} else {
			aura_handle = rxq->aura_handle;
			d_off = rxq->data_off;
			sa_base = rxq->sa_base;
			lbase = rxq->lmt_base;
		}
		sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
		ROC_LMT_BASE_ID_GET(lbase, lmt_id);
		laddr = lbase;
		laddr += 8;
	}

	while (packets < pkts) {
		if (!(flags & NIX_RX_VWQE_F)) {
			/* Exit loop if head is about to wrap and become
			 * unaligned.
			 */
			if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
			    NIX_DESCS_PER_LOOP) {
				pkts_left += (pkts - packets);
				break;
			}

			cq0 = desc + CQE_SZ(head);
		} else {
			cq0 = (uintptr_t)&mbufs[packets];
		}

		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 8, 0, flags));
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 9, 0, flags));
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 10, 0, flags));
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 11, 0, flags));

		/* Get NIX_RX_SG_S for size and buffer pointer */
		cq0_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 0, 64, flags));
		cq1_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 1, 64, flags));
		cq2_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 2, 64, flags));
		cq3_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 3, 64, flags));

		if (!(flags & NIX_RX_VWQE_F)) {
			/* Extract mbuf from NIX_RX_SG_S */
			mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
			mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
			mbuf01 = vqsubq_u64(mbuf01, data_off);
			mbuf23 = vqsubq_u64(mbuf23, data_off);
		} else {
			mbuf01 =
				vsubq_u64(vld1q_u64((uint64_t *)cq0), data_off);
			mbuf23 = vsubq_u64(vld1q_u64((uint64_t *)(cq0 + 16)),
					   data_off);
		}

		/* Move mbufs to scalar registers for future use */
		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

		/* Mask to get packet len from NIX_RX_SG_S */
		const uint8x16_t shuf_msk = {
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0, 1,	    /* octet 1~0, low 16 bits pkt_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			0, 1,	    /* octet 1~0, 16 bits data_len */
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);

		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			/* Prefetch probable CPT parse header area */
			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf0, d_off));
			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf1, d_off));
			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf2, d_off));
			rte_prefetch_non_temporal(RTE_PTR_ADD(mbuf3, d_off));
		}

		/* Load CQE word0 and word 1 */
		const uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);
		const uint64_t cq0_w1 = *CQE_PTR_OFF(cq0, 0, 8, flags);
		const uint64_t cq1_w0 = *CQE_PTR_OFF(cq0, 1, 0, flags);
		const uint64_t cq1_w1 = *CQE_PTR_OFF(cq0, 1, 8, flags);
		const uint64_t cq2_w0 = *CQE_PTR_OFF(cq0, 2, 0, flags);
		const uint64_t cq2_w1 = *CQE_PTR_OFF(cq0, 2, 8, flags);
		const uint64_t cq3_w0 = *CQE_PTR_OFF(cq0, 3, 0, flags);
		const uint64_t cq3_w1 = *CQE_PTR_OFF(cq0, 3, 8, flags);

		if (flags & NIX_RX_OFFLOAD_RSS_F) {
			/* Fill rss in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
			ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
		} else {
			ol_flags0 = 0;
			ol_flags1 = 0;
			ol_flags2 = 0;
			ol_flags3 = 0;
		}

		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
			/* Fill packet_type in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
					    f0, 0);
			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
					    f1, 0);
			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
					    f2, 0);
			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
					    f3, 0);
		}

		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
		}

		/* Translate meta to mbuf */
		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			/* Checksum ol_flags will be cleared if mbuf is meta */
			mbuf0 = nix_sec_meta_to_mbuf(cq0_w1, sa_base, laddr,
						     &loff, mbuf0, d_off, &f0,
						     &ol_flags0);
			mbuf01 = vsetq_lane_u64((uint64_t)mbuf0, mbuf01, 0);

			mbuf1 = nix_sec_meta_to_mbuf(cq1_w1, sa_base, laddr,
						     &loff, mbuf1, d_off, &f1,
						     &ol_flags1);
			mbuf01 = vsetq_lane_u64((uint64_t)mbuf1, mbuf01, 1);

			mbuf2 = nix_sec_meta_to_mbuf(cq2_w1, sa_base, laddr,
						     &loff, mbuf2, d_off, &f2,
						     &ol_flags2);
			mbuf23 = vsetq_lane_u64((uint64_t)mbuf2, mbuf23, 0);

			mbuf3 = nix_sec_meta_to_mbuf(cq3_w1, sa_base, laddr,
						     &loff, mbuf3, d_off, &f3,
						     &ol_flags3);
			mbuf23 = vsetq_lane_u64((uint64_t)mbuf3, mbuf23, 1);
		}

		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
			uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
			uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
			ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
			ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

			ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
			ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
			ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
			ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
			ol_flags0 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 0, 38, flags),
				ol_flags0, mbuf0);
			ol_flags1 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 1, 38, flags),
				ol_flags1, mbuf1);
			ol_flags2 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 2, 38, flags),
				ol_flags2, mbuf2);
			ol_flags3 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 3, 38, flags),
				ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
			const uint16x8_t len_off = {
				0,			     /* ptype   0:15 */
				0,			     /* ptype  16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15 */
				0,			     /* pktlen 16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
				0,
				0,
				0};
			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC};
			const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
						RTE_MBUF_F_RX_IEEE1588_TMST |
						tstamp->rx_tstamp_dynflag;
			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
			uint64x2_t ts01, ts23, mask;
			uint64_t ts[4];
			uint32_t res;

			/* Subtract timesync length from total pkt length. */
			f0 = vsubq_u16(f0, len_off);
			f1 = vsubq_u16(f1, len_off);
			f2 = vsubq_u16(f2, len_off);
			f3 = vsubq_u16(f3, len_off);

			/* Get the address of actual timestamp. */
			ts01 = vaddq_u64(mbuf01, data_off);
			ts23 = vaddq_u64(mbuf23, data_off);
			/* Load timestamp from address. */
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  0),
					      ts01, 0);
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  1),
					      ts01, 1);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  0),
					      ts23, 0);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  1),
					      ts23, 1);
			/* Convert from be to cpu byteorder. */
			ts01 = vrev64q_u8(ts01);
			ts23 = vrev64q_u8(ts23);
			/* Store timestamp into scalar for later use. */
			ts[0] = vgetq_lane_u64(ts01, 0);
			ts[1] = vgetq_lane_u64(ts01, 1);
			ts[2] = vgetq_lane_u64(ts23, 0);
			ts[3] = vgetq_lane_u64(ts23, 1);

			/* Store timestamp into dynfield. */
			*cnxk_nix_timestamp_dynfield(mbuf0, tstamp) = ts[0];
			*cnxk_nix_timestamp_dynfield(mbuf1, tstamp) = ts[1];
			*cnxk_nix_timestamp_dynfield(mbuf2, tstamp) = ts[2];
			*cnxk_nix_timestamp_dynfield(mbuf3, tstamp) = ts[3];

			/* Generate ptype mask to filter L2 ether timesync */
			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

			/* Match against L2 ether timesync. */
			mask = vceqq_u32(mask, ptype);
			/* Convert from vector to scalar mask */
			res = vaddvq_u32(vandq_u32(mask, and_mask));

			/* Fill in the ol_flags for any packets that
			 * match the timesync ptype.
			 */
			ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
			ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
			ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
			ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

			/* Update Rxq timestamp with the latest
			 * timestamp.
			 */
			tstamp->rx_ready = 1;
			tstamp->rx_tstamp = ts[31 - __builtin_clz(res)];
		}

		/* Form rearm_data with ol_flags */
		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

		/* Update rx_descriptor_fields1 */
		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

		/* Update rearm_data */
		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

		/* Store the mbufs to rx_pkts */
		vst1q_u64((uint64_t *)&mbufs[packets], mbuf01);
		vst1q_u64((uint64_t *)&mbufs[packets + 2], mbuf23);

		if (flags & NIX_RX_MULTI_SEG_F) {
			/* Multi segment is enabled; build the mseg list for
			 * each mbuf in scalar mode.
			 */
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 0, 8, flags)),
					    mbuf0, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 1, 8, flags)),
					    mbuf1, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 2, 8, flags)),
					    mbuf2, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (CQE_PTR_OFF(cq0, 3, 8, flags)),
					    mbuf3, mbuf_initializer, flags);
		} else {
			/* Update that there are no more segments */
			mbuf0->next = NULL;
			mbuf1->next = NULL;
			mbuf2->next = NULL;
			mbuf3->next = NULL;
		}

		roc_prefetch_store_keep(mbuf0);
		roc_prefetch_store_keep(mbuf1);
		roc_prefetch_store_keep(mbuf2);
		roc_prefetch_store_keep(mbuf3);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);

		packets += NIX_DESCS_PER_LOOP;

		if (!(flags & NIX_RX_VWQE_F)) {
			/* Advance head pointer and packets */
			head += NIX_DESCS_PER_LOOP;
			head &= qmask;
		}

		if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
			/* Flush when we don't have space for 4 more metas */
			if ((15 - loff) < 4) {
				nix_sec_flush_meta(laddr, lmt_id + lnum, loff,
						   aura_handle);
				lnum++;
				lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) -
					1;
				/* First pointer starts at 8B offset */
				laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
				loff = 0;
			}
		}
	}

	/* Free remaining meta buffers if any */
	if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
		nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
		if (flags & NIX_RX_VWQE_F)
			plt_io_wmb();
	}

	if (flags & NIX_RX_VWQE_F)
		return packets;

	rxq->head = head;
	rxq->available -= packets;

	rte_io_wmb();
	/* Free all the CQs that we've processed */
	plt_write64((rxq->wdata | packets), rxq->cq_door);

	if (unlikely(pkts_left))
		packets += cn10k_nix_recv_pkts(args, &mbufs[packets], pkts_left,
					       flags);

	return packets;
}

#else

static inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
			   const uint16_t flags, void *lookup_mem,
			   struct cnxk_timesync_info *tstamp,
			   uintptr_t lmt_base)
{
	RTE_SET_USED(args);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);
	RTE_SET_USED(flags);
	RTE_SET_USED(lookup_mem);
	RTE_SET_USED(tstamp);
	RTE_SET_USED(lmt_base);

	return 0;
}

#endif

#define RSS_F	  NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F	  NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
#define R_SEC_F	  NIX_RX_OFFLOAD_SECURITY_F

/* [R_SEC_F] [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES	\
R(no_offload, 0, 0, 0, 0, 0, 0, 0,	\
		NIX_RX_OFFLOAD_NONE)	\
R(rss, 0, 0, 0, 0, 0, 0, 1,	\
		RSS_F)	\
R(ptype, 0, 0, 0, 0, 0, 1, 0,	\
		PTYPE_F)	\
R(ptype_rss, 0, 0, 0, 0, 0, 1, 1,	\
		PTYPE_F | RSS_F)	\
R(cksum, 0, 0, 0, 0, 1, 0, 0,	\
		CKSUM_F)	\
R(cksum_rss, 0, 0, 0, 0, 1, 0, 1,	\
		CKSUM_F | RSS_F)	\
R(cksum_ptype, 0, 0, 0, 0, 1, 1, 0,	\
		CKSUM_F | PTYPE_F)	\
R(cksum_ptype_rss, 0, 0, 0, 0, 1, 1, 1,	\
		CKSUM_F | PTYPE_F | RSS_F)	\
R(mark, 0, 0, 0, 1, 0, 0, 0,	\
		MARK_F)	\
R(mark_rss, 0, 0, 0, 1, 0, 0, 1,	\
		MARK_F | RSS_F)	\
R(mark_ptype, 0, 0, 0, 1, 0, 1, 0,	\
		MARK_F | PTYPE_F)	\
R(mark_ptype_rss, 0, 0, 0, 1, 0, 1, 1,	\
		MARK_F | PTYPE_F | RSS_F)	\
R(mark_cksum, 0, 0, 0, 1, 1, 0, 0,	\
		MARK_F | CKSUM_F)	\
R(mark_cksum_rss, 0, 0, 0, 1, 1, 0, 1,	\
		MARK_F | CKSUM_F | RSS_F)	\
R(mark_cksum_ptype, 0, 0, 0, 1, 1, 1, 0,	\
		MARK_F | CKSUM_F | PTYPE_F)	\
R(mark_cksum_ptype_rss, 0, 0, 0, 1, 1, 1, 1,	\
		MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(ts, 0, 0, 1, 0, 0, 0, 0,	\
		TS_F)	\
R(ts_rss, 0, 0, 1, 0, 0, 0, 1,	\
		TS_F | RSS_F)	\
R(ts_ptype, 0, 0, 1, 0, 0, 1, 0,	\
		TS_F | PTYPE_F)	\
R(ts_ptype_rss, 0, 0, 1, 0, 0, 1, 1,	\
		TS_F | PTYPE_F | RSS_F)	\
R(ts_cksum, 0, 0, 1, 0, 1, 0, 0,	\
		TS_F | CKSUM_F)	\
R(ts_cksum_rss, 0, 0, 1, 0, 1, 0, 1,	\
		TS_F | CKSUM_F | RSS_F)	\
R(ts_cksum_ptype, 0, 0, 1, 0, 1, 1, 0,	\
		TS_F | CKSUM_F | PTYPE_F)	\
R(ts_cksum_ptype_rss, 0, 0, 1, 0, 1, 1, 1,	\
		TS_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(ts_mark, 0, 0, 1, 1, 0, 0, 0,	\
		TS_F | MARK_F)	\
R(ts_mark_rss, 0, 0, 1, 1, 0, 0, 1,	\
		TS_F | MARK_F | RSS_F)	\
R(ts_mark_ptype, 0, 0, 1, 1, 0, 1, 0,	\
		TS_F | MARK_F | PTYPE_F)	\
R(ts_mark_ptype_rss, 0, 0, 1, 1, 0, 1, 1,	\
		TS_F | MARK_F | PTYPE_F | RSS_F)	\
R(ts_mark_cksum, 0, 0, 1, 1, 1, 0, 0,	\
		TS_F | MARK_F | CKSUM_F)	\
R(ts_mark_cksum_rss, 0, 0, 1, 1, 1, 0, 1,	\
		TS_F | MARK_F | CKSUM_F | RSS_F)	\
R(ts_mark_cksum_ptype, 0, 0, 1, 1, 1, 1, 0,	\
		TS_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(ts_mark_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, 1,	\
		TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(vlan, 0, 1, 0, 0, 0, 0, 0,	\
		RX_VLAN_F)	\
R(vlan_rss, 0, 1, 0, 0, 0, 0, 1,	\
		RX_VLAN_F | RSS_F)	\
R(vlan_ptype, 0, 1, 0, 0, 0, 1, 0,	\
		RX_VLAN_F | PTYPE_F)	\
R(vlan_ptype_rss, 0, 1, 0, 0, 0, 1, 1,	\
		RX_VLAN_F | PTYPE_F | RSS_F)	\
R(vlan_cksum, 0, 1, 0, 0, 1, 0, 0,	\
		RX_VLAN_F | CKSUM_F)	\
R(vlan_cksum_rss, 0, 1, 0, 0, 1, 0, 1,	\
		RX_VLAN_F | CKSUM_F | RSS_F)	\
R(vlan_cksum_ptype, 0, 1, 0, 0, 1, 1, 0,	\
		RX_VLAN_F | CKSUM_F | PTYPE_F)	\
R(vlan_cksum_ptype_rss, 0, 1, 0, 0, 1, 1, 1,	\
		RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(vlan_mark, 0, 1, 0, 1, 0, 0, 0,	\
		RX_VLAN_F | MARK_F)	\
R(vlan_mark_rss, 0, 1, 0, 1, 0, 0, 1,	\
		RX_VLAN_F | MARK_F | RSS_F)	\
R(vlan_mark_ptype, 0, 1, 0, 1, 0, 1, 0,	\
		RX_VLAN_F | MARK_F | PTYPE_F)	\
R(vlan_mark_ptype_rss, 0, 1, 0, 1, 0, 1, 1,	\
		RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)	\
R(vlan_mark_cksum, 0, 1, 0, 1, 1, 0, 0,	\
		RX_VLAN_F | MARK_F | CKSUM_F)	\
R(vlan_mark_cksum_rss, 0, 1, 0, 1, 1, 0, 1,	\
		RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)	\
R(vlan_mark_cksum_ptype, 0, 1, 0, 1, 1, 1, 0,	\
		RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(vlan_mark_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, 1,	\
		RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(vlan_ts, 0, 1, 1, 0, 0, 0, 0,	\
		RX_VLAN_F | TS_F)	\
R(vlan_ts_rss, 0, 1, 1, 0, 0, 0, 1,	\
		RX_VLAN_F | TS_F | RSS_F)	\
R(vlan_ts_ptype, 0, 1, 1, 0, 0, 1, 0,	\
		RX_VLAN_F | TS_F | PTYPE_F)	\
R(vlan_ts_ptype_rss, 0, 1, 1, 0, 0, 1, 1,	\
		RX_VLAN_F | TS_F | PTYPE_F | RSS_F)	\
R(vlan_ts_cksum, 0, 1, 1, 0, 1, 0, 0,	\
		RX_VLAN_F | TS_F | CKSUM_F)	\
R(vlan_ts_cksum_rss, 0, 1, 1, 0, 1, 0, 1,	\
		RX_VLAN_F | TS_F | CKSUM_F | RSS_F)	\
R(vlan_ts_cksum_ptype, 0, 1, 1, 0, 1, 1, 0,	\
		RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)	\
R(vlan_ts_cksum_ptype_rss, 0, 1, 1, 0, 1, 1, 1,	\
		RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(vlan_ts_mark, 0, 1, 1, 1, 0, 0, 0,	\
		RX_VLAN_F | TS_F | MARK_F)	\
R(vlan_ts_mark_rss, 0, 1, 1, 1, 0, 0, 1,	\
		RX_VLAN_F | TS_F | MARK_F | RSS_F)	\
R(vlan_ts_mark_ptype, 0, 1, 1, 1, 0, 1, 0,	\
		RX_VLAN_F | TS_F | MARK_F | PTYPE_F)	\
R(vlan_ts_mark_ptype_rss, 0, 1, 1, 1, 0, 1, 1,	\
		RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)	\
R(vlan_ts_mark_cksum, 0, 1, 1, 1, 1, 0, 0,	\
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F)	\
R(vlan_ts_mark_cksum_rss, 0, 1, 1, 1, 1, 0, 1,	\
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)	\
R(vlan_ts_mark_cksum_ptype, 0, 1, 1, 1, 1, 1, 0,	\
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(vlan_ts_mark_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, 1,	\
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec, 1, 0, 0, 0, 0, 0, 0,	\
		R_SEC_F)	\
R(sec_rss, 1, 0, 0, 0, 0, 0, 1,	\
		R_SEC_F | RSS_F)	\
R(sec_ptype, 1, 0, 0, 0, 0, 1, 0,	\
		R_SEC_F | PTYPE_F)	\
R(sec_ptype_rss, 1, 0, 0, 0, 0, 1, 1,	\
		R_SEC_F | PTYPE_F | RSS_F)	\
R(sec_cksum, 1, 0, 0, 0, 1, 0, 0,	\
		R_SEC_F | CKSUM_F)	\
R(sec_cksum_rss, 1, 0, 0, 0, 1, 0, 1,	\
		R_SEC_F | CKSUM_F | RSS_F)	\
R(sec_cksum_ptype, 1, 0, 0, 0, 1, 1, 0,	\
		R_SEC_F | CKSUM_F | PTYPE_F)	\
R(sec_cksum_ptype_rss, 1, 0, 0, 0, 1, 1, 1,	\
		R_SEC_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_mark, 1, 0, 0, 1, 0, 0, 0,	\
		R_SEC_F | MARK_F)	\
R(sec_mark_rss, 1, 0, 0, 1, 0, 0, 1,	\
		R_SEC_F | MARK_F | RSS_F)	\
R(sec_mark_ptype, 1, 0, 0, 1, 0, 1, 0,	\
		R_SEC_F | MARK_F | PTYPE_F)	\
R(sec_mark_ptype_rss, 1, 0, 0, 1, 0, 1, 1,	\
		R_SEC_F | MARK_F | PTYPE_F | RSS_F)	\
R(sec_mark_cksum, 1, 0, 0, 1, 1, 0, 0,	\
		R_SEC_F | MARK_F | CKSUM_F)	\
R(sec_mark_cksum_rss, 1, 0, 0, 1, 1, 0, 1,	\
		R_SEC_F | MARK_F | CKSUM_F | RSS_F)	\
R(sec_mark_cksum_ptype, 1, 0, 0, 1, 1, 1, 0,	\
		R_SEC_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(sec_mark_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, 1,	\
		R_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_ts, 1, 0, 1, 0, 0, 0, 0,	\
		R_SEC_F | TS_F)	\
R(sec_ts_rss, 1, 0, 1, 0, 0, 0, 1,	\
		R_SEC_F | TS_F | RSS_F)	\
R(sec_ts_ptype, 1, 0, 1, 0, 0, 1, 0,	\
		R_SEC_F | TS_F | PTYPE_F)	\
R(sec_ts_ptype_rss, 1, 0, 1, 0, 0, 1, 1,	\
		R_SEC_F | TS_F | PTYPE_F | RSS_F)	\
R(sec_ts_cksum, 1, 0, 1, 0, 1, 0, 0,	\
		R_SEC_F | TS_F | CKSUM_F)	\
R(sec_ts_cksum_rss, 1, 0, 1, 0, 1, 0, 1,	\
		R_SEC_F | TS_F | CKSUM_F | RSS_F)	\
R(sec_ts_cksum_ptype, 1, 0, 1, 0, 1, 1, 0,	\
		R_SEC_F | TS_F | CKSUM_F | PTYPE_F)	\
R(sec_ts_cksum_ptype_rss, 1, 0, 1, 0, 1, 1, 1,	\
		R_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_ts_mark, 1, 0, 1, 1, 0, 0, 0,	\
		R_SEC_F | TS_F | MARK_F)	\
R(sec_ts_mark_rss, 1, 0, 1, 1, 0, 0, 1,	\
		R_SEC_F | TS_F | MARK_F | RSS_F)	\
R(sec_ts_mark_ptype, 1, 0, 1, 1, 0, 1, 0,	\
		R_SEC_F | TS_F | MARK_F | PTYPE_F)	\
R(sec_ts_mark_ptype_rss, 1, 0, 1, 1, 0, 1, 1,	\
		R_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F)	\
R(sec_ts_mark_cksum, 1, 0, 1, 1, 1, 0, 0,	\
		R_SEC_F | TS_F | MARK_F | CKSUM_F)	\
R(sec_ts_mark_cksum_rss, 1, 0, 1, 1, 1, 0, 1,	\
		R_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F)	\
R(sec_ts_mark_cksum_ptype, 1, 0, 1, 1, 1, 1, 0,	\
		R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(sec_ts_mark_cksum_ptype_rss, 1, 0, 1, 1, 1, 1, 1,	\
		R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_vlan, 1, 1, 0, 0, 0, 0, 0,	\
		R_SEC_F | RX_VLAN_F)	\
R(sec_vlan_rss, 1, 1, 0, 0, 0, 0, 1,	\
		R_SEC_F | RX_VLAN_F | RSS_F)	\
R(sec_vlan_ptype, 1, 1, 0, 0, 0, 1, 0,	\
		R_SEC_F | RX_VLAN_F | PTYPE_F)	\
R(sec_vlan_ptype_rss, 1, 1, 0, 0, 0, 1, 1,	\
		R_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F)	\
R(sec_vlan_cksum, 1, 1, 0, 0, 1, 0, 0,	\
		R_SEC_F | RX_VLAN_F | CKSUM_F)	\
R(sec_vlan_cksum_rss, 1, 1, 0, 0, 1, 0, 1,	\
		R_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F)	\
R(sec_vlan_cksum_ptype, 1, 1, 0, 0, 1, 1, 0,	\
		R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F)	\
R(sec_vlan_cksum_ptype_rss, 1, 1, 0, 0, 1, 1, 1,	\
		R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_vlan_mark, 1, 1, 0, 1, 0, 0, 0,	\
		R_SEC_F | RX_VLAN_F | MARK_F)	\
R(sec_vlan_mark_rss, 1, 1, 0, 1, 0, 0, 1,	\
		R_SEC_F | RX_VLAN_F | MARK_F | RSS_F)	\
R(sec_vlan_mark_ptype, 1, 1, 0, 1, 0, 1, 0,	\
		R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F)	\
R(sec_vlan_mark_ptype_rss, 1, 1, 0, 1, 0, 1, 1,	\
		R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)	\
R(sec_vlan_mark_cksum, 1, 1, 0, 1, 1, 0, 0,	\
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F)	\
R(sec_vlan_mark_cksum_rss, 1, 1, 0, 1, 1, 0, 1,	\
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)	\
R(sec_vlan_mark_cksum_ptype, 1, 1, 0, 1, 1, 1, 0,	\
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(sec_vlan_mark_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, 1,	\
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_vlan_ts, 1, 1, 1, 0, 0, 0, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F)	\
R(sec_vlan_ts_rss, 1, 1, 1, 0, 0, 0, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | RSS_F)	\
R(sec_vlan_ts_ptype, 1, 1, 1, 0, 0, 1, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F)	\
R(sec_vlan_ts_ptype_rss, 1, 1, 1, 0, 0, 1, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F | RSS_F)	\
R(sec_vlan_ts_cksum, 1, 1, 1, 0, 1, 0, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F)	\
R(sec_vlan_ts_cksum_rss, 1, 1, 1, 0, 1, 0, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | RSS_F)	\
R(sec_vlan_ts_cksum_ptype, 1, 1, 1, 0, 1, 1, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)	\
R(sec_vlan_ts_cksum_ptype_rss, 1, 1, 1, 0, 1, 1, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)	\
R(sec_vlan_ts_mark, 1, 1, 1, 1, 0, 0, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F)	\
R(sec_vlan_ts_mark_rss, 1, 1, 1, 1, 0, 0, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | RSS_F)	\
R(sec_vlan_ts_mark_ptype, 1, 1, 1, 1, 0, 1, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F)	\
R(sec_vlan_ts_mark_ptype_rss, 1, 1, 1, 1, 0, 1, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)	\
R(sec_vlan_ts_mark_cksum, 1, 1, 1, 1, 1, 0, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F)	\
R(sec_vlan_ts_mark_cksum_rss, 1, 1, 1, 1, 1, 0, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)	\
R(sec_vlan_ts_mark_cksum_ptype, 1, 1, 1, 1, 1, 1, 0,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)	\
R(sec_vlan_ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, 1, 1,	\
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

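/* Each mode above is a constant flag blob baked into one specialized Rx
 * function at build time, so every test on 'flags' folds away. As an
 * illustrative sketch (matching how the cn10k Rx .c files consume this
 * table), an entry such as R(cksum_rss, ...) ends up defined as:
 *
 *	uint16_t __rte_noinline __rte_hot
 *	cn10k_nix_recv_pkts_cksum_rss(void *rxq, struct rte_mbuf **pkts,
 *				      uint16_t nb)
 *	{
 *		return cn10k_nix_recv_pkts(rxq, pkts, nb, CKSUM_F | RSS_F);
 *	}
 */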
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name(	\
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);	\
	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name(	\
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);	\
	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_##name(	\
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);	\
	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R

#endif /* __CN10K_RX_H__ */