/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_RX_H__
#define __CN9K_RX_H__

#include <rte_ether.h>
#include <rte_security.h>
#include <rte_vect.h>

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)
#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)
#define NIX_RX_OFFLOAD_MAX	     (NIX_RX_OFFLOAD_SECURITY_F << 1)

/* Flags to control cqe_to_mbuf conversion function.
 * Defined from the MSB end to denote that they are
 * not used as offload flags to pick the Rx function.
 */
#define NIX_RX_MULTI_SEG_F BIT(14)
#define CPT_RX_WQE_F	   BIT(15)
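
/* Illustrative compile-time check (not part of the original header): the
 * offload flags and the control flags above share one 16-bit flag word, so
 * the highest offload bit must stay below NIX_RX_MULTI_SEG_F.
 */
_Static_assert(NIX_RX_OFFLOAD_MAX <= NIX_RX_MULTI_SEG_F,
	       "offload flags must not collide with control flags");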

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)

#define IPSEC_SQ_LO_IDX 4
#define IPSEC_SQ_HI_IDX 8

union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}

static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}
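
/* Sketch of the CQE layout implied by the "+ 9" above (one DWORD = 8 bytes;
 * offsets inferred from this file, not taken from the HW manual):
 *
 *   DWORD 0      NIX_CQE_HDR_S
 *   DWORD 1..7   NIX_RX_PARSE_S
 *   DWORD 8      NIX_RX_SG_S
 *   DWORD 9      first packet IOVA
 *
 * so dereferencing DWORD 9 yields the buffer address, and subtracting
 * data_off recovers the mbuf that owns the buffer.
 */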

static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}

static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}

static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id
	 * is valid, and no flag to distinguish an
	 * RTE_FLOW_ACTION_TYPE_FLAG from an RTE_FLOW_ACTION_TYPE_MARK
	 * action. The former case is addressed by treating 0 as the
	 * invalid value and by the inc/dec of the match_id pair when
	 * MARK is activated. The latter case is addressed by defining
	 * CNXK_FLOW_MARK_DEFAULT as the value for
	 * RTE_FLOW_ACTION_TYPE_MARK.
	 * This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
	 * i.e. valid mark_ids range from
	 * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= RTE_MBUF_F_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}
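
/* Worked example (illustrative, derived from the comment above): a flow rule
 * with RTE_FLOW_ACTION_TYPE_MARK { .id = 5 } is programmed as match_id = 6,
 * so the decode above sets RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID and
 * recovers mbuf->hash.fdir.hi = 5. A rule with RTE_FLOW_ACTION_TYPE_FLAG
 * uses match_id = CNXK_FLOW_ACTION_FLAG_DEFAULT and sets only
 * RTE_MBUF_F_RX_FDIR.
 */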

static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm, const uint16_t flags)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;

	if (nb_segs == 1) {
		mbuf->next = NULL;
		return;
	}

	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->nb_segs = nb_segs;
	sg = sg >> 16;

	eol = ((const rte_iova_t *)(rx + 1) +
	       ((rx->cn9k.desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
	nb_segs--;

	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}
	mbuf->next = NULL;
}

static inline int
ipsec_antireplay_check(struct roc_onf_ipsec_inb_sa *sa,
		       struct cn9k_inb_priv_data *priv, uintptr_t data,
		       uint32_t win_sz)
{
	struct cnxk_on_ipsec_ar *ar = &priv->ar;
	uint64_t seq, seq_in_sa;
	uint32_t seqh = 0, seql;
	uint8_t esn;
	int rc;

	esn = sa->ctl.esn_en;
	seql = rte_be_to_cpu_32(*((uint32_t *)(data + IPSEC_SQ_LO_IDX)));

	if (!esn) {
		seq = (uint64_t)seql;
	} else {
		seqh = rte_be_to_cpu_32(*((uint32_t *)(data +
						       IPSEC_SQ_HI_IDX)));
		seq = ((uint64_t)seqh << 32) | seql;
	}

	if (unlikely(seq == 0))
		return -1;

	rte_spinlock_lock(&ar->lock);
	rc = cnxk_on_anti_replay_check(seq, ar, win_sz);
	if (esn && !rc) {
		seq_in_sa = ((uint64_t)rte_be_to_cpu_32(sa->esn_hi) << 32) |
			    rte_be_to_cpu_32(sa->esn_low);
		if (seq > seq_in_sa) {
			sa->esn_low = rte_cpu_to_be_32(seql);
			sa->esn_hi = rte_cpu_to_be_32(seqh);
		}
	}
	rte_spinlock_unlock(&ar->lock);

	return rc;
}

static inline uint64_t
nix_rx_sec_mbuf_err_update(const union nix_rx_parse_u *rx, uint16_t res,
			   uint64_t *rearm_val, uint16_t *len)
{
	uint8_t uc_cc = res >> 8;
	uint8_t cc = res & 0xFF;
	uint64_t data_off;
	uint64_t ol_flags;
	uint16_t m_len;

	if (unlikely(cc != CPT_COMP_GOOD))
		return RTE_MBUF_F_RX_SEC_OFFLOAD |
		       RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;

	data_off = *rearm_val & (BIT_ULL(16) - 1);
	m_len = rx->cn9k.pkt_lenm1 + 1;

	switch (uc_cc) {
	case ROC_IE_ON_UCC_IP_PAYLOAD_TYPE_ERR:
	case ROC_IE_ON_UCC_AUTH_ERR:
	case ROC_IE_ON_UCC_PADDING_INVALID:
		/* Adjust data offset to start at copied L2 */
		data_off += ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
			    ROC_ONF_IPSEC_INB_MAX_L2_SZ;
		ol_flags = RTE_MBUF_F_RX_SEC_OFFLOAD |
			   RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
		break;
	case ROC_IE_ON_UCC_CTX_INVALID:
	case ROC_IE_ON_UCC_SPI_MISMATCH:
	case ROC_IE_ON_UCC_SA_MISMATCH:
		/* Return as normal packet */
		ol_flags = 0;
		break;
	default:
		/* Return as error packet after updating packet lengths */
		ol_flags = RTE_MBUF_F_RX_SEC_OFFLOAD |
			   RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
		break;
	}

	*len = m_len;
	*rearm_val = *rearm_val & ~(BIT_ULL(16) - 1);
	*rearm_val |= data_off;
	return ol_flags;
}

static __rte_always_inline uint64_t
nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
		       uintptr_t sa_base, uint64_t *rearm_val, uint16_t *len)
{
	uintptr_t res_sg0 = ((uintptr_t)cq + ROC_ONF_IPSEC_INB_RES_OFF - 8);
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	struct cn9k_inb_priv_data *sa_priv;
	struct roc_onf_ipsec_inb_sa *sa;
	uint8_t lcptr = rx->lcptr;
	struct rte_ipv4_hdr *ipv4;
	uint16_t data_off, res;
	uint32_t spi, win_sz;
	uint32_t spi_mask;
	uintptr_t data;
	__uint128_t dw;
	uint8_t sa_w;

	res = *(uint64_t *)(res_sg0 + 8);
	data_off = *rearm_val & (BIT_ULL(16) - 1);
	data = (uintptr_t)m->buf_addr;
	data += data_off;

	rte_prefetch0((void *)data);

	if (unlikely(res != (CPT_COMP_GOOD | ROC_IE_ON_UCC_SUCCESS << 8)))
		return nix_rx_sec_mbuf_err_update(rx, res, rearm_val, len);

	data += lcptr;
	/* 20 bits of tag would have the SPI */
	spi = cq->tag & CNXK_ETHDEV_SPI_TAG_MASK;

	/* Get SA */
	sa_w = sa_base & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
	spi_mask = (1ULL << sa_w) - 1;
	sa = roc_nix_inl_onf_ipsec_inb_sa(sa_base, spi & spi_mask);

	/* Update dynamic field with userdata */
	sa_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(sa);
	dw = *(__uint128_t *)sa_priv;
	*rte_security_dynfield(m) = (uint64_t)dw;

	/* Check if anti-replay is enabled */
	win_sz = (uint32_t)(dw >> 64);
	if (win_sz) {
		if (ipsec_antireplay_check(sa, sa_priv, data, win_sz) < 0)
			return RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
	}

	/* Get total length from IPv4 header. We can assume only IPv4 */
	ipv4 = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
				       ROC_ONF_IPSEC_INB_MAX_L2_SZ);

	/* Update data offset */
	data_off += (ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
		     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
	*rearm_val = *rearm_val & ~(BIT_ULL(16) - 1);
	*rearm_val |= data_off;

	*len = rte_be_to_cpu_16(ipv4->total_length) + lcptr;
	return RTE_MBUF_F_RX_SEC_OFFLOAD;
}
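
/* Post-decrypt buffer layout assumed by the offsets above (a sketch inferred
 * from this file, not from the ROC headers): the received L2 header (lcptr
 * bytes) is followed by the SPI/SEQ words (ROC_ONF_IPSEC_INB_SPI_SEQ_SZ) and
 * a copied-L2 area (ROC_ONF_IPSEC_INB_MAX_L2_SZ) that ends where the inner
 * IPv4 header begins. Advancing data_off by both region sizes appears to
 * land on the start of the copied L2 header, so the final packet length is
 * lcptr + ipv4->total_length.
 */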

static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		     struct rte_mbuf *mbuf, const void *lookup_mem,
		     uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	uint16_t len = rx->cn9k.pkt_lenm1 + 1;
	const uint64_t w1 = *(const uint64_t *)rx;
	uint32_t packet_type;
	uint64_t ol_flags = 0;

	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
	RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		packet_type = nix_ptype_get(lookup_mem, w1);
	else
		packet_type = 0;

	if ((flag & NIX_RX_OFFLOAD_SECURITY_F) &&
	    cq->cqe_type == NIX_XQE_TYPE_RX_IPSECH) {
		uint16_t port = val >> 48;
		uintptr_t sa_base;

		/* Get SA Base from lookup mem */
		sa_base = cnxk_nix_sa_base_get(port, lookup_mem);

		ol_flags |= nix_rx_sec_mbuf_update(cq, mbuf, sa_base, &val,
						   &len);

		/* Only Tunnel inner IPv4 is supported */
		packet_type = (packet_type &
			       ~(RTE_PTYPE_L3_MASK | RTE_PTYPE_TUNNEL_MASK));
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		mbuf->packet_type = packet_type;
	}

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = packet_type;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}

	if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
		if (rx->cn9k.vtag0_gone) {
			ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = rx->cn9k.vtag0_tci;
		}
		if (rx->cn9k.vtag1_gone) {
			ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
		}
	}

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags =
			nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);

	mbuf->ol_flags = ol_flags;
	*(uint64_t *)(&mbuf->rearm_data) = val;
	mbuf->pkt_len = len;
	mbuf->data_len = len;

	if (flag & NIX_RX_MULTI_SEG_F)
		/*
		 * For multi segment packets, mbuf length correction according
		 * to Rx timestamp length will be handled later during
		 * timestamp data process.
		 * Hence, flag argument is not required.
		 */
		nix_cqe_xtract_mseg(rx, mbuf, val, 0);
	else
		mbuf->next = NULL;
}

static inline uint16_t
nix_rx_nb_pkts(struct cn9k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}
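
/* Worked example (illustrative): with a 1024-entry CQ (qmask = 1023),
 * head = 1020 and tail = 4 means the ring has wrapped, so
 * available = (4 - 1020 + 1023 + 1) mod 2^64 = 8 pending CQEs; without a
 * wrap (head = 4, tail = 12) the plain difference, 8, is used directly.
 */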

static __rte_always_inline uint16_t
cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		   const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint16_t packets = 0, nb_pkts;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				     flags);
		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
					(uint64_t *)((uint8_t *)mbuf
						     + data_off));
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	return nb_pkts;
}

#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
	if (w2 & BIT_ULL(21) /* vtag0_gone */) {
		ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
	}

	return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
	if (w2 & BIT_ULL(23) /* vtag1_gone */) {
		ol_flags |= RTE_MBUF_F_RX_QINQ | RTE_MBUF_F_RX_QINQ_STRIPPED;
		mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
	}

	return ol_flags;
}

static __rte_always_inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t pkts, const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	uint16_t packets = 0;
	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
	const uint64_t mbuf_initializer = rxq->mbuf_initializer;
	const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	const uint16_t *lookup_mem = rxq->lookup_mem;
	const uint32_t qmask = rxq->qmask;
	const uint64_t wdata = rxq->wdata;
	const uintptr_t desc = rxq->desc;
	uint8x16_t f0, f1, f2, f3;
	uint32_t head = rxq->head;
	uint16_t pkts_left;

	pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);

	/* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);

	while (packets < pkts) {
		/* Exit loop if head is about to wrap and become unaligned */
		if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
		    NIX_DESCS_PER_LOOP) {
			pkts_left += (pkts - packets);
			break;
		}

		const uintptr_t cq0 = desc + CQE_SZ(head);

		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));

		/* Get NIX_RX_SG_S for size and buffer pointer */
		cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
		cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
		cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
		cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));

		/* Extract mbuf from NIX_RX_SG_S */
		mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
		mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
		mbuf01 = vqsubq_u64(mbuf01, data_off);
		mbuf23 = vqsubq_u64(mbuf23, data_off);

		/* Move mbufs to scalar registers for future use */
		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

		/* Mask to get packet len from NIX_RX_SG_S */
		const uint8x16_t shuf_msk = {
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0,    1,    /* octet 1~0, low 16 bits pkt_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			0,    1,    /* octet 1~0, 16 bits data_len */
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);

		/* Load CQE word0 and word1 */
		uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
		uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
		uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
		uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
		uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
		uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
		uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
		uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];

		if (flags & NIX_RX_OFFLOAD_RSS_F) {
			/* Fill rss in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
			ol_flags0 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags1 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags2 = RTE_MBUF_F_RX_RSS_HASH;
			ol_flags3 = RTE_MBUF_F_RX_RSS_HASH;
		} else {
			ol_flags0 = 0;
			ol_flags1 = 0;
			ol_flags2 = 0;
			ol_flags3 = 0;
		}

		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
			/* Fill packet_type in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
					    f0, 0);
			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
					    f1, 0);
			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
					    f2, 0);
			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
					    f3, 0);
		}

		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
		}

		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
			uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
			uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
			ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
			ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

			ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
			ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
			ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
			ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
			ol_flags0 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
				mbuf0);
			ol_flags1 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(1) + 38), ol_flags1,
				mbuf1);
			ol_flags2 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(2) + 38), ol_flags2,
				mbuf2);
			ol_flags3 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(3) + 38), ol_flags3,
				mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
			const uint16x8_t len_off = {
				0,			     /* ptype   0:15 */
				0,			     /* ptype  16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15 */
				0,			     /* pktlen 16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
				0,
				0,
				0};
			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC};
			const uint64_t ts_olf = RTE_MBUF_F_RX_IEEE1588_PTP |
						RTE_MBUF_F_RX_IEEE1588_TMST |
						rxq->tstamp->rx_tstamp_dynflag;
			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
			uint64x2_t ts01, ts23, mask;
			uint64_t ts[4];
			uint8_t res;

			/* Subtract timesync length from total pkt length. */
			f0 = vsubq_u16(f0, len_off);
			f1 = vsubq_u16(f1, len_off);
			f2 = vsubq_u16(f2, len_off);
			f3 = vsubq_u16(f3, len_off);

			/* Get the address of actual timestamp. */
			ts01 = vaddq_u64(mbuf01, data_off);
			ts23 = vaddq_u64(mbuf23, data_off);
			/* Load timestamp from address. */
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  0),
					      ts01, 0);
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  1),
					      ts01, 1);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  0),
					      ts23, 0);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  1),
					      ts23, 1);
			/* Convert from be to cpu byteorder. */
			ts01 = vrev64q_u8(ts01);
			ts23 = vrev64q_u8(ts23);
			/* Store timestamp into scalar for later use. */
			ts[0] = vgetq_lane_u64(ts01, 0);
			ts[1] = vgetq_lane_u64(ts01, 1);
			ts[2] = vgetq_lane_u64(ts23, 0);
			ts[3] = vgetq_lane_u64(ts23, 1);

			/* Store timestamp into dynfield. */
			*cnxk_nix_timestamp_dynfield(mbuf0, rxq->tstamp) =
				ts[0];
			*cnxk_nix_timestamp_dynfield(mbuf1, rxq->tstamp) =
				ts[1];
			*cnxk_nix_timestamp_dynfield(mbuf2, rxq->tstamp) =
				ts[2];
			*cnxk_nix_timestamp_dynfield(mbuf3, rxq->tstamp) =
				ts[3];

			/* Generate ptype mask to filter L2 ether timesync */
			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

			/* Match against L2 ether timesync. */
			mask = vceqq_u32(mask, ptype);
			/* Convert the vector mask to a scalar mask. */
			res = vaddvq_u32(vandq_u32(mask, and_mask));

			if (res) {
				/* Fill in the ol_flags for any packets that
				 * matched.
				 */
				ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
				ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
				ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
				ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

				/* Update Rxq timestamp with the latest
				 * timestamp.
				 */
				rxq->tstamp->rx_ready = 1;
				rxq->tstamp->rx_tstamp =
					ts[31 - __builtin_clz(res)];
			}
		}

		/* Form rearm_data with ol_flags */
		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

		/* Update rx_descriptor_fields1 */
		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

		/* Update rearm_data */
		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

		if (flags & NIX_RX_MULTI_SEG_F) {
			/* Multi segment is enabled: build the mseg list for
			 * each mbuf individually in scalar mode.
			 */
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(0) + 8), mbuf0,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(1) + 8), mbuf1,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(2) + 8), mbuf2,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(3) + 8), mbuf3,
					    mbuf_initializer, flags);
		} else {
			/* Update that no more segments */
			mbuf0->next = NULL;
			mbuf1->next = NULL;
			mbuf2->next = NULL;
			mbuf3->next = NULL;
		}

		/* Store the mbufs to rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
		vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);

		roc_prefetch_store_keep(mbuf0);
		roc_prefetch_store_keep(mbuf1);
		roc_prefetch_store_keep(mbuf2);
		roc_prefetch_store_keep(mbuf3);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		RTE_MEMPOOL_CHECK_COOKIES(mbuf0->pool, (void **)&mbuf0, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf1->pool, (void **)&mbuf1, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf2->pool, (void **)&mbuf2, 1, 1);
		RTE_MEMPOOL_CHECK_COOKIES(mbuf3->pool, (void **)&mbuf3, 1, 1);

		/* Advance head pointer and packets */
		head += NIX_DESCS_PER_LOOP;
		head &= qmask;
		packets += NIX_DESCS_PER_LOOP;
	}

	rxq->head = head;
	rxq->available -= packets;

	rte_io_wmb();
	/* Free all the CQs that we've processed */
	plt_write64((rxq->wdata | packets), rxq->cq_door);

	if (unlikely(pkts_left))
		packets += cn9k_nix_recv_pkts(rx_queue, &rx_pkts[packets],
					      pkts_left, flags);

	return packets;
}

#else

static inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t pkts, const uint16_t flags)
{
	RTE_SET_USED(rx_queue);
	RTE_SET_USED(rx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(flags);

	return 0;
}

#endif

#define RSS_F	  NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F	  NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
#define R_SEC_F	  NIX_RX_OFFLOAD_SECURITY_F

/* [R_SEC_F] [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES_0_15                                            \
	R(no_offload, NIX_RX_OFFLOAD_NONE)                                    \
	R(rss, RSS_F)                                                         \
	R(ptype, PTYPE_F)                                                     \
	R(ptype_rss, PTYPE_F | RSS_F)                                         \
	R(cksum, CKSUM_F)                                                     \
	R(cksum_rss, CKSUM_F | RSS_F)                                         \
	R(cksum_ptype, CKSUM_F | PTYPE_F)                                     \
	R(cksum_ptype_rss, CKSUM_F | PTYPE_F | RSS_F)                         \
	R(mark, MARK_F)                                                       \
	R(mark_rss, MARK_F | RSS_F)                                           \
	R(mark_ptype, MARK_F | PTYPE_F)                                       \
	R(mark_ptype_rss, MARK_F | PTYPE_F | RSS_F)                           \
	R(mark_cksum, MARK_F | CKSUM_F)                                       \
	R(mark_cksum_rss, MARK_F | CKSUM_F | RSS_F)                           \
	R(mark_cksum_ptype, MARK_F | CKSUM_F | PTYPE_F)                       \
	R(mark_cksum_ptype_rss, MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_16_31                                           \
	R(ts, TS_F)                                                           \
	R(ts_rss, TS_F | RSS_F)                                               \
	R(ts_ptype, TS_F | PTYPE_F)                                           \
	R(ts_ptype_rss, TS_F | PTYPE_F | RSS_F)                               \
	R(ts_cksum, TS_F | CKSUM_F)                                           \
	R(ts_cksum_rss, TS_F | CKSUM_F | RSS_F)                               \
	R(ts_cksum_ptype, TS_F | CKSUM_F | PTYPE_F)                           \
	R(ts_cksum_ptype_rss, TS_F | CKSUM_F | PTYPE_F | RSS_F)               \
	R(ts_mark, TS_F | MARK_F)                                             \
	R(ts_mark_rss, TS_F | MARK_F | RSS_F)                                 \
	R(ts_mark_ptype, TS_F | MARK_F | PTYPE_F)                             \
	R(ts_mark_ptype_rss, TS_F | MARK_F | PTYPE_F | RSS_F)                 \
	R(ts_mark_cksum, TS_F | MARK_F | CKSUM_F)                             \
	R(ts_mark_cksum_rss, TS_F | MARK_F | CKSUM_F | RSS_F)                 \
	R(ts_mark_cksum_ptype, TS_F | MARK_F | CKSUM_F | PTYPE_F)             \
	R(ts_mark_cksum_ptype_rss, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_32_47                                           \
	R(vlan, RX_VLAN_F)                                                    \
	R(vlan_rss, RX_VLAN_F | RSS_F)                                        \
	R(vlan_ptype, RX_VLAN_F | PTYPE_F)                                    \
	R(vlan_ptype_rss, RX_VLAN_F | PTYPE_F | RSS_F)                        \
	R(vlan_cksum, RX_VLAN_F | CKSUM_F)                                    \
	R(vlan_cksum_rss, RX_VLAN_F | CKSUM_F | RSS_F)                        \
	R(vlan_cksum_ptype, RX_VLAN_F | CKSUM_F | PTYPE_F)                    \
	R(vlan_cksum_ptype_rss, RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)        \
	R(vlan_mark, RX_VLAN_F | MARK_F)                                      \
	R(vlan_mark_rss, RX_VLAN_F | MARK_F | RSS_F)                          \
	R(vlan_mark_ptype, RX_VLAN_F | MARK_F | PTYPE_F)                      \
	R(vlan_mark_ptype_rss, RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)          \
	R(vlan_mark_cksum, RX_VLAN_F | MARK_F | CKSUM_F)                      \
	R(vlan_mark_cksum_rss, RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)          \
	R(vlan_mark_cksum_ptype, RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)      \
	R(vlan_mark_cksum_ptype_rss,                                          \
	  RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_48_63                                           \
	R(vlan_ts, RX_VLAN_F | TS_F)                                          \
	R(vlan_ts_rss, RX_VLAN_F | TS_F | RSS_F)                              \
	R(vlan_ts_ptype, RX_VLAN_F | TS_F | PTYPE_F)                          \
	R(vlan_ts_ptype_rss, RX_VLAN_F | TS_F | PTYPE_F | RSS_F)              \
	R(vlan_ts_cksum, RX_VLAN_F | TS_F | CKSUM_F)                          \
	R(vlan_ts_cksum_rss, RX_VLAN_F | TS_F | CKSUM_F | RSS_F)              \
	R(vlan_ts_cksum_ptype, RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)          \
	R(vlan_ts_cksum_ptype_rss,                                            \
	  RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)                       \
	R(vlan_ts_mark, RX_VLAN_F | TS_F | MARK_F)                            \
	R(vlan_ts_mark_rss, RX_VLAN_F | TS_F | MARK_F | RSS_F)                \
	R(vlan_ts_mark_ptype, RX_VLAN_F | TS_F | MARK_F | PTYPE_F)            \
	R(vlan_ts_mark_ptype_rss, RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)\
	R(vlan_ts_mark_cksum, RX_VLAN_F | TS_F | MARK_F | CKSUM_F)            \
	R(vlan_ts_mark_cksum_rss, RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)\
	R(vlan_ts_mark_cksum_ptype,                                           \
	  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)                      \
	R(vlan_ts_mark_cksum_ptype_rss,                                       \
	  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_64_79                                           \
	R(sec, R_SEC_F)                                                       \
	R(sec_rss, R_SEC_F | RSS_F)                                           \
	R(sec_ptype, R_SEC_F | PTYPE_F)                                       \
	R(sec_ptype_rss, R_SEC_F | PTYPE_F | RSS_F)                           \
	R(sec_cksum, R_SEC_F | CKSUM_F)                                       \
	R(sec_cksum_rss, R_SEC_F | CKSUM_F | RSS_F)                           \
	R(sec_cksum_ptype, R_SEC_F | CKSUM_F | PTYPE_F)                       \
	R(sec_cksum_ptype_rss, R_SEC_F | CKSUM_F | PTYPE_F | RSS_F)           \
	R(sec_mark, R_SEC_F | MARK_F)                                         \
	R(sec_mark_rss, R_SEC_F | MARK_F | RSS_F)                             \
	R(sec_mark_ptype, R_SEC_F | MARK_F | PTYPE_F)                         \
	R(sec_mark_ptype_rss, R_SEC_F | MARK_F | PTYPE_F | RSS_F)             \
	R(sec_mark_cksum, R_SEC_F | MARK_F | CKSUM_F)                         \
	R(sec_mark_cksum_rss, R_SEC_F | MARK_F | CKSUM_F | RSS_F)             \
	R(sec_mark_cksum_ptype, R_SEC_F | MARK_F | CKSUM_F | PTYPE_F)         \
	R(sec_mark_cksum_ptype_rss,                                           \
	  R_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_80_95                                           \
	R(sec_ts, R_SEC_F | TS_F)                                             \
	R(sec_ts_rss, R_SEC_F | TS_F | RSS_F)                                 \
	R(sec_ts_ptype, R_SEC_F | TS_F | PTYPE_F)                             \
	R(sec_ts_ptype_rss, R_SEC_F | TS_F | PTYPE_F | RSS_F)                 \
	R(sec_ts_cksum, R_SEC_F | TS_F | CKSUM_F)                             \
	R(sec_ts_cksum_rss, R_SEC_F | TS_F | CKSUM_F | RSS_F)                 \
	R(sec_ts_cksum_ptype, R_SEC_F | TS_F | CKSUM_F | PTYPE_F)             \
	R(sec_ts_cksum_ptype_rss, R_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
	R(sec_ts_mark, R_SEC_F | TS_F | MARK_F)                               \
	R(sec_ts_mark_rss, R_SEC_F | TS_F | MARK_F | RSS_F)                   \
	R(sec_ts_mark_ptype, R_SEC_F | TS_F | MARK_F | PTYPE_F)               \
	R(sec_ts_mark_ptype_rss, R_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F)   \
	R(sec_ts_mark_cksum, R_SEC_F | TS_F | MARK_F | CKSUM_F)               \
	R(sec_ts_mark_cksum_rss, R_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F)   \
	R(sec_ts_mark_cksum_ptype,                                            \
	  R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)                        \
	R(sec_ts_mark_cksum_ptype_rss,                                        \
	  R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_96_111                                          \
	R(sec_vlan, R_SEC_F | RX_VLAN_F)                                      \
	R(sec_vlan_rss, R_SEC_F | RX_VLAN_F | RSS_F)                          \
	R(sec_vlan_ptype, R_SEC_F | RX_VLAN_F | PTYPE_F)                      \
	R(sec_vlan_ptype_rss, R_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F)          \
	R(sec_vlan_cksum, R_SEC_F | RX_VLAN_F | CKSUM_F)                      \
	R(sec_vlan_cksum_rss, R_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F)          \
	R(sec_vlan_cksum_ptype, R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F)      \
	R(sec_vlan_cksum_ptype_rss,                                           \
	  R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)                    \
	R(sec_vlan_mark, R_SEC_F | RX_VLAN_F | MARK_F)                        \
	R(sec_vlan_mark_rss, R_SEC_F | RX_VLAN_F | MARK_F | RSS_F)            \
	R(sec_vlan_mark_ptype, R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F)        \
	R(sec_vlan_mark_ptype_rss,                                            \
	  R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)                     \
	R(sec_vlan_mark_cksum, R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F)        \
	R(sec_vlan_mark_cksum_rss,                                            \
	  R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)                     \
	R(sec_vlan_mark_cksum_ptype,                                          \
	  R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)                   \
	R(sec_vlan_mark_cksum_ptype_rss,                                      \
	  R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES_112_127                                         \
	R(sec_vlan_ts, R_SEC_F | RX_VLAN_F | TS_F)                            \
	R(sec_vlan_ts_rss, R_SEC_F | RX_VLAN_F | TS_F | RSS_F)                \
	R(sec_vlan_ts_ptype, R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F)            \
	R(sec_vlan_ts_ptype_rss, R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F | RSS_F)\
	R(sec_vlan_ts_cksum, R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F)            \
	R(sec_vlan_ts_cksum_rss, R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | RSS_F)\
	R(sec_vlan_ts_cksum_ptype,                                            \
	  R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)                     \
	R(sec_vlan_ts_cksum_ptype_rss,                                        \
	  R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)             \
	R(sec_vlan_ts_mark, R_SEC_F | RX_VLAN_F | TS_F | MARK_F)              \
	R(sec_vlan_ts_mark_rss, R_SEC_F | RX_VLAN_F | TS_F | MARK_F | RSS_F)  \
	R(sec_vlan_ts_mark_ptype,                                             \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F)                      \
	R(sec_vlan_ts_mark_ptype_rss,                                         \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)              \
	R(sec_vlan_ts_mark_cksum,                                             \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F)                      \
	R(sec_vlan_ts_mark_cksum_rss,                                         \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)              \
	R(sec_vlan_ts_mark_cksum_ptype,                                       \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)            \
	R(sec_vlan_ts_mark_cksum_ptype_rss,                                   \
	  R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define NIX_RX_FASTPATH_MODES                                                 \
	NIX_RX_FASTPATH_MODES_0_15                                            \
	NIX_RX_FASTPATH_MODES_16_31                                           \
	NIX_RX_FASTPATH_MODES_32_47                                           \
	NIX_RX_FASTPATH_MODES_48_63                                           \
	NIX_RX_FASTPATH_MODES_64_79                                           \
	NIX_RX_FASTPATH_MODES_80_95                                           \
	NIX_RX_FASTPATH_MODES_96_111                                          \
	NIX_RX_FASTPATH_MODES_112_127
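
/* Illustrative use of the generated symbols (a sketch modeled on the ethdev
 * side, e.g. cn9k_ethdev.c; the table and its name here are assumptions, not
 * part of this header): R() can expand to one dispatch-table entry per mode,
 * indexed directly by the offload flag bitmask, so the Rx burst function is
 * picked in O(1) from the device's rx_offload_flags.
 *
 *   static const eth_rx_burst_t nix_eth_rx_burst[NIX_RX_OFFLOAD_MAX] = {
 *   #define R(name, flags) [flags] = cn9k_nix_recv_pkts_##name,
 *           NIX_RX_FASTPATH_MODES
 *   #undef R
 *   };
 */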

#define R(name, flags)                                                        \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name(          \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name(     \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(      \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R

#define NIX_RX_RECV(fn, flags)                                                \
	uint16_t __rte_noinline __rte_hot fn(                                 \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)     \
	{                                                                     \
		return cn9k_nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));  \
	}

#define NIX_RX_RECV_MSEG(fn, flags) NIX_RX_RECV(fn, flags | NIX_RX_MULTI_SEG_F)

#define NIX_RX_RECV_VEC(fn, flags)                                            \
	uint16_t __rte_noinline __rte_hot fn(                                 \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)     \
	{                                                                     \
		return cn9k_nix_recv_pkts_vector(rx_queue, rx_pkts, pkts,     \
						 (flags));                    \
	}

#define NIX_RX_RECV_VEC_MSEG(fn, flags)                                       \
	NIX_RX_RECV_VEC(fn, flags | NIX_RX_MULTI_SEG_F)
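
/* Illustrative instantiation (a sketch; the per-mode definitions live in the
 * receive C files, not in this header): one NIX_RX_RECV line per mode bakes
 * the flag set in as a compile-time constant, letting the compiler eliminate
 * every untaken branch in cn9k_nix_recv_pkts().
 *
 *   NIX_RX_RECV(cn9k_nix_recv_pkts_cksum_ptype_rss,
 *               CKSUM_F | PTYPE_F | RSS_F)
 */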

#endif /* __CN9K_RX_H__ */