/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_RX_H__
#define __CN9K_RX_H__

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)
#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)
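
/* Each offload above is one bit of the constant "flags" argument taken by
 * the fast-path functions below. One specialized receive function is
 * instantiated per flag combination (see NIX_RX_FASTPATH_MODES at the end
 * of this file), so every "if (flags & ...)" test is resolved at compile
 * time and untaken branches are eliminated. Illustrative sketch, not a
 * definition from this file: enabling only ptype and RSS parsing selects
 * the variant built with
 *
 *	flags = NIX_RX_OFFLOAD_PTYPE_F | NIX_RX_OFFLOAD_RSS_F;
 *
 * i.e. cn9k_nix_recv_pkts_ptype_rss().
 */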

/* Flags to control the cqe_to_mbuf conversion function.
 * Defined from the top bit downwards to denote that they are
 * not used as offload flags when picking a function.
 */
#define NIX_RX_MULTI_SEG_F BIT(14)
#define CPT_RX_WQE_F	   BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)

union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};
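
/* The "fields" layout above mirrors the start of the mbuf rearm_data
 * area (data_off, refcnt, nb_segs, port), so a single 64-bit store of
 * "value" re-initializes all four mbuf header fields at once.
 */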

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}

static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}
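
/* This works because the mbuf header precedes its data buffer within the
 * mempool object: subtracting the queue's fixed data_off (mbuf header plus
 * private area and headroom) from the buffer address recovers the
 * struct rte_mbuf pointer without any per-packet lookup.
 */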

static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
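
/* Sketch of the scheme: the lower layer-type fields of CQE word1
 * (bits 36..51) index the non-tunnel ptype array, while the LF/LG/LH
 * codes (bits 52..63) index the tunnel array placed directly after it;
 * the two halves are merged into a single RTE_PTYPE_* value.
 */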

static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}

static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id
	 * is valid, and no flag to identify whether it came from an
	 * RTE_FLOW_ACTION_TYPE_FLAG or an RTE_FLOW_ACTION_TYPE_MARK
	 * action. The former case is addressed by treating 0 as the
	 * invalid value and incrementing/decrementing the match_id
	 * pair when MARK is activated. The latter case is addressed
	 * by reserving CNXK_FLOW_MARK_DEFAULT as the value for
	 * RTE_FLOW_ACTION_TYPE_MARK.
	 * This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
	 * i.e. valid mark_ids range from
	 * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= PKT_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= PKT_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}
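
/* Worked example (illustrative values): a flow rule with action MARK
 * id=10 is programmed into hardware as match_id 11; on receive, the code
 * above reports mbuf->hash.fdir.hi = 11 - 1 = 10 with PKT_RX_FDIR |
 * PKT_RX_FDIR_ID set. A FLAG action uses the reserved
 * CNXK_FLOW_ACTION_FLAG_DEFAULT match_id and sets only PKT_RX_FDIR.
 */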

static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm, const uint16_t flags)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;

	if (nb_segs == 1) {
		mbuf->next = NULL;
		return;
	}

	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->nb_segs = nb_segs;
	sg = sg >> 16;

	eol = ((const rte_iova_t *)(rx + 1) +
	       ((rx->cn9k.desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
	nb_segs--;

	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}
	mbuf->next = NULL;
}
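
/* NIX SG descriptor layout sketch: each 64-bit SG_S word carries up to
 * three 16-bit segment sizes plus the segment count in bits 48..49, and
 * is followed by one IOVA per segment. The loop above walks those IOVAs,
 * converting each buffer address back to its mbuf (the header sits one
 * struct rte_mbuf below the buffer) and chaining them via mbuf->next.
 */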

static __rte_always_inline uint64_t
nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
		       uintptr_t sa_base, uint64_t *rearm_val, uint16_t *len)
{
	uintptr_t res_sg0 = ((uintptr_t)cq + ROC_ONF_IPSEC_INB_RES_OFF - 8);
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	struct cn9k_inb_priv_data *sa_priv;
	struct roc_onf_ipsec_inb_sa *sa;
	uint8_t lcptr = rx->lcptr;
	struct rte_ipv4_hdr *ipv4;
	uint16_t data_off, res;
	uint32_t spi_mask;
	uintptr_t data;
	__uint128_t dw;
	uint8_t sa_w;
	uint32_t spi;

	res = *(uint64_t *)(res_sg0 + 8);
	data_off = *rearm_val & (BIT_ULL(16) - 1);
	data = (uintptr_t)m->buf_addr;
	data += data_off;

	rte_prefetch0((void *)data);

	if (unlikely(res != (CPT_COMP_GOOD | ROC_IE_ONF_UCC_SUCCESS << 8)))
		return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED;

	/* 20 bits of tag would have the SPI */
	spi = cq->tag & CNXK_ETHDEV_SPI_TAG_MASK;

	/* Get SA */
	sa_w = sa_base & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
	spi_mask = (1ULL << sa_w) - 1;
	sa = roc_nix_inl_onf_ipsec_inb_sa(sa_base, spi & spi_mask);

	/* Update dynamic field with userdata */
	sa_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(sa);
	dw = *(__uint128_t *)sa_priv;
	*rte_security_dynfield(m) = (uint64_t)dw;

	/* Get total length from IPv4 header. We can assume only IPv4 */
	ipv4 = (struct rte_ipv4_hdr *)(data + ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
				       ROC_ONF_IPSEC_INB_MAX_L2_SZ);

	/* Update data offset */
	data_off += (ROC_ONF_IPSEC_INB_SPI_SEQ_SZ +
		     ROC_ONF_IPSEC_INB_MAX_L2_SZ);
	*rearm_val = *rearm_val & ~(BIT_ULL(16) - 1);
	*rearm_val |= data_off;

	*len = rte_be_to_cpu_16(ipv4->total_length) + lcptr;
	return PKT_RX_SEC_OFFLOAD;
}
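
/* SA lookup sketch: the inline SA table base is aligned to
 * ROC_NIX_INL_SA_BASE_ALIGN, so its low bits are free to carry sa_w,
 * effectively the width of the SA index. Masking the 20-bit SPI from
 * the CQE tag with (1 << sa_w) - 1 then indexes the per-SPI inbound
 * SA entry.
 */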

static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		     struct rte_mbuf *mbuf, const void *lookup_mem,
		     uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	uint16_t len = rx->cn9k.pkt_lenm1 + 1;
	const uint64_t w1 = *(const uint64_t *)rx;
	uint32_t packet_type;
	uint64_t ol_flags = 0;

	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		packet_type = nix_ptype_get(lookup_mem, w1);
	else
		packet_type = 0;

	if ((flag & NIX_RX_OFFLOAD_SECURITY_F) &&
	    cq->cqe_type == NIX_XQE_TYPE_RX_IPSECH) {
		uint16_t port = val >> 48;
		uintptr_t sa_base;

		/* Get SA Base from lookup mem */
		sa_base = cnxk_nix_sa_base_get(port, lookup_mem);

		ol_flags |= nix_rx_sec_mbuf_update(cq, mbuf, sa_base, &val,
						   &len);

		/* Only Tunnel inner IPv4 is supported */
		packet_type = (packet_type &
			       ~(RTE_PTYPE_L3_MASK | RTE_PTYPE_TUNNEL_MASK));
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		mbuf->packet_type = packet_type;
	}

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = packet_type;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= PKT_RX_RSS_HASH;
	}

	if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
		if (rx->cn9k.vtag0_gone) {
			ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = rx->cn9k.vtag0_tci;
		}
		if (rx->cn9k.vtag1_gone) {
			ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
		}
	}

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags =
			nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);

	mbuf->data_len = len;
	*(uint64_t *)(&mbuf->rearm_data) = val;
	mbuf->pkt_len = len;
	mbuf->ol_flags = ol_flags;

	if (flag & NIX_RX_MULTI_SEG_F)
		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
	else
		mbuf->next = NULL;
}
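
/* Note on "val": it is the queue's 64-bit mbuf_initializer image of
 * rearm_data, so its top 16 bits hold the port id (hence "val >> 48"
 * above) and its low 16 bits the data offset; one store re-arms the
 * mbuf, and the security path only has to patch the offset bits.
 */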

static inline uint16_t
nix_rx_nb_pkts(struct cn9k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}
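
/* Worked example (illustrative numbers): on a 1024-entry CQ
 * (qmask = 0x3FF), a CQ_STATUS read of head = 0x3FE and tail = 0x002
 * means the tail has wrapped, so available = 2 - 1022 + 1023 + 1 = 4
 * completions are ready to be processed.
 */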

static __rte_always_inline uint16_t
cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		   const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint16_t packets = 0, nb_pkts;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				     flags);
		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
					(flags & NIX_RX_MULTI_SEG_F),
					(uint64_t *)((uint8_t *)mbuf
								+ data_off));
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	return nb_pkts;
}
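
/* Minimal usage sketch (application side, with hypothetical port and
 * queue ids): the specializations generated from this template are
 * installed as the device's rx_pkt_burst handler, so an application
 * simply polls through the normal ethdev API:
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, burst, 32);
 *
 * and rte_eth_rx_burst() dispatches to the cn9k_nix_recv_pkts_*
 * variant matching the queue's configured offloads.
 */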

#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
	if (w2 & BIT_ULL(21) /* vtag0_gone */) {
		ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
	}

	return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
	if (w2 & BIT_ULL(23) /* vtag1_gone */) {
		ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
		mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
	}

	return ol_flags;
}
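
/* nix_vlan_update() writes the inner TCI into lane 5 of the
 * rx_descriptor_fields1 vector, which overlays mbuf->vlan_tci, while
 * the outer TCI has no vector slot and is stored to the mbuf directly.
 */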

static __rte_always_inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t pkts, const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	uint16_t packets = 0;
	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
	const uint64_t mbuf_initializer = rxq->mbuf_initializer;
	const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	const uint16_t *lookup_mem = rxq->lookup_mem;
	const uint32_t qmask = rxq->qmask;
	const uint64_t wdata = rxq->wdata;
	const uintptr_t desc = rxq->desc;
	uint8x16_t f0, f1, f2, f3;
	uint32_t head = rxq->head;
	uint16_t pkts_left;

	pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);

	/* Packets has to be floor-aligned to NIX_DESCS_PER_LOOP */
	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);

	while (packets < pkts) {
		/* Exit loop if head is about to wrap and become unaligned */
		if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
		    NIX_DESCS_PER_LOOP) {
			pkts_left += (pkts - packets);
			break;
		}

		const uintptr_t cq0 = desc + CQE_SZ(head);

		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));

		/* Get NIX_RX_SG_S for size and buffer pointer */
		cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
		cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
		cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
		cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));

		/* Extract mbuf from NIX_RX_SG_S */
		mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
		mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
		mbuf01 = vqsubq_u64(mbuf01, data_off);
		mbuf23 = vqsubq_u64(mbuf23, data_off);

		/* Move mbufs to scalar registers for future use */
		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);
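
		/* vzip2q_u64() gathers the second 64-bit word of each
		 * SG_S pair (the buffer address); the saturating
		 * vqsubq_u64() then applies the same data_off
		 * subtraction as nix_get_mbuf_from_cqe(), two packets
		 * per vector.
		 */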

		/* Mask to get packet len from NIX_RX_SG_S */
		const uint8x16_t shuf_msk = {
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0,    1,    /* octet 1~0, low 16 bits pkt_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			0,    1,    /* octet 1~0, 16 bits data_len */
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
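
		/* vqtbl1q_u8() is a byte shuffle: 0xFF lanes produce
		 * zero, so f0..f3 come out with pkt_type cleared and
		 * bytes 0-1 of SG_S (the segment size) copied into both
		 * the pkt_len and data_len slots of
		 * rx_descriptor_fields1.
		 */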

		/* Load CQE word0 and word 1 */
		uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
		uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
		uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
		uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
		uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
		uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
		uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
		uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];

		if (flags & NIX_RX_OFFLOAD_RSS_F) {
			/* Fill rss in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
			ol_flags0 = PKT_RX_RSS_HASH;
			ol_flags1 = PKT_RX_RSS_HASH;
			ol_flags2 = PKT_RX_RSS_HASH;
			ol_flags3 = PKT_RX_RSS_HASH;
		} else {
			ol_flags0 = 0;
			ol_flags1 = 0;
			ol_flags2 = 0;
			ol_flags3 = 0;
		}

		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
			/* Fill packet_type in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
					    f0, 0);
			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
					    f1, 0);
			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
					    f2, 0);
			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
					    f3, 0);
		}

		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
		}

		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
			uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
			uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
			ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
			ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

			ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
			ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
			ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
			ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
			ol_flags0 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
				mbuf0);
			ol_flags1 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(1) + 38), ol_flags1,
				mbuf1);
			ol_flags2 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(2) + 38), ol_flags2,
				mbuf2);
			ol_flags3 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(3) + 38), ol_flags3,
				mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
			const uint16x8_t len_off = {
				0,			     /* ptype   0:15 */
				0,			     /* ptype  16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15 */
				0,			     /* pktlen 16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
				0,
				0,
				0};
			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC};
			const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
						PKT_RX_IEEE1588_TMST |
						rxq->tstamp->rx_tstamp_dynflag;
			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
			uint64x2_t ts01, ts23, mask;
			uint64_t ts[4];
			uint8_t res;

			/* Subtract timesync length from total pkt length. */
			f0 = vsubq_u16(f0, len_off);
			f1 = vsubq_u16(f1, len_off);
			f2 = vsubq_u16(f2, len_off);
			f3 = vsubq_u16(f3, len_off);

			/* Get the address of actual timestamp. */
			ts01 = vaddq_u64(mbuf01, data_off);
			ts23 = vaddq_u64(mbuf23, data_off);
			/* Load timestamp from address. */
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  0),
					      ts01, 0);
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  1),
					      ts01, 1);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  0),
					      ts23, 0);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  1),
					      ts23, 1);
			/* Convert from big-endian to CPU byte order. */
			ts01 = vrev64q_u8(ts01);
			ts23 = vrev64q_u8(ts23);
			/* Store timestamp into scalar for later use. */
			ts[0] = vgetq_lane_u64(ts01, 0);
			ts[1] = vgetq_lane_u64(ts01, 1);
			ts[2] = vgetq_lane_u64(ts23, 0);
			ts[3] = vgetq_lane_u64(ts23, 1);

			/* Store timestamp into dynfield. */
			*cnxk_nix_timestamp_dynfield(mbuf0, rxq->tstamp) =
				ts[0];
			*cnxk_nix_timestamp_dynfield(mbuf1, rxq->tstamp) =
				ts[1];
			*cnxk_nix_timestamp_dynfield(mbuf2, rxq->tstamp) =
				ts[2];
			*cnxk_nix_timestamp_dynfield(mbuf3, rxq->tstamp) =
				ts[3];

			/* Generate ptype mask to filter L2 ether timesync */
			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

			/* Match against L2 ether timesync. */
			mask = vceqq_u32(mask, ptype);
			/* Convert from vector to scalar mask */
			res = vaddvq_u32(vandq_u32(mask, and_mask));

			if (res) {
				/* Fill in the ol_flags for any packets that
				 * matched.
				 */
				ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
				ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
				ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
				ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

				/* Update Rxq timestamp with the latest
				 * timestamp.
				 */
				rxq->tstamp->rx_ready = 1;
				rxq->tstamp->rx_tstamp =
					ts[31 - __builtin_clz(res)];
			}
		}
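
		/* Example (illustrative): res = 0x6 above means packets
		 * 1 and 2 were PTP frames; 31 - __builtin_clz(6) = 2, so
		 * ts[2], the latest matching timestamp, is cached on the
		 * queue for the timesync read path to consume.
		 */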

		/* Form rearm_data with ol_flags */
		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

		/* Update rx_descriptor_fields1 */
		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

		/* Update rearm_data */
		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

		/* Store the mbufs to rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
		vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);

		if (flags & NIX_RX_MULTI_SEG_F) {
			/* Multi-segment is enabled: build the mseg list
			 * for each mbuf in scalar mode.
			 */
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (cq0 + CQE_SZ(0) + 8), mbuf0,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (cq0 + CQE_SZ(1) + 8), mbuf1,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (cq0 + CQE_SZ(2) + 8), mbuf2,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
					    (cq0 + CQE_SZ(3) + 8), mbuf3,
					    mbuf_initializer, flags);
		} else {
			/* Update that there are no more segments */
			mbuf0->next = NULL;
			mbuf1->next = NULL;
			mbuf2->next = NULL;
			mbuf3->next = NULL;
		}

		/* Prefetch mbufs */
		roc_prefetch_store_keep(mbuf0);
		roc_prefetch_store_keep(mbuf1);
		roc_prefetch_store_keep(mbuf2);
		roc_prefetch_store_keep(mbuf3);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);

		/* Advance head pointer and packets */
		head += NIX_DESCS_PER_LOOP;
		head &= qmask;
		packets += NIX_DESCS_PER_LOOP;
	}

	rxq->head = head;
	rxq->available -= packets;

	/* Free all the CQs that we've processed */
	plt_write64((rxq->wdata | packets), rxq->cq_door);

	if (unlikely(pkts_left))
		packets += cn9k_nix_recv_pkts(rx_queue, &rx_pkts[packets],
					      pkts_left, flags);

	return packets;
}

#else

static inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t pkts, const uint16_t flags)
{
	RTE_SET_USED(rx_queue);
	RTE_SET_USED(rx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(flags);

	return 0;
}

#endif

#define RSS_F	  NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F	  NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
#define R_SEC_F	  NIX_RX_OFFLOAD_SECURITY_F

/* [R_SEC_F] [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES \
R(no_offload, 0, 0, 0, 0, 0, 0, 0, \
		NIX_RX_OFFLOAD_NONE) \
R(rss, 0, 0, 0, 0, 0, 0, 1, \
		RSS_F) \
R(ptype, 0, 0, 0, 0, 0, 1, 0, \
		PTYPE_F) \
R(ptype_rss, 0, 0, 0, 0, 0, 1, 1, \
		PTYPE_F | RSS_F) \
R(cksum, 0, 0, 0, 0, 1, 0, 0, \
		CKSUM_F) \
R(cksum_rss, 0, 0, 0, 0, 1, 0, 1, \
		CKSUM_F | RSS_F) \
R(cksum_ptype, 0, 0, 0, 0, 1, 1, 0, \
		CKSUM_F | PTYPE_F) \
R(cksum_ptype_rss, 0, 0, 0, 0, 1, 1, 1, \
		CKSUM_F | PTYPE_F | RSS_F) \
R(mark, 0, 0, 0, 1, 0, 0, 0, \
		MARK_F) \
R(mark_rss, 0, 0, 0, 1, 0, 0, 1, \
		MARK_F | RSS_F) \
R(mark_ptype, 0, 0, 0, 1, 0, 1, 0, \
		MARK_F | PTYPE_F) \
R(mark_ptype_rss, 0, 0, 0, 1, 0, 1, 1, \
		MARK_F | PTYPE_F | RSS_F) \
R(mark_cksum, 0, 0, 0, 1, 1, 0, 0, \
		MARK_F | CKSUM_F) \
R(mark_cksum_rss, 0, 0, 0, 1, 1, 0, 1, \
		MARK_F | CKSUM_F | RSS_F) \
R(mark_cksum_ptype, 0, 0, 0, 1, 1, 1, 0, \
		MARK_F | CKSUM_F | PTYPE_F) \
R(mark_cksum_ptype_rss, 0, 0, 0, 1, 1, 1, 1, \
		MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts, 0, 0, 1, 0, 0, 0, 0, \
		TS_F) \
R(ts_rss, 0, 0, 1, 0, 0, 0, 1, \
		TS_F | RSS_F) \
R(ts_ptype, 0, 0, 1, 0, 0, 1, 0, \
		TS_F | PTYPE_F) \
R(ts_ptype_rss, 0, 0, 1, 0, 0, 1, 1, \
		TS_F | PTYPE_F | RSS_F) \
R(ts_cksum, 0, 0, 1, 0, 1, 0, 0, \
		TS_F | CKSUM_F) \
R(ts_cksum_rss, 0, 0, 1, 0, 1, 0, 1, \
		TS_F | CKSUM_F | RSS_F) \
R(ts_cksum_ptype, 0, 0, 1, 0, 1, 1, 0, \
		TS_F | CKSUM_F | PTYPE_F) \
R(ts_cksum_ptype_rss, 0, 0, 1, 0, 1, 1, 1, \
		TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts_mark, 0, 0, 1, 1, 0, 0, 0, \
		TS_F | MARK_F) \
R(ts_mark_rss, 0, 0, 1, 1, 0, 0, 1, \
		TS_F | MARK_F | RSS_F) \
R(ts_mark_ptype, 0, 0, 1, 1, 0, 1, 0, \
		TS_F | MARK_F | PTYPE_F) \
R(ts_mark_ptype_rss, 0, 0, 1, 1, 0, 1, 1, \
		TS_F | MARK_F | PTYPE_F | RSS_F) \
R(ts_mark_cksum, 0, 0, 1, 1, 1, 0, 0, \
		TS_F | MARK_F | CKSUM_F) \
R(ts_mark_cksum_rss, 0, 0, 1, 1, 1, 0, 1, \
		TS_F | MARK_F | CKSUM_F | RSS_F) \
R(ts_mark_cksum_ptype, 0, 0, 1, 1, 1, 1, 0, \
		TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(ts_mark_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, 1, \
		TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan, 0, 1, 0, 0, 0, 0, 0, \
		RX_VLAN_F) \
R(vlan_rss, 0, 1, 0, 0, 0, 0, 1, \
		RX_VLAN_F | RSS_F) \
R(vlan_ptype, 0, 1, 0, 0, 0, 1, 0, \
		RX_VLAN_F | PTYPE_F) \
R(vlan_ptype_rss, 0, 1, 0, 0, 0, 1, 1, \
		RX_VLAN_F | PTYPE_F | RSS_F) \
R(vlan_cksum, 0, 1, 0, 0, 1, 0, 0, \
		RX_VLAN_F | CKSUM_F) \
R(vlan_cksum_rss, 0, 1, 0, 0, 1, 0, 1, \
		RX_VLAN_F | CKSUM_F | RSS_F) \
R(vlan_cksum_ptype, 0, 1, 0, 0, 1, 1, 0, \
		RX_VLAN_F | CKSUM_F | PTYPE_F) \
R(vlan_cksum_ptype_rss, 0, 1, 0, 0, 1, 1, 1, \
		RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_mark, 0, 1, 0, 1, 0, 0, 0, \
		RX_VLAN_F | MARK_F) \
R(vlan_mark_rss, 0, 1, 0, 1, 0, 0, 1, \
		RX_VLAN_F | MARK_F | RSS_F) \
R(vlan_mark_ptype, 0, 1, 0, 1, 0, 1, 0, \
		RX_VLAN_F | MARK_F | PTYPE_F) \
R(vlan_mark_ptype_rss, 0, 1, 0, 1, 0, 1, 1, \
		RX_VLAN_F | MARK_F | PTYPE_F | RSS_F) \
R(vlan_mark_cksum, 0, 1, 0, 1, 1, 0, 0, \
		RX_VLAN_F | MARK_F | CKSUM_F) \
R(vlan_mark_cksum_rss, 0, 1, 0, 1, 1, 0, 1, \
		RX_VLAN_F | MARK_F | CKSUM_F | RSS_F) \
R(vlan_mark_cksum_ptype, 0, 1, 0, 1, 1, 1, 0, \
		RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F) \
R(vlan_mark_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, 1, \
		RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_ts, 0, 1, 1, 0, 0, 0, 0, \
		RX_VLAN_F | TS_F) \
R(vlan_ts_rss, 0, 1, 1, 0, 0, 0, 1, \
		RX_VLAN_F | TS_F | RSS_F) \
R(vlan_ts_ptype, 0, 1, 1, 0, 0, 1, 0, \
		RX_VLAN_F | TS_F | PTYPE_F) \
R(vlan_ts_ptype_rss, 0, 1, 1, 0, 0, 1, 1, \
		RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
R(vlan_ts_cksum, 0, 1, 1, 0, 1, 0, 0, \
		RX_VLAN_F | TS_F | CKSUM_F) \
R(vlan_ts_cksum_rss, 0, 1, 1, 0, 1, 0, 1, \
		RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
R(vlan_ts_cksum_ptype, 0, 1, 1, 0, 1, 1, 0, \
		RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F) \
R(vlan_ts_cksum_ptype_rss, 0, 1, 1, 0, 1, 1, 1, \
		RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_ts_mark, 0, 1, 1, 1, 0, 0, 0, \
		RX_VLAN_F | TS_F | MARK_F) \
R(vlan_ts_mark_rss, 0, 1, 1, 1, 0, 0, 1, \
		RX_VLAN_F | TS_F | MARK_F | RSS_F) \
R(vlan_ts_mark_ptype, 0, 1, 1, 1, 0, 1, 0, \
		RX_VLAN_F | TS_F | MARK_F | PTYPE_F) \
R(vlan_ts_mark_ptype_rss, 0, 1, 1, 1, 0, 1, 1, \
		RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
R(vlan_ts_mark_cksum, 0, 1, 1, 1, 1, 0, 0, \
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F) \
R(vlan_ts_mark_cksum_rss, 0, 1, 1, 1, 1, 0, 1, \
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
R(vlan_ts_mark_cksum_ptype, 0, 1, 1, 1, 1, 1, 0, \
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(vlan_ts_mark_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, 1, \
		RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec, 1, 0, 0, 0, 0, 0, 0, \
		R_SEC_F) \
R(sec_rss, 1, 0, 0, 0, 0, 0, 1, \
		R_SEC_F | RSS_F) \
R(sec_ptype, 1, 0, 0, 0, 0, 1, 0, \
		R_SEC_F | PTYPE_F) \
R(sec_ptype_rss, 1, 0, 0, 0, 0, 1, 1, \
		R_SEC_F | PTYPE_F | RSS_F) \
R(sec_cksum, 1, 0, 0, 0, 1, 0, 0, \
		R_SEC_F | CKSUM_F) \
R(sec_cksum_rss, 1, 0, 0, 0, 1, 0, 1, \
		R_SEC_F | CKSUM_F | RSS_F) \
R(sec_cksum_ptype, 1, 0, 0, 0, 1, 1, 0, \
		R_SEC_F | CKSUM_F | PTYPE_F) \
R(sec_cksum_ptype_rss, 1, 0, 0, 0, 1, 1, 1, \
		R_SEC_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_mark, 1, 0, 0, 1, 0, 0, 0, \
		R_SEC_F | MARK_F) \
R(sec_mark_rss, 1, 0, 0, 1, 0, 0, 1, \
		R_SEC_F | MARK_F | RSS_F) \
R(sec_mark_ptype, 1, 0, 0, 1, 0, 1, 0, \
		R_SEC_F | MARK_F | PTYPE_F) \
R(sec_mark_ptype_rss, 1, 0, 0, 1, 0, 1, 1, \
		R_SEC_F | MARK_F | PTYPE_F | RSS_F) \
R(sec_mark_cksum, 1, 0, 0, 1, 1, 0, 0, \
		R_SEC_F | MARK_F | CKSUM_F) \
R(sec_mark_cksum_rss, 1, 0, 0, 1, 1, 0, 1, \
		R_SEC_F | MARK_F | CKSUM_F | RSS_F) \
R(sec_mark_cksum_ptype, 1, 0, 0, 1, 1, 1, 0, \
		R_SEC_F | MARK_F | CKSUM_F | PTYPE_F) \
R(sec_mark_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, 1, \
		R_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_ts, 1, 0, 1, 0, 0, 0, 0, \
		R_SEC_F | TS_F) \
R(sec_ts_rss, 1, 0, 1, 0, 0, 0, 1, \
		R_SEC_F | TS_F | RSS_F) \
R(sec_ts_ptype, 1, 0, 1, 0, 0, 1, 0, \
		R_SEC_F | TS_F | PTYPE_F) \
R(sec_ts_ptype_rss, 1, 0, 1, 0, 0, 1, 1, \
		R_SEC_F | TS_F | PTYPE_F | RSS_F) \
R(sec_ts_cksum, 1, 0, 1, 0, 1, 0, 0, \
		R_SEC_F | TS_F | CKSUM_F) \
R(sec_ts_cksum_rss, 1, 0, 1, 0, 1, 0, 1, \
		R_SEC_F | TS_F | CKSUM_F | RSS_F) \
R(sec_ts_cksum_ptype, 1, 0, 1, 0, 1, 1, 0, \
		R_SEC_F | TS_F | CKSUM_F | PTYPE_F) \
R(sec_ts_cksum_ptype_rss, 1, 0, 1, 0, 1, 1, 1, \
		R_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_ts_mark, 1, 0, 1, 1, 0, 0, 0, \
		R_SEC_F | TS_F | MARK_F) \
R(sec_ts_mark_rss, 1, 0, 1, 1, 0, 0, 1, \
		R_SEC_F | TS_F | MARK_F | RSS_F) \
R(sec_ts_mark_ptype, 1, 0, 1, 1, 0, 1, 0, \
		R_SEC_F | TS_F | MARK_F | PTYPE_F) \
R(sec_ts_mark_ptype_rss, 1, 0, 1, 1, 0, 1, 1, \
		R_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
R(sec_ts_mark_cksum, 1, 0, 1, 1, 1, 0, 0, \
		R_SEC_F | TS_F | MARK_F | CKSUM_F) \
R(sec_ts_mark_cksum_rss, 1, 0, 1, 1, 1, 0, 1, \
		R_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
R(sec_ts_mark_cksum_ptype, 1, 0, 1, 1, 1, 1, 0, \
		R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(sec_ts_mark_cksum_ptype_rss, 1, 0, 1, 1, 1, 1, 1, \
		R_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_vlan, 1, 1, 0, 0, 0, 0, 0, \
		R_SEC_F | RX_VLAN_F) \
R(sec_vlan_rss, 1, 1, 0, 0, 0, 0, 1, \
		R_SEC_F | RX_VLAN_F | RSS_F) \
R(sec_vlan_ptype, 1, 1, 0, 0, 0, 1, 0, \
		R_SEC_F | RX_VLAN_F | PTYPE_F) \
R(sec_vlan_ptype_rss, 1, 1, 0, 0, 0, 1, 1, \
		R_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F) \
R(sec_vlan_cksum, 1, 1, 0, 0, 1, 0, 0, \
		R_SEC_F | RX_VLAN_F | CKSUM_F) \
R(sec_vlan_cksum_rss, 1, 1, 0, 0, 1, 0, 1, \
		R_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F) \
R(sec_vlan_cksum_ptype, 1, 1, 0, 0, 1, 1, 0, \
		R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
R(sec_vlan_cksum_ptype_rss, 1, 1, 0, 0, 1, 1, 1, \
		R_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_vlan_mark, 1, 1, 0, 1, 0, 0, 0, \
		R_SEC_F | RX_VLAN_F | MARK_F) \
R(sec_vlan_mark_rss, 1, 1, 0, 1, 0, 0, 1, \
		R_SEC_F | RX_VLAN_F | MARK_F | RSS_F) \
R(sec_vlan_mark_ptype, 1, 1, 0, 1, 0, 1, 0, \
		R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F) \
R(sec_vlan_mark_ptype_rss, 1, 1, 0, 1, 0, 1, 1, \
		R_SEC_F | RX_VLAN_F | MARK_F | PTYPE_F | RSS_F) \
R(sec_vlan_mark_cksum, 1, 1, 0, 1, 1, 0, 0, \
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F) \
R(sec_vlan_mark_cksum_rss, 1, 1, 0, 1, 1, 0, 1, \
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | RSS_F) \
R(sec_vlan_mark_cksum_ptype, 1, 1, 0, 1, 1, 1, 0, \
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F) \
R(sec_vlan_mark_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, 1, \
		R_SEC_F | RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_vlan_ts, 1, 1, 1, 0, 0, 0, 0, \
		R_SEC_F | RX_VLAN_F | TS_F) \
R(sec_vlan_ts_rss, 1, 1, 1, 0, 0, 0, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | RSS_F) \
R(sec_vlan_ts_ptype, 1, 1, 1, 0, 0, 1, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F) \
R(sec_vlan_ts_ptype_rss, 1, 1, 1, 0, 0, 1, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
R(sec_vlan_ts_cksum, 1, 1, 1, 0, 1, 0, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F) \
R(sec_vlan_ts_cksum_rss, 1, 1, 1, 0, 1, 0, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
R(sec_vlan_ts_cksum_ptype, 1, 1, 1, 0, 1, 1, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F) \
R(sec_vlan_ts_cksum_ptype_rss, 1, 1, 1, 0, 1, 1, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(sec_vlan_ts_mark, 1, 1, 1, 1, 0, 0, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F) \
R(sec_vlan_ts_mark_rss, 1, 1, 1, 1, 0, 0, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | RSS_F) \
R(sec_vlan_ts_mark_ptype, 1, 1, 1, 1, 0, 1, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F) \
R(sec_vlan_ts_mark_ptype_rss, 1, 1, 1, 1, 0, 1, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
R(sec_vlan_ts_mark_cksum, 1, 1, 1, 1, 1, 0, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F) \
R(sec_vlan_ts_mark_cksum_rss, 1, 1, 1, 1, 1, 0, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
R(sec_vlan_ts_mark_cksum_ptype, 1, 1, 1, 1, 1, 1, 0, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(sec_vlan_ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, 1, 1, \
		R_SEC_F | RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)
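
/* Expansion sketch for one table entry: R(rss, 0, 0, 0, 0, 0, 0, 1, RSS_F)
 * run through the R() below declares cn9k_nix_recv_pkts_rss(),
 * cn9k_nix_recv_pkts_mseg_rss(), cn9k_nix_recv_pkts_vec_rss() and
 * cn9k_nix_recv_pkts_vec_mseg_rss(); the matching definitions are built
 * elsewhere with flags == RSS_F.
 */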

#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name(          \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name(     \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(      \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R

#endif /* __CN9K_RX_H__ */