/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_RX_H__
#define __CN9K_RX_H__

#include <rte_ether.h>
#include <rte_vect.h>

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE          (0)
#define NIX_RX_OFFLOAD_RSS_F         BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F       BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F      BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)

/* Flags to control the cqe_to_mbuf conversion function.
 * Defined from the MSB end so that they are not mistaken
 * for the offload flags above when picking a fast-path
 * function.
 */
#define NIX_RX_MULTI_SEG_F BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)          ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)

/* Overlays the 8 bytes of rte_mbuf starting at rearm_data so that
 * data_off can be patched with a single 64-bit load/store.
 */
union mbuf_initializer {
        struct {
                uint16_t data_off;
                uint16_t refcnt;
                uint16_t nb_segs;
                uint16_t port;
        } fields;
        uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
        union mbuf_initializer mbuf_init = {.value = oldval};

        mbuf_init.fields.data_off = 0;
        return mbuf_init.value;
}
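
/* The CQE's SG area carries the IOVA of the packet data buffer. The
 * mbuf header is assumed to sit a constant data_off bytes before that
 * buffer (struct rte_mbuf, private area, then headroom), so the mbuf
 * address is recovered by plain subtraction:
 *
 *   | struct rte_mbuf | priv | headroom | packet data ...
 *   ^ mbuf                              ^ buff = mbuf + data_off
 */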
static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
        rte_iova_t buff;

        /* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
        buff = *((rte_iova_t *)((uint64_t *)cq + 9));
        return (struct rte_mbuf *)(buff - data_off);
}
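
/* Resolve mbuf packet_type from CQE word1 using the per-port lookup
 * table: one lookup keyed on the outer layer-type bits (word1[51:36])
 * and one keyed on the LF/LG/LH inner layer types (word1[63:52]), the
 * two halves then OR-ed into a single RTE_PTYPE_* value.
 */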
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
        const uint16_t *const ptype = lookup_mem;
        const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
        const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
        const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

        return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
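
/* Checksum ol_flags come from a second table that sits PTYPE_ARRAY_SZ
 * bytes into lookup_mem, indexed by the errlev/errcode bits of CQE
 * word1 (word1[31:20]).
 */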
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
        const uint32_t *const ol_flags =
                (const uint32_t *)((const uint8_t *)lookup_mem +
                                   PTYPE_ARRAY_SZ);

        return ol_flags[(in & 0xfff00000) >> 20];
}
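
/* Fold the HW match_id back into rte_flow MARK/FLAG semantics; e.g. a
 * MARK action with id 5 is programmed into hardware as 6 and surfaces
 * here as mbuf->hash.fdir.hi = 6 - 1 = 5.
 */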
static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
                    struct rte_mbuf *mbuf)
{
        /* There is no separate bit to check whether match_id is
         * valid, and no flag to distinguish an
         * RTE_FLOW_ACTION_TYPE_FLAG from an RTE_FLOW_ACTION_TYPE_MARK
         * action. The former case is addressed by treating 0 as the
         * invalid value and incrementing/decrementing the match_id
         * pair when MARK is activated.
         * The latter case is addressed by defining
         * CNXK_FLOW_MARK_DEFAULT as the value for
         * RTE_FLOW_ACTION_TYPE_MARK.
         * This translates to not using
         * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
         * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
         * i.e. valid mark ids range from
         * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
         */
        if (likely(match_id)) {
                ol_flags |= PKT_RX_FDIR;
                if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
                        ol_flags |= PKT_RX_FDIR_ID;
                        mbuf->hash.fdir.hi = match_id - 1;
                }
        }

        return ol_flags;
}

static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
                    uint64_t rearm)
{
        const rte_iova_t *iova_list;
        struct rte_mbuf *head;
        const rte_iova_t *eol;
        uint8_t nb_segs;
        uint64_t sg;

        sg = *(const uint64_t *)(rx + 1);
        nb_segs = (sg >> 48) & 0x3;
        mbuf->nb_segs = nb_segs;
        mbuf->data_len = sg & 0xFFFF;
        sg = sg >> 16;

        eol = ((const rte_iova_t *)(rx + 1) +
               ((rx->cn9k.desc_sizem1 + 1) << 1));
        /* Skip SG_S and first IOVA */
        iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
        nb_segs--;

        rearm = rearm & ~0xFFFF;

        head = mbuf;
        while (nb_segs) {
                mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
                mbuf = mbuf->next;

                __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

                mbuf->data_len = sg & 0xFFFF;
                sg = sg >> 16;
                *(uint64_t *)(&mbuf->rearm_data) = rearm;
                nb_segs--;
                iova_list++;

                /* Next SG subdesc */
                if (!nb_segs && (iova_list + 1 < eol)) {
                        sg = *(const uint64_t *)(iova_list);
                        nb_segs = (sg >> 48) & 0x3;
                        head->nb_segs += nb_segs;
                        iova_list = (const rte_iova_t *)(iova_list + 1);
                }
        }
}
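
/* Convert one completed CQE into a filled-in mbuf. `flag` is a
 * compile-time constant in each generated fast-path variant, so the
 * offload branches below are resolved at build time.
 */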
static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
                     struct rte_mbuf *mbuf, const void *lookup_mem,
                     const uint64_t val, const uint16_t flag)
{
        const union nix_rx_parse_u *rx =
                (const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
        const uint16_t len = rx->cn9k.pkt_lenm1 + 1;
        const uint64_t w1 = *(const uint64_t *)rx;
        uint64_t ol_flags = 0;

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

        if (flag & NIX_RX_OFFLOAD_PTYPE_F)
                mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
        else
                mbuf->packet_type = 0;

        if (flag & NIX_RX_OFFLOAD_RSS_F) {
                mbuf->hash.rss = tag;
                ol_flags |= PKT_RX_RSS_HASH;
        }

        if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
                ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

        if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
                if (rx->cn9k.vtag0_gone) {
                        ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                        mbuf->vlan_tci = rx->cn9k.vtag0_tci;
                }
                if (rx->cn9k.vtag1_gone) {
                        ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
                        mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
                }
        }

        if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
                ol_flags = nix_update_match_id(rx->cn9k.match_id,
                                               ol_flags, mbuf);

        mbuf->ol_flags = ol_flags;
        *(uint64_t *)(&mbuf->rearm_data) = val;
        mbuf->pkt_len = len;

        if (flag & NIX_RX_MULTI_SEG_F) {
                nix_cqe_xtract_mseg(rx, mbuf, val);
        } else {
                mbuf->data_len = len;
                mbuf->next = NULL;
        }
}

static inline uint16_t
nix_rx_nb_pkts(struct cn9k_eth_rxq *rxq, const uint64_t wdata,
               const uint16_t pkts, const uint32_t qmask)
{
        uint32_t available = rxq->available;

        /* Update the available count if cached value is not enough */
        if (unlikely(available < pkts)) {
                uint64_t reg, head, tail;

                /* Use LDADDA version to avoid reorder */
                reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
                /* CQ_OP_STATUS operation error */
                if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
                    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
                        return 0;

                tail = reg & 0xFFFFF;
                head = (reg >> 20) & 0xFFFFF;
                if (tail < head)
                        available = tail - head + qmask + 1;
                else
                        available = tail - head;

                rxq->available = available;
        }

        return RTE_MIN(pkts, available);
}
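
/* Worked example of the wrap handling above: with a 1024-entry CQ
 * (qmask == 0x3FF), head == 1020 and tail == 4 decode from
 * CQ_OP_STATUS to available = 4 - 1020 + 0x3FF + 1 = 8 entries.
 */
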
static __rte_always_inline uint16_t
cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
                   const uint16_t flags)
{
        struct cn9k_eth_rxq *rxq = rx_queue;
        const uint64_t mbuf_init = rxq->mbuf_initializer;
        const void *lookup_mem = rxq->lookup_mem;
        const uint64_t data_off = rxq->data_off;
        const uintptr_t desc = rxq->desc;
        const uint64_t wdata = rxq->wdata;
        const uint32_t qmask = rxq->qmask;
        uint16_t packets = 0, nb_pkts;
        uint32_t head = rxq->head;
        struct nix_cqe_hdr_s *cq;
        struct rte_mbuf *mbuf;

        nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

        while (packets < nb_pkts) {
                /* Prefetch N desc ahead */
                rte_prefetch_non_temporal(
                        (void *)(desc + (CQE_SZ((head + 2) & qmask))));
                cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

                mbuf = nix_get_mbuf_from_cqe(cq, data_off);

                cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
                                     flags);
                cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
                                        (flags & NIX_RX_OFFLOAD_TSTAMP_F),
                                        (uint64_t *)((uint8_t *)mbuf +
                                                     data_off));
                rx_pkts[packets++] = mbuf;
                roc_prefetch_store_keep(mbuf);
                head++;
                head &= qmask;
        }

        rxq->head = head;
        rxq->available -= nb_pkts;

        /* Free all the CQs that we've processed */
        plt_write64((wdata | nb_pkts), rxq->cq_door);

        return nb_pkts;
}

#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
        if (w2 & BIT_ULL(21) /* vtag0_gone */) {
                ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
        }

        return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
        if (w2 & BIT_ULL(23) /* vtag1_gone */) {
                ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
                mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
        }

        return ol_flags;
}
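
/* NEON Rx burst: handles NIX_DESCS_PER_LOOP (4) CQEs per iteration,
 * assembling rx_descriptor_fields1 and rearm_data in vector registers
 * before storing them to the four mbufs; leftovers fall back to the
 * scalar cn9k_nix_recv_pkts().
 */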
static __rte_always_inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t pkts, const uint16_t flags)
{
        struct cn9k_eth_rxq *rxq = rx_queue;
        uint16_t packets = 0;
        uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
        const uint64_t mbuf_initializer = rxq->mbuf_initializer;
        const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
        uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
        uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
        struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
        const uint16_t *lookup_mem = rxq->lookup_mem;
        const uint32_t qmask = rxq->qmask;
        const uint64_t wdata = rxq->wdata;
        const uintptr_t desc = rxq->desc;
        uint8x16_t f0, f1, f2, f3;
        uint32_t head = rxq->head;
        uint16_t pkts_left;

        pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
        pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);

        /* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
        pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);

        while (packets < pkts) {
                /* Exit loop if head is about to wrap and become unaligned */
                if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
                    NIX_DESCS_PER_LOOP) {
                        pkts_left += (pkts - packets);
                        break;
                }

                const uintptr_t cq0 = desc + CQE_SZ(head);

                /* Prefetch N desc ahead */
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));
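
                /* CQEs 8..11 are two loop iterations ahead of the
                 * four (0..3) handled in this pass, hiding descriptor
                 * fetch latency.
                 */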

                /* Get NIX_RX_SG_S for size and buffer pointer */
                cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
                cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
                cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
                cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));

                /* Extract mbuf from NIX_RX_SG_S */
                mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
                mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
                mbuf01 = vqsubq_u64(mbuf01, data_off);
                mbuf23 = vqsubq_u64(mbuf23, data_off);

                /* Move mbufs to scalar registers for future use */
                mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
                mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
                mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
                mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

                /* Mask to get packet len from NIX_RX_SG_S */
                const uint8x16_t shuf_msk = {
                        0xFF, 0xFF, /* pkt_type set as unknown */
                        0xFF, 0xFF, /* pkt_type set as unknown */
                        0, 1,       /* octet 1~0, low 16 bits pkt_len */
                        0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
                        0, 1,       /* octet 1~0, 16 bits data_len */
                        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
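
                /* The shuffled lanes line up with rte_mbuf's
                 * rx_descriptor_fields1 area: bytes 0-3 packet_type,
                 * 4-7 pkt_len, 8-9 data_len, 10-11 vlan_tci and
                 * 12-15 hash.rss, patched individually below.
                 */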

                /* Form the rx_descriptor_fields1 with pkt_len and data_len */
                f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
                f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
                f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
                f3 = vqtbl1q_u8(cq3_w8, shuf_msk);

                /* Load CQE word0 and word 1 */
                uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
                uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
                uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
                uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
                uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
                uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
                uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
                uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];

                if (flags & NIX_RX_OFFLOAD_RSS_F) {
                        /* Fill rss in the rx_descriptor_fields1 */
                        f0 = vsetq_lane_u32(cq0_w0, f0, 3);
                        f1 = vsetq_lane_u32(cq1_w0, f1, 3);
                        f2 = vsetq_lane_u32(cq2_w0, f2, 3);
                        f3 = vsetq_lane_u32(cq3_w0, f3, 3);
                        ol_flags0 = PKT_RX_RSS_HASH;
                        ol_flags1 = PKT_RX_RSS_HASH;
                        ol_flags2 = PKT_RX_RSS_HASH;
                        ol_flags3 = PKT_RX_RSS_HASH;
                } else {
                        ol_flags0 = 0;
                        ol_flags1 = 0;
                        ol_flags2 = 0;
                        ol_flags3 = 0;
                }

                if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
                        /* Fill packet_type in the rx_descriptor_fields1 */
                        f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
                                            f0, 0);
                        f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
                                            f1, 0);
                        f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
                                            f2, 0);
                        f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
                                            f3, 0);
                }

                if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
                        ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
                        ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
                        ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
                        ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
                }

                if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
                        uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
                        uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
                        uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
                        uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

                        ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
                        ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
                        ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
                        ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

                        ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
                        ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
                        ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
                        ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
                }

                if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
                        ol_flags0 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
                                mbuf0);
                        ol_flags1 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(1) + 38), ol_flags1,
                                mbuf1);
                        ol_flags2 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(2) + 38), ol_flags2,
                                mbuf2);
                        ol_flags3 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(3) + 38), ol_flags3,
                                mbuf3);
                }

                /* Form rearm_data with ol_flags */
                rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
                rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
                rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
                rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

                /* Update rx_descriptor_fields1 */
                vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
                vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
                vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
                vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

                /* Update rearm_data */
                vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
                vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
                vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
                vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

                /* Update that no more segments */
                mbuf0->next = NULL;
                mbuf1->next = NULL;
                mbuf2->next = NULL;
                mbuf3->next = NULL;

                /* Store the mbufs to rx_pkts */
                vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
                vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);

                /* Prefetch mbufs */
                roc_prefetch_store_keep(mbuf0);
                roc_prefetch_store_keep(mbuf1);
                roc_prefetch_store_keep(mbuf2);
                roc_prefetch_store_keep(mbuf3);

                /* Mark mempool obj as "get" as it is alloc'ed by NIX */
                __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
                __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
                __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
                __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);

                /* Advance head pointer and packets */
                head += NIX_DESCS_PER_LOOP;
                head &= qmask;
                packets += NIX_DESCS_PER_LOOP;
        }

        rxq->head = head;
        rxq->available -= packets;

        rte_io_wmb();
        /* Free all the CQs that we've processed */
        plt_write64((rxq->wdata | packets), rxq->cq_door);

        if (unlikely(pkts_left))
                packets += cn9k_nix_recv_pkts(rx_queue, &rx_pkts[packets],
                                              pkts_left, flags);

        return packets;
}

#else

static inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t pkts, const uint16_t flags)
{
        RTE_SET_USED(rx_queue);
        RTE_SET_USED(rx_pkts);
        RTE_SET_USED(pkts);
        RTE_SET_USED(flags);

        return 0;
}

#endif

#define RSS_F     NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F   NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F   NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F    NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F      NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F

/* [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES \
R(no_offload, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
R(rss, 0, 0, 0, 0, 0, 1, RSS_F) \
R(ptype, 0, 0, 0, 0, 1, 0, PTYPE_F) \
R(ptype_rss, 0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
R(cksum, 0, 0, 0, 1, 0, 0, CKSUM_F) \
R(cksum_rss, 0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
R(cksum_ptype, 0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
R(cksum_ptype_rss, 0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
R(mark, 0, 0, 1, 0, 0, 0, MARK_F) \
R(mark_rss, 0, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
R(mark_ptype, 0, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
R(mark_ptype_rss, 0, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
R(mark_cksum, 0, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
R(mark_cksum_rss, 0, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
R(mark_cksum_ptype, 0, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
R(mark_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, \
  MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts, 0, 1, 0, 0, 0, 0, TS_F) \
R(ts_rss, 0, 1, 0, 0, 0, 1, TS_F | RSS_F) \
R(ts_ptype, 0, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
R(ts_ptype_rss, 0, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
R(ts_cksum, 0, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
R(ts_cksum_rss, 0, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
R(ts_cksum_ptype, 0, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
R(ts_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, \
  TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts_mark, 0, 1, 1, 0, 0, 0, TS_F | MARK_F) \
R(ts_mark_rss, 0, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
R(ts_mark_ptype, 0, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
R(ts_mark_ptype_rss, 0, 1, 1, 0, 1, 1, \
  TS_F | MARK_F | PTYPE_F | RSS_F) \
R(ts_mark_cksum, 0, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
R(ts_mark_cksum_rss, 0, 1, 1, 1, 0, 1, \
  TS_F | MARK_F | CKSUM_F | RSS_F) \
R(ts_mark_cksum_ptype, 0, 1, 1, 1, 1, 0, \
  TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(ts_mark_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, \
  TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan, 1, 0, 0, 0, 0, 0, RX_VLAN_F) \
R(vlan_rss, 1, 0, 0, 0, 0, 1, RX_VLAN_F | RSS_F) \
R(vlan_ptype, 1, 0, 0, 0, 1, 0, RX_VLAN_F | PTYPE_F) \
R(vlan_ptype_rss, 1, 0, 0, 0, 1, 1, RX_VLAN_F | PTYPE_F | RSS_F) \
R(vlan_cksum, 1, 0, 0, 1, 0, 0, RX_VLAN_F | CKSUM_F) \
R(vlan_cksum_rss, 1, 0, 0, 1, 0, 1, RX_VLAN_F | CKSUM_F | RSS_F) \
R(vlan_cksum_ptype, 1, 0, 0, 1, 1, 0, \
  RX_VLAN_F | CKSUM_F | PTYPE_F) \
R(vlan_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, \
  RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_mark, 1, 0, 1, 0, 0, 0, RX_VLAN_F | MARK_F) \
R(vlan_mark_rss, 1, 0, 1, 0, 0, 1, RX_VLAN_F | MARK_F | RSS_F) \
R(vlan_mark_ptype, 1, 0, 1, 0, 1, 0, RX_VLAN_F | MARK_F | PTYPE_F) \
R(vlan_mark_ptype_rss, 1, 0, 1, 0, 1, 1, \
  RX_VLAN_F | MARK_F | PTYPE_F | RSS_F) \
R(vlan_mark_cksum, 1, 0, 1, 1, 0, 0, RX_VLAN_F | MARK_F | CKSUM_F) \
R(vlan_mark_cksum_rss, 1, 0, 1, 1, 0, 1, \
  RX_VLAN_F | MARK_F | CKSUM_F | RSS_F) \
R(vlan_mark_cksum_ptype, 1, 0, 1, 1, 1, 0, \
  RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F) \
R(vlan_mark_cksum_ptype_rss, 1, 0, 1, 1, 1, 1, \
  RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_ts, 1, 1, 0, 0, 0, 0, RX_VLAN_F | TS_F) \
R(vlan_ts_rss, 1, 1, 0, 0, 0, 1, RX_VLAN_F | TS_F | RSS_F) \
R(vlan_ts_ptype, 1, 1, 0, 0, 1, 0, RX_VLAN_F | TS_F | PTYPE_F) \
R(vlan_ts_ptype_rss, 1, 1, 0, 0, 1, 1, \
  RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
R(vlan_ts_cksum, 1, 1, 0, 1, 0, 0, RX_VLAN_F | TS_F | CKSUM_F) \
R(vlan_ts_cksum_rss, 1, 1, 0, 1, 0, 1, \
  RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
R(vlan_ts_cksum_ptype, 1, 1, 0, 1, 1, 0, \
  RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F) \
R(vlan_ts_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, \
  RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_ts_mark, 1, 1, 1, 0, 0, 0, RX_VLAN_F | TS_F | MARK_F) \
R(vlan_ts_mark_rss, 1, 1, 1, 0, 0, 1, \
  RX_VLAN_F | TS_F | MARK_F | RSS_F) \
R(vlan_ts_mark_ptype, 1, 1, 1, 0, 1, 0, \
  RX_VLAN_F | TS_F | MARK_F | PTYPE_F) \
R(vlan_ts_mark_ptype_rss, 1, 1, 1, 0, 1, 1, \
  RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
R(vlan_ts_mark_cksum, 1, 1, 1, 1, 0, 0, \
  RX_VLAN_F | TS_F | MARK_F | CKSUM_F) \
R(vlan_ts_mark_cksum_rss, 1, 1, 1, 1, 0, 1, \
  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
R(vlan_ts_mark_cksum_ptype, 1, 1, 1, 1, 1, 0, \
  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(vlan_ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, 1, \
  RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define R(name, f5, f4, f3, f2, f1, f0, flags) \
        uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name( \
                void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
 \
        uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name( \
                void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
 \
        uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name( \
                void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R
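
/* The matching definitions are expected in the fast-path source files.
 * A minimal sketch of one generated scalar variant (illustrative, not
 * copied from cn9k_rx.c):
 *
 *      #define R(name, f5, f4, f3, f2, f1, f0, flags)
 *      uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name(
 *              void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)
 *      {
 *              return cn9k_nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));
 *      }
 *
 * Since `flags` expands to a compile-time constant, every
 * NIX_RX_OFFLOAD_* branch folds away, yielding one specialized Rx
 * burst function per offload combination in NIX_RX_FASTPATH_MODES.
 */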

#endif /* __CN9K_RX_H__ */