/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_RX_H__
#define __CN9K_RX_H__

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE          (0)
#define NIX_RX_OFFLOAD_RSS_F         BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F       BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F      BIT(4)

/* Flags to control the cqe_to_mbuf conversion function.
 * Defined from the top bit down to denote that they are not
 * offload flags used to pick the Rx function.
 */
#define NIX_RX_MULTI_SEG_F BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)          ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)            ((x) * CNXK_NIX_CQ_ENTRY_SZ)

union mbuf_initializer {
        struct {
                uint16_t data_off;
                uint16_t refcnt;
                uint16_t nb_segs;
                uint16_t port;
        } fields;
        uint64_t value;
};
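/* Note: the fields above mirror the first 8 bytes of struct rte_mbuf
 * starting at rearm_data (data_off, refcnt, nb_segs, port), so a single
 * 64-bit store can (re)initialize all four mbuf fields at once.
 */
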
static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
        union mbuf_initializer mbuf_init = {.value = oldval};

        mbuf_init.fields.data_off = 0;
        return mbuf_init.value;
}

static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
        rte_iova_t buff;

        /* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
        buff = *((rte_iova_t *)((uint64_t *)cq + 9));
        return (struct rte_mbuf *)(buff - data_off);
}
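/* Note: buff holds the address of the packet data; rxq->data_off (set up
 * elsewhere in the driver) is the distance from the start of the rte_mbuf
 * to that data, so subtracting it recovers the mbuf pointer.
 */
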
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
        const uint16_t *const ptype = lookup_mem;
        const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
        const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
        const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

        return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
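/* Note: this is a two-level lookup into a table the driver builds at init
 * time: bits [51:36] of the parse word index the non-tunnel half of the
 * table, bits [63:52] (the LF/LG/LH layer types, per the variable name)
 * index the tunnel half, and the two 16-bit results are merged into the
 * 32-bit mbuf packet_type.
 */
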
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
        const uint32_t *const ol_flags =
                (const uint32_t *)((const uint8_t *)lookup_mem +
                                   PTYPE_ARRAY_SZ);

        return ol_flags[(in & 0xfff00000) >> 20];
}
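/* Note: the ol_flags table sits right after the ptype table in lookup_mem;
 * bits [31:20] of the parse word carry the error level and error code,
 * which the table maps to PKT_RX_*_CKSUM_* style offload flags.
 */
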
static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
                    struct rte_mbuf *mbuf)
{
        /* There is no separate bit to check whether match_id is valid,
         * and no flag to identify whether it came from an
         * RTE_FLOW_ACTION_TYPE_FLAG or an RTE_FLOW_ACTION_TYPE_MARK
         * action. The former is addressed by treating 0 as the invalid
         * value and incrementing/decrementing the match_id pair when
         * MARK is activated. The latter is addressed by reserving
         * CNXK_FLOW_ACTION_FLAG_DEFAULT as the match_id value for
         * RTE_FLOW_ACTION_TYPE_FLAG.
         * This translates to not using
         * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
         * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
         * i.e. valid mark_ids are from
         * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
         */
        if (likely(match_id)) {
                ol_flags |= PKT_RX_FDIR;
                if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
                        ol_flags |= PKT_RX_FDIR_ID;
                        mbuf->hash.fdir.hi = match_id - 1;
                }
        }

        return ol_flags;
}
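/* Note: under this scheme a MARK action with id N is programmed into
 * hardware as match_id N + 1, so the match_id - 1 store above recovers
 * the user-visible mark, while a FLAG action sets PKT_RX_FDIR without a
 * valid id.
 */
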
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
                    uint64_t rearm)
{
        const rte_iova_t *iova_list;
        struct rte_mbuf *head;
        const rte_iova_t *eol;
        uint8_t nb_segs;
        uint64_t sg;

        sg = *(const uint64_t *)(rx + 1);
        nb_segs = (sg >> 48) & 0x3;
        mbuf->nb_segs = nb_segs;
        mbuf->data_len = sg & 0xFFFF;
        sg = sg >> 16;

        eol = ((const rte_iova_t *)(rx + 1) +
               ((rx->cn9k.desc_sizem1 + 1) << 1));
        /* Skip SG_S and first IOVA */
        iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
        nb_segs--;

        rearm = rearm & ~0xFFFF;

        head = mbuf;
        while (nb_segs) {
                mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
                mbuf = mbuf->next;

                __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

                mbuf->data_len = sg & 0xFFFF;
                sg = sg >> 16;
                *(uint64_t *)(&mbuf->rearm_data) = rearm;
                nb_segs--;
                iova_list++;

                if (!nb_segs && (iova_list + 1 < eol)) {
                        sg = *(const uint64_t *)(iova_list);
                        nb_segs = (sg >> 48) & 0x3;
                        head->nb_segs += nb_segs;
                        iova_list = (const rte_iova_t *)(iova_list + 1);
                }
        }
}
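/* Note: each NIX_RX_SG_S word encodes up to three 16-bit segment sizes
 * (consumed successively via sg & 0xFFFF, sg >> 16) plus a segment count
 * in bits [49:48]; it is followed by the corresponding buffer IOVAs,
 * which is why the walk above alternates between size words and IOVA
 * entries until eol.
 */
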
static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
                     struct rte_mbuf *mbuf, const void *lookup_mem,
                     const uint64_t val, const uint16_t flag)
{
        const union nix_rx_parse_u *rx =
                (const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
        const uint16_t len = rx->cn9k.pkt_lenm1 + 1;
        const uint64_t w1 = *(const uint64_t *)rx;
        uint64_t ol_flags = 0;

        /* Mark mempool obj as "get" as it is alloc'ed by NIX */
        __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

        if (flag & NIX_RX_OFFLOAD_PTYPE_F)
                mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
        else
                mbuf->packet_type = 0;

        if (flag & NIX_RX_OFFLOAD_RSS_F) {
                mbuf->hash.rss = tag;
                ol_flags |= PKT_RX_RSS_HASH;
        }

        if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
                ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

        if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
                ol_flags =
                        nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);

        mbuf->ol_flags = ol_flags;
        *(uint64_t *)(&mbuf->rearm_data) = val;
        mbuf->pkt_len = len;

        if (flag & NIX_RX_MULTI_SEG_F) {
                nix_cqe_xtract_mseg(rx, mbuf, val);
        } else {
                mbuf->data_len = len;
                mbuf->next = NULL;
        }
}
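/* Note: writing val (the precomputed mbuf_initializer) through rearm_data
 * sets data_off, refcnt, nb_segs and port with one 64-bit store instead
 * of four field writes; see union mbuf_initializer above.
 */
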
static inline uint16_t
nix_rx_nb_pkts(struct cn9k_eth_rxq *rxq, const uint64_t wdata,
               const uint16_t pkts, const uint32_t qmask)
{
        uint32_t available = rxq->available;

        /* Update the available count if cached value is not enough */
        if (unlikely(available < pkts)) {
                uint64_t reg, head, tail;

                /* Use LDADDA version to avoid reorder */
                reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
                /* CQ_OP_STATUS operation error */
                if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
                    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
                        return 0;

                tail = reg & 0xFFFFF;
                head = (reg >> 20) & 0xFFFFF;
                if (tail < head)
                        available = tail - head + qmask + 1;
                else
                        available = tail - head;

                rxq->available = available;
        }

        return RTE_MIN(pkts, available);
}
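/* Note: head/tail are 20-bit ring indices, so the wrap case needs the
 * ring size (qmask + 1) added back. E.g. with qmask = 1023, head = 1020
 * and tail = 4: available = 4 - 1020 + 1023 + 1 = 8 CQEs.
 */
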
static __rte_always_inline uint16_t
cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
                   const uint16_t flags)
{
        struct cn9k_eth_rxq *rxq = rx_queue;
        const uint64_t mbuf_init = rxq->mbuf_initializer;
        const void *lookup_mem = rxq->lookup_mem;
        const uint64_t data_off = rxq->data_off;
        const uintptr_t desc = rxq->desc;
        const uint64_t wdata = rxq->wdata;
        const uint32_t qmask = rxq->qmask;
        uint16_t packets = 0, nb_pkts;
        uint32_t head = rxq->head;
        struct nix_cqe_hdr_s *cq;
        struct rte_mbuf *mbuf;

        nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

        while (packets < nb_pkts) {
                /* Prefetch N desc ahead */
                rte_prefetch_non_temporal(
                        (void *)(desc + (CQE_SZ((head + 2) & qmask))));
                cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

                mbuf = nix_get_mbuf_from_cqe(cq, data_off);

                cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
                                     flags);
                cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
                                        (flags & NIX_RX_OFFLOAD_TSTAMP_F),
                                        (uint64_t *)((uint8_t *)mbuf +
                                                     data_off));
                rx_pkts[packets++] = mbuf;
                roc_prefetch_store_keep(mbuf);
                head++;
                head &= qmask;
        }

        rxq->head = head;
        rxq->available -= nb_pkts;

        /* Free all the CQs that we've processed */
        plt_write64((wdata | nb_pkts), rxq->cq_door);

        return nb_pkts;
}
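/* Note: the cq_door write appears to both ring the doorbell and return
 * the processed CQEs to hardware; wdata carries the queue selector bits
 * that the completion count is OR'd into.
 */
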
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t pkts, const uint16_t flags)
{
        struct cn9k_eth_rxq *rxq = rx_queue;
        uint16_t packets = 0;
        uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
        const uint64_t mbuf_initializer = rxq->mbuf_initializer;
        const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
        uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
        uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
        struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
        const uint16_t *lookup_mem = rxq->lookup_mem;
        const uint32_t qmask = rxq->qmask;
        const uint64_t wdata = rxq->wdata;
        const uintptr_t desc = rxq->desc;
        uint8x16_t f0, f1, f2, f3;
        uint32_t head = rxq->head;
        uint16_t pkts_left;

        pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
        pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);

        /* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
        pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
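
        /* Note: the vector loop below consumes CQEs in groups of four;
         * the remainder (pkts_left) is handed to the scalar
         * cn9k_nix_recv_pkts() after the loop.
         */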
        while (packets < pkts) {
                /* Exit loop if head is about to wrap and become unaligned */
                if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
                    NIX_DESCS_PER_LOOP) {
                        pkts_left += (pkts - packets);
                        break;
                }

                const uintptr_t cq0 = desc + CQE_SZ(head);

                /* Prefetch N desc ahead */
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
                rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));

                /* Get NIX_RX_SG_S for size and buffer pointer */
                cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
                cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
                cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
                cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));

                /* Extract mbuf from NIX_RX_SG_S */
                mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
                mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
                mbuf01 = vqsubq_u64(mbuf01, data_off);
                mbuf23 = vqsubq_u64(mbuf23, data_off);

                /* Move mbufs to scalar registers for future use */
                mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
                mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
                mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
                mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

                /* Mask to get packet len from NIX_RX_SG_S */
                const uint8x16_t shuf_msk = {
                        0xFF, 0xFF, /* pkt_type set as unknown */
                        0xFF, 0xFF, /* pkt_type set as unknown */
                        0, 1,       /* octet 1~0, low 16 bits pkt_len */
                        0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
                        0, 1,       /* octet 1~0, 16 bits data_len */
                        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
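
                /* Note: vqtbl1q_u8 below uses this mask to scatter the
                 * 16-bit length from NIX_RX_SG_S into both the pkt_len
                 * and data_len slots of rx_descriptor_fields1 in a
                 * single table lookup; 0xFF lanes produce zero bytes.
                 */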
                /* Form the rx_descriptor_fields1 with pkt_len and data_len */
                f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
                f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
                f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
                f3 = vqtbl1q_u8(cq3_w8, shuf_msk);

                /* Load CQE word0 and word 1 */
                uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
                uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
                uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
                uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
                uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
                uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
                uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
                uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];

                if (flags & NIX_RX_OFFLOAD_RSS_F) {
                        /* Fill rss in the rx_descriptor_fields1 */
                        f0 = vsetq_lane_u32(cq0_w0, f0, 3);
                        f1 = vsetq_lane_u32(cq1_w0, f1, 3);
                        f2 = vsetq_lane_u32(cq2_w0, f2, 3);
                        f3 = vsetq_lane_u32(cq3_w0, f3, 3);
                        ol_flags0 = PKT_RX_RSS_HASH;
                        ol_flags1 = PKT_RX_RSS_HASH;
                        ol_flags2 = PKT_RX_RSS_HASH;
                        ol_flags3 = PKT_RX_RSS_HASH;
                } else {
                        ol_flags0 = 0;
                        ol_flags1 = 0;
                        ol_flags2 = 0;
                        ol_flags3 = 0;
                }

                if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
                        /* Fill packet_type in the rx_descriptor_fields1 */
                        f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
                                            f0, 0);
                        f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
                                            f1, 0);
                        f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
                                            f2, 0);
                        f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
                                            f3, 0);
                }

                if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
                        ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
                        ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
                        ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
                        ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
                }

                if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
                        ol_flags0 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
                                mbuf0);
                        ol_flags1 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(1) + 38), ol_flags1,
                                mbuf1);
                        ol_flags2 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(2) + 38), ol_flags2,
                                mbuf2);
                        ol_flags3 = nix_update_match_id(
                                *(uint16_t *)(cq0 + CQE_SZ(3) + 38), ol_flags3,
                                mbuf3);
                }

                /* Form rearm_data with ol_flags */
                rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
                rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
                rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
                rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

                /* Update rx_descriptor_fields1 */
                vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
                vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
                vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
                vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

                /* Update rearm_data */
                vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
                vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
                vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
                vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

                /* Update that no more segments */
                mbuf0->next = NULL;
                mbuf1->next = NULL;
                mbuf2->next = NULL;
                mbuf3->next = NULL;

                /* Store the mbufs to rx_pkts */
                vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
                vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);

                roc_prefetch_store_keep(mbuf0);
                roc_prefetch_store_keep(mbuf1);
                roc_prefetch_store_keep(mbuf2);
                roc_prefetch_store_keep(mbuf3);

                /* Mark mempool obj as "get" as it is alloc'ed by NIX */
                __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
                __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
                __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
                __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
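
                /* Note: __mempool_check_cookies() compiles to a no-op
                 * unless RTE_LIBRTE_MEMPOOL_DEBUG is enabled; it only
                 * keeps the debug cookie accounting consistent for
                 * buffers that hardware, not the CPU, allocated.
                 */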
                /* Advance head pointer and packets */
                head += NIX_DESCS_PER_LOOP;
                head &= qmask;
                packets += NIX_DESCS_PER_LOOP;
        }

        rxq->head = head;
        rxq->available -= packets;

        rte_io_wmb();
        /* Free all the CQs that we've processed */
        plt_write64((rxq->wdata | packets), rxq->cq_door);

        if (unlikely(pkts_left))
                packets += cn9k_nix_recv_pkts(rx_queue, &rx_pkts[packets],
                                              pkts_left, flags);

        return packets;
}

#else

static inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
                          uint16_t pkts, const uint16_t flags)
{
        RTE_SET_USED(rx_queue);
        RTE_SET_USED(rx_pkts);
        RTE_SET_USED(pkts);
        RTE_SET_USED(flags);

        return 0;
}

#endif

#define RSS_F   NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F    NIX_RX_OFFLOAD_TSTAMP_F

/* [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES                                                 \
R(no_offload,              0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE)                \
R(rss,                     0, 0, 0, 0, 1, RSS_F)                              \
R(ptype,                   0, 0, 0, 1, 0, PTYPE_F)                            \
R(ptype_rss,               0, 0, 0, 1, 1, PTYPE_F | RSS_F)                    \
R(cksum,                   0, 0, 1, 0, 0, CKSUM_F)                            \
R(cksum_rss,               0, 0, 1, 0, 1, CKSUM_F | RSS_F)                    \
R(cksum_ptype,             0, 0, 1, 1, 0, CKSUM_F | PTYPE_F)                  \
R(cksum_ptype_rss,         0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F)          \
R(mark,                    0, 1, 0, 0, 0, MARK_F)                             \
R(mark_rss,                0, 1, 0, 0, 1, MARK_F | RSS_F)                     \
R(mark_ptype,              0, 1, 0, 1, 0, MARK_F | PTYPE_F)                   \
R(mark_ptype_rss,          0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F)           \
R(mark_cksum,              0, 1, 1, 0, 0, MARK_F | CKSUM_F)                   \
R(mark_cksum_rss,          0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F)           \
R(mark_cksum_ptype,        0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)         \
R(mark_cksum_ptype_rss,    0, 1, 1, 1, 1, MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts,                      1, 0, 0, 0, 0, TS_F)                               \
R(ts_rss,                  1, 0, 0, 0, 1, TS_F | RSS_F)                       \
R(ts_ptype,                1, 0, 0, 1, 0, TS_F | PTYPE_F)                     \
R(ts_ptype_rss,            1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F)             \
R(ts_cksum,                1, 0, 1, 0, 0, TS_F | CKSUM_F)                     \
R(ts_cksum_rss,            1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F)             \
R(ts_cksum_ptype,          1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F)           \
R(ts_cksum_ptype_rss,      1, 0, 1, 1, 1, TS_F | CKSUM_F | PTYPE_F | RSS_F)   \
R(ts_mark,                 1, 1, 0, 0, 0, TS_F | MARK_F)                      \
R(ts_mark_rss,             1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F)              \
R(ts_mark_ptype,           1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F)            \
R(ts_mark_ptype_rss,       1, 1, 0, 1, 1, TS_F | MARK_F | PTYPE_F | RSS_F)    \
R(ts_mark_cksum,           1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F)            \
R(ts_mark_cksum_rss,       1, 1, 1, 0, 1, TS_F | MARK_F | CKSUM_F | RSS_F)    \
R(ts_mark_cksum_ptype,     1, 1, 1, 1, 0, TS_F | MARK_F | CKSUM_F | PTYPE_F)  \
R(ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define R(name, f4, f3, f2, f1, f0, flags)                                    \
        uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name(          \
                void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
                                                                              \
        uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name(     \
                void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);    \
                                                                              \
        uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(      \
                void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);
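
/* Note: NIX_RX_FASTPATH_MODES expands R() once per offload combination;
 * with the R() above, e.g. R(rss, 0, 0, 0, 0, 1, RSS_F) declares
 * cn9k_nix_recv_pkts_rss(), cn9k_nix_recv_pkts_mseg_rss() and
 * cn9k_nix_recv_pkts_vec_rss(); the matching definitions presumably
 * live in the driver's .c files and call the inline helpers above with
 * flags = RSS_F.
 */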
NIX_RX_FASTPATH_MODES
#undef R

#endif /* __CN9K_RX_H__ */