/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN10K_RX_H__
#define __CN10K_RX_H__

#include <rte_vect.h>

#include <cnxk_ethdev.h>
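
/* Rx offload flags: each bit enables extra work in the CQE-to-mbuf
 * conversion. Every combination is expanded into its own Rx function
 * through NIX_RX_FASTPATH_MODES below, so these tests resolve at
 * compile time in the fast path.
 */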
#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)

/* Flags to control cqe_to_mbuf conversion function.
 * Defined from the top bits down to denote that they are not
 * offload flags used to pick the Rx function.
 */
#define NIX_RX_VWQE_F	   BIT(14)
#define NIX_RX_MULTI_SEG_F BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)
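
/* Pick a CQE word address: when NIX_RX_VWQE_F is set, "b" is an array of
 * CQE pointers (vector WQE path) and entry "i" is dereferenced; otherwise
 * "b" is the base of a contiguous CQ ring and entry "i" sits at a fixed
 * CQE_SZ() stride. "o" is a byte offset within the chosen CQE.
 */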
#define CQE_PTR_OFF(b, i, o, f)                                                \
	(((f) & NIX_RX_VWQE_F) ?                                               \
		       (uint64_t *)(((uintptr_t)((uint64_t *)(b))[i]) + (o)) : \
		       (uint64_t *)(((uintptr_t)(b)) + CQE_SZ(i) + (o)))

union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}
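
/* The SG area of the CQE holds the buffer IOVA, which points at the packet
 * data; the mbuf header sits a fixed distance (data_off) before it, so a
 * plain subtraction recovers the struct rte_mbuf pointer.
 */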
static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}
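
/* Two-stage ptype lookup on NIX_RX_PARSE_S word1: bits [51:36] index the
 * non-tunnel table and bits [63:52] (the LF/LG/LH layer types) index the
 * tunnel table that follows it; the two halves are merged into the final
 * RTE ptype.
 */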
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}
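
/* Checksum ol_flags come from a second lookup table placed right after the
 * ptype arrays in lookup_mem, indexed by what are presumably the error
 * level/code fields in bits [31:20] of NIX_RX_PARSE_S word1.
 */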
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}

static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id is valid,
	 * nor a flag to identify whether it came from an
	 * RTE_FLOW_ACTION_TYPE_FLAG or an RTE_FLOW_ACTION_TYPE_MARK
	 * action. The former case is addressed by treating 0 as the
	 * invalid value and incrementing/decrementing the match_id pair
	 * when MARK is activated. The latter case is addressed by
	 * reserving CNXK_FLOW_ACTION_FLAG_DEFAULT as the value for
	 * RTE_FLOW_ACTION_TYPE_FLAG.
	 * This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
	 * i.e. valid mark IDs range from
	 * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= PKT_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= PKT_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}
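
/* Walk the NIX_RX_SG_S sub-descriptors that follow NIX_RX_PARSE_S. Each SG
 * word carries a segment count and up to three 16-bit segment sizes and is
 * followed by one IOVA per segment; chained SG words may continue up to the
 * descriptor end given by desc_sizem1.
 */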
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm, const uint16_t flags)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;

	if (nb_segs == 1) {
		mbuf->next = NULL;
		return;
	}

	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->nb_segs = nb_segs;
	sg = sg >> 16;
	nb_segs--;

	eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;

	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}
	mbuf->next = NULL;
}
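
/* Convert a single CQE into a filled-in mbuf. "flag" is a compile-time
 * constant in all callers, so the offload branches below are resolved by
 * the compiler and only the enabled work remains in each specialized Rx
 * function.
 */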
static __rte_always_inline void
cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		      struct rte_mbuf *mbuf, const void *lookup_mem,
		      const uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	const uint16_t len = rx->pkt_lenm1 + 1;
	const uint64_t w1 = *(const uint64_t *)rx;
	uint64_t ol_flags = 0;

	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
	else
		mbuf->packet_type = 0;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= PKT_RX_RSS_HASH;
	}

	if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
		if (rx->vtag0_gone) {
			ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = rx->vtag0_tci;
		}
		if (rx->vtag1_gone) {
			ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = rx->vtag1_tci;
		}
	}

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);

	mbuf->ol_flags = ol_flags;
	mbuf->pkt_len = len;
	mbuf->data_len = len;
	*(uint64_t *)(&mbuf->rearm_data) = val;

	if (flag & NIX_RX_MULTI_SEG_F)
		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
	else
		mbuf->next = NULL;
}
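
/* The LDADD(A) to the CQ status register atomically returns head and tail
 * in a single read: tail in bits [19:0], head in bits [39:20], with
 * NIX_CQ_OP_STAT_OP_ERR/NIX_CQ_OP_STAT_CQ_ERR flagging a failed operation.
 */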
static inline uint16_t
nix_rx_nb_pkts(struct cn10k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}
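
/* Scalar Rx burst: converts up to "pkts" CQEs into mbufs and then rings
 * the CQ doorbell once to return every processed entry to hardware.
 */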
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		    const uint16_t flags)
{
	struct cn10k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint16_t packets = 0, nb_pkts;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				      flags);
		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
					(flags & NIX_RX_MULTI_SEG_F),
					(uint64_t *)((uint8_t *)mbuf
							     + data_off));
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	return nb_pkts;
}

#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
	if (w2 & BIT_ULL(21) /* vtag0_gone */) {
		ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
	}

	return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
	if (w2 & BIT_ULL(23) /* vtag1_gone */) {
		ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
		mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
	}

	return ol_flags;
}
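
/* Vector Rx burst: four CQEs per iteration using NEON. With NIX_RX_VWQE_F,
 * "args" is an event vector: its first word doubles as the mbuf
 * initializer and "mbufs" holds CQE pointers that appear to point into each
 * mbuf's headroom (hence the fixed 0x80 data_off), so the same array is
 * rewritten in place with the finished mbuf pointers.
 */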
static __rte_always_inline uint16_t
cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
			   const uint16_t flags, void *lookup_mem,
			   struct cnxk_timesync_info *tstamp)
{
	struct cn10k_eth_rxq *rxq = args;
	const uint64_t mbuf_initializer = (flags & NIX_RX_VWQE_F) ?
						  *(uint64_t *)args :
						  rxq->mbuf_initializer;
	const uint64x2_t data_off = flags & NIX_RX_VWQE_F ?
					  vdupq_n_u64(0x80ULL) :
					  vdupq_n_u64(rxq->data_off);
	const uint32_t qmask = flags & NIX_RX_VWQE_F ? 0 : rxq->qmask;
	const uint64_t wdata = flags & NIX_RX_VWQE_F ? 0 : rxq->wdata;
	const uintptr_t desc = flags & NIX_RX_VWQE_F ? 0 : rxq->desc;
	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	uint8x16_t f0, f1, f2, f3;
	uint16_t packets = 0;
	uint16_t pkts_left;
	uint32_t head;
	uintptr_t cq0;

	if (!(flags & NIX_RX_VWQE_F)) {
		lookup_mem = rxq->lookup_mem;
		head = rxq->head;

		pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
		pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
		/* Packets has to be floor-aligned to NIX_DESCS_PER_LOOP */
		pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
			tstamp = rxq->tstamp;
	} else {
		RTE_SET_USED(head);
		pkts_left = 0;
	}

	while (packets < pkts) {
		if (!(flags & NIX_RX_VWQE_F)) {
			/* Exit loop if head is about to wrap and become
			 * unaligned.
			 */
			if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
			    NIX_DESCS_PER_LOOP) {
				pkts_left += (pkts - packets);
				break;
			}

			cq0 = desc + CQE_SZ(head);
		} else {
			cq0 = (uintptr_t)&mbufs[packets];
		}

		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 8, 0, flags));
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 9, 0, flags));
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 10, 0, flags));
		rte_prefetch_non_temporal(CQE_PTR_OFF(cq0, 11, 0, flags));

		/* Get NIX_RX_SG_S for size and buffer pointer */
		cq0_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 0, 64, flags));
		cq1_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 1, 64, flags));
		cq2_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 2, 64, flags));
		cq3_w8 = vld1q_u64(CQE_PTR_OFF(cq0, 3, 64, flags));

		if (!(flags & NIX_RX_VWQE_F)) {
			/* Extract mbuf from NIX_RX_SG_S */
			mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
			mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
			mbuf01 = vqsubq_u64(mbuf01, data_off);
			mbuf23 = vqsubq_u64(mbuf23, data_off);
		} else {
			mbuf01 =
				vsubq_u64(vld1q_u64((uint64_t *)cq0), data_off);
			mbuf23 = vsubq_u64(vld1q_u64((uint64_t *)(cq0 + 16)),
					   data_off);
		}

		/* Move mbufs to scalar registers for future use */
		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

		/* Mask to get packet len from NIX_RX_SG_S */
		const uint8x16_t shuf_msk = {
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0, 1,	    /* octet 1~0, low 16 bits pkt_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			0, 1,	    /* octet 1~0, 16 bits data_len */
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);

		/* Load CQE word0 and word1 */
		const uint64_t cq0_w0 = *CQE_PTR_OFF(cq0, 0, 0, flags);
		const uint64_t cq0_w1 = *CQE_PTR_OFF(cq0, 0, 8, flags);
		const uint64_t cq1_w0 = *CQE_PTR_OFF(cq0, 1, 0, flags);
		const uint64_t cq1_w1 = *CQE_PTR_OFF(cq0, 1, 8, flags);
		const uint64_t cq2_w0 = *CQE_PTR_OFF(cq0, 2, 0, flags);
		const uint64_t cq2_w1 = *CQE_PTR_OFF(cq0, 2, 8, flags);
		const uint64_t cq3_w0 = *CQE_PTR_OFF(cq0, 3, 0, flags);
		const uint64_t cq3_w1 = *CQE_PTR_OFF(cq0, 3, 8, flags);

		if (flags & NIX_RX_OFFLOAD_RSS_F) {
			/* Fill rss in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
			ol_flags0 = PKT_RX_RSS_HASH;
			ol_flags1 = PKT_RX_RSS_HASH;
			ol_flags2 = PKT_RX_RSS_HASH;
			ol_flags3 = PKT_RX_RSS_HASH;
		} else {
			ol_flags0 = 0;
			ol_flags1 = 0;
			ol_flags2 = 0;
			ol_flags3 = 0;
		}

		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
			/* Fill packet_type in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
					    f0, 0);
			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
					    f1, 0);
			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
					    f2, 0);
			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
					    f3, 0);
		}

		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
		}

		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
			uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
			uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
			ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
			ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

			ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
			ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
			ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
			ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
			ol_flags0 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 0, 38, flags),
				ol_flags0, mbuf0);
			ol_flags1 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 1, 38, flags),
				ol_flags1, mbuf1);
			ol_flags2 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 2, 38, flags),
				ol_flags2, mbuf2);
			ol_flags3 = nix_update_match_id(
				*(uint16_t *)CQE_PTR_OFF(cq0, 3, 38, flags),
				ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
			const uint16x8_t len_off = {
				0,			     /* ptype   0:15 */
				0,			     /* ptype  16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15 */
				0,			     /* pktlen 16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
				0,
				0,
				0};
			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC};
			const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
						PKT_RX_IEEE1588_TMST |
						tstamp->rx_tstamp_dynflag;
			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
			uint64x2_t ts01, ts23, mask;
			uint64_t ts[4];
			uint8_t res;

			/* Subtract timesync length from total pkt length. */
			f0 = vsubq_u16(f0, len_off);
			f1 = vsubq_u16(f1, len_off);
			f2 = vsubq_u16(f2, len_off);
			f3 = vsubq_u16(f3, len_off);

			/* Get the address of actual timestamp. */
			ts01 = vaddq_u64(mbuf01, data_off);
			ts23 = vaddq_u64(mbuf23, data_off);
			/* Load timestamp from address. */
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  0),
					      ts01, 0);
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  1),
					      ts01, 1);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  0),
					      ts23, 0);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  1),
					      ts23, 1);
			/* Convert from be to cpu byteorder. */
			ts01 = vrev64q_u8(ts01);
			ts23 = vrev64q_u8(ts23);
			/* Store timestamp into scalar for later use. */
			ts[0] = vgetq_lane_u64(ts01, 0);
			ts[1] = vgetq_lane_u64(ts01, 1);
			ts[2] = vgetq_lane_u64(ts23, 0);
			ts[3] = vgetq_lane_u64(ts23, 1);

			/* Store timestamp into dynfield. */
			*cnxk_nix_timestamp_dynfield(mbuf0, tstamp) = ts[0];
			*cnxk_nix_timestamp_dynfield(mbuf1, tstamp) = ts[1];
			*cnxk_nix_timestamp_dynfield(mbuf2, tstamp) = ts[2];
			*cnxk_nix_timestamp_dynfield(mbuf3, tstamp) = ts[3];

			/* Generate ptype mask to filter L2 ether timesync */
			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

			/* Match against L2 ether timesync. */
			mask = vceqq_u32(mask, ptype);
			/* Convert from vector to scalar mask */
			res = vaddvq_u32(vandq_u32(mask, and_mask));

			if (res) {
				/* Fill in the ol_flags for any packets that
				 * matched.
				 */
				ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
				ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
				ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
				ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

				/* Update Rxq timestamp with the latest
				 * timestamp.
				 */
				tstamp->rx_ready = 1;
				tstamp->rx_tstamp = ts[31 - __builtin_clz(res)];
			}
		}

		/* Form rearm_data with ol_flags */
		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

		/* Update rx_descriptor_fields1 */
		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

		/* Update rearm_data */
		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

		/* Store the mbufs to rx_pkts */
		vst1q_u64((uint64_t *)&mbufs[packets], mbuf01);
		vst1q_u64((uint64_t *)&mbufs[packets + 2], mbuf23);

		if (flags & NIX_RX_MULTI_SEG_F) {
			/* Multi segment is enabled, build mseg list for
			 * individual mbufs in scalar mode.
			 */
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(CQE_PTR_OFF(cq0, 0, 8, flags)),
					    mbuf0, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(CQE_PTR_OFF(cq0, 1, 8, flags)),
					    mbuf1, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(CQE_PTR_OFF(cq0, 2, 8, flags)),
					    mbuf2, mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(CQE_PTR_OFF(cq0, 3, 8, flags)),
					    mbuf3, mbuf_initializer, flags);
		} else {
			/* Update that no more segments */
			mbuf0->next = NULL;
			mbuf1->next = NULL;
			mbuf2->next = NULL;
			mbuf3->next = NULL;
		}

		roc_prefetch_store_keep(mbuf0);
		roc_prefetch_store_keep(mbuf1);
		roc_prefetch_store_keep(mbuf2);
		roc_prefetch_store_keep(mbuf3);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);

		packets += NIX_DESCS_PER_LOOP;

		if (!(flags & NIX_RX_VWQE_F)) {
			/* Advance head pointer and packets */
			head += NIX_DESCS_PER_LOOP;
			head &= qmask;
		}
	}

	if (flags & NIX_RX_VWQE_F)
		return packets;

	rxq->head = head;
	rxq->available -= packets;

	rte_io_wmb();
	/* Free all the CQs that we've processed */
	plt_write64((rxq->wdata | packets), rxq->cq_door);

	if (unlikely(pkts_left))
		packets += cn10k_nix_recv_pkts(args, &mbufs[packets], pkts_left,
					       flags);

	return packets;
}

#else

static inline uint16_t
cn10k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t pkts, const uint16_t flags,
			   void *lookup_mem, void *tstamp)
{
	RTE_SET_USED(rx_queue);
	RTE_SET_USED(rx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(flags);
	RTE_SET_USED(lookup_mem);
	RTE_SET_USED(tstamp);

	return 0;
}

#endif

#define RSS_F	  NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F	  NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F

/* [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES \
R(no_offload, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
R(rss, 0, 0, 0, 0, 0, 1, RSS_F) \
R(ptype, 0, 0, 0, 0, 1, 0, PTYPE_F) \
R(ptype_rss, 0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
R(cksum, 0, 0, 0, 1, 0, 0, CKSUM_F) \
R(cksum_rss, 0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
R(cksum_ptype, 0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
R(cksum_ptype_rss, 0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F) \
R(mark, 0, 0, 1, 0, 0, 0, MARK_F) \
R(mark_rss, 0, 0, 1, 0, 0, 1, MARK_F | RSS_F) \
R(mark_ptype, 0, 0, 1, 0, 1, 0, MARK_F | PTYPE_F) \
R(mark_ptype_rss, 0, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
R(mark_cksum, 0, 0, 1, 1, 0, 0, MARK_F | CKSUM_F) \
R(mark_cksum_rss, 0, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
R(mark_cksum_ptype, 0, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F) \
R(mark_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, \
	MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts, 0, 1, 0, 0, 0, 0, TS_F) \
R(ts_rss, 0, 1, 0, 0, 0, 1, TS_F | RSS_F) \
R(ts_ptype, 0, 1, 0, 0, 1, 0, TS_F | PTYPE_F) \
R(ts_ptype_rss, 0, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
R(ts_cksum, 0, 1, 0, 1, 0, 0, TS_F | CKSUM_F) \
R(ts_cksum_rss, 0, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
R(ts_cksum_ptype, 0, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
R(ts_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, \
	TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(ts_mark, 0, 1, 1, 0, 0, 0, TS_F | MARK_F) \
R(ts_mark_rss, 0, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F) \
R(ts_mark_ptype, 0, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
R(ts_mark_ptype_rss, 0, 1, 1, 0, 1, 1, \
	TS_F | MARK_F | PTYPE_F | RSS_F) \
R(ts_mark_cksum, 0, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
R(ts_mark_cksum_rss, 0, 1, 1, 1, 0, 1, \
	TS_F | MARK_F | CKSUM_F | RSS_F) \
R(ts_mark_cksum_ptype, 0, 1, 1, 1, 1, 0, \
	TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(ts_mark_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, \
	TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan, 1, 0, 0, 0, 0, 0, RX_VLAN_F) \
R(vlan_rss, 1, 0, 0, 0, 0, 1, RX_VLAN_F | RSS_F) \
R(vlan_ptype, 1, 0, 0, 0, 1, 0, RX_VLAN_F | PTYPE_F) \
R(vlan_ptype_rss, 1, 0, 0, 0, 1, 1, RX_VLAN_F | PTYPE_F | RSS_F) \
R(vlan_cksum, 1, 0, 0, 1, 0, 0, RX_VLAN_F | CKSUM_F) \
R(vlan_cksum_rss, 1, 0, 0, 1, 0, 1, RX_VLAN_F | CKSUM_F | RSS_F) \
R(vlan_cksum_ptype, 1, 0, 0, 1, 1, 0, \
	RX_VLAN_F | CKSUM_F | PTYPE_F) \
R(vlan_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, \
	RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_mark, 1, 0, 1, 0, 0, 0, RX_VLAN_F | MARK_F) \
R(vlan_mark_rss, 1, 0, 1, 0, 0, 1, RX_VLAN_F | MARK_F | RSS_F) \
R(vlan_mark_ptype, 1, 0, 1, 0, 1, 0, RX_VLAN_F | MARK_F | PTYPE_F) \
R(vlan_mark_ptype_rss, 1, 0, 1, 0, 1, 1, \
	RX_VLAN_F | MARK_F | PTYPE_F | RSS_F) \
R(vlan_mark_cksum, 1, 0, 1, 1, 0, 0, RX_VLAN_F | MARK_F | CKSUM_F) \
R(vlan_mark_cksum_rss, 1, 0, 1, 1, 0, 1, \
	RX_VLAN_F | MARK_F | CKSUM_F | RSS_F) \
R(vlan_mark_cksum_ptype, 1, 0, 1, 1, 1, 0, \
	RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F) \
R(vlan_mark_cksum_ptype_rss, 1, 0, 1, 1, 1, 1, \
	RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_ts, 1, 1, 0, 0, 0, 0, RX_VLAN_F | TS_F) \
R(vlan_ts_rss, 1, 1, 0, 0, 0, 1, RX_VLAN_F | TS_F | RSS_F) \
R(vlan_ts_ptype, 1, 1, 0, 0, 1, 0, RX_VLAN_F | TS_F | PTYPE_F) \
R(vlan_ts_ptype_rss, 1, 1, 0, 0, 1, 1, \
	RX_VLAN_F | TS_F | PTYPE_F | RSS_F) \
R(vlan_ts_cksum, 1, 1, 0, 1, 0, 0, RX_VLAN_F | TS_F | CKSUM_F) \
R(vlan_ts_cksum_rss, 1, 1, 0, 1, 0, 1, \
	RX_VLAN_F | TS_F | CKSUM_F | RSS_F) \
R(vlan_ts_cksum_ptype, 1, 1, 0, 1, 1, 0, \
	RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F) \
R(vlan_ts_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, \
	RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
R(vlan_ts_mark, 1, 1, 1, 0, 0, 0, RX_VLAN_F | TS_F | MARK_F) \
R(vlan_ts_mark_rss, 1, 1, 1, 0, 0, 1, \
	RX_VLAN_F | TS_F | MARK_F | RSS_F) \
R(vlan_ts_mark_ptype, 1, 1, 1, 0, 1, 0, \
	RX_VLAN_F | TS_F | MARK_F | PTYPE_F) \
R(vlan_ts_mark_ptype_rss, 1, 1, 1, 0, 1, 1, \
	RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
R(vlan_ts_mark_cksum, 1, 1, 1, 1, 0, 0, \
	RX_VLAN_F | TS_F | MARK_F | CKSUM_F) \
R(vlan_ts_mark_cksum_rss, 1, 1, 1, 1, 0, 1, \
	RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
R(vlan_ts_mark_cksum_ptype, 1, 1, 1, 1, 1, 0, \
	RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
R(vlan_ts_mark_cksum_ptype_rss, 1, 1, 1, 1, 1, 1, \
	RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

#define R(name, f5, f4, f3, f2, f1, f0, flags) \
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts); \
	\
	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_vec_mseg_##name( \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R
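
/* A minimal sketch (assumption: the real definitions live in the cn10k Rx
 * .c files) of how these prototypes are expected to be instantiated. Each
 * expansion simply forwards the compile-time flag set so the always-inline
 * workers above specialize per mode:
 *
 * #define R(name, f5, f4, f3, f2, f1, f0, flags)                           \
 *	uint16_t __rte_noinline __rte_hot cn10k_nix_recv_pkts_##name(       \
 *		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)   \
 *	{                                                                   \
 *		return cn10k_nix_recv_pkts(rx_queue, rx_pkts, pkts,         \
 *					   (flags));                        \
 *	}
 *
 * NIX_RX_FASTPATH_MODES
 * #undef R
 */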

#endif /* __CN10K_RX_H__ */