/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#ifndef __CN9K_RX_H__
#define __CN9K_RX_H__

#include <cnxk_ethdev.h>

#define NIX_RX_OFFLOAD_NONE	     (0)
#define NIX_RX_OFFLOAD_RSS_F	     BIT(0)
#define NIX_RX_OFFLOAD_PTYPE_F	     BIT(1)
#define NIX_RX_OFFLOAD_CHECKSUM_F    BIT(2)
#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
#define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
#define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)

/* Flags to control cqe_to_mbuf conversion function.
 * Defined from the MSB downwards to denote that they are
 * not offload flags used to pick the Rx function.
 */
#define NIX_RX_MULTI_SEG_F BIT(14)
#define CPT_RX_WQE_F	   BIT(15)

#define CNXK_NIX_CQ_ENTRY_SZ 128
#define NIX_DESCS_PER_LOOP   4
#define CQE_CAST(x)	     ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)	     ((x) * CNXK_NIX_CQ_ENTRY_SZ)

union mbuf_initializer {
	struct {
		uint16_t data_off;
		uint16_t refcnt;
		uint16_t nb_segs;
		uint16_t port;
	} fields;
	uint64_t value;
};

static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
	union mbuf_initializer mbuf_init = {.value = oldval};

	mbuf_init.fields.data_off = 0;
	return mbuf_init.value;
}

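/* Illustrative sketch (not part of the driver): the union above overlays
 * the 64-bit rearm_data word of struct rte_mbuf, so a cached template can
 * be patched field-wise instead of with shift/mask arithmetic. Assuming a
 * template captured from a primed mbuf, clearing its data_off looks like:
 *
 *	uint64_t tmpl = *(uint64_t *)&mbuf->rearm_data;
 *	tmpl = nix_clear_data_off(tmpl);
 */
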
static __rte_always_inline struct rte_mbuf *
nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
{
	rte_iova_t buff;

	/* Skip CQE, NIX_RX_PARSE_S and SG HDR (9 DWORDs) and peek buff addr */
	buff = *((rte_iova_t *)((uint64_t *)cq + 9));
	return (struct rte_mbuf *)(buff - data_off);
}

static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
	const uint16_t *const ptype = lookup_mem;
	const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
	const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
	const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];

	return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
}

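/* Lookup sketch (illustrative): W1 of the Rx parse word is diced into two
 * indices, bits 36..51 for the tunnel/L2 table and bits 52..63 (LH/LG/LF)
 * for the inner-L4/tunnel table; the two 16-bit results are then glued
 * into one 32-bit mbuf packet_type:
 *
 *	uint32_t pt = nix_ptype_get(rxq->lookup_mem, w1);
 */
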
static __rte_always_inline uint32_t
nix_rx_olflags_get(const void *const lookup_mem, const uint64_t in)
{
	const uint32_t *const ol_flags =
		(const uint32_t *)((const uint8_t *)lookup_mem +
				   PTYPE_ARRAY_SZ);

	return ol_flags[(in & 0xfff00000) >> 20];
}

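/* Assumed layout of lookup_mem for the two helpers above: a table of
 * 16-bit ptype entries occupying the first PTYPE_ARRAY_SZ bytes,
 * immediately followed by a table of 32-bit ol_flags entries indexed by
 * the error/checksum bits of W1 (bits 20..31).
 */
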
static inline uint64_t
nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
		    struct rte_mbuf *mbuf)
{
	/* There is no separate bit to check whether match_id
	 * is valid, and no flag to identify an
	 * RTE_FLOW_ACTION_TYPE_FLAG vs an RTE_FLOW_ACTION_TYPE_MARK
	 * action. The former case is addressed by treating 0 as an
	 * invalid value and incrementing/decrementing the match_id
	 * pair when MARK is activated. The latter case is addressed
	 * by defining CNXK_FLOW_MARK_DEFAULT as the value for
	 * RTE_FLOW_ACTION_TYPE_MARK.
	 * This translates to not using
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT - 1 and
	 * CNXK_FLOW_ACTION_FLAG_DEFAULT for match_id,
	 * i.e. valid mark_id's are from
	 * 0 to CNXK_FLOW_ACTION_FLAG_DEFAULT - 2.
	 */
	if (likely(match_id)) {
		ol_flags |= PKT_RX_FDIR;
		if (match_id != CNXK_FLOW_ACTION_FLAG_DEFAULT) {
			ol_flags |= PKT_RX_FDIR_ID;
			mbuf->hash.fdir.hi = match_id - 1;
		}
	}

	return ol_flags;
}

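/* Worked example (follows from the encoding above): an application MARK
 * action with id 5 is programmed into hardware as match_id = 6; on Rx
 * this helper sets PKT_RX_FDIR | PKT_RX_FDIR_ID and hands the original id
 * back as mbuf->hash.fdir.hi = 6 - 1 = 5.
 */
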
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
		    uint64_t rearm, const uint16_t flags)
{
	const rte_iova_t *iova_list;
	struct rte_mbuf *head;
	const rte_iova_t *eol;
	uint8_t nb_segs;
	uint64_t sg;

	sg = *(const uint64_t *)(rx + 1);
	nb_segs = (sg >> 48) & 0x3;

	if (nb_segs == 1) {
		mbuf->next = NULL;
		return;
	}

	mbuf->pkt_len = (rx->pkt_lenm1 + 1) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					       CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->data_len = (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ?
					  CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
	mbuf->nb_segs = nb_segs;
	sg = sg >> 16;

	eol = ((const rte_iova_t *)(rx + 1) +
	       ((rx->cn9k.desc_sizem1 + 1) << 1));
	/* Skip SG_S and first IOVA */
	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
	nb_segs--;

	/* Clear data_off from the rearm template for chained segments */
	rearm = rearm & ~0xFFFF;

	head = mbuf;
	while (nb_segs) {
		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
		mbuf = mbuf->next;

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

		mbuf->data_len = sg & 0xFFFF;
		sg = sg >> 16;
		*(uint64_t *)(&mbuf->rearm_data) = rearm;
		nb_segs--;
		iova_list++;

		if (!nb_segs && (iova_list + 1 < eol)) {
			sg = *(const uint64_t *)(iova_list);
			nb_segs = (sg >> 48) & 0x3;
			head->nb_segs += nb_segs;
			iova_list = (const rte_iova_t *)(iova_list + 1);
		}
	}

	mbuf->next = NULL;
}

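/* CQE layout assumed by the walk above (sketch): the parse structure is
 * followed by NIX_RX_SG_S, whose bits 48..49 hold the segment count and
 * whose low 48 bits pack up to three 16-bit segment sizes, followed by
 * one buffer IOVA per segment:
 *
 *	[CQE_HDR][RX_PARSE][SG_S][IOVA0][IOVA1][IOVA2]...
 *
 * Each IOVA points at packet data just past the buffer's mbuf header,
 * hence the "- 1" struct rte_mbuf adjustment when recovering the mbuf.
 */
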
static __rte_always_inline void
cn9k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
		     struct rte_mbuf *mbuf, const void *lookup_mem,
		     const uint64_t val, const uint16_t flag)
{
	const union nix_rx_parse_u *rx =
		(const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
	const uint16_t len = rx->cn9k.pkt_lenm1 + 1;
	const uint64_t w1 = *(const uint64_t *)rx;
	uint64_t ol_flags = 0;

	/* Mark mempool obj as "get" as it is alloc'ed by NIX */
	__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

	if (flag & NIX_RX_OFFLOAD_PTYPE_F)
		mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
	else
		mbuf->packet_type = 0;

	if (flag & NIX_RX_OFFLOAD_RSS_F) {
		mbuf->hash.rss = tag;
		ol_flags |= PKT_RX_RSS_HASH;
	}

	if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
		ol_flags |= nix_rx_olflags_get(lookup_mem, w1);

	if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
		if (rx->cn9k.vtag0_gone) {
			ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
			mbuf->vlan_tci = rx->cn9k.vtag0_tci;
		}
		if (rx->cn9k.vtag1_gone) {
			ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
			mbuf->vlan_tci_outer = rx->cn9k.vtag1_tci;
		}
	}

	if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
		ol_flags =
			nix_update_match_id(rx->cn9k.match_id, ol_flags, mbuf);

	mbuf->ol_flags = ol_flags;
	mbuf->pkt_len = len;
	mbuf->data_len = len;
	*(uint64_t *)(&mbuf->rearm_data) = val;

	if (flag & NIX_RX_MULTI_SEG_F)
		nix_cqe_xtract_mseg(rx, mbuf, val, flag);
	else
		mbuf->next = NULL;
}

static inline uint16_t
nix_rx_nb_pkts(struct cn9k_eth_rxq *rxq, const uint64_t wdata,
	       const uint16_t pkts, const uint32_t qmask)
{
	uint32_t available = rxq->available;

	/* Update the available count if the cached value is not enough */
	if (unlikely(available < pkts)) {
		uint64_t reg, head, tail;

		/* Use LDADDA version to avoid reorder */
		reg = roc_atomic64_add_sync(wdata, rxq->cq_status);
		/* CQ_OP_STATUS operation error */
		if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
		    reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
			return 0;

		tail = reg & 0xFFFFF;
		head = (reg >> 20) & 0xFFFFF;
		if (tail < head)
			available = tail - head + qmask + 1;
		else
			available = tail - head;

		rxq->available = available;
	}

	return RTE_MIN(pkts, available);
}

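/* Worked example (illustrative): for a 1024-entry CQ (qmask = 1023), a
 * wrapped snapshot of head = 1020, tail = 4 yields
 * available = 4 - 1020 + 1023 + 1 = 8 pending CQEs.
 */
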
static __rte_always_inline uint16_t
cn9k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
		   const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	const uint64_t mbuf_init = rxq->mbuf_initializer;
	const void *lookup_mem = rxq->lookup_mem;
	const uint64_t data_off = rxq->data_off;
	const uintptr_t desc = rxq->desc;
	const uint64_t wdata = rxq->wdata;
	const uint32_t qmask = rxq->qmask;
	uint16_t packets = 0, nb_pkts;
	uint32_t head = rxq->head;
	struct nix_cqe_hdr_s *cq;
	struct rte_mbuf *mbuf;

	nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

	while (packets < nb_pkts) {
		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal(
			(void *)(desc + (CQE_SZ((head + 2) & qmask))));
		cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

		mbuf = nix_get_mbuf_from_cqe(cq, data_off);

		cn9k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
				     flags);
		cnxk_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
					(flags & NIX_RX_OFFLOAD_TSTAMP_F),
					(flags & NIX_RX_MULTI_SEG_F),
					(uint64_t *)((uint8_t *)mbuf
						     + data_off));
		rx_pkts[packets++] = mbuf;
		roc_prefetch_store_keep(mbuf);
		head++;
		head &= qmask;
	}

	rxq->head = head;
	rxq->available -= nb_pkts;

	/* Free all the CQs that we've processed */
	plt_write64((wdata | nb_pkts), rxq->cq_door);

	return nb_pkts;
}

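/* Usage sketch (illustrative): the R() macros at the bottom of this file
 * generate flag-specialized wrappers whose bodies pin `flags` to a
 * compile-time constant so every offload branch folds away, e.g. a
 * hypothetical wrapper body:
 *
 *	uint16_t
 *	cn9k_nix_recv_pkts_rss(void *rxq, struct rte_mbuf **pkts, uint16_t n)
 *	{
 *		return cn9k_nix_recv_pkts(rxq, pkts, n, NIX_RX_OFFLOAD_RSS_F);
 *	}
 */
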
#if defined(RTE_ARCH_ARM64)

static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
	if (w2 & BIT_ULL(21) /* vtag0_gone */) {
		ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		*f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
	}

	return ol_flags;
}

static __rte_always_inline uint64_t
nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
{
	if (w2 & BIT_ULL(23) /* vtag1_gone */) {
		ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
		mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
	}

	return ol_flags;
}

static __rte_always_inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t pkts, const uint16_t flags)
{
	struct cn9k_eth_rxq *rxq = rx_queue;
	uint16_t packets = 0;
	uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
	const uint64_t mbuf_initializer = rxq->mbuf_initializer;
	const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
	uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
	uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
	uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	const uint16_t *lookup_mem = rxq->lookup_mem;
	const uint32_t qmask = rxq->qmask;
	const uint64_t wdata = rxq->wdata;
	const uintptr_t desc = rxq->desc;
	uint8x16_t f0, f1, f2, f3;
	uint32_t head = rxq->head;
	uint16_t pkts_left;

	pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
	pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);

	/* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
	pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);

	while (packets < pkts) {
		/* Exit loop if head is about to wrap and become unaligned */
		if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
		    NIX_DESCS_PER_LOOP) {
			pkts_left += (pkts - packets);
			break;
		}

		const uintptr_t cq0 = desc + CQE_SZ(head);

		/* Prefetch N desc ahead */
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
		rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));

		/* Get NIX_RX_SG_S for size and buffer pointer */
		cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
		cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
		cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
		cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));

		/* Extract mbuf from NIX_RX_SG_S */
		mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
		mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
		mbuf01 = vqsubq_u64(mbuf01, data_off);
		mbuf23 = vqsubq_u64(mbuf23, data_off);

		/* Move mbufs to scalar registers for future use */
		mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
		mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
		mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
		mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);

		/* Mask to get packet len from NIX_RX_SG_S */
		const uint8x16_t shuf_msk = {
			0xFF, 0xFF, /* pkt_type set as unknown */
			0xFF, 0xFF, /* pkt_type set as unknown */
			0, 1,	    /* octet 1~0, low 16 bits pkt_len */
			0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
			0, 1,	    /* octet 1~0, 16 bits data_len */
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

		/* Form the rx_descriptor_fields1 with pkt_len and data_len */
		f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
		f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
		f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
		f3 = vqtbl1q_u8(cq3_w8, shuf_msk);

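		/* The table lookup above is, in effect, a byte gather: for
		 * each destination byte, shuf_msk either copies the chosen
		 * source byte from the SG word (pkt_len/data_len live in its
		 * low 16 bits) or writes zero (0xFF lane), building
		 * rx_descriptor_fields1 without scalar shifts.
		 */
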
		/* Load CQE word0 and word 1 */
		uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
		uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
		uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
		uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
		uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
		uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
		uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
		uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];

		if (flags & NIX_RX_OFFLOAD_RSS_F) {
			/* Fill rss in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(cq0_w0, f0, 3);
			f1 = vsetq_lane_u32(cq1_w0, f1, 3);
			f2 = vsetq_lane_u32(cq2_w0, f2, 3);
			f3 = vsetq_lane_u32(cq3_w0, f3, 3);
			ol_flags0 = PKT_RX_RSS_HASH;
			ol_flags1 = PKT_RX_RSS_HASH;
			ol_flags2 = PKT_RX_RSS_HASH;
			ol_flags3 = PKT_RX_RSS_HASH;
		} else {
			ol_flags0 = 0;
			ol_flags1 = 0;
			ol_flags2 = 0;
			ol_flags3 = 0;
		}

		if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
			/* Fill packet_type in the rx_descriptor_fields1 */
			f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
					    f0, 0);
			f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
					    f1, 0);
			f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
					    f2, 0);
			f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
					    f3, 0);
		}

		if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
			ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
			ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
			ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
			ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
		}

		if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
			uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
			uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
			uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
			uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);

			ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
			ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
			ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
			ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);

			ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
			ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
			ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
			ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
			ol_flags0 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(0) + 38), ol_flags0,
				mbuf0);
			ol_flags1 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(1) + 38), ol_flags1,
				mbuf1);
			ol_flags2 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(2) + 38), ol_flags2,
				mbuf2);
			ol_flags3 = nix_update_match_id(
				*(uint16_t *)(cq0 + CQE_SZ(3) + 38), ol_flags3,
				mbuf3);
		}

		if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
			const uint16x8_t len_off = {
				0,			     /* ptype   0:15 */
				0,			     /* ptype  16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* pktlen  0:15 */
				0,			     /* pktlen 16:32 */
				CNXK_NIX_TIMESYNC_RX_OFFSET, /* datalen 0:15 */
				0,
				0,
				0};
			const uint32x4_t ptype = {RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC,
						  RTE_PTYPE_L2_ETHER_TIMESYNC};
			const uint64_t ts_olf = PKT_RX_IEEE1588_PTP |
						PKT_RX_IEEE1588_TMST |
						rxq->tstamp->rx_tstamp_dynflag;
			const uint32x4_t and_mask = {0x1, 0x2, 0x4, 0x8};
			uint64x2_t ts01, ts23, mask;
			uint64_t ts[4];
			uint8_t res;

			/* Subtract timesync length from total pkt length. */
			f0 = vsubq_u16(f0, len_off);
			f1 = vsubq_u16(f1, len_off);
			f2 = vsubq_u16(f2, len_off);
			f3 = vsubq_u16(f3, len_off);

			/* Get the address of the actual timestamp. */
			ts01 = vaddq_u64(mbuf01, data_off);
			ts23 = vaddq_u64(mbuf23, data_off);
			/* Load timestamp from address. */
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  0),
					      ts01, 0);
			ts01 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts01,
									  1),
					      ts01, 1);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  0),
					      ts23, 0);
			ts23 = vsetq_lane_u64(*(uint64_t *)vgetq_lane_u64(ts23,
									  1),
					      ts23, 1);
			/* Convert from be to cpu byteorder. */
			ts01 = vrev64q_u8(ts01);
			ts23 = vrev64q_u8(ts23);
			/* Store timestamp into scalars for later use. */
			ts[0] = vgetq_lane_u64(ts01, 0);
			ts[1] = vgetq_lane_u64(ts01, 1);
			ts[2] = vgetq_lane_u64(ts23, 0);
			ts[3] = vgetq_lane_u64(ts23, 1);

			/* Store timestamp into dynfield. */
			*cnxk_nix_timestamp_dynfield(mbuf0, rxq->tstamp) =
				ts[0];
			*cnxk_nix_timestamp_dynfield(mbuf1, rxq->tstamp) =
				ts[1];
			*cnxk_nix_timestamp_dynfield(mbuf2, rxq->tstamp) =
				ts[2];
			*cnxk_nix_timestamp_dynfield(mbuf3, rxq->tstamp) =
				ts[3];

			/* Generate ptype mask to filter L2 ether timesync */
			mask = vdupq_n_u32(vgetq_lane_u32(f0, 0));
			mask = vsetq_lane_u32(vgetq_lane_u32(f1, 0), mask, 1);
			mask = vsetq_lane_u32(vgetq_lane_u32(f2, 0), mask, 2);
			mask = vsetq_lane_u32(vgetq_lane_u32(f3, 0), mask, 3);

			/* Match against L2 ether timesync. */
			mask = vceqq_u32(mask, ptype);
			/* Convert the vector mask to a scalar mask. */
			res = vaddvq_u32(vandq_u32(mask, and_mask));

			/* Fill in the ol_flags for any packets that
			 * matched.
			 */
			ol_flags0 |= ((res & 0x1) ? ts_olf : 0);
			ol_flags1 |= ((res & 0x2) ? ts_olf : 0);
			ol_flags2 |= ((res & 0x4) ? ts_olf : 0);
			ol_flags3 |= ((res & 0x8) ? ts_olf : 0);

			/* Update Rxq timestamp with the latest
			 * timestamp.
			 */
			rxq->tstamp->rx_ready = 1;
			rxq->tstamp->rx_tstamp =
				ts[31 - __builtin_clz(res)];
		}

		/* Form rearm_data with ol_flags */
		rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
		rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
		rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
		rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);

		/* Update rx_descriptor_fields1 */
		vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
		vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
		vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
		vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);

		/* Update rearm_data */
		vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
		vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
		vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
		vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);

		/* Store the mbufs to rx_pkts */
		vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
		vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);

		if (flags & NIX_RX_MULTI_SEG_F) {
			/* Multi segment is enabled, build mseg list for
			 * individual mbufs in scalar mode.
			 */
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(0) + 8), mbuf0,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(1) + 8), mbuf1,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(2) + 8), mbuf2,
					    mbuf_initializer, flags);
			nix_cqe_xtract_mseg((union nix_rx_parse_u *)
						(cq0 + CQE_SZ(3) + 8), mbuf3,
					    mbuf_initializer, flags);
		} else {
			/* Mark that there are no more segments */
			mbuf0->next = NULL;
			mbuf1->next = NULL;
			mbuf2->next = NULL;
			mbuf3->next = NULL;
		}

		/* Prefetch mbufs */
		roc_prefetch_store_keep(mbuf0);
		roc_prefetch_store_keep(mbuf1);
		roc_prefetch_store_keep(mbuf2);
		roc_prefetch_store_keep(mbuf3);

		/* Mark mempool obj as "get" as it is alloc'ed by NIX */
		__mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
		__mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
		__mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
		__mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);

		/* Advance head pointer and packets */
		head += NIX_DESCS_PER_LOOP;
		head &= qmask;
		packets += NIX_DESCS_PER_LOOP;
	}

	rxq->head = head;
	rxq->available -= packets;

	rte_io_wmb();
	/* Free all the CQs that we've processed */
	plt_write64((rxq->wdata | packets), rxq->cq_door);

	/* Handle any leftover packets in scalar mode */
	if (unlikely(pkts_left))
		packets += cn9k_nix_recv_pkts(rx_queue, &rx_pkts[packets],
					      pkts_left, flags);

	return packets;
}

#else

static inline uint16_t
cn9k_nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
			  uint16_t pkts, const uint16_t flags)
{
	RTE_SET_USED(rx_queue);
	RTE_SET_USED(rx_pkts);
	RTE_SET_USED(pkts);
	RTE_SET_USED(flags);

	return 0;
}

#endif

#define RSS_F	  NIX_RX_OFFLOAD_RSS_F
#define PTYPE_F	  NIX_RX_OFFLOAD_PTYPE_F
#define CKSUM_F	  NIX_RX_OFFLOAD_CHECKSUM_F
#define MARK_F	  NIX_RX_OFFLOAD_MARK_UPDATE_F
#define TS_F	  NIX_RX_OFFLOAD_TSTAMP_F
#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F

/* [RX_VLAN_F] [TS] [MARK] [CKSUM] [PTYPE] [RSS] */
#define NIX_RX_FASTPATH_MODES							\
R(no_offload,			0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE)		\
R(rss,				0, 0, 0, 0, 0, 1, RSS_F)			\
R(ptype,			0, 0, 0, 0, 1, 0, PTYPE_F)			\
R(ptype_rss,			0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F)		\
R(cksum,			0, 0, 0, 1, 0, 0, CKSUM_F)			\
R(cksum_rss,			0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F)		\
R(cksum_ptype,			0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F)		\
R(cksum_ptype_rss,		0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F)	\
R(mark,				0, 0, 1, 0, 0, 0, MARK_F)			\
R(mark_rss,			0, 0, 1, 0, 0, 1, MARK_F | RSS_F)		\
R(mark_ptype,			0, 0, 1, 0, 1, 0, MARK_F | PTYPE_F)		\
R(mark_ptype_rss,		0, 0, 1, 0, 1, 1, MARK_F | PTYPE_F | RSS_F)	\
R(mark_cksum,			0, 0, 1, 1, 0, 0, MARK_F | CKSUM_F)		\
R(mark_cksum_rss,		0, 0, 1, 1, 0, 1, MARK_F | CKSUM_F | RSS_F)	\
R(mark_cksum_ptype,		0, 0, 1, 1, 1, 0, MARK_F | CKSUM_F | PTYPE_F)	\
R(mark_cksum_ptype_rss,		0, 0, 1, 1, 1, 1,				\
			MARK_F | CKSUM_F | PTYPE_F | RSS_F)			\
R(ts,				0, 1, 0, 0, 0, 0, TS_F)				\
R(ts_rss,			0, 1, 0, 0, 0, 1, TS_F | RSS_F)			\
R(ts_ptype,			0, 1, 0, 0, 1, 0, TS_F | PTYPE_F)		\
R(ts_ptype_rss,			0, 1, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F)	\
R(ts_cksum,			0, 1, 0, 1, 0, 0, TS_F | CKSUM_F)		\
R(ts_cksum_rss,			0, 1, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F)	\
R(ts_cksum_ptype,		0, 1, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F)	\
R(ts_cksum_ptype_rss,		0, 1, 0, 1, 1, 1,				\
			TS_F | CKSUM_F | PTYPE_F | RSS_F)			\
R(ts_mark,			0, 1, 1, 0, 0, 0, TS_F | MARK_F)		\
R(ts_mark_rss,			0, 1, 1, 0, 0, 1, TS_F | MARK_F | RSS_F)	\
R(ts_mark_ptype,		0, 1, 1, 0, 1, 0, TS_F | MARK_F | PTYPE_F)	\
R(ts_mark_ptype_rss,		0, 1, 1, 0, 1, 1,				\
			TS_F | MARK_F | PTYPE_F | RSS_F)			\
R(ts_mark_cksum,		0, 1, 1, 1, 0, 0, TS_F | MARK_F | CKSUM_F)	\
R(ts_mark_cksum_rss,		0, 1, 1, 1, 0, 1,				\
			TS_F | MARK_F | CKSUM_F | RSS_F)			\
R(ts_mark_cksum_ptype,		0, 1, 1, 1, 1, 0,				\
			TS_F | MARK_F | CKSUM_F | PTYPE_F)			\
R(ts_mark_cksum_ptype_rss,	0, 1, 1, 1, 1, 1,				\
			TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)		\
R(vlan,				1, 0, 0, 0, 0, 0, RX_VLAN_F)			\
R(vlan_rss,			1, 0, 0, 0, 0, 1, RX_VLAN_F | RSS_F)		\
R(vlan_ptype,			1, 0, 0, 0, 1, 0, RX_VLAN_F | PTYPE_F)		\
R(vlan_ptype_rss,		1, 0, 0, 0, 1, 1, RX_VLAN_F | PTYPE_F | RSS_F)	\
R(vlan_cksum,			1, 0, 0, 1, 0, 0, RX_VLAN_F | CKSUM_F)		\
R(vlan_cksum_rss,		1, 0, 0, 1, 0, 1, RX_VLAN_F | CKSUM_F | RSS_F)	\
R(vlan_cksum_ptype,		1, 0, 0, 1, 1, 0,				\
			RX_VLAN_F | CKSUM_F | PTYPE_F)				\
R(vlan_cksum_ptype_rss,		1, 0, 0, 1, 1, 1,				\
			RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F)			\
R(vlan_mark,			1, 0, 1, 0, 0, 0, RX_VLAN_F | MARK_F)		\
R(vlan_mark_rss,		1, 0, 1, 0, 0, 1, RX_VLAN_F | MARK_F | RSS_F)	\
R(vlan_mark_ptype,		1, 0, 1, 0, 1, 0, RX_VLAN_F | MARK_F | PTYPE_F)	\
R(vlan_mark_ptype_rss,		1, 0, 1, 0, 1, 1,				\
			RX_VLAN_F | MARK_F | PTYPE_F | RSS_F)			\
R(vlan_mark_cksum,		1, 0, 1, 1, 0, 0, RX_VLAN_F | MARK_F | CKSUM_F)	\
R(vlan_mark_cksum_rss,		1, 0, 1, 1, 0, 1,				\
			RX_VLAN_F | MARK_F | CKSUM_F | RSS_F)			\
R(vlan_mark_cksum_ptype,	1, 0, 1, 1, 1, 0,				\
			RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F)			\
R(vlan_mark_cksum_ptype_rss,	1, 0, 1, 1, 1, 1,				\
			RX_VLAN_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)		\
R(vlan_ts,			1, 1, 0, 0, 0, 0, RX_VLAN_F | TS_F)		\
R(vlan_ts_rss,			1, 1, 0, 0, 0, 1, RX_VLAN_F | TS_F | RSS_F)	\
R(vlan_ts_ptype,		1, 1, 0, 0, 1, 0, RX_VLAN_F | TS_F | PTYPE_F)	\
R(vlan_ts_ptype_rss,		1, 1, 0, 0, 1, 1,				\
			RX_VLAN_F | TS_F | PTYPE_F | RSS_F)			\
R(vlan_ts_cksum,		1, 1, 0, 1, 0, 0, RX_VLAN_F | TS_F | CKSUM_F)	\
R(vlan_ts_cksum_rss,		1, 1, 0, 1, 0, 1,				\
			RX_VLAN_F | TS_F | CKSUM_F | RSS_F)			\
R(vlan_ts_cksum_ptype,		1, 1, 0, 1, 1, 0,				\
			RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F)			\
R(vlan_ts_cksum_ptype_rss,	1, 1, 0, 1, 1, 1,				\
			RX_VLAN_F | TS_F | CKSUM_F | PTYPE_F | RSS_F)		\
R(vlan_ts_mark,			1, 1, 1, 0, 0, 0, RX_VLAN_F | TS_F | MARK_F)	\
R(vlan_ts_mark_rss,		1, 1, 1, 0, 0, 1,				\
			RX_VLAN_F | TS_F | MARK_F | RSS_F)			\
R(vlan_ts_mark_ptype,		1, 1, 1, 0, 1, 0,				\
			RX_VLAN_F | TS_F | MARK_F | PTYPE_F)			\
R(vlan_ts_mark_ptype_rss,	1, 1, 1, 0, 1, 1,				\
			RX_VLAN_F | TS_F | MARK_F | PTYPE_F | RSS_F)		\
R(vlan_ts_mark_cksum,		1, 1, 1, 1, 0, 0,				\
			RX_VLAN_F | TS_F | MARK_F | CKSUM_F)			\
R(vlan_ts_mark_cksum_rss,	1, 1, 1, 1, 0, 1,				\
			RX_VLAN_F | TS_F | MARK_F | CKSUM_F | RSS_F)		\
R(vlan_ts_mark_cksum_ptype,	1, 1, 1, 1, 1, 0,				\
			RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F)		\
R(vlan_ts_mark_cksum_ptype_rss,	1, 1, 1, 1, 1, 1,				\
			RX_VLAN_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F)

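/* Illustrative expansion (sketch): the "rss" row above, combined with the
 * R() definition below, declares
 *
 *	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_rss(
 *		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);
 *
 * plus mseg/vec/vec_mseg variants; their definitions elsewhere in the
 * driver call the inline receive routines with flags = RSS_F so the
 * offload checks resolve at compile time.
 */
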
#define R(name, f5, f4, f3, f2, f1, f0, flags)				       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_##name(	       \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_mseg_##name(      \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_##name(       \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);     \
									       \
	uint16_t __rte_noinline __rte_hot cn9k_nix_recv_pkts_vec_mseg_##name(  \
		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts);

NIX_RX_FASTPATH_MODES
#undef R

#endif /* __CN9K_RX_H__ */