/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"

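/*
 * Fast path Tx/Rx routines for the ThunderX NICVF.
 *
 * On Tx, every packet is posted to the send queue (SQ) as one header
 * sub-descriptor followed by one gather sub-descriptor per mbuf segment.
 */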
static inline void __rte_hot
fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	/* Local variable sqe to avoid read from sq desc memory */
	union sq_entry_t sqe;
	uint64_t ol_flags;

	/* Fill SQ header descriptor */
	sqe.buff[0] = 0;
	sqe.hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Number of sub-descriptors following this one */
	sqe.hdr.subdesc_cnt = pkt->nb_segs;
	sqe.hdr.tot_len = pkt->pkt_len;

	ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
	if (unlikely(ol_flags)) {
		/* L4 checksum offload */
		uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;

		if (l4_flags == PKT_TX_TCP_CKSUM)
			sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
		else if (l4_flags == PKT_TX_UDP_CKSUM)
			sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
		else
			sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;

		sqe.hdr.l3_offset = pkt->l2_len;
		sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;

		/* L3 checksum offload */
		if (ol_flags & PKT_TX_IP_CKSUM)
			sqe.hdr.csum_l3 = 1;
	}

	entry->buff[0] = sqe.buff[0];
}

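/*
 * Variant used by the multi-segment Tx path: besides the header word it
 * also clears the second 64-bit word of the SQ entry, which the
 * single-segment path leaves untouched.
 */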
static inline void __rte_hot
fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
			    struct rte_mbuf *pkt)
{
	fill_sq_desc_header(entry, pkt);
	entry->buff[1] = 0ULL;
}

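/*
 * Tx completion handling: the hardware advances the SQ head register as
 * descriptors are transmitted, so every entry between the software head
 * and the hardware head is done and its mbuf can be released. The
 * single-pool variant batches the mbufs into one rte_mempool_put_bulk()
 * call; the multi-pool variant frees each segment back to its own pool.
 */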
void __rte_hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
	int j = 0;
	uint32_t curr_head;
	uint32_t head = sq->head;
	struct rte_mbuf **txbuffs = sq->txbuffs;
	void *obj_p[NICVF_MAX_TX_FREE_THRESH] __rte_cache_aligned;

	curr_head = nicvf_addr_read(sq->sq_head) >> 4;
	while (head != curr_head) {
		if (txbuffs[head])
			obj_p[j++] = txbuffs[head];
		head = (head + 1) & sq->qlen_mask;
	}
	rte_mempool_put_bulk(sq->pool, obj_p, j);
	sq->head = curr_head;
	sq->xmit_bufs -= j;
	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}

void __rte_hot
nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
	uint32_t n = 0;
	uint32_t curr_head;
	uint32_t head = sq->head;
	struct rte_mbuf **txbuffs = sq->txbuffs;

	curr_head = nicvf_addr_read(sq->sq_head) >> 4;
	while (head != curr_head) {
		if (txbuffs[head]) {
			rte_pktmbuf_free_seg(txbuffs[head]);
			n++;
		}
		head = (head + 1) & sq->qlen_mask;
	}
	sq->head = curr_head;
	sq->xmit_bufs -= n;
	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}

static inline uint32_t __rte_hot
nicvf_free_tx_desc(struct nicvf_txq *sq)
{
	return ((sq->head - sq->tail - 1) & sq->qlen_mask);
}

/* Send Header + Packet */
#define TX_DESC_PER_PKT 2

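/*
 * Reclaim completed Tx buffers when the ring cannot accept the whole
 * burst or when the outstanding buffer count has crossed tx_free_thresh.
 * The pool pointer is latched lazily from the first packet seen.
 */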
static inline uint32_t __rte_hot
nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
{
	uint32_t free_desc = nicvf_free_tx_desc(sq);

	if (free_desc < nb_pkts * TX_DESC_PER_PKT ||
			sq->xmit_bufs > sq->tx_free_thresh) {
		if (unlikely(sq->pool == NULL))
			sq->pool = tx_pkts[0]->pool;

		/* Release completed buffers via the queue's free handler */
		sq->pool_free(sq);
		/* Freed now, let's see the number of free descs again */
		free_desc = nicvf_free_tx_desc(sq);
	}
	return free_desc;
}

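/*
 * Single-segment Tx burst: consumes TX_DESC_PER_PKT (header + gather) SQ
 * entries per packet and rings the SQ doorbell with the number of entries
 * written.
 */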
uint16_t __rte_hot
nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;
	uint32_t free_desc, tail;
	struct nicvf_txq *sq = tx_queue;
	union sq_entry_t *desc_ptr = sq->desc;
	struct rte_mbuf **txbuffs = sq->txbuffs;
	struct rte_mbuf *pkt;
	uint32_t qlen_mask = sq->qlen_mask;

	tail = sq->tail;
	free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);

	for (i = 0; i < nb_pkts && (int)free_desc >= TX_DESC_PER_PKT; i++) {
		pkt = tx_pkts[i];

		txbuffs[tail] = NULL;
		fill_sq_desc_header(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;

		txbuffs[tail] = pkt;
		fill_sq_desc_gather(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;
		free_desc -= TX_DESC_PER_PKT;
	}

	if (likely(i)) {
		sq->tail = tail;
		sq->xmit_bufs += i;

		rte_wmb();
		/* Inform HW to xmit the packets */
		nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
	}
	return i;
}

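/*
 * Multi-segment Tx burst: one header entry plus one gather entry per
 * segment. A packet is queued only if all of its entries fit into the
 * free descriptor space computed up front; otherwise the burst is cut
 * short.
 */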
uint16_t __rte_hot
nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	int i, k;
	uint16_t nb_segs;
	uint32_t used_desc, next_used_desc, used_bufs, free_desc, tail;
	struct nicvf_txq *sq = tx_queue;
	union sq_entry_t *desc_ptr = sq->desc;
	struct rte_mbuf **txbuffs = sq->txbuffs;
	struct rte_mbuf *pkt, *seg;
	uint32_t qlen_mask = sq->qlen_mask;

	tail = sq->tail;
	used_desc = 0;
	used_bufs = 0;

	free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		pkt = tx_pkts[i];
		nb_segs = pkt->nb_segs;

		next_used_desc = used_desc + nb_segs + 1;
		if (next_used_desc > free_desc)
			break;
		used_desc = next_used_desc;
		used_bufs += nb_segs;

		txbuffs[tail] = NULL;
		fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;

		txbuffs[tail] = pkt;
		fill_sq_desc_gather(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;

		seg = pkt->next;
		for (k = 1; k < nb_segs; k++) {
			txbuffs[tail] = seg;
			fill_sq_desc_gather(desc_ptr + tail, seg);
			tail = (tail + 1) & qlen_mask;
			seg = seg->next;
		}
	}

	if (likely(used_desc)) {
		sq->tail = tail;
		sq->xmit_bufs += used_bufs;

		rte_wmb();
		/* Inform HW to xmit the packets */
		nicvf_addr_write(sq->sq_door, used_desc);
	}
	return i;
}

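/*
 * Translate the CQE RX word0 l3_type/l4_type pair into an RTE_PTYPE_*
 * value. Combinations not listed below stay zero-initialized, which is
 * RTE_PTYPE_UNKNOWN.
 */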
static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
	[L3_NONE][L4_NONE] = RTE_PTYPE_UNKNOWN,
	[L3_NONE][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[L3_NONE][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[L3_NONE][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[L3_NONE][L4_TCP] = RTE_PTYPE_L4_TCP,
	[L3_NONE][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
	[L3_NONE][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[L3_NONE][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
	[L3_NONE][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[L3_NONE][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[L3_NONE][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV4][L4_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[L3_IPV4][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
	[L3_IPV4][L4_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
	[L3_IPV4][L4_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[L3_IPV4][L4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[L3_IPV4][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[L3_IPV4][L4_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV4][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[L3_IPV4][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV4][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV4][L4_NVGRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV4_OPT][L4_NONE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV4_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_L3_IPV4,
	[L3_IPV4_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
	[L3_IPV4_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV4_OPT][L4_TCP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
	[L3_IPV4_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV4_OPT][L4_GRE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV4_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV4_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV4_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV4_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV6][L4_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[L3_IPV6][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
	[L3_IPV6][L4_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
	[L3_IPV6][L4_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[L3_IPV6][L4_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[L3_IPV6][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[L3_IPV6][L4_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV6][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[L3_IPV6][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV6][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV6][L4_NVGRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV6_OPT][L4_NONE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV6_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_L3_IPV4,
	[L3_IPV6_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
	[L3_IPV6_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV6_OPT][L4_TCP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
	[L3_IPV6_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV6_OPT][L4_GRE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV6_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV6_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV6_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV6_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_TUNNEL_NVGRE,

	[L3_ET_STOP][L4_NONE] = RTE_PTYPE_UNKNOWN,
	[L3_ET_STOP][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[L3_ET_STOP][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[L3_ET_STOP][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[L3_ET_STOP][L4_TCP] = RTE_PTYPE_L4_TCP,
	[L3_ET_STOP][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
	[L3_ET_STOP][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[L3_ET_STOP][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
	[L3_ET_STOP][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[L3_ET_STOP][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[L3_ET_STOP][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[L3_OTHER][L4_NONE] = RTE_PTYPE_UNKNOWN,
	[L3_OTHER][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[L3_OTHER][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[L3_OTHER][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[L3_OTHER][L4_TCP] = RTE_PTYPE_L4_TCP,
	[L3_OTHER][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
	[L3_OTHER][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[L3_OTHER][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
	[L3_OTHER][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[L3_OTHER][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
};

static inline uint32_t __rte_hot
nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
{
	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}

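/*
 * Rx checksum flags derived from the CQE error opcode: index 0 is the
 * no-error case, index 1 an IP checksum error, index 2 an L4 checksum
 * error.
 */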
static inline uint64_t __rte_hot
nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
{
	static const uint64_t flag_table[3] __rte_cache_aligned = {
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
	};

	const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
		(cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
	return flag_table[idx];
}

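/*
 * Refill the receive buffer descriptor ring (RBDR): reserve a slice of
 * slots with an atomic fetch-add on next_tail, fill them with buffer
 * physical addresses, then publish the new tail and ring the doorbell
 * once any earlier in-flight refills of the shared RBDR have published.
 */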
static inline int __rte_hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
	int i;
	uint32_t ltail, next_tail;
	struct nicvf_rbdr *rbdr = rxq->shared_rbdr;
	uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
	struct rbdr_entry_t *desc = rbdr->desc;
	uint32_t qlen_mask = rbdr->qlen_mask;
	uintptr_t door = rbdr->rbdr_door;
	void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;

	if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			to_fill;
		return 0;
	}

	NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
		(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));

	next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
					__ATOMIC_ACQUIRE);
	ltail = next_tail;
	for (i = 0; i < to_fill; i++) {
		struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);

		entry->full_addr = nicvf_mbuff_virt2phy((uintptr_t)obj_p[i],
							mbuf_phys_off);
		ltail++;
	}

	rte_wait_until_equal_32(&rbdr->tail, next_tail, __ATOMIC_RELAXED);

	__atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
	nicvf_addr_write(door, to_fill);
	return to_fill;
}

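/*
 * Re-read the CQE count from the CQ status register only when the cached
 * available_space cannot cover the requested burst.
 */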
static inline int32_t __rte_hot
nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
			 int32_t available_space)
{
	if (unlikely(available_space < nb_pkts))
		rxq->available_space = nicvf_addr_read(rxq->cq_status)
					& NICVF_CQ_CQE_COUNT_MASK;

	return RTE_MIN(nb_pkts, available_space);
}

static inline void __rte_hot
nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
		 struct rte_mbuf *pkt)
{
	if (likely(cqe_rx_w0.rss_alg)) {
		pkt->hash.rss = cqe_rx_w2.rss_tag;
		pkt->ol_flags |= PKT_RX_RSS_HASH;
	}
}

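/*
 * Common single-segment Rx routine. 'flag' is a compile-time constant in
 * every exported wrapper, so the per-offload branches are resolved when
 * this always-inline function is expanded.
 */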
static __rte_always_inline uint16_t
nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
		const uint32_t flag)
{
	uint32_t i, to_process;
	struct cqe_rx_t *cqe_rx;
	struct rte_mbuf *pkt;
	cqe_rx_word0_t cqe_rx_w0;
	cqe_rx_word1_t cqe_rx_w1;
	cqe_rx_word2_t cqe_rx_w2;
	cqe_rx_word3_t cqe_rx_w3;
	struct nicvf_rxq *rxq = rx_queue;
	union cq_entry_t *desc = rxq->desc;
	const uint64_t cqe_mask = rxq->qlen_mask;
	uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
	const uint64_t mbuf_init = rxq->mbuf_initializer.value;
	uint32_t cqe_head = rxq->head & cqe_mask;
	int32_t available_space = rxq->available_space;
	const uint8_t rbptr_offset = rxq->rbptr_offset;

	to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);

	for (i = 0; i < to_process; i++) {
		rte_prefetch_non_temporal(&desc[cqe_head + 2]);
		cqe_rx = (struct cqe_rx_t *)&desc[cqe_head];
		NICVF_RX_ASSERT(((struct cq_entry_type_t *)cqe_rx)->cqe_type
				== CQE_TYPE_RX);

		NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
		NICVF_LOAD_PAIR(cqe_rx_w2.u64, cqe_rx_w3.u64, &cqe_rx->word2);
		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
			(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);

		if (flag & NICVF_RX_OFFLOAD_NONE)
			pkt->ol_flags = 0;
		if (flag & NICVF_RX_OFFLOAD_CKSUM)
			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
			if (unlikely(cqe_rx_w0.vlan_stripped)) {
				pkt->ol_flags |= PKT_RX_VLAN
						| PKT_RX_VLAN_STRIPPED;
				pkt->vlan_tci =
					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
			}
		}
		pkt->data_len = cqe_rx_w3.rb0_sz;
		pkt->pkt_len = cqe_rx_w3.rb0_sz;
		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
		nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
		nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
		rx_pkts[i] = pkt;
		cqe_head = (cqe_head + 1) & cqe_mask;
		nicvf_prefetch_store_keep(pkt);
	}

	if (likely(to_process)) {
		rxq->available_space -= to_process;
		rxq->head = cqe_head;
		nicvf_addr_write(rxq->cq_door, to_process);
		rxq->recv_buffers += to_process;
	}
	if (rxq->recv_buffers > rxq->rx_free_thresh) {
		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
	}

	return to_process;
}

uint16_t __rte_hot
nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts)
{
	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
			NICVF_RX_OFFLOAD_NONE);
}

uint16_t __rte_hot
nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts)
{
	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
			NICVF_RX_OFFLOAD_CKSUM);
}

uint16_t __rte_hot
nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts)
{
	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
		NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
}

uint16_t __rte_hot
nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
				 uint16_t nb_pkts)
{
	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
		NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
}

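/*
 * Build one mbuf chain from a multi-segment Rx CQE: the entry carries
 * rb_cnt buffer pointers plus per-buffer sizes, which are linked through
 * pkt->next. Returns the number of receive buffers consumed.
 */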
static __rte_always_inline uint16_t __rte_hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
			uint64_t mbuf_phys_off,
			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
			uint64_t mbuf_init, const uint32_t flag)
{
	struct rte_mbuf *pkt, *seg, *prev;
	cqe_rx_word0_t cqe_rx_w0;
	cqe_rx_word1_t cqe_rx_w1;
	cqe_rx_word2_t cqe_rx_w2;
	uint16_t *rb_sz, nb_segs, seg_idx;
	uint64_t *rb_ptr;

	NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
	NICVF_RX_ASSERT(cqe_rx_w0.cqe_type == CQE_TYPE_RX);
	cqe_rx_w2 = cqe_rx->word2;
	rb_sz = &cqe_rx->word3.rb0_sz;
	rb_ptr = (uint64_t *)cqe_rx + rbptr_offset;
	nb_segs = cqe_rx_w0.rb_cnt;
	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);

	pkt->pkt_len = cqe_rx_w1.pkt_len;
	pkt->data_len = rb_sz[nicvf_frag_num(0)];
	nicvf_mbuff_init_mseg_update(
				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
	if (flag & NICVF_RX_OFFLOAD_NONE)
		pkt->ol_flags = 0;
	if (flag & NICVF_RX_OFFLOAD_CKSUM)
		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
		if (unlikely(cqe_rx_w0.vlan_stripped)) {
			pkt->ol_flags |= PKT_RX_VLAN
					| PKT_RX_VLAN_STRIPPED;
			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
		}
	}
	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);

	*rx_pkt = pkt;
	prev = pkt;
	for (seg_idx = 1; seg_idx < nb_segs; seg_idx++) {
		seg = (struct rte_mbuf *)nicvf_mbuff_phy2virt
			(rb_ptr[seg_idx], mbuf_phys_off);

		prev->next = seg;
		seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
		nicvf_mbuff_init_update(seg, mbuf_init, 0);
		prev = seg;
	}
	prev->next = NULL;
	return nb_segs;
}

static __rte_always_inline uint16_t __rte_hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts, const uint32_t flag)
{
	union cq_entry_t *cq_entry;
	struct cqe_rx_t *cqe_rx;
	struct nicvf_rxq *rxq = rx_queue;
	union cq_entry_t *desc = rxq->desc;
	const uint64_t cqe_mask = rxq->qlen_mask;
	uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
	uint32_t i, to_process, cqe_head, buffers_consumed = 0;
	int32_t available_space = rxq->available_space;
	uint16_t nb_segs;
	const uint64_t mbuf_init = rxq->mbuf_initializer.value;
	const uint8_t rbptr_offset = rxq->rbptr_offset;

	cqe_head = rxq->head & cqe_mask;
	to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);

	for (i = 0; i < to_process; i++) {
		rte_prefetch_non_temporal(&desc[cqe_head + 2]);
		cq_entry = &desc[cqe_head];
		cqe_rx = (struct cqe_rx_t *)cq_entry;
		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
			rx_pkts + i, rbptr_offset, mbuf_init, flag);
		buffers_consumed += nb_segs;
		cqe_head = (cqe_head + 1) & cqe_mask;
		nicvf_prefetch_store_keep(rx_pkts[i]);
	}

	if (likely(to_process)) {
		rxq->available_space -= to_process;
		rxq->head = cqe_head;
		nicvf_addr_write(rxq->cq_door, to_process);
		rxq->recv_buffers += buffers_consumed;
	}
	if (rxq->recv_buffers > rxq->rx_free_thresh) {
		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
	}

	return to_process;
}

uint16_t __rte_hot
nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts)
{
	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
			NICVF_RX_OFFLOAD_NONE);
}

uint16_t __rte_hot
nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
			       uint16_t nb_pkts)
{
	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
			NICVF_RX_OFFLOAD_CKSUM);
}

uint16_t __rte_hot
nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts)
{
	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
		NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
}

uint16_t __rte_hot
nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
		NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
}

uint32_t
nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nicvf_rxq *rxq;

	rxq = dev->data->rx_queues[queue_idx];
	return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
}

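/*
 * Push all buffers accounted in recv_buffers back into the RBDR; called
 * from slow-path control code rather than from the Rx burst routines.
 */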
uint32_t
nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nicvf_rxq *rxq;
	uint32_t to_process;
	uint32_t rx_free;

	rxq = dev->data->rx_queues[queue_idx];
	to_process = rxq->recv_buffers;
	while (rxq->recv_buffers > 0) {
		rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
	}

	assert(rxq->recv_buffers == 0);
	return to_process;
}