1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
20 #include <mlx5_common.h>
22 #include "mlx5_autoconf.h"
23 #include "mlx5_defs.h"
26 #include "mlx5_utils.h"
27 #include "mlx5_rxtx.h"
31 static __rte_always_inline uint32_t
32 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
33 volatile struct mlx5_mini_cqe8 *mcqe);
35 static __rte_always_inline int
36 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
37 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
39 static __rte_always_inline uint32_t
40 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
42 static __rte_always_inline void
43 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
44 volatile struct mlx5_cqe *cqe,
45 volatile struct mlx5_mini_cqe8 *mcqe);
48 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
49 volatile struct mlx5_cqe *__rte_restrict cqe,
50 uint32_t phcsum, uint8_t l4_type);
53 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
54 volatile struct mlx5_cqe *__rte_restrict cqe,
55 volatile struct mlx5_mini_cqe8 *mcqe,
56 struct mlx5_rxq_data *rxq, uint32_t len);
60 * Internal function to compute the number of used descriptors in an RX queue.
66 * The number of used Rx descriptors.
69 rx_queue_count(struct mlx5_rxq_data *rxq)
71 struct rxq_zip *zip = &rxq->zip;
72 volatile struct mlx5_cqe *cqe;
73 const unsigned int cqe_n = (1 << rxq->cqe_n);
74 const unsigned int sges_n = (1 << rxq->sges_n);
75 const unsigned int elts_n = (1 << rxq->elts_n);
76 const unsigned int strd_n = (1 << rxq->strd_num_n);
77 const unsigned int cqe_cnt = cqe_n - 1;
78 unsigned int cq_ci, used;
80 /* If we are processing a compressed CQE. */
82 used = zip->cqe_cnt - zip->ai;
88 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
89 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
94 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
95 n = rte_be_to_cpu_32(cqe->byte_cnt);
100 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
102 used = RTE_MIN(used * sges_n, elts_n * strd_n);
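/*
 * Worked example for the clamping above (hypothetical numbers, not taken
 * from the driver): with 2 segments per packet (sges_n = 2), 256 ring
 * entries (elts_n = 256) and no MPRQ (strd_n = 1), 100 completed CQEs
 * account for RTE_MIN(100 * 2, 256 * 1) = 200 used descriptors, so the
 * reported value can never exceed the ring capacity.
 */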
107 * DPDK callback to check the status of a Rx descriptor.
112 * The index of the descriptor in the ring.
115 * The status of the Rx descriptor.
118 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
120 struct mlx5_rxq_data *rxq = rx_queue;
121 struct mlx5_rxq_ctrl *rxq_ctrl =
122 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
123 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
125 if (dev->rx_pkt_burst == NULL ||
126 dev->rx_pkt_burst == removed_rx_burst) {
130 if (offset >= (1 << rxq->cqe_n)) {
134 if (offset < rx_queue_count(rxq))
135 return RTE_ETH_RX_DESC_DONE;
136 return RTE_ETH_RX_DESC_AVAIL;
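/*
 * Usage sketch from the application side (hypothetical port/queue/offset
 * values and helper name; assumes <rte_ethdev.h>): the generic ethdev call
 * below ends up in mlx5_rx_descriptor_status() for mlx5 ports.
 */
static inline void
example_rx_desc_probe(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
        int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

        if (status == RTE_ETH_RX_DESC_DONE) {
                /* The descriptor holds a packet ready to be retrieved. */
        } else if (status == RTE_ETH_RX_DESC_AVAIL) {
                /* The descriptor is still owned by the HW. */
        } else {
                /* Negative value: invalid queue or offset. */
        }
}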
140 * DPDK callback to get the RX queue information.
143 * Pointer to the device structure.
146 * Rx queue identifier.
149 * Pointer to the RX queue information structure.
156 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
157 struct rte_eth_rxq_info *qinfo)
159 struct mlx5_priv *priv = dev->data->dev_private;
160 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
161 struct mlx5_rxq_ctrl *rxq_ctrl =
162 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
166 qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
167 rxq->mprq_mp : rxq->mp;
168 qinfo->conf.rx_thresh.pthresh = 0;
169 qinfo->conf.rx_thresh.hthresh = 0;
170 qinfo->conf.rx_thresh.wthresh = 0;
171 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
172 qinfo->conf.rx_drop_en = 1;
173 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
174 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
175 qinfo->scattered_rx = dev->data->scattered_rx;
176 qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
177 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
178 (1 << rxq->elts_n);
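/*
 * Usage sketch (hypothetical port/queue values and helper name; assumes
 * <rte_ethdev.h> and <stdio.h>): query the fields filled in by
 * mlx5_rxq_info_get() through the generic ethdev API.
 */
static inline void
example_rxq_info_dump(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info qinfo;

        if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
                printf("rxq %u: %u descriptors, scattered=%u, mempool=%s\n",
                       queue_id, qinfo.nb_desc, qinfo.scattered_rx,
                       qinfo.mp != NULL ? qinfo.mp->name : "n/a");
}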
182 * DPDK callback to get the RX packet burst mode information.
185 * Pointer to the device structure.
188 * Rx queue identifier.
191 * Pointer to the burst mode information.
194 * 0 on success, -EINVAL on failure.
197 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
198 uint16_t rx_queue_id __rte_unused,
199 struct rte_eth_burst_mode *mode)
201 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
202 struct mlx5_priv *priv = dev->data->dev_private;
203 struct mlx5_rxq_data *rxq;
205 rxq = (*priv->rxqs)[rx_queue_id];
210 if (pkt_burst == mlx5_rx_burst) {
211 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
212 } else if (pkt_burst == mlx5_rx_burst_mprq) {
213 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
214 } else if (pkt_burst == mlx5_rx_burst_vec) {
215 #if defined RTE_ARCH_X86_64
216 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
217 #elif defined RTE_ARCH_ARM64
218 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
219 #elif defined RTE_ARCH_PPC_64
220 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
224 } else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
225 #if defined RTE_ARCH_X86_64
226 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
227 #elif defined RTE_ARCH_ARM64
228 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
229 #elif defined RTE_ARCH_PPC_64
230 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
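/*
 * Usage sketch (hypothetical port/queue values and helper name; assumes
 * <rte_ethdev.h> and <stdio.h>): print the Rx burst mode string filled in
 * above, e.g. "Scalar", "Multi-Packet RQ" or "Vector SSE".
 */
static inline void
example_rx_burst_mode_dump(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_burst_mode mode;

        if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
                printf("port %u rxq %u burst mode: %s\n",
                       port_id, queue_id, mode.info);
}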
241 * DPDK callback to get the number of used descriptors in an RX queue.
244 * Pointer to the device structure.
250 * The number of used Rx descriptors.
251 * -EINVAL if the queue is invalid.
254 mlx5_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
256 struct mlx5_priv *priv = dev->data->dev_private;
257 struct mlx5_rxq_data *rxq;
259 if (dev->rx_pkt_burst == NULL ||
260 dev->rx_pkt_burst == removed_rx_burst) {
264 rxq = (*priv->rxqs)[rx_queue_id];
269 return rx_queue_count(rxq);
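/*
 * Usage sketch (hypothetical threshold and helper name; assumes
 * <rte_ethdev.h>): an application can poll the fill level reported by
 * mlx5_rx_queue_count() to detect a queue that is close to overflowing.
 */
static inline int
example_rxq_nearly_full(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc)
{
        int used = rte_eth_rx_queue_count(port_id, queue_id);

        if (used < 0)
                return used; /* Negative errno from the ethdev layer. */
        return used > (nb_desc * 3) / 4; /* More than 75% of the ring used. */
}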
272 int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
274 struct mlx5_rxq_data *rxq = rx_queue;
275 const unsigned int cqe_num = 1 << rxq->cqe_n;
276 const unsigned int cqe_mask = cqe_num - 1;
277 const uint16_t idx = rxq->cq_ci & cqe_num;
278 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
280 if (unlikely(rxq->cqes == NULL)) {
284 pmc->addr = &cqe->op_own;
286 pmc->mask = MLX5_CQE_OWNER_MASK;
287 pmc->size = sizeof(uint8_t);
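/*
 * Usage sketch (hypothetical timeout and helper name; assumes
 * <rte_ethdev.h>, <rte_power_intrinsics.h> and <rte_cycles.h>): the
 * condition filled in by mlx5_get_monitor_addr() lets a core sleep on the
 * CQE ownership byte instead of busy polling an empty queue.
 */
static inline void
example_rx_wait_for_work(uint16_t port_id, uint16_t queue_id)
{
        struct rte_power_monitor_cond pmc;

        if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
                /* Wake up on a CQE ownership change or after ~1 ms. */
                rte_power_monitor(&pmc, rte_get_tsc_cycles() +
                                  rte_get_tsc_hz() / 1000);
}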
292 * Translate RX completion flags to packet type.
295 * Pointer to RX queue structure.
299 * @note: update mlx5_dev_supported_ptypes_get() if anything changes here.
302 * Packet type for struct rte_mbuf.
304 static inline uint32_t
305 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
306 volatile struct mlx5_mini_cqe8 *mcqe)
310 uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;
312 /* Get the L3/L4 header type from the mini-CQE when it uses the L3/L4 format. */
314 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
315 ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
317 ptype = mcqe->hdr_type >> 2;
319 * The index to the array should have:
320 * bit[1:0] = l3_hdr_type
321 * bit[4:2] = l4_hdr_type
324 * bit[7] = outer_l3_type
327 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
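/*
 * The "rxq->tunnel * !!(idx & (1 << 6))" expression above is a branchless
 * select: the tunnel ptype bits are ORed in only when bit 6 (tunneled) is
 * set in the index and tunnel offload is enabled on the queue. A minimal
 * stand-alone equivalent (illustration only, hypothetical helper name):
 */
static inline uint32_t
example_branchless_or(uint32_t base, uint32_t extra, unsigned int cond)
{
        /* "!!cond" is 0 or 1, so the multiply keeps or clears "extra". */
        return base | (extra * !!cond);
}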
331 * Initialize Rx WQ and indexes.
334 * Pointer to RX queue structure.
337 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
339 const unsigned int wqe_n = 1 << rxq->elts_n;
342 for (i = 0; (i != wqe_n); ++i) {
343 volatile struct mlx5_wqe_data_seg *scat;
347 if (mlx5_rxq_mprq_enabled(rxq)) {
348 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
350 scat = &((volatile struct mlx5_wqe_mprq *)
352 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
353 1 << rxq->strd_num_n);
354 byte_count = (1 << rxq->strd_sz_n) *
355 (1 << rxq->strd_num_n);
357 struct rte_mbuf *buf = (*rxq->elts)[i];
359 scat = &((volatile struct mlx5_wqe_data_seg *)
361 addr = rte_pktmbuf_mtod(buf, uintptr_t);
362 byte_count = DATA_LEN(buf);
364 /* scat->addr must be able to store a pointer. */
365 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
366 *scat = (struct mlx5_wqe_data_seg){
367 .addr = rte_cpu_to_be_64(addr),
368 .byte_count = rte_cpu_to_be_32(byte_count),
369 .lkey = mlx5_rx_addr2mr(rxq, addr),
372 rxq->consumed_strd = 0;
373 rxq->decompressed = 0;
375 rxq->zip = (struct rxq_zip){
378 rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
379 (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
380 /* Update doorbell counter. */
381 rxq->rq_ci = wqe_n >> rxq->sges_n;
383 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
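/*
 * Ordering note (general doorbell pattern, stated as an assumption rather
 * than a description of the elided lines): the WQE stores above must become
 * visible to the device before the new producer index is published, so a
 * write barrier such as rte_io_wmb() is expected to precede the big-endian
 * store to *rxq->rq_db. Sketch:
 *
 *   rte_io_wmb();                        - order WQE writes vs. doorbell
 *   *rxq->rq_db = rte_cpu_to_be_32(ci);  - publish the new producer index
 */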
388 * The function moves the RQ to the reset state when the first error CQE is
389 * seen, then the CQ is drained by the caller's loop. When the CQ is empty,
390 * the function moves the RQ state to ready and initializes the RQ.
391 * Identifying the next CQE and counting errors are the caller's responsibility.
394 * Pointer to RX queue structure.
396 * 1 when called from a vectorized Rx burst; mbufs must be prepared for the RQ.
397 * 0 when called from a non-vectorized Rx burst.
400 * -1 in case of recovery error, otherwise the CQE status.
403 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
405 const uint16_t cqe_n = 1 << rxq->cqe_n;
406 const uint16_t cqe_mask = cqe_n - 1;
407 const uint16_t wqe_n = 1 << rxq->elts_n;
408 const uint16_t strd_n = 1 << rxq->strd_num_n;
409 struct mlx5_rxq_ctrl *rxq_ctrl =
410 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
412 volatile struct mlx5_cqe *cqe;
413 volatile struct mlx5_err_cqe *err_cqe;
415 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
417 struct mlx5_mp_arg_queue_state_modify sm;
420 switch (rxq->err_state) {
421 case MLX5_RXQ_ERR_STATE_NO_ERROR:
422 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
424 case MLX5_RXQ_ERR_STATE_NEED_RESET:
426 sm.queue_id = rxq->idx;
427 sm.state = IBV_WQS_RESET;
428 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
430 if (rxq_ctrl->dump_file_n <
431 rxq_ctrl->priv->config.max_dump_files_num) {
432 MKSTR(err_str, "Unexpected CQE error syndrome "
433 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
434 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
435 rxq->cqn, rxq_ctrl->wqn,
436 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
437 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
438 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
439 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
440 mlx5_dump_debug_information(name, NULL, err_str, 0);
441 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
442 (const void *)((uintptr_t)
444 sizeof(*u.cqe) * cqe_n);
445 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
446 (const void *)((uintptr_t)
449 rxq_ctrl->dump_file_n++;
451 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
453 case MLX5_RXQ_ERR_STATE_NEED_READY:
454 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
455 if (ret == MLX5_CQE_STATUS_HW_OWN) {
457 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
460 * The RQ consumer index must be zeroed while moving
461 * from RESET state to RDY state.
463 *rxq->rq_db = rte_cpu_to_be_32(0);
466 sm.queue_id = rxq->idx;
467 sm.state = IBV_WQS_RDY;
468 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
472 const uint32_t elts_n =
473 mlx5_rxq_mprq_enabled(rxq) ?
474 wqe_n * strd_n : wqe_n;
475 const uint32_t e_mask = elts_n - 1;
477 mlx5_rxq_mprq_enabled(rxq) ?
478 rxq->elts_ci : rxq->rq_ci;
480 struct rte_mbuf **elt;
482 unsigned int n = elts_n - (elts_ci -
485 for (i = 0; i < (int)n; ++i) {
486 elt_idx = (elts_ci + i) & e_mask;
487 elt = &(*rxq->elts)[elt_idx];
488 *elt = rte_mbuf_raw_alloc(rxq->mp);
490 for (i--; i >= 0; --i) {
501 for (i = 0; i < (int)elts_n; ++i) {
502 elt = &(*rxq->elts)[i];
504 (uint16_t)((*elt)->buf_len -
505 rte_pktmbuf_headroom(*elt));
507 /* Padding with a fake mbuf for vec Rx. */
508 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
509 (*rxq->elts)[elts_n + i] =
512 mlx5_rxq_initialize(rxq);
513 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
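/*
 * Condensed view of the recovery flow implemented by the switch() above
 * (illustration only, names abbreviated):
 *
 *   NO_ERROR   -- error CQE seen ------------------> NEED_RESET
 *   NEED_RESET -- WQ moved to RESET state ---------> NEED_READY
 *   NEED_READY -- once the CQ is drained (HW owns the next CQE):
 *                 WQ moved to RDY, ring refilled ---> NO_ERROR
 */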
522 * Get the size of the next packet for a given CQE. For compressed CQEs, the
523 * consumer index is updated only once all packets of the current one have
524 * been processed.
527 * Pointer to RX queue.
531 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
532 * written.
535 * 0 in case of empty CQE, otherwise the packet size in bytes.
538 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
539 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
541 struct rxq_zip *zip = &rxq->zip;
542 uint16_t cqe_n = cqe_cnt + 1;
548 /* Process compressed data in the CQE and mini arrays. */
550 volatile struct mlx5_mini_cqe8 (*mc)[8] =
551 (volatile struct mlx5_mini_cqe8 (*)[8])
552 (uintptr_t)(&(*rxq->cqes)[zip->ca &
554 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
556 *mcqe = &(*mc)[zip->ai & 7];
557 if ((++zip->ai & 7) == 0) {
558 /* Invalidate consumed CQEs */
562 (*rxq->cqes)[idx & cqe_cnt].op_own =
567 * Increment consumer index to skip the number
568 * of CQEs consumed. Hardware leaves holes in
569 * the CQ ring for software use.
574 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
575 /* Invalidate the rest */
580 (*rxq->cqes)[idx & cqe_cnt].op_own =
584 rxq->cq_ci = zip->cq_ci;
588 * No compressed data, get next CQE and verify if it is
596 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
597 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
598 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
600 ret = mlx5_rx_err_handle(rxq, 0);
601 if (ret == MLX5_CQE_STATUS_HW_OWN ||
609 * Use a local variable so that the cq_ci index in the
610 * queue structure always stays consistent with an
611 * actual CQE boundary (never pointing into the middle
612 * of a compressed CQE session).
614 cq_ci = rxq->cq_ci + 1;
615 op_own = cqe->op_own;
616 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
617 volatile struct mlx5_mini_cqe8 (*mc)[8] =
618 (volatile struct mlx5_mini_cqe8 (*)[8])
619 (uintptr_t)(&(*rxq->cqes)
620 [cq_ci & cqe_cnt].pkt_info);
622 /* Fix endianness. */
623 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
625 * Current mini array position is the one
626 * returned by check_cqe64().
628 * If the completion comprises several mini arrays,
629 * as a special case the second one is located
630 * 7 CQEs after the initial CQE, instead of the 8
631 * used for subsequent ones.
634 zip->na = zip->ca + 7;
635 /* Compute the next non compressed CQE. */
636 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
637 /* Get packet size to return. */
638 len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
642 /* Prefetch all to be invalidated */
646 rte_prefetch0(&(*rxq->cqes)[(idx) &
652 len = rte_be_to_cpu_32(cqe->byte_cnt);
655 if (unlikely(rxq->err_state)) {
656 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
657 ++rxq->stats.idropped;
665 * Translate RX completion flags to offload flags.
671 * Offload flags (ol_flags) for struct rte_mbuf.
673 static inline uint32_t
674 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
676 uint32_t ol_flags = 0;
677 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
681 MLX5_CQE_RX_L3_HDR_VALID,
682 PKT_RX_IP_CKSUM_GOOD) |
684 MLX5_CQE_RX_L4_HDR_VALID,
685 PKT_RX_L4_CKSUM_GOOD);
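/*
 * The mapping above moves each validity bit from its position in the CQE
 * hdr_type_etc word to the corresponding mbuf ol_flags bit (e.g.
 * MLX5_CQE_RX_L3_HDR_VALID -> PKT_RX_IP_CKSUM_GOOD). A generic stand-alone
 * equivalent of that bit transposition (illustration only, hypothetical
 * helper, not the driver's macro):
 */
static inline uint64_t
example_transpose_bit(uint32_t val, uint32_t from, uint64_t to)
{
        /* Keep "to" when the "from" bit is set in "val", otherwise 0. */
        return (val & from) ? to : 0;
}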
690 * Fill in mbuf fields from RX completion flags.
691 * Note that pkt->ol_flags should be initialized outside of this function.
694 * Pointer to RX queue.
699 * @param mcqe
700 * Pointer to the mini-CQE, or NULL when the CQE is not compressed.
703 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
704 volatile struct mlx5_cqe *cqe,
705 volatile struct mlx5_mini_cqe8 *mcqe)
707 /* Update packet information. */
708 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
711 uint32_t rss_hash_res = 0;
713 /* If compressed, take hash result from mini-CQE. */
715 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
716 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
718 rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
720 pkt->hash.rss = rss_hash_res;
721 pkt->ol_flags |= PKT_RX_RSS_HASH;
727 /* If compressed, take flow tag from mini-CQE. */
729 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
730 mark = cqe->sop_drop_qpn;
732 mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
733 (mcqe->flow_tag_high << 16);
734 if (MLX5_FLOW_MARK_IS_VALID(mark)) {
735 pkt->ol_flags |= PKT_RX_FDIR;
736 if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
737 pkt->ol_flags |= PKT_RX_FDIR_ID;
738 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
742 if (rxq->dynf_meta) {
743 uint32_t meta = cqe->flow_table_metadata &
744 rxq->flow_meta_port_mask;
747 pkt->ol_flags |= rxq->flow_meta_mask;
748 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
753 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
754 if (rxq->vlan_strip) {
758 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
759 vlan_strip = cqe->hdr_type_etc &
760 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
762 vlan_strip = mcqe->hdr_type &
763 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
765 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
766 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
769 if (rxq->hw_timestamp) {
770 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
772 if (rxq->rt_timestamp)
773 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
774 mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
775 pkt->ol_flags |= rxq->timestamp_rx_flag;
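/*
 * Application-side sketch (hypothetical helper name; assumes <rte_mbuf.h>):
 * how the fields filled in by rxq_cq_to_mbuf() are typically consumed after
 * rte_eth_rx_burst().
 */
static inline void
example_use_rx_meta(const struct rte_mbuf *m)
{
        /* RSS hash computed by the NIC. */
        uint32_t rss = (m->ol_flags & PKT_RX_RSS_HASH) ? m->hash.rss : 0;
        /* Flow MARK value matched by an rte_flow rule. */
        uint32_t mark = (m->ol_flags & PKT_RX_FDIR_ID) ? m->hash.fdir.hi : 0;
        /* VLAN TCI stripped by the NIC. */
        uint16_t vlan = (m->ol_flags & PKT_RX_VLAN_STRIPPED) ? m->vlan_tci : 0;

        RTE_SET_USED(rss);
        RTE_SET_USED(mark);
        RTE_SET_USED(vlan);
}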
780 * DPDK callback for RX.
783 * Generic pointer to RX queue structure.
785 * Array to store received packets.
787 * Maximum number of packets in array.
790 * Number of packets successfully received (<= pkts_n).
793 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
795 struct mlx5_rxq_data *rxq = dpdk_rxq;
796 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
797 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
798 const unsigned int sges_n = rxq->sges_n;
799 struct rte_mbuf *pkt = NULL;
800 struct rte_mbuf *seg = NULL;
801 volatile struct mlx5_cqe *cqe =
802 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
804 unsigned int rq_ci = rxq->rq_ci << sges_n;
805 int len = 0; /* keep its value across iterations. */
808 unsigned int idx = rq_ci & wqe_cnt;
809 volatile struct mlx5_wqe_data_seg *wqe =
810 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
811 struct rte_mbuf *rep = (*rxq->elts)[idx];
812 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
820 /* Allocate the buf from the same pool. */
821 rep = rte_mbuf_raw_alloc(seg->pool);
822 if (unlikely(rep == NULL)) {
823 ++rxq->stats.rx_nombuf;
826 * no buffers before we even started,
832 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
836 rte_mbuf_raw_free(pkt);
845 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
846 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
848 rte_mbuf_raw_free(rep);
852 MLX5_ASSERT(len >= (rxq->crc_present << 2));
853 pkt->ol_flags &= EXT_ATTACHED_MBUF;
854 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
855 if (rxq->crc_present)
856 len -= RTE_ETHER_CRC_LEN;
858 if (cqe->lro_num_seg > 1) {
860 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
862 pkt->ol_flags |= PKT_RX_LRO;
863 pkt->tso_segsz = len / cqe->lro_num_seg;
866 DATA_LEN(rep) = DATA_LEN(seg);
867 PKT_LEN(rep) = PKT_LEN(seg);
868 SET_DATA_OFF(rep, DATA_OFF(seg));
869 PORT(rep) = PORT(seg);
870 (*rxq->elts)[idx] = rep;
872 * Fill NIC descriptor with the new buffer. The lkey and size
873 * of the buffers are already known, only the buffer address
874 * changes.
876 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
877 /* If there's only one MR, no need to replace LKey in WQE. */
878 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
879 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
880 if (len > DATA_LEN(seg)) {
881 len -= DATA_LEN(seg);
887 #ifdef MLX5_PMD_SOFT_COUNTERS
888 /* Increment bytes counter. */
889 rxq->stats.ibytes += PKT_LEN(pkt);
896 /* Align consumer index to the next stride. */
901 if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci)))
903 /* Update the consumer index. */
904 rxq->rq_ci = rq_ci >> sges_n;
906 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
908 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
909 #ifdef MLX5_PMD_SOFT_COUNTERS
910 /* Increment packets counter. */
911 rxq->stats.ipackets += i;
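/*
 * Application-side polling loop sketch (hypothetical port/queue values,
 * burst size and helper name; assumes <rte_ethdev.h> and <rte_mbuf.h>): the
 * generic call below resolves to mlx5_rx_burst() or one of its variants
 * depending on the configuration.
 */
static inline void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb, i;

        nb = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
        for (i = 0; i < nb; ++i)
                rte_pktmbuf_free(pkts[i]); /* Real code would process pkts. */
}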
917 * Update LRO packet TCP header.
918 * The HW LRO feature doesn't update the TCP header after coalescing the
919 * TCP segments, but supplies information in the CQE for SW to fill it in.
922 * Pointer to the TCP header.
924 * Pointer to the completion entry.
926 * The L3 pseudo-header checksum.
929 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
930 volatile struct mlx5_cqe *__rte_restrict cqe,
931 uint32_t phcsum, uint8_t l4_type)
934 * The HW calculates only the TCP payload checksum; SW needs to complete
935 * the TCP header checksum and the L3 pseudo-header checksum.
937 uint32_t csum = phcsum + cqe->csum;
939 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
940 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
941 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
942 tcp->recv_ack = cqe->lro_ack_seq_num;
943 tcp->rx_win = cqe->lro_tcp_win;
945 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
946 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
948 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
949 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
950 csum = (~csum) & 0xffff;
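/*
 * The two lines above fold the 32-bit one's-complement sum into 16 bits and
 * complement it to produce the final TCP checksum. A stand-alone sketch of
 * the same folding step (hypothetical helper, single fold as in the code
 * above):
 */
static inline uint16_t
example_csum_fold(uint32_t csum)
{
        csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff); /* Fold carries. */
        return (uint16_t)((~csum) & 0xffff); /* One's complement of the sum. */
}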
957 * Update LRO packet headers.
958 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
959 * TCP segments, but supplies information in the CQE for SW to fill them in.
962 * The packet address.
964 * Pointer to the completion entry.
969 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
970 volatile struct mlx5_cqe *__rte_restrict cqe,
971 volatile struct mlx5_mini_cqe8 *mcqe,
972 struct mlx5_rxq_data *rxq, uint32_t len)
975 struct rte_ether_hdr *eth;
976 struct rte_vlan_hdr *vlan;
977 struct rte_ipv4_hdr *ipv4;
978 struct rte_ipv6_hdr *ipv6;
979 struct rte_tcp_hdr *tcp;
984 uint16_t proto = h.eth->ether_type;
989 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
990 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
991 proto = h.vlan->eth_proto;
994 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
995 h.ipv4->time_to_live = cqe->lro_min_ttl;
996 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
997 h.ipv4->hdr_checksum = 0;
998 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
999 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1002 h.ipv6->hop_limits = cqe->lro_min_ttl;
1003 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1005 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1009 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1010 l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1011 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1013 l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
1014 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1015 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
1019 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1021 struct mlx5_mprq_buf *buf = opaque;
1023 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
1024 rte_mempool_put(buf->mp, buf);
1025 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
1026 __ATOMIC_RELAXED) == 0)) {
1027 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1028 rte_mempool_put(buf->mp, buf);
1033 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1035 mlx5_mprq_buf_free_cb(NULL, buf);
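/*
 * Reference-counting sketch for the callback above (summary, not extra
 * driver code): the MPRQ buffer starts with refcnt == 1, owned by the RQ.
 * Each mbuf attached to one of its strides takes an extra reference that the
 * external-buffer free callback later drops; when the count would reach 0 it
 * is reset to 1 and the buffer is returned to its mempool for reuse.
 */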
1039 * DPDK callback for RX with Multi-Packet RQ support.
1042 * Generic pointer to RX queue structure.
1044 * Array to store received packets.
1046 * Maximum number of packets in array.
1049 * Number of packets successfully received (<= pkts_n).
1052 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1054 struct mlx5_rxq_data *rxq = dpdk_rxq;
1055 const uint32_t strd_n = 1 << rxq->strd_num_n;
1056 const uint32_t strd_sz = 1 << rxq->strd_sz_n;
1057 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
1058 const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
1059 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1061 uint32_t rq_ci = rxq->rq_ci;
1062 uint16_t consumed_strd = rxq->consumed_strd;
1063 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1065 while (i < pkts_n) {
1066 struct rte_mbuf *pkt;
1072 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1073 enum mlx5_rqx_code rxq_code;
1075 if (consumed_strd == strd_n) {
1076 /* Replace WQE if the buffer is still in use. */
1077 mprq_buf_replace(rxq, rq_ci & wq_mask);
1078 /* Advance to the next WQE. */
1081 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1083 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1084 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1088 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1089 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1090 if (rxq->crc_present)
1091 len -= RTE_ETHER_CRC_LEN;
1093 rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1094 strd_cnt = (len / strd_sz) + !!(len % strd_sz);
1096 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1097 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1098 MLX5_ASSERT(strd_cnt);
1099 consumed_strd += strd_cnt;
1100 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1102 strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
1105 MLX5_ASSERT(strd_idx < strd_n);
1106 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1108 pkt = rte_pktmbuf_alloc(rxq->mp);
1109 if (unlikely(pkt == NULL)) {
1110 ++rxq->stats.rx_nombuf;
1113 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1114 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1115 if (rxq->crc_present)
1116 len -= RTE_ETHER_CRC_LEN;
1117 rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
1118 strd_idx, strd_cnt);
1119 if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
1120 rte_pktmbuf_free_seg(pkt);
1121 if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
1122 ++rxq->stats.idropped;
1125 if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
1126 ++rxq->stats.rx_nombuf;
1130 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1131 if (cqe->lro_num_seg > 1) {
1132 mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
1133 cqe, mcqe, rxq, len);
1134 pkt->ol_flags |= PKT_RX_LRO;
1135 pkt->tso_segsz = len / cqe->lro_num_seg;
1138 PORT(pkt) = rxq->port_id;
1139 #ifdef MLX5_PMD_SOFT_COUNTERS
1140 /* Increment bytes counter. */
1141 rxq->stats.ibytes += PKT_LEN(pkt);
1143 /* Return packet. */
1147 /* Update the consumer indexes. */
1148 rxq->consumed_strd = consumed_strd;
1150 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1151 if (rq_ci != rxq->rq_ci) {
1154 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1156 #ifdef MLX5_PMD_SOFT_COUNTERS
1157 /* Increment packets counter. */
1158 rxq->stats.ipackets += i;
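/*
 * Stride accounting illustration (hypothetical numbers): with 2048-byte
 * strides (strd_sz), a 3000-byte packet consumes
 *   strd_cnt = 3000 / 2048 + !!(3000 % 2048) = 1 + 1 = 2
 * strides, which is what the "(len / strd_sz) + !!(len % strd_sz)"
 * expression above computes.
 */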
1164 * Dummy DPDK callback for RX.
1166 * This function is used to temporarily replace the real callback during
1167 * unsafe control operations on the queue, or in case of error.
1170 * Generic pointer to RX queue structure.
1172 * Array to store received packets.
1174 * Maximum number of packets in array.
1177 * Number of packets successfully received (<= pkts_n).
1180 removed_rx_burst(void *dpdk_rxq __rte_unused,
1181 struct rte_mbuf **pkts __rte_unused,
1182 uint16_t pkts_n __rte_unused)
1189 * Vectorized Rx routines are not compiled in when the required vector
1190 * instructions are not supported on the target architecture.
1191 * The following null stubs are needed for linkage when the vectorized
1192 * implementations (e.g. mlx5_rxtx_vec_sse.c for x86) are not built.
1196 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1197 struct rte_mbuf **pkts __rte_unused,
1198 uint16_t pkts_n __rte_unused)
1204 mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
1205 struct rte_mbuf **pkts __rte_unused,
1206 uint16_t pkts_n __rte_unused)
1212 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1218 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)