1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2021 6WIND S.A.
3 * Copyright 2021 Mellanox Technologies, Ltd
11 #include <rte_mempool.h>
12 #include <rte_prefetch.h>
13 #include <rte_common.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_ether.h>
16 #include <rte_cycles.h>
20 #include <mlx5_common.h>
22 #include "mlx5_autoconf.h"
23 #include "mlx5_defs.h"
25 #include "mlx5_utils.h"
26 #include "mlx5_rxtx.h"
30 static __rte_always_inline uint32_t
31 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
32 volatile struct mlx5_mini_cqe8 *mcqe);
34 static __rte_always_inline int
35 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
36 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
38 static __rte_always_inline uint32_t
39 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
41 static __rte_always_inline void
42 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
43 volatile struct mlx5_cqe *cqe,
44 volatile struct mlx5_mini_cqe8 *mcqe);
47 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
48 volatile struct mlx5_cqe *__rte_restrict cqe,
49 uint32_t phcsum, uint8_t l4_type);
52 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
53 volatile struct mlx5_cqe *__rte_restrict cqe,
54 volatile struct mlx5_mini_cqe8 *mcqe,
55 struct mlx5_rxq_data *rxq, uint32_t len);
59 * Internal function to compute the number of used descriptors in an RX queue.
65 * The number of used Rx descriptors.
68 rx_queue_count(struct mlx5_rxq_data *rxq)
70 struct rxq_zip *zip = &rxq->zip;
71 volatile struct mlx5_cqe *cqe;
72 const unsigned int cqe_n = (1 << rxq->cqe_n);
73 const unsigned int sges_n = (1 << rxq->sges_n);
74 const unsigned int elts_n = (1 << rxq->elts_n);
75 const unsigned int strd_n = (1 << rxq->strd_num_n);
76 const unsigned int cqe_cnt = cqe_n - 1;
77 unsigned int cq_ci, used;
79 /* if we are processing a compressed cqe */
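/* The not-yet-returned entries of the current mini-CQE session still count as used. */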
81 used = zip->cqe_cnt - zip->ai;
87 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
88 while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
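/* A compressed CQE reports in byte_cnt the number of packets in its mini-CQE session. */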
93 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
94 n = rte_be_to_cpu_32(cqe->byte_cnt);
99 cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
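/* Each completion consumes sges_n descriptors; clamp to the ring capacity (elts_n * strd_n). */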
101 used = RTE_MIN(used * sges_n, elts_n * strd_n);
106 * DPDK callback to check the status of an Rx descriptor.
111 * The index of the descriptor in the ring.
114 * The status of the Rx descriptor.
117 mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
119 struct mlx5_rxq_data *rxq = rx_queue;
120 struct mlx5_rxq_ctrl *rxq_ctrl =
121 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
122 struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
124 if (dev->rx_pkt_burst == NULL ||
125 dev->rx_pkt_burst == removed_rx_burst) {
129 if (offset >= (1 << rxq->cqe_n)) {
133 if (offset < rx_queue_count(rxq))
134 return RTE_ETH_RX_DESC_DONE;
135 return RTE_ETH_RX_DESC_AVAIL;
139 * DPDK callback to get the RX queue information.
142 * Pointer to the device structure.
145 * Rx queue identifier.
148 * Pointer to the RX queue information structure.
155 mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
156 struct rte_eth_rxq_info *qinfo)
158 struct mlx5_priv *priv = dev->data->dev_private;
159 struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
160 struct mlx5_rxq_ctrl *rxq_ctrl =
161 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
165 qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
166 rxq->mprq_mp : rxq->mp;
167 qinfo->conf.rx_thresh.pthresh = 0;
168 qinfo->conf.rx_thresh.hthresh = 0;
169 qinfo->conf.rx_thresh.wthresh = 0;
170 qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
171 qinfo->conf.rx_drop_en = 1;
172 qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
173 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
174 qinfo->scattered_rx = dev->data->scattered_rx;
175 qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
176 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
181 * DPDK callback to get the RX packet burst mode information.
184 * Pointer to the device structure.
187 * Rx queue identifier.
190 * Pointer to the burst mode information.
193 * 0 on success, -EINVAL on failure.
196 mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
197 uint16_t rx_queue_id __rte_unused,
198 struct rte_eth_burst_mode *mode)
200 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
201 struct mlx5_priv *priv = dev->data->dev_private;
202 struct mlx5_rxq_data *rxq;
204 rxq = (*priv->rxqs)[rx_queue_id];
209 if (pkt_burst == mlx5_rx_burst) {
210 snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
211 } else if (pkt_burst == mlx5_rx_burst_mprq) {
212 snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
213 } else if (pkt_burst == mlx5_rx_burst_vec) {
214 #if defined RTE_ARCH_X86_64
215 snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
216 #elif defined RTE_ARCH_ARM64
217 snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
218 #elif defined RTE_ARCH_PPC_64
219 snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
223 } else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
224 #if defined RTE_ARCH_X86_64
225 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
226 #elif defined RTE_ARCH_ARM64
227 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
228 #elif defined RTE_ARCH_PPC_64
229 snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
240 * DPDK callback to get the number of used descriptors in an RX queue.
243 * The Rx queue pointer.
246 * The number of used Rx descriptors.
247 * -EINVAL if the queue is invalid.
250 mlx5_rx_queue_count(void *rx_queue)
252 struct mlx5_rxq_data *rxq = rx_queue;
253 struct rte_eth_dev *dev;
260 dev = &rte_eth_devices[rxq->port_id];
262 if (dev->rx_pkt_burst == NULL ||
263 dev->rx_pkt_burst == removed_rx_burst) {
268 return rx_queue_count(rxq);
271 #define CLB_VAL_IDX 0
272 #define CLB_MSK_IDX 1
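/*
 * Power-monitor condition callback: compare the masked owner bit of the
 * monitored CQE against the expected software-ownership value. Return -1 to
 * abort entering the power-optimized state (a new completion has arrived),
 * 0 to let the core sleep.
 */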
274 mlx5_monitor_callback(const uint64_t value,
275 const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
277 const uint64_t m = opaque[CLB_MSK_IDX];
278 const uint64_t v = opaque[CLB_VAL_IDX];
280 return (value & m) == v ? -1 : 0;
283 int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
285 struct mlx5_rxq_data *rxq = rx_queue;
286 const unsigned int cqe_num = 1 << rxq->cqe_n;
287 const unsigned int cqe_mask = cqe_num - 1;
288 const uint16_t idx = rxq->cq_ci & cqe_num;
289 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
291 if (unlikely(rxq->cqes == NULL)) {
295 pmc->addr = &cqe->op_own;
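/* Expect the owner-bit value that marks the CQE at cq_ci as software-owned. */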
296 pmc->opaque[CLB_VAL_IDX] = !!idx;
297 pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK;
298 pmc->fn = mlx5_monitor_callback;
299 pmc->size = sizeof(uint8_t);
304 * Translate RX completion flags to packet type.
307 * Pointer to RX queue structure.
311 * @note: keep mlx5_dev_supported_ptypes_get() in sync with any change made here.
314 * Packet type for struct rte_mbuf.
316 static inline uint32_t
317 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
318 volatile struct mlx5_mini_cqe8 *mcqe)
322 uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;
324 /* Get the L3/L4 header type from the mini-CQE when the L3/L4 mini-CQE format is used. */
326 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
327 ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
329 ptype = mcqe->hdr_type >> 2;
331 * The index to the array should have:
332 * bit[1:0] = l3_hdr_type
333 * bit[4:2] = l4_hdr_type
336 * bit[7] = outer_l3_type
339 return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
343 * Initialize Rx WQ and indexes.
346 * Pointer to RX queue structure.
349 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
351 const unsigned int wqe_n = 1 << rxq->elts_n;
354 for (i = 0; (i != wqe_n); ++i) {
355 volatile struct mlx5_wqe_data_seg *scat;
359 if (mlx5_rxq_mprq_enabled(rxq)) {
360 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
362 scat = &((volatile struct mlx5_wqe_mprq *)
364 addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
365 1 << rxq->strd_num_n);
366 byte_count = (1 << rxq->strd_sz_n) *
367 (1 << rxq->strd_num_n);
369 struct rte_mbuf *buf = (*rxq->elts)[i];
371 scat = &((volatile struct mlx5_wqe_data_seg *)
373 addr = rte_pktmbuf_mtod(buf, uintptr_t);
374 byte_count = DATA_LEN(buf);
376 /* scat->addr must be able to store a pointer. */
377 MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
378 *scat = (struct mlx5_wqe_data_seg){
379 .addr = rte_cpu_to_be_64(addr),
380 .byte_count = rte_cpu_to_be_32(byte_count),
381 .lkey = mlx5_rx_addr2mr(rxq, addr),
384 rxq->consumed_strd = 0;
385 rxq->decompressed = 0;
387 rxq->zip = (struct rxq_zip){
390 rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
391 (wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
392 /* Update doorbell counter. */
393 rxq->rq_ci = wqe_n >> rxq->sges_n;
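/* Write the doorbell record so HW sees the freshly posted WQEs. */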
395 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
400 * The function moves the RQ state to RESET when the first error CQE is
401 * seen; the CQ is then drained by the caller's Rx burst loop. Once the CQ
402 * is empty, it moves the RQ state to READY and re-initializes the RQ.
403 * Identifying the next CQE and counting errors remain the caller's responsibility.
406 * Pointer to RX queue structure.
408 * 1 when called from a vectorized Rx burst; mbufs for the RQ must be prepared.
409 * 0 when called from a non-vectorized Rx burst.
412 * -1 in case of recovery error, otherwise the CQE status.
415 mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
417 const uint16_t cqe_n = 1 << rxq->cqe_n;
418 const uint16_t cqe_mask = cqe_n - 1;
419 const uint16_t wqe_n = 1 << rxq->elts_n;
420 const uint16_t strd_n = 1 << rxq->strd_num_n;
421 struct mlx5_rxq_ctrl *rxq_ctrl =
422 container_of(rxq, struct mlx5_rxq_ctrl, rxq);
424 volatile struct mlx5_cqe *cqe;
425 volatile struct mlx5_err_cqe *err_cqe;
427 .cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
429 struct mlx5_mp_arg_queue_state_modify sm;
432 switch (rxq->err_state) {
433 case MLX5_RXQ_ERR_STATE_NO_ERROR:
434 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
436 case MLX5_RXQ_ERR_STATE_NEED_RESET:
438 sm.queue_id = rxq->idx;
439 sm.state = IBV_WQS_RESET;
440 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
442 if (rxq_ctrl->dump_file_n <
443 rxq_ctrl->priv->config.max_dump_files_num) {
444 MKSTR(err_str, "Unexpected CQE error syndrome "
445 "0x%02x CQN = %u RQN = %u wqe_counter = %u"
446 " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
447 rxq->cqn, rxq_ctrl->wqn,
448 rte_be_to_cpu_16(u.err_cqe->wqe_counter),
449 rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
450 MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
451 rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
452 mlx5_dump_debug_information(name, NULL, err_str, 0);
453 mlx5_dump_debug_information(name, "MLX5 Error CQ:",
454 (const void *)((uintptr_t)
456 sizeof(*u.cqe) * cqe_n);
457 mlx5_dump_debug_information(name, "MLX5 Error RQ:",
458 (const void *)((uintptr_t)
461 rxq_ctrl->dump_file_n++;
463 rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
465 case MLX5_RXQ_ERR_STATE_NEED_READY:
466 ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
467 if (ret == MLX5_CQE_STATUS_HW_OWN) {
469 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
472 * The RQ consumer index must be zeroed while moving
473 * from RESET state to RDY state.
475 *rxq->rq_db = rte_cpu_to_be_32(0);
478 sm.queue_id = rxq->idx;
479 sm.state = IBV_WQS_RDY;
480 if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
484 const uint32_t elts_n =
485 mlx5_rxq_mprq_enabled(rxq) ?
486 wqe_n * strd_n : wqe_n;
487 const uint32_t e_mask = elts_n - 1;
489 mlx5_rxq_mprq_enabled(rxq) ?
490 rxq->elts_ci : rxq->rq_ci;
492 struct rte_mbuf **elt;
494 unsigned int n = elts_n - (elts_ci -
497 for (i = 0; i < (int)n; ++i) {
498 elt_idx = (elts_ci + i) & e_mask;
499 elt = &(*rxq->elts)[elt_idx];
500 *elt = rte_mbuf_raw_alloc(rxq->mp);
502 for (i--; i >= 0; --i) {
513 for (i = 0; i < (int)elts_n; ++i) {
514 elt = &(*rxq->elts)[i];
516 (uint16_t)((*elt)->buf_len -
517 rte_pktmbuf_headroom(*elt));
519 /* Padding with a fake mbuf for vec Rx. */
520 for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
521 (*rxq->elts)[elts_n + i] =
524 mlx5_rxq_initialize(rxq);
525 rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
534 * Get size of the next packet for a given CQE. For compressed CQEs, the
535 * consumer index is updated only once all packets of the current one have been processed.
539 * Pointer to RX queue.
543 * Store pointer to mini-CQE if compressed. Otherwise, the pointer is not written.
547 * 0 in case of empty CQE, otherwise the packet size in bytes.
550 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
551 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
553 struct rxq_zip *zip = &rxq->zip;
554 uint16_t cqe_n = cqe_cnt + 1;
560 /* Process compressed data in the CQE and mini arrays. */
562 volatile struct mlx5_mini_cqe8 (*mc)[8] =
563 (volatile struct mlx5_mini_cqe8 (*)[8])
564 (uintptr_t)(&(*rxq->cqes)[zip->ca &
566 len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
568 *mcqe = &(*mc)[zip->ai & 7];
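/* A mini-CQE array holds 8 entries; after consuming a full batch, invalidate the CQ slots it occupied. */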
569 if ((++zip->ai & 7) == 0) {
570 /* Invalidate consumed CQEs */
574 (*rxq->cqes)[idx & cqe_cnt].op_own =
579 * Increment consumer index to skip the number
580 * of CQEs consumed. Hardware leaves holes in
581 * the CQ ring for software use.
586 if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
587 /* Invalidate the rest */
592 (*rxq->cqes)[idx & cqe_cnt].op_own =
596 rxq->cq_ci = zip->cq_ci;
600 * No compressed data, get next CQE and verify if it is
608 ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
609 if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
610 if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
612 ret = mlx5_rx_err_handle(rxq, 0);
613 if (ret == MLX5_CQE_STATUS_HW_OWN ||
621 * Use a local variable so that the cq_ci index in the
622 * queue structure always stays consistent with the
623 * actual CQE boundary (never pointing into the middle
624 * of a compressed CQE session).
626 cq_ci = rxq->cq_ci + 1;
627 op_own = cqe->op_own;
628 if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
629 volatile struct mlx5_mini_cqe8 (*mc)[8] =
630 (volatile struct mlx5_mini_cqe8 (*)[8])
631 (uintptr_t)(&(*rxq->cqes)
632 [cq_ci & cqe_cnt].pkt_info);
634 /* Fix endianness. */
635 zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
637 * Current mini array position is the one
638 * returned by check_cqe().
640 * If completion comprises several mini arrays,
641 * as a special case the second one is located
642 * 7 CQEs after the initial CQE instead of 8
643 * for subsequent ones.
646 zip->na = zip->ca + 7;
647 /* Compute the next non-compressed CQE. */
648 zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
649 /* Get packet size to return. */
650 len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
654 /* Prefetch all CQEs to be invalidated. */
658 rte_prefetch0(&(*rxq->cqes)[(idx) &
664 len = rte_be_to_cpu_32(cqe->byte_cnt);
667 if (unlikely(rxq->err_state)) {
668 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
669 ++rxq->stats.idropped;
677 * Translate RX completion flags to offload flags.
683 * Offload flags (ol_flags) for struct rte_mbuf.
685 static inline uint32_t
686 rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
688 uint32_t ol_flags = 0;
689 uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
693 MLX5_CQE_RX_L3_HDR_VALID,
694 PKT_RX_IP_CKSUM_GOOD) |
696 MLX5_CQE_RX_L4_HDR_VALID,
697 PKT_RX_L4_CKSUM_GOOD);
702 * Fill in mbuf fields from RX completion flags.
703 * Note that pkt->ol_flags should be initialized outside of this function.
706 * Pointer to RX queue.
711 * @param mcqe
712 * Mini-CQE pointer for a compressed session, NULL otherwise.
715 rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
716 volatile struct mlx5_cqe *cqe,
717 volatile struct mlx5_mini_cqe8 *mcqe)
719 /* Update packet information. */
720 pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
723 uint32_t rss_hash_res = 0;
725 /* If compressed, take hash result from mini-CQE. */
727 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
728 rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
730 rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
732 pkt->hash.rss = rss_hash_res;
733 pkt->ol_flags |= PKT_RX_RSS_HASH;
739 /* If compressed, take flow tag from mini-CQE. */
741 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
742 mark = cqe->sop_drop_qpn;
744 mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
745 (mcqe->flow_tag_high << 16);
746 if (MLX5_FLOW_MARK_IS_VALID(mark)) {
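/* The default mark only reports a match (PKT_RX_FDIR); an explicit mark also carries a user value in fdir.hi. */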
747 pkt->ol_flags |= PKT_RX_FDIR;
748 if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
749 pkt->ol_flags |= PKT_RX_FDIR_ID;
750 pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
754 if (rxq->dynf_meta) {
755 uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
756 rxq->flow_meta_port_mask;
759 pkt->ol_flags |= rxq->flow_meta_mask;
760 *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
765 pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
766 if (rxq->vlan_strip) {
770 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
771 vlan_strip = cqe->hdr_type_etc &
772 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
774 vlan_strip = mcqe->hdr_type &
775 RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
777 pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
778 pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
781 if (rxq->hw_timestamp) {
782 uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);
784 if (rxq->rt_timestamp)
785 ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
786 mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
787 pkt->ol_flags |= rxq->timestamp_rx_flag;
792 * DPDK callback for RX.
795 * Generic pointer to RX queue structure.
797 * Array to store received packets.
799 * Maximum number of packets in array.
802 * Number of packets successfully received (<= pkts_n).
805 mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
807 struct mlx5_rxq_data *rxq = dpdk_rxq;
808 const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
809 const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
810 const unsigned int sges_n = rxq->sges_n;
811 struct rte_mbuf *pkt = NULL;
812 struct rte_mbuf *seg = NULL;
813 volatile struct mlx5_cqe *cqe =
814 &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
816 unsigned int rq_ci = rxq->rq_ci << sges_n;
817 int len = 0; /* keep its value across iterations. */
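/* The low sges_n bits of rq_ci index the data segment within a WQE; scattered Rx handles one segment per loop iteration. */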
820 unsigned int idx = rq_ci & wqe_cnt;
821 volatile struct mlx5_wqe_data_seg *wqe =
822 &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
823 struct rte_mbuf *rep = (*rxq->elts)[idx];
824 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
832 /* Allocate the buf from the same pool. */
833 rep = rte_mbuf_raw_alloc(seg->pool);
834 if (unlikely(rep == NULL)) {
835 ++rxq->stats.rx_nombuf;
838 * no buffers before we even started,
844 MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
848 rte_mbuf_raw_free(pkt);
857 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
858 len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
860 rte_mbuf_raw_free(rep);
864 MLX5_ASSERT(len >= (rxq->crc_present << 2));
865 pkt->ol_flags &= EXT_ATTACHED_MBUF;
866 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
867 if (rxq->crc_present)
868 len -= RTE_ETHER_CRC_LEN;
870 if (cqe->lro_num_seg > 1) {
872 (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
874 pkt->ol_flags |= PKT_RX_LRO;
875 pkt->tso_segsz = len / cqe->lro_num_seg;
878 DATA_LEN(rep) = DATA_LEN(seg);
879 PKT_LEN(rep) = PKT_LEN(seg);
880 SET_DATA_OFF(rep, DATA_OFF(seg));
881 PORT(rep) = PORT(seg);
882 (*rxq->elts)[idx] = rep;
884 * Fill NIC descriptor with the new buffer. The lkey and size
885 * of the buffers are already known, only the buffer address changes.
888 wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
889 /* If there's only one MR, no need to replace LKey in WQE. */
890 if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
891 wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
892 if (len > DATA_LEN(seg)) {
893 len -= DATA_LEN(seg);
899 #ifdef MLX5_PMD_SOFT_COUNTERS
900 /* Increment bytes counter. */
901 rxq->stats.ibytes += PKT_LEN(pkt);
908 /* Align consumer index to the next stride. */
913 if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci)))
915 /* Update the consumer index. */
916 rxq->rq_ci = rq_ci >> sges_n;
918 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
920 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
921 #ifdef MLX5_PMD_SOFT_COUNTERS
922 /* Increment packets counter. */
923 rxq->stats.ipackets += i;
929 * Update LRO packet TCP header.
930 * The HW LRO feature doesn't update the TCP header after coalescing the
931 * TCP segments but supplies information in the CQE for SW to fill it in.
934 * Pointer to the TCP header.
936 * Pointer to the completion entry.
938 * The L3 pseudo-header checksum.
941 mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
942 volatile struct mlx5_cqe *__rte_restrict cqe,
943 uint32_t phcsum, uint8_t l4_type)
946 * The HW calculates only the TCP payload checksum; the TCP header
947 * checksum and the L3 pseudo-header checksum must be completed by SW.
949 uint32_t csum = phcsum + cqe->csum;
951 if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
952 l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
953 tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
954 tcp->recv_ack = cqe->lro_ack_seq_num;
955 tcp->rx_win = cqe->lro_tcp_win;
957 if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
958 tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
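/*
 * Sum the TCP header (the data_off upper nibble is its length in 32-bit
 * words), fold the 32-bit sum into 16 bits and take the one's complement
 * to complete the checksum.
 */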
960 csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
961 csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
962 csum = (~csum) & 0xffff;
969 * Update LRO packet headers.
970 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
971 * TCP segments but supplies information in the CQE for SW to fill them in.
974 * The packet address.
976 * Pointer to the completion entry.
981 mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
982 volatile struct mlx5_cqe *__rte_restrict cqe,
983 volatile struct mlx5_mini_cqe8 *mcqe,
984 struct mlx5_rxq_data *rxq, uint32_t len)
987 struct rte_ether_hdr *eth;
988 struct rte_vlan_hdr *vlan;
989 struct rte_ipv4_hdr *ipv4;
990 struct rte_ipv6_hdr *ipv6;
991 struct rte_tcp_hdr *tcp;
996 uint16_t proto = h.eth->ether_type;
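/* Step over any VLAN/QinQ tags to reach the L3 header. */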
1001 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
1002 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
1003 proto = h.vlan->eth_proto;
1006 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
1007 h.ipv4->time_to_live = cqe->lro_min_ttl;
1008 h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
1009 h.ipv4->hdr_checksum = 0;
1010 h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
1011 phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
1014 h.ipv6->hop_limits = cqe->lro_min_ttl;
1015 h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
1017 phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
1021 rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
1022 l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
1023 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1025 l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
1026 MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
1027 mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
1031 mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
1033 struct mlx5_mprq_buf *buf = opaque;
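/*
 * refcnt == 1 means the caller holds the last reference, so the buffer can
 * go straight back to its mempool. Otherwise drop one reference; whoever
 * releases the last one resets the count to 1 and returns the buffer.
 */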
1035 if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
1036 rte_mempool_put(buf->mp, buf);
1037 } else if (unlikely(__atomic_sub_fetch(&buf->refcnt, 1,
1038 __ATOMIC_RELAXED) == 0)) {
1039 __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
1040 rte_mempool_put(buf->mp, buf);
1045 mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
1047 mlx5_mprq_buf_free_cb(NULL, buf);
1051 * DPDK callback for RX with Multi-Packet RQ support.
1054 * Generic pointer to RX queue structure.
1056 * Array to store received packets.
1058 * Maximum number of packets in array.
1061 * Number of packets successfully received (<= pkts_n).
1064 mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
1066 struct mlx5_rxq_data *rxq = dpdk_rxq;
1067 const uint32_t strd_n = 1 << rxq->strd_num_n;
1068 const uint32_t strd_sz = 1 << rxq->strd_sz_n;
1069 const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
1070 const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
1071 volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1073 uint32_t rq_ci = rxq->rq_ci;
1074 uint16_t consumed_strd = rxq->consumed_strd;
1075 struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1077 while (i < pkts_n) {
1078 struct rte_mbuf *pkt;
1084 volatile struct mlx5_mini_cqe8 *mcqe = NULL;
1085 enum mlx5_rqx_code rxq_code;
1087 if (consumed_strd == strd_n) {
1088 /* Replace WQE if the buffer is still in use. */
1089 mprq_buf_replace(rxq, rq_ci & wq_mask);
1090 /* Advance to the next WQE. */
1093 buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
1095 cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
1096 ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
1100 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1101 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1102 if (rxq->crc_present)
1103 len -= RTE_ETHER_CRC_LEN;
1105 rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
1106 strd_cnt = (len / strd_sz) + !!(len % strd_sz);
1108 strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
1109 MLX5_MPRQ_STRIDE_NUM_SHIFT;
1110 MLX5_ASSERT(strd_cnt);
1111 consumed_strd += strd_cnt;
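/* Filler CQEs consume strides but carry no packet; skip them. */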
1112 if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
1114 strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
1117 MLX5_ASSERT(strd_idx < strd_n);
1118 MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
1120 pkt = rte_pktmbuf_alloc(rxq->mp);
1121 if (unlikely(pkt == NULL)) {
1122 ++rxq->stats.rx_nombuf;
1125 len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
1126 MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
1127 if (rxq->crc_present)
1128 len -= RTE_ETHER_CRC_LEN;
1129 rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
1130 strd_idx, strd_cnt);
1131 if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
1132 rte_pktmbuf_free_seg(pkt);
1133 if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
1134 ++rxq->stats.idropped;
1137 if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
1138 ++rxq->stats.rx_nombuf;
1142 rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
1143 if (cqe->lro_num_seg > 1) {
1144 mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
1145 cqe, mcqe, rxq, len);
1146 pkt->ol_flags |= PKT_RX_LRO;
1147 pkt->tso_segsz = len / cqe->lro_num_seg;
1150 PORT(pkt) = rxq->port_id;
1151 #ifdef MLX5_PMD_SOFT_COUNTERS
1152 /* Increment bytes counter. */
1153 rxq->stats.ibytes += PKT_LEN(pkt);
1155 /* Return packet. */
1159 /* Update the consumer indexes. */
1160 rxq->consumed_strd = consumed_strd;
1162 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
1163 if (rq_ci != rxq->rq_ci) {
1166 *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
1168 #ifdef MLX5_PMD_SOFT_COUNTERS
1169 /* Increment packets counter. */
1170 rxq->stats.ipackets += i;
1176 * Dummy DPDK callback for RX.
1178 * This function is used to temporarily replace the real callback during
1179 * unsafe control operations on the queue, or in case of error.
1182 * Generic pointer to RX queue structure.
1184 * Array to store received packets.
1186 * Maximum number of packets in array.
1189 * Number of packets successfully received (<= pkts_n).
1192 removed_rx_burst(void *dpdk_rxq __rte_unused,
1193 struct rte_mbuf **pkts __rte_unused,
1194 uint16_t pkts_n __rte_unused)
1201 * Vectorized Rx routines are not compiled in when required vector instructions
1202 * are not supported on a target architecture.
1203 * The following null stubs are needed for linkage when the vectorized
1204 * implementations are not compiled in (e.g. mlx5_rxtx_vec_sse.c for x86).
1208 mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
1209 struct rte_mbuf **pkts __rte_unused,
1210 uint16_t pkts_n __rte_unused)
1216 mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
1217 struct rte_mbuf **pkts __rte_unused,
1218 uint16_t pkts_n __rte_unused)
1224 mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
1230 mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)