/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */

#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>

#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe);

static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);

static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);

static __rte_always_inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe);
static __rte_always_inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type);

static __rte_always_inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len);
 * Internal function to compute the number of used descriptors in an RX queue.
 *   The number of used Rx descriptors.
rx_queue_count(struct mlx5_rxq_data *rxq)
	struct rxq_zip *zip = &rxq->zip;
	volatile struct mlx5_cqe *cqe;
	const unsigned int cqe_n = (1 << rxq->cqe_n);
	const unsigned int sges_n = (1 << rxq->sges_n);
	const unsigned int elts_n = (1 << rxq->elts_n);
	const unsigned int strd_n = (1 << rxq->strd_num_n);
	const unsigned int cqe_cnt = cqe_n - 1;
	unsigned int cq_ci, used;

	/* if we are processing a compressed cqe */
		used = zip->cqe_cnt - zip->ai;
	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
			n = rte_be_to_cpu_32(cqe->byte_cnt);
		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
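	/*
	 * Each counted CQE may account for several SGEs (scattered Rx);
	 * clamp the estimate to the total number of ring entries.
	 */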
	used = RTE_MIN(used * sges_n, elts_n * strd_n);
 * DPDK callback to check the status of a Rx descriptor.
 *   The index of the descriptor in the ring.
 *   The status of the Rx descriptor.
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
	struct mlx5_rxq_data *rxq = rx_queue;

	if (offset >= (1 << rxq->cqe_n)) {
	if (offset < rx_queue_count(rxq))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
 * DPDK callback to get the RX queue information.
 *   Pointer to the device structure.
 *   Rx queue identifier.
 *   Pointer to the RX queue information structure.
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		  struct rte_eth_rxq_info *qinfo)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq, struct mlx5_rxq_ctrl, rxq);

	qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
			rxq->mprq_mp : rxq->mp;
	qinfo->conf.rx_thresh.pthresh = 0;
	qinfo->conf.rx_thresh.hthresh = 0;
	qinfo->conf.rx_thresh.wthresh = 0;
	qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
		(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
 * DPDK callback to get the RX packet burst mode information.
 *   Pointer to the device structure.
 *   Rx queue identifier.
 *   Pointer to the burst mode information.
 *   0 on success, -EINVAL on failure.
mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
		       uint16_t rx_queue_id __rte_unused,
		       struct rte_eth_burst_mode *mode)
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq;

	rxq = (*priv->rxqs)[rx_queue_id];
	if (pkt_burst == mlx5_rx_burst) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
	} else if (pkt_burst == mlx5_rx_burst_mprq) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
	} else if (pkt_burst == mlx5_rx_burst_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
	} else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
 * DPDK callback to get the number of used descriptors in an Rx queue.
 *   The Rx queue pointer.
 *   The number of used Rx descriptors.
 *   -EINVAL if the queue is invalid.
mlx5_rx_queue_count(void *rx_queue)
	struct mlx5_rxq_data *rxq = rx_queue;
	struct rte_eth_dev *dev;

	dev = &rte_eth_devices[rxq->port_id];

	if (dev->rx_pkt_burst == NULL ||
	    dev->rx_pkt_burst == removed_rx_burst) {
	return rx_queue_count(rxq);
#define CLB_VAL_IDX 0
#define CLB_MSK_IDX 1
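
/*
 * Comparison callback used by rte_power_monitor(): "value" is the op_own
 * byte of the monitored CQE. Return -1 (abort entering the power-optimized
 * state) when the masked ownership bit already equals the expected value
 * stored in the opaque array, i.e. a new completion has been written;
 * return 0 otherwise.
 */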
mlx5_monitor_callback(const uint64_t value,
		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
	const uint64_t m = opaque[CLB_MSK_IDX];
	const uint64_t v = opaque[CLB_VAL_IDX];

	return (value & m) == v ? -1 : 0;
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
	struct mlx5_rxq_data *rxq = rx_queue;
	const unsigned int cqe_num = 1 << rxq->cqe_n;
	const unsigned int cqe_mask = cqe_num - 1;
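	/*
	 * "idx" below is the wrap (ownership) bit of cq_ci: it is the value
	 * the ownership bit of the monitored CQE takes once hardware hands
	 * that CQE over to software, so the monitor wakes the core exactly
	 * when the next completion is written.
	 */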
	const uint16_t idx = rxq->cq_ci & cqe_num;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];

	if (unlikely(rxq->cqes == NULL)) {
	pmc->addr = &cqe->op_own;
	pmc->opaque[CLB_VAL_IDX] = !!idx;
	pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK;
	pmc->fn = mlx5_monitor_callback;
	pmc->size = sizeof(uint8_t);
 * Translate RX completion flags to packet type.
 *   Pointer to RX queue structure.
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *   Packet type for struct rte_mbuf.
static inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe)
	uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;

	/* Get l3/l4 header from mini-CQE in case of L3/L4 format. */
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
		ptype = mcqe->hdr_type >> 2;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[7] = outer_l3_type
	 */
	return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
 * Initialize Rx WQ and indexes.
 *   Pointer to RX queue structure.
mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
	const unsigned int wqe_n = 1 << rxq->elts_n;

	for (i = 0; (i != wqe_n); ++i) {
		volatile struct mlx5_wqe_data_seg *scat;

		if (mlx5_rxq_mprq_enabled(rxq)) {
			struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];

			scat = &((volatile struct mlx5_wqe_mprq *)
			addr = (uintptr_t)mlx5_mprq_buf_addr(buf,
						1 << rxq->strd_num_n);
			byte_count = (1 << rxq->strd_sz_n) *
					(1 << rxq->strd_num_n);
			struct rte_mbuf *buf = (*rxq->elts)[i];

			scat = &((volatile struct mlx5_wqe_data_seg *)
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			byte_count = DATA_LEN(buf);
		/* scat->addr must be able to store a pointer. */
		MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(addr),
			.byte_count = rte_cpu_to_be_32(byte_count),
			.lkey = mlx5_rx_addr2mr(rxq, addr),
	rxq->consumed_strd = 0;
	rxq->decompressed = 0;
	rxq->zip = (struct rxq_zip){
	rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
		(wqe_n >> rxq->sges_n) * (1 << rxq->strd_num_n) : 0;
	/* Update doorbell counter. */
	rxq->rq_ci = wqe_n >> rxq->sges_n;
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 * The function moves the RQ state to reset when the first error CQE is seen,
 * then the CQ is drained by the caller's loop. When the CQ is empty, it moves
 * the RQ state to ready and reinitializes the RQ.
 * Identifying the next CQE and counting errors are the caller's responsibility.
 *   Pointer to RX queue structure.
 *   1 when called from vectorized Rx burst, need to prepare mbufs for the RQ.
 *   0 when called from non-vectorized Rx burst.
 *   -1 in case of recovery error, otherwise the CQE status.
mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t strd_n = 1 << rxq->strd_num_n;
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
		volatile struct mlx5_cqe *cqe;
		volatile struct mlx5_err_cqe *err_cqe;
		.cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
	struct mlx5_mp_arg_queue_state_modify sm;
	switch (rxq->err_state) {
	case MLX5_RXQ_ERR_STATE_NO_ERROR:
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
	case MLX5_RXQ_ERR_STATE_NEED_RESET:
		sm.queue_id = rxq->idx;
		sm.state = IBV_WQS_RESET;
		if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
		if (rxq_ctrl->dump_file_n <
		    RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u RQN = %u wqe_counter = %u"
			      " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
			      rxq->cqn, rxq_ctrl->wqn,
			      rte_be_to_cpu_16(u.err_cqe->wqe_counter),
			      rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
			      rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
						    sizeof(*u.cqe) * cqe_n);
			mlx5_dump_debug_information(name, "MLX5 Error RQ:",
						    (const void *)((uintptr_t)
			rxq_ctrl->dump_file_n++;
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
	case MLX5_RXQ_ERR_STATE_NEED_READY:
		ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN) {
			*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
			/*
			 * The RQ consumer index must be zeroed while moving
			 * from RESET state to RDY state.
			 */
			*rxq->rq_db = rte_cpu_to_be_32(0);
			sm.queue_id = rxq->idx;
			sm.state = IBV_WQS_RDY;
			if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
				const uint32_t elts_n =
					mlx5_rxq_mprq_enabled(rxq) ?
					wqe_n * strd_n : wqe_n;
				const uint32_t e_mask = elts_n - 1;
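				/*
				 * Vectorized Rx (vec != 0) requires the elts[]
				 * ring to be fully populated with valid mbufs,
				 * so prepare replacement buffers here before
				 * the RQ is reinitialized.
				 */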
					mlx5_rxq_mprq_enabled(rxq) ?
					rxq->elts_ci : rxq->rq_ci;
				struct rte_mbuf **elt;
				unsigned int n = elts_n - (elts_ci -

				for (i = 0; i < (int)n; ++i) {
					elt_idx = (elts_ci + i) & e_mask;
					elt = &(*rxq->elts)[elt_idx];
					*elt = rte_mbuf_raw_alloc(rxq->mp);
						for (i--; i >= 0; --i) {
				for (i = 0; i < (int)elts_n; ++i) {
					elt = &(*rxq->elts)[i];
						(uint16_t)((*elt)->buf_len -
						rte_pktmbuf_headroom(*elt));
				/* Padding with a fake mbuf for vec Rx. */
				for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
					(*rxq->elts)[elts_n + i] =
			mlx5_rxq_initialize(rxq);
			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
 * Get size of the next packet for a given CQE. For compressed CQEs, the
 * consumer index is updated only once all packets of the current one have
 * been processed.
 *   Pointer to RX queue.
 *   Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
 *   written.
 *   0 in case of empty CQE, otherwise the packet size in bytes.
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
	struct rxq_zip *zip = &rxq->zip;
	uint16_t cqe_n = cqe_cnt + 1;

		/* Process compressed data in the CQE and mini arrays. */
			volatile struct mlx5_mini_cqe8 (*mc)[8] =
				(volatile struct mlx5_mini_cqe8 (*)[8])
				(uintptr_t)(&(*rxq->cqes)[zip->ca &
			len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
			*mcqe = &(*mc)[zip->ai & 7];
			if ((++zip->ai & 7) == 0) {
				/* Invalidate consumed CQEs */
					(*rxq->cqes)[idx & cqe_cnt].op_own =
				/*
				 * Increment consumer index to skip the number
				 * of CQEs consumed. Hardware leaves holes in
				 * the CQ ring for software use.
				 */
			if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
				/* Invalidate the rest */
					(*rxq->cqes)[idx & cqe_cnt].op_own =
				rxq->cq_ci = zip->cq_ci;
			/*
			 * No compressed data, get next CQE and verify if it is
			 * compressed.
			 */
			ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
			if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
				if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
					ret = mlx5_rx_err_handle(rxq, 0);
					if (ret == MLX5_CQE_STATUS_HW_OWN ||
			/*
			 * Use a local variable so the cq_ci index kept in the
			 * queue structure always stays on an actual CQE
			 * boundary (never points into the middle of a
			 * compressed CQE session).
			 */
			cq_ci = rxq->cq_ci + 1;
			op_own = cqe->op_own;
			if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
				volatile struct mlx5_mini_cqe8 (*mc)[8] =
					(volatile struct mlx5_mini_cqe8 (*)[8])
					(uintptr_t)(&(*rxq->cqes)
						    [cq_ci & cqe_cnt].pkt_info);
				/* Fix endianness. */
				zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
				/*
				 * Current mini array position is the one
				 * returned by check_cqe64().
				 * If completion comprises several mini arrays,
				 * as a special case the second one is located
				 * 7 CQEs after the initial CQE instead of 8
				 * for subsequent ones.
				 */
				zip->na = zip->ca + 7;
				/* Compute the next non-compressed CQE. */
				zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
				/* Get packet size to return. */
				len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
				/* Prefetch all to be invalidated */
					rte_prefetch0(&(*rxq->cqes)[(idx) &
				len = rte_be_to_cpu_32(cqe->byte_cnt);
		if (unlikely(rxq->err_state)) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			++rxq->stats.idropped;
 * Translate RX completion flags to offload flags.
 *   Offload flags (ol_flags) for struct rte_mbuf.
static inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
	uint32_t ol_flags = 0;
	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

			  MLX5_CQE_RX_L3_HDR_VALID,
			  RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
			  MLX5_CQE_RX_L4_HDR_VALID,
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
 * Fill in mbuf fields from RX completion flags.
 * Note that pkt->ol_flags should be initialized outside of this function.
 *   Pointer to RX queue.
 * @param mcqe
 *   Pointer to the mini-CQE, NULL if the completion is not compressed.
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe)
	/* Update packet information. */
	pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);

		uint32_t rss_hash_res = 0;

		/* If compressed, take hash result from mini-CQE. */
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
			rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
			rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
			pkt->hash.rss = rss_hash_res;
			pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		/* If compressed, take flow tag from mini-CQE. */
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			mark = cqe->sop_drop_qpn;
			mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
				(mcqe->flow_tag_high << 16);
		if (MLX5_FLOW_MARK_IS_VALID(mark)) {
			pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
			if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
				pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
				pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
	if (rxq->dynf_meta) {
		uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
				rxq->flow_meta_port_mask;

			pkt->ol_flags |= rxq->flow_meta_mask;
			*RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
		pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
	if (rxq->vlan_strip) {
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
			vlan_strip = cqe->hdr_type_etc &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
			vlan_strip = mcqe->hdr_type &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
			pkt->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
			pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
	if (rxq->hw_timestamp) {
		uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);

		if (rxq->rt_timestamp)
			ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
		mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
		pkt->ol_flags |= rxq->timestamp_rx_flag;
 * DPDK callback for RX.
 *   Generic pointer to RX queue structure.
 *   Array to store received packets.
 *   Maximum number of packets in array.
 *   Number of packets successfully received (<= pkts_n).
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
	const unsigned int sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	volatile struct mlx5_cqe *cqe =
		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
	unsigned int rq_ci = rxq->rq_ci << sges_n;
	int len = 0; /* keep its value across iterations. */

		unsigned int idx = rq_ci & wqe_cnt;
		volatile struct mlx5_wqe_data_seg *wqe =
			&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;

		/* Allocate the buf from the same pool. */
		rep = rte_mbuf_raw_alloc(seg->pool);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
				 * no buffers before we even started,
				MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
				rte_mbuf_raw_free(pkt);
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
				rte_mbuf_raw_free(rep);
			MLX5_ASSERT(len >= (rxq->crc_present << 2));
			pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
			rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
			if (rxq->crc_present)
				len -= RTE_ETHER_CRC_LEN;
			if (cqe->lro_num_seg > 1) {
					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
				pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
				pkt->tso_segsz = len / cqe->lro_num_seg;
		DATA_LEN(rep) = DATA_LEN(seg);
		PKT_LEN(rep) = PKT_LEN(seg);
		SET_DATA_OFF(rep, DATA_OFF(seg));
		PORT(rep) = PORT(seg);
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
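		/*
		 * If the CQE length exceeds this mbuf's data room the packet
		 * is scattered: keep filling the following SGEs of the same
		 * packet.
		 */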
		if (len > DATA_LEN(seg)) {
			len -= DATA_LEN(seg);
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
		/* Align consumer index to the next stride. */
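	/* No packets received and the RQ consumer index unchanged: no doorbell update is needed. */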
	if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci)))
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
 * Update LRO packet TCP header.
 * The HW LRO feature doesn't update the TCP header after coalescing the
 * TCP segments but supplies information in the CQE for SW to fill it in.
 *   Pointer to the TCP header.
 *   Pointer to the completion entry.
 *   The L3 pseudo-header checksum.
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type)
	/*
	 * The HW calculates only the TCP payload checksum, need to complete
	 * the TCP header checksum and the L3 pseudo-header checksum.
	 */
	uint32_t csum = phcsum + cqe->csum;

	if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
	    l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACK) {
		tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
		tcp->recv_ack = cqe->lro_ack_seq_num;
		tcp->rx_win = cqe->lro_tcp_win;
	if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
		tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
	csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
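	/* Fold the 32-bit checksum accumulator into 16 bits and take the one's complement. */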
	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
	csum = (~csum) & 0xffff;
 * Update LRO packet headers.
 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
 * TCP segments but supplies information in the CQE for SW to fill them in.
 *   The packet address.
 *   Pointer to the completion entry.
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len)
		struct rte_ether_hdr *eth;
		struct rte_vlan_hdr *vlan;
		struct rte_ipv4_hdr *ipv4;
		struct rte_ipv6_hdr *ipv6;
		struct rte_tcp_hdr *tcp;
	uint16_t proto = h.eth->ether_type;

	while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
	       proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
		proto = h.vlan->eth_proto;
	if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
		h.ipv4->time_to_live = cqe->lro_min_ttl;
		h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
		h.ipv4->hdr_checksum = 0;
		h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
		phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
		h.ipv6->hop_limits = cqe->lro_min_ttl;
		h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
		phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
		l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
	mlx5_mprq_buf_free_cb(NULL, buf);
 * DPDK callback for RX with Multi-Packet RQ support.
 *   Generic pointer to RX queue structure.
 *   Array to store received packets.
 *   Maximum number of packets in array.
 *   Number of packets successfully received (<= pkts_n).
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const uint32_t strd_n = 1 << rxq->strd_num_n;
	const uint32_t strd_sz = 1 << rxq->strd_sz_n;
	const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
	const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
	uint32_t rq_ci = rxq->rq_ci;
	uint16_t consumed_strd = rxq->consumed_strd;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];

	while (i < pkts_n) {
		struct rte_mbuf *pkt;
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
		enum mlx5_rqx_code rxq_code;

		if (consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wq_mask);
			/* Advance to the next WQE. */
			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		    rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			strd_cnt = (len / strd_sz) + !!(len % strd_sz);
			strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
				   MLX5_MPRQ_STRIDE_NUM_SHIFT;
		MLX5_ASSERT(strd_cnt);
		consumed_strd += strd_cnt;
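		/* A filler CQE only consumes strides and carries no packet data. */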
		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
		strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
		MLX5_ASSERT(strd_idx < strd_n);
		MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
		pkt = rte_pktmbuf_alloc(rxq->mp);
		if (unlikely(pkt == NULL)) {
			++rxq->stats.rx_nombuf;
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
					   strd_idx, strd_cnt);
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(pkt);
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
		rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
		if (cqe->lro_num_seg > 1) {
			mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
					    cqe, mcqe, rxq, len);
			pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
			pkt->tso_segsz = len / cqe->lro_num_seg;
		PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
		/* Return packet. */
	/* Update the consumer indexes. */
	rxq->consumed_strd = consumed_strd;
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	if (rq_ci != rxq->rq_ci) {
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
 * Dummy DPDK callback for RX.
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *   Generic pointer to RX queue structure.
 *   Array to store received packets.
 *   Maximum number of packets in array.
 *   Number of packets successfully received (<= pkts_n).
removed_rx_burst(void *dpdk_rxq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
 * Vectorized Rx routines are not compiled in when required vector instructions
 * are not supported on a target architecture.
 * The following null stubs are needed for linkage when those are not included
 * in the build (e.g. mlx5_rxtx_vec_sse.c for x86).
mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
		  struct rte_mbuf **pkts __rte_unused,
		  uint16_t pkts_n __rte_unused)

mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
		       struct rte_mbuf **pkts __rte_unused,
		       uint16_t pkts_n __rte_unused)

mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)

mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)