/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2021 6WIND S.A.
 * Copyright 2021 Mellanox Technologies, Ltd
 */
#include <stdint.h>
#include <string.h>
#include <stdlib.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
#include <rte_flow.h>

#include <mlx5_prm.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>

#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
static __rte_always_inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe);

static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);

static __rte_always_inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);

static __rte_always_inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe);

static __rte_always_inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type);

static __rte_always_inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len);
/**
 * Internal function to compute the number of used descriptors in an RX queue.
 *
 * @param rxq
 *   The Rx queue.
 *
 * @return
 *   The number of used Rx descriptors.
 */
static uint32_t
rx_queue_count(struct mlx5_rxq_data *rxq)
{
	struct rxq_zip *zip = &rxq->zip;
	volatile struct mlx5_cqe *cqe;
	const unsigned int cqe_n = (1 << rxq->cqe_n);
	const unsigned int sges_n = (1 << rxq->sges_n);
	const unsigned int elts_n = (1 << rxq->elts_n);
	const unsigned int strd_n = RTE_BIT32(rxq->log_strd_num);
	const unsigned int cqe_cnt = cqe_n - 1;
	unsigned int cq_ci, used;

	/* If we are processing a compressed CQE. */
	if (zip->ai) {
		used = zip->cqe_cnt - zip->ai;
		cq_ci = zip->cq_ci;
	} else {
		used = 0;
		cq_ci = rxq->cq_ci;
	}
	cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
		int8_t op_own;
		unsigned int n;

		op_own = cqe->op_own;
		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
			n = rte_be_to_cpu_32(cqe->byte_cnt);
		else
			n = 1;
		cq_ci += n;
		used += n;
		cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
	}
	used = RTE_MIN(used * sges_n, elts_n * strd_n);
	return used;
}
/**
 * DPDK callback to check the status of an Rx descriptor.
 *
 * @param rx_queue
 *   The Rx queue.
 * @param[in] offset
 *   The index of the descriptor in the ring.
 *
 * @return
 *   The status of the Rx descriptor.
 */
int
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct mlx5_rxq_data *rxq = rx_queue;

	if (offset >= (1 << rxq->cqe_n)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (offset < rx_queue_count(rxq))
		return RTE_ETH_RX_DESC_DONE;
	return RTE_ETH_RX_DESC_AVAIL;
}
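
/*
 * Illustrative usage sketch (not part of this driver): applications reach
 * the callback above through the generic ethdev API. The port_id, queue_id
 * and offset values here are hypothetical.
 *
 *	#include <rte_ethdev.h>
 *
 *	int st = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
 *
 *	if (st == RTE_ETH_RX_DESC_DONE)
 *		; // a completed packet is waiting at this offset
 *	else if (st == RTE_ETH_RX_DESC_AVAIL)
 *		; // the descriptor is still free for hardware to fill
 */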
/**
 * DPDK callback to get the RX queue information.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param rx_queue_id
 *   Rx queue identifier.
 * @param qinfo
 *   Pointer to the RX queue information structure.
 */
void
mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		  struct rte_eth_rxq_info *qinfo)
{
	struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, rx_queue_id);
	struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, rx_queue_id);

	if (!rxq)
		return;
	qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
		    rxq->mprq_mp : rxq->mp;
	qinfo->conf.rx_thresh.pthresh = 0;
	qinfo->conf.rx_thresh.hthresh = 0;
	qinfo->conf.rx_thresh.wthresh = 0;
	qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
	qinfo->conf.rx_drop_en = 1;
	if (rxq_ctrl == NULL || rxq_ctrl->obj == NULL)
		qinfo->conf.rx_deferred_start = 0;
	else
		qinfo->conf.rx_deferred_start = 1;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
		RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
		RTE_BIT32(rxq->elts_n);
}
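
/*
 * Illustrative usage sketch (not part of this driver): the callback above
 * is invoked via the generic ethdev query. The port_id and queue_id values
 * are hypothetical.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
 *		printf("rxq %u: %u descriptors, drop_en=%u\n", queue_id,
 *		       qinfo.nb_desc, qinfo.conf.rx_drop_en);
 */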
/**
 * DPDK callback to get the RX packet burst mode information.
 *
 * @param dev
 *   Pointer to the device structure.
 * @param rx_queue_id
 *   Rx queue identifier.
 * @param mode
 *   Pointer to the burst mode information.
 *
 * @return
 *   0 on success, -EINVAL on failure.
 */
int
mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
		       uint16_t rx_queue_id __rte_unused,
		       struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);

	if (!rxq)
		return -EINVAL;
	if (pkt_burst == mlx5_rx_burst) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
	} else if (pkt_burst == mlx5_rx_burst_mprq) {
		snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
	} else if (pkt_burst == mlx5_rx_burst_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
#else
		return -EINVAL;
#endif
	} else if (pkt_burst == mlx5_rx_burst_mprq_vec) {
#if defined RTE_ARCH_X86_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector SSE");
#elif defined RTE_ARCH_ARM64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector Neon");
#elif defined RTE_ARCH_PPC_64
		snprintf(mode->info, sizeof(mode->info), "%s", "MPRQ Vector AltiVec");
#else
		return -EINVAL;
#endif
	} else {
		return -EINVAL;
	}
	return 0;
}
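
/*
 * Illustrative usage sketch (not part of this driver): querying the burst
 * mode string reported above through the generic ethdev API; port_id and
 * queue_id are hypothetical.
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */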
/**
 * DPDK callback to get the number of used descriptors in an Rx queue.
 *
 * @param rx_queue
 *   The Rx queue pointer.
 *
 * @return
 *   The number of used Rx descriptors.
 *   -EINVAL if the queue is invalid.
 */
uint32_t
mlx5_rx_queue_count(void *rx_queue)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	struct rte_eth_dev *dev;

	if (!rxq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	dev = &rte_eth_devices[rxq->port_id];
	if (dev->rx_pkt_burst == NULL ||
	    dev->rx_pkt_burst == removed_rx_burst) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return rx_queue_count(rxq);
}
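
/*
 * Illustrative usage sketch (not part of this driver): the count above is
 * exposed through the generic ethdev API; port_id and queue_id are
 * hypothetical.
 *
 *	int used = rte_eth_rx_queue_count(port_id, queue_id);
 *
 *	if (used >= 0)
 *		printf("queue %u holds %d filled descriptors\n",
 *		       queue_id, used);
 */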
#define CLB_VAL_IDX 0
#define CLB_MSK_IDX 1
static int
mlx5_monitor_callback(const uint64_t value,
		      const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
{
	const uint64_t m = opaque[CLB_MSK_IDX];
	const uint64_t v = opaque[CLB_VAL_IDX];

	return (value & m) == v ? -1 : 0;
}
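
/*
 * Note: per the rte_power_monitor_clb_t contract, returning 0 lets the
 * core enter the power-optimized state while -1 aborts the entry. Here -1
 * is returned once the masked owner field of the CQE matches the expected
 * value, i.e. a new completion has been written by hardware.
 */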
int mlx5_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
	struct mlx5_rxq_data *rxq = rx_queue;
	const unsigned int cqe_num = 1 << rxq->cqe_n;
	const unsigned int cqe_mask = cqe_num - 1;
	const uint16_t idx = rxq->cq_ci & cqe_num;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];

	if (unlikely(rxq->cqes == NULL)) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	pmc->addr = &cqe->op_own;
	pmc->opaque[CLB_VAL_IDX] = !!idx;
	pmc->opaque[CLB_MSK_IDX] = MLX5_CQE_OWNER_MASK;
	pmc->fn = mlx5_monitor_callback;
	pmc->size = sizeof(uint8_t);
	return 0;
}
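
/*
 * Illustrative usage sketch (not part of this driver): an application
 * obtains the condition above via the ethdev wrapper and sleeps on it with
 * the power-management intrinsics; port_id, queue_id and timeout_cycles
 * are hypothetical.
 *
 *	#include <rte_power_intrinsics.h>
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
 *		rte_power_monitor(&pmc,
 *				  rte_get_tsc_cycles() + timeout_cycles);
 */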
/**
 * Translate RX completion flags to packet type.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] cqe
 *   Pointer to CQE.
 * @param[in] mcqe
 *   Pointer to mini-CQE, may be NULL.
 *
 * @note: fix mlx5_dev_supported_ptypes_get() if any change here.
 *
 * @return
 *   Packet type for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		   volatile struct mlx5_mini_cqe8 *mcqe)
{
	uint8_t idx;
	uint8_t ptype;
	uint8_t pinfo = (cqe->pkt_info & 0x3) << 6;

	/* Get L3/L4 header from mini-CQE in case of L3/L4 format. */
	if (mcqe == NULL ||
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		ptype = (cqe->hdr_type_etc & 0xfc00) >> 10;
	else
		ptype = mcqe->hdr_type >> 2;
	/*
	 * The index to the array should have:
	 * bit[1:0] = l3_hdr_type
	 * bit[4:2] = l4_hdr_type
	 * bit[5] = ip_frag
	 * bit[6] = tunneled
	 * bit[7] = outer_l3_type
	 */
	idx = pinfo | ptype;
	return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}
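
/*
 * The tail of rxq_cq_to_pkt_type() above selects the tunnel flags without
 * a branch, relying on the identity a * !!b == (b ? a : 0). An equivalent,
 * branching sketch:
 *
 *	if (idx & (1 << 6))		// bit[6] = tunneled
 *		return mlx5_ptype_table[idx] | rxq->tunnel;
 *	return mlx5_ptype_table[idx];
 */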
/**
 * Initialize Rx WQ and indexes.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 */
void
mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
{
	const unsigned int wqe_n = 1 << rxq->elts_n;
	unsigned int i;

	for (i = 0; (i != wqe_n); ++i) {
		volatile struct mlx5_wqe_data_seg *scat;
		uintptr_t addr;
		uint32_t byte_count;
		uint32_t lkey;

		if (mlx5_rxq_mprq_enabled(rxq)) {
			struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];

			scat = &((volatile struct mlx5_wqe_mprq *)
				 rxq->wqes)[i].dseg;
			addr = (uintptr_t)mlx5_mprq_buf_addr
					(buf, RTE_BIT32(rxq->log_strd_num));
			byte_count = RTE_BIT32(rxq->log_strd_sz) *
				     RTE_BIT32(rxq->log_strd_num);
			lkey = mlx5_rx_addr2mr(rxq, addr);
		} else {
			struct rte_mbuf *buf = (*rxq->elts)[i];

			scat = &((volatile struct mlx5_wqe_data_seg *)
				 rxq->wqes)[i];
			addr = rte_pktmbuf_mtod(buf, uintptr_t);
			byte_count = DATA_LEN(buf);
			lkey = mlx5_rx_mb2mr(rxq, buf);
		}
		/* scat->addr must be able to store a pointer. */
		MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
		*scat = (struct mlx5_wqe_data_seg){
			.addr = rte_cpu_to_be_64(addr),
			.byte_count = rte_cpu_to_be_32(byte_count),
			.lkey = lkey,
		};
	}
	rxq->consumed_strd = 0;
	rxq->decompressed = 0;
	rxq->zip = (struct rxq_zip){
		.ai = 0,
	};
	rxq->elts_ci = mlx5_rxq_mprq_enabled(rxq) ?
		(wqe_n >> rxq->sges_n) * RTE_BIT32(rxq->log_strd_num) : 0;
	/* Update doorbell counter. */
	rxq->rq_ci = wqe_n >> rxq->sges_n;
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}
/**
 * Handle an Rx error.
 * The function moves the RQ state to RESET when the first error CQE is
 * seen; the caller's loop then drains the CQ. Once the CQ is empty, the RQ
 * state is moved to READY and the RQ is reinitialized.
 * Identifying the next CQE and counting errors remain the caller's
 * responsibility.
 *
 * @param[in] rxq
 *   Pointer to RX queue structure.
 * @param[in] vec
 *   1 when called from a vectorized Rx burst; mbufs must be prepared for
 *   the RQ. 0 when called from a non-vectorized Rx burst.
 *
 * @return
 *   -1 in case of recovery error, otherwise the CQE status.
 */
int
mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
{
	const uint16_t cqe_n = 1 << rxq->cqe_n;
	const uint16_t cqe_mask = cqe_n - 1;
	const uint16_t wqe_n = 1 << rxq->elts_n;
	const uint16_t strd_n = RTE_BIT32(rxq->log_strd_num);
	struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
	union {
		volatile struct mlx5_cqe *cqe;
		volatile struct mlx5_err_cqe *err_cqe;
	} u = {
		.cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask],
	};
	struct mlx5_mp_arg_queue_state_modify sm;
	int ret;

	switch (rxq->err_state) {
	case MLX5_RXQ_ERR_STATE_NO_ERROR:
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_RESET;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_RESET:
		sm.is_wq = 1;
		sm.queue_id = rxq->idx;
		sm.state = IBV_WQS_RESET;
		if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
			return -1;
		if (rxq_ctrl->dump_file_n <
		    RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
			MKSTR(err_str, "Unexpected CQE error syndrome "
			      "0x%02x CQN = %u RQN = %u wqe_counter = %u"
			      " rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
			      rxq->cqn, rxq_ctrl->wqn,
			      rte_be_to_cpu_16(u.err_cqe->wqe_counter),
			      rxq->rq_ci << rxq->sges_n, rxq->cq_ci);
			MKSTR(name, "dpdk_mlx5_port_%u_rxq_%u_%u",
			      rxq->port_id, rxq->idx, (uint32_t)rte_rdtsc());
			mlx5_dump_debug_information(name, NULL, err_str, 0);
			mlx5_dump_debug_information(name, "MLX5 Error CQ:",
						    (const void *)((uintptr_t)
								   rxq->cqes),
						    sizeof(*u.cqe) * cqe_n);
			mlx5_dump_debug_information(name, "MLX5 Error RQ:",
						    (const void *)((uintptr_t)
								   rxq->wqes),
						    16 * wqe_n);
			rxq_ctrl->dump_file_n++;
		}
		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
		/* Fall-through */
	case MLX5_RXQ_ERR_STATE_NEED_READY:
		ret = check_cqe(u.cqe, cqe_n, rxq->cq_ci);
		if (ret == MLX5_CQE_STATUS_HW_OWN) {
			rte_io_wmb();
			*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
			rte_io_wmb();
			/*
			 * The RQ consumer index must be zeroed while moving
			 * from RESET state to RDY state.
			 */
			*rxq->rq_db = rte_cpu_to_be_32(0);
			rte_io_wmb();
			sm.is_wq = 1;
			sm.queue_id = rxq->idx;
			sm.state = IBV_WQS_RDY;
			if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
				return -1;
			if (vec) {
				const uint32_t elts_n =
					mlx5_rxq_mprq_enabled(rxq) ?
					wqe_n * strd_n : wqe_n;
				const uint32_t e_mask = elts_n - 1;
				uint32_t elts_ci =
					mlx5_rxq_mprq_enabled(rxq) ?
					rxq->elts_ci : rxq->rq_ci;
				uint32_t elt_idx;
				struct rte_mbuf **elt;
				int i;
				unsigned int n = elts_n - (elts_ci -
							   rxq->rq_pi);

				for (i = 0; i < (int)n; ++i) {
					elt_idx = (elts_ci + i) & e_mask;
					elt = &(*rxq->elts)[elt_idx];
					*elt = rte_mbuf_raw_alloc(rxq->mp);
					if (!*elt) {
						for (i--; i >= 0; --i) {
							elt_idx = (elts_ci +
								   i) & e_mask;
							elt = &(*rxq->elts)
								[elt_idx];
							rte_pktmbuf_free_seg
								(*elt);
						}
						return -1;
					}
				}
				for (i = 0; i < (int)elts_n; ++i) {
					elt = &(*rxq->elts)[i];
					DATA_LEN(*elt) =
						(uint16_t)((*elt)->buf_len -
						rte_pktmbuf_headroom(*elt));
				}
				/* Padding with a fake mbuf for vectorized Rx. */
				for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
					(*rxq->elts)[elts_n + i] =
								&rxq->fake_mbuf;
			}
			mlx5_rxq_initialize(rxq);
			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
		}
		return ret;
	default:
		return -1;
	}
}
/**
 * Get size of the next packet for a given CQE. For compressed CQEs, the
 * consumer index is updated only once all packets of the current one have
 * been processed.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param cqe
 *   CQE to process.
 * @param cqe_cnt
 *   CQE ring mask (number of entries minus one).
 * @param[out] mcqe
 *   Store pointer to mini-CQE if compressed. Otherwise, the pointer is not
 *   modified.
 *
 * @return
 *   0 in case of empty CQE, otherwise the packet size in bytes.
 */
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
		 uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
	struct rxq_zip *zip = &rxq->zip;
	uint16_t cqe_n = cqe_cnt + 1;
	int len;
	uint16_t idx, end;

	do {
		len = 0;
		/* Process compressed data in the CQE and mini arrays. */
		if (zip->ai) {
			volatile struct mlx5_mini_cqe8 (*mc)[8] =
				(volatile struct mlx5_mini_cqe8 (*)[8])
				(uintptr_t)(&(*rxq->cqes)[zip->ca &
							  cqe_cnt].pkt_info);
			len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
					       rxq->byte_mask);
			*mcqe = &(*mc)[zip->ai & 7];
			if ((++zip->ai & 7) == 0) {
				/* Invalidate consumed CQEs. */
				idx = zip->ca;
				end = zip->na;
				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				/*
				 * Increment consumer index to skip the number
				 * of CQEs consumed. Hardware leaves holes in
				 * the CQ ring for software use.
				 */
				zip->ca = zip->na;
				zip->na += 8;
			}
			if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
				/* Invalidate the rest. */
				idx = zip->ca;
				end = zip->cq_ci;
				while (idx != end) {
					(*rxq->cqes)[idx & cqe_cnt].op_own =
						MLX5_CQE_INVALIDATE;
					++idx;
				}
				rxq->cq_ci = zip->cq_ci;
				zip->ai = 0;
			}
		/*
		 * No compressed data, get next CQE and verify if it is
		 * compressed.
		 */
		} else {
			int ret;
			int8_t op_own;
			uint32_t cq_ci;

			ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
			if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
				if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
					     rxq->err_state)) {
					ret = mlx5_rx_err_handle(rxq, 0);
					if (ret == MLX5_CQE_STATUS_HW_OWN ||
					    ret == -1)
						return 0;
				} else {
					return 0;
				}
			}
			/*
			 * Use a local variable so that the cq_ci index in the
			 * queue structure always stays consistent with an
			 * actual CQE boundary (never pointing into the middle
			 * of a compressed CQE session).
			 */
			cq_ci = rxq->cq_ci + 1;
			op_own = cqe->op_own;
			if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
				volatile struct mlx5_mini_cqe8 (*mc)[8] =
					(volatile struct mlx5_mini_cqe8 (*)[8])
					(uintptr_t)(&(*rxq->cqes)
						[cq_ci & cqe_cnt].pkt_info);

				/* Fix endianness. */
				zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
				/*
				 * Current mini array position is the one
				 * returned by check_cqe64().
				 *
				 * If completion comprises several mini arrays,
				 * as a special case the second one is located
				 * 7 CQEs after the initial CQE instead of 8
				 * for subsequent ones.
				 */
				zip->ca = cq_ci;
				zip->na = zip->ca + 7;
				/* Compute the next non-compressed CQE. */
				zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
				/* Get packet size to return. */
				len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
						       rxq->byte_mask);
				*mcqe = &(*mc)[0];
				zip->ai = 1;
				/* Prefetch all CQEs to be invalidated. */
				idx = zip->ca;
				end = zip->cq_ci;
				while (idx != end) {
					rte_prefetch0(&(*rxq->cqes)[(idx) &
								    cqe_cnt]);
					++idx;
				}
			} else {
				rxq->cq_ci = cq_ci;
				len = rte_be_to_cpu_32(cqe->byte_cnt);
			}
		}
		if (unlikely(rxq->err_state)) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			++rxq->stats.idropped;
		} else {
			return len;
		}
	} while (1);
}
/**
 * Translate RX completion flags to offload flags.
 *
 * @param[in] cqe
 *   Pointer to CQE.
 *
 * @return
 *   Offload flags (ol_flags) for struct rte_mbuf.
 */
static inline uint32_t
rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
{
	uint32_t ol_flags = 0;
	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);

	ol_flags =
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L3_HDR_VALID,
			  RTE_MBUF_F_RX_IP_CKSUM_GOOD) |
		TRANSPOSE(flags,
			  MLX5_CQE_RX_L4_HDR_VALID,
			  RTE_MBUF_F_RX_L4_CKSUM_GOOD);
	return ol_flags;
}
/**
 * Fill in mbuf fields from RX completion flags.
 * Note that pkt->ol_flags should be initialized outside of this function.
 *
 * @param rxq
 *   Pointer to RX queue.
 * @param pkt
 *   mbuf to fill.
 * @param cqe
 *   CQE to process.
 * @param mcqe
 *   Mini-CQE to process, may be NULL.
 */
static inline void
rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
	       volatile struct mlx5_cqe *cqe,
	       volatile struct mlx5_mini_cqe8 *mcqe)
{
	/* Update packet information. */
	pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe, mcqe);
	pkt->port = unlikely(rxq->shared) ? cqe->user_index_low : rxq->port_id;

	if (rxq->rss_hash) {
		uint32_t rss_hash_res = 0;

		/* If compressed, take hash result from mini-CQE. */
		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_HASH)
			rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
		else
			rss_hash_res = rte_be_to_cpu_32(mcqe->rx_hash_result);
		if (rss_hash_res) {
			pkt->hash.rss = rss_hash_res;
			pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		}
	}
	if (rxq->mark) {
		uint32_t mark = 0;

		/* If compressed, take flow tag from mini-CQE. */
		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			mark = cqe->sop_drop_qpn;
		else
			mark = ((mcqe->byte_cnt_flow & 0xff) << 8) |
				(mcqe->flow_tag_high << 16);
		if (MLX5_FLOW_MARK_IS_VALID(mark)) {
			pkt->ol_flags |= RTE_MBUF_F_RX_FDIR;
			if (mark != RTE_BE32(MLX5_FLOW_MARK_DEFAULT)) {
				pkt->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
				pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
			}
		}
	}
	if (rxq->dynf_meta) {
		uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
			rxq->flow_meta_port_mask;

		if (meta) {
			pkt->ol_flags |= rxq->flow_meta_mask;
			*RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
					   uint32_t *) = meta;
		}
	}
	if (rxq->csum)
		pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
	if (rxq->vlan_strip) {
		bool vlan_strip;

		if (mcqe == NULL ||
		    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
			vlan_strip = cqe->hdr_type_etc &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
		else
			vlan_strip = mcqe->hdr_type &
				     RTE_BE16(MLX5_CQE_VLAN_STRIPPED);
		if (vlan_strip) {
			pkt->ol_flags |= RTE_MBUF_F_RX_VLAN |
					 RTE_MBUF_F_RX_VLAN_STRIPPED;
			pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
		}
	}
	if (rxq->hw_timestamp) {
		uint64_t ts = rte_be_to_cpu_64(cqe->timestamp);

		if (rxq->rt_timestamp)
			ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
		mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
		pkt->ol_flags |= rxq->timestamp_rx_flag;
	}
}
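
/*
 * Illustrative sketch (not part of this driver): an application reads the
 * timestamp registered above through the mbuf dynamic field API. Error
 * handling is omitted.
 *
 *	#include <rte_mbuf_dyn.h>
 *
 *	int off = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
 *					   NULL);
 *	uint64_t ts = *RTE_MBUF_DYNFIELD(pkt, off, rte_mbuf_timestamp_t *);
 */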
/**
 * DPDK callback for RX.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
	const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
	const unsigned int sges_n = rxq->sges_n;
	struct rte_mbuf *pkt = NULL;
	struct rte_mbuf *seg = NULL;
	volatile struct mlx5_cqe *cqe =
		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
	unsigned int i = 0;
	unsigned int rq_ci = rxq->rq_ci << sges_n;
	int len = 0; /* keep its value across iterations. */

	while (pkts_n) {
		unsigned int idx = rq_ci & wqe_cnt;
		volatile struct mlx5_wqe_data_seg *wqe =
			&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
		struct rte_mbuf *rep = (*rxq->elts)[idx];
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;

		if (pkt)
			NEXT(seg) = rep;
		seg = rep;
		rte_prefetch0(seg);
		rte_prefetch0(cqe);
		rte_prefetch0(wqe);
		/* Allocate the buffer from the same pool. */
		rep = rte_mbuf_raw_alloc(seg->pool);
		if (unlikely(rep == NULL)) {
			++rxq->stats.rx_nombuf;
			if (!pkt) {
				/*
				 * No buffers before we even started,
				 * bail out silently.
				 */
				break;
			}
			while (pkt != seg) {
				MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
				rep = NEXT(pkt);
				NEXT(pkt) = NULL;
				NB_SEGS(pkt) = 1;
				rte_mbuf_raw_free(pkt);
				pkt = rep;
			}
			rq_ci >>= sges_n;
			++rq_ci;
			rq_ci <<= sges_n;
			break;
		}
		if (!pkt) {
			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
			if (!len) {
				rte_mbuf_raw_free(rep);
				break;
			}
			pkt = seg;
			MLX5_ASSERT(len >= (rxq->crc_present << 2));
			pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
			rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
			if (rxq->crc_present)
				len -= RTE_ETHER_CRC_LEN;
			PKT_LEN(pkt) = len;
			if (cqe->lro_num_seg > 1) {
				mlx5_lro_update_hdr
					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
					 mcqe, rxq, len);
				pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
				pkt->tso_segsz = len / cqe->lro_num_seg;
			}
		}
		DATA_LEN(rep) = DATA_LEN(seg);
		PKT_LEN(rep) = PKT_LEN(seg);
		SET_DATA_OFF(rep, DATA_OFF(seg));
		PORT(rep) = PORT(seg);
		(*rxq->elts)[idx] = rep;
		/*
		 * Fill NIC descriptor with the new buffer. The lkey and size
		 * of the buffers are already known, only the buffer address
		 * changes.
		 */
		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
		/* If there's only one MR, no need to replace LKey in WQE. */
		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
			wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
		if (len > DATA_LEN(seg)) {
			len -= DATA_LEN(seg);
			++NB_SEGS(pkt);
			++rq_ci;
			continue;
		}
		DATA_LEN(seg) = len;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		pkt = NULL;
		--pkts_n;
		++i;
		/* Align consumer index to the next stride. */
		rq_ci >>= sges_n;
		++rq_ci;
		rq_ci <<= sges_n;
	}
	if (unlikely(i == 0 && ((rq_ci >> sges_n) == rxq->rq_ci)))
		return 0;
	/* Update the consumer index. */
	rxq->rq_ci = rq_ci >> sges_n;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	rte_io_wmb();
	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}
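
/*
 * Illustrative usage sketch (not part of this driver): applications drive
 * this burst routine through the generic ethdev API; port_id, queue_id and
 * the application-defined process() are hypothetical.
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, burst,
 *				       RTE_DIM(burst));
 *
 *	for (uint16_t k = 0; k < nb; k++) {
 *		process(burst[k]);
 *		rte_pktmbuf_free(burst[k]);
 *	}
 */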
/**
 * Update LRO packet TCP header.
 * The HW LRO feature doesn't update the TCP header after coalescing the
 * TCP segments but supplies the information in the CQE for software to
 * fill it in.
 *
 * @param tcp
 *   Pointer to the TCP header.
 * @param cqe
 *   Pointer to the completion entry.
 * @param phcsum
 *   The L3 pseudo-header checksum.
 * @param l4_type
 *   L4 type of the packet, taken from the CQE.
 */
static __rte_always_inline void
mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
			volatile struct mlx5_cqe *__rte_restrict cqe,
			uint32_t phcsum, uint8_t l4_type)
{
	/*
	 * The HW calculates only the TCP payload checksum; the TCP header
	 * checksum and the L3 pseudo-header checksum must be completed by SW.
	 */
	uint32_t csum = phcsum + cqe->csum;

	if (l4_type == MLX5_L4_HDR_TYPE_TCP_EMPTY_ACK ||
	    l4_type == MLX5_L4_HDR_TYPE_TCP_WITH_ACL) {
		tcp->tcp_flags |= RTE_TCP_ACK_FLAG;
		tcp->recv_ack = cqe->lro_ack_seq_num;
		tcp->rx_win = cqe->lro_tcp_win;
	}
	if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
		tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
	tcp->cksum = 0;
	csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
	csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
	csum = (~csum) & 0xffff;
	tcp->cksum = csum;
}
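
/*
 * Worked example of the one's-complement folding above (RFC 1071
 * arithmetic, illustrative values): assume the 32-bit accumulator holds
 * 0x0002fffd after adding the pseudo-header checksum, the HW payload
 * checksum and the raw TCP header sum:
 *
 *	csum = (0x0002fffd >> 16) + (0x0002fffd & 0xffff); // 0x2 + 0xfffd
 *	csum = (~0xffff) & 0xffff;                         // folds to 0
 *
 * The folded value is then written back to tcp->cksum.
 */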
/**
 * Update LRO packet headers.
 * The HW LRO feature doesn't update the L3/TCP headers after coalescing the
 * TCP segments but supplies the information in the CQE for software to
 * fill them in.
 *
 * @param padd
 *   The packet address.
 * @param cqe
 *   Pointer to the completion entry.
 * @param mcqe
 *   Pointer to mini-CQE, may be NULL.
 * @param rxq
 *   Pointer to RX queue structure.
 * @param len
 *   The packet length.
 */
static __rte_always_inline void
mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
		    volatile struct mlx5_cqe *__rte_restrict cqe,
		    volatile struct mlx5_mini_cqe8 *mcqe,
		    struct mlx5_rxq_data *rxq, uint32_t len)
{
	union {
		uint8_t *hdr;
		struct rte_ether_hdr *eth;
		struct rte_vlan_hdr *vlan;
		struct rte_ipv4_hdr *ipv4;
		struct rte_ipv6_hdr *ipv6;
		struct rte_tcp_hdr *tcp;
	} h = {
		.hdr = padd,
	};
	uint16_t proto = h.eth->ether_type;
	uint32_t phcsum;
	uint8_t l4_type;

	h.eth++;
	while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
	       proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
		proto = h.vlan->eth_proto;
		h.vlan++;
	}
	if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
		h.ipv4->time_to_live = cqe->lro_min_ttl;
		h.ipv4->total_length = rte_cpu_to_be_16(len - (h.hdr - padd));
		h.ipv4->hdr_checksum = 0;
		h.ipv4->hdr_checksum = rte_ipv4_cksum(h.ipv4);
		phcsum = rte_ipv4_phdr_cksum(h.ipv4, 0);
		h.ipv4++;
	} else {
		h.ipv6->hop_limits = cqe->lro_min_ttl;
		h.ipv6->payload_len = rte_cpu_to_be_16(len - (h.hdr - padd) -
						       sizeof(*h.ipv6));
		phcsum = rte_ipv6_phdr_cksum(h.ipv6, 0);
		h.ipv6++;
	}
	if (mcqe == NULL ||
	    rxq->mcqe_format != MLX5_CQE_RESP_FORMAT_L34H_STRIDX)
		l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	else
		l4_type = (rte_be_to_cpu_16(mcqe->hdr_type) &
			   MLX5_CQE_L4_TYPE_MASK) >> MLX5_CQE_L4_TYPE_SHIFT;
	mlx5_lro_update_tcp_hdr(h.tcp, cqe, phcsum, l4_type);
}
void
mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
{
	mlx5_mprq_buf_free_cb(NULL, buf);
}
/**
 * DPDK callback for RX with Multi-Packet RQ support.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	const uint32_t strd_n = RTE_BIT32(rxq->log_strd_num);
	const uint32_t strd_sz = RTE_BIT32(rxq->log_strd_sz);
	const uint32_t cq_mask = (1 << rxq->cqe_n) - 1;
	const uint32_t wq_mask = (1 << rxq->elts_n) - 1;
	volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
	unsigned int i = 0;
	uint32_t rq_ci = rxq->rq_ci;
	uint16_t consumed_strd = rxq->consumed_strd;
	struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];

	while (i < pkts_n) {
		struct rte_mbuf *pkt;
		int ret;
		uint32_t len;
		uint16_t strd_cnt;
		uint16_t strd_idx;
		uint32_t byte_cnt;
		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
		enum mlx5_rqx_code rxq_code;

		if (consumed_strd == strd_n) {
			/* Replace WQE if the buffer is still in use. */
			mprq_buf_replace(rxq, rq_ci & wq_mask);
			/* Advance to the next WQE. */
			consumed_strd = 0;
			++rq_ci;
			buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
		}
		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
		ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
		if (!ret)
			break;
		byte_cnt = ret;
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		if (mcqe &&
		    rxq->mcqe_format == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX)
			strd_cnt = (len / strd_sz) + !!(len % strd_sz);
		else
			strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
				   MLX5_MPRQ_STRIDE_NUM_SHIFT;
		MLX5_ASSERT(strd_cnt);
		consumed_strd += strd_cnt;
		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
			continue;
		strd_idx = rte_be_to_cpu_16(mcqe == NULL ?
					    cqe->wqe_counter :
					    mcqe->stride_idx);
		MLX5_ASSERT(strd_idx < strd_n);
		MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
			      wq_mask));
		pkt = rte_pktmbuf_alloc(rxq->mp);
		if (unlikely(pkt == NULL)) {
			++rxq->stats.rx_nombuf;
			break;
		}
		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
		if (rxq->crc_present)
			len -= RTE_ETHER_CRC_LEN;
		rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
					   strd_idx, strd_cnt);
		if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
			rte_pktmbuf_free_seg(pkt);
			if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
				++rxq->stats.idropped;
				continue;
			}
			if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
				++rxq->stats.rx_nombuf;
				break;
			}
		}
		rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
		if (cqe->lro_num_seg > 1) {
			mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
					    cqe, mcqe, rxq, len);
			pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
			pkt->tso_segsz = len / cqe->lro_num_seg;
		}
		PKT_LEN(pkt) = len;
		PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
		/* Increment bytes counter. */
		rxq->stats.ibytes += PKT_LEN(pkt);
#endif
		/* Return packet. */
		*(pkts++) = pkt;
		++i;
	}
	/* Update the consumer indexes. */
	rxq->consumed_strd = consumed_strd;
	rte_io_wmb();
	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
	if (rq_ci != rxq->rq_ci) {
		rxq->rq_ci = rq_ci;
		rte_io_wmb();
		*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
	}
#ifdef MLX5_PMD_SOFT_COUNTERS
	/* Increment packets counter. */
	rxq->stats.ipackets += i;
#endif
	return i;
}
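
/*
 * MPRQ stride accounting sketch (illustrative numbers): with
 * log_strd_sz = 11, i.e. 2048-byte strides, a 5000-byte packet occupies
 * ceil(5000 / 2048) strides:
 *
 *	strd_sz  = RTE_BIT32(11);                    // 2048
 *	strd_cnt = (5000 / 2048) + !!(5000 % 2048);  // 2 + 1 = 3 strides
 *
 * which matches the strd_cnt computation in the burst routine above.
 */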
/**
 * Dummy DPDK callback for RX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_rxq
 *   Generic pointer to RX queue structure.
 * @param[out] pkts
 *   Array to store received packets.
 * @param pkts_n
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received (<= pkts_n).
 */
uint16_t
removed_rx_burst(void *dpdk_rxq __rte_unused,
		 struct rte_mbuf **pkts __rte_unused,
		 uint16_t pkts_n __rte_unused)
{
	rte_mb();
	return 0;
}
/*
 * Vectorized Rx routines are not compiled in when required vector
 * instructions are not supported on a target architecture.
 * The following null stubs are needed for linkage when those routines are
 * not compiled in from another file (e.g. mlx5_rxtx_vec_sse.c for x86).
 */
__rte_weak uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
		  struct rte_mbuf **pkts __rte_unused,
		  uint16_t pkts_n __rte_unused)
{
	return 0;
}

__rte_weak uint16_t
mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
		       struct rte_mbuf **pkts __rte_unused,
		       uint16_t pkts_n __rte_unused)
{
	return 0;
}

__rte_weak int
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
	return -ENOTSUP;
}

__rte_weak int
mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
	return -ENOTSUP;
}