/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
#define RTE_PMD_MLX5_RXTX_VEC_H_

#include <rte_common.h>
#include <rte_mbuf.h>

#include "mlx5_autoconf.h"
#include "mlx5_prm.h"

/*
 * Compile time sanity check for vectorized functions.
 */

#define S_ASSERT_RTE_MBUF(s) \
        static_assert(s, "A field of struct rte_mbuf is changed")
#define S_ASSERT_MLX5_CQE(s) \
        static_assert(s, "A field of struct mlx5_cqe is changed")

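/*
 * The vectorized Rx functions named in the comments below depend on the
 * relative layout of a few rte_mbuf and mlx5_cqe fields, so a layout change
 * in either structure must fail the build here rather than silently corrupt
 * received packets.  Illustrative sketch only (not one of the original
 * checks): a new layout assumption would be recorded the same way, e.g.
 *
 *	S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, packet_type) ==
 *			  offsetof(struct rte_mbuf, rx_descriptor_fields1));
 */
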
/* rxq_cq_decompress_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, hash) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);

/* rxq_cq_to_ptype_oflags_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, ol_flags) ==
                  offsetof(struct rte_mbuf, rearm_data) + 8);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, rearm_data) ==
                  RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));

/* rxq_burst_v() */
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, pkt_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
S_ASSERT_RTE_MBUF(offsetof(struct rte_mbuf, data_len) ==
                  offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
#if (RTE_CACHE_LINE_SIZE == 128)
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 64);
#else
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
#endif
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
                  offsetof(struct mlx5_cqe, pkt_info) + 12);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
                  sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
                  offsetof(struct mlx5_cqe, hdr_type_etc));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
                  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
                  sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
                  offsetof(struct mlx5_cqe, byte_cnt));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
                  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, op_own) ==
                  offsetof(struct mlx5_cqe, sop_drop_qpn) + 7);

/**
 * Replenish buffers for RX in bulk.
 *
 * @param rxq
 *   Pointer to RX queue structure.
 * @param n
 *   Number of buffers to be replenished.
 */
static inline void
mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
{
        const uint16_t q_n = 1 << rxq->elts_n;
        const uint16_t q_mask = q_n - 1;
        uint16_t elts_idx = rxq->rq_ci & q_mask;
        struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
        volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
        unsigned int i;

        assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
        assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
        assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
        /* Not to cross queue end. */
        n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
        if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
                rxq->stats.rx_nombuf += n;
                return;
        }
        for (i = 0; i < n; ++i)
                wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
                                              RTE_PKTMBUF_HEADROOM);
        rxq->rq_ci += n;
        /* Prevent overflowing into consumed mbufs. */
        elts_idx = rxq->rq_ci & q_mask;
        for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
                (*rxq->elts)[elts_idx + i] = &rxq->fake_mbuf;
        rte_io_wmb();
        *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
}

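/*
 * Illustrative usage sketch (not part of this header): a caller in the
 * vectorized Rx burst path is expected to honor the preconditions asserted
 * above, i.e. replenish only when at least MLX5_VPMD_RXQ_RPLNSH_THRESH
 * descriptors are free and never more than the free count.  The variable
 * name repl_n is hypothetical; q_n stands for 1 << rxq->elts_n as in the
 * function body.
 *
 *	uint16_t repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
 *
 *	if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
 *		mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
 */
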
#endif /* RTE_PMD_MLX5_RXTX_VEC_H_ */