/*-
 *   BSD LICENSE
 *
 *   Copyright 2016 6WIND S.A.
 *   Copyright 2016 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_mempool.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"

struct mlx5_check_mempool_data {
	int ret;
	char *start;
	char *end;
};

/* Called by mlx5_check_mempool() when iterating the memory chunks. */
static void
mlx5_check_mempool_cb(struct rte_mempool *mp,
		      void *opaque, struct rte_mempool_memhdr *memhdr,
		      unsigned int mem_idx)
{
	struct mlx5_check_mempool_data *data = opaque;

	(void)mp;
	(void)mem_idx;

	/* It already failed, skip the next chunks. */
	if (data->ret != 0)
		return;
	/* It is the first chunk. */
	if (data->start == NULL && data->end == NULL) {
		data->start = memhdr->addr;
		data->end = data->start + memhdr->len;
		return;
	}
	/* The chunk immediately follows the current area, extend its end. */
	if (data->end == memhdr->addr) {
		data->end += memhdr->len;
		return;
	}
	/* The chunk immediately precedes the current area, extend its start. */
	if (data->start == (char *)memhdr->addr + memhdr->len) {
		data->start -= memhdr->len;
		return;
	}
	/* Error, mempool is not virtually contiguous. */
	data->ret = -1;
}
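
/*
 * Illustrative sketch, not part of the driver: mlx5_check_mempool_cb()
 * grows a single [start, end) area regardless of chunk order, so feeding
 * it two virtually adjacent chunks, higher half first, still yields one
 * contiguous area. The MLX5_MR_EXAMPLES guard is hypothetical and never
 * defined in this tree.
 */
#ifdef MLX5_MR_EXAMPLES
static void
example_chunk_merge(void)
{
	static char area[2 * 4096];
	struct mlx5_check_mempool_data data = { 0, NULL, NULL };
	struct rte_mempool_memhdr lo = { .addr = area, .len = 4096 };
	struct rte_mempool_memhdr hi = { .addr = area + 4096, .len = 4096 };

	/* First chunk initializes the area: [area + 4096, area + 8192). */
	mlx5_check_mempool_cb(NULL, &data, &hi, 0);
	/* Second chunk ends where the area starts, so start moves down. */
	mlx5_check_mempool_cb(NULL, &data, &lo, 1);
	/* Here data.ret == 0, data.start == area, data.end == area + 8192. */
}
#endif /* MLX5_MR_EXAMPLES */
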
/**
 * Check if a mempool can be used: it must be virtually contiguous.
 *
 * @param[in] mp
 *   Pointer to memory pool.
 * @param[out] start
 *   Pointer to the start address of the mempool virtual memory area.
 * @param[out] end
 *   Pointer to the end address of the mempool virtual memory area.
 *
 * @return
 *   0 on success (mempool is virtually contiguous), -1 on error.
 */
static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
	uintptr_t *end)
{
	struct mlx5_check_mempool_data data;

	memset(&data, 0, sizeof(data));
	rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
	*start = (uintptr_t)data.start;
	*end = (uintptr_t)data.end;

	return data.ret;
}

/**
 * Register mempool as a memory region.
 *
 * @param pd
 *   Pointer to protection domain.
 * @param mp
 *   Pointer to memory pool.
 *
 * @return
 *   Memory region pointer, NULL in case of error.
 */
struct ibv_mr *
mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	uintptr_t start;
	uintptr_t end;
	unsigned int i;

	if (mlx5_check_mempool(mp, &start, &end) != 0) {
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		return NULL;
	}

	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		unsigned int align = ms[i].hugepage_sz;

		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	return ibv_reg_mr(pd,
			  (void *)start,
			  end - start,
			  IBV_ACCESS_LOCAL_WRITE);
}
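
/*
 * Illustrative sketch, not part of the driver: typical lifecycle of an MR
 * returned by mlx5_mp2mr(). The pd argument would come from the device
 * private data (as in mlx5_txq_mp2mr_reg() below); MLX5_MR_EXAMPLES is a
 * hypothetical guard, never defined in this tree.
 */
#ifdef MLX5_MR_EXAMPLES
static int
example_mp2mr_lifecycle(struct ibv_pd *pd, struct rte_mempool *mp)
{
	struct ibv_mr *mr = mlx5_mp2mr(pd, mp);
	uint32_t lkey;

	if (mr == NULL)
		return -1;
	/* The hardware consumes the lkey in big-endian byte order. */
	lkey = rte_cpu_to_be_32(mr->lkey);
	(void)lkey;
	/* Deregister once no in-flight descriptor references the pool. */
	claim_zero(ibv_dereg_mr(mr));
	return 0;
}
#endif /* MLX5_MR_EXAMPLES */
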
/**
 * Register a Memory Region (MR) <-> Memory Pool (MP) association in
 * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
 *
 * This function should only be called by txq_mp2mr().
 *
 * @param txq
 *   Pointer to TX queue structure.
 * @param[in] mp
 *   Memory Pool for which a Memory Region lkey must be returned.
 * @param idx
 *   Index of the next available entry.
 *
 * @return
 *   mr->lkey on success, (uint32_t)-1 on failure.
 */
uint32_t
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
		   unsigned int idx)
{
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq, struct mlx5_txq_ctrl, txq);
	struct ibv_mr *mr;

	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq_ctrl, mp->name, (void *)mp);
	mr = mlx5_mp2mr(txq_ctrl->priv->pd, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq_ctrl);
		return (uint32_t)-1;
	}
	if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq_ctrl);
		--idx;
		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr));
		memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1],
			(sizeof(txq_ctrl->txq.mp2mr) -
			 sizeof(txq_ctrl->txq.mp2mr[0])));
	}
	/* Store the new entry. */
	txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
	txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
	txq_ctrl->txq.mp2mr[idx].mr = mr;
	txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey);
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq_ctrl, mp->name, (void *)mp,
	      txq_ctrl->txq.mp2mr[idx].lkey);
	return txq_ctrl->txq.mp2mr[idx].lkey;
}
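
/*
 * Illustrative sketch, not part of the driver: the kind of fast-path
 * lookup (txq_mp2mr(), declared elsewhere) that falls back on
 * mlx5_txq_mp2mr_reg() above on a cache miss. Only the mp2mr[] fields
 * already used in this file are assumed; MLX5_MR_EXAMPLES is a
 * hypothetical guard, never defined in this tree.
 */
#ifdef MLX5_MR_EXAMPLES
static uint32_t
example_txq_mp2mr(struct mlx5_txq_data *txq, struct rte_mempool *mp,
		  uintptr_t addr)
{
	unsigned int i;

	for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
		if (txq->mp2mr[i].mr == NULL) {
			/* Unknown MP, stop at the first free entry. */
			break;
		}
		if (txq->mp2mr[i].start <= addr && addr < txq->mp2mr[i].end)
			/* Hit, lkey is already in big-endian order. */
			return txq->mp2mr[i].lkey;
	}
	/* Miss, register the pool in the free (or recycled) slot. */
	return mlx5_txq_mp2mr_reg(txq, mp, i);
}
#endif /* MLX5_MR_EXAMPLES */
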
struct txq_mp2mr_mbuf_check_data {
	int ret;
};

/**
 * Callback function for rte_mempool_obj_iter() to check whether a given
 * mempool object looks like a mbuf.
 *
 * @param[in] mp
 *   The mempool pointer.
 * @param[in] arg
 *   Context data (struct txq_mp2mr_mbuf_check_data). Contains the
 *   return value.
 * @param[in] obj
 *   Object address.
 * @param index
 *   Object index, unused.
 */
static void
txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
	uint32_t index __rte_unused)
{
	struct txq_mp2mr_mbuf_check_data *data = arg;
	struct rte_mbuf *buf = obj;

	/*
	 * Check whether mbuf structure fits element size and whether mempool
	 * pointer is valid.
	 */
	if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
		data->ret = -1;
}

/**
 * Iterator function for rte_mempool_walk() to register existing mempools and
 * fill the MP to MR cache of a TX queue.
 *
 * @param[in] mp
 *   Memory Pool to register.
 * @param arg
 *   Pointer to TX queue structure.
 */
void
mlx5_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
	struct mlx5_txq_ctrl *txq_ctrl = arg;
	struct txq_mp2mr_mbuf_check_data data = {
		.ret = 0,
	};
	uintptr_t start;
	uintptr_t end;
	unsigned int i;

	/* Register mempool only if the first element looks like a mbuf. */
	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
	    data.ret == -1)
		return;
	if (mlx5_check_mempool(mp, &start, &end) != 0) {
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		return;
	}
	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
		struct ibv_mr *mr = txq_ctrl->txq.mp2mr[i].mr;

		if (unlikely(mr == NULL)) {
			/* Unknown MP, add a new MR for it. */
			break;
		}
		/* Already registered: the MR fully covers this mempool. */
		if (start >= (uintptr_t)mr->addr &&
		    end <= (uintptr_t)mr->addr + mr->length)
			return;
	}
	mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
}
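
/*
 * Illustrative sketch, not part of the driver: warming the MP to MR cache
 * at TX queue setup by walking every existing mempool, which is how
 * mlx5_txq_mp2mr_iter() is meant to be driven per its description above.
 * MLX5_MR_EXAMPLES is a hypothetical guard, never defined in this tree.
 */
#ifdef MLX5_MR_EXAMPLES
static void
example_warm_mp2mr_cache(struct mlx5_txq_ctrl *txq_ctrl)
{
	/* The iterator checks each pool and registers mbuf pools only. */
	rte_mempool_walk(mlx5_txq_mp2mr_iter, txq_ctrl);
}
#endif /* MLX5_MR_EXAMPLES */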