/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * @file
 * Memory management functions for mlx4 driver.
 */

#include <errno.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>

#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
struct mlx4_check_mempool_data {
	int ret;
	char *start;
	char *end;
};
/**
 * Called by mlx4_check_mempool() when iterating the memory chunks.
 *
 * @param[in] mp
 *   Pointer to memory pool (unused).
 * @param[in, out] opaque
 *   Pointer to shared buffer with mlx4_check_mempool().
 * @param[in] memhdr
 *   Pointer to mempool chunk header.
 * @param mem_idx
 *   Mempool element index (unused).
 */
static void
mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque,
		      struct rte_mempool_memhdr *memhdr,
		      unsigned int mem_idx)
{
	struct mlx4_check_mempool_data *data = opaque;

	(void)mp;
	(void)mem_idx;
	/* It already failed, skip the next chunks. */
	if (data->ret != 0)
		return;
	/* It is the first chunk. */
	if (data->start == NULL && data->end == NULL) {
		data->start = memhdr->addr;
		data->end = data->start + memhdr->len;
		return;
	}
	/* Chunk extends the current area upward. */
	if (data->end == memhdr->addr) {
		data->end += memhdr->len;
		return;
	}
	/* Chunk extends the current area downward. */
	if (data->start == (char *)memhdr->addr + memhdr->len) {
		data->start -= memhdr->len;
		return;
	}
	/* Error, mempool is not virtually contiguous. */
	data->ret = -1;
}
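/*
 * Worked example (illustrative, not from the original file): for chunks
 * reported in the order [0x2000, 0x3000), [0x3000, 0x4000) and
 * [0x1000, 0x2000), the callback first records start=0x2000/end=0x3000,
 * then extends end to 0x4000, then extends start down to 0x1000. The
 * mempool is thus recognized as the single contiguous range
 * [0x1000, 0x4000) and data->ret stays 0.
 */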
/**
 * Check if a mempool can be used: it must be virtually contiguous.
 *
 * @param[in] mp
 *   Pointer to memory pool.
 * @param[out] start
 *   Pointer to the start address of the mempool virtual memory area.
 * @param[out] end
 *   Pointer to the end address of the mempool virtual memory area.
 *
 * @return
 *   0 on success (mempool is virtually contiguous), -1 on error.
 */
static int
mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end)
{
	struct mlx4_check_mempool_data data;

	memset(&data, 0, sizeof(data));
	rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
	*start = (uintptr_t)data.start;
	*end = (uintptr_t)data.end;
	return data.ret;
}
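/*
 * Usage sketch (illustrative, not part of the original file): reject a
 * fragmented mempool up front, e.g. during queue setup. Only mempools
 * whose chunks form one contiguous virtual range can be covered by a
 * single memory region.
 */
static int __rte_unused
mlx4_mempool_usable(struct rte_mempool *mp)
{
	uintptr_t start;
	uintptr_t end;

	/* Nonzero when [start, end) covers every chunk of mp. */
	return mlx4_check_mempool(mp, &start, &end) == 0;
}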
/**
 * Register mempool as a memory region.
 *
 * @param pd
 *   Pointer to protection domain.
 * @param mp
 *   Pointer to memory pool.
 *
 * @return
 *   Memory region pointer, NULL in case of error and rte_errno is set.
 */
struct ibv_mr *
mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	uintptr_t start;
	uintptr_t end;
	unsigned int i;
	struct ibv_mr *mr;

	if (mlx4_check_mempool(mp, &start, &end) != 0) {
		rte_errno = EINVAL;
		ERROR("mempool %p: not virtually contiguous",
		      (void *)mp);
		return NULL;
	}
	DEBUG("mempool %p area start=%p end=%p size=%zu",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	/* Round start and end to page boundary if found in memory segments. */
	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
		uintptr_t addr = (uintptr_t)ms[i].addr;
		size_t len = ms[i].len;
		unsigned int align = ms[i].hugepage_sz;

		if ((start > addr) && (start < addr + len))
			start = RTE_ALIGN_FLOOR(start, align);
		if ((end > addr) && (end < addr + len))
			end = RTE_ALIGN_CEIL(end, align);
	}
	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
	      (void *)mp, (void *)start, (void *)end,
	      (size_t)(end - start));
	mr = ibv_reg_mr(pd,
			(void *)start,
			end - start,
			IBV_ACCESS_LOCAL_WRITE);
	if (mr == NULL)
		rte_errno = errno ? errno : EINVAL;
	return mr;
}
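/*
 * Usage sketch (illustrative, not part of the original file): register a
 * mempool and extract the lkey that work requests need. A real caller
 * must also keep the struct ibv_mr pointer in order to deregister it
 * later, as mlx4_txq_add_mr() below does.
 */
static uint32_t __rte_unused
mlx4_mp2mr_lkey(struct ibv_pd *pd, struct rte_mempool *mp)
{
	struct ibv_mr *mr = mlx4_mp2mr(pd, mp);

	if (mr == NULL)
		return (uint32_t)-1; /* rte_errno is set by mlx4_mp2mr(). */
	return mr->lkey; /* Sketch only: the MR itself is never released. */
}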
/**
 * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[].
 * If mp2mr[] is full, remove an entry first.
 *
 * @param txq
 *   Pointer to Tx queue structure.
 * @param[in] mp
 *   Memory pool for which a memory region lkey must be added.
 * @param[in] i
 *   Index in txq->mp2mr[] where to store the new entry.
 *
 * @return
 *   Added mr->lkey on success, (uint32_t)-1 on failure.
 */
uint32_t
mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i)
{
	struct ibv_mr *mr;

	/* Add a new entry, register MR first. */
	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
	      (void *)txq, mp->name, (void *)mp);
	mr = mlx4_mp2mr(txq->priv->pd, mp);
	if (unlikely(mr == NULL)) {
		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
		      (void *)txq);
		return (uint32_t)-1;
	}
	if (unlikely(i == RTE_DIM(txq->mp2mr))) {
		/* Table is full, remove oldest entry. */
		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
		      (void *)txq);
		--i;
		claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
	}
	/* Store the new entry. */
	txq->mp2mr[i].mp = mp;
	txq->mp2mr[i].mr = mr;
	txq->mp2mr[i].lkey = mr->lkey;
	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
	      (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
	return txq->mp2mr[i].lkey;
}
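/*
 * Lookup sketch (illustrative, not part of the original file): the Tx
 * burst path typically scans txq->mp2mr[] for the mbuf's mempool and
 * falls back to mlx4_txq_add_mr() on a miss; the real driver keeps a
 * similar inline helper next to the Tx queue code.
 */
static uint32_t __rte_unused
mlx4_txq_mp2mr_sketch(struct txq *txq, struct rte_mempool *mp)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
		if (txq->mp2mr[i].mp == NULL)
			break; /* Empty slot: mp is unknown, register it. */
		if (txq->mp2mr[i].mp == mp)
			return txq->mp2mr[i].lkey; /* Cache hit. */
	}
	return mlx4_txq_add_mr(txq, mp, i);
}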