diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index bb44041544..6b29eed559 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -34,21 +34,15 @@
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
+#pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 #include <infiniband/verbs.h>
 #ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
+#pragma GCC diagnostic error "-Wpedantic"
 #endif

-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-pedantic"
-#endif
 #include <rte_mempool.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-pedantic"
-#endif
+#include <rte_malloc.h>

 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -117,60 +111,14 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
 	return data.ret;
 }

-/**
- * Register mempool as a memory region.
- *
- * @param pd
- *   Pointer to protection domain.
- * @param mp
- *   Pointer to memory pool.
- *
- * @return
- *   Memory region pointer, NULL in case of error.
- */
-struct ibv_mr *
-mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
-{
-	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-	uintptr_t start;
-	uintptr_t end;
-	unsigned int i;
-
-	if (mlx5_check_mempool(mp, &start, &end) != 0) {
-		ERROR("mempool %p: not virtually contiguous",
-		      (void *)mp);
-		return NULL;
-	}
-
-	DEBUG("mempool %p area start=%p end=%p size=%zu",
-	      (void *)mp, (void *)start, (void *)end,
-	      (size_t)(end - start));
-	/* Round start and end to page boundary if found in memory segments. */
-	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
-		uintptr_t addr = (uintptr_t)ms[i].addr;
-		size_t len = ms[i].len;
-		unsigned int align = ms[i].hugepage_sz;
-
-		if ((start > addr) && (start < addr + len))
-			start = RTE_ALIGN_FLOOR(start, align);
-		if ((end > addr) && (end < addr + len))
-			end = RTE_ALIGN_CEIL(end, align);
-	}
-	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
-	      (void *)mp, (void *)start, (void *)end,
-	      (size_t)(end - start));
-	return ibv_reg_mr(pd,
-			  (void *)start,
-			  end - start,
-			  IBV_ACCESS_LOCAL_WRITE);
-}
-
 /**
  * Register a Memory Region (MR) <-> Memory Pool (MP) association in
  * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
  *
  * This function should only be called by txq_mp2mr().
  *
+ * @param priv
+ *   Pointer to private structure.
  * @param txq
  *   Pointer to TX queue structure.
  * @param[in] mp
@@ -179,41 +127,75 @@ mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
  *   Index of the next available entry.
  *
  * @return
- *   mr->lkey on success, (uint32_t)-1 on failure.
+ *   mr on success, NULL on failure.
  */
-uint32_t
-txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx)
+struct mlx5_mr *
+priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
+		   struct rte_mempool *mp, unsigned int idx)
 {
-	struct ibv_mr *mr;
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
+	struct mlx5_mr *mr;

 	/* Add a new entry, register MR first. */
 	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
-	      (void *)txq, mp->name, (void *)mp);
-	mr = mlx5_mp2mr(txq->priv->pd, mp);
+	      (void *)txq_ctrl, mp->name, (void *)mp);
+	mr = priv_mr_get(priv, mp);
+	if (mr == NULL)
+		mr = priv_mr_new(priv, mp);
 	if (unlikely(mr == NULL)) {
 		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
-		      (void *)txq);
-		return (uint32_t)-1;
+		      (void *)txq_ctrl);
+		return NULL;
 	}
 	if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
 		/* Table is full, remove oldest entry. */
 		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
-		      (void *)txq);
+		      (void *)txq_ctrl);
 		--idx;
-		claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
+		priv_mr_release(priv, txq->mp2mr[0]);
 		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
 			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
 	}
 	/* Store the new entry. */
-	txq->mp2mr[idx].mp = mp;
-	txq->mp2mr[idx].mr = mr;
-	txq->mp2mr[idx].lkey = mr->lkey;
-	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
-	      (void *)txq, mp->name, (void *)mp, txq->mp2mr[idx].lkey);
-	return txq->mp2mr[idx].lkey;
+	txq_ctrl->txq.mp2mr[idx] = mr;
+	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIx32,
+	      (void *)txq_ctrl, mp->name, (void *)mp,
+	      txq_ctrl->txq.mp2mr[idx]->lkey);
+	return mr;
+}
+
+/**
+ * Register a Memory Region (MR) <-> Memory Pool (MP) association in
+ * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ *
+ * This function should only be called by txq_mp2mr().
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param[in] mp
+ *   Memory Pool for which a Memory Region lkey must be returned.
+ * @param idx
+ *   Index of the next available entry.
+ *
+ * @return
+ *   mr on success, NULL on failure.
+ */
+struct mlx5_mr *
+mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
+		   unsigned int idx)
+{
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
+	struct mlx5_mr *mr;
+
+	priv_lock(txq_ctrl->priv);
+	mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
+	priv_unlock(txq_ctrl->priv);
+	return mr;
 }

-struct txq_mp2mr_mbuf_check_data {
+struct mlx5_mp2mr_mbuf_check_data {
 	int ret;
 };

@@ -235,7 +217,7 @@ static void
 txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
 	uint32_t index __rte_unused)
 {
-	struct txq_mp2mr_mbuf_check_data *data = arg;
+	struct mlx5_mp2mr_mbuf_check_data *data = arg;
 	struct rte_mbuf *buf = obj;

 	/*
@@ -256,25 +238,158 @@ txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- *   Pointer to TX queue structure.
+ *   Pointer to private structure.
  */
 void
-txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
+mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
 {
-	struct txq *txq = arg;
-	struct txq_mp2mr_mbuf_check_data data = {
+	struct priv *priv = (struct priv *)arg;
+	struct mlx5_mp2mr_mbuf_check_data data = {
 		.ret = 0,
 	};
-	unsigned int i;
+	struct mlx5_mr *mr;

 	/* Register mempool only if the first element looks like a mbuf. */
 	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
 			data.ret == -1)
 		return;
-	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
-		if (unlikely(txq->mp2mr[i].mp == NULL)) {
-			/* Unknown MP, add a new MR for it. */
-			break;
+	mr = priv_mr_get(priv, mp);
+	if (mr) {
+		priv_mr_release(priv, mr);
+		return;
+	}
+	priv_mr_new(priv, mp);
+}
+
+/**
+ * Register a new memory region from the mempool and store it in the memory
+ * region list.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param mp
+ *   Pointer to the memory pool to register.
+ *
+ * @return
+ *   The memory region on success, NULL on failure.
+ */
+struct mlx5_mr *
+priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+{
+	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+	uintptr_t start;
+	uintptr_t end;
+	unsigned int i;
+	struct mlx5_mr *mr;
+
+	mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
+	if (!mr) {
+		DEBUG("unable to allocate memory region structure for mempool"
+		      " %p", (void *)mp);
+		return NULL;
+	}
+	if (mlx5_check_mempool(mp, &start, &end) != 0) {
+		ERROR("mempool %p: not virtually contiguous",
+		      (void *)mp);
+		rte_free(mr);
+		return NULL;
+	}
+	DEBUG("mempool %p area start=%p end=%p size=%zu",
+	      (void *)mp, (void *)start, (void *)end,
+	      (size_t)(end - start));
+	/* Round start and end to page boundary if found in memory segments. */
+	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
+		uintptr_t addr = (uintptr_t)ms[i].addr;
+		size_t len = ms[i].len;
+		unsigned int align = ms[i].hugepage_sz;
+
+		if ((start > addr) && (start < addr + len))
+			start = RTE_ALIGN_FLOOR(start, align);
+		if ((end > addr) && (end < addr + len))
+			end = RTE_ALIGN_CEIL(end, align);
+	}
+	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
+	      (void *)mp, (void *)start, (void *)end,
+	      (size_t)(end - start));
+	mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
+			    IBV_ACCESS_LOCAL_WRITE);
+	if (mr->mr == NULL) {
+		ERROR("mempool %p: ibv_reg_mr() failed", (void *)mp);
+		rte_free(mr);
+		return NULL;
+	}
+	mr->mp = mp;
+	mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
+	mr->start = start;
+	mr->end = (uintptr_t)mr->mr->addr + mr->mr->length;
+	rte_atomic32_inc(&mr->refcnt);
+	DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
+	      (void *)mr, rte_atomic32_read(&mr->refcnt));
+	LIST_INSERT_HEAD(&priv->mr, mr, next);
+	return mr;
+}
+
+/**
+ * Search for the memory region matching the mempool in the memory region
+ * list.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param mp
+ *   Pointer to the memory pool to look up.
+ *
+ * @return
+ *   The memory region if found, NULL otherwise.
+ */
+struct mlx5_mr *
+priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+{
+	struct mlx5_mr *mr;
+
+	assert(mp);
+	if (LIST_EMPTY(&priv->mr))
+		return NULL;
+	LIST_FOREACH(mr, &priv->mr, next) {
+		if (mr->mp == mp) {
+			rte_atomic32_inc(&mr->refcnt);
+			DEBUG("Memory Region %p refcnt: %d",
+			      (void *)mr, rte_atomic32_read(&mr->refcnt));
+			return mr;
 		}
-		if (txq->mp2mr[i].mp == mp)
-			return;
 	}
-	txq_mp2mr_reg(txq, mp, i);
+	return NULL;
+}
+
+/**
+ * Release the memory region object.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param mr
+ *   Pointer to memory region to release.
+ *
+ * @return
+ *   0 on success, EBUSY if the memory region is still referenced.
+ */
+int
+priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+{
+	(void)priv;
+	assert(mr);
+	DEBUG("Memory Region %p refcnt: %d",
+	      (void *)mr, rte_atomic32_read(&mr->refcnt));
+	if (rte_atomic32_dec_and_test(&mr->refcnt)) {
+		claim_zero(ibv_dereg_mr(mr->mr));
+		LIST_REMOVE(mr, next);
+		rte_free(mr);
+		return 0;
+	}
+	return EBUSY;
+}
+
+/**
+ * Verify that the memory region list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of memory regions not released.
+ */
+int
+priv_mr_verify(struct priv *priv)
+{
+	int ret = 0;
+	struct mlx5_mr *mr;
+
+	LIST_FOREACH(mr, &priv->mr, next) {
+		DEBUG("%p: mr %p still referenced", (void *)priv,
+		      (void *)mr);
+		++ret;
+	}
+	return ret;
+}
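
The patch above replaces per-queue ibv_reg_mr()/ibv_dereg_mr() calls with a
device-level, reference-counted MR list: priv_mr_get() takes a reference on a
cache hit, priv_mr_new() registers and inserts with one reference, and
priv_mr_release() deregisters only when the last reference drops. The
standalone sketch below models that lifecycle so it can be read (and built)
in isolation; mr_new()/mr_get()/mr_release(), struct mempool, and the plain
int refcnt are illustrative stand-ins for the driver's priv_mr_*() functions,
rte_atomic32_t counter, and real ibv_reg_mr()/ibv_dereg_mr() calls, so it
needs neither DPDK nor libibverbs.

/* Minimal model of the reference-counted MR cache (assumed names). */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct mempool;				/* stand-in for struct rte_mempool */

struct mr {
	LIST_ENTRY(mr) next;		/* chained in the per-device list */
	const struct mempool *mp;	/* pool this region covers */
	int refcnt;			/* rte_atomic32_t in the driver */
};

LIST_HEAD(mr_list, mr);

/* Look the pool up; take a reference on a hit (models priv_mr_get()). */
static struct mr *
mr_get(struct mr_list *list, const struct mempool *mp)
{
	struct mr *mr;

	LIST_FOREACH(mr, list, next) {
		if (mr->mp == mp) {
			++mr->refcnt;
			return mr;
		}
	}
	return NULL;
}

/* Register a region, insert with refcnt == 1 (models priv_mr_new()). */
static struct mr *
mr_new(struct mr_list *list, const struct mempool *mp)
{
	struct mr *mr = calloc(1, sizeof(*mr));

	if (mr == NULL)
		return NULL;		/* would also call ibv_reg_mr() here */
	mr->mp = mp;
	mr->refcnt = 1;			/* caller's reference */
	LIST_INSERT_HEAD(list, mr, next);
	return mr;
}

/* Drop a reference; free on last release (models priv_mr_release()). */
static int
mr_release(struct mr *mr)
{
	assert(mr->refcnt > 0);
	if (--mr->refcnt == 0) {
		LIST_REMOVE(mr, next);	/* would also ibv_dereg_mr() here */
		free(mr);
		return 0;
	}
	return -1;			/* still referenced (EBUSY above) */
}

int
main(void)
{
	struct mr_list list = LIST_HEAD_INITIALIZER(list);
	struct mempool *mp = (struct mempool *)0x1; /* opaque cookie */
	struct mr *a, *b;

	a = mr_get(&list, mp);		/* miss: pool not registered yet */
	assert(a == NULL);
	a = mr_new(&list, mp);		/* first user registers the MR */
	assert(a != NULL);
	b = mr_get(&list, mp);		/* second user reuses it */
	assert(a == b && a->refcnt == 2);
	mr_release(b);			/* MR survives: refcnt 2 -> 1 */
	mr_release(a);			/* last reference: MR freed */
	printf("MR cache lifecycle OK\n");
	return 0;
}

This mirrors why mlx5_mp2mr_iter() releases the reference it just obtained on
a cache hit: the lookup itself increments refcnt, so an already-known mempool
must not accumulate an extra reference.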