X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_mr.c;h=fdbe7986fdddd5c1075362f2c210c362a5103590;hb=39e4a2577fd05199f53182b7c8509aeed40dc07f;hp=6b29eed559e9b0d594807a63ad58f98930bdb520;hpb=6e78005a9b3037ecd0a7de456406233a6c3446d9;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 6b29eed559..fdbe7986fd 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -1,395 +1,397 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
  */

-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include 
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
+#include 
 #include 
 #include 
+#include 
+
+#include 
+#include 
 #include "mlx5.h"
+#include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"

-struct mlx5_check_mempool_data {
-	int ret;
-	char *start;
-	char *end;
+struct mr_find_contig_memsegs_data {
+	uintptr_t addr;
+	uintptr_t start;
+	uintptr_t end;
+	const struct rte_memseg_list *msl;
 };

-/* Called by mlx5_check_mempool() when iterating the memory chunks. */
-static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
-		      void *opaque, struct rte_mempool_memhdr *memhdr,
-		      unsigned int mem_idx)
-{
-	struct mlx5_check_mempool_data *data = opaque;
-
-	(void)mp;
-	(void)mem_idx;
-
-	/* It already failed, skip the next chunks. */
-	if (data->ret != 0)
-		return;
-	/* It is the first chunk. */
-	if (data->start == NULL && data->end == NULL) {
-		data->start = memhdr->addr;
-		data->end = data->start + memhdr->len;
-		return;
-	}
-	if (data->end == memhdr->addr) {
-		data->end += memhdr->len;
-		return;
-	}
-	if (data->start == (char *)memhdr->addr + memhdr->len) {
-		data->start -= memhdr->len;
-		return;
-	}
-	/* Error, mempool is not virtually contiguous. */
-	data->ret = -1;
-}
+struct mr_update_mp_data {
+	struct rte_eth_dev *dev;
+	struct mlx5_mr_ctrl *mr_ctrl;
+	int ret;
+};

 /**
- * Check if a mempool can be used: it must be virtually contiguous.
- *
- * @param[in] mp
- *   Pointer to memory pool.
- * @param[out] start
- *   Pointer to the start address of the mempool virtual memory area
- * @param[out] end
- *   Pointer to the end address of the mempool virtual memory area
+ * Callback for memory event. This can be called from both primary and secondary
+ * process.
  *
- * @return
- *   0 on success (mempool is virtually contiguous), -1 on error.
+ * @param event_type
+ *   Memory event type.
+ * @param addr
+ *   Address of memory.
+ * @param len
+ *   Size of memory.
  */
-static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
-			      uintptr_t *end)
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+		     size_t len, void *arg __rte_unused)
 {
-	struct mlx5_check_mempool_data data;
-
-	memset(&data, 0, sizeof(data));
-	rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
-	*start = (uintptr_t)data.start;
-	*end = (uintptr_t)data.end;
+	struct mlx5_dev_ctx_shared *sh;
+	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;

-	return data.ret;
+	/* Must be called from the primary process. */
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	switch (event_type) {
+	case RTE_MEM_EVENT_FREE:
+		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+		/* Iterate all the existing mlx5 devices. */
+		LIST_FOREACH(sh, dev_list, mem_event_cb)
+			mlx5_free_mr_by_addr(&sh->share_cache,
+					     sh->ibdev_name, addr, len);
+		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+		break;
+	case RTE_MEM_EVENT_ALLOC:
+	default:
+		break;
+	}
 }

 /**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
- *
- * This function should only be called by txq_mp2mr().
+ * Bottom-half of LKey search on Tx.
  *
- * @param priv
- *   Pointer to private structure.
 * @param txq
- *   Pointer to TX queue structure.
- * @param[in] mp
- *   Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- *   Index of the next available entry.
+ *   Pointer to Tx queue structure.
+ * @param addr
+ *   Search key.
  *
  * @return
- *   mr on success, NULL on failure.
+ *   Searched LKey on success, UINT32_MAX on no match.
  */
-struct mlx5_mr*
-priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
-		   struct rte_mempool *mp, unsigned int idx)
+static uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
 {
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
-	struct mlx5_mr *mr;
+	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+	struct mlx5_priv *priv = txq_ctrl->priv;

-	/* Add a new entry, register MR first. */
-	DEBUG("%p: discovered new memory pool \"%s\" (%p)",
-	      (void *)txq_ctrl, mp->name, (void *)mp);
-	mr = priv_mr_get(priv, mp);
-	if (mr == NULL)
-		mr = priv_mr_new(priv, mp);
-	if (unlikely(mr == NULL)) {
-		DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
-		      (void *)txq_ctrl);
-		return NULL;
-	}
-	if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
-		/* Table is full, remove oldest entry. */
-		DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
-		      (void *)txq_ctrl);
-		--idx;
-		priv_mr_release(priv, txq->mp2mr[0]);
-		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
-			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
-	}
-	/* Store the new entry. */
-	txq_ctrl->txq.mp2mr[idx] = mr;
-	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
-	      (void *)txq_ctrl, mp->name, (void *)mp,
-	      txq_ctrl->txq.mp2mr[idx]->lkey);
-	return mr;
+	return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+				  &priv->sh->share_cache, mr_ctrl, addr,
+				  priv->config.mr_ext_memseg_en);
 }

 /**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
- *
- * This function should only be called by txq_mp2mr().
+ * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
+ * list, register the mempool of the mbuf as externally allocated memory.
  *
  * @param txq
- *   Pointer to TX queue structure.
- * @param[in] mp
- *   Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- *   Index of the next available entry.
+ *   Pointer to Tx queue structure.
+ * @param mb
+ *   Pointer to mbuf.
  *
  * @return
- *   mr on success, NULL on failure.
+ *   Searched LKey on success, UINT32_MAX on no match.
  */
-struct mlx5_mr*
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
-		   unsigned int idx)
+uint32_t
+mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
 {
 	struct mlx5_txq_ctrl *txq_ctrl =
 		container_of(txq, struct mlx5_txq_ctrl, txq);
-	struct mlx5_mr *mr;
+	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+	struct mlx5_priv *priv = txq_ctrl->priv;
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
+	uint32_t lkey;

-	priv_lock(txq_ctrl->priv);
-	mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
-	priv_unlock(txq_ctrl->priv);
-	return mr;
-}
+	if (priv->config.mr_mempool_reg_en) {
+		struct rte_mempool *mp = NULL;
+		struct mlx5_mprq_buf *buf;

-struct mlx5_mp2mr_mbuf_check_data {
-	int ret;
-};
+		if (!RTE_MBUF_HAS_EXTBUF(mb)) {
+			mp = mlx5_mb2mp(mb);
+		} else if (mb->shinfo->free_cb == mlx5_mprq_buf_free_cb) {
+			/* Recover MPRQ mempool. */
+			buf = mb->shinfo->fcb_opaque;
+			mp = buf->mp;
+		}
+		if (mp != NULL) {
+			lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,
+						     mr_ctrl, mp, addr);
+			/*
+			 * Lookup can only fail on invalid input, e.g. "addr"
+			 * is not from "mp" or "mp" has RTE_MEMPOOL_F_NON_IO set.
+			 */
+			if (lkey != UINT32_MAX)
+				return lkey;
+		}
+		/* Fallback for generic mechanism in corner cases. */
+	}
+	lkey = mlx5_tx_addr2mr_bh(txq, addr);
+	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+		/* Mempool may have externally allocated memory. */
+		return mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+	}
+	return lkey;
+}

 /**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
+ *
+ * Externally allocated chunk is registered and a MR is created for the chunk.
+ * The MR object is added to the global list. If memseg list of a MR object
+ * (mr->msl) is null, the MR object can be regarded as externally allocated
+ * memory.
  *
- * @param[in] mp
- *   The mempool pointer
- * @param[in] arg
- *   Context data (struct txq_mp2mr_mbuf_check_data). Contains the
- *   return value.
- * @param[in] obj
- *   Object address.
- * @param index
- *   Object index, unused.
+ * Once external memory is registered, it should be static. If the memory is
+ * freed and the virtual address range has different physical memory mapped
+ * again, it may cause crash on device due to the wrong translation entry. PMD
+ * can't track the free event of the external memory for now.
  */
 static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
-	uint32_t index __rte_unused)
+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+			 struct rte_mempool_memhdr *memhdr,
+			 unsigned mem_idx __rte_unused)
 {
-	struct mlx5_mp2mr_mbuf_check_data *data = arg;
-	struct rte_mbuf *buf = obj;
+	struct mr_update_mp_data *data = opaque;
+	struct rte_eth_dev *dev = data->dev;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
+	struct mlx5_mr *mr = NULL;
+	uintptr_t addr = (uintptr_t)memhdr->addr;
+	size_t len = memhdr->len;
+	struct mr_cache_entry entry;
+	uint32_t lkey;

-	/*
-	 * Check whether mbuf structure fits element size and whether mempool
-	 * pointer is valid.
-	 */
-	if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	/* If already registered, it should return. */
+	rte_rwlock_read_lock(&sh->share_cache.rwlock);
+	lkey = mlx5_mr_lookup_cache(&sh->share_cache, &entry, addr);
+	rte_rwlock_read_unlock(&sh->share_cache.rwlock);
+	if (lkey != UINT32_MAX)
+		return;
+	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
+		dev->data->port_id, mem_idx, mp->name);
+	mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+				sh->share_cache.reg_mr_cb);
+	if (!mr) {
+		DRV_LOG(WARNING,
+			"port %u unable to allocate a new MR of"
+			" mempool (%s).",
+			dev->data->port_id, mp->name);
 		data->ret = -1;
+		return;
+	}
+	rte_rwlock_write_lock(&sh->share_cache.rwlock);
+	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
+	/* Insert to the global cache table. */
+	mlx5_mr_insert_cache(&sh->share_cache, mr);
+	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+	/* Insert to the local cache table */
+	mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
+			   mr_ctrl, addr, priv->config.mr_ext_memseg_en);
 }

 /**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
+ * Finds the first ethdev that match the device.
+ * The existence of multiple ethdev per pci device is only with representors.
+ * On such case, it is enough to get only one of the ports as they all share
+ * the same ibv context.
+ *
+ * @param dev
+ *   Pointer to the device.
  *
- * @param[in] mp
- *   Memory Pool to register.
- * @param *arg
- *   Pointer to TX queue structure.
+ * @return
+ *   Pointer to the ethdev if found, NULL otherwise.
  */
-void
-mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
+static struct rte_eth_dev *
+dev_to_eth_dev(struct rte_device *dev)
 {
-	struct priv *priv = (struct priv *)arg;
-	struct mlx5_mp2mr_mbuf_check_data data = {
-		.ret = 0,
-	};
-	struct mlx5_mr *mr;
+	uint16_t port_id;

-	/* Register mempool only if the first element looks like a mbuf. */
-	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
-	    data.ret == -1)
-		return;
-	mr = priv_mr_get(priv, mp);
-	if (mr) {
-		priv_mr_release(priv, mr);
-		return;
-	}
-	priv_mr_new(priv, mp);
+	port_id = rte_eth_find_next_of(0, dev);
+	if (port_id == RTE_MAX_ETHPORTS)
+		return NULL;
+	return &rte_eth_devices[port_id];
 }

 /**
- * Register a new memory region from the mempool and store it in the memory
- * region list.
+ * Callback to DMA map external memory to a device.
+ *
+ * @param rte_dev
+ *   Pointer to the generic device.
+ * @param addr
+ *   Starting virtual address of memory to be mapped.
+ * @param iova
+ *   Starting IOVA address of memory to be mapped.
+ * @param len
+ *   Length of memory segment being mapped.
  *
- * @param priv
- *   Pointer to private structure.
- * @param mp
- *   Pointer to the memory pool to register.
  * @return
- *   The memory region on success.
+ *   0 on success, negative value on error.
  */
-struct mlx5_mr*
-priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+int
+mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
+		 uint64_t iova __rte_unused, size_t len)
 {
-	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
-	uintptr_t start;
-	uintptr_t end;
-	unsigned int i;
+	struct rte_eth_dev *dev;
 	struct mlx5_mr *mr;
+	struct mlx5_priv *priv;
+	struct mlx5_dev_ctx_shared *sh;

-	mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
-	if (!mr) {
-		DEBUG("unable to configure MR, ibv_reg_mr() failed.");
-		return NULL;
+	dev = dev_to_eth_dev(rte_dev);
+	if (!dev) {
+		DRV_LOG(WARNING, "unable to find matching ethdev "
+			"to device %s", rte_dev->name);
+		rte_errno = ENODEV;
+		return -1;
 	}
-	if (mlx5_check_mempool(mp, &start, &end) != 0) {
-		ERROR("mempool %p: not virtually contiguous",
-		      (void *)mp);
-		return NULL;
-	}
-	DEBUG("mempool %p area start=%p end=%p size=%zu",
-	      (void *)mp, (void *)start, (void *)end,
-	      (size_t)(end - start));
-	/* Round start and end to page boundary if found in memory segments. */
-	for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
-		uintptr_t addr = (uintptr_t)ms[i].addr;
-		size_t len = ms[i].len;
-		unsigned int align = ms[i].hugepage_sz;
-
-		if ((start > addr) && (start < addr + len))
-			start = RTE_ALIGN_FLOOR(start, align);
-		if ((end > addr) && (end < addr + len))
-			end = RTE_ALIGN_CEIL(end, align);
+	priv = dev->data->dev_private;
+	sh = priv->sh;
+	mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
+				sh->share_cache.reg_mr_cb);
+	if (!mr) {
+		DRV_LOG(WARNING,
+			"port %u unable to dma map", dev->data->port_id);
+		rte_errno = EINVAL;
+		return -1;
 	}
-	DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
-	      (void *)mp, (void *)start, (void *)end,
-	      (size_t)(end - start));
-	mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
-			    IBV_ACCESS_LOCAL_WRITE);
-	mr->mp = mp;
-	mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
-	mr->start = start;
-	mr->end = (uintptr_t)mr->mr->addr + mr->mr->length;
-	rte_atomic32_inc(&mr->refcnt);
-	DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
-	      (void *)mr, rte_atomic32_read(&mr->refcnt));
-	LIST_INSERT_HEAD(&priv->mr, mr, next);
-	return mr;
+	rte_rwlock_write_lock(&sh->share_cache.rwlock);
+	LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
+	/* Insert to the global cache table. */
+	mlx5_mr_insert_cache(&sh->share_cache, mr);
+	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+	return 0;
 }

 /**
- * Search the memory region object in the memory region list.
+ * Callback to DMA unmap external memory to a device.
+ *
+ * @param rte_dev
+ *   Pointer to the generic device.
+ * @param addr
+ *   Starting virtual address of memory to be unmapped.
+ * @param iova
+ *   Starting IOVA address of memory to be unmapped.
+ * @param len
+ *   Length of memory segment being unmapped.
  *
- * @param priv
- *   Pointer to private structure.
- * @param mp
- *   Pointer to the memory pool to register.
  * @return
- *   The memory region on success.
+ *   0 on success, negative value on error.
  */
-struct mlx5_mr*
-priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+int
+mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
+		   uint64_t iova __rte_unused, size_t len __rte_unused)
 {
+	struct rte_eth_dev *dev;
+	struct mlx5_priv *priv;
+	struct mlx5_dev_ctx_shared *sh;
 	struct mlx5_mr *mr;
+	struct mr_cache_entry entry;

-	assert(mp);
-	if (LIST_EMPTY(&priv->mr))
-		return NULL;
-	LIST_FOREACH(mr, &priv->mr, next) {
-		if (mr->mp == mp) {
-			rte_atomic32_inc(&mr->refcnt);
-			DEBUG("Memory Region %p refcnt: %d",
-			      (void *)mr, rte_atomic32_read(&mr->refcnt));
-			return mr;
-		}
+	dev = dev_to_eth_dev(rte_dev);
+	if (!dev) {
+		DRV_LOG(WARNING, "unable to find matching ethdev to device %s",
+			rte_dev->name);
+		rte_errno = ENODEV;
+		return -1;
 	}
-	return NULL;
+	priv = dev->data->dev_private;
+	sh = priv->sh;
+	rte_rwlock_write_lock(&sh->share_cache.rwlock);
+	mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
+	if (!mr) {
+		rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered to device %s",
+			(uintptr_t)addr, rte_dev->name);
+		rte_errno = EINVAL;
+		return -1;
+	}
+	LIST_REMOVE(mr, mr);
+	DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
+		(void *)mr);
+	mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
+	mlx5_mr_rebuild_cache(&sh->share_cache);
+	/*
+	 * No explicit wmb is needed after updating dev_gen due to
+	 * store-release ordering in unlock that provides the
+	 * implicit barrier at the software visible level.
+	 */
+	++sh->share_cache.dev_gen;
+	DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
+		sh->share_cache.dev_gen);
+	rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+	return 0;
 }

 /**
- * Release the memory region object.
+ * Register MR for entire memory chunks in a Mempool having externally allocated
+ * memory and fill in local cache.
  *
- * @param mr
- *   Pointer to memory region to release.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mr_ctrl
+ *   Pointer to per-queue MR control structure.
+ * @param mp
+ *   Pointer to registering Mempool.
  *
  * @return
- *   0 on success, errno on failure.
+ *   0 on success, -1 on failure.
  */
-int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+static uint32_t
+mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+		      struct rte_mempool *mp)
 {
-	(void)priv;
-	assert(mr);
-	DEBUG("Memory Region %p refcnt: %d",
-	      (void *)mr, rte_atomic32_read(&mr->refcnt));
-	if (rte_atomic32_dec_and_test(&mr->refcnt)) {
-		claim_zero(ibv_dereg_mr(mr->mr));
-		LIST_REMOVE(mr, next);
-		rte_free(mr);
-		return 0;
-	}
-	return EBUSY;
+	struct mr_update_mp_data data = {
+		.dev = dev,
+		.mr_ctrl = mr_ctrl,
+		.ret = 0,
+	};
+
+	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
+	return data.ret;
 }

 /**
- * Verify the flow list is empty
+ * Register MR entire memory chunks in a Mempool having externally allocated
+ * memory and search LKey of the address to return.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param addr + * Search key. + * @param mp + * Pointer to registering Mempool where addr belongs. * - * @return the number of object not released. + * @return + * LKey for address on success, UINT32_MAX on failure. */ -int -priv_mr_verify(struct priv *priv) +uint32_t +mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr, + struct rte_mempool *mp) { - int ret = 0; - struct mlx5_mr *mr; + struct mlx5_txq_ctrl *txq_ctrl = + container_of(txq, struct mlx5_txq_ctrl, txq); + struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct mlx5_priv *priv = txq_ctrl->priv; - LIST_FOREACH(mr, &priv->mr, next) { - DEBUG("%p: mr %p still referenced", (void *)priv, - (void *)mr); - ++ret; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + DRV_LOG(WARNING, + "port %u using address (%p) from unregistered mempool" + " having externally allocated memory" + " in secondary process, please create mempool" + " prior to rte_eth_dev_start()", + PORT_ID(priv), (void *)addr); + return UINT32_MAX; } - return ret; + mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp); + return mlx5_tx_addr2mr_bh(txq, addr); }
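Usage note: the two callbacks added above, mlx5_net_dma_map() and mlx5_net_dma_unmap(), are not called directly by applications; they are reached through the generic rte_dev_dma_map()/rte_dev_dma_unmap() API after the memory has been made known to DPDK with rte_extmem_register(). A minimal application-side sketch follows, assuming IOVA-as-VA mode (the mlx5 callback marks its iova argument __rte_unused) and a 4 KiB page size; the helper name ext_buf_map_to_port() is hypothetical and error handling is trimmed to a single unwind path:

#include <stdint.h>
#include <sys/mman.h>

#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_memory.h>

/* Allocate an external buffer and DMA-map it to a port.
 * len is assumed to be a multiple of the 4 KiB page size.
 */
static void *
ext_buf_map_to_port(uint16_t port_id, size_t len)
{
	struct rte_eth_dev_info dev_info;
	void *addr;

	/* Page-aligned anonymous memory stands in for any external allocator. */
	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;
	/* Make the chunk known to DPDK; a NULL IOVA table is accepted. */
	if (rte_extmem_register(addr, len, NULL, 0, 4096) != 0)
		goto error;
	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		goto error;
	/* Dispatches to the PMD dma_map callback, i.e. mlx5_net_dma_map(). */
	if (rte_dev_dma_map(dev_info.device, addr, (uintptr_t)addr, len) != 0)
		goto error;
	return addr;
error:
	/* Full unwind (rte_extmem_unregister()) elided for brevity. */
	munmap(addr, len);
	return NULL;
}

Teardown mirrors this path: rte_dev_dma_unmap() lands in mlx5_net_dma_unmap() above, which removes the MR, rebuilds the global cache and bumps dev_gen so per-queue caches are flushed, after which rte_extmem_unregister() and munmap() may release the chunk. As the comment on mlx5_mr_update_ext_mp_cb() warns, such memory must stay static while registered: freeing the range and remapping different physical memory behind it would leave stale translation entries on the device.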