diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index d23d3c613e..48d458ad45 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -289,6 +289,23 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
 	uintptr_t end = 0;
 	uint32_t idx = 0;
 
+	/* MR for external memory doesn't have memseg list. */
+	if (mr->msl == NULL) {
+		struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+		assert(mr->ms_bmp_n == 1);
+		assert(mr->ms_n == 1);
+		assert(base_idx == 0);
+		/*
+		 * Can't search it from the memseg list; get it directly from
+		 * the verbs MR as there's only one chunk.
+		 */
+		entry->start = (uintptr_t)ibv_mr->addr;
+		entry->end = (uintptr_t)ibv_mr->addr + ibv_mr->length;
+		entry->lkey = rte_cpu_to_be_32(ibv_mr->lkey);
+		/* Returning 1 ends iteration. */
+		return 1;
+	}
 	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
 		if (rte_bitmap_get(mr->ms_bmp, idx)) {
 			const struct rte_memseg_list *msl;
@@ -331,14 +348,15 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
 static int
 mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	unsigned int n;
 
 	DEBUG("port %u inserting MR(%p) to global cache",
 	      dev->data->port_id, (void *)mr);
 	for (n = 0; n < mr->ms_bmp_n; ) {
-		struct mlx4_mr_cache entry = { 0, };
+		struct mlx4_mr_cache entry;
 
+		memset(&entry, 0, sizeof(entry));
 		/* Find a contiguous chunk and advance the index. */
 		n = mr_find_next_chunk(mr, &entry, n);
 		if (!entry.end)
@@ -371,7 +389,7 @@ static struct mlx4_mr *
 mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 		   uintptr_t addr)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct mlx4_mr *mr;
 
 	/* Iterate all the existing MRs. */
@@ -381,8 +399,9 @@ mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 		if (mr->ms_n == 0)
 			continue;
 		for (n = 0; n < mr->ms_bmp_n; ) {
-			struct mlx4_mr_cache ret = { 0, };
+			struct mlx4_mr_cache ret;
 
+			memset(&ret, 0, sizeof(ret));
 			n = mr_find_next_chunk(mr, &ret, n);
 			if (addr >= ret.start && addr < ret.end) {
 				/* Found. */
@@ -411,7 +430,7 @@ static uint32_t
 mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 	      uintptr_t addr)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	uint16_t idx;
 	uint32_t lkey = UINT32_MAX;
 	struct mlx4_mr *mr;
@@ -458,7 +477,7 @@ mr_free(struct mlx4_mr *mr)
 }
 
 /**
- * Releass resources of detached MR having no online entry.
+ * Release resources of detached MR having no online entry.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -466,10 +485,12 @@ mr_free(struct mlx4_mr *mr)
 static void
 mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct mlx4_mr *mr_next;
 	struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
 
+	/* Must be called from the primary process. */
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/*
	 * MR can't be freed with holding the lock because rte_free() could call
	 * memory free callback function. This will be a deadlock situation.
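
Side note on the chunk walk above: mr_find_next_chunk() fills *entry and
returns the memseg index to resume from, with entry->end == 0 meaning no more
chunks; for an external-memory MR (mr->msl == NULL) it returns 1, which equals
ms_bmp_n, so the loop body runs exactly once. A minimal sketch of the
caller-side pattern, mirroring mr_insert_dev_cache() (it uses the driver's
internal types and is not part of the patch):

    static void
    walk_mr_chunks(struct mlx4_mr *mr)
    {
            unsigned int n;

            for (n = 0; n < mr->ms_bmp_n; ) {
                    struct mlx4_mr_cache entry;

                    memset(&entry, 0, sizeof(entry));
                    /* Fill entry and advance the index. */
                    n = mr_find_next_chunk(mr, &entry, n);
                    if (entry.end == 0) /* No more chunks. */
                            break;
                    /* entry.start/end/lkey describe one contiguous chunk. */
            }
    }
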
@@ -506,8 +527,11 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
 }
 
 /**
- * Create a new global Memroy Region (MR) for a missing virtual address.
- * Register entire virtually contiguous memory chunk around the address.
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * This API should be called from a secondary process; a request is then sent
+ * to the primary process in order to create an MR for the address. As the
+ * global MR list is in shared memory, the following LKey lookup should
+ * succeed unless the request fails.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -521,10 +545,54 @@ mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
  *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
  */
 static uint32_t
-mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
-	       uintptr_t addr)
+mlx4_mr_create_secondary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+			 uintptr_t addr)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
+	int ret;
+
+	DEBUG("port %u requesting MR creation for address (%p)",
+	      dev->data->port_id, (void *)addr);
+	ret = mlx4_mp_req_mr_create(dev, addr);
+	if (ret) {
+		DEBUG("port %u failed to request MR creation for address (%p)",
+		      dev->data->port_id, (void *)addr);
+		return UINT32_MAX;
+	}
+	rte_rwlock_read_lock(&priv->mr.rwlock);
+	/* Fill in output data. */
+	mr_lookup_dev(dev, entry, addr);
+	/* Lookup can't fail. */
+	assert(entry->lkey != UINT32_MAX);
+	rte_rwlock_read_unlock(&priv->mr.rwlock);
+	DEBUG("port %u MR CREATED by primary process for %p:\n"
+	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
+	      dev->data->port_id, (void *)addr,
+	      entry->start, entry->end, entry->lkey);
+	return entry->lkey;
+}
+
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * Register entire virtually contiguous memory chunk around the address.
+ * This must be called from the primary process.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] entry
+ *   Pointer to returning MR cache entry, found in the global cache or newly
+ *   created. If it fails to create one, this will not be updated.
+ * @param addr
+ *   Target virtual address to register.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+uint32_t
+mlx4_mr_create_primary(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+		       uintptr_t addr)
+{
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
 	const struct rte_memseg_list *msl;
 	const struct rte_memseg *ms;
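
mlx4_mp_req_mr_create() used by mlx4_mr_create_secondary() above is
implemented in mlx4_mp.c, which is not part of this diff. As a rough sketch of
what such a request could look like with EAL's multi-process API (the message
name and payload layout below are assumptions, not the driver's actual ones):

    #include <stdlib.h>
    #include <string.h>
    #include <time.h>
    #include <rte_eal.h>

    /* Hypothetical payload; the real layout lives in mlx4_mp.c. */
    struct mp_mr_req {
            uintptr_t addr; /* Address to create an MR for. */
    };

    static int
    mp_req_mr_create_sketch(uintptr_t addr)
    {
            struct rte_mp_msg req;
            struct rte_mp_reply reply;
            const struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
            struct mp_mr_req param = { .addr = addr };

            memset(&req, 0, sizeof(req));
            strcpy(req.name, "mp_mlx4_sketch"); /* Hypothetical name. */
            memcpy(req.param, &param, sizeof(param));
            req.len_param = sizeof(param);
            /* Blocks until the primary process replies or the timeout hits. */
            if (rte_mp_request_sync(&req, &reply, &ts) < 0 ||
                reply.nb_received != 1)
                    return -1;
            free(reply.msgs); /* Reply array is allocated by EAL. */
            return 0;
    }
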
@@ -551,14 +619,24 @@ mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 	 */
 	mlx4_mr_garbage_collect(dev);
 	/*
-	 * Find out a contiguous virtual address chunk in use, to which the
-	 * given address belongs, in order to register maximum range. In the
-	 * best case where mempools are not dynamically recreated and
-	 * '--socket-mem' is speicified as an EAL option, it is very likely to
+	 * If enabled, find out a contiguous virtual address chunk in use, to
+	 * which the given address belongs, in order to register maximum range.
+	 * In the best case where mempools are not dynamically recreated and
+	 * '--socket-mem' is specified as an EAL option, it is very likely to
 	 * have only one MR(LKey) per a socket and per a hugepage-size even
-	 * though the system memory is highly fragmented.
+	 * though the system memory is highly fragmented. As the whole memory
+	 * chunk will be pinned by the kernel, it can't be reused unless the
+	 * entire chunk is freed from EAL.
+	 *
+	 * If disabled, just register one memseg (page). Then, memory
+	 * consumption will be minimized but it may drop performance if there
+	 * are many MRs to look up on the datapath.
 	 */
-	if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+	if (!priv->mr_ext_memseg_en) {
+		data.msl = rte_mem_virt2memseg_list((void *)addr);
+		data.start = RTE_ALIGN_FLOOR(addr, data.msl->page_sz);
+		data.end = data.start + data.msl->page_sz;
+	} else if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
 		WARN("port %u unable to find virtually contiguous"
 		     " chunk for address (%p)."
 		     " rte_memseg_contig_walk() failed.",
 		     dev->data->port_id, (void *)addr);
@@ -604,7 +682,7 @@ alloc_resources:
 	bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
 	mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
 	if (mr->ms_bmp == NULL) {
-		WARN("port %u unable to initialize bitamp for a new MR of"
+		WARN("port %u unable to initialize bitmap for a new MR of"
 		     " address (%p).",
 		     dev->data->port_id, (void *)addr);
 		rte_errno = EINVAL;
@@ -671,8 +749,9 @@ alloc_resources:
 	 */
 	for (n = 0; n < ms_n; ++n) {
 		uintptr_t start;
-		struct mlx4_mr_cache ret = { 0, };
+		struct mlx4_mr_cache ret;
 
+		memset(&ret, 0, sizeof(ret));
 		start = data_re.start + n * msl->page_sz;
 		/* Exclude memsegs already registered by other MRs. */
 		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
@@ -740,6 +819,40 @@ err_nolock:
 	return UINT32_MAX;
 }
 
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * This can be called from both the primary and secondary processes.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param[out] entry
+ *   Pointer to returning MR cache entry, found in the global cache or newly
+ *   created. If it fails to create one, this will not be updated.
+ * @param addr
+ *   Target virtual address to register.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+	       uintptr_t addr)
+{
+	uint32_t ret = 0;
+
+	switch (rte_eal_process_type()) {
+	case RTE_PROC_PRIMARY:
+		ret = mlx4_mr_create_primary(dev, entry, addr);
+		break;
+	case RTE_PROC_SECONDARY:
+		ret = mlx4_mr_create_secondary(dev, entry, addr);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
 /**
  * Rebuild the global B-tree cache of device from the original MR list.
  *
@@ -749,7 +862,7 @@ err_nolock:
 static void
 mr_rebuild_dev_cache(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct mlx4_mr *mr;
 
 	DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
@@ -781,7 +894,7 @@ mr_rebuild_dev_cache(struct rte_eth_dev *dev)
 static void
 mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	const struct rte_memseg_list *msl;
 	struct mlx4_mr *mr;
 	int ms_n;
@@ -809,6 +922,7 @@ mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
 		mr = mr_lookup_dev_list(dev, &entry, start);
 		if (mr == NULL)
 			continue;
+		assert(mr->msl); /* Can't be external memory. */
 		ms = rte_mem_virt2memseg((void *)start, msl);
 		assert(ms != NULL);
 		assert(msl->page_sz == ms->hugepage_sz);
@@ -868,15 +982,18 @@ void
 mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		     size_t len, void *arg __rte_unused)
 {
-	struct priv *priv;
+	struct mlx4_priv *priv;
+	struct mlx4_dev_list *dev_list = &mlx4_shared_data->mem_event_cb_list;
 
+	/* Must be called from the primary process. */
+	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	switch (event_type) {
 	case RTE_MEM_EVENT_FREE:
-		rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
+		rte_rwlock_read_lock(&mlx4_shared_data->mem_event_rwlock);
 		/* Iterate all the existing mlx4 devices. */
-		LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
-			mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
-		rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
+		LIST_FOREACH(priv, dev_list, mem_event_cb)
+			mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+		rte_rwlock_read_unlock(&mlx4_shared_data->mem_event_rwlock);
 		break;
 	case RTE_MEM_EVENT_ALLOC:
 	default:
@@ -905,7 +1022,7 @@ static uint32_t
 mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
 		   struct mlx4_mr_cache *entry, uintptr_t addr)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx4_priv *priv = dev->data->dev_private;
 	struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
 	uint16_t idx;
 	uint32_t lkey;
@@ -1003,11 +1120,9 @@ uint32_t
 mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
 {
 	struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
-	struct priv *priv = rxq->priv;
+	struct mlx4_priv *priv = rxq->priv;
 
-	DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
-	      rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
-	return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+	return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
 }
 
 /**
@@ -1021,15 +1136,39 @@ mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
  */
-uint32_t
+static uint32_t
 mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
 {
 	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
-	struct priv *priv = txq->priv;
+	struct mlx4_priv *priv = txq->priv;
+
+	return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx. If the address can't be found in the
+ * memseg list, register the mempool of the mbuf as externally allocated memory.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param mb
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
+{
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
+	uint32_t lkey;
 
-	DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
-	      txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
-	return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+	lkey = mlx4_tx_addr2mr_bh(txq, addr);
+	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+		/* Mempool may have externally allocated memory. */
+		return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+	}
+	return lkey;
 }
 
 /**
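
The next hunk registers external memory chunk by chunk through
rte_mempool_mem_iter(). For reference, a minimal standalone sketch of that
callback contract (names here are illustrative, not from the patch): each
virtually contiguous chunk reported to the callback is what
mlx4_mr_update_ext_mp_cb() below turns into one verbs MR.

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Invoked once per memory chunk backing the mempool. */
    static void
    chunk_cb(struct rte_mempool *mp, void *opaque,
             struct rte_mempool_memhdr *memhdr, unsigned int mem_idx)
    {
            unsigned int *n = opaque;

            printf("%s: chunk #%u at %p, %zu bytes\n",
                   mp->name, mem_idx, memhdr->addr, memhdr->len);
            (*n)++;
    }

    static unsigned int
    count_mp_chunks(struct rte_mempool *mp)
    {
            unsigned int n = 0;

            rte_mempool_mem_iter(mp, chunk_cb, &n);
            return n;
    }
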
+ * + * Once external memory is registered, it should be static. If the memory is + * freed and the virtual address range has different physical memory mapped + * again, it may cause crash on device due to the wrong translation entry. PMD + * can't track the free event of the external memory for now. + */ +static void +mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque, + struct rte_mempool_memhdr *memhdr, + unsigned mem_idx __rte_unused) +{ + struct mr_update_mp_data *data = opaque; + struct rte_eth_dev *dev = data->dev; + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl; + struct mlx4_mr *mr = NULL; + uintptr_t addr = (uintptr_t)memhdr->addr; + size_t len = memhdr->len; + struct mlx4_mr_cache entry; + uint32_t lkey; + + assert(rte_eal_process_type() == RTE_PROC_PRIMARY); + /* If already registered, it should return. */ + rte_rwlock_read_lock(&priv->mr.rwlock); + lkey = mr_lookup_dev(dev, &entry, addr); + rte_rwlock_read_unlock(&priv->mr.rwlock); + if (lkey != UINT32_MAX) + return; + mr = rte_zmalloc_socket(NULL, + RTE_ALIGN_CEIL(sizeof(*mr), + RTE_CACHE_LINE_SIZE), + RTE_CACHE_LINE_SIZE, mp->socket_id); + if (mr == NULL) { + WARN("port %u unable to allocate memory for a new MR of" + " mempool (%s).", + dev->data->port_id, mp->name); + data->ret = -1; + return; + } + DEBUG("port %u register MR for chunk #%d of mempool (%s)", + dev->data->port_id, mem_idx, mp->name); + mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len, + IBV_ACCESS_LOCAL_WRITE); + if (mr->ibv_mr == NULL) { + WARN("port %u fail to create a verbs MR for address (%p)", + dev->data->port_id, (void *)addr); + rte_free(mr); + data->ret = -1; + return; + } + mr->msl = NULL; /* Mark it is external memory. */ + mr->ms_bmp = NULL; + mr->ms_n = 1; + mr->ms_bmp_n = 1; + rte_rwlock_write_lock(&priv->mr.rwlock); + LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr); + DEBUG("port %u MR CREATED (%p) for external memory %p:\n" + " [0x%" PRIxPTR ", 0x%" PRIxPTR ")," + " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u", + dev->data->port_id, (void *)mr, (void *)addr, + addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey), + mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n); + /* Insert to the global cache table. */ + mr_insert_dev_cache(dev, mr); + rte_rwlock_write_unlock(&priv->mr.rwlock); + /* Insert to the local cache table */ + mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr); +} + +/** + * Register MR for entire memory chunks in a Mempool having externally allocated + * memory and fill in local cache. + * + * @param dev + * Pointer to Ethernet device. + * @param mr_ctrl + * Pointer to per-queue MR control structure. + * @param mp + * Pointer to registering Mempool. + * + * @return + * 0 on success, -1 on failure. + */ +static uint32_t +mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, + struct rte_mempool *mp) +{ + struct mr_update_mp_data data = { + .dev = dev, + .mr_ctrl = mr_ctrl, + .ret = 0, + }; + + rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data); + return data.ret; +} + +/** + * Register MR entire memory chunks in a Mempool having externally allocated + * memory and search LKey of the address to return. + * + * @param dev + * Pointer to Ethernet device. + * @param addr + * Search key. + * @param mp + * Pointer to registering Mempool where addr belongs. + * + * @return + * LKey for address on success, UINT32_MAX on failure. 
+ */ +uint32_t +mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp) +{ + struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl; + struct mlx4_priv *priv = txq->priv; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + WARN("port %u using address (%p) from unregistered mempool" + " having externally allocated memory" + " in secondary process, please create mempool" + " prior to rte_eth_dev_start()", + PORT_ID(priv), (void *)addr); + return UINT32_MAX; + } + mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp); + return mlx4_tx_addr2mr_bh(txq, addr); +} + /* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */ static void mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque, @@ -1098,6 +1373,10 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, }; rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data); + if (data.ret < 0 && rte_errno == ENXIO) { + /* Mempool may have externally allocated memory. */ + return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp); + } return data.ret; } @@ -1111,7 +1390,7 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl, void mlx4_mr_dump_dev(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; + struct mlx4_priv *priv = dev->data->dev_private; struct mlx4_mr *mr; int mr_n = 0; int chunk_n = 0; @@ -1128,8 +1407,9 @@ mlx4_mr_dump_dev(struct rte_eth_dev *dev) if (mr->ms_n == 0) continue; for (n = 0; n < mr->ms_bmp_n; ) { - struct mlx4_mr_cache ret = { 0, }; + struct mlx4_mr_cache ret; + memset(&ret, 0, sizeof(ret)); n = mr_find_next_chunk(mr, &ret, n); if (!ret.end) break; @@ -1153,18 +1433,19 @@ mlx4_mr_dump_dev(struct rte_eth_dev *dev) void mlx4_mr_release(struct rte_eth_dev *dev) { - struct priv *priv = dev->data->dev_private; - struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list); + struct mlx4_priv *priv = dev->data->dev_private; + struct mlx4_mr *mr_next; /* Remove from memory callback device list. */ - rte_rwlock_write_lock(&mlx4_mem_event_rwlock); + rte_rwlock_write_lock(&mlx4_shared_data->mem_event_rwlock); LIST_REMOVE(priv, mem_event_cb); - rte_rwlock_write_unlock(&mlx4_mem_event_rwlock); + rte_rwlock_write_unlock(&mlx4_shared_data->mem_event_rwlock); #ifndef NDEBUG mlx4_mr_dump_dev(dev); #endif rte_rwlock_write_lock(&priv->mr.rwlock); /* Detach from MR list and move to free list. */ + mr_next = LIST_FIRST(&priv->mr.mr_list); while (mr_next != NULL) { struct mlx4_mr *mr = mr_next;