sizeof(txq_ctrl->txq.mp2mr[0])));
}
/* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx].mp = mp;
+ txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
+ txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
txq_ctrl->txq.mp2mr[idx].mr = mr;
txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
struct txq_mp2mr_mbuf_check_data data = {
.ret = 0,
};
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
/* Register mempool only if the first element looks like a mbuf. */
if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
data.ret == -1)
return;
+ if (mlx5_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return;
+ }
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
- if (unlikely(txq_ctrl->txq.mp2mr[i].mp == NULL)) {
+ struct ibv_mr *mr = txq_ctrl->txq.mp2mr[i].mr;
+
+ if (unlikely(mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
- if (txq_ctrl->txq.mp2mr[i].mp == mp)
+ if (start >= (uintptr_t)mr->addr &&
+ end <= (uintptr_t)mr->addr + mr->length)
return;
}
txq_mp2mr_reg(&txq_ctrl->txq, mp, i);
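The new mlx5_check_mempool() guard rejects mempools whose objects do not sit in one virtually contiguous region, since a single (start, end) pair can only describe such a mempool. Roughly, a check like this can walk the mempool's memory chunks with rte_mempool_mem_iter() and verify that each chunk extends the range seen so far; the sketch below is written under that assumption with hypothetical names, not the driver code verbatim:

#include <rte_mempool.h>

/* Hypothetical sketch; names do not match the driver's helpers. */
struct contig_check_data {
	uintptr_t start;
	uintptr_t end;
	int ret;
};

static void
contig_check_cb(struct rte_mempool *mp, void *opaque,
		struct rte_mempool_memhdr *memhdr, unsigned int mem_idx)
{
	struct contig_check_data *d = opaque;
	uintptr_t addr = (uintptr_t)memhdr->addr;

	(void)mp;
	if (d->ret != 0)
		return;
	if (mem_idx == 0) {
		/* First chunk initializes the range. */
		d->start = addr;
		d->end = addr + memhdr->len;
	} else if (addr == d->end) {
		/* Chunk extends the range upward. */
		d->end += memhdr->len;
	} else if (addr + memhdr->len == d->start) {
		/* Chunk extends the range downward. */
		d->start = addr;
	} else {
		/* Gap found: not virtually contiguous. */
		d->ret = -1;
	}
}

static int
check_mempool_contig(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end)
{
	struct contig_check_data d = { .start = 0, .end = 0, .ret = 0 };

	rte_mempool_mem_iter(mp, contig_check_cb, &d);
	*start = d.start;
	*end = d.end;
	return d.ret;
}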
txq_complete(struct txq *txq);
static __rte_always_inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp);
+txq_mb2mr(struct txq *txq, struct rte_mbuf *mb);
static __rte_always_inline void
mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe);
}
/**
- * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
+ * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
* Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
* remove an entry first.
*
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+txq_mb2mr(struct txq *txq, struct rte_mbuf *mb)
{
- unsigned int i;
- uint32_t lkey = (uint32_t)-1;
+ uint16_t i = txq->mr_cache_idx;
+ uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
+ assert(i < RTE_DIM(txq->mp2mr));
+ if (likely(txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr))
+ return txq->mp2mr[i].lkey;
for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mp == NULL)) {
+ if (unlikely(txq->mp2mr[i].mr == NULL)) {
/* Unknown MP, add a new MR for it. */
break;
}
- if (txq->mp2mr[i].mp == mp) {
+ if (txq->mp2mr[i].start <= addr &&
+ txq->mp2mr[i].end >= addr) {
assert(txq->mp2mr[i].lkey != (uint32_t)-1);
assert(htonl(txq->mp2mr[i].mr->lkey) ==
txq->mp2mr[i].lkey);
- lkey = txq->mp2mr[i].lkey;
- break;
+ txq->mr_cache_idx = i;
+ return txq->mp2mr[i].lkey;
}
}
- if (unlikely(lkey == (uint32_t)-1))
- lkey = txq_mp2mr_reg(txq, mp, i);
- return lkey;
+ txq->mr_cache_idx = 0;
+ return txq_mp2mr_reg(txq, txq_mb2mp(mb), i);
}
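On a miss against every mp2mr[] entry, the slow path falls back to txq_mp2mr_reg() and needs the mbuf's mempool, which it recovers via txq_mb2mp(). A sketch of that helper, assuming the usual definition in this driver (an indirect mbuf is resolved to its direct buffer before taking the pool pointer):

static inline struct rte_mempool *
txq_mb2mp(struct rte_mbuf *buf)
{
	/* Indirect mbufs do not own their data; use the direct mbuf's pool. */
	if (unlikely(RTE_MBUF_INDIRECT(buf)))
		return rte_mbuf_from_indirect(buf)->pool;
	return buf->pool;
}

Keeping this out of the fast path is the point of the change: when the cached entry or one of the table entries already covers the mbuf's data address, no mempool pointer is dereferenced at all, and mr_cache_idx remembers the hit for the next packet.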
/**
naddr = htonll(addr);
*dseg = (rte_v128u32_t){
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ txq_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ txq_mb2mr(txq, buf),
naddr,
naddr >> 32,
};
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = txq_mb2mr(txq, buf),
.addr = htonll(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = txq_mb2mr(txq, buf),
.addr = htonll(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
addr = rte_pktmbuf_mtod(buf, uintptr_t);
*dseg = (struct mlx5_wqe_data_seg){
.byte_count = htonl(DATA_LEN(buf)),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .lkey = txq_mb2mr(txq, buf),
.addr = htonll(addr),
};
#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
naddr = htonll(addr);
*dseg = (rte_v128u32_t) {
htonl(length),
- txq_mp2mr(txq, txq_mb2mp(buf)),
+ txq_mb2mr(txq, buf),
naddr,
naddr >> 32,
};