struct rte_mbuf *tmp = elt->buf;
struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];
+#ifndef NDEBUG
+ /* Poison the freed element in debug builds to catch use-after-free. */
+ memset(elt, 0x66, sizeof(*elt));
+#endif
RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
/* Faster than rte_pktmbuf_free(). */
do {
return 0;
}
+struct mlx5_check_mempool_data {
+ int ret; /* 0 while chunks are contiguous, -1 once a gap is found. */
+ char *start; /* Start of the current virtually contiguous area. */
+ char *end; /* End of the current virtually contiguous area. */
+};
+
+/* Called by mlx5_check_mempool() when iterating the memory chunks. */
+static void mlx5_check_mempool_cb(struct rte_mempool *mp,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx)
+{
+ struct mlx5_check_mempool_data *data = opaque;
+
+ (void)mp;
+ (void)mem_idx;
+
+ /* A previous chunk already failed, skip the remaining ones. */
+ if (data->ret != 0)
+ return;
+ /* It is the first chunk. */
+ if (data->start == NULL && data->end == NULL) {
+ data->start = memhdr->addr;
+ data->end = data->start + memhdr->len;
+ return;
+ }
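+ /* This chunk starts exactly where the current area ends: extend forward. */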
+ if (data->end == memhdr->addr) {
+ data->end += memhdr->len;
+ return;
+ }
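+ /* This chunk ends exactly where the current area starts: extend backward. */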
+ if (data->start == (char *)memhdr->addr + memhdr->len) {
+ data->start -= memhdr->len;
+ return;
+ }
+ /* Error, mempool is not virtually contiguous. */
+ data->ret = -1;
+}
+
+/**
+ * Check if a mempool can be used: it must be virtually contiguous.
+ *
+ * @param[in] mp
+ * Pointer to memory pool.
+ * @param[out] start
+ * Pointer to the start address of the mempool virtual memory area.
+ * @param[out] end
+ * Pointer to the end address of the mempool virtual memory area.
+ *
+ * @return
+ * 0 on success (mempool is virtually contiguous), -1 on error.
+ */
+static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
+{
+ struct mlx5_check_mempool_data data;
+
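+ /* Zero-filled data: ret == 0 and start == end == NULL mean no chunk seen yet. */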
+ memset(&data, 0, sizeof(data));
+ rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
+ *start = (uintptr_t)data.start;
+ *end = (uintptr_t)data.end;
+
+ return data.ret;
+}
+
/* For best performance, this function should not be inlined. */
-struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *)
+struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *)
__attribute__((noinline));
/**
* Memory region pointer, NULL in case of error.
*/
struct ibv_mr *
-mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp)
+mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start = mp->elt_va_start;
- uintptr_t end = mp->elt_va_end;
+ uintptr_t start;
+ uintptr_t end;
unsigned int i;
+ if (mlx5_check_mempool(mp, &start, &end) != 0) {
+ ERROR("mempool %p: not virtually contiguous",
+ (void *)mp);
+ return NULL;
+ }
+
DEBUG("mempool %p area start=%p end=%p size=%zu",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
/* Round start and end to page boundary if found in memory segments. */
for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
end = RTE_ALIGN_CEIL(end, align);
}
DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (const void *)mp, (void *)start, (void *)end,
+ (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
return ibv_reg_mr(pd,
(void *)start,
end - start,
- IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
+ IBV_ACCESS_LOCAL_WRITE);
}
/**
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static uint32_t
-txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
{
unsigned int i;
struct ibv_mr *mr;
}
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (const void *)mp);
+ (void *)txq, mp->name, (void *)mp);
mr = mlx5_mp2mr(txq->priv->pd, mp);
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
+ (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
struct txq_mp2mr_mbuf_check_data {
- const struct rte_mempool *mp;
int ret;
};
* Callback function for rte_mempool_obj_iter() to check whether a given
* mempool object looks like a mbuf.
*
- * @param[in, out] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
- * and return value.
- * @param[in] start
- * Object start address.
- * @param[in] end
- * Object end address.
+ * @param[in] mp
+ * The mempool pointer.
+ * @param[in] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
+ * return value.
+ * @param[in] obj
+ * Object address.
* @param index
- * Unused.
- *
- * @return
- * Nonzero value when object is not a mbuf.
+ * Object index, unused.
*/
static void
-txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
- uint32_t index __rte_unused)
+txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index __rte_unused)
{
struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf =
- (void *)((uintptr_t)start + data->mp->header_size);
+ struct rte_mbuf *buf = obj;
- (void)index;
/* Check whether mbuf structure fits element size and whether mempool
* pointer is valid. */
- if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
- (buf->pool == data->mp))
- data->ret = 0;
- else
+ if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
data->ret = -1;
}
* Pointer to TX queue structure.
*/
void
-txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
+txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
struct txq *txq = arg;
struct txq_mp2mr_mbuf_check_data data = {
- .mp = mp,
- .ret = -1,
+ .ret = 0,
};
- /* Discard empty mempools. */
- if (mp->size == 0)
- return;
-/* Register mempool only if the first element looks like a mbuf. */
+/* Register mempool only if its objects look like mbufs. */
- rte_mempool_obj_iter((void *)mp->elt_va_start,
- 1,
- mp->header_size + mp->elt_size + mp->trailer_size,
- 1,
- mp->elt_pa,
- mp->pg_num,
- mp->pg_shift,
- txq_mp2mr_mbuf_check,
- &data);
- if (data.ret)
+ if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
return;
txq_mp2mr(txq, mp);
}
+/**
+ * Insert VLAN using mbuf headroom space.
+ *
+ * @param buf
+ * Buffer for VLAN insertion.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static inline int
+insert_vlan_sw(struct rte_mbuf *buf)
+{
+ uintptr_t addr;
+ uint32_t vlan;
+ uint16_t head_room_len = rte_pktmbuf_headroom(buf);
+
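+ /* An 802.1Q tag needs 4 bytes of headroom (2-byte TPID + 2-byte TCI). */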
+ if (head_room_len < 4)
+ return EINVAL;
+
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ vlan = htonl(0x81000000 | buf->vlan_tci);
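+ /* Move the two 6-byte MAC addresses 4 bytes towards the headroom, then
+ * store the tag (0x8100 TPID + TCI) right behind them. */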
+ memmove((void *)(addr - 4), (void *)addr, 12);
+ memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
+
+ SET_DATA_OFF(buf, head_room_len - 4);
+ DATA_LEN(buf) += 4;
+
+ return 0;
+}
+
#if MLX5_PMD_SGE_WR_N > 1
/**
unsigned int sent_size = 0;
#endif
uint32_t send_flags = 0;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ int insert_vlan = 0;
+#endif /* HAVE_VERBS_VLAN_INSERTION */
if (i + 1 < max)
rte_prefetch0(buf_next);
if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
send_flags |= IBV_EXP_QP_BURST_TUNNEL;
}
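+ /* Request HW VLAN insertion when available and MPS is off; insert the
+ * tag in software otherwise. */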
+ if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (!txq->priv->mps)
+ insert_vlan = 1;
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ {
+ err = insert_vlan_sw(buf);
+ if (unlikely(err))
+ goto stop;
+ }
+ }
if (likely(segs == 1)) {
uintptr_t addr;
uint32_t length;
}
/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
- if (length <= txq->max_inline)
- err = txq->send_pending_inline
- (txq->qp,
- (void *)addr,
- length,
- send_flags);
- else
+ if (length <= txq->max_inline) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_inline_vlan
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending_inline
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags);
+ } else
#endif
{
/* Retrieve Memory Region key for this
elt->buf = NULL;
goto stop;
}
- err = txq->send_pending
- (txq->qp,
- addr,
- length,
- lkey,
- send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_vlan
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags);
}
if (unlikely(err))
goto stop;
if (ret.length == (unsigned int)-1)
goto stop;
/* Put SG list into send queue. */
- err = txq->send_pending_sg_list
- (txq->qp,
- sges,
- ret.num,
- send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_sg_list_vlan
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending_sg_list
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags);
if (unlikely(err))
goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
* @param flags
* RX completion flags returned by poll_length_flags().
*
+ * @note Keep mlx5_dev_supported_ptypes_get() in sync with any change here.
+ *
* @return
* Packet type for struct rte_mbuf.
*/
}
if (ret == 0)
break;
- len = ret;
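+ /* Strip the 4-byte CRC from the length when the HW keeps it (crc_present). */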
+ assert(ret >= (rxq->crc_present << 2));
+ len = ret - (rxq->crc_present << 2);
pkt_buf_len = len;
/*
* Replace spent segments with new ones, concatenate and
* cacheline while allocating rep.
*/
rte_prefetch0(seg);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
rep->ol_flags = -1;
#endif
assert(rep->buf_len == seg->buf_len);
- assert(rep->buf_len == rxq->mb_len);
/* Reconfigure sge to use rep instead of seg. */
assert(sge->lkey == rxq->mr->lkey);
sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom);
pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
- pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt_buf->ol_flags |= PKT_RX_VLAN_PKT |
+ PKT_RX_VLAN_STRIPPED;
pkt_buf->vlan_tci = vlan_tci;
}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
* Fetch initial bytes of packet descriptor into a
* cacheline while allocating rep.
*/
- rte_prefetch0(seg);
- rte_prefetch0(&seg->cacheline1);
+ rte_mbuf_prefetch_part1(seg);
+ rte_mbuf_prefetch_part2(seg);
ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
if (unlikely(ret < 0)) {
struct ibv_wc wc;
}
if (ret == 0)
break;
- len = ret;
- rep = __rte_mbuf_raw_alloc(rxq->mp);
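+ /* Likewise, strip the 4-byte CRC from the length when crc_present is set. */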
+ assert(ret >= (rxq->crc_present << 2));
+ len = ret - (rxq->crc_present << 2);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
- seg->ol_flags |= PKT_RX_VLAN_PKT;
+ seg->ol_flags |= PKT_RX_VLAN_PKT |
+ PKT_RX_VLAN_STRIPPED;
seg->vlan_tci = vlan_tci;
}
#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */