#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/**
* mr->lkey on success, (uint32_t)-1 on failure.
*/
static uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+txq_mp2mr(struct txq *txq, const struct rte_mempool *mp)
{
unsigned int i;
struct ibv_mr *mr;
}
}
/* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool %p", (void *)txq, (void *)mp);
+ DEBUG("%p: discovered new memory pool \"%s\" (%p)",
+ (void *)txq, mp->name, (const void *)mp);
mr = ibv_reg_mr(txq->priv->pd,
(void *)mp->elt_va_start,
(mp->elt_va_end - mp->elt_va_start),
DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
(void *)txq);
--i;
- claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr));
+ claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr));
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
txq->mp2mr[i].mp = mp;
txq->mp2mr[i].mr = mr;
txq->mp2mr[i].lkey = mr->lkey;
- DEBUG("%p: new MR lkey for MP %p: 0x%08" PRIu32,
- (void *)txq, (void *)mp, txq->mp2mr[i].lkey);
+ DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
+ (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey);
return txq->mp2mr[i].lkey;
}
+/*
+ * Context passed to txq_mp2mr_mbuf_check() through rte_mempool_obj_iter():
+ * "mp" is the mempool being probed, "ret" holds the verdict (0 when the
+ * inspected element looks like a valid mbuf belonging to "mp", -1 otherwise).
+ */
+struct txq_mp2mr_mbuf_check_data {
+	const struct rte_mempool *mp;
+	int ret;
+};
+
+/**
+ * Callback function for rte_mempool_obj_iter() to check whether a given
+ * mempool object looks like a mbuf.
+ *
+ * @param[in, out] arg
+ *   Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
+ *   and return value.
+ * @param[in] start
+ *   Object start address.
+ * @param[in] end
+ *   Object end address.
+ * @param index
+ *   Unused.
+ *
+ * @return
+ *   Nonzero value when object is not a mbuf.
+ */
+static void
+txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
+		     uint32_t index __rte_unused)
+{
+	struct txq_mp2mr_mbuf_check_data *data = arg;
+	/* Skip the mempool per-object header to reach the would-be mbuf. */
+	struct rte_mbuf *buf =
+		(void *)((uintptr_t)start + data->mp->header_size);
+
+	/* "index" is already marked __rte_unused; a redundant "(void)index;"
+	 * statement was dropped. */
+	/* Check whether mbuf structure fits element size and whether mempool
+	 * pointer is valid. */
+	if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
+	    (buf->pool == data->mp))
+		data->ret = 0;
+	else
+		data->ret = -1;
+}
+
+/**
+ * Iterator function for rte_mempool_walk() to register existing mempools and
+ * fill the MP to MR cache of a TX queue.
+ *
+ * @param[in] mp
+ *   Memory Pool to register.
+ * @param *arg
+ *   Pointer to TX queue structure.
+ */
+void
+txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
+{
+	struct txq *txq = arg;
+	/* Start pessimistic; txq_mp2mr_mbuf_check() clears ret to 0 only
+	 * when the probed element looks like a mbuf of this mempool. */
+	struct txq_mp2mr_mbuf_check_data data = {
+		.mp = mp,
+		.ret = -1,
+	};
+
+	/* Discard empty mempools. */
+	if (mp->size == 0)
+		return;
+	/* Register mempool only if the first element looks like a mbuf. */
+	/* NOTE(review): legacy 9-argument rte_mempool_obj_iter() form —
+	 * vaddr, elt_num (1: probe only the first element), total element
+	 * size (header + data + trailer), alignment, physical address table,
+	 * page count and page shift; confirm against the rte_mempool version
+	 * this tree builds with. */
+	rte_mempool_obj_iter((void *)mp->elt_va_start,
+			     1,
+			     mp->header_size + mp->elt_size + mp->trailer_size,
+			     1,
+			     mp->elt_pa,
+			     mp->pg_num,
+			     mp->pg_shift,
+			     txq_mp2mr_mbuf_check,
+			     &data);
+	if (data.ret)
+		return;
+	/* Looks like a mbuf pool: register it with the TX queue's MR cache. */
+	txq_mp2mr(txq, mp);
+}
+
#if MLX5_PMD_SGE_WR_N > 1
/**
sge->length = size;
sge->lkey = txq->mr_linear->lkey;
sent_size += size;
+ /* Include last segment. */
+ segs++;
}
return (struct tx_burst_sg_ret){
.length = sent_size,
{
struct txq *txq = (struct txq *)dpdk_txq;
unsigned int elts_head = txq->elts_head;
- const unsigned int elts_tail = txq->elts_tail;
const unsigned int elts_n = txq->elts_n;
unsigned int elts_comp_cd = txq->elts_comp_cd;
unsigned int elts_comp = 0;
assert(elts_comp_cd != 0);
txq_complete(txq);
- max = (elts_n - (elts_head - elts_tail));
+ max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
assert(max >= 1);
unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
unsigned int j = 0;
uint32_t flags;
+ uint16_t vlan_tci;
/* Sanity checks. */
assert(elts_head < rxq->elts_n);
assert(rxq->elts_head < rxq->elts_n);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
+ &flags, &vlan_tci);
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
&flags);
+ (void)vlan_tci;
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
PKT_LEN(pkt_buf) = pkt_buf_len;
pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
+ pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt_buf->vlan_tci = vlan_tci;
+ }
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
/* Return packet. */
*(pkts++) = pkt_buf;
struct rte_mbuf *seg = elt->buf;
struct rte_mbuf *rep;
uint32_t flags;
+ uint16_t vlan_tci;
/* Sanity checks. */
assert(seg != NULL);
*/
rte_prefetch0(seg);
rte_prefetch0(&seg->cacheline1);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
+ &flags, &vlan_tci);
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
&flags);
+ (void)vlan_tci;
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
DATA_LEN(seg) = len;
seg->packet_type = rxq_cq_to_pkt_type(flags);
seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
+ seg->ol_flags |= PKT_RX_VLAN_PKT;
+ seg->vlan_tci = vlan_tci;
+ }
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
/* Return packet. */
*(pkts++) = seg;