struct rte_mbuf *tmp = elt->buf;
struct txq_elt *elt_next = &(*txq->elts)[elts_free_next];
+#ifndef NDEBUG
+ /* Poison freed elt memory to catch use-after-free in debug builds. */
+ memset(elt, 0x66, sizeof(*elt));
+#endif
RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
/* Faster than rte_pktmbuf_free(). */
do {
}
struct txq_mp2mr_mbuf_check_data {
- const struct rte_mempool *mp;
int ret;
};
* Callback function for rte_mempool_obj_iter() to check whether a given
* mempool object looks like a mbuf.
*
- * @param[in, out] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer
- * and return value.
- * @param[in] start
- * Object start address.
- * @param[in] end
- * Object end address.
+ * @param[in] mp
+ *   Mempool the inspected object belongs to.
+ * @param[in] arg
+ * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
+ * return value.
+ * @param[in] obj
+ * Object address.
* @param index
- * Unused.
- *
- * @return
- * Nonzero value when object is not a mbuf.
+ * Object index, unused.
*/
static void
-txq_mp2mr_mbuf_check(void *arg, void *start, void *end,
- uint32_t index __rte_unused)
+txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
+ uint32_t index __rte_unused)
{
struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf =
- (void *)((uintptr_t)start + data->mp->header_size);
+ struct rte_mbuf *buf = obj;
- (void)index;
/* Check whether mbuf structure fits element size and whether mempool
* pointer is valid. */
- if (((uintptr_t)end >= (uintptr_t)(buf + 1)) &&
- (buf->pool == data->mp))
- data->ret = 0;
- else
+ if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
data->ret = -1;
}
* Pointer to TX queue structure.
*/
void
-txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
+txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
{
struct txq *txq = arg;
struct txq_mp2mr_mbuf_check_data data = {
- .mp = mp,
- .ret = -1,
+ .ret = 0,
};
- /* Discard empty mempools. */
- if (mp->size == 0)
- return;
/* Register mempool only if the first element looks like a mbuf. */
- rte_mempool_obj_iter((void *)mp->elt_va_start,
- 1,
- mp->header_size + mp->elt_size + mp->trailer_size,
- 1,
- mp->elt_pa,
- mp->pg_num,
- mp->pg_shift,
- txq_mp2mr_mbuf_check,
- &data);
- if (data.ret)
+ if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
+ data.ret == -1)
return;
txq_mp2mr(txq, mp);
}
+/**
+ * Insert VLAN using mbuf headroom space.
+ *
+ * @param buf
+ *   Pointer to the mbuf into which the 802.1Q VLAN tag is inserted.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static inline int
+insert_vlan_sw(struct rte_mbuf *buf)
+{
+ uintptr_t addr;
+ uint32_t vlan;
+ uint16_t head_room_len = rte_pktmbuf_headroom(buf);
+
+ if (head_room_len < 4)
+ return EINVAL;
+
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ vlan = htonl(0x81000000 | buf->vlan_tci);
+ memmove((void *)(addr - 4), (void *)addr, 12);
+ memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
+
+ SET_DATA_OFF(buf, head_room_len - 4);
+ DATA_LEN(buf) += 4;
+
+ return 0;
+}
+
#if MLX5_PMD_SGE_WR_N > 1
/**
unsigned int sent_size = 0;
#endif
uint32_t send_flags = 0;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ int insert_vlan = 0;
+#endif /* HAVE_VERBS_VLAN_INSERTION */
if (i + 1 < max)
rte_prefetch0(buf_next);
if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
send_flags |= IBV_EXP_QP_BURST_TUNNEL;
}
+ if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (!txq->priv->mps)
+ insert_vlan = 1;
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ {
+ err = insert_vlan_sw(buf);
+ if (unlikely(err))
+ goto stop;
+ }
+ }
if (likely(segs == 1)) {
uintptr_t addr;
uint32_t length;
}
/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
- if (length <= txq->max_inline)
- err = txq->send_pending_inline
- (txq->qp,
- (void *)addr,
- length,
- send_flags);
- else
+ if (length <= txq->max_inline) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_inline_vlan
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending_inline
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags);
+ } else
#endif
{
/* Retrieve Memory Region key for this
elt->buf = NULL;
goto stop;
}
- err = txq->send_pending
- (txq->qp,
- addr,
- length,
- lkey,
- send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_vlan
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags);
}
if (unlikely(err))
goto stop;
if (ret.length == (unsigned int)-1)
goto stop;
/* Put SG list into send queue. */
- err = txq->send_pending_sg_list
- (txq->qp,
- sges,
- ret.num,
- send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_sg_list_vlan
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending_sg_list
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags);
if (unlikely(err))
goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
* @param flags
* RX completion flags returned by poll_length_flags().
*
+ * @note Keep mlx5_dev_supported_ptypes_get() in sync with any change made here.
+ *
* @return
* Packet type for struct rte_mbuf.
*/
{
uint32_t ol_flags = 0;
- if (rxq->csum)
- ol_flags |=
- TRANSPOSE(~flags,
- IBV_EXP_CQ_RX_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_BAD) |
- TRANSPOSE(~flags,
- IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_BAD);
+ if (rxq->csum) {
+ /* Set IP checksum flag only for IPv4/IPv6 packets. */
+ if (flags &
+ (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET))
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_IP_CSUM_OK,
+ PKT_RX_IP_CKSUM_BAD);
+#ifdef HAVE_EXP_CQ_RX_TCP_PACKET
+ /* Set L4 checksum flag only for TCP/UDP packets. */
+ if (flags &
+ (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET))
+#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */
+ ol_flags |=
+ TRANSPOSE(~flags,
+ IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
+ PKT_RX_L4_CKSUM_BAD);
+ }
/*
* PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
* of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
}
if (ret == 0)
break;
- len = ret;
+ assert(ret >= (rxq->crc_present << 2));
+ len = ret - (rxq->crc_present << 2);
pkt_buf_len = len;
/*
* Replace spent segments with new ones, concatenate and
* cacheline while allocating rep.
*/
rte_prefetch0(seg);
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,
}
if (ret == 0)
break;
- len = ret;
- rep = __rte_mbuf_raw_alloc(rxq->mp);
+ assert(ret >= (rxq->crc_present << 2));
+ len = ret - (rxq->crc_present << 2);
+ rep = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(rep == NULL)) {
/*
* Unable to allocate a replacement mbuf,