- Support for multiple MAC addresses.
- VLAN filtering.
- RX VLAN stripping.
+- TX VLAN insertion.
- RX CRC stripping configuration.
- Promiscuous mode.
- Multicast promiscuous mode.
- Flow director.
- RX VLAN stripping.
+ - TX VLAN insertion.
- RX CRC stripping configuration.
- Minimum firmware version:
Only available with Mellanox OFED >= 3.2.
+* **Added mlx5 TX VLAN insertion support.**
+
+ Added support for TX VLAN insertion.
+
+ Only available with Mellanox OFED >= 3.2.
+
* **Changed szedata2 type of driver from vdev to pdev.**
Previously szedata2 device had to be added by ``--vdev`` option.
infiniband/verbs.h \
enum IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_VERBS_VLAN_INSERTION \
+ infiniband/verbs.h \
+ enum IBV_EXP_RECEIVE_WQ_CVLAN_INSERTION \
+ $(AUTOCONF_OUTPUT)
$(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
unsigned int vf;
+ unsigned int mps;
int idx;
int i;
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
- INFO("PCI information matches, using device \"%s\" (VF: %s)",
- list[i]->name, (vf ? "true" : "false"));
+ /* Multi-packet send is only supported by ConnectX-4 Lx PF. */
+ mps = (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
+ INFO("PCI information matches, using device \"%s\" (VF: %s,"
+ " MPS: %s)",
+ list[i]->name,
+ vf ? "true" : "false",
+ mps ? "true" : "false");
attr_ctx = ibv_open_device(list[i]);
err = errno;
break;
#endif /* HAVE_EXP_QUERY_DEVICE */
priv->vf = vf;
+ priv->mps = mps;
/* Allocate and register default RSS hash keys. */
priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
sizeof((*priv->rss_conf)[0]), 0);
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int vf:1; /* This is a VF device. */
+ unsigned int mps:1; /* Whether multi-packet send is supported. */
unsigned int pending_alarm:1; /* An alarm is pending. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM) :
0);
- info->tx_offload_capa =
- (priv->hw_csum ?
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM) :
- 0);
+ info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (priv->hw_csum)
+ info->tx_offload_capa |=
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
/* FIXME: RETA update/query API expects the callee to know the size of
txq_mp2mr(txq, mp);
}
+/**
+ * Insert VLAN using mbuf headroom space.
+ *
+ * Software fallback for TX VLAN insertion: the destination and source
+ * MAC addresses (12 bytes) are shifted 4 bytes toward the headroom and
+ * the 802.1Q tag (TPID 0x8100 + TCI) is written in the gap, just before
+ * the original EtherType.
+ *
+ * @param buf
+ *   Buffer for VLAN insertion.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+static inline int
+insert_vlan_sw(struct rte_mbuf *buf)
+{
+ uintptr_t addr;
+ uint32_t vlan;
+ uint16_t head_room_len = rte_pktmbuf_headroom(buf);
+
+ /* The tag needs 4 bytes of headroom in front of the packet data. */
+ if (head_room_len < 4)
+ return EINVAL;
+
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ /* 0x8100 is the standard 802.1Q TPID; low 16 bits carry the TCI. */
+ vlan = htonl(0x81000000 | buf->vlan_tci);
+ /* Move both MAC addresses (2 * 6 bytes) back by the tag size. */
+ memmove((void *)(addr - 4), (void *)addr, 12);
+ /* Tag goes right after the moved MACs: (addr - 4) + 12 == addr + 8. */
+ memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
+
+ /* Grow the segment by the 4 bytes now taken from the headroom. */
+ SET_DATA_OFF(buf, head_room_len - 4);
+ DATA_LEN(buf) += 4;
+
+ return 0;
+}
+
#if MLX5_PMD_SGE_WR_N > 1
/**
unsigned int sent_size = 0;
#endif
uint32_t send_flags = 0;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ int insert_vlan = 0;
+#endif /* HAVE_VERBS_VLAN_INSERTION */
if (i + 1 < max)
rte_prefetch0(buf_next);
if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
send_flags |= IBV_EXP_QP_BURST_TUNNEL;
}
+ if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (!txq->priv->mps)
+ insert_vlan = 1;
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ {
+ err = insert_vlan_sw(buf);
+ if (unlikely(err))
+ goto stop;
+ }
+ }
if (likely(segs == 1)) {
uintptr_t addr;
uint32_t length;
}
/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
- if (length <= txq->max_inline)
- err = txq->send_pending_inline
- (txq->qp,
- (void *)addr,
- length,
- send_flags);
- else
+ if (length <= txq->max_inline) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_inline_vlan
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending_inline
+ (txq->qp,
+ (void *)addr,
+ length,
+ send_flags);
+ } else
#endif
{
/* Retrieve Memory Region key for this
elt->buf = NULL;
goto stop;
}
- err = txq->send_pending
- (txq->qp,
- addr,
- length,
- lkey,
- send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_vlan
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending
+ (txq->qp,
+ addr,
+ length,
+ lkey,
+ send_flags);
}
if (unlikely(err))
goto stop;
if (ret.length == (unsigned int)-1)
goto stop;
/* Put SG list into send queue. */
- err = txq->send_pending_sg_list
- (txq->qp,
- sges,
- ret.num,
- send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ if (insert_vlan)
+ err = txq->send_pending_sg_list_vlan
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags,
+ &buf->vlan_tci);
+ else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+ err = txq->send_pending_sg_list
+ (txq->qp,
+ sges,
+ ret.num,
+ send_flags);
if (unlikely(err))
goto stop;
#ifdef MLX5_PMD_SOFT_COUNTERS
struct priv *priv; /* Back pointer to private data. */
int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max);
int (*send_pending)();
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ int (*send_pending_vlan)();
+#endif
#if MLX5_PMD_MAX_INLINE > 0
int (*send_pending_inline)();
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ int (*send_pending_inline_vlan)();
+#endif
#endif
#if MLX5_PMD_SGE_WR_N > 1
int (*send_pending_sg_list)();
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ int (*send_pending_sg_list_vlan)();
+#endif
#endif
int (*send_flush)(struct ibv_qp *qp);
struct ibv_cq *cq; /* Completion Queue. */
/* Elements used only for init part are here. */
linear_t (*elts_linear)[]; /* Linearized buffers. */
struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ struct ibv_exp_qp_burst_family_v1 *if_qp; /* QP burst interface. */
+#else
struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+#endif
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
unsigned int socket; /* CPU socket ID for allocations. */
.intf_scope = IBV_EXP_INTF_GLOBAL,
.intf = IBV_EXP_INTF_QP_BURST,
.obj = tmpl.qp,
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ .intf_version = 1,
+#endif
#ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR
- /* Multi packet send WR can only be used outside of VF. */
+ /* Enable multi-packet send if supported. */
.family_flags =
- (!priv->vf ?
+ (priv->mps ?
IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
0),
#endif
txq->poll_cnt = txq->if_cq->poll_cnt;
#if MLX5_PMD_MAX_INLINE > 0
txq->send_pending_inline = txq->if_qp->send_pending_inline;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
+#endif
#endif
#if MLX5_PMD_SGE_WR_N > 1
txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ txq->send_pending_sg_list_vlan = txq->if_qp->send_pending_sg_list_vlan;
+#endif
#endif
txq->send_pending = txq->if_qp->send_pending;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+ txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
+#endif
txq->send_flush = txq->if_qp->send_flush;
DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
/* Pre-register known mempools. */