- Configurable RETA table.
- Support for multiple MAC addresses.
- VLAN filtering.
+- RX VLAN stripping.
- Promiscuous mode.
- Multicast promiscuous mode.
- Hardware checksum offloads.
above only:
- Flow director.
+ - RX VLAN stripping.
- Minimum firmware version:
Only available with Mellanox OFED >= 3.2.
+* **Added mlx5 RX VLAN stripping support.**
+
+  Added support for stripping the VLAN tag from incoming packets and
+  reporting it in the received mbuf (see the usage sketch after this hunk).
+
+ Only available with Mellanox OFED >= 3.2.
+
* **Increased number of next hops for LPM IPv4 to 2^24.**
The next_hop field is extended from 8 bits to 24 bits for IPv4.
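
Not part of the patch itself: a minimal application-side sketch of how the new
offload is consumed through the existing ethdev API. It assumes the port has
already been set up (EAL init, mempool, rte_eth_rx_queue_setup(),
rte_eth_dev_start()); port 0, a burst size of 32 and the printf() reporting
are illustrative only.

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Request RX VLAN stripping at configuration time, then check the
     * stripped tag on received packets. */
    static void
    rx_vlan_strip_example(uint8_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = {
                .hw_vlan_strip = 1, /* ask the PMD to strip the VLAN tag */
            },
        };
        struct rte_mbuf *pkts[32];
        uint16_t i, n;

        /* One RX and one TX queue; queue setup and start omitted here. */
        rte_eth_dev_configure(port_id, 1, 1, &conf);

        /* ... rte_eth_rx_queue_setup(), rte_eth_dev_start() ... */

        n = rte_eth_rx_burst(port_id, 0, pkts, 32);
        for (i = 0; i != n; ++i) {
            if (pkts[i]->ol_flags & PKT_RX_VLAN_PKT)
                printf("port %u: VLAN TCI %u\n",
                       port_id, pkts[i]->vlan_tci);
            rte_pktmbuf_free(pkts[i]);
        }
    }
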
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
.mtu_set = mlx5_dev_set_mtu,
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+ .vlan_offload_set = mlx5_vlan_offload_set,
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.reta_update = mlx5_dev_rss_reta_update,
.reta_query = mlx5_dev_rss_reta_query,
.rss_hash_update = mlx5_rss_hash_update,
#ifdef HAVE_EXP_QUERY_DEVICE
exp_device_attr.comp_mask =
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
- IBV_EXP_DEVICE_ATTR_RX_HASH;
+ IBV_EXP_DEVICE_ATTR_RX_HASH |
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ 0;
#endif /* HAVE_EXP_QUERY_DEVICE */
DEBUG("using port %u (%08" PRIx32 ")", port, test);
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
+ IBV_EXP_RECEIVE_WQ_CVLAN_STRIP);
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ DEBUG("VLAN stripping is %ssupported",
+ (priv->hw_vlan_strip ? "" : "not "));
#else /* HAVE_EXP_QUERY_DEVICE */
priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
unsigned int allmulti_req:1; /* All multicast mode requested. */
unsigned int hw_csum:1; /* Checksum offload is supported. */
unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
unsigned int vf:1; /* This is a VF device. */
unsigned int pending_alarm:1; /* An alarm is pending. */
/* RX/TX queues. */
/* mlx5_vlan.c */
int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int);
+void mlx5_vlan_offload_set(struct rte_eth_dev *, int);
+void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int);
/* mlx5_trigger.c */
priv->device_attr.max_qp_wr);
DEBUG("priv->device_attr.max_sge is %d",
priv->device_attr.max_sge);
+ /* Configure VLAN stripping. */
+ tmpl.vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip;
attr.wq = (struct ibv_exp_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_EXP_WQT_RQ,
MLX5_PMD_SGE_WR_N),
.pd = priv->pd,
.cq = tmpl.cq,
- .comp_mask = IBV_EXP_CREATE_WQ_RES_DOMAIN,
+ .comp_mask =
+ IBV_EXP_CREATE_WQ_RES_DOMAIN |
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ 0,
.res_domain = tmpl.rd,
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ .vlan_offloads = (tmpl.vlan_strip ?
+ IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
+ 0),
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
};
tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq);
if (tmpl.wq == NULL) {
DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id);
attr.params = (struct ibv_exp_query_intf_params){
.intf_scope = IBV_EXP_INTF_GLOBAL,
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ .intf_version = 1,
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
.intf = IBV_EXP_INTF_CQ,
.obj = tmpl.cq,
};
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
+#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
/**
unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
unsigned int j = 0;
uint32_t flags;
+ uint16_t vlan_tci;
/* Sanity checks. */
assert(elts_head < rxq->elts_n);
assert(rxq->elts_head < rxq->elts_n);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
+ &flags, &vlan_tci);
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
&flags);
+ (void)vlan_tci;
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
PKT_LEN(pkt_buf) = pkt_buf_len;
pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
+ pkt_buf->ol_flags |= PKT_RX_VLAN_PKT;
+ pkt_buf->vlan_tci = vlan_tci;
+ }
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
/* Return packet. */
*(pkts++) = pkt_buf;
struct rte_mbuf *seg = elt->buf;
struct rte_mbuf *rep;
uint32_t flags;
+ uint16_t vlan_tci;
/* Sanity checks. */
assert(seg != NULL);
*/
rte_prefetch0(seg);
rte_prefetch0(&seg->cacheline1);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
+ &flags, &vlan_tci);
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
&flags);
+ (void)vlan_tci;
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
DATA_LEN(seg) = len;
seg->packet_type = rxq_cq_to_pkt_type(flags);
seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) {
+ seg->ol_flags |= PKT_RX_VLAN_PKT;
+ seg->vlan_tci = vlan_tci;
+ }
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
/* Return packet. */
*(pkts++) = seg;
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_exp_wq *wq; /* Work Queue. */
struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
unsigned int port_id; /* Port ID for incoming packets. */
unsigned int elts_n; /* (*elts)[] length. */
unsigned int elts_head; /* Current index in (*elts)[]. */
unsigned int sp:1; /* Use scattered RX elements. */
unsigned int csum:1; /* Enable checksum offloading. */
unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int vlan_strip:1; /* Enable VLAN stripping. */
uint32_t mb_len; /* Length of a mp-issued mbuf. */
struct mlx5_rxq_stats stats; /* RX queue counters. */
unsigned int socket; /* CPU socket ID for allocations. */
#include "mlx5_utils.h"
#include "mlx5.h"
+#include "mlx5_autoconf.h"
/**
* Configure a VLAN filter.
assert(ret >= 0);
return -ret;
}
+
+/**
+ * Set/reset VLAN stripping for a specific queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param idx
+ * RX queue index.
+ * @param on
+ * Enable/disable VLAN stripping.
+ */
+static void
+priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
+{
+ struct rxq *rxq = (*priv->rxqs)[idx];
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ struct ibv_exp_wq_attr mod;
+ uint16_t vlan_offloads =
+ (on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) |
+ 0;
+ int err;
+
+ DEBUG("set VLAN offloads 0x%x for port %d queue %d",
+ vlan_offloads, rxq->port_id, idx);
+ mod = (struct ibv_exp_wq_attr){
+ .attr_mask = IBV_EXP_WQ_ATTR_VLAN_OFFLOADS,
+ .vlan_offloads = vlan_offloads,
+ };
+
+ err = ibv_exp_modify_wq(rxq->wq, &mod);
+ if (err) {
+		ERROR("%p: failed to modify VLAN stripping mode: %s",
+ (void *)priv, strerror(err));
+ return;
+ }
+
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+
+ /* Update related bits in RX queue. */
+ rxq->vlan_strip = !!on;
+}
+
+/**
+ * Callback to set/reset VLAN stripping for a specific queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue
+ * RX queue index.
+ * @param on
+ * Enable/disable VLAN stripping.
+ */
+void
+mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct priv *priv = dev->data->dev_private;
+
+	/* Validate hw support. */
+ if (!priv->hw_vlan_strip) {
+ ERROR("VLAN stripping is not supported");
+ return;
+ }
+
+	/* Validate queue number. */
+ if (queue >= priv->rxqs_n) {
+ ERROR("VLAN stripping, invalid queue number %d", queue);
+ return;
+ }
+
+ priv_lock(priv);
+ priv_vlan_strip_queue_set(priv, queue, on);
+ priv_unlock(priv);
+}
+
+/**
+ * Callback to set/reset VLAN offloads for a port.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mask
+ * VLAN offload bit mask.
+ */
+void
+mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ int hw_vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip;
+
+ if (!priv->hw_vlan_strip) {
+ ERROR("VLAN stripping is not supported");
+ return;
+ }
+
+ /* Run on every RX queue and set/reset VLAN stripping. */
+ priv_lock(priv);
+ for (i = 0; (i != priv->rxqs_n); i++)
+ priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
+ priv_unlock(priv);
+ }
+}
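
Also not part of the patch: a sketch of the run-time entry points that reach
the two callbacks above. rte_eth_dev_set_vlan_offload() drives the port-wide
vlan_offload_set callback and rte_eth_dev_set_vlan_strip_on_queue() the
per-queue vlan_strip_queue_set one. The port is assumed to be configured and
started; queue 0 is an arbitrary example.

    #include <rte_ethdev.h>

    /* Enable VLAN stripping port-wide, then turn it back off on queue 0. */
    static void
    vlan_strip_runtime_example(uint8_t port_id)
    {
        /* Fetch the current VLAN offload state and set the strip bit; if
         * the strip state changes, ethdev updates rxmode.hw_vlan_strip and
         * calls the PMD's vlan_offload_set with ETH_VLAN_STRIP_MASK set. */
        int flags = rte_eth_dev_get_vlan_offload(port_id);

        if (flags < 0)
            return;
        flags |= ETH_VLAN_STRIP_OFFLOAD;
        rte_eth_dev_set_vlan_offload(port_id, flags);

        /* Per-queue control goes through vlan_strip_queue_set instead. */
        rte_eth_dev_set_vlan_strip_on_queue(port_id, 0, 0);
    }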