From: Yaacov Hazan
Date: Thu, 17 Mar 2016 15:38:58 +0000 (+0100)
Subject: mlx5: add VLAN insertion offload
X-Git-Tag: spdx-start~7201
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=e192ef80340cc1d95e6bf72c093f5e25daec2669;p=dpdk.git

mlx5: add VLAN insertion offload

VLAN insertion can be done in hardware when supported in Verbs.
A software fallback is provided otherwise.

The software implementation is also used when multi-packet send is
enabled on a queue, as both features are mutually exclusive.

Signed-off-by: Yaacov Hazan
Signed-off-by: Adrien Mazarguil
---

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index ee967706c3..b6f91e6acb 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -79,6 +79,7 @@ Features
 - Support for multiple MAC addresses.
 - VLAN filtering.
 - RX VLAN stripping.
+- TX VLAN insertion.
 - RX CRC stripping configuration.
 - Promiscuous mode.
 - Multicast promiscuous mode.
@@ -242,6 +243,7 @@ Currently supported by DPDK:

   - Flow director.
   - RX VLAN stripping.
+  - TX VLAN insertion.
   - RX CRC stripping configuration.

 - Minimum firmware version:
diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
index 189b0ee18d..8c355ec62a 100644
--- a/doc/guides/rel_notes/release_16_04.rst
+++ b/doc/guides/rel_notes/release_16_04.rst
@@ -230,6 +230,12 @@ This section should contain new features added in this release. Sample format:

   Only available with Mellanox OFED >= 3.2.

+* **Added mlx5 TX VLAN insertion support.**
+
+  Added support for TX VLAN insertion.
+
+  Only available with Mellanox OFED >= 3.2.
+
 * **Changed szedata2 type of driver from vdev to pdev.**

   Previously szedata2 device had to be added by ``--vdev`` option.
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index c1bc427397..92bfa0704f 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -141,6 +141,11 @@ mlx5_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh
 		infiniband/verbs.h \
 		enum IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING \
 		$(AUTOCONF_OUTPUT)
+	$Q sh -- '$<' '$@' \
+		HAVE_VERBS_VLAN_INSERTION \
+		infiniband/verbs.h \
+		enum IBV_EXP_RECEIVE_WQ_CVLAN_INSERTION \
+		$(AUTOCONF_OUTPUT)

 $(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5dc9f26f8f..041cfc334c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -261,6 +261,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	struct ibv_context *attr_ctx = NULL;
 	struct ibv_device_attr device_attr;
 	unsigned int vf;
+	unsigned int mps;
 	int idx;
 	int i;

@@ -306,8 +307,14 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
 			      (pci_dev->id.device_id ==
 			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
-		INFO("PCI information matches, using device \"%s\" (VF: %s)",
-		     list[i]->name, (vf ? "true" : "false"));
+		/* Multi-packet send is only supported by ConnectX-4 Lx PF. */
+		mps = (pci_dev->id.device_id ==
+		       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
+		INFO("PCI information matches, using device \"%s\" (VF: %s,"
+		     " MPS: %s)",
+		     list[i]->name,
+		     vf ? "true" : "false",
+		     mps ? "true" : "false");
 		attr_ctx = ibv_open_device(list[i]);
 		err = errno;
 		break;
@@ -458,6 +465,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 #endif /* HAVE_EXP_QUERY_DEVICE */

 		priv->vf = vf;
+		priv->mps = mps;
 		/* Allocate and register default RSS hash keys. */
 		priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
 					    sizeof((*priv->rss_conf)[0]), 0);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0a13272fbd..9d6cc0e6cf 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -106,6 +106,7 @@ struct priv {
 	unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
 	unsigned int hw_padding:1; /* End alignment padding is supported. */
 	unsigned int vf:1; /* This is a VF device. */
+	unsigned int mps:1; /* Whether multi-packet send is supported. */
 	unsigned int pending_alarm:1; /* An alarm is pending. */
 	/* RX/TX queues. */
 	unsigned int rxqs_n; /* RX queues array size. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 7b959c8e06..e6e20aaf8c 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -544,12 +544,12 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 		 DEV_RX_OFFLOAD_UDP_CKSUM |
 		 DEV_RX_OFFLOAD_TCP_CKSUM) :
 		0);
-	info->tx_offload_capa =
-		(priv->hw_csum ?
-		 (DEV_TX_OFFLOAD_IPV4_CKSUM |
-		  DEV_TX_OFFLOAD_UDP_CKSUM |
-		  DEV_TX_OFFLOAD_TCP_CKSUM) :
-		 0);
+	info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+	if (priv->hw_csum)
+		info->tx_offload_capa |=
+			(DEV_TX_OFFLOAD_IPV4_CKSUM |
+			 DEV_TX_OFFLOAD_UDP_CKSUM |
+			 DEV_TX_OFFLOAD_TCP_CKSUM);
 	if (priv_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	/* FIXME: RETA update/query API expects the callee to know the size of
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 25db7885aa..edf64aa8ca 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -333,6 +333,36 @@ txq_mp2mr_iter(const struct rte_mempool *mp, void *arg)
 	txq_mp2mr(txq, mp);
 }

+/**
+ * Insert VLAN using mbuf headroom space.
+ *
+ * @param buf
+ *   Buffer for VLAN insertion.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+static inline int
+insert_vlan_sw(struct rte_mbuf *buf)
+{
+	uintptr_t addr;
+	uint32_t vlan;
+	uint16_t head_room_len = rte_pktmbuf_headroom(buf);
+
+	if (head_room_len < 4)
+		return EINVAL;
+
+	addr = rte_pktmbuf_mtod(buf, uintptr_t);
+	vlan = htonl(0x81000000 | buf->vlan_tci);
+	memmove((void *)(addr - 4), (void *)addr, 12);
+	memcpy((void *)(addr + 8), &vlan, sizeof(vlan));
+
+	SET_DATA_OFF(buf, head_room_len - 4);
+	DATA_LEN(buf) += 4;
+
+	return 0;
+}
+
 #if MLX5_PMD_SGE_WR_N > 1

 /**
@@ -534,6 +564,9 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		unsigned int sent_size = 0;
 #endif
 		uint32_t send_flags = 0;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+		int insert_vlan = 0;
+#endif /* HAVE_VERBS_VLAN_INSERTION */

 		if (i + 1 < max)
 			rte_prefetch0(buf_next);
@@ -554,6 +587,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
 				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
 		}
+		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+			if (!txq->priv->mps)
+				insert_vlan = 1;
+			else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+			{
+				err = insert_vlan_sw(buf);
+				if (unlikely(err))
+					goto stop;
+			}
+		}
 		if (likely(segs == 1)) {
 			uintptr_t addr;
 			uint32_t length;
@@ -577,13 +622,23 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			}
 			/* Put packet into send queue. */
 #if MLX5_PMD_MAX_INLINE > 0
-			if (length <= txq->max_inline)
-				err = txq->send_pending_inline
-					(txq->qp,
-					 (void *)addr,
-					 length,
-					 send_flags);
-			else
+			if (length <= txq->max_inline) {
+#ifdef HAVE_VERBS_VLAN_INSERTION
+				if (insert_vlan)
+					err = txq->send_pending_inline_vlan
+						(txq->qp,
+						 (void *)addr,
+						 length,
+						 send_flags,
+						 &buf->vlan_tci);
+				else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+					err = txq->send_pending_inline
+						(txq->qp,
+						 (void *)addr,
+						 length,
+						 send_flags);
+			} else
 #endif
 			{
 				/* Retrieve Memory Region key for this
@@ -597,12 +652,23 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 					elt->buf = NULL;
 					goto stop;
 				}
-				err = txq->send_pending
-					(txq->qp,
-					 addr,
-					 length,
-					 lkey,
-					 send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+				if (insert_vlan)
+					err = txq->send_pending_vlan
+						(txq->qp,
+						 addr,
+						 length,
+						 lkey,
+						 send_flags,
+						 &buf->vlan_tci);
+				else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+					err = txq->send_pending
+						(txq->qp,
+						 addr,
+						 length,
+						 lkey,
+						 send_flags);
 			}
 			if (unlikely(err))
 				goto stop;
@@ -619,11 +685,21 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			if (ret.length == (unsigned int)-1)
 				goto stop;
 			/* Put SG list into send queue. */
-			err = txq->send_pending_sg_list
-				(txq->qp,
-				 sges,
-				 ret.num,
-				 send_flags);
+#ifdef HAVE_VERBS_VLAN_INSERTION
+			if (insert_vlan)
+				err = txq->send_pending_sg_list_vlan
+					(txq->qp,
+					 sges,
+					 ret.num,
+					 send_flags,
+					 &buf->vlan_tci);
+			else
+#endif /* HAVE_VERBS_VLAN_INSERTION */
+				err = txq->send_pending_sg_list
+					(txq->qp,
+					 sges,
+					 ret.num,
+					 send_flags);
 			if (unlikely(err))
 				goto stop;
 #ifdef MLX5_PMD_SOFT_COUNTERS
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 61be3e4e9d..0e2b607d5a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -255,11 +255,20 @@ struct txq {
 	struct priv *priv; /* Back pointer to private data. */
 	int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max);
 	int (*send_pending)();
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	int (*send_pending_vlan)();
+#endif
 #if MLX5_PMD_MAX_INLINE > 0
 	int (*send_pending_inline)();
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	int (*send_pending_inline_vlan)();
+#endif
 #endif
 #if MLX5_PMD_SGE_WR_N > 1
 	int (*send_pending_sg_list)();
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	int (*send_pending_sg_list_vlan)();
+#endif
 #endif
 	int (*send_flush)(struct ibv_qp *qp);
 	struct ibv_cq *cq; /* Completion Queue. */
@@ -283,7 +292,11 @@ struct txq {
 	/* Elements used only for init part are here. */
 	linear_t (*elts_linear)[]; /* Linearized buffers. */
 	struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	struct ibv_exp_qp_burst_family_v1 *if_qp; /* QP burst interface. */
+#else
 	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+#endif
 	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
 	struct ibv_exp_res_domain *rd; /* Resource Domain. */
 	unsigned int socket; /* CPU socket ID for allocations. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 6700af41a5..ce2bb421ce 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -400,10 +400,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 			.intf_scope = IBV_EXP_INTF_GLOBAL,
 			.intf = IBV_EXP_INTF_QP_BURST,
 			.obj = tmpl.qp,
+#ifdef HAVE_VERBS_VLAN_INSERTION
+			.intf_version = 1,
+#endif
 #ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR
-			/* Multi packet send WR can only be used outside of VF. */
+			/* Enable multi-packet send if supported. */
 			.family_flags =
-				(!priv->vf ?
+				(priv->mps ?
 				 IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
 				 0),
 #endif
@@ -422,11 +425,20 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
 	txq->poll_cnt = txq->if_cq->poll_cnt;
 #if MLX5_PMD_MAX_INLINE > 0
 	txq->send_pending_inline = txq->if_qp->send_pending_inline;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan;
+#endif
 #endif
 #if MLX5_PMD_SGE_WR_N > 1
 	txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	txq->send_pending_sg_list_vlan = txq->if_qp->send_pending_sg_list_vlan;
+#endif
 #endif
 	txq->send_pending = txq->if_qp->send_pending;
+#ifdef HAVE_VERBS_VLAN_INSERTION
+	txq->send_pending_vlan = txq->if_qp->send_pending_vlan;
+#endif
 	txq->send_flush = txq->if_qp->send_flush;
 	DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
 	/* Pre-register known mempools. */
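
For reference, below is a minimal sketch of how an application would request
this offload once the patch is applied. It is not part of the patch; the two
helper names are hypothetical, while DEV_TX_OFFLOAD_VLAN_INSERT,
PKT_TX_VLAN_PKT and the mbuf vlan_tci field are the standard DPDK 16.04
ethdev/mbuf API. Packets flagged this way go through the Verbs
send_pending*_vlan() calls when HAVE_VERBS_VLAN_INSERTION is detected and
multi-packet send is off, and through insert_vlan_sw() otherwise:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Return nonzero when a port advertises TX VLAN insertion
 * (mlx5 always does once mlx5_dev_infos_get() carries this patch). */
static int
port_has_tx_vlan_insert(uint8_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	return !!(info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT);
}

/* Ask the PMD to prepend an 802.1Q header with the given TCI to this
 * packet at transmission time. */
static void
request_vlan_insertion(struct rte_mbuf *m, uint16_t vlan_tci)
{
	m->ol_flags |= PKT_TX_VLAN_PKT;
	m->vlan_tci = vlan_tci;
}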