From 78214fb8821fab0669c1c48f00fc4773e5a9eb98 Mon Sep 17 00:00:00 2001
From: Moti Haimovsky
Date: Thu, 9 Nov 2017 10:59:33 +0200
Subject: [PATCH] net/mlx4: fix Rx packet type offloads

This patch improves the Rx packet type offload report when the device
is a virtual function (VF) device.
On these devices we observed that the L2 tunnel flag is also set for
non-tunneled packets, which leads to a complete misinterpretation of
the received packet type.
This happens because the driver does not set tunnel_mode to 0x7 for
virtual devices, so the value of the L2 tunnel flag is meaningless and
should be ignored.

Fixes: aee4a03fee4f ("net/mlx4: enhance Rx packet type offloads")

Signed-off-by: Moti Haimovsky
Acked-by: Adrien Mazarguil
---
 drivers/net/mlx4/mlx4_ethdev.c | 19 +++++++++++++++++--
 drivers/net/mlx4/mlx4_rxq.c    |  1 +
 drivers/net/mlx4/mlx4_rxtx.c   |  8 +++++---
 drivers/net/mlx4/mlx4_rxtx.h   |  1 +
 4 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index c2ea4db1d9..2f69e7d4fc 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1029,6 +1029,16 @@ const uint32_t *
 mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
 	static const uint32_t ptypes[] = {
+		/* refers to rxq_cq_to_pkt_type() */
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_FRAG,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+	static const uint32_t ptypes_l2tun[] = {
 		/* refers to rxq_cq_to_pkt_type() */
 		RTE_PTYPE_L2_ETHER,
 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
@@ -1040,8 +1050,13 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
 		RTE_PTYPE_UNKNOWN
 	};
+	struct priv *priv = dev->data->dev_private;

-	if (dev->rx_pkt_burst == mlx4_rx_burst)
-		return ptypes;
+	if (dev->rx_pkt_burst == mlx4_rx_burst) {
+		if (priv->hw_csum_l2tun)
+			return ptypes_l2tun;
+		else
+			return ptypes;
+	}
 	return NULL;
 }
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 8b97a8942b..53313c56f9 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -750,6 +750,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			  dev->data->dev_conf.rxmode.hw_ip_checksum),
 		.csum_l2tun = (priv->hw_csum_l2tun &&
 			       dev->data->dev_conf.rxmode.hw_ip_checksum),
+		.l2tun_offload = priv->hw_csum_l2tun,
 		.stats = {
 			.idx = idx,
 		},
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 3985e06dea..06f57cc4f7 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -751,7 +751,8 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
+		   uint32_t l2tun_offload)
 {
 	uint8_t idx = 0;
 	uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
@@ -762,7 +763,7 @@ rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
 	 * bit[7] - MLX4_CQE_L2_TUNNEL
 	 * bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
 	 */
-	if (!(pinfo & MLX4_CQE_L2_VLAN_MASK) && (pinfo & MLX4_CQE_L2_TUNNEL))
+	if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
 		idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
 		       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
 	/*
@@ -960,7 +961,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		pkt = seg;
 		/* Update packet information. */
-		pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+		pkt->packet_type =
+			rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
 		pkt->ol_flags = 0;
 		pkt->pkt_len = len;
 		if (rxq->csum | rxq->csum_l2tun) {
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 4acad80103..463df2b0bf 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -80,6 +80,7 @@ struct rxq {
 	volatile uint32_t *rq_db; /**< RQ doorbell record. */
 	uint32_t csum:1; /**< Enable checksum offloading. */
 	uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+	uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
 	struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
 	struct mlx4_rxq_stats stats; /**< Rx queue counters. */
 	unsigned int socket; /**< CPU socket ID for allocations. */
--
2.20.1
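For reference, below is a minimal, illustrative sketch (not part of the patch) of how an application can observe the effect of this change through the generic ethdev API: after the fix, mlx4_dev_supported_ptypes_get() only advertises the inner (tunneled) packet types when hw_csum_l2tun is available, so on a virtual function the inner L3 types should no longer be listed. The sketch assumes a DPDK 17.11-era rte_eth_dev_get_supported_ptypes() signature and an already-probed port; check_inner_ptypes() is a hypothetical helper name.

/*
 * Illustrative sketch only, not part of this patch.  Assumes the
 * DPDK 17.11-era ethdev API (rte_eth_dev_get_supported_ptypes() and
 * the RTE_PTYPE_* macros from rte_mbuf_ptype.h).
 */
#include <stdio.h>

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_mbuf_ptype.h>

static void
check_inner_ptypes(uint16_t port_id)
{
	uint32_t ptypes[32];
	int n, i, has_inner = 0;

	/* Query every packet type layer the PMD claims to recognize. */
	n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
					     ptypes, RTE_DIM(ptypes));
	if (n < 0) {
		printf("port %u: cannot query packet types (%d)\n",
		       port_id, n);
		return;
	}
	if (n > (int)RTE_DIM(ptypes))
		n = RTE_DIM(ptypes);
	/* Inner L3 types are reported only when l2tun_offload is set. */
	for (i = 0; i < n; i++)
		if (ptypes[i] & RTE_PTYPE_INNER_L3_MASK)
			has_inner = 1;
	printf("port %u %s inner (tunneled) packet types\n",
	       port_id, has_inner ? "reports" : "does not report");
}

A PF port with hw_csum_l2tun would be expected to print "reports", while a VF port, which now uses the shorter ptypes table, would print "does not report".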