net/mlx4: fix Rx packet type offloads
author    Moti Haimovsky <motih@mellanox.com>
          Thu, 9 Nov 2017 08:59:33 +0000 (10:59 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 10 Nov 2017 02:29:56 +0000 (02:29 +0000)
This patch improves Rx packet type offload reporting when the device is
a virtual function (VF).
On these devices the L2 tunnel flag was observed to be set even for
non-tunneled packets, which leads to a complete misinterpretation of
the received packet type.
This happens because the driver does not set tunnel_mode to 0x7 for
virtual devices, so the value of the L2 tunnel flag is meaningless and
must be ignored.
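
As a rough illustration (not part of the patch; the helper name below is
made up for the example), the core of the fix in rxq_cq_to_pkt_type() is
to trust the CQE tunnel bits only when the port really supports L2
tunnel offload:

    /* Sketch of the new gating in rxq_cq_to_pkt_type(); the real
     * function also folds the IP/TCP/UDP CQE status bits into the
     * packet-type lookup index.
     */
    static inline uint8_t
    tunnel_ptype_bits(uint32_t pinfo, uint32_t l2tun_offload)
    {
            uint8_t idx = 0;

            /* On VFs MLX4_CQE_L2_TUNNEL may be set for plain packets
             * (tunnel_mode is not 0x7), so the flag is only honored
             * when rxq->l2tun_offload says it is meaningful.
             */
            if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
                    idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
                           ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
            return idx;
    }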

Fixes: aee4a03fee4f ("net/mlx4: enhance Rx packet type offloads")

Signed-off-by: Moti Haimovsky <motih@mellanox.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
drivers/net/mlx4/mlx4_ethdev.c
drivers/net/mlx4/mlx4_rxq.c
drivers/net/mlx4/mlx4_rxtx.c
drivers/net/mlx4/mlx4_rxtx.h

index c2ea4db..2f69e7d 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1029,6 +1029,16 @@ const uint32_t *
 mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
        static const uint32_t ptypes[] = {
+               /* refers to rxq_cq_to_pkt_type() */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+       static const uint32_t ptypes_l2tun[] = {
                /* refers to rxq_cq_to_pkt_type() */
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
@@ -1040,8 +1050,13 @@ mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_UNKNOWN
        };
+       struct priv *priv = dev->data->dev_private;
 
-       if (dev->rx_pkt_burst == mlx4_rx_burst)
-               return ptypes;
+       if (dev->rx_pkt_burst == mlx4_rx_burst) {
+               if (priv->hw_csum_l2tun)
+                       return ptypes_l2tun;
+               else
+                       return ptypes;
+       }
        return NULL;
 }
index 8b97a89..53313c5 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -750,6 +750,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                         dev->data->dev_conf.rxmode.hw_ip_checksum),
                .csum_l2tun = (priv->hw_csum_l2tun &&
                               dev->data->dev_conf.rxmode.hw_ip_checksum),
+               .l2tun_offload = priv->hw_csum_l2tun,
                .stats = {
                        .idx = idx,
                },
index 3985e06..06f57cc 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -751,7 +751,8 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
+                  uint32_t l2tun_offload)
 {
        uint8_t idx = 0;
        uint32_t pinfo = rte_be_to_cpu_32(cqe->vlan_my_qpn);
@@ -762,7 +763,7 @@ rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe)
         *  bit[7] - MLX4_CQE_L2_TUNNEL
         *  bit[6] - MLX4_CQE_L2_TUNNEL_IPV4
         */
-       if (!(pinfo & MLX4_CQE_L2_VLAN_MASK) && (pinfo & MLX4_CQE_L2_TUNNEL))
+       if (l2tun_offload && (pinfo & MLX4_CQE_L2_TUNNEL))
                idx |= ((pinfo & MLX4_CQE_L2_TUNNEL) >> 20) |
                       ((pinfo & MLX4_CQE_L2_TUNNEL_IPV4) >> 19);
        /*
@@ -960,7 +961,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        }
                        pkt = seg;
                        /* Update packet information. */
-                       pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+                       pkt->packet_type =
+                               rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
                        pkt->ol_flags = 0;
                        pkt->pkt_len = len;
                        if (rxq->csum | rxq->csum_l2tun) {
index 4acad80..463df2b 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -80,6 +80,7 @@ struct rxq {
        volatile uint32_t *rq_db; /**< RQ doorbell record. */
        uint32_t csum:1; /**< Enable checksum offloading. */
        uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+       uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
        struct mlx4_cq mcq;  /**< Info for directly manipulating the CQ. */
        struct mlx4_rxq_stats stats; /**< Rx queue counters. */
        unsigned int socket; /**< CPU socket ID for allocations. */
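
For reference, the effect of this fix can be observed from an application
through the standard ethdev packet type API; a minimal, illustrative
snippet (port id, array size and output format are arbitrary, error
handling trimmed):

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Print the Rx packet types a port advertises. With this fix, an
     * mlx4 VF without L2 tunnel offload no longer advertises (nor
     * reports) inner/tunneled packet types.
     */
    static void
    dump_supported_ptypes(uint16_t port_id)
    {
            uint32_t ptypes[32];
            int i, n;

            n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
                                                  ptypes, RTE_DIM(ptypes));
            if (n > (int)RTE_DIM(ptypes))
                    n = RTE_DIM(ptypes);
            for (i = 0; i < n; i++)
                    printf("port %u supports ptype 0x%08x\n",
                           port_id, ptypes[i]);
    }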