net/qede: enable IPGRE offload support
author Harish Patil <harish.patil@cavium.com>
Wed, 28 Mar 2018 00:15:52 +0000 (17:15 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 30 Mar 2018 12:08:44 +0000 (14:08 +0200)
Signed-off-by: Harish Patil <harish.patil@cavium.com>
drivers/net/qede/qede_ethdev.c
drivers/net/qede/qede_ethdev.h
drivers/net/qede/qede_rxtx.c
drivers/net/qede/qede_rxtx.h

index 6a51e3d..f25fb91 100644 (file)
@@ -781,6 +781,39 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
        return rc;
 }
 
+/* Enable/disable HW classification for IP-in-GRE tunneled packets.
+ * Counterpart of qede_vxlan_enable()/qede_geneve_enable(); on success
+ * the new state is cached in qdev->ipgre.enable.
+ */
+static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+                 bool enable)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       enum _ecore_status_t rc = ECORE_INVAL;
+       struct ecore_tunnel_info tunn;
+
+       memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+       tunn.ip_gre.b_update_mode = true;
+       tunn.ip_gre.b_mode_enabled = enable;
+       tunn.ip_gre.tun_cls = clss;
+       tunn.b_update_rx_cls = true;
+       tunn.b_update_tx_cls = true;
+
+       rc = qede_tunnel_update(qdev, &tunn);
+       if (rc == ECORE_SUCCESS) {
+               qdev->ipgre.enable = enable;
+               DP_INFO(edev, "IPGRE is %s\n",
+                       enable ? "enabled" : "disabled");
+       } else {
+               DP_ERR(edev, "Failed to update tunn_clss %u\n",
+                      clss);
+       }
+
+       return rc;
+}
+
 static int
 qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
                 enum rte_eth_tunnel_type tunn_type, bool enable)
@@ -794,6 +824,9 @@ qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
        case RTE_TUNNEL_TYPE_GENEVE:
                rc = qede_geneve_enable(eth_dev, clss, enable);
                break;
+       case RTE_TUNNEL_TYPE_IP_IN_GRE:
+               rc = qede_ipgre_enable(eth_dev, clss, enable);
+               break;
        default:
                rc = -EINVAL;
                break;
@@ -2078,6 +2111,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
                RTE_PTYPE_TUNNEL_VXLAN,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_TUNNEL_GENEVE,
+               RTE_PTYPE_TUNNEL_GRE,
                /* Inner */
                RTE_PTYPE_INNER_L2_ETHER,
                RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2501,7 +2535,6 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
                                        ECORE_TUNN_CLSS_MAC_VLAN, false);
 
                break;
-
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
                        DP_ERR(edev, "UDP port %u doesn't exist\n",
@@ -2591,7 +2624,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
                qdev->vxlan.udp_port = udp_port;
                break;
-
        case RTE_TUNNEL_TYPE_GENEVE:
                if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
                        DP_INFO(edev,
@@ -2629,7 +2661,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
 
                qdev->geneve.udp_port = udp_port;
                break;
-
        default:
                return ECORE_INVAL;
        }
@@ -2795,7 +2826,8 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
                        qdev->geneve.filter_type = conf->filter_type;
                }
 
-               if (!qdev->vxlan.enable || !qdev->geneve.enable)
+               if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+                   !qdev->ipgre.enable)
                        return qede_tunn_enable(eth_dev, clss,
                                                conf->tunnel_type,
                                                true);
@@ -2831,15 +2863,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
                switch (filter_conf->tunnel_type) {
                case RTE_TUNNEL_TYPE_VXLAN:
                case RTE_TUNNEL_TYPE_GENEVE:
+               case RTE_TUNNEL_TYPE_IP_IN_GRE:
                        DP_INFO(edev,
                                "Packet steering to the specified Rx queue"
                                " is not supported with UDP tunneling");
                        return(qede_tunn_filter_config(eth_dev, filter_op,
                                                      filter_conf));
-               /* Place holders for future tunneling support */
                case RTE_TUNNEL_TYPE_TEREDO:
                case RTE_TUNNEL_TYPE_NVGRE:
-               case RTE_TUNNEL_TYPE_IP_IN_GRE:
                case RTE_L2_TUNNEL_TYPE_E_TAG:
                        DP_ERR(edev, "Unsupported tunnel type %d\n",
                                filter_conf->tunnel_type);
@@ -3138,19 +3169,23 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        /* VF tunnel offloads is enabled by default in PF driver */
        adapter->vxlan.num_filters = 0;
        adapter->geneve.num_filters = 0;
+       adapter->ipgre.num_filters = 0;
        if (is_vf) {
                adapter->vxlan.enable = true;
                adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                             ETH_TUNNEL_FILTER_IVLAN;
                adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
                adapter->geneve.enable = true;
-
                adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
                                              ETH_TUNNEL_FILTER_IVLAN;
                adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+               adapter->ipgre.enable = true;
+               adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                            ETH_TUNNEL_FILTER_IVLAN;
        } else {
                adapter->vxlan.enable = false;
                adapter->geneve.enable = false;
+               adapter->ipgre.enable = false;
        }
 
        DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
index 23f7e0e..baae22d 100644 (file)
@@ -170,7 +170,7 @@ struct qede_fdir_info {
 #define QEDE_VXLAN_DEF_PORT                    (4789)
 #define QEDE_GENEVE_DEF_PORT                   (6081)
 
-struct qede_udp_tunn {
+struct qede_tunn_params {
        bool enable;
        uint16_t num_filters;
        uint16_t filter_type;
@@ -205,8 +205,9 @@ struct qede_dev {
        SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
        uint16_t num_uc_addr;
        bool handle_hw_err;
-       struct qede_udp_tunn vxlan;
-       struct qede_udp_tunn geneve;
+       struct qede_tunn_params vxlan;
+       struct qede_tunn_params geneve;
+       struct qede_tunn_params ipgre;
        struct qede_fdir_info fdir_info;
        bool vlan_strip_flg;
        char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
index e9fe46c..20c10be 100644 (file)
@@ -1837,17 +1837,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * offloads. Don't rely on pkt_type marked by Rx, instead use
                 * tx_ol_flags to decide.
                 */
-               if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_VXLAN) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_MPLSINUDP) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_GENEVE)) {
+               tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+               if (tunn_flg) {
                        /* Check against max which is Tunnel IPv6 + ext */
                        if (unlikely(txq->nb_tx_avail <
                                ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
                                        break;
-                       tunn_flg = true;
+
                        /* First indicate its a tunnel pkt */
                        bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
                                  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
@@ -1986,7 +1983,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         * csum offload is requested then we need to force
                         * recalculation of L4 tunnel header csum also.
                         */
-                       if (tunn_flg) {
+                       if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+                                                       PKT_TX_TUNNEL_GRE)) {
                                bd1_bd_flags_bf |=
                                        ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
                                        ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
index a2dc9e7..3c66df0 100644 (file)
                              PKT_TX_VLAN_PKT           | \
                              PKT_TX_TUNNEL_VXLAN       | \
                              PKT_TX_TUNNEL_GENEVE      | \
-                             PKT_TX_TUNNEL_MPLSINUDP)
+                             PKT_TX_TUNNEL_MPLSINUDP   | \
+                             PKT_TX_TUNNEL_GRE)
 
 #define QEDE_TX_OFFLOAD_NOTSUP_MASK \
        (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)