net/qede/base: revise tunnel APIs/structs
authorRasesh Mody <rasesh.mody@cavium.com>
Wed, 29 Mar 2017 20:36:31 +0000 (13:36 -0700)
committerFerruh Yigit <ferruh.yigit@intel.com>
Tue, 4 Apr 2017 17:02:52 +0000 (19:02 +0200)
Revise tunnel APIs/structs.
 - Unite tunnel start and update params in single struct
   "ecore_tunnel_info"
 - Remove A0 chip tunnelling support.
 - Add per-tunnel info - remove bitmasks.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
drivers/net/qede/base/ecore.h
drivers/net/qede/base/ecore_dev.c
drivers/net/qede/base/ecore_dev_api.h
drivers/net/qede/base/ecore_sp_api.h
drivers/net/qede/base/ecore_sp_commands.c
drivers/net/qede/base/ecore_sp_commands.h
drivers/net/qede/qede_ethdev.c
drivers/net/qede/qede_if.h
drivers/net/qede/qede_main.c

index 5c12c1e..f86f7ca 100644 (file)
@@ -204,33 +204,29 @@ enum ecore_tunn_clss {
        MAX_ECORE_TUNN_CLSS,
 };
 
-struct ecore_tunn_start_params {
-       unsigned long tunn_mode;
-       u16     vxlan_udp_port;
-       u16     geneve_udp_port;
-       u8      update_vxlan_udp_port;
-       u8      update_geneve_udp_port;
-       u8      tunn_clss_vxlan;
-       u8      tunn_clss_l2geneve;
-       u8      tunn_clss_ipgeneve;
-       u8      tunn_clss_l2gre;
-       u8      tunn_clss_ipgre;
+struct ecore_tunn_update_type {
+       bool b_update_mode;
+       bool b_mode_enabled;
+       enum ecore_tunn_clss tun_cls;
 };
 
-struct ecore_tunn_update_params {
-       unsigned long tunn_mode_update_mask;
-       unsigned long tunn_mode;
-       u16     vxlan_udp_port;
-       u16     geneve_udp_port;
-       u8      update_rx_pf_clss;
-       u8      update_tx_pf_clss;
-       u8      update_vxlan_udp_port;
-       u8      update_geneve_udp_port;
-       u8      tunn_clss_vxlan;
-       u8      tunn_clss_l2geneve;
-       u8      tunn_clss_ipgeneve;
-       u8      tunn_clss_l2gre;
-       u8      tunn_clss_ipgre;
+struct ecore_tunn_update_udp_port {
+       bool b_update_port;
+       u16 port;
+};
+
+struct ecore_tunnel_info {
+       struct ecore_tunn_update_type vxlan;
+       struct ecore_tunn_update_type l2_geneve;
+       struct ecore_tunn_update_type ip_geneve;
+       struct ecore_tunn_update_type l2_gre;
+       struct ecore_tunn_update_type ip_gre;
+
+       struct ecore_tunn_update_udp_port vxlan_port;
+       struct ecore_tunn_update_udp_port geneve_port;
+
+       bool b_update_rx_cls;
+       bool b_update_tx_cls;
 };
 
 /* The PCI personality is not quite synonymous to protocol ID:
@@ -470,17 +466,6 @@ struct ecore_fw_data {
        u32 init_ops_size;
 };
 
-struct ecore_tunnel_info {
-       u8              tunn_clss_vxlan;
-       u8              tunn_clss_l2geneve;
-       u8              tunn_clss_ipgeneve;
-       u8              tunn_clss_l2gre;
-       u8              tunn_clss_ipgre;
-       unsigned long   tunn_mode;
-       u16             port_vxlan_udp_port;
-       u16             port_geneve_udp_port;
-};
-
 struct ecore_hwfn {
        struct ecore_dev                *p_dev;
        u8                              my_id;          /* ID inside the PF */
index 1c08d4a..0d3971c 100644 (file)
@@ -1696,7 +1696,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
 static enum _ecore_status_t
 ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
                 struct ecore_ptt *p_ptt,
-                struct ecore_tunn_start_params *p_tunn,
+                struct ecore_tunnel_info *p_tunn,
                 int hw_mode,
                 bool b_hw_start,
                 enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
index 74a15ef..356c5e4 100644 (file)
@@ -59,7 +59,7 @@ void ecore_resc_setup(struct ecore_dev *p_dev);
 
 struct ecore_hw_init_params {
        /* tunnelling parameters */
-       struct ecore_tunn_start_params *p_tunn;
+       struct ecore_tunnel_info *p_tunn;
        bool b_hw_start;
        /* interrupt mode [msix, inta, etc.] to use */
        enum ecore_int_mode int_mode;
index a4cb507..c8e564f 100644 (file)
@@ -41,5 +41,24 @@ struct ecore_spq_comp_cb {
  */
 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe);
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ *                                     update  Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
 
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+                           struct ecore_tunnel_info *p_tunn,
+                           enum spq_mode comp_mode,
+                           struct ecore_spq_comp_cb *p_comp_data);
 #endif
index f5860a0..fc47fc4 100644 (file)
@@ -88,7 +88,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
 {
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -107,242 +107,208 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
 }
 
 static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+                             struct ecore_tunnel_info *p_src,
+                             bool b_pf_start)
 {
-       unsigned long update_mask = p_src->tunn_mode_update_mask;
-       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
-       unsigned long cached_tunn_mode = p_tun->tunn_mode;
-       unsigned long tunn_mode = p_src->tunn_mode;
-       unsigned long new_tunn_mode = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       }
-
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               p_src->tunn_mode = new_tunn_mode;
-               return;
-       }
+       if (p_src->vxlan.b_update_mode || b_pf_start)
+               p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
 
-       if (p_src->update_geneve_udp_port) {
-               p_tun->port_geneve_udp_port = p_src->geneve_udp_port;
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                               OSAL_CPU_TO_LE16(p_tun->port_geneve_udp_port);
-       }
+       if (p_src->l2_gre.b_update_mode || b_pf_start)
+               p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->ip_gre.b_update_mode || b_pf_start)
+               p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->l2_geneve.b_update_mode || b_pf_start)
+               p_tun->l2_geneve.b_mode_enabled =
+                               p_src->l2_geneve.b_mode_enabled;
 
-       p_src->tunn_mode = new_tunn_mode;
+       if (p_src->ip_geneve.b_update_mode || b_pf_start)
+               p_tun->ip_geneve.b_mode_enabled =
+                               p_src->ip_geneve.b_mode_enabled;
 }
 
-static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+                                   struct ecore_tunnel_info *p_src)
 {
-       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
        enum tunnel_clss type;
 
-       ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
-       p_tun->tunn_mode = p_src->tunn_mode;
-
-       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
-       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tun->tunn_clss_vxlan = type;
-       p_tunn_cfg->tunnel_clss_vxlan = p_tun->tunn_clss_vxlan;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tun->tunn_clss_l2gre = type;
-       p_tunn_cfg->tunnel_clss_l2gre = p_tun->tunn_clss_l2gre;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tun->tunn_clss_ipgre = type;
-       p_tunn_cfg->tunnel_clss_ipgre = p_tun->tunn_clss_ipgre;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tun->port_vxlan_udp_port = p_src->vxlan_udp_port;
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                               OSAL_CPU_TO_LE16(p_tun->port_vxlan_udp_port);
-       }
+       p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+       p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+       /* @DPDK - typecast tunnel class */
+       type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+       p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+       p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+       p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+       p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+       p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
+
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+                                struct ecore_tunnel_info *p_src)
+{
+       p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+       p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
+       if (p_src->geneve_port.b_update_port)
+               p_tun->geneve_port.port = p_src->geneve_port.port;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
+       if (p_src->vxlan_port.b_update_port)
+               p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+                               struct ecore_tunn_update_type *tun_type)
+{
+       *p_tunn_cls = tun_type->tun_cls;
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               return;
-       }
+       if (tun_type->b_mode_enabled)
+               *p_enable_tx_clas = 1;
+}
 
-       if (p_src->update_geneve_udp_port) {
-               p_tun->port_geneve_udp_port = p_src->geneve_udp_port;
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                               OSAL_CPU_TO_LE16(p_tun->port_geneve_udp_port);
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+                             struct ecore_tunn_update_type *tun_type,
+                             u8 *p_update_port, __le16 *p_port,
+                             struct ecore_tunn_update_udp_port *p_udp_port)
+{
+       __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
+                                       tun_type);
+       if (p_udp_port->b_update_port) {
+               *p_update_port = 1;
+               *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
        }
+}
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn              *p_hwfn,
+                               struct ecore_tunnel_info *p_src,
+                               struct pf_update_tunnel_config  *p_tunn_cfg)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tun->tunn_clss_l2geneve = type;
-       p_tunn_cfg->tunnel_clss_l2geneve = p_tun->tunn_clss_l2geneve;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tun->tunn_clss_ipgeneve = type;
-       p_tunn_cfg->tunnel_clss_ipgeneve = p_tun->tunn_clss_ipgeneve;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tunn_cfg->tx_enable_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tunn_cfg->tx_enable_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tunn_cfg->tx_enable_ipgeneve,
+                                       &p_tun->ip_geneve);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tunn_cfg->tx_enable_l2gre,
+                                       &p_tun->l2_gre);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tunn_cfg->tx_enable_ipgre,
+                                       &p_tun->ip_gre);
+
+       p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+       p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
 }
 
 static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
-                                  unsigned long tunn_mode)
+                                  struct ecore_tunnel_info *p_tun)
 {
-       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
-       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               l2gre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               ipgre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               vxlan_enable = 1;
+       ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+                            p_tun->ip_gre.b_mode_enabled);
+       ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
 
-       ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
-       ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+       ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+                               p_tun->ip_geneve.b_mode_enabled);
+}
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_tunnel_info *p_tunn)
+{
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel hw config is not supported\n");
                return;
+       }
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               l2geneve_enable = 1;
+       if (p_tunn->vxlan_port.b_update_port)
+               ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                         p_tunn->vxlan_port.port);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               ipgeneve_enable = 1;
+       if (p_tunn->geneve_port.b_update_port)
+               ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                          p_tunn->geneve_port.port);
 
-       ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
-                               ipgeneve_enable);
+       ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
 }
 
 static void
 ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
-                              struct ecore_tunn_start_params *p_src,
+                              struct ecore_tunnel_info         *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
 {
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
-       enum tunnel_clss type;
-
-       if (!p_src)
-               return;
-
-       p_tun->tunn_mode = p_src->tunn_mode;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tun->tunn_clss_vxlan = type;
-       p_tunn_cfg->tunnel_clss_vxlan = p_tun->tunn_clss_vxlan;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tun->tunn_clss_l2gre = type;
-       p_tunn_cfg->tunnel_clss_l2gre = p_tun->tunn_clss_l2gre;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tun->tunn_clss_ipgre = type;
-       p_tunn_cfg->tunnel_clss_ipgre = p_tun->tunn_clss_ipgre;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tun->port_vxlan_udp_port = p_src->vxlan_udp_port;
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                               OSAL_CPU_TO_LE16(p_tun->port_vxlan_udp_port);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
 
        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf start config is not supported\n");
                return;
        }
 
-       if (p_src->update_geneve_udp_port) {
-               p_tun->port_geneve_udp_port = p_src->geneve_udp_port;
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                               OSAL_CPU_TO_LE16(p_tun->port_geneve_udp_port);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &p_tun->tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+       if (!p_src)
+               return;
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tun->tunn_clss_l2geneve = type;
-       p_tunn_cfg->tunnel_clss_l2geneve = p_tun->tunn_clss_l2geneve;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tun->tunn_clss_ipgeneve = type;
-       p_tunn_cfg->tunnel_clss_ipgeneve = p_tun->tunn_clss_ipgeneve;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tunn_cfg->tx_enable_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tunn_cfg->tx_enable_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tunn_cfg->tx_enable_ipgeneve,
+                                       &p_tun->ip_geneve);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tunn_cfg->tx_enable_l2gre,
+                                       &p_tun->l2_gre);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tunn_cfg->tx_enable_ipgre,
+                                       &p_tun->ip_gre);
 }
 
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_tunn_start_params *p_tunn,
+                                      struct ecore_tunnel_info *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
 {
@@ -437,18 +403,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 
        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 
-       if (p_tunn) {
-               if (p_tunn->update_vxlan_udp_port)
-                       ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                                 p_tunn->vxlan_udp_port);
-
-               if (p_tunn->update_geneve_udp_port)
-                       ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                                  p_tunn->geneve_udp_port);
-
-               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-                                      p_tunn->tunn_mode);
-       }
+       if (p_tunn)
+               ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
@@ -523,7 +479,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
 /* Set pf update ramrod command params */
 enum _ecore_status_t
 ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
-                           struct ecore_tunn_update_params *p_tunn,
+                           struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
 {
@@ -531,6 +487,15 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
 
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf update config is not supported\n");
+               return rc;
+       }
+
+       if (!p_tunn)
+               return ECORE_INVAL;
+
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -551,15 +516,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       if (p_tunn->update_vxlan_udp_port)
-               ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                         p_tunn->vxlan_udp_port);
-
-       if (p_tunn->update_geneve_udp_port)
-               ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                          p_tunn->geneve_udp_port);
-
-       ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+       ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
index 66c9a69..33e31e4 100644 (file)
@@ -68,31 +68,10 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
  */
 
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_tunn_start_params *p_tunn,
+                                      struct ecore_tunnel_info *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch);
 
-/**
- * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
- *                                     update  Ramrod
- *
- * This ramrod is sent to update a tunneling configuration
- * for a physical function (PF).
- *
- * @param p_hwfn
- * @param p_tunn - pf update tunneling parameters
- * @param comp_mode - completion mode
- * @param p_comp_data - callback function
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t
-ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
-                           struct ecore_tunn_update_params *p_tunn,
-                           enum spq_mode comp_mode,
-                           struct ecore_spq_comp_cb *p_comp_data);
-
 /**
  * @brief ecore_sp_pf_update - PF Function Update Ramrod
  *
index d52e1be..0c05d2d 100644 (file)
@@ -335,10 +335,10 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
        /* ucast->assert_on_error = true; - For debug */
 }
 
-static void qede_set_cmn_tunn_param(struct ecore_tunn_update_params *params,
-                                    uint8_t clss, uint64_t mode, uint64_t mask)
+static void qede_set_cmn_tunn_param(struct qed_tunn_update_params *params,
+                                   uint8_t clss, uint64_t mode, uint64_t mask)
 {
-       memset(params, 0, sizeof(struct ecore_tunn_update_params));
+       memset(params, 0, sizeof(struct qed_tunn_update_params));
        params->tunn_mode = mode;
        params->tunn_mode_update_mask = mask;
        params->update_tx_pf_clss = 1;
@@ -1707,20 +1707,22 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_tunn_update_params params;
+       struct qed_tunn_update_params params;
+       struct ecore_tunnel_info tunn; /* @DPDK */
        struct ecore_hwfn *p_hwfn;
        int rc, i;
 
        PMD_INIT_FUNC_TRACE(edev);
 
        memset(&params, 0, sizeof(params));
+       memset(&tunn, 0, sizeof(tunn));
        if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
                params.update_vxlan_udp_port = 1;
                params.vxlan_udp_port = (add) ? tunnel_udp->udp_port :
                                        QEDE_VXLAN_DEF_PORT;
                for_each_hwfn(edev, i) {
                        p_hwfn = &edev->hwfns[i];
-                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &params,
+                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
                                                ECORE_SPQ_MODE_CB, NULL);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Unable to config UDP port %u\n",
@@ -1817,7 +1819,8 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_tunn_update_params params;
+       struct qed_tunn_update_params params;
+       struct ecore_tunnel_info tunn;
        struct ecore_hwfn *p_hwfn;
        enum ecore_filter_ucast_type type;
        enum ecore_tunn_clss clss;
@@ -1826,6 +1829,7 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
        uint16_t filter_type;
        int rc, i;
 
+       memset(&tunn, 0, sizeof(tunn));
        filter_type = conf->filter_type | qdev->vxlan_filter_type;
        /* First determine if the given filter classification is supported */
        qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
@@ -1872,7 +1876,7 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
                for_each_hwfn(edev, i) {
                        p_hwfn = &edev->hwfns[i];
                        rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
-                               &params, ECORE_SPQ_MODE_CB, NULL);
+                               &tunn, ECORE_SPQ_MODE_CB, NULL);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Failed to update tunn_clss %u\n",
                                        params.tunn_clss_vxlan);
@@ -1906,8 +1910,8 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
                                                (1 << ECORE_MODE_VXLAN_TUNN));
                        for_each_hwfn(edev, i) {
                                p_hwfn = &edev->hwfns[i];
-                               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
-                                       &params, ECORE_SPQ_MODE_CB, NULL);
+                               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+                                       ECORE_SPQ_MODE_CB, NULL);
                                if (rc != ECORE_SUCCESS) {
                                        DP_ERR(edev,
                                                "Failed to update tunn_clss %u\n",
index baa8476..09b6912 100644 (file)
@@ -121,6 +121,22 @@ struct qed_eth_tlvs {
        u8 num_rxqs_full;
 };
 
+struct qed_tunn_update_params {
+       unsigned long   tunn_mode_update_mask;
+       unsigned long   tunn_mode;
+       u16             vxlan_udp_port;
+       u16             geneve_udp_port;
+       u8              update_rx_pf_clss;
+       u8              update_tx_pf_clss;
+       u8              update_vxlan_udp_port;
+       u8              update_geneve_udp_port;
+       u8              tunn_clss_vxlan;
+       u8              tunn_clss_l2geneve;
+       u8              tunn_clss_ipgeneve;
+       u8              tunn_clss_l2gre;
+       u8              tunn_clss_ipgre;
+};
+
 struct qed_common_cb_ops {
        void (*link_update)(void *dev, struct qed_link_output *link);
        void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
index e7195b4..5c79055 100644 (file)
@@ -329,20 +329,18 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
 
        memset(dev_info, 0, sizeof(struct qed_dev_info));
 
-       if (tun->tunn_mode & OSAL_BIT(ECORE_MODE_VXLAN_TUNN) &&
-           tun->tunn_clss_vxlan == ECORE_TUNN_CLSS_MAC_VLAN)
+       if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+           tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;
 
-       if (tun->tunn_mode & OSAL_BIT(ECORE_MODE_L2GRE_TUNN) &&
-           tun->tunn_mode & OSAL_BIT(ECORE_MODE_IPGRE_TUNN) &&
-           tun->tunn_clss_l2gre == ECORE_TUNN_CLSS_MAC_VLAN &&
-           tun->tunn_clss_ipgre == ECORE_TUNN_CLSS_MAC_VLAN)
+       if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+           tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+           tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;
 
-       if (tun->tunn_mode & OSAL_BIT(ECORE_MODE_L2GENEVE_TUNN) &&
-           tun->tunn_mode & OSAL_BIT(ECORE_MODE_IPGENEVE_TUNN) &&
-           tun->tunn_clss_l2geneve == ECORE_TUNN_CLSS_MAC_VLAN &&
-           tun->tunn_clss_ipgeneve == ECORE_TUNN_CLSS_MAC_VLAN)
+       if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+           tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+           tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;
 
        dev_info->num_hwfns = edev->num_hwfns;