net/qede/base: upgrade the FW to 8.20.0.0
[dpdk.git] / drivers / net / qede / base / ecore_sp_commands.c
index 58df3f5..d6e4b9e 100644 (file)
@@ -21,6 +21,8 @@
 #include "ecore_int.h"
 #include "ecore_hw.h"
 #include "ecore_dcbx.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
 
 enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
@@ -30,7 +32,10 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
 {
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
+
+       if (!pp_ent)
+               return ECORE_INVAL;
 
        /* Get an SPQ entry */
        rc = ecore_spq_get_entry(p_hwfn, pp_ent);
@@ -84,7 +89,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
 {
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -95,230 +100,201 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case ECORE_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
+       case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+               return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
 }
 
 static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+                             struct ecore_tunnel_info *p_src,
+                             bool b_pf_start)
 {
-       unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
-       unsigned long update_mask = p_src->tunn_mode_update_mask;
-       unsigned long tunn_mode = p_src->tunn_mode;
-       unsigned long new_tunn_mode = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->vxlan.b_update_mode || b_pf_start)
+               p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->l2_gre.b_update_mode || b_pf_start)
+               p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       }
+       if (p_src->ip_gre.b_update_mode || b_pf_start)
+               p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               p_src->tunn_mode = new_tunn_mode;
-               return;
-       }
+       if (p_src->l2_geneve.b_update_mode || b_pf_start)
+               p_tun->l2_geneve.b_mode_enabled =
+                               p_src->l2_geneve.b_mode_enabled;
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       if (p_src->ip_geneve.b_update_mode || b_pf_start)
+               p_tun->ip_geneve.b_mode_enabled =
+                               p_src->ip_geneve.b_mode_enabled;
+}
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       }
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+                                   struct ecore_tunnel_info *p_src)
+{
+       enum tunnel_clss type;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       }
+       p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+       p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+       /* @DPDK - typecast tunnel class */
+       type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+       p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+       p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+       p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+       p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+       p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
 
-       p_src->tunn_mode = new_tunn_mode;
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+                                struct ecore_tunnel_info *p_src)
+{
+       p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+       p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
+
+       if (p_src->geneve_port.b_update_port)
+               p_tun->geneve_port.port = p_src->geneve_port.port;
+
+       if (p_src->vxlan_port.b_update_port)
+               p_tun->vxlan_port.port = p_src->vxlan_port.port;
 }
 
 static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+                               struct ecore_tunn_update_type *tun_type)
 {
-       unsigned long tunn_mode = p_src->tunn_mode;
-       enum tunnel_clss type;
+       *p_tunn_cls = tun_type->tun_cls;
+}
 
-       ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
-       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
-       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tunn_cfg->tunnel_clss_vxlan = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tunn_cfg->tunnel_clss_l2gre = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tunn_cfg->tunnel_clss_ipgre = type;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+                             struct ecore_tunn_update_type *tun_type,
+                             u8 *p_update_port, __le16 *p_port,
+                             struct ecore_tunn_update_udp_port *p_udp_port)
+{
+       __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
+       if (p_udp_port->b_update_port) {
+               *p_update_port = 1;
+               *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
        }
+}
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn              *p_hwfn,
+                               struct ecore_tunnel_info *p_src,
+                               struct pf_update_tunnel_config  *p_tunn_cfg)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               return;
-       }
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tun->ip_geneve);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tun->l2_gre);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tun->ip_gre);
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tunn_cfg->tunnel_clss_l2geneve = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+       p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
 }
 
 static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
-                                  unsigned long tunn_mode)
+                                  struct ecore_tunnel_info *p_tun)
 {
-       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
-       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               l2gre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               ipgre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               vxlan_enable = 1;
+       ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+                            p_tun->ip_gre.b_mode_enabled);
+       ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
 
-       ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
-       ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+       ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+                               p_tun->ip_geneve.b_mode_enabled);
+}
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_tunnel_info *p_tunn)
+{
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel hw config is not supported\n");
                return;
+       }
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               l2geneve_enable = 1;
+       if (p_tunn->vxlan_port.b_update_port)
+               ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                         p_tunn->vxlan_port.port);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               ipgeneve_enable = 1;
+       if (p_tunn->geneve_port.b_update_port)
+               ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                          p_tunn->geneve_port.port);
 
-       ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
-                               ipgeneve_enable);
+       ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
 }
 
 static void
 ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
-                              struct ecore_tunn_start_params *p_src,
+                              struct ecore_tunnel_info         *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
 {
-       unsigned long tunn_mode;
-       enum tunnel_clss type;
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 
-       if (!p_src)
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf start config is not supported\n");
                return;
-
-       tunn_mode = p_src->tunn_mode;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tunn_cfg->tunnel_clss_vxlan = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tunn_cfg->tunnel_clss_l2gre = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tunn_cfg->tunnel_clss_ipgre = type;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
        }
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
+       if (!p_src)
+               return;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               return;
-       }
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tun->ip_geneve);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tun->l2_gre);
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tunn_cfg->tunnel_clss_l2geneve = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tun->ip_gre);
 }
 
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_tunn_start_params *p_tunn,
+                                      struct ecore_tunnel_info *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
 {
@@ -351,11 +327,10 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
-       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
 
        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
-       p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+       p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
 
        switch (mode) {
        case ECORE_MF_DEFAULT:
@@ -370,14 +345,15 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                          "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }
+       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-                      p_hwfn->p_eq->chain.pbl.p_phys_table);
+                      p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-                      p_hwfn->p_consq->chain.pbl.p_phys_table);
+                      p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
 
        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);
@@ -395,8 +371,17 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                p_ramrod->personality = PERSONALITY_ETH;
        }
 
-       p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
-       p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;
+       if (p_hwfn->p_dev->p_iov_info) {
+               struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+               p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+               p_ramrod->num_vfs = (u8)p_iov->total_vfs;
+       }
+       /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
+        * version is available.
+        */
+       p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+       p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
@@ -404,11 +389,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 
        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 
-       if (p_tunn) {
-               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-                                      p_tunn->tunn_mode);
-               p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
-       }
+       if (p_tunn)
+               ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
@@ -437,10 +419,53 @@ enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_rl_update_params *params)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct rl_update_ramrod_data *rl_update;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rl_update = &p_ent->ramrod.rl_update;
+
+       rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
+       rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
+       rl_update->rl_init_flg = params->rl_init_flg;
+       rl_update->rl_start_flg = params->rl_start_flg;
+       rl_update->rl_stop_flg = params->rl_stop_flg;
+       rl_update->rl_id_first = params->rl_id_first;
+       rl_update->rl_id_last = params->rl_id_last;
+       rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
+       rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
+       rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
+       rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
+       rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
+       rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+       rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
+       rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
+               params->dcqcn_timeuot_us);
+       rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
 /* Set pf update ramrod command params */
 enum _ecore_status_t
 ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
-                           struct ecore_tunn_update_params *p_tunn,
+                           struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
 {
@@ -448,6 +473,18 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
 
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf update config is not supported\n");
+               return rc;
+       }
+
+       if (!p_tunn)
+               return ECORE_INVAL;
+
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -465,19 +502,10 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                                        &p_ent->ramrod.pf_update.tunnel_config);
 
        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+       if (rc != ECORE_SUCCESS)
+               return rc;
 
-       if ((rc == ECORE_SUCCESS) && p_tunn) {
-               if (p_tunn->update_vxlan_udp_port)
-                       ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                                 p_tunn->vxlan_udp_port);
-               if (p_tunn->update_geneve_udp_port)
-                       ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                                  p_tunn->geneve_udp_port);
-
-               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-                                      p_tunn->tunn_mode);
-               p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
-       }
+       ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
@@ -507,7 +535,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));