net/qede/base: revise tunnel APIs/structs
drivers/net/qede/base/ecore_sp_commands.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"

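/* Acquire an SPQ entry and initialize its ramrod header (CID, command and
 * protocol IDs) and completion handling according to the requested mode.
 */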
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
                                           u8 cmd,
                                           u8 protocol,
                                           struct ecore_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!pp_ent)
                return ECORE_INVAL;

        /* Get an SPQ entry */
        rc = ecore_spq_get_entry(p_hwfn, pp_ent);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the SPQ entry */
        p_ent = *pp_ent;
        p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;
        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case ECORE_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        return ECORE_INVAL;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case ECORE_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = OSAL_NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return ECORE_SUCCESS;
}

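/* Translate an ecore tunnel classification type into the FW tunnel_clss
 * value; unknown types fall back to TUNNEL_CLSS_MAC_VLAN.
 */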
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case ECORE_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case ECORE_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

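/* Update the cached enable/disable state of each tunnel mode. On PF start
 * all modes are taken from p_src; otherwise only those flagged for update.
 */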
static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
                              struct ecore_tunnel_info *p_src,
                              bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                                p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                                p_src->ip_geneve.b_mode_enabled;
}

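/* Cache the RX/TX classification update flags and the per-tunnel
 * classification types, converted to their FW representation.
 */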
static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
                                    struct ecore_tunnel_info *p_src)
{
        enum tunnel_clss type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        /* @DPDK - typecast tunnel class */
        type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}

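/* Cache the VXLAN/GENEVE UDP destination ports that are flagged for update. */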
static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
                                 struct ecore_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

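/* Fill the classification and TX-enable fields of a ramrod for one tunnel
 * type.
 */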
static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
                                struct ecore_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;

        if (tun_type->b_mode_enabled)
                *p_enable_tx_clas = 1;
}

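/* Same as above, but also fill the UDP destination port fields for tunnel
 * types that carry one (VXLAN, GENEVE).
 */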
static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
                              struct ecore_tunn_update_type *tun_type,
                              u8 *p_update_port, __le16 *p_port,
                              struct ecore_tunn_update_udp_port *p_udp_port)
{
        __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
                                        tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
        }
}

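/* Update the device's cached tunnel state from p_src and translate it into
 * the tunnel section of a PF-update ramrod.
 */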
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunnel_info *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

        ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
        ecore_set_tunn_cls_info(p_tun, p_src);
        ecore_set_tunn_ports(p_tun, p_src);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                      &p_tunn_cfg->tx_enable_vxlan,
                                      &p_tun->vxlan,
                                      &p_tunn_cfg->set_vxlan_udp_port_flg,
                                      &p_tunn_cfg->vxlan_udp_port,
                                      &p_tun->vxlan_port);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                      &p_tunn_cfg->tx_enable_l2geneve,
                                      &p_tun->l2_geneve,
                                      &p_tunn_cfg->set_geneve_udp_port_flg,
                                      &p_tunn_cfg->geneve_udp_port,
                                      &p_tun->geneve_port);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                        &p_tunn_cfg->tx_enable_ipgeneve,
                                        &p_tun->ip_geneve);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                        &p_tunn_cfg->tx_enable_l2gre,
                                        &p_tun->l2_gre);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                        &p_tunn_cfg->tx_enable_ipgre,
                                        &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
        p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}

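/* Program the GRE/VXLAN/GENEVE enable bits in HW per the cached state. */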
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   struct ecore_tunnel_info *p_tun)
{
        ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                             p_tun->ip_gre.b_mode_enabled);
        ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                                p_tun->ip_geneve.b_mode_enabled);
}

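/* Program the tunnel UDP destination ports and mode bits in HW; not
 * supported on BB A0 chips.
 */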
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
                                        struct ecore_tunnel_info *p_tunn)
{
        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel hw config is not supported\n");
                return;
        }

        if (p_tunn->vxlan_port.b_update_port)
                ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                          p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                           p_tunn->geneve_port.port);

        ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}

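/* Update the device's cached tunnel state from p_src and translate it into
 * the tunnel section of a PF-start ramrod; skipped on BB A0 chips.
 */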
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
                               struct ecore_tunnel_info *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel pf start config is not supported\n");
                return;
        }

        if (!p_src)
                return;

        ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
        ecore_set_tunn_cls_info(p_tun, p_src);
        ecore_set_tunn_ports(p_tun, p_src);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                      &p_tunn_cfg->tx_enable_vxlan,
                                      &p_tun->vxlan,
                                      &p_tunn_cfg->set_vxlan_udp_port_flg,
                                      &p_tunn_cfg->vxlan_udp_port,
                                      &p_tun->vxlan_port);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                      &p_tunn_cfg->tx_enable_l2geneve,
                                      &p_tun->l2_geneve,
                                      &p_tunn_cfg->set_geneve_udp_port_flg,
                                      &p_tunn_cfg->geneve_udp_port,
                                      &p_tun->geneve_port);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                        &p_tunn_cfg->tx_enable_ipgeneve,
                                        &p_tun->ip_geneve);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                        &p_tunn_cfg->tx_enable_l2gre,
                                        &p_tun->l2_gre);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                        &p_tunn_cfg->tx_enable_ipgre,
                                        &p_tun->ip_gre);
}

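/* Post a PF-start ramrod: set up the event and consolidation queue PBLs,
 * multi-function mode, tunnel configuration and HSI version, then program
 * the tunnel HW state.
 */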
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_tunnel_info *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
        u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 page_cnt;

        /* update initial eq producer */
        ecore_eq_prod_update(p_hwfn,
                             ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        /* Initialize the SPQ entry for the ramrod */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_START,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the ramrod data */
        p_ramrod = &p_ent->ramrod.pf_start;
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

        switch (mode) {
        case ECORE_MF_DEFAULT:
        case ECORE_MF_NPAR:
                p_ramrod->mf_mode = MF_NPAR;
                break;
        case ECORE_MF_OVLAN:
                p_ramrod->mf_mode = MF_OVLAN;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }
        p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);

        if (IS_MF_SI(p_hwfn))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->p_dev->p_iov_info) {
                struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

                p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8)p_iov->total_vfs;
        }
        /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
         * version is available.
         */
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if (p_tunn)
                ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);

        return rc;
}

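/* Post a PF-update ramrod carrying the current DCBx results. */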
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                        &p_ent->ramrod.pf_update);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

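/* Post a rate-limiter update ramrod carrying the given QCN/DCQCN params. */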
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
                                        struct ecore_rl_update_params *params)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct rl_update_ramrod_data *rl_update;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        rl_update = &p_ent->ramrod.rl_update;

        rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
        rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
        rl_update->rl_init_flg = params->rl_init_flg;
        rl_update->rl_start_flg = params->rl_start_flg;
        rl_update->rl_stop_flg = params->rl_stop_flg;
        rl_update->rl_id_first = params->rl_id_first;
        rl_update->rl_id_last = params->rl_id_last;
        rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
        rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
        rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
        rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
        rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
        rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
        rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
        /* Note - the "timeuot" spelling below follows the FW HSI field names */
        rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
                params->dcqcn_timeuot_us);
        rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Post a PF-update ramrod that carries a tunnel configuration update and,
 * on success, program the updated tunnel state into HW.
 */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                            struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel pf update config is not supported\n");
                return rc;
        }

        if (!p_tunn)
                return ECORE_INVAL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                        &p_ent->ramrod.pf_update.tunnel_config);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);

        return rc;
}

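/* Post a PF-stop ramrod and block until it completes. */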
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

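/* Post an empty ramrod, used as a heartbeat to verify the FW is responsive. */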
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}