qede: add base driver
[dpdk.git] drivers/net/qede/base/ecore_sp_commands.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"

enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
                                           u8 cmd,
                                           u8 protocol,
                                           struct ecore_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get an SPQ entry */
        rc = ecore_spq_get_entry(p_hwfn, pp_ent);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the SPQ entry */
        p_ent = *pp_ent;
        p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;
        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case ECORE_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        goto err;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case ECORE_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = OSAL_NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                goto err;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return ECORE_SUCCESS;

err:
        /* Return the acquired entry to the SPQ free pool; bailing out
         * with a bare return here would leak the entry on every
         * invalid-parameter failure.
         */
        ecore_spq_return_entry(p_hwfn, p_ent);
        return ECORE_INVAL;
}
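
/* Illustrative usage sketch (a comment, not compiled code): the typical
 * calling pattern for ecore_sp_init_request(), mirroring the ramrod
 * helpers later in this file. The command/protocol pair shown is just an
 * example.
 *
 *      struct ecore_sp_init_data init_data;
 *      struct ecore_spq_entry *p_ent = OSAL_NULL;
 *      enum _ecore_status_t rc;
 *
 *      OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 *      init_data.cid = ecore_spq_get_cid(p_hwfn);
 *      init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *
 *      rc = ecore_sp_init_request(p_hwfn, &p_ent,
 *                                 COMMON_RAMROD_PF_UPDATE,
 *                                 PROTOCOLID_COMMON, &init_data);
 *      if (rc != ECORE_SUCCESS)
 *              return rc;
 *
 *      ... fill p_ent->ramrod.<member> for the chosen command ...
 *
 *      return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 */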

static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
{
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case ECORE_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case ECORE_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

static void
ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunn_update_params *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
        unsigned long update_mask = p_src->tunn_mode_update_mask;
        unsigned long tunn_mode = p_src->tunn_mode;
        unsigned long new_tunn_mode = 0;

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
        }

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                if (p_src->update_geneve_udp_port)
                        DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
                p_src->update_geneve_udp_port = 0;
                p_src->tunn_mode = new_tunn_mode;
                return;
        }

        if (p_src->update_geneve_udp_port) {
                p_tunn_cfg->set_geneve_udp_port_flg = 1;
                p_tunn_cfg->geneve_udp_port =
                    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
        }

        p_src->tunn_mode = new_tunn_mode;
}
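
/* A note on the update-mask semantics implemented above: for each tunnel
 * type, the corresponding bit in p_src->tunn_mode_update_mask selects the
 * source of the new value. Taking VXLAN as an example:
 *
 *      mask bit set   -> take the ECORE_MODE_VXLAN_TUNN bit from
 *                        p_src->tunn_mode (explicit enable/disable)
 *      mask bit clear -> keep the cached p_dev->tunn_mode bit
 *
 * so a caller can flip one tunnel class without knowing, or disturbing,
 * the current state of the others.
 */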

static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunn_update_params *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        unsigned long tunn_mode = p_src->tunn_mode;
        enum tunnel_clss type;

        ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
        p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
        p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

        type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
        p_tunn_cfg->tunnel_clss_vxlan = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
        p_tunn_cfg->tunnel_clss_l2gre = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
        p_tunn_cfg->tunnel_clss_ipgre = type;

        if (p_src->update_vxlan_udp_port) {
                p_tunn_cfg->set_vxlan_udp_port_flg = 1;
                p_tunn_cfg->vxlan_udp_port =
                    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2gre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_vxlan = 1;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                if (p_src->update_geneve_udp_port)
                        DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
                p_src->update_geneve_udp_port = 0;
                return;
        }

        if (p_src->update_geneve_udp_port) {
                p_tunn_cfg->set_geneve_udp_port_flg = 1;
                p_tunn_cfg->geneve_udp_port =
                    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2geneve = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgeneve = 1;

        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
        p_tunn_cfg->tunnel_clss_l2geneve = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
        p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   unsigned long tunn_mode)
{
        u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
        u8 l2geneve_enable = 0, ipgeneve_enable = 0;

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                l2gre_enable = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                ipgre_enable = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                vxlan_enable = 1;

        ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
        ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);

        if (ECORE_IS_BB_A0(p_hwfn->p_dev))
                return;

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                l2geneve_enable = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                ipgeneve_enable = 1;

        ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
                                ipgeneve_enable);
}

static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
                               struct ecore_tunn_start_params *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
{
        unsigned long tunn_mode;
        enum tunnel_clss type;

        if (!p_src)
                return;

        tunn_mode = p_src->tunn_mode;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
        p_tunn_cfg->tunnel_clss_vxlan = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
        p_tunn_cfg->tunnel_clss_l2gre = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
        p_tunn_cfg->tunnel_clss_ipgre = type;

        if (p_src->update_vxlan_udp_port) {
                p_tunn_cfg->set_vxlan_udp_port_flg = 1;
                p_tunn_cfg->vxlan_udp_port =
                    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2gre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_vxlan = 1;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                if (p_src->update_geneve_udp_port)
                        DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
                p_src->update_geneve_udp_port = 0;
                return;
        }

        if (p_src->update_geneve_udp_port) {
                p_tunn_cfg->set_geneve_udp_port_flg = 1;
                p_tunn_cfg->geneve_udp_port =
                    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2geneve = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgeneve = 1;

        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
        p_tunn_cfg->tunnel_clss_l2geneve = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
        p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_tunn_start_params *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;
        u8 page_cnt;

        /* Update initial eq producer */
        ecore_eq_prod_update(p_hwfn,
                             ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        /* Initialize the SPQ entry for the ramrod */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_START,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the ramrod data */
        p_ramrod = &p_ent->ramrod.pf_start;
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
        p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

        switch (mode) {
        case ECORE_MF_DEFAULT:
        case ECORE_MF_NPAR:
                p_ramrod->mf_mode = MF_NPAR;
                break;
        case ECORE_MF_OVLAN:
                p_ramrod->mf_mode = MF_OVLAN;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl.p_phys_table);

        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);

        if (IS_MF_SI(p_hwfn))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
        p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if (p_tunn) {
                ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
                                       p_tunn->tunn_mode);
                p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
        }

        return rc;
}
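
/* Illustrative call site (a sketch, not driver code): the PF start ramrod
 * is normally issued once per hw-function during init, before any queue
 * ramrods are posted. p_tunn may be OSAL_NULL when no tunnelling
 * configuration is needed at start time.
 *
 *      rc = ecore_sp_pf_start(p_hwfn, OSAL_NULL, ECORE_MF_DEFAULT, false);
 *      if (rc != ECORE_SUCCESS)
 *              DP_NOTICE(p_hwfn, true, "PF start ramrod failed\n");
 */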

enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                            struct ecore_tunn_update_params *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                        &p_ent->ramrod.pf_update.tunnel_config);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if ((rc == ECORE_SUCCESS) && p_tunn) {
                if (p_tunn->update_vxlan_udp_port)
                        ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                                  p_tunn->vxlan_udp_port);
                if (p_tunn->update_geneve_udp_port)
                        ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                                   p_tunn->geneve_udp_port);

                ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
                                       p_tunn->tunn_mode);
                p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
        }

        return rc;
}
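
/* Illustrative usage sketch (hypothetical values): enable VXLAN, classify
 * by outer MAC/VLAN, and program the standard VXLAN UDP destination port.
 * The field and bit names below are the ones consumed above.
 *
 *      struct ecore_tunn_update_params tunn;
 *
 *      OSAL_MEMSET(&tunn, 0, sizeof(tunn));
 *      OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &tunn.tunn_mode);
 *      OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &tunn.tunn_mode_update_mask);
 *      tunn.tunn_clss_vxlan = ECORE_TUNN_CLSS_MAC_VLAN;
 *      tunn.update_vxlan_udp_port = 1;
 *      tunn.vxlan_udp_port = 4789;
 *
 *      rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
 *                                       ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */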

enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
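
/* The heartbeat ramrod posts COMMON_RAMROD_EMPTY: it carries no payload
 * and exists so a caller can verify that firmware is still consuming and
 * completing slow-path commands, e.g.:
 *
 *      if (ecore_sp_heartbeat_ramrod(p_hwfn) != ECORE_SUCCESS)
 *              ... treat the firmware as unresponsive ...
 */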