f5860a046f28be3e7dbfd995ca734a0a4664d337
[dpdk.git] / drivers / net / qede / base / ecore_sp_commands.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10
11 #include "ecore.h"
12 #include "ecore_status.h"
13 #include "ecore_chain.h"
14 #include "ecore_spq.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_cxt.h"
17 #include "ecore_sp_commands.h"
18 #include "ecore_gtt_reg_addr.h"
19 #include "ecore_iro.h"
20 #include "reg_addr.h"
21 #include "ecore_int.h"
22 #include "ecore_hw.h"
23 #include "ecore_dcbx.h"
24 #include "ecore_sriov.h"
25
26 enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
27                                            struct ecore_spq_entry **pp_ent,
28                                            u8 cmd,
29                                            u8 protocol,
30                                            struct ecore_sp_init_data *p_data)
31 {
32         u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
33         struct ecore_spq_entry *p_ent = OSAL_NULL;
34         enum _ecore_status_t rc;
35
36         if (!pp_ent)
37                 return ECORE_INVAL;
38
39         /* Get an SPQ entry */
40         rc = ecore_spq_get_entry(p_hwfn, pp_ent);
41         if (rc != ECORE_SUCCESS)
42                 return rc;
43
44         /* Fill the SPQ entry */
45         p_ent = *pp_ent;
46         p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
47         p_ent->elem.hdr.cmd_id = cmd;
48         p_ent->elem.hdr.protocol_id = protocol;
49         p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
50         p_ent->comp_mode = p_data->comp_mode;
51         p_ent->comp_done.done = 0;
52
53         switch (p_ent->comp_mode) {
54         case ECORE_SPQ_MODE_EBLOCK:
55                 p_ent->comp_cb.cookie = &p_ent->comp_done;
56                 break;
57
58         case ECORE_SPQ_MODE_BLOCK:
59                 if (!p_data->p_comp_data)
60                         return ECORE_INVAL;
61
62                 p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
63                 break;
64
65         case ECORE_SPQ_MODE_CB:
66                 if (!p_data->p_comp_data)
67                         p_ent->comp_cb.function = OSAL_NULL;
68                 else
69                         p_ent->comp_cb = *p_data->p_comp_data;
70                 break;
71
72         default:
73                 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
74                           p_ent->comp_mode);
75                 return ECORE_INVAL;
76         }
77
78         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
79                    "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
80                    opaque_cid, cmd, protocol,
81                    (unsigned long)&p_ent->ramrod,
82                    D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
83                            ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
84                            "MODE_CB"));
85
86         OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
87
88         return ECORE_SUCCESS;
89 }
90
91 static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
92 {
93         switch (type) {
94         case ECORE_TUNN_CLSS_MAC_VLAN:
95                 return TUNNEL_CLSS_MAC_VLAN;
96         case ECORE_TUNN_CLSS_MAC_VNI:
97                 return TUNNEL_CLSS_MAC_VNI;
98         case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
99                 return TUNNEL_CLSS_INNER_MAC_VLAN;
100         case ECORE_TUNN_CLSS_INNER_MAC_VNI:
101                 return TUNNEL_CLSS_INNER_MAC_VNI;
102         case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
103                 return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
104         default:
105                 return TUNNEL_CLSS_MAC_VLAN;
106         }
107 }
108
109 static void
110 ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
111                                 struct ecore_tunn_update_params *p_src,
112                                 struct pf_update_tunnel_config *p_tunn_cfg)
113 {
114         unsigned long update_mask = p_src->tunn_mode_update_mask;
115         struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
116         unsigned long cached_tunn_mode = p_tun->tunn_mode;
117         unsigned long tunn_mode = p_src->tunn_mode;
118         unsigned long new_tunn_mode = 0;
119
120         if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
121                 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
122                         OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
123         } else {
124                 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
125                         OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
126         }
127
128         if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
129                 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
130                         OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
131         } else {
132                 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
133                         OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
134         }
135
136         if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
137                 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
138                         OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
139         } else {
140                 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
141                         OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
142         }
143
144         if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
145                 if (p_src->update_geneve_udp_port)
146                         DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
147                 p_src->update_geneve_udp_port = 0;
148                 p_src->tunn_mode = new_tunn_mode;
149                 return;
150         }
151
152         if (p_src->update_geneve_udp_port) {
153                 p_tun->port_geneve_udp_port = p_src->geneve_udp_port;
154                 p_tunn_cfg->set_geneve_udp_port_flg = 1;
155                 p_tunn_cfg->geneve_udp_port =
156                                 OSAL_CPU_TO_LE16(p_tun->port_geneve_udp_port);
157         }
158
159         if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
160                 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
161                         OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
162         } else {
163                 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
164                         OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
165         }
166
167         if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
168                 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
169                         OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
170         } else {
171                 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
172                         OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
173         }
174
175         p_src->tunn_mode = new_tunn_mode;
176 }
177
178 static void
179 ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
180                                 struct ecore_tunn_update_params *p_src,
181                                 struct pf_update_tunnel_config *p_tunn_cfg)
182 {
183         struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
184         enum tunnel_clss type;
185
186         ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
187         p_tun->tunn_mode = p_src->tunn_mode;
188
189         p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
190         p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
191
192         type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
193         p_tun->tunn_clss_vxlan = type;
194         p_tunn_cfg->tunnel_clss_vxlan = p_tun->tunn_clss_vxlan;
195         type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
196         p_tun->tunn_clss_l2gre = type;
197         p_tunn_cfg->tunnel_clss_l2gre = p_tun->tunn_clss_l2gre;
198         type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
199         p_tun->tunn_clss_ipgre = type;
200         p_tunn_cfg->tunnel_clss_ipgre = p_tun->tunn_clss_ipgre;
201
202         if (p_src->update_vxlan_udp_port) {
203                 p_tun->port_vxlan_udp_port = p_src->vxlan_udp_port;
204                 p_tunn_cfg->set_vxlan_udp_port_flg = 1;
205                 p_tunn_cfg->vxlan_udp_port =
206                                 OSAL_CPU_TO_LE16(p_tun->port_vxlan_udp_port);
207         }
208
209         if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &p_tun->tunn_mode))
210                 p_tunn_cfg->tx_enable_l2gre = 1;
211
212         if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &p_tun->tunn_mode))
213                 p_tunn_cfg->tx_enable_ipgre = 1;
214
215         if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &p_tun->tunn_mode))
216                 p_tunn_cfg->tx_enable_vxlan = 1;
217
218         if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
219                 if (p_src->update_geneve_udp_port)
220                         DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
221                 p_src->update_geneve_udp_port = 0;
222                 return;
223         }
224
225         if (p_src->update_geneve_udp_port) {
226                 p_tun->port_geneve_udp_port = p_src->geneve_udp_port;
227                 p_tunn_cfg->set_geneve_udp_port_flg = 1;
228                 p_tunn_cfg->geneve_udp_port =
229                                 OSAL_CPU_TO_LE16(p_tun->port_geneve_udp_port);
230         }
231
232         if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &p_tun->tunn_mode))
233                 p_tunn_cfg->tx_enable_l2geneve = 1;
234
235         if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &p_tun->tunn_mode))
236                 p_tunn_cfg->tx_enable_ipgeneve = 1;
237
238         type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
239         p_tun->tunn_clss_l2geneve = type;
240         p_tunn_cfg->tunnel_clss_l2geneve = p_tun->tunn_clss_l2geneve;
241         type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
242         p_tun->tunn_clss_ipgeneve = type;
243         p_tunn_cfg->tunnel_clss_ipgeneve = p_tun->tunn_clss_ipgeneve;
244 }
245
246 static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
247                                    struct ecore_ptt *p_ptt,
248                                    unsigned long tunn_mode)
249 {
250         u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
251         u8 l2geneve_enable = 0, ipgeneve_enable = 0;
252
253         if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
254                 l2gre_enable = 1;
255
256         if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
257                 ipgre_enable = 1;
258
259         if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
260                 vxlan_enable = 1;
261
262         ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
263         ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
264
265         if (ECORE_IS_BB_A0(p_hwfn->p_dev))
266                 return;
267
268         if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
269                 l2geneve_enable = 1;
270
271         if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
272                 ipgeneve_enable = 1;
273
274         ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
275                                 ipgeneve_enable);
276 }
277
278 static void
279 ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
280                                struct ecore_tunn_start_params *p_src,
281                                struct pf_start_tunnel_config *p_tunn_cfg)
282 {
283         struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
284         enum tunnel_clss type;
285
286         if (!p_src)
287                 return;
288
289         p_tun->tunn_mode = p_src->tunn_mode;
290         type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
291         p_tun->tunn_clss_vxlan = type;
292         p_tunn_cfg->tunnel_clss_vxlan = p_tun->tunn_clss_vxlan;
293         type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
294         p_tun->tunn_clss_l2gre = type;
295         p_tunn_cfg->tunnel_clss_l2gre = p_tun->tunn_clss_l2gre;
296         type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
297         p_tun->tunn_clss_ipgre = type;
298         p_tunn_cfg->tunnel_clss_ipgre = p_tun->tunn_clss_ipgre;
299
300         if (p_src->update_vxlan_udp_port) {
301                 p_tun->port_vxlan_udp_port = p_src->vxlan_udp_port;
302                 p_tunn_cfg->set_vxlan_udp_port_flg = 1;
303                 p_tunn_cfg->vxlan_udp_port =
304                                 OSAL_CPU_TO_LE16(p_tun->port_vxlan_udp_port);
305         }
306
307         if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &p_tun->tunn_mode))
308                 p_tunn_cfg->tx_enable_l2gre = 1;
309
310         if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &p_tun->tunn_mode))
311                 p_tunn_cfg->tx_enable_ipgre = 1;
312
313         if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &p_tun->tunn_mode))
314                 p_tunn_cfg->tx_enable_vxlan = 1;
315
316         if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
317                 if (p_src->update_geneve_udp_port)
318                         DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
319                 p_src->update_geneve_udp_port = 0;
320                 return;
321         }
322
323         if (p_src->update_geneve_udp_port) {
324                 p_tun->port_geneve_udp_port = p_src->geneve_udp_port;
325                 p_tunn_cfg->set_geneve_udp_port_flg = 1;
326                 p_tunn_cfg->geneve_udp_port =
327                                 OSAL_CPU_TO_LE16(p_tun->port_geneve_udp_port);
328         }
329
330         if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &p_tun->tunn_mode))
331                 p_tunn_cfg->tx_enable_l2geneve = 1;
332
333         if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &p_tun->tunn_mode))
334                 p_tunn_cfg->tx_enable_ipgeneve = 1;
335
336         type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
337         p_tun->tunn_clss_l2geneve = type;
338         p_tunn_cfg->tunnel_clss_l2geneve = p_tun->tunn_clss_l2geneve;
339         type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
340         p_tun->tunn_clss_ipgeneve = type;
341         p_tunn_cfg->tunnel_clss_ipgeneve = p_tun->tunn_clss_ipgeneve;
342 }
343
/**
 * ecore_sp_pf_start(): post the COMMON_RAMROD_PF_START ramrod that brings
 * this PF up in FW. The ramrod conveys the event-queue and consolidation-
 * queue PBL addresses, multi-function mode, tunnel configuration, SR-IOV
 * VF range and the ETH HSI version. After posting, the HW tunnel
 * registers and UDP destination ports are programmed to match.
 *
 * @p_hwfn: HW function context.
 * @p_tunn: optional initial tunnel configuration (may be OSAL_NULL).
 * @mode: multi-function mode to report to FW.
 * @allow_npar_tx_switch: enable Tx switching when in MF-SI (NPAR) mode.
 *
 * Return: ECORE_SUCCESS or an error code from SPQ acquire/post.
 */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_tunn_start_params *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
        u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 page_cnt;

        /* update initial eq producer */
        ecore_eq_prod_update(p_hwfn,
                             ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        /* Initialize the SPQ entry for the ramrod */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_START,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the ramrod data */
        p_ramrod = &p_ent->ramrod.pf_start;
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

        /* Map the ecore MF mode onto the FW mf_mode; unsupported values
         * fall back to NPAR with a notice.
         */
        switch (mode) {
        case ECORE_MF_DEFAULT:
        case ECORE_MF_NPAR:
                p_ramrod->mf_mode = MF_NPAR;
                break;
        case ECORE_MF_OVLAN:
                p_ramrod->mf_mode = MF_OVLAN;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }
        /* NOTE(review): written without an explicit LE conversion unlike the
         * other u16 fields here — presumably outer_tag is byte-order
         * neutral in this HSI version; confirm against the FW HSI.
         */
        p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

        /* Fill tunnel configuration (no-op when p_tunn is OSAL_NULL) */
        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);

        if (IS_MF_SI(p_hwfn))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                         p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        /* Report the VF range owned by this PF, if SR-IOV is present */
        if (p_hwfn->p_dev->p_iov_info) {
                struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

                p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8)p_iov->total_vfs;
        }
        /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
         * version is available.
         */
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        /* Program HW tunnel registers/UDP ports to match what was sent to
         * FW (done after the post, regardless of its return code).
         */
        if (p_tunn) {
                if (p_tunn->update_vxlan_udp_port)
                        ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                                  p_tunn->vxlan_udp_port);

                if (p_tunn->update_geneve_udp_port)
                        ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                                   p_tunn->geneve_udp_port);

                ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
                                       p_tunn->tunn_mode);
        }

        return rc;
}
455
456 enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
457 {
458         struct ecore_spq_entry *p_ent = OSAL_NULL;
459         struct ecore_sp_init_data init_data;
460         enum _ecore_status_t rc = ECORE_NOTIMPL;
461
462         /* Get SPQ entry */
463         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
464         init_data.cid = ecore_spq_get_cid(p_hwfn);
465         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
466         init_data.comp_mode = ECORE_SPQ_MODE_CB;
467
468         rc = ecore_sp_init_request(p_hwfn, &p_ent,
469                                    COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
470                                    &init_data);
471         if (rc != ECORE_SUCCESS)
472                 return rc;
473
474         ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
475                                         &p_ent->ramrod.pf_update);
476
477         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
478 }
479
480 enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
481                                         struct ecore_rl_update_params *params)
482 {
483         struct ecore_spq_entry *p_ent = OSAL_NULL;
484         enum _ecore_status_t rc = ECORE_NOTIMPL;
485         struct rl_update_ramrod_data *rl_update;
486         struct ecore_sp_init_data init_data;
487
488         /* Get SPQ entry */
489         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
490         init_data.cid = ecore_spq_get_cid(p_hwfn);
491         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
492         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
493
494         rc = ecore_sp_init_request(p_hwfn, &p_ent,
495                                    COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
496                                    &init_data);
497         if (rc != ECORE_SUCCESS)
498                 return rc;
499
500         rl_update = &p_ent->ramrod.rl_update;
501
502         rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
503         rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
504         rl_update->rl_init_flg = params->rl_init_flg;
505         rl_update->rl_start_flg = params->rl_start_flg;
506         rl_update->rl_stop_flg = params->rl_stop_flg;
507         rl_update->rl_id_first = params->rl_id_first;
508         rl_update->rl_id_last = params->rl_id_last;
509         rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
510         rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
511         rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
512         rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
513         rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
514         rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
515         rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
516         rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
517                 params->dcqcn_timeuot_us);
518         rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
519
520         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
521 }
522
523 /* Set pf update ramrod command params */
524 enum _ecore_status_t
525 ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
526                             struct ecore_tunn_update_params *p_tunn,
527                             enum spq_mode comp_mode,
528                             struct ecore_spq_comp_cb *p_comp_data)
529 {
530         struct ecore_spq_entry *p_ent = OSAL_NULL;
531         struct ecore_sp_init_data init_data;
532         enum _ecore_status_t rc = ECORE_NOTIMPL;
533
534         /* Get SPQ entry */
535         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
536         init_data.cid = ecore_spq_get_cid(p_hwfn);
537         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
538         init_data.comp_mode = comp_mode;
539         init_data.p_comp_data = p_comp_data;
540
541         rc = ecore_sp_init_request(p_hwfn, &p_ent,
542                                    COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
543                                    &init_data);
544         if (rc != ECORE_SUCCESS)
545                 return rc;
546
547         ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
548                                         &p_ent->ramrod.pf_update.tunnel_config);
549
550         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
551         if (rc != ECORE_SUCCESS)
552                 return rc;
553
554         if (p_tunn->update_vxlan_udp_port)
555                 ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
556                                           p_tunn->vxlan_udp_port);
557
558         if (p_tunn->update_geneve_udp_port)
559                 ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
560                                            p_tunn->geneve_udp_port);
561
562         ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
563
564         return rc;
565 }
566
567 enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
568 {
569         struct ecore_spq_entry *p_ent = OSAL_NULL;
570         struct ecore_sp_init_data init_data;
571         enum _ecore_status_t rc = ECORE_NOTIMPL;
572
573         /* Get SPQ entry */
574         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
575         init_data.cid = ecore_spq_get_cid(p_hwfn);
576         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
577         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
578
579         rc = ecore_sp_init_request(p_hwfn, &p_ent,
580                                    COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
581                                    &init_data);
582         if (rc != ECORE_SUCCESS)
583                 return rc;
584
585         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
586 }
587
588 enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
589 {
590         struct ecore_spq_entry *p_ent = OSAL_NULL;
591         struct ecore_sp_init_data init_data;
592         enum _ecore_status_t rc;
593
594         /* Get SPQ entry */
595         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
596         init_data.cid = ecore_spq_get_cid(p_hwfn);
597         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
598         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
599
600         rc = ecore_sp_init_request(p_hwfn, &p_ent,
601                                    COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
602                                    &init_data);
603         if (rc != ECORE_SUCCESS)
604                 return rc;
605
606         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
607 }