/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
12 #include "ecore_status.h"
13 #include "ecore_chain.h"
14 #include "ecore_spq.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_cxt.h"
17 #include "ecore_sp_commands.h"
18 #include "ecore_gtt_reg_addr.h"
19 #include "ecore_iro.h"
21 #include "ecore_int.h"
24 enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
25 struct ecore_spq_entry **pp_ent,
28 struct ecore_sp_init_data *p_data)
30 u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
31 struct ecore_spq_entry *p_ent = OSAL_NULL;
32 enum _ecore_status_t rc = ECORE_NOTIMPL;
34 /* Get an SPQ entry */
35 rc = ecore_spq_get_entry(p_hwfn, pp_ent);
36 if (rc != ECORE_SUCCESS)
39 /* Fill the SPQ entry */
41 p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
42 p_ent->elem.hdr.cmd_id = cmd;
43 p_ent->elem.hdr.protocol_id = protocol;
44 p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
45 p_ent->comp_mode = p_data->comp_mode;
46 p_ent->comp_done.done = 0;
48 switch (p_ent->comp_mode) {
49 case ECORE_SPQ_MODE_EBLOCK:
50 p_ent->comp_cb.cookie = &p_ent->comp_done;
53 case ECORE_SPQ_MODE_BLOCK:
54 if (!p_data->p_comp_data)
57 p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
60 case ECORE_SPQ_MODE_CB:
61 if (!p_data->p_comp_data)
62 p_ent->comp_cb.function = OSAL_NULL;
64 p_ent->comp_cb = *p_data->p_comp_data;
68 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
73 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
74 "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
75 opaque_cid, cmd, protocol,
76 (unsigned long)&p_ent->ramrod,
77 D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
78 ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
81 OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
86 static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
89 case ECORE_TUNN_CLSS_MAC_VLAN:
90 return TUNNEL_CLSS_MAC_VLAN;
91 case ECORE_TUNN_CLSS_MAC_VNI:
92 return TUNNEL_CLSS_MAC_VNI;
93 case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
94 return TUNNEL_CLSS_INNER_MAC_VLAN;
95 case ECORE_TUNN_CLSS_INNER_MAC_VNI:
96 return TUNNEL_CLSS_INNER_MAC_VNI;
98 return TUNNEL_CLSS_MAC_VLAN;
103 ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
104 struct ecore_tunn_update_params *p_src,
105 struct pf_update_tunnel_config *p_tunn_cfg)
107 unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
108 unsigned long update_mask = p_src->tunn_mode_update_mask;
109 unsigned long tunn_mode = p_src->tunn_mode;
110 unsigned long new_tunn_mode = 0;
112 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
113 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
114 OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
116 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
117 OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
120 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
121 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
122 OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
124 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
125 OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
128 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
129 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
130 OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
132 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
133 OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
136 if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
137 if (p_src->update_geneve_udp_port)
138 DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
139 p_src->update_geneve_udp_port = 0;
140 p_src->tunn_mode = new_tunn_mode;
144 if (p_src->update_geneve_udp_port) {
145 p_tunn_cfg->set_geneve_udp_port_flg = 1;
146 p_tunn_cfg->geneve_udp_port =
147 OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
150 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
151 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
152 OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
154 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
155 OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
158 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
159 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
160 OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
162 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
163 OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
166 p_src->tunn_mode = new_tunn_mode;
170 ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
171 struct ecore_tunn_update_params *p_src,
172 struct pf_update_tunnel_config *p_tunn_cfg)
174 unsigned long tunn_mode = p_src->tunn_mode;
175 enum tunnel_clss type;
177 ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
178 p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
179 p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
181 type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
182 p_tunn_cfg->tunnel_clss_vxlan = type;
183 type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
184 p_tunn_cfg->tunnel_clss_l2gre = type;
185 type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
186 p_tunn_cfg->tunnel_clss_ipgre = type;
188 if (p_src->update_vxlan_udp_port) {
189 p_tunn_cfg->set_vxlan_udp_port_flg = 1;
190 p_tunn_cfg->vxlan_udp_port =
191 OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
194 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
195 p_tunn_cfg->tx_enable_l2gre = 1;
197 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
198 p_tunn_cfg->tx_enable_ipgre = 1;
200 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
201 p_tunn_cfg->tx_enable_vxlan = 1;
203 if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
204 if (p_src->update_geneve_udp_port)
205 DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
206 p_src->update_geneve_udp_port = 0;
210 if (p_src->update_geneve_udp_port) {
211 p_tunn_cfg->set_geneve_udp_port_flg = 1;
212 p_tunn_cfg->geneve_udp_port =
213 OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
216 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
217 p_tunn_cfg->tx_enable_l2geneve = 1;
219 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
220 p_tunn_cfg->tx_enable_ipgeneve = 1;
222 type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
223 p_tunn_cfg->tunnel_clss_l2geneve = type;
224 type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
225 p_tunn_cfg->tunnel_clss_ipgeneve = type;
228 static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
229 struct ecore_ptt *p_ptt,
230 unsigned long tunn_mode)
232 u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
233 u8 l2geneve_enable = 0, ipgeneve_enable = 0;
235 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
238 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
241 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
244 ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
245 ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
247 if (ECORE_IS_BB_A0(p_hwfn->p_dev))
250 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
253 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
256 ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
261 ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
262 struct ecore_tunn_start_params *p_src,
263 struct pf_start_tunnel_config *p_tunn_cfg)
265 unsigned long tunn_mode;
266 enum tunnel_clss type;
271 tunn_mode = p_src->tunn_mode;
272 type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
273 p_tunn_cfg->tunnel_clss_vxlan = type;
274 type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
275 p_tunn_cfg->tunnel_clss_l2gre = type;
276 type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
277 p_tunn_cfg->tunnel_clss_ipgre = type;
279 if (p_src->update_vxlan_udp_port) {
280 p_tunn_cfg->set_vxlan_udp_port_flg = 1;
281 p_tunn_cfg->vxlan_udp_port =
282 OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
285 if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
286 p_tunn_cfg->tx_enable_l2gre = 1;
288 if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
289 p_tunn_cfg->tx_enable_ipgre = 1;
291 if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
292 p_tunn_cfg->tx_enable_vxlan = 1;
294 if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
295 if (p_src->update_geneve_udp_port)
296 DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
297 p_src->update_geneve_udp_port = 0;
301 if (p_src->update_geneve_udp_port) {
302 p_tunn_cfg->set_geneve_udp_port_flg = 1;
303 p_tunn_cfg->geneve_udp_port =
304 OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
307 if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
308 p_tunn_cfg->tx_enable_l2geneve = 1;
310 if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
311 p_tunn_cfg->tx_enable_ipgeneve = 1;
313 type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
314 p_tunn_cfg->tunnel_clss_l2geneve = type;
315 type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
316 p_tunn_cfg->tunnel_clss_ipgeneve = type;
319 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
320 struct ecore_tunn_start_params *p_tunn,
321 enum ecore_mf_mode mode,
322 bool allow_npar_tx_switch)
324 struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
325 struct ecore_spq_entry *p_ent = OSAL_NULL;
326 u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
327 u8 sb_index = p_hwfn->p_eq->eq_sb_index;
328 enum _ecore_status_t rc = ECORE_NOTIMPL;
329 struct ecore_sp_init_data init_data;
332 /* update initial eq producer */
333 ecore_eq_prod_update(p_hwfn,
334 ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
336 /* Initialize the SPQ entry for the ramrod */
337 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
338 init_data.cid = ecore_spq_get_cid(p_hwfn);
339 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
340 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
342 rc = ecore_sp_init_request(p_hwfn, &p_ent,
343 COMMON_RAMROD_PF_START,
344 PROTOCOLID_COMMON, &init_data);
345 if (rc != ECORE_SUCCESS)
348 /* Fill the ramrod data */
349 p_ramrod = &p_ent->ramrod.pf_start;
350 p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
351 p_ramrod->event_ring_sb_index = sb_index;
352 p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
353 p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
355 /* For easier debugging */
356 p_ramrod->dont_log_ramrods = 0;
357 p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
360 case ECORE_MF_DEFAULT:
362 p_ramrod->mf_mode = MF_NPAR;
365 p_ramrod->mf_mode = MF_OVLAN;
368 DP_NOTICE(p_hwfn, true,
369 "Unsupported MF mode, init as DEFAULT\n");
370 p_ramrod->mf_mode = MF_NPAR;
373 /* Place EQ address in RAMROD */
374 DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
375 p_hwfn->p_eq->chain.pbl.p_phys_table);
376 page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
377 p_ramrod->event_ring_num_pages = page_cnt;
378 DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
379 p_hwfn->p_consq->chain.pbl.p_phys_table);
381 ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
382 &p_ramrod->tunnel_config);
384 if (IS_MF_SI(p_hwfn))
385 p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
387 switch (p_hwfn->hw_info.personality) {
389 p_ramrod->personality = PERSONALITY_ETH;
392 DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
393 p_hwfn->hw_info.personality);
394 p_ramrod->personality = PERSONALITY_ETH;
397 p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
398 p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;
400 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
401 "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
402 sb, sb_index, p_ramrod->outer_tag);
404 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
407 ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
409 p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
415 enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
417 struct ecore_spq_entry *p_ent = OSAL_NULL;
418 enum _ecore_status_t rc = ECORE_NOTIMPL;
419 struct ecore_sp_init_data init_data;
422 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
423 init_data.cid = ecore_spq_get_cid(p_hwfn);
424 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
425 init_data.comp_mode = ECORE_SPQ_MODE_CB;
427 rc = ecore_sp_init_request(p_hwfn, &p_ent,
428 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
430 if (rc != ECORE_SUCCESS)
433 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
436 /* Set pf update ramrod command params */
438 ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
439 struct ecore_tunn_update_params *p_tunn,
440 enum spq_mode comp_mode,
441 struct ecore_spq_comp_cb *p_comp_data)
443 struct ecore_spq_entry *p_ent = OSAL_NULL;
444 enum _ecore_status_t rc = ECORE_NOTIMPL;
445 struct ecore_sp_init_data init_data;
448 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
449 init_data.cid = ecore_spq_get_cid(p_hwfn);
450 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
451 init_data.comp_mode = comp_mode;
452 init_data.p_comp_data = p_comp_data;
454 rc = ecore_sp_init_request(p_hwfn, &p_ent,
455 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
457 if (rc != ECORE_SUCCESS)
460 ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
461 &p_ent->ramrod.pf_update.tunnel_config);
463 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
465 if ((rc == ECORE_SUCCESS) && p_tunn) {
466 if (p_tunn->update_vxlan_udp_port)
467 ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
468 p_tunn->vxlan_udp_port);
469 if (p_tunn->update_geneve_udp_port)
470 ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
471 p_tunn->geneve_udp_port);
473 ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
475 p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
481 enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
483 enum _ecore_status_t rc = ECORE_NOTIMPL;
484 struct ecore_spq_entry *p_ent = OSAL_NULL;
485 struct ecore_sp_init_data init_data;
488 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
489 init_data.cid = ecore_spq_get_cid(p_hwfn);
490 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
491 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
493 rc = ecore_sp_init_request(p_hwfn, &p_ent,
494 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
496 if (rc != ECORE_SUCCESS)
499 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
502 enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
504 struct ecore_spq_entry *p_ent = OSAL_NULL;
505 enum _ecore_status_t rc = ECORE_NOTIMPL;
506 struct ecore_sp_init_data init_data;
509 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
510 init_data.cid = ecore_spq_get_cid(p_hwfn);
511 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
512 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
514 rc = ecore_sp_init_request(p_hwfn, &p_ent,
515 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
517 if (rc != ECORE_SUCCESS)
520 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);