/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_int.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"

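/* Acquire an SPQ entry and initialize its header and completion method for
 * the given command and protocol. The CID written to the element header
 * packs the opaque FID in the upper 16 bits and the CID in the lower 16.
 */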
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
					   struct ecore_spq_entry **pp_ent,
					   u8 cmd,
					   u8 protocol,
					   struct ecore_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!pp_ent)
		return ECORE_INVAL;

	/* Get an SPQ entry */
	rc = ecore_spq_get_entry(p_hwfn, pp_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the SPQ entry */
	p_ent = *pp_ent;
	p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case ECORE_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return ECORE_INVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case ECORE_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = OSAL_NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return ECORE_SUCCESS;
}

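/* Map an ecore tunnel classification type onto the FW HSI value;
 * unrecognized types fall back to TUNNEL_CLSS_MAC_VLAN.
 */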
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
	switch (type) {
	case ECORE_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case ECORE_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case ECORE_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}

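/* Fold requested tunnel mode changes into the device-wide tunnel state.
 * On PF start (b_pf_start) every mode is taken from p_src; otherwise only
 * modes flagged with b_update_mode are copied.
 */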
static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
			      struct ecore_tunnel_info *p_src,
			      bool b_pf_start)
{
	if (p_src->vxlan.b_update_mode || b_pf_start)
		p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

	if (p_src->l2_gre.b_update_mode || b_pf_start)
		p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

	if (p_src->ip_gre.b_update_mode || b_pf_start)
		p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

	if (p_src->l2_geneve.b_update_mode || b_pf_start)
		p_tun->l2_geneve.b_mode_enabled =
			p_src->l2_geneve.b_mode_enabled;

	if (p_src->ip_geneve.b_update_mode || b_pf_start)
		p_tun->ip_geneve.b_mode_enabled =
			p_src->ip_geneve.b_mode_enabled;
}

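/* Cache the requested Rx/Tx classification updates, converting each
 * class via the FW mapping above.
 */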
static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
				    struct ecore_tunnel_info *p_src)
{
	enum tunnel_clss type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

	/* @DPDK - typecast tunnel class */
	type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
	p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
	p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
	p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
	p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
	p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}

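/* Cache the requested VXLAN/GENEVE UDP destination ports */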
static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
				 struct ecore_tunnel_info *p_src)
{
	p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
	p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

	if (p_src->geneve_port.b_update_port)
		p_tun->geneve_port.port = p_src->geneve_port.port;

	if (p_src->vxlan_port.b_update_port)
		p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

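/* Helpers filling a ramrod's tunnel classification field and, for the
 * UDP-port variant, the port-update flag and port number.
 */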
static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
				struct ecore_tunn_update_type *tun_type)
{
	*p_tunn_cls = tun_type->tun_cls;
}

static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
			      struct ecore_tunn_update_type *tun_type,
			      u8 *p_update_port, __le16 *p_port,
			      struct ecore_tunn_update_udp_port *p_udp_port)
{
	__ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
	if (p_udp_port->b_update_port) {
		*p_update_port = 1;
		*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
	}
}

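/* Build the tunnel section of a PF_UPDATE ramrod from the cached
 * device-wide tunnel state after folding in the requested changes.
 */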
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
				struct ecore_tunnel_info *p_src,
				struct pf_update_tunnel_config *p_tunn_cfg)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

	ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
	ecore_set_tunn_cls_info(p_tun, p_src);
	ecore_set_tunn_ports(p_tun, p_src);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				      &p_tun->vxlan,
				      &p_tunn_cfg->set_vxlan_udp_port_flg,
				      &p_tunn_cfg->vxlan_udp_port,
				      &p_tun->vxlan_port);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				      &p_tun->l2_geneve,
				      &p_tunn_cfg->set_geneve_udp_port_flg,
				      &p_tunn_cfg->geneve_udp_port,
				      &p_tun->geneve_port);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
					&p_tun->ip_geneve);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
					&p_tun->l2_gre);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
					&p_tun->ip_gre);

	p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
	p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}

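/* Program the HW tunnel-enable configuration for GRE, VXLAN and GENEVE */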
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_tunnel_info *p_tun)
{
	ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
			     p_tun->ip_gre.b_mode_enabled);
	ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

	ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
				p_tun->ip_geneve.b_mode_enabled);
}

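/* Apply UDP destination ports and tunnel-enable modes to HW; A0 chips
 * do not support this configuration.
 */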
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
					struct ecore_tunnel_info *p_tunn)
{
	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel hw config is not supported\n");
		return;
	}

	if (p_tunn->vxlan_port.b_update_port)
		ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					  p_tunn->vxlan_port.port);

	if (p_tunn->geneve_port.b_update_port)
		ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					   p_tunn->geneve_port.port);

	ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}

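/* Build the tunnel section of the PF_START ramrod. Unlike a PF update,
 * all tunnel modes are applied unconditionally (b_pf_start is true).
 */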
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
			       struct ecore_tunnel_info *p_src,
			       struct pf_start_tunnel_config *p_tunn_cfg)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf start config is not supported\n");
		return;
	}

	if (!p_src)
		return;

	ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
	ecore_set_tunn_cls_info(p_tun, p_src);
	ecore_set_tunn_ports(p_tun, p_src);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
				      &p_tun->vxlan,
				      &p_tunn_cfg->set_vxlan_udp_port_flg,
				      &p_tunn_cfg->vxlan_udp_port,
				      &p_tun->vxlan_port);

	ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
				      &p_tun->l2_geneve,
				      &p_tunn_cfg->set_geneve_udp_port_flg,
				      &p_tunn_cfg->geneve_udp_port,
				      &p_tun->geneve_port);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
					&p_tun->ip_geneve);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
					&p_tun->l2_gre);

	__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
					&p_tun->ip_gre);
}

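/* Send the PF_START ramrod carrying the event/consolidation queue PBL
 * addresses, MF mode, tunnel configuration, SR-IOV VF range and the ETH
 * HSI version the driver was built against.
 */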
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_tunnel_info *p_tunn,
				       enum ecore_mf_mode mode,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

	switch (mode) {
	case ECORE_MF_DEFAULT:
	case ECORE_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case ECORE_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}

	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}

	/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
	 * version is available.
	 */
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	if (p_tunn)
		ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);

	return rc;
}

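/* Send a PF_UPDATE ramrod propagating the current DCBx results to FW */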
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
					&p_ent->ramrod.pf_update);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

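/* Send an RL_UPDATE ramrod configuring rate-limiter/DCQCN parameters for
 * the rl_id_first..rl_id_last range. The "timeuot" spelling below follows
 * the FW HSI field names.
 */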
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
					struct ecore_rl_update_params *params)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct rl_update_ramrod_data *rl_update;
	struct ecore_sp_init_data init_data;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_update = &p_ent->ramrod.rl_update;

	rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
	rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
	rl_update->rl_init_flg = params->rl_init_flg;
	rl_update->rl_start_flg = params->rl_start_flg;
	rl_update->rl_stop_flg = params->rl_stop_flg;
	rl_update->rl_id_first = params->rl_id_first;
	rl_update->rl_id_last = params->rl_id_last;
	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
	rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
	rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
	rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
	rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
	rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
		params->dcqcn_timeuot_us);
	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_tunnel_info *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf update config is not supported\n");
		return rc;
	}

	if (!p_tunn)
		return ECORE_INVAL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);

	return rc;
}

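/* Tell FW to stop the PF; blocks until the ramrod completes (EBLOCK) */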
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

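/* Post an empty ramrod, presumably as a FW liveness check (heartbeat) */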
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}