/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"
#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
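/* Note: CRC32_POLY is the CRC-32C (Castagnoli) polynomial. It is used by
 * ecore_calc_crc32c() further below to hash multicast MACs into the
 * approximate multicast filter bins.
 */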
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u16 rx_mode = 0, tx_err = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;
	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

	/* Handle requests for strict behavior on transmission errors */
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
		  p_params->b_err_illegal_vlan_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
		  p_params->b_err_small_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
		  p_params->b_err_anti_spoof ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
		  p_params->b_err_illegal_inband_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
		  p_params->b_err_vlan_insert_with_inband ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
		  p_params->b_err_big_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
		  p_params->b_err_ctrl_frame ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

	/* TPA related fields */
	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
		    sizeof(struct eth_vport_tpa_param));
	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case ECORE_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		p_ramrod->tx_switching_en = 0;
#endif

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
						    p_params->concrete_fid);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
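/* Illustrative PF-side usage sketch (not part of the original driver;
 * field values are examples only):
 *
 *	struct ecore_sp_vport_start_params start_params = { 0 };
 *
 *	start_params.vport_id = 0;
 *	start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *	start_params.mtu = 1500;
 *	start_params.remove_inner_vlan = 1;
 *	rc = ecore_sp_vport_start(p_hwfn, &start_params);
 *
 * Note the vport comes up with UCAST/MCAST_DROP_ALL set; a later
 * ecore_sp_vport_update() / ecore_filter_accept_cmd() opens the Rx mode.
 */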
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					       p_params->mtu,
					       p_params->remove_inner_vlan,
					       p_params->tpa_mode,
					       p_params->max_buffers_per_cqe,
					       p_params->only_untagged);

	return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct eth_vport_rss_config *p_config;
	u16 abs_l2_queue = 0;
	int i;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	p_config = &p_ramrod->rss_config;

	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
	    ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		rc = ecore_fw_l2_queue(p_hwfn,
				       (u8)p_rss->rss_ind_table[i],
				       &abs_l2_queue);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
			   i, p_config->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}
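/* Note on the layout above: tbl_size is the log2 of the indirection
 * table size, and the RSS key is programmed as ten 32-bit words, i.e.
 * the usual 40-byte Toeplitz-style key.
 */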
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct ecore_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
	    accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
	    accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
	/* On B0 emulation we cannot enable Tx, since this would cause writes
	 * to PVFC HW block which isn't implemented in emulation.
	 */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx mode in vport update\n");
		p_ramrod->common.update_tx_mode_flg = 0;
	}
#endif

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n",
			   state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n",
			   state);
	}
}
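/* Example (illustrative): an Rx-promiscuous configuration would pass
 *
 *	rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *			   ECORE_ACCEPT_UCAST_UNMATCHED |
 *			   ECORE_ACCEPT_MCAST_MATCHED |
 *			   ECORE_ACCEPT_MCAST_UNMATCHED |
 *			   ECORE_ACCEPT_BCAST;
 *
 * which, in the translation above, clears both *_DROP_ALL fields and
 * sets UCAST_ACCEPT_UNMATCHED, MCAST_ACCEPT_ALL and BCAST_ACCEPT_ALL.
 */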
static void
ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
			      struct vport_update_ramrod_data *p_ramrod,
			      struct ecore_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void
ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_sp_vport_update_params *p_params)
{
	int i;

	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
		    sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
	}
}
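/* Note: multicast filtering is approximate - each MAC hashes to one of
 * 256 bins (see ecore_mcast_bin_from_mac() below) and only bin bits are
 * programmed, so distinct MACs that collide on a bin are also accepted.
 */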
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct ecore_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false,
				  "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif

	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return spq entry which is taken in ecore_sp_init_request()*/
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
				      p_params->sge_tpa_params);
	if (p_params->mtu) {
		p_ramrod->common.update_mtu_flg = 1;
		p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
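/* Illustrative usage sketch (assumed caller, example values): activating
 * a started vport is a single VPORT_UPDATE ramrod:
 *
 *	struct ecore_sp_vport_update_params update = { 0 };
 *
 *	update.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	update.vport_id = 0;
 *	update.update_vport_active_rx_flg = 1;
 *	update.vport_active_rx_flg = 1;
 *	update.update_vport_active_tx_flg = 1;
 *	update.vport_active_tx_flg = 1;
 *	rc = ecore_sp_vport_update(p_hwfn, &update,
 *				   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */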
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_stop(p_hwfn);

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
			 struct ecore_filter_accept_flags *p_accept_flags)
{
	struct ecore_sp_vport_update_params s_params;

	OSAL_MEMSET(&s_params, 0, sizeof(s_params));
	OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
		    sizeof(struct ecore_filter_accept_flags));

	return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
			u8 vport,
			struct ecore_filter_accept_flags accept_flags,
			u8 update_accept_any_vlan,
			u8 accept_any_vlan,
			enum spq_mode comp_mode,
			struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc != ECORE_SUCCESS)
				return rc;
			continue;
		}

		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);

		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
				       struct ecore_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return;

	ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
	p_cid_data->b_cid_allocated = false;
}
enum _ecore_status_t
ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
			      u16 opaque_fid,
			      u32 cid,
			      struct ecore_queue_start_common_params *p_params,
			      u16 bd_max_bytes,
			      dma_addr_t bd_chain_phys_addr,
			      dma_addr_t cqe_pbl_addr,
			      u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = p_params->vport_id;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, p_params->queue_id,
		   p_params->vport_id, p_params->sb);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
	p_ramrod->sb_index = (u8)p_params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = p_params->stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_params->vf_qid || b_use_zone_a_prod) {
		p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_use_zone_a_prod ? " [legacy]" : "",
			   p_params->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t
ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct ecore_queue_start_common_params *p_params,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM * *pp_prod)
{
	struct ecore_hw_cid_data *p_rx_cid;
	u32 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev)) {
		return ecore_vf_pf_rxq_start(p_hwfn,
					     (u8)p_params->queue_id,
					     p_params->sb,
					     (u8)p_params->sb_idx,
					     bd_max_bytes,
					     bd_chain_phys_addr,
					     cqe_pbl_addr,
					     cqe_pbl_size, pp_prod);
	}

	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
	    GTT_BAR0_MAP_REG_MSDM_RAM +
	    MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				   &p_rx_cid->cid);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;
	p_params->stats_id = abs_stats_id;
	p_params->vf_qid = 0;

	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
					   opaque_fid,
					   p_rx_cid->cid,
					   p_params,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr,
					   cqe_pbl_size,
					   false);

	if (rc != ECORE_SUCCESS)
		ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}
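/* Note on the flow above: the Rx producer of a PF queue lives in MSTORM
 * internal RAM, reached through the BAR0 GTT window, and is zeroed before
 * the start ramrod is posted, so the queue comes up with no buffers until
 * the driver advances the producer it received back in *pp_prod.
 */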
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      u16 rx_queue_id,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_hw_cid_data *p_rx_cid;
	u16 qid, abs_rx_q_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 i;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxqs_update(p_hwfn,
					       rx_queue_id,
					       num_rxqs,
					       complete_cqe_flg,
					       complete_event_flg);

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		qid = rx_queue_id + i;
		p_rx_cid = &p_hwfn->p_rx_cids[qid];

		/* Get SPQ entry */
		init_data.cid = p_rx_cid->cid;
		init_data.opaque_fid = p_rx_cid->opaque_fid;

		rc = ecore_sp_init_request(p_hwfn, &p_ent,
					   ETH_RAMROD_RX_QUEUE_UPDATE,
					   PROTOCOLID_ETH, &init_data);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;

		ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
		ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return rc;
}
enum _ecore_status_t
ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   u16 rx_queue_id,
			   bool eq_completion_only, bool cqe_completion)
{
	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
					    cqe_completion);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
					 p_hwfn->hw_info.opaque_fid) &&
				      !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
					 p_hwfn->hw_info.opaque_fid) ||
	    eq_completion_only;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}
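/* The completion flag selection above reduces to:
 *
 *	PF-owned queue (opaque_fid matches the hwfn):
 *		complete_event_flg = eq_completion_only;
 *		complete_cqe_flg   = !eq_completion_only || cqe_completion;
 *	VF-owned queue:
 *		complete_event_flg = 1;
 *		complete_cqe_flg   = cqe_completion;
 */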
enum _ecore_status_t
ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
			      u16 opaque_fid,
			      u32 cid,
			      struct ecore_queue_start_common_params *p_params,
			      dma_addr_t pbl_addr,
			      u16 pbl_size,
			      union ecore_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_hw_cid_data *p_tx_cid;
	u16 pq_id, abs_tx_qzone_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 abs_vport_id;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_l2_queue(p_hwfn, p_params->qzone_id, &abs_tx_qzone_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
	p_ramrod->sb_index = (u8)p_params->sb_idx;
	p_ramrod->stats_counter_id = p_params->stats_id;

	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);

	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
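/* Note: pbl_addr/pbl_size describe the page-base-list of the Tx BD chain,
 * while the QM physical queue (PQ) id is resolved from p_pq_params (e.g.
 * the traffic class chosen by the caller), binding the queue to a
 * scheduling context.
 */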
enum _ecore_status_t
ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct ecore_queue_start_common_params *p_params,
			    u8 tc,
			    dma_addr_t pbl_addr,
			    u16 pbl_size,
			    void OSAL_IOMEM * *pp_doorbell)
{
	struct ecore_hw_cid_data *p_tx_cid;
	union ecore_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev)) {
		return ecore_vf_pf_txq_start(p_hwfn,
					     p_params->queue_id,
					     p_params->sb,
					     (u8)p_params->sb_idx,
					     pbl_addr,
					     pbl_size,
					     pp_doorbell);
	}

	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));

	pq_params.eth.tc = tc;

	/* Allocate a CID for the queue */
	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid, p_params->queue_id,
		   p_params->vport_id, p_params->sb);

	p_params->stats_id = abs_stats_id;

	/* TODO - set tc in the pq_params for multi-cos */
	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
					   opaque_fid,
					   p_tx_cid->cid,
					   p_params,
					   pbl_addr,
					   pbl_size,
					   &pq_params);

	*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
	    DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc != ECORE_SUCCESS)
		ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}
enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
{
	return ECORE_NOTIMPL;
}
enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
						u16 tx_queue_id)
{
	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_tx_cid->cid;
	init_data.opaque_fid = p_tx_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}
static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case ECORE_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case ECORE_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case ECORE_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}
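/* Note: MOVE and REPLACE have no single firmware action; they are encoded
 * as two filter commands in one ramrod (see cmd_cnt in
 * ecore_filter_ucast_common() below), so they never reach this mapping.
 */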
1015 static enum _ecore_status_t
1016 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
1018 struct ecore_filter_ucast *p_filter_cmd,
1019 struct vport_filter_update_ramrod_data **pp_ramrod,
1020 struct ecore_spq_entry **pp_ent,
1021 enum spq_mode comp_mode,
1022 struct ecore_spq_comp_cb *p_comp_data)
1024 u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1025 struct vport_filter_update_ramrod_data *p_ramrod;
1026 struct eth_filter_cmd *p_first_filter;
1027 struct eth_filter_cmd *p_second_filter;
1028 struct ecore_sp_init_data init_data;
1029 enum eth_filter_action action;
1030 enum _ecore_status_t rc;
1032 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1033 &vport_to_remove_from);
1034 if (rc != ECORE_SUCCESS)
1037 rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1039 if (rc != ECORE_SUCCESS)
1043 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1044 init_data.cid = ecore_spq_get_cid(p_hwfn);
1045 init_data.opaque_fid = opaque_fid;
1046 init_data.comp_mode = comp_mode;
1047 init_data.p_comp_data = p_comp_data;
1049 rc = ecore_sp_init_request(p_hwfn, pp_ent,
1050 ETH_RAMROD_FILTERS_UPDATE,
1051 PROTOCOLID_ETH, &init_data);
1052 if (rc != ECORE_SUCCESS)
1055 *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1056 p_ramrod = *pp_ramrod;
1057 p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1058 p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1061 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1062 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1063 "Non-Asic - prevent Tx filters\n");
1064 p_ramrod->filter_cmd_hdr.tx = 0;
1068 switch (p_filter_cmd->opcode) {
1069 case ECORE_FILTER_REPLACE:
1070 case ECORE_FILTER_MOVE:
1071 p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
1074 p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
1078 p_first_filter = &p_ramrod->filter_cmds[0];
1079 p_second_filter = &p_ramrod->filter_cmds[1];
1081 switch (p_filter_cmd->type) {
1082 case ECORE_FILTER_MAC:
1083 p_first_filter->type = ETH_FILTER_TYPE_MAC;
1085 case ECORE_FILTER_VLAN:
1086 p_first_filter->type = ETH_FILTER_TYPE_VLAN;
1088 case ECORE_FILTER_MAC_VLAN:
1089 p_first_filter->type = ETH_FILTER_TYPE_PAIR;
1091 case ECORE_FILTER_INNER_MAC:
1092 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
1094 case ECORE_FILTER_INNER_VLAN:
1095 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
1097 case ECORE_FILTER_INNER_PAIR:
1098 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
1100 case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1101 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1103 case ECORE_FILTER_MAC_VNI_PAIR:
1104 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
1106 case ECORE_FILTER_VNI:
1107 p_first_filter->type = ETH_FILTER_TYPE_VNI;
1109 case ECORE_FILTER_UNUSED: /* @DPDK */
1110 p_first_filter->type = MAX_ETH_FILTER_TYPE;
1114 if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1115 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1116 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1117 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1118 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1119 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1120 ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1121 &p_first_filter->mac_mid,
1122 &p_first_filter->mac_lsb,
1123 (u8 *)p_filter_cmd->mac);
1125 if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1126 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1127 (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1128 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1129 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1131 if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1132 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1133 (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1134 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1136 if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1137 p_second_filter->type = p_first_filter->type;
1138 p_second_filter->mac_msb = p_first_filter->mac_msb;
1139 p_second_filter->mac_mid = p_first_filter->mac_mid;
1140 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1141 p_second_filter->vlan_id = p_first_filter->vlan_id;
1142 p_second_filter->vni = p_first_filter->vni;
1144 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1146 p_first_filter->vport_id = vport_to_remove_from;
1148 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1149 p_second_filter->vport_id = vport_to_add_to;
1150 } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1151 p_first_filter->vport_id = vport_to_add_to;
1152 OSAL_MEMCPY(p_second_filter, p_first_filter,
1153 sizeof(*p_second_filter));
1154 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1155 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1157 action = ecore_filter_action(p_filter_cmd->opcode);
1159 if (action == MAX_ETH_FILTER_ACTION) {
1160 DP_NOTICE(p_hwfn, true,
1161 "%d is not supported yet\n",
1162 p_filter_cmd->opcode);
1163 return ECORE_NOTIMPL;
1166 p_first_filter->action = action;
1167 p_first_filter->vport_id =
1168 (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1169 vport_to_remove_from : vport_to_add_to;
1172 return ECORE_SUCCESS;
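/* Example: ECORE_FILTER_MOVE of a MAC from vport A to vport B is encoded
 * as { REMOVE from A, ADD to B } in a single ramrod, while
 * ECORE_FILTER_REPLACE on a vport is encoded as { REMOVE_ALL, ADD }.
 */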
1175 enum _ecore_status_t
1176 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1178 struct ecore_filter_ucast *p_filter_cmd,
1179 enum spq_mode comp_mode,
1180 struct ecore_spq_comp_cb *p_comp_data)
1182 struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1183 struct ecore_spq_entry *p_ent = OSAL_NULL;
1184 struct eth_filter_cmd_header *p_header;
1185 enum _ecore_status_t rc;
1187 rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1189 comp_mode, p_comp_data);
1190 if (rc != ECORE_SUCCESS) {
1191 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1194 p_header = &p_ramrod->filter_cmd_hdr;
1195 p_header->assert_on_error = p_filter_cmd->assert_on_error;
1197 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1198 if (rc != ECORE_SUCCESS) {
1199 DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
1203 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1204 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1205 (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1206 ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1208 ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1209 "MOVE" : "REPLACE")),
1210 (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1211 ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1212 "VLAN" : "MAC & VLAN"),
1213 p_ramrod->filter_cmd_hdr.cmd_cnt,
1214 p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1215 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1216 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1217 p_filter_cmd->vport_to_add_to,
1218 p_filter_cmd->vport_to_remove_from,
1219 p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1220 p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1221 p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1222 p_filter_cmd->vlan);
1224 return ECORE_SUCCESS;
/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 ecore_calc_crc32c(u8 *crc32_packet,
			     u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((crc32_packet == OSAL_NULL) ||
	    (crc32_length == 0) || ((crc32_length % 8) != 0)) {
		return crc32_result;
	}

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1;
			}
		}
	}

	return crc32_result;
}
static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
u8 ecore_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				  mac, ETH_ALEN);

	return crc & 0xff;
}
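/* Example: the bin index is simply the low byte of the CRC32c of the six
 * MAC bytes (zero-padded to eight), i.e.
 *
 *	bin = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
 *			      mac, ETH_ALEN) & 0xff;
 *
 * giving one of 256 bins spread across the eight 32-bit approx_mcast
 * registers (bit bin % 32 within register bin / 32).
 */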
1275 static enum _ecore_status_t
1276 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1278 struct ecore_filter_mcast *p_filter_cmd,
1279 enum spq_mode comp_mode,
1280 struct ecore_spq_comp_cb *p_comp_data)
1282 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1283 struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1284 struct ecore_spq_entry *p_ent = OSAL_NULL;
1285 struct ecore_sp_init_data init_data;
1286 u8 abs_vport_id = 0;
1287 enum _ecore_status_t rc;
1290 if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1291 rc = ecore_fw_vport(p_hwfn,
1292 p_filter_cmd->vport_to_add_to,
1295 rc = ecore_fw_vport(p_hwfn,
1296 p_filter_cmd->vport_to_remove_from,
1298 if (rc != ECORE_SUCCESS)
1302 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1303 init_data.cid = ecore_spq_get_cid(p_hwfn);
1304 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1305 init_data.comp_mode = comp_mode;
1306 init_data.p_comp_data = p_comp_data;
1308 rc = ecore_sp_init_request(p_hwfn, &p_ent,
1309 ETH_RAMROD_VPORT_UPDATE,
1310 PROTOCOLID_ETH, &init_data);
1311 if (rc != ECORE_SUCCESS) {
1312 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1316 p_ramrod = &p_ent->ramrod.vport_update;
1317 p_ramrod->common.update_approx_mcast_flg = 1;
1319 /* explicitly clear out the entire vector */
1320 OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1321 0, sizeof(p_ramrod->approx_mcast.bins));
1322 OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
1323 ETH_MULTICAST_MAC_BINS_IN_REGS);
1324 /* filter ADD op is explicit set op and it removes
1325 * any existing filters for the vport.
1327 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1328 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1331 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1332 OSAL_SET_BIT(bit, bins);
1335 /* Convert to correct endianity */
1336 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1337 struct vport_update_ramrod_mcast *p_ramrod_bins;
1338 u32 *p_bins = (u32 *)bins;
1340 p_ramrod_bins = &p_ramrod->approx_mcast;
1341 p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
1345 p_ramrod->common.vport_id = abs_vport_id;
1347 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1348 if (rc != ECORE_SUCCESS)
1349 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1354 enum _ecore_status_t
1355 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1356 struct ecore_filter_mcast *p_filter_cmd,
1357 enum spq_mode comp_mode,
1358 struct ecore_spq_comp_cb *p_comp_data)
1360 enum _ecore_status_t rc = ECORE_SUCCESS;
1363 /* only ADD and REMOVE operations are supported for multi-cast */
1364 if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1365 (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
1366 (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1370 for_each_hwfn(p_dev, i) {
1371 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1375 ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1379 opaque_fid = p_hwfn->hw_info.opaque_fid;
1380 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1383 comp_mode, p_comp_data);
1384 if (rc != ECORE_SUCCESS)
1391 enum _ecore_status_t
1392 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1393 struct ecore_filter_ucast *p_filter_cmd,
1394 enum spq_mode comp_mode,
1395 struct ecore_spq_comp_cb *p_comp_data)
1397 enum _ecore_status_t rc = ECORE_SUCCESS;
1400 for_each_hwfn(p_dev, i) {
1401 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1405 rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1409 opaque_fid = p_hwfn->hw_info.opaque_fid;
1410 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1413 comp_mode, p_comp_data);
1414 if (rc != ECORE_SUCCESS)
/* Statistics related code */
static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}
static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
					 statistics_bin);

	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->p_dev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}
static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
					 statistics_bin);

	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
	ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}
static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
					 statistics_bin);

	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));

	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
			  p_hwfn->mcp_info->port_addr +
			  OFFSETOF(struct public_port, stats),
			  sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.eth.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
	p_stats->rx_crc_errors += port_stats.eth.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_stats->rx_pause_frames += port_stats.eth.rxpf;
	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
	p_stats->rx_align_errors += port_stats.eth.raln;
	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
	p_stats->rx_oversize_packets += port_stats.eth.rovr;
	p_stats->rx_jabbers += port_stats.eth.rjbr;
	p_stats->rx_undersize_packets += port_stats.eth.rund;
	p_stats->rx_fragments += port_stats.eth.rfrg;
	p_stats->tx_64_byte_packets += port_stats.eth.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
	p_stats->tx_pause_frames += port_stats.eth.txpf;
	p_stats->tx_pfc_frames += port_stats.eth.txpp;
	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
	p_stats->tx_total_collisions += port_stats.eth.tncl;
	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}
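/* Note: every __ecore_get_vport_*stats() reader above accumulates (+=)
 * into the caller's ecore_eth_stats rather than overwriting it, so the
 * struct must be zeroed first; _ecore_get_vport_stats() below does that
 * before summing over all hwfns.
 */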
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats)
{
	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

#ifndef ASIC_ONLY
	/* Avoid getting PORT stats for emulation. */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return;
#endif

	if (b_get_port_stats && p_hwfn->mcp_info)
		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
				   struct ecore_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	OSAL_MEMSET(stats, 0, sizeof(*stats));

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
		    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;

		if (IS_PF(p_dev)) {
			/* The main vport index is relative first */
			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
					IS_PF(p_dev) ? true : false);

out:
		if (IS_PF(p_dev) && p_ptt)
			ecore_ptt_release(p_hwfn, p_ptt);
	}
}
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats)
{
	u32 i;

	if (!p_dev) {
		OSAL_MEMSET(stats, 0, sizeof(*stats));
		return;
	}

	_ecore_get_vport_stats(p_dev, stats);

	if (!p_dev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
}
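/* Illustrative example: if the baseline captured at reset time held 10
 * tx_ucast_pkts and the device has since sent 100 more, the subtraction
 * above reports stats->tx_ucast_pkts = 110 - 10 = 100.
 */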
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void ecore_reset_vport_stats(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
		    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(p_dev))
			ecore_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!p_dev->reset_stats)
		DP_INFO(p_dev, "Reset stats not allocated\n");
	else
		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
}