/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

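/* PF-only worker for vport start: translates the relative vport index to an
 * absolute one, acquires an SPQ entry and posts the VPORT_START ramrod with
 * the MTU, vlan/TPA configuration and drop-all rx-mode defaults taken from
 * p_params.
 */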
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u16 rx_mode = 0;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;
	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

	/* TPA related fields */
	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
		    sizeof(struct eth_vport_tpa_param));
	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case ECORE_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		p_ramrod->tx_switching_en = 0;
#endif

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
						    p_params->concrete_fid);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

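/* Starts a vport: VFs channel the request to the PF, while PFs post the
 * ramrod directly via ecore_sp_eth_vport_start().
 */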
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					       p_params->mtu,
					       p_params->remove_inner_vlan,
					       p_params->tpa_mode,
					       p_params->max_buffers_per_cqe,
					       p_params->only_untagged);

	return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

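/* Fills the RSS section of a vport-update ramrod from p_rss; a NULL p_rss
 * simply clears the update flag. Indirection table entries are translated
 * to absolute l2-queue ids before being written.
 */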
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct eth_vport_rss_config *p_config;
	u16 abs_l2_queue = 0;
	int i;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}

	p_config = &p_ramrod->rss_config;

	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
	    ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(p_config->capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		rc = ecore_fw_l2_queue(p_hwfn,
				       (u8)p_rss->rss_ind_table[i],
				       &abs_l2_queue);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
			   i, p_config->indirection_table[i]);
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}

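/* Translates the driver's accept-filter flags into the firmware rx/tx mode
 * state words of a vport-update ramrod.
 */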
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct ecore_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
	    accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
	    accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
	/* On B0 emulation we cannot enable Tx, since this would cause writes
	 * to PVFC HW block which isn't implemented in emulation.
	 */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx mode in vport update\n");
		p_ramrod->common.update_tx_mode_flg = 0;
	}
#endif

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
			      struct vport_update_ramrod_data *p_ramrod,
			      struct ecore_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

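/* Copies the approximate-multicast bin vector into the ramrod as
 * little-endian 32-bit words when an update was requested.
 */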
static void
ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_sp_vport_update_params *p_params)
{
	int i;

	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
		    sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
	}
}

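/* Central vport-update entry point: VFs go through the VF->PF channel, while
 * PFs build a single VPORT_UPDATE ramrod covering activity state, vlan
 * handling, tx-switching, anti-spoofing, RSS, accept mode, TPA and MTU, and
 * post it.
 */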
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct ecore_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false,
				  "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif

	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return spq entry which is taken in ecore_sp_init_request()*/
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
				      p_params->sge_tpa_params);
	if (p_params->mtu) {
		p_ramrod->common.update_mtu_flg = 1;
		p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

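/* Stops a vport: VFs use the PF channel, while PFs post a VPORT_STOP ramrod
 * carrying the absolute vport id.
 */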
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_stop(p_hwfn);

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
			 struct ecore_filter_accept_flags *p_accept_flags)
{
	struct ecore_sp_vport_update_params s_params;

	OSAL_MEMSET(&s_params, 0, sizeof(s_params));
	OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
		    sizeof(struct ecore_filter_accept_flags));

	return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

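/* Applies the accept-mode configuration on every hwfn of the device; VFs are
 * handled via ecore_vf_pf_accept_flags(), PFs via a full vport update.
 */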
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
			u8 vport,
			struct ecore_filter_accept_flags accept_flags,
			u8 update_accept_any_vlan,
			u8 accept_any_vlan,
			enum spq_mode comp_mode,
			struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_sp_vport_update_params vport_update_params;
	enum _ecore_status_t rc;
	int i;

	/* Prepare and send the vport rx_mode change */
	OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc != ECORE_SUCCESS)
				return rc;
			continue;
		}

		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);

		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return ECORE_SUCCESS;
}

static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
				       struct ecore_hw_cid_data *p_cid_data)
{
	if (!p_cid_data->b_cid_allocated)
		return;

	ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
	p_cid_data->b_cid_allocated = false;
}

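/* PF-only worker that posts the RX_QUEUE_START ramrod; the cid and opaque_fid
 * are recorded in the per-queue cid data so that the later stop can reference
 * them.
 */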
enum _ecore_status_t
ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
			      u16 opaque_fid,
			      u32 cid,
			      struct ecore_queue_start_common_params *p_params,
			      u16 bd_max_bytes,
			      dma_addr_t bd_chain_phys_addr,
			      dma_addr_t cqe_pbl_addr,
			      u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_hw_cid_data *p_rx_cid;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Store information for the stop */
	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
	p_rx_cid->cid = cid;
	p_rx_cid->opaque_fid = opaque_fid;
	p_rx_cid->vport_id = p_params->vport_id;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, cid, p_params->queue_id,
		   p_params->vport_id, p_params->sb);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
	p_ramrod->sb_index = (u8)p_params->sb_idx;
	p_ramrod->vport_id = abs_vport_id;
	p_ramrod->stats_counter_id = p_params->stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_params->vf_qid || b_use_zone_a_prod) {
		p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_use_zone_a_prod ? " [legacy]" : "",
			   p_params->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

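/* Starts an Rx queue: VFs go through the PF channel; PFs compute the producer
 * address in the BAR, zero the producers, acquire a CID and post the start
 * ramrod, releasing the CID again on failure.
 */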
enum _ecore_status_t
ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct ecore_queue_start_common_params *p_params,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM * *pp_prod)
{
	struct ecore_hw_cid_data *p_rx_cid;
	u32 init_prod_val = 0;
	u16 abs_l2_queue = 0;
	u8 abs_stats_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev)) {
		return ecore_vf_pf_rxq_start(p_hwfn,
					     p_params->queue_id,
					     p_params->sb,
					     (u8)p_params->sb_idx,
					     bd_max_bytes,
					     bd_chain_phys_addr,
					     cqe_pbl_addr,
					     cqe_pbl_size, pp_prod);
	}

	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
	    GTT_BAR0_MAP_REG_MSDM_RAM +
	    MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	/* Allocate a CID for the queue */
	p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
				   &p_rx_cid->cid);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
		return rc;
	}
	p_rx_cid->b_cid_allocated = true;
	p_params->stats_id = abs_stats_id;
	p_params->vf_qid = 0;

	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
					   opaque_fid,
					   p_rx_cid->cid,
					   p_params,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr,
					   cqe_pbl_size,
					   false);

	if (rc != ECORE_SUCCESS)
		ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      u16 rx_queue_id,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_hw_cid_data *p_rx_cid;
	u16 qid, abs_rx_q_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 i;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxqs_update(p_hwfn,
					       rx_queue_id,
					       num_rxqs,
					       complete_cqe_flg,
					       complete_event_flg);

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		qid = rx_queue_id + i;
		p_rx_cid = &p_hwfn->p_rx_cids[qid];

		/* Get SPQ entry */
		init_data.cid = p_rx_cid->cid;
		init_data.opaque_fid = p_rx_cid->opaque_fid;

		rc = ecore_sp_init_request(p_hwfn, &p_ent,
					   ETH_RAMROD_RX_QUEUE_UPDATE,
					   PROTOCOLID_ETH, &init_data);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;

		ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
		ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return rc;
}

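/* Stops an Rx queue. Completion flags depend on queue ownership: a PF-owned
 * queue may complete on the CQE ring, while VF-owned queues must complete
 * through the event queue so the PF sees the answer.
 */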
enum _ecore_status_t
ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   u16 rx_queue_id,
			   bool eq_completion_only, bool cqe_completion)
{
	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
					    cqe_completion);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
					 p_hwfn->hw_info.opaque_fid) &&
				      !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
					 p_hwfn->hw_info.opaque_fid) ||
	    eq_completion_only;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}

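/* PF-only worker that posts the TX_QUEUE_START ramrod, including the PBL
 * base address/size and the QM physical queue id chosen by
 * ecore_get_qm_pq().
 */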
enum _ecore_status_t
ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
			      u16 opaque_fid,
			      u32 cid,
			      struct ecore_queue_start_common_params *p_params,
			      dma_addr_t pbl_addr,
			      u16 pbl_size,
			      union ecore_qm_pq_params *p_pq_params)
{
	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_hw_cid_data *p_tx_cid;
	u16 pq_id, abs_tx_q_id = 0;
	u8 abs_vport_id;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Store information for the stop */
	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	p_tx_cid->cid = cid;
	p_tx_cid->opaque_fid = opaque_fid;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
	p_ramrod->sb_index = (u8)p_params->sb_idx;
	p_ramrod->stats_counter_id = p_params->stats_id;

	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);

	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

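/* Starts a Tx queue: VFs use the PF channel; PFs acquire a CID, post the
 * start ramrod and hand back the doorbell address for the new queue.
 */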
enum _ecore_status_t
ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct ecore_queue_start_common_params *p_params,
			    u8 tc,
			    dma_addr_t pbl_addr,
			    u16 pbl_size,
			    void OSAL_IOMEM * *pp_doorbell)
{
	struct ecore_hw_cid_data *p_tx_cid;
	union ecore_qm_pq_params pq_params;
	u8 abs_stats_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev)) {
		return ecore_vf_pf_txq_start(p_hwfn,
					     p_params->queue_id,
					     p_params->sb,
					     (u8)p_params->sb_idx,
					     pbl_addr,
					     pbl_size,
					     pp_doorbell);
	}

	rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
	OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));

	pq_params.eth.tc = tc;

	/* Allocate a CID for the queue */
	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
		return rc;
	}
	p_tx_cid->b_cid_allocated = true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   opaque_fid, p_tx_cid->cid, p_params->queue_id,
		   p_params->vport_id, p_params->sb);

	p_params->stats_id = abs_stats_id;

	/* TODO - set tc in the pq_params for multi-cos */
	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
					   opaque_fid,
					   p_tx_cid->cid,
					   p_params,
					   pbl_addr,
					   pbl_size,
					   &pq_params);

	*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
	    DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);

	if (rc != ECORE_SUCCESS)
		ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}

enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
{
	return ECORE_NOTIMPL;
}

enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
						u16 tx_queue_id)
{
	struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_tx_cid->cid;
	init_data.opaque_fid = p_tx_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);

	return rc;
}

static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case ECORE_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case ECORE_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case ECORE_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static void ecore_set_fw_mac_addr(__le16 *fw_msb,
				  __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

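/* Illustrative example (not part of the driver flow): for
 * mac[] = {0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33}, the bytes stored above yield,
 * when each __le16 is read as a little-endian value,
 * fw_msb = 0x000e, fw_mid = 0x1e11 and fw_lsb = 0x2233, i.e. every word
 * holds two MAC bytes swapped into firmware order.
 */
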
static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  struct vport_filter_update_ramrod_data **pp_ramrod,
			  struct ecore_spq_entry **pp_ent,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct ecore_sp_init_data init_data;
	enum eth_filter_action action;
	enum _ecore_status_t rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			    &vport_to_remove_from);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			    &vport_to_add_to);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, pp_ent,
				   ETH_RAMROD_FILTERS_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx filters\n");
		p_ramrod->filter_cmd_hdr.tx = 0;
	}
#endif

	switch (p_filter_cmd->opcode) {
	case ECORE_FILTER_REPLACE:
	case ECORE_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
		break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
		break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case ECORE_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC;
		break;
	case ECORE_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN;
		break;
	case ECORE_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR;
		break;
	case ECORE_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
		break;
	case ECORE_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
		break;
	case ECORE_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI;
		break;
	case ECORE_FILTER_UNUSED: /* @DPDK */
		p_first_filter->type = MAX_ETH_FILTER_TYPE;
		break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
				      &p_first_filter->mac_mid,
				      &p_first_filter->mac_lsb,
				      (u8 *)p_filter_cmd->mac);

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		OSAL_MEMCPY(p_second_filter, p_first_filter,
			    sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = ecore_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn, true,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return ECORE_NOTIMPL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id =
		    (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		    vport_to_remove_from : vport_to_add_to;
	}

	return ECORE_SUCCESS;
}

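/* Builds a unicast filter ramrod via ecore_filter_ucast_common(), posts it
 * and logs the resulting configuration.
 */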
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct eth_filter_cmd_header *p_header;
	enum _ecore_status_t rc;

	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				       &p_ramrod, &p_ent,
				       comp_mode, p_comp_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return ECORE_SUCCESS;
}

/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 ecore_calc_crc32c(u8 *crc32_packet,
			     u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((crc32_packet == OSAL_NULL) ||
	    (crc32_length == 0) || ((crc32_length % 8) != 0)) {
		return crc32_result;
	}

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			/* MSB-first bit-serial CRC; taps given by CRC32_POLY */
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1;
			}
		}
	}

	return crc32_result;
}

static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 ecore_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				  mac, ETH_ALEN);

	return crc & 0xff;
}

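/* Illustrative usage, mirroring ecore_sp_eth_filter_mcast() below: each
 * multicast MAC is hashed into one of 256 approximate-match bins and the
 * corresponding bit is set in the bins vector:
 *
 *	u32 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
 *	OSAL_SET_BIT(bit, bins);
 */
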
static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_mcast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;
	int i;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
		rc = ecore_fw_vport(p_hwfn,
				    p_filter_cmd->vport_to_add_to,
				    &abs_vport_id);
	else
		rc = ecore_fw_vport(p_hwfn,
				    p_filter_cmd->vport_to_remove_from,
				    &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
		    0, sizeof(p_ramrod->approx_mcast.bins));
	OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
		    ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			OSAL_SET_BIT(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);

	return rc;
}

enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
	     (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
		return ECORE_INVAL;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(p_dev)) {
			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_eth_filter_mcast(p_hwfn,
					       opaque_fid,
					       p_filter_cmd,
					       comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_eth_filter_ucast(p_hwfn,
					       opaque_fid,
					       p_filter_cmd,
					       comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
					 statistics_bin);

	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->p_dev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
					 statistics_bin);

	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
	ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
					 statistics_bin);

	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}

static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_eth_stats *p_stats)
{
	struct port_stats port_stats;
	int j;

	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));

	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
			  p_hwfn->mcp_info->port_addr +
			  OFFSETOF(struct public_port, stats),
			  sizeof(port_stats));

	p_stats->rx_64_byte_packets += port_stats.eth.r64;
	p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
	p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
	p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
	p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
	p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
	p_stats->rx_crc_errors += port_stats.eth.rfcs;
	p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_stats->rx_pause_frames += port_stats.eth.rxpf;
	p_stats->rx_pfc_frames += port_stats.eth.rxpp;
	p_stats->rx_align_errors += port_stats.eth.raln;
	p_stats->rx_carrier_errors += port_stats.eth.rfcr;
	p_stats->rx_oversize_packets += port_stats.eth.rovr;
	p_stats->rx_jabbers += port_stats.eth.rjbr;
	p_stats->rx_undersize_packets += port_stats.eth.rund;
	p_stats->rx_fragments += port_stats.eth.rfrg;
	p_stats->tx_64_byte_packets += port_stats.eth.t64;
	p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
	p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
	p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
	p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
	p_stats->tx_pause_frames += port_stats.eth.txpf;
	p_stats->tx_pfc_frames += port_stats.eth.txpp;
	p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
	p_stats->tx_total_collisions += port_stats.eth.tncl;
	p_stats->rx_mac_bytes += port_stats.eth.rbyte;
	p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_stats->tx_mac_bytes += port_stats.eth.tbyte;
	p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
	p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
	p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
	p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
		p_stats->brb_discards += port_stats.brb.brb_discard[j];
	}
}

void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats)
{
	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

#ifndef ASIC_ONLY
	/* Avoid getting PORT stats for emulation. */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return;
#endif

	if (b_get_port_stats && p_hwfn->mcp_info)
		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
}

static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
				   struct ecore_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	OSAL_MEMSET(stats, 0, sizeof(*stats));

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
		    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;

		if (IS_PF(p_dev)) {
			/* The main vport index is relative first */
			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
					IS_PF(p_dev) ? true : false);

out:
		if (IS_PF(p_dev) && p_ptt)
			ecore_ptt_release(p_hwfn, p_ptt);
	}
}

void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats)
{
	u32 i;

	if (!p_dev) {
		OSAL_MEMSET(stats, 0, sizeof(*stats));
		return;
	}

	_ecore_get_vport_stats(p_dev, stats);

	if (!p_dev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
}

/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void ecore_reset_vport_stats(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
		    ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(p_dev))
			ecore_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!p_dev->reset_stats)
		DP_INFO(p_dev, "Reset stats not allocated\n");
	else
		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
}