net/qede/base: move code bits
dpdk.git: drivers/net/qede/base/ecore_l2.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
                                 struct ecore_queue_cid *p_cid)
{
        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
        if (!p_cid->is_vf && IS_PF(p_hwfn->p_dev))
                ecore_cxt_release_cid(p_hwfn, p_cid->cid);
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* This internal variant is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                        u16 opaque_fid, u32 cid, u8 vf_qid,
                        struct ecore_queue_start_common_params *p_params)
{
        bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
        if (p_cid == OSAL_NULL)
                return OSAL_NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->vf_qid = vf_qid;
        p_cid->rel = *p_params;
        p_cid->p_owner = p_hwfn;

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->p_dev)) {
                p_cid->abs = p_cid->rel;
                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
                               &p_cid->abs.queue_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (b_is_same) {
                rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                    &p_cid->abs.stats_id);
                if (rc != ECORE_SUCCESS)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

        /* SBs relevant information was already provided as absolute */
        p_cid->abs.sb = p_cid->rel.sb;
        p_cid->abs.sb_idx = p_cid->rel.sb_idx;

        /* This is tricky - we're actually interested in whether this is a PF
         * entry meant for the VF.
         */
        if (!b_is_same)
                p_cid->is_vf = true;
out:
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->rel.vport_id, p_cid->abs.vport_id,
                   p_cid->rel.queue_id, p_cid->abs.queue_id,
                   p_cid->rel.stats_id, p_cid->abs.stats_id,
                   p_cid->abs.sb, p_cid->abs.sb_idx);

        return p_cid;

fail:
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
        return OSAL_NULL;
}

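/* Illustrative sketch (hypothetical values and names): a PF configuring a
 * queue for one of its VFs would call the internal variant above with the
 * VF's opaque FID and a CID it has already acquired, and free the handle
 * through ecore_eth_queue_cid_release() on teardown.
 *
 *      struct ecore_queue_start_common_params params = {0};
 *      struct ecore_queue_cid *p_cid;
 *
 *      params.vport_id = vf_rel_vport;  // VF-relative vport (assumed name)
 *      params.queue_id = vf_rel_queue;  // VF-relative queue (assumed name)
 *      p_cid = _ecore_eth_queue_to_cid(p_hwfn, vf_opaque_fid, cid,
 *                                      vf_qid, &params);
 *      if (p_cid == OSAL_NULL)
 *              return ECORE_NOMEM;
 *      ...
 *      ecore_eth_queue_cid_release(p_hwfn, p_cid);
 */
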
static struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                       u16 opaque_fid,
                       struct ecore_queue_start_common_params *p_params)
{
        struct ecore_queue_cid *p_cid;
        u32 cid = 0;

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by PF.
         */
        if (IS_PF(p_hwfn->p_dev)) {
                if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                          &cid) != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
                        return OSAL_NULL;
                }
        }

        p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
        if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev))
                ecore_cxt_release_cid(p_hwfn, cid);

        return p_cid;
}

enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
                         struct ecore_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;
        p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

        /* Handle requests for strict behavior on transmission errors */
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
                  p_params->b_err_illegal_vlan_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
                  p_params->b_err_small_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
                  p_params->b_err_anti_spoof ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
                  p_params->b_err_illegal_inband_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
                  p_params->b_err_vlan_insert_with_inband ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
                  p_params->b_err_big_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
                  p_params->b_err_ctrl_frame ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

        /* TPA related fields */
        OSAL_MEMSET(&p_ramrod->tpa_param, 0,
                    sizeof(struct eth_vport_tpa_param));
        p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case ECORE_TPA_MODE_GRO:
                p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_ramrod->tpa_param.tpa_max_size = (u16)-1;
                p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
                p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
                p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                p_ramrod->tx_switching_en = 0;
#endif

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
                                                    p_params->concrete_fid);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

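/* Illustrative sketch (hypothetical values): a caller starting a vport
 * fills the start params and posts the ramrod through the wrapper below,
 * which routes VFs to the PF channel instead.
 *
 *      struct ecore_sp_vport_start_params start = {0};
 *
 *      start.vport_id = 0;
 *      start.mtu = 1500;
 *      start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      start.only_untagged = true;
 *      rc = ecore_sp_vport_start(p_hwfn, &start);
 */
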
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                               p_params->mtu,
                                               p_params->remove_inner_vlan,
                                               p_params->tpa_mode,
                                               p_params->max_buffers_per_cqe,
                                               p_params->only_untagged);

        return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        int i, table_size;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
                          ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
            ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

        p_config->capabilities = 0;

        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;
        p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
                                1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return ECORE_INVAL;

                p_config->indirection_table[i] =
                                OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

        return rc;
}

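/* A minimal worked example of the sizing math above, assuming the common
 * case of ECORE_RSS_IND_TABLE_SIZE == 128: with rss_table_size_log == 7
 * the loop writes min(128, 1 << 7) == 128 indirection entries, while a
 * smaller log size, e.g. 4, populates only the first 16 entries.
 */
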
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct ecore_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                                        accept_flags.update_rx_mode_config;
        p_ramrod->common.update_tx_mode_flg =
                                        accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
        /* On B0 emulation we cannot enable Tx, since this would cause writes
         * to PVFC HW block which isn't implemented in emulation.
         */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx mode in vport update\n");
                p_ramrod->common.update_tx_mode_flg = 0;
        }
#endif

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }
}

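/* Illustrative mapping (hypothetical filter values): a promiscuous-like
 * Rx configuration accepting all unicast, multicast and broadcast would
 * be built as
 *
 *      struct ecore_filter_accept_flags flags = {0};
 *
 *      flags.update_rx_mode_config = 1;
 *      flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *                               ECORE_ACCEPT_UCAST_UNMATCHED |
 *                               ECORE_ACCEPT_MCAST_MATCHED |
 *                               ECORE_ACCEPT_MCAST_UNMATCHED |
 *                               ECORE_ACCEPT_BCAST;
 *
 * which, per the logic above, clears both DROP_ALL fields and sets
 * UCAST_ACCEPT_UNMATCHED, MCAST_ACCEPT_ALL and BCAST_ACCEPT_ALL in the
 * ramrod's rx_mode state.
 */
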
static void
ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
                              struct vport_update_ramrod_data *p_ramrod,
                              struct ecore_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        p_tpa->tpa_max_size = p_params->tpa_max_size;
        p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_sp_vport_update_params *p_params)
{
        int i;

        OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
                    sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = (u32 *)p_params->bins;

                p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
        }
}

enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
                      struct ecore_sp_vport_update_params *p_params,
                      enum spq_mode comp_mode,
                      struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct ecore_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u8 abs_vport_id = 0, val;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev)) {
                rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;

        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                if (p_ramrod->common.tx_switching_en ||
                    p_ramrod->common.update_tx_switching_en_flg) {
                        DP_NOTICE(p_hwfn, false,
                                  "FPGA - why are we seeing tx-switching? Overriding it\n");
                        p_ramrod->common.tx_switching_en = 0;
                        p_ramrod->common.update_tx_switching_en_flg = 1;
                }
#endif
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc != ECORE_SUCCESS) {
                /* Return spq entry which is taken in ecore_sp_init_request()*/
                ecore_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
                                      p_params->sge_tpa_params);
        if (p_params->mtu) {
                p_ramrod->common.update_mtu_flg = 1;
                p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

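/* Illustrative sketch (hypothetical values): toggling accept-any-vlan on
 * an already-started vport only needs the relevant update flag set; the
 * update_*_flg fields left at zero are ignored by the ramrod.
 *
 *      struct ecore_sp_vport_update_params update = {0};
 *
 *      update.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      update.vport_id = 0;
 *      update.update_accept_any_vlan_flg = 1;
 *      update.accept_any_vlan = 1;
 *      rc = ecore_sp_vport_update(p_hwfn, &update, ECORE_SPQ_MODE_EBLOCK,
 *                                 OSAL_NULL);
 */
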
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
                                         u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct ecore_sp_init_data init_data;
        struct ecore_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_stop(p_hwfn);

        rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
                         struct ecore_filter_accept_flags *p_accept_flags)
{
        struct ecore_sp_vport_update_params s_params;

        OSAL_MEMSET(&s_params, 0, sizeof(s_params));
        OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
                    sizeof(struct ecore_filter_accept_flags));

        return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
                        u8 vport,
                        struct ecore_filter_accept_flags accept_flags,
                        u8 update_accept_any_vlan,
                        u8 accept_any_vlan,
                        enum spq_mode comp_mode,
                        struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_sp_vport_update_params vport_update_params;
        enum _ecore_status_t rc;
        int i;

        /* Prepare and send the vport rx_mode change */
        OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                        continue;
                }

                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           comp_mode, p_comp_data);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);

                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           u16 bd_max_bytes,
                           dma_addr_t bd_chain_phys_addr,
                           dma_addr_t cqe_pbl_addr,
                           u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
                   p_cid->abs.vport_id, p_cid->abs.sb);

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
        p_ramrod->sb_index = p_cid->abs.sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->is_vf) {
                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           !!p_cid->b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u16 bd_max_bytes,
                            dma_addr_t bd_chain_phys_addr,
                            dma_addr_t cqe_pbl_addr,
                            u16 cqe_pbl_size,
                            void OSAL_IOMEM * *pp_producer)
{
        u32 init_prod_val = 0;

        *pp_producer = (u8 OSAL_IOMEM *)
                       p_hwfn->regview +
                       GTT_BAR0_MAP_REG_MSDM_RAM +
                       MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_producer, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                          bd_max_bytes,
                                          bd_chain_phys_addr,
                                          cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         struct ecore_rxq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        /* Allocate a CID for the queue */
        p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_NOMEM;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                                 bd_max_bytes,
                                                 bd_chain_phys_addr,
                                                 cqe_pbl_addr, cqe_pbl_size,
                                                 &p_ret_params->p_prod);
        else
                rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
                                           cqe_pbl_size,
                                           &p_ret_params->p_prod);

        /* Provide the caller with a reference to the queue handle */
        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

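/* Illustrative sketch (hypothetical values and names): starting an Rx
 * queue hands back an opaque handle plus the producer address, and that
 * same handle is later given to ecore_eth_rx_queue_stop() further below.
 *
 *      struct ecore_queue_start_common_params qp = {0};
 *      struct ecore_rxq_start_ret_params ret = {0};
 *
 *      qp.vport_id = 0;
 *      qp.queue_id = 0;
 *      qp.stats_id = 0;
 *      qp.sb = sb_id;          // status block for this queue (assumed name)
 *      qp.sb_idx = rx_pi;      // protocol index within the SB (assumed name)
 *      rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, &qp,
 *                                    bd_max_bytes, bd_chain_phys_addr,
 *                                    cqe_pbl_addr, cqe_pbl_size, &ret);
 *      ...
 *      rc = ecore_eth_rx_queue_stop(p_hwfn, ret.p_handle, false, false);
 */
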
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
                              void **pp_rxq_handles,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
                              enum spq_mode comp_mode,
                              struct ecore_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 i;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_rxqs_update(p_hwfn,
                                               (struct ecore_queue_cid **)
                                               pp_rxq_handles,
                                               num_rxqs,
                                               complete_cqe_flg,
                                               complete_event_flg);

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                           ETH_RAMROD_RX_QUEUE_UPDATE,
                                           PROTOCOLID_ETH, &init_data);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           bool b_eq_completion_only,
                           bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to arrive as an EQE to the PF.
         */
        p_ramrod->complete_cqe_flg = (!p_cid->is_vf && !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_rxq,
                                             bool eq_completion_only,
                                             bool cqe_completion)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                                eq_completion_only,
                                                cqe_completion);
        else
                rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           dma_addr_t pbl_addr, u16 pbl_size,
                           u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
        p_ramrod->sb_index = p_cid->abs.sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u8 tc,
                            dma_addr_t pbl_addr, u16 pbl_size,
                            void OSAL_IOMEM * *pp_doorbell)
{
        enum _ecore_status_t rc;

        /* TODO - set tc in the pq_params for multi-cos */
        rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
                                        pbl_addr, pbl_size,
                                        ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = (u8 OSAL_IOMEM *)
                       p_hwfn->doorbells +
                       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

        return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u8 tc,
                         dma_addr_t pbl_addr, u16 pbl_size,
                         struct ecore_txq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_INVAL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                                 pbl_addr, pbl_size,
                                                 &p_ret_params->p_doorbell);
        else
                rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
                                           pbl_addr, pbl_size,
                                           &p_ret_params->p_doorbell);

        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_handle)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
        enum _ecore_status_t rc;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case ECORE_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case ECORE_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case ECORE_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
                          u16 opaque_fid,
                          struct ecore_filter_ucast *p_filter_cmd,
                          struct vport_filter_update_ramrod_data **pp_ramrod,
                          struct ecore_spq_entry **pp_ent,
                          enum spq_mode comp_mode,
                          struct ecore_spq_comp_cb *p_comp_data)
{
        u8 vport_to_add_to = 0, vport_to_remove_from = 0;
        struct vport_filter_update_ramrod_data *p_ramrod;
        struct eth_filter_cmd *p_first_filter;
        struct eth_filter_cmd *p_second_filter;
        struct ecore_sp_init_data init_data;
        enum eth_filter_action action;
        enum _ecore_status_t rc;

        rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                            &vport_to_remove_from);
        if (rc != ECORE_SUCCESS)
                return rc;

        rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                            &vport_to_add_to);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, pp_ent,
                                   ETH_RAMROD_FILTERS_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
        p_ramrod = *pp_ramrod;
        p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
        p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx filters\n");
                p_ramrod->filter_cmd_hdr.tx = 0;
        }
#endif

        switch (p_filter_cmd->opcode) {
        case ECORE_FILTER_REPLACE:
        case ECORE_FILTER_MOVE:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
                break;
        default:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
                break;
        }

        p_first_filter = &p_ramrod->filter_cmds[0];
        p_second_filter = &p_ramrod->filter_cmds[1];

        switch (p_filter_cmd->type) {
        case ECORE_FILTER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_MAC;
                break;
        case ECORE_FILTER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_VLAN;
                break;
        case ECORE_FILTER_MAC_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_PAIR;
                break;
        case ECORE_FILTER_INNER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
                break;
        case ECORE_FILTER_INNER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
                break;
        case ECORE_FILTER_INNER_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
                break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
                break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
                break;
        case ECORE_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI;
                break;
        case ECORE_FILTER_UNUSED: /* @DPDK */
                p_first_filter->type = MAX_ETH_FILTER_TYPE;
                break;
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
                ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
                                      &p_first_filter->mac_mid,
                                      &p_first_filter->mac_lsb,
                                      (u8 *)p_filter_cmd->mac);

        if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
                p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);

        if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_VNI))
                p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);

        if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
                p_second_filter->type = p_first_filter->type;
                p_second_filter->mac_msb = p_first_filter->mac_msb;
                p_second_filter->mac_mid = p_first_filter->mac_mid;
                p_second_filter->mac_lsb = p_first_filter->mac_lsb;
                p_second_filter->vlan_id = p_first_filter->vlan_id;
                p_second_filter->vni = p_first_filter->vni;

                p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

                p_first_filter->vport_id = vport_to_remove_from;

                p_second_filter->action = ETH_FILTER_ACTION_ADD;
                p_second_filter->vport_id = vport_to_add_to;
        } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
                p_first_filter->vport_id = vport_to_add_to;
                OSAL_MEMCPY(p_second_filter, p_first_filter,
                            sizeof(*p_second_filter));
                p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
                p_second_filter->action = ETH_FILTER_ACTION_ADD;
        } else {
                action = ecore_filter_action(p_filter_cmd->opcode);

                if (action == MAX_ETH_FILTER_ACTION) {
                        DP_NOTICE(p_hwfn, true,
                                  "%d is not supported yet\n",
                                  p_filter_cmd->opcode);
                        return ECORE_NOTIMPL;
                }

                p_first_filter->action = action;
                p_first_filter->vport_id =
                    (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
                    vport_to_remove_from : vport_to_add_to;
        }

        return ECORE_SUCCESS;
}

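/* A note on the two-command opcodes handled above: ECORE_FILTER_MOVE is
 * expanded into REMOVE on vport_to_remove_from followed by ADD on
 * vport_to_add_to, while ECORE_FILTER_REPLACE becomes REMOVE_ALL followed
 * by ADD, both on vport_to_add_to; these set cmd_cnt to 2, and every
 * other opcode maps to a single filter command.
 */
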
1251 enum _ecore_status_t
1252 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1253                           u16 opaque_fid,
1254                           struct ecore_filter_ucast *p_filter_cmd,
1255                           enum spq_mode comp_mode,
1256                           struct ecore_spq_comp_cb *p_comp_data)
1257 {
1258         struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1259         struct ecore_spq_entry *p_ent = OSAL_NULL;
1260         struct eth_filter_cmd_header *p_header;
1261         enum _ecore_status_t rc;
1262
1263         rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1264                                        &p_ramrod, &p_ent,
1265                                        comp_mode, p_comp_data);
1266         if (rc != ECORE_SUCCESS) {
1267                 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1268                 return rc;
1269         }
1270         p_header = &p_ramrod->filter_cmd_hdr;
1271         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1272
1273         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1274         if (rc != ECORE_SUCCESS) {
1275                 DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
1276                 return rc;
1277         }
1278
1279         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1280                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1281                    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1282                    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1283                     "REMOVE" :
1284                     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1285                      "MOVE" : "REPLACE")),
1286                    (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1287                    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1288                     "VLAN" : "MAC & VLAN"),
1289                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1290                    p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1291         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1292                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1293                    p_filter_cmd->vport_to_add_to,
1294                    p_filter_cmd->vport_to_remove_from,
1295                    p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1296                    p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1297                    p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1298                    p_filter_cmd->vlan);
1299
1300         return ECORE_SUCCESS;
1301 }
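
/* Illustrative usage sketch, not part of the driver: programming a unicast
 * MAC filter on a single hwfn in blocking mode. The MAC value and vport are
 * placeholders, and ECORE_SPQ_MODE_EBLOCK is assumed to be the blocking
 * completion mode declared in ecore_spq.h.
 *
 *      struct ecore_filter_ucast ucast;
 *
 *      OSAL_MEMSET(&ucast, 0, sizeof(ucast));
 *      ucast.opcode = ECORE_FILTER_ADD;
 *      ucast.type = ECORE_FILTER_MAC;
 *      ucast.is_rx_filter = 1;
 *      ucast.is_tx_filter = 1;
 *      ucast.vport_to_add_to = 0;
 *      OSAL_MEMCPY(ucast.mac, mac_addr, ETH_ALEN);
 *
 *      rc = ecore_sp_eth_filter_ucast(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *                                     &ucast, ECORE_SPQ_MODE_EBLOCK,
 *                                     OSAL_NULL);
 */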
1302
1303 /*******************************************************************************
1304  * Description:
1305  *         Calculates a crc32c-style hash over a buffer ('complement' is unused)
1306  *         Note: crc32_length MUST be a multiple of 8
1307  * Return: the resulting crc, or the seed unchanged on invalid input
1308  ******************************************************************************/
1309 static u32 ecore_calc_crc32c(u8 *crc32_packet,
1310                              u32 crc32_length, u32 crc32_seed, u8 complement)
1311 {
1312         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1313         u8 msb = 0, current_byte = 0;
1314
1315         if ((crc32_packet == OSAL_NULL) ||
1316             (crc32_length == 0) || ((crc32_length % 8) != 0)) {
1317                 return crc32_result;
1318         }
1319
1320         for (byte = 0; byte < crc32_length; byte++) {
1321                 current_byte = crc32_packet[byte];
1322                 for (bit = 0; bit < 8; bit++) {
1323                         msb = (u8)(crc32_result >> 31);
1324                         crc32_result = crc32_result << 1;
1325                         if (msb != (0x1 & (current_byte >> bit))) {
1326                                 crc32_result = crc32_result ^ CRC32_POLY;
1327                                 crc32_result |= 1;
1328                         }
1329                 }
1330         }
1331
1332         return crc32_result;
1333 }
1334
1335 static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
1336 {
1337         u32 packet_buf[2] = { 0 };
1338
1339         OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1340         return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1341 }
1342
1343 u8 ecore_mcast_bin_from_mac(u8 *mac)
1344 {
1345         u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1346                                   mac, ETH_ALEN);
1347
1348         return crc & 0xff;
1349 }
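
/* Taken together, the three helpers above hash a multicast MAC into one of
 * 256 approximate-match bins: ecore_crc32c_le() zero-pads the six MAC bytes
 * into an eight-byte buffer (note it copies exactly six bytes regardless of
 * its len argument), which satisfies the multiple-of-8 length requirement of
 * ecore_calc_crc32c(), and the low eight bits of the resulting crc select
 * the bin. A minimal sketch with a placeholder address:
 *
 *      u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *      u8 bin = ecore_mcast_bin_from_mac(mcast_mac);
 */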
1350
1351 static enum _ecore_status_t
1352 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1353                           u16 opaque_fid,
1354                           struct ecore_filter_mcast *p_filter_cmd,
1355                           enum spq_mode comp_mode,
1356                           struct ecore_spq_comp_cb *p_comp_data)
1357 {
1358         unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1359         struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1360         struct ecore_spq_entry *p_ent = OSAL_NULL;
1361         struct ecore_sp_init_data init_data;
1362         u8 abs_vport_id = 0;
1363         enum _ecore_status_t rc;
1364         int i;
1365
1366         if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1367                 rc = ecore_fw_vport(p_hwfn,
1368                                     p_filter_cmd->vport_to_add_to,
1369                                     &abs_vport_id);
1370         else
1371                 rc = ecore_fw_vport(p_hwfn,
1372                                     p_filter_cmd->vport_to_remove_from,
1373                                     &abs_vport_id);
1374         if (rc != ECORE_SUCCESS)
1375                 return rc;
1376
1377         /* Get SPQ entry */
1378         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1379         init_data.cid = ecore_spq_get_cid(p_hwfn);
1380         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1381         init_data.comp_mode = comp_mode;
1382         init_data.p_comp_data = p_comp_data;
1383
1384         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1385                                    ETH_RAMROD_VPORT_UPDATE,
1386                                    PROTOCOLID_ETH, &init_data);
1387         if (rc != ECORE_SUCCESS) {
1388                 DP_ERR(p_hwfn, "Multicast filter ramrod init failed %d\n", rc);
1389                 return rc;
1390         }
1391
1392         p_ramrod = &p_ent->ramrod.vport_update;
1393         p_ramrod->common.update_approx_mcast_flg = 1;
1394
1395         /* explicitly clear out the entire vector */
1396         OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1397                     0, sizeof(p_ramrod->approx_mcast.bins));
1398         OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
1399                     ETH_MULTICAST_MAC_BINS_IN_REGS);
1400         /* The filter ADD op is an explicit "set" op: it replaces
1401          * any existing multicast filters configured on the vport.
1402          */
1403         if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1404                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1405                         u32 bit;
1406
1407                         bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1408                         OSAL_SET_BIT(bit, bins);
1409                 }
1410
1411                 /* Convert to the LE32 layout the firmware expects */
1412                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1413                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1414                         u32 *p_bins = (u32 *)bins;
1415
1416                         p_ramrod_bins = &p_ramrod->approx_mcast;
1417                         p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
1418                 }
1419         }
1420
1421         p_ramrod->common.vport_id = abs_vport_id;
1422
1423         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1424         if (rc != ECORE_SUCCESS)
1425                 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1426
1427         return rc;
1428 }
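
/* The approx_mcast vector implements approximate matching: several multicast
 * addresses can hash into the same bin, so firmware may deliver frames for
 * addresses that were never configured, and the host stack is still expected
 * to filter those out. Note as well that the bitmap is built in host
 * unsigned longs but consumed as LE32 words, which the conversion loop above
 * relies on.
 */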
1429
1430 enum _ecore_status_t
1431 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1432                        struct ecore_filter_mcast *p_filter_cmd,
1433                        enum spq_mode comp_mode,
1434                        struct ecore_spq_comp_cb *p_comp_data)
1435 {
1436         enum _ecore_status_t rc = ECORE_SUCCESS;
1437         int i;
1438
1439         /* Only ADD and REMOVE operations are supported for multicast */
1440         if (((p_filter_cmd->opcode != ECORE_FILTER_ADD) &&
1441              (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
1442             (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1443                 return ECORE_INVAL;
1444         }
1445
1446         for_each_hwfn(p_dev, i) {
1447                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1448                 u16 opaque_fid;
1449
1450                 if (IS_VF(p_dev)) {
1451                         ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1452                         continue;
1453                 }
1454
1455                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1456                 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1457                                                opaque_fid,
1458                                                p_filter_cmd,
1459                                                comp_mode, p_comp_data);
1460                 if (rc != ECORE_SUCCESS)
1461                         break;
1462         }
1463
1464         return rc;
1465 }
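
/* Illustrative usage sketch, not part of the driver: replacing the multicast
 * set of vport 0 with two addresses. Since ADD has set semantics, the
 * previous configuration is dropped implicitly. mac0/mac1 are placeholder
 * six-byte arrays, and ECORE_SPQ_MODE_EBLOCK is assumed as above.
 *
 *      struct ecore_filter_mcast mcast;
 *
 *      OSAL_MEMSET(&mcast, 0, sizeof(mcast));
 *      mcast.opcode = ECORE_FILTER_ADD;
 *      mcast.vport_to_add_to = 0;
 *      mcast.num_mc_addrs = 2;
 *      OSAL_MEMCPY(mcast.mac[0], mac0, ETH_ALEN);
 *      OSAL_MEMCPY(mcast.mac[1], mac1, ETH_ALEN);
 *
 *      rc = ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_EBLOCK,
 *                                  OSAL_NULL);
 *
 * A num_mc_addrs above ECORE_MAX_MC_ADDRS, or any opcode other than ADD or
 * REMOVE, fails early with ECORE_INVAL.
 */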
1466
1467 enum _ecore_status_t
1468 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1469                        struct ecore_filter_ucast *p_filter_cmd,
1470                        enum spq_mode comp_mode,
1471                        struct ecore_spq_comp_cb *p_comp_data)
1472 {
1473         enum _ecore_status_t rc = ECORE_SUCCESS;
1474         int i;
1475
1476         for_each_hwfn(p_dev, i) {
1477                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1478                 u16 opaque_fid;
1479
1480                 if (IS_VF(p_dev)) {
1481                         rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1482                         continue;
1483                 }
1484
1485                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1486                 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1487                                                opaque_fid,
1488                                                p_filter_cmd,
1489                                                comp_mode, p_comp_data);
1490                 if (rc != ECORE_SUCCESS)
1491                         break;
1492         }
1493
1494         return rc;
1495 }
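
/* Both device-level wrappers above share the same shape: iterate the hwfns,
 * divert VF requests through the PF channel, and stop at the first PF
 * ramrod that fails. Note that the VF paths never break the loop, and the
 * multicast wrapper does not even capture its VF status.
 */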
1496
1497 /* Statistics related code */
1498 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1499                                              u32 *p_addr, u32 *p_len,
1500                                              u16 statistics_bin)
1501 {
1502         if (IS_PF(p_hwfn->p_dev)) {
1503                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1504                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1505                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1506         } else {
1507                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1508                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1509
1510                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1511                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1512         }
1513 }
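
/* The same PF/VF split recurs in the tstats, ustats and mstats helpers
 * below: a PF computes a BAR0 storm RAM offset directly (per statistics bin,
 * or per port in the tstats case), while a VF uses the address and length
 * the PF advertised in the acquire response TLV.
 */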
1514
1515 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1516                                      struct ecore_ptt *p_ptt,
1517                                      struct ecore_eth_stats *p_stats,
1518                                      u16 statistics_bin)
1519 {
1520         struct eth_pstorm_per_queue_stat pstats;
1521         u32 pstats_addr = 0, pstats_len = 0;
1522
1523         __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1524                                          statistics_bin);
1525
1526         OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1527         ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1528
1529         p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1530         p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1531         p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1532         p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1533         p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1534         p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1535         p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
1536 }
1537
1538 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1539                                      struct ecore_ptt *p_ptt,
1540                                      struct ecore_eth_stats *p_stats,
1541                                      u16 statistics_bin)
1542 {
1543         struct tstorm_per_port_stat tstats;
1544         u32 tstats_addr, tstats_len;
1545
1546         if (IS_PF(p_hwfn->p_dev)) {
1547                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1548                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1549                 tstats_len = sizeof(struct tstorm_per_port_stat);
1550         } else {
1551                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1552                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1553
1554                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1555                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1556         }
1557
1558         OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1559         ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1560
1561         p_stats->mftag_filter_discards +=
1562             HILO_64_REGPAIR(tstats.mftag_filter_discard);
1563         p_stats->mac_filter_discards +=
1564             HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1565 }
1566
1567 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1568                                              u32 *p_addr, u32 *p_len,
1569                                              u16 statistics_bin)
1570 {
1571         if (IS_PF(p_hwfn->p_dev)) {
1572                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1573                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1574                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1575         } else {
1576                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1577                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1578
1579                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1580                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1581         }
1582 }
1583
1584 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1585                                      struct ecore_ptt *p_ptt,
1586                                      struct ecore_eth_stats *p_stats,
1587                                      u16 statistics_bin)
1588 {
1589         struct eth_ustorm_per_queue_stat ustats;
1590         u32 ustats_addr = 0, ustats_len = 0;
1591
1592         __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1593                                          statistics_bin);
1594
1595         OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1596         ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1597
1598         p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1599         p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1600         p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1601         p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1602         p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1603         p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1604 }
1605
1606 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1607                                              u32 *p_addr, u32 *p_len,
1608                                              u16 statistics_bin)
1609 {
1610         if (IS_PF(p_hwfn->p_dev)) {
1611                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1612                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1613                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1614         } else {
1615                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1616                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1617
1618                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1619                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1620         }
1621 }
1622
1623 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1624                                      struct ecore_ptt *p_ptt,
1625                                      struct ecore_eth_stats *p_stats,
1626                                      u16 statistics_bin)
1627 {
1628         struct eth_mstorm_per_queue_stat mstats;
1629         u32 mstats_addr = 0, mstats_len = 0;
1630
1631         __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1632                                          statistics_bin);
1633
1634         OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1635         ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1636
1637         p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
1638         p_stats->packet_too_big_discard +=
1639             HILO_64_REGPAIR(mstats.packet_too_big_discard);
1640         p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1641         p_stats->tpa_coalesced_pkts +=
1642             HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1643         p_stats->tpa_coalesced_events +=
1644             HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1645         p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
1646         p_stats->tpa_coalesced_bytes +=
1647             HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1648 }
1649
1650 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1651                                          struct ecore_ptt *p_ptt,
1652                                          struct ecore_eth_stats *p_stats)
1653 {
1654         struct port_stats port_stats;
1655         int j;
1656
1657         OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1658
1659         ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1660                           p_hwfn->mcp_info->port_addr +
1661                           OFFSETOF(struct public_port, stats),
1662                           sizeof(port_stats));
1663
1664         p_stats->rx_64_byte_packets += port_stats.eth.r64;
1665         p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
1666         p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
1667         p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
1668         p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1669         p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1670         p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
1671         p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
1672         p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
1673         p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
1674         p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
1675         p_stats->rx_crc_errors += port_stats.eth.rfcs;
1676         p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
1677         p_stats->rx_pause_frames += port_stats.eth.rxpf;
1678         p_stats->rx_pfc_frames += port_stats.eth.rxpp;
1679         p_stats->rx_align_errors += port_stats.eth.raln;
1680         p_stats->rx_carrier_errors += port_stats.eth.rfcr;
1681         p_stats->rx_oversize_packets += port_stats.eth.rovr;
1682         p_stats->rx_jabbers += port_stats.eth.rjbr;
1683         p_stats->rx_undersize_packets += port_stats.eth.rund;
1684         p_stats->rx_fragments += port_stats.eth.rfrg;
1685         p_stats->tx_64_byte_packets += port_stats.eth.t64;
1686         p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
1687         p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
1688         p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
1689         p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1690         p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1691         p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
1692         p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
1693         p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
1694         p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
1695         p_stats->tx_pause_frames += port_stats.eth.txpf;
1696         p_stats->tx_pfc_frames += port_stats.eth.txpp;
1697         p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
1698         p_stats->tx_total_collisions += port_stats.eth.tncl;
1699         p_stats->rx_mac_bytes += port_stats.eth.rbyte;
1700         p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
1701         p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
1702         p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
1703         p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
1704         p_stats->tx_mac_bytes += port_stats.eth.tbyte;
1705         p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
1706         p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
1707         p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
1708         p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
1709         for (j = 0; j < 8; j++) {
1710                 p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
1711                 p_stats->brb_discards += port_stats.brb.brb_discard[j];
1712         }
1713 }
1714
1715 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1716                              struct ecore_ptt *p_ptt,
1717                              struct ecore_eth_stats *stats,
1718                              u16 statistics_bin, bool b_get_port_stats)
1719 {
1720         __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1721         __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1722         __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1723         __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1724
1725 #ifndef ASIC_ONLY
1726         /* Avoid getting PORT stats for emulation. */
1727         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1728                 return;
1729 #endif
1730
1731         if (b_get_port_stats && p_hwfn->mcp_info)
1732                 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1733 }
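
/* All of the __ecore_get_vport_*stats() readers accumulate with "+=", so a
 * caller must zero the ecore_eth_stats structure before the first call;
 * _ecore_get_vport_stats() below does exactly that before summing over the
 * hwfns.
 */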
1734
1735 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1736                                    struct ecore_eth_stats *stats)
1737 {
1738         u8 fw_vport = 0;
1739         int i;
1740
1741         OSAL_MEMSET(stats, 0, sizeof(*stats));
1742
1743         for_each_hwfn(p_dev, i) {
1744                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1745                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1746                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1747
1748                 if (IS_PF(p_dev)) {
1749                         /* The main vport is at relative index 0 */
1750                         if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
1751                                 DP_ERR(p_hwfn, "No vport available!\n");
1752                                 goto out;
1753                         }
1754                 }
1755
1756                 if (IS_PF(p_dev) && !p_ptt) {
1757                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1758                         continue;
1759                 }
1760
1761                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1762                                         IS_PF(p_dev));
1763
1764 out:
1765                 if (IS_PF(p_dev) && p_ptt)
1766                         ecore_ptt_release(p_hwfn, p_ptt);
1767         }
1768 }
1769
1770 void ecore_get_vport_stats(struct ecore_dev *p_dev,
1771                            struct ecore_eth_stats *stats)
1772 {
1773         u32 i;
1774
1775         if (!p_dev) {
1776                 OSAL_MEMSET(stats, 0, sizeof(*stats));
1777                 return;
1778         }
1779
1780         _ecore_get_vport_stats(p_dev, stats);
1781
1782         if (!p_dev->reset_stats)
1783                 return;
1784
1785         /* Reduce the statistics baseline */
1786         for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
1787                 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
1788 }
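
/* Illustrative usage sketch, not part of the driver: provided
 * p_dev->reset_stats has been allocated, the counters returned here are
 * rebased against the snapshot taken by ecore_reset_vport_stats(), so a
 * caller sees deltas relative to the last reset:
 *
 *      struct ecore_eth_stats stats;
 *
 *      ecore_reset_vport_stats(p_dev);         // establish the baseline
 *      ...                                     // traffic runs
 *      ecore_get_vport_stats(p_dev, &stats);   // counters since the baseline
 */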
1789
1790 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1791 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
1792 {
1793         int i;
1794
1795         for_each_hwfn(p_dev, i) {
1796                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1797                 struct eth_mstorm_per_queue_stat mstats;
1798                 struct eth_ustorm_per_queue_stat ustats;
1799                 struct eth_pstorm_per_queue_stat pstats;
1800                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1801                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1802                 u32 addr = 0, len = 0;
1803
1804                 if (IS_PF(p_dev) && !p_ptt) {
1805                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1806                         continue;
1807                 }
1808
1809                 OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1810                 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1811                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1812
1813                 OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1814                 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1815                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1816
1817                 OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1818                 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1819                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1820
1821                 if (IS_PF(p_dev))
1822                         ecore_ptt_release(p_hwfn, p_ptt);
1823         }
1824
1825         /* PORT statistics are not necessarily reset, so we need to
1826          * read and create a baseline for future statistics.
1827          */
1828         if (!p_dev->reset_stats)
1829                 DP_INFO(p_dev, "Reset stats not allocated\n");
1830         else
1831                 _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
1832 }