net/qede/base: remove unused parameters
drivers/net/qede/base/ecore_l2.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
        u32 queues;
        unsigned long **pp_qid_usage;

        /* The lock is meant to synchronize access to the qid usage */
        osal_mutex_t lock;
};

enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_l2_info *p_l2_info;
        unsigned long **pp_qids;
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return ECORE_SUCCESS;

        p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
        if (!p_l2_info)
                return ECORE_NOMEM;
        p_hwfn->p_l2_info = p_l2_info;

        if (IS_PF(p_hwfn->p_dev)) {
                p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
        } else {
                u8 rx = 0, tx = 0;

                ecore_vf_get_num_rxqs(p_hwfn, &rx);
                ecore_vf_get_num_txqs(p_hwfn, &tx);

                p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
        }

        pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
                               sizeof(unsigned long *) *
                               p_l2_info->queues);
        if (pp_qids == OSAL_NULL)
                return ECORE_NOMEM;
        p_l2_info->pp_qid_usage = pp_qids;

        for (i = 0; i < p_l2_info->queues; i++) {
                pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
                                          MAX_QUEUES_PER_QZONE / 8);
                if (pp_qids[i] == OSAL_NULL)
                        return ECORE_NOMEM;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
#endif

        return ECORE_SUCCESS;
}

void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        if (p_hwfn->p_l2_info == OSAL_NULL)
                return;

        if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
                goto out_l2_info;

        /* Free until hitting the first uninitialized entry */
        for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
                if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
                        break;
                OSAL_VFREE(p_hwfn->p_dev,
                           p_hwfn->p_l2_info->pp_qid_usage[i]);
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        /* The lock is last to be initialized, and only if everything
         * else was allocated successfully.
         */
        if (i == p_hwfn->p_l2_info->queues)
                OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
        p_hwfn->p_l2_info = OSAL_NULL;
}
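
/* Usage sketch (illustrative only, not part of the driver): the qid-usage
 * state above is driven as alloc -> setup -> ... -> free, once per hwfn.
 * Error handling and the surrounding device-init context are omitted, and
 * `p_hwfn` is assumed to be an already-probed function.
 *
 *      if (ecore_l2_alloc(p_hwfn) != ECORE_SUCCESS)
 *              return ECORE_NOMEM;
 *      ecore_l2_setup(p_hwfn);         (* initializes the qid-usage mutex *)
 *      ...                             (* queue CIDs acquired/released here *)
 *      ecore_l2_free(p_hwfn);          (* safe even on partial allocation *)
 */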

static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
        u16 queue_id = p_cid->rel.queue_id;
        bool b_rc = true;
        u8 first;

        OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

        if (queue_id >= p_l2_info->queues) {
                DP_NOTICE(p_hwfn, true,
                          "Requested to increase usage for qzone %04x out of %08x\n",
                          queue_id, p_l2_info->queues);
                b_rc = false;
                goto out;
        }

        first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
                                             MAX_QUEUES_PER_QZONE);
        if (first >= MAX_QUEUES_PER_QZONE) {
                b_rc = false;
                goto out;
        }

        OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
        p_cid->qid_usage_idx = first;

out:
        OSAL_MUTEX_RELEASE(&p_l2_info->lock);
        return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

        OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
                       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

        OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
                                 struct ecore_queue_cid *p_cid)
{
        bool b_legacy_vf = !!(p_cid->vf_legacy &
                              ECORE_QCID_LEGACY_VF_CID);

        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
         * For legacy vf-queues, the CID doesn't go through here.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

        /* VFs maintain the index inside queue-zone on their own */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF)
                ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

        OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* This internal version is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                        u16 opaque_fid, u32 cid,
                        struct ecore_queue_start_common_params *p_params,
                        struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
        if (p_cid == OSAL_NULL)
                return OSAL_NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->p_owner = p_hwfn;

        /* Fill in parameters */
        p_cid->rel.vport_id = p_params->vport_id;
        p_cid->rel.queue_id = p_params->queue_id;
        p_cid->rel.stats_id = p_params->stats_id;
        p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
        p_cid->sb_idx = p_params->sb_idx;

        /* Fill-in bits related to VFs' queues if information was provided */
        if (p_vf_params != OSAL_NULL) {
                p_cid->vfid = p_vf_params->vfid;
                p_cid->vf_qid = p_vf_params->vf_qid;
                p_cid->vf_legacy = p_vf_params->vf_legacy;
        } else {
                p_cid->vfid = ECORE_QUEUE_CID_PF;
        }

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->p_dev)) {
                p_cid->abs = p_cid->rel;

                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
                               &p_cid->abs.queue_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
                rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                    &p_cid->abs.stats_id);
                if (rc != ECORE_SUCCESS)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

out:
        /* VF-images have provided the qid_usage_idx on their own.
         * Otherwise, we need to allocate a unique one.
         */
        if (!p_vf_params) {
                if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
                        goto fail;
        } else {
                p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->rel.vport_id, p_cid->abs.vport_id,
                   p_cid->rel.queue_id, p_cid->qid_usage_idx,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id, p_cid->abs.stats_id,
                   p_cid->sb_igu_id, p_cid->sb_idx);

        return p_cid;

fail:
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
        return OSAL_NULL;
}

struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                       struct ecore_queue_start_common_params *p_params,
                       struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        u8 vfid = ECORE_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;

        /* In case of legacy VFs, the CID can be derived from the additional
         * VF parameters - the VF assumes queue X uses CID X, so we can simply
         * use the vf_qid for this purpose as well.
         */
        if (p_vf_params) {
                vfid = p_vf_params->vfid;

                if (p_vf_params->vf_legacy &
                    ECORE_QCID_LEGACY_VF_CID) {
                        b_legacy_vf = true;
                        cid = p_vf_params->vf_qid;
                }
        }

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by the PF.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
                if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                           &cid, vfid) != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
                        return OSAL_NULL;
                }
        }

        p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                        p_params, p_vf_params);
        if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, cid, vfid);

        return p_cid;
}
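
/* Illustrative sketch (not part of the driver): a PF-own queue acquires its
 * CID through this helper with no VF parameters, and releases it via
 * ecore_eth_queue_cid_release(). All field values below are placeholders;
 * `p_sb` is assumed to be an initialized status-block pointer.
 *
 *      struct ecore_queue_start_common_params params;
 *      struct ecore_queue_cid *p_cid;
 *
 *      OSAL_MEMSET(&params, 0, sizeof(params));
 *      params.vport_id = 0;
 *      params.queue_id = 0;
 *      params.stats_id = 0;
 *      params.p_sb = p_sb;
 *      params.sb_idx = 0;
 *
 *      p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, &params,
 *                                     OSAL_NULL);
 *      if (p_cid != OSAL_NULL)
 *              ecore_eth_queue_cid_release(p_hwfn, p_cid);
 */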

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                          struct ecore_queue_start_common_params *p_params)
{
        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
}

enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
                         struct ecore_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;
        p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

        /* Handle requests for strict behavior on transmission errors */
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
                  p_params->b_err_illegal_vlan_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
                  p_params->b_err_small_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
                  p_params->b_err_anti_spoof ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
                  p_params->b_err_illegal_inband_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
                  p_params->b_err_vlan_insert_with_inband ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
                  p_params->b_err_big_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
                  p_params->b_err_ctrl_frame ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

        /* TPA related fields */
        OSAL_MEMSET(&p_ramrod->tpa_param, 0,
                    sizeof(struct eth_vport_tpa_param));
        p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case ECORE_TPA_MODE_GRO:
                p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_ramrod->tpa_param.tpa_max_size = (u16)-1;
                p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
                p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
                p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                p_ramrod->tx_switching_en = 0;
#endif

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                               p_params->mtu,
                                               p_params->remove_inner_vlan,
                                               p_params->tpa_mode,
                                               p_params->max_buffers_per_cqe,
                                               p_params->only_untagged);

        return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
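
/* Illustrative sketch (not part of the driver): a minimal vport bring-up
 * through the wrapper above. Field values are placeholders, and the fid
 * field names on hw_info are assumptions; the authoritative definition of
 * ecore_sp_vport_start_params lives in ecore_l2_api.h.
 *
 *      struct ecore_sp_vport_start_params start;
 *
 *      OSAL_MEMSET(&start, 0, sizeof(start));
 *      start.vport_id = 0;
 *      start.mtu = 1500;
 *      start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *      rc = ecore_sp_vport_start(p_hwfn, &start);
 */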

static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        int i, table_size;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
                          ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
            ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

        p_config->capabilities = 0;

        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;
        p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
                                1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return ECORE_INVAL;

                p_config->indirection_table[i] =
                                OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

        return rc;
}
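
/* Illustrative sketch (not part of the driver): an RSS update as consumed by
 * the helper above. The indirection-table entries must be CIDs of already
 * started Rx queues; `p_rx_cid` and `update_params` are placeholders for a
 * queue handle and an ecore_sp_vport_update_params instance respectively.
 *
 *      struct ecore_rss_params rss;
 *      int i;
 *
 *      OSAL_MEMSET(&rss, 0, sizeof(rss));
 *      rss.update_rss_config = 1;
 *      rss.rss_enable = 1;
 *      rss.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP;
 *      rss.rss_table_size_log = 7;     (* 128-entry indirection table *)
 *      for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
 *              rss.rss_ind_table[i] = p_rx_cid;
 *      update_params.rss_params = &rss;
 */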

static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct ecore_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                                        accept_flags.update_rx_mode_config;
        p_ramrod->common.update_tx_mode_flg =
                                        accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
        /* On B0 emulation we cannot enable Tx, since this would cause writes
         * to PVFC HW block which isn't implemented in emulation.
         */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx mode in vport update\n");
                p_ramrod->common.update_tx_mode_flg = 0;
        }
#endif

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
                           !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }
}

static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
                              struct ecore_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        p_tpa->tpa_max_size = p_params->tpa_max_size;
        p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_sp_vport_update_params *p_params)
{
        int i;

        OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
                    sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = (u32 *)p_params->bins;

                p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
        }
}

enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
                      struct ecore_sp_vport_update_params *p_params,
                      enum spq_mode comp_mode,
                      struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct ecore_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u8 abs_vport_id = 0, val;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev)) {
                rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;

        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                if (p_ramrod->common.tx_switching_en ||
                    p_ramrod->common.update_tx_switching_en_flg) {
                        DP_NOTICE(p_hwfn, false,
                                  "FPGA - why are we seeing tx-switching? Overriding it\n");
                        p_ramrod->common.tx_switching_en = 0;
                        p_ramrod->common.update_tx_switching_en_flg = 1;
                }
#endif
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc != ECORE_SUCCESS) {
                /* Return spq entry which is taken in ecore_sp_init_request()*/
                ecore_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        ecore_sp_update_mcast_bin(p_ramrod, p_params);

        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
        if (p_params->mtu) {
                p_ramrod->common.update_mtu_flg = 1;
                p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
                                         u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct ecore_sp_init_data init_data;
        struct ecore_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_stop(p_hwfn);

        rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
                         struct ecore_filter_accept_flags *p_accept_flags)
{
        struct ecore_sp_vport_update_params s_params;

        OSAL_MEMSET(&s_params, 0, sizeof(s_params));
        OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
                    sizeof(struct ecore_filter_accept_flags));

        return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
                        u8 vport,
                        struct ecore_filter_accept_flags accept_flags,
                        u8 update_accept_any_vlan,
                        u8 accept_any_vlan,
                        enum spq_mode comp_mode,
                        struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_sp_vport_update_params vport_update_params;
        int i, rc;

        /* Prepare and send the vport rx_mode change */
        OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                        continue;
                }

                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           comp_mode, p_comp_data);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);

                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return 0;
}
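
/* Illustrative sketch (not part of the driver): enabling a permissive Rx
 * accept mode on vport 0 through the command above, blocking until the
 * ramrod completes. Flag values mirror the ECORE_ACCEPT_* bits consumed by
 * ecore_sp_update_accept_mode().
 *
 *      struct ecore_filter_accept_flags flags;
 *
 *      OSAL_MEMSET(&flags, 0, sizeof(flags));
 *      flags.update_rx_mode_config = 1;
 *      flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *                               ECORE_ACCEPT_UCAST_UNMATCHED |
 *                               ECORE_ACCEPT_MCAST_MATCHED |
 *                               ECORE_ACCEPT_BCAST;
 *      rc = ecore_filter_accept_cmd(p_dev, 0, flags, 0, 0,
 *                                   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */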

enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           u16 bd_max_bytes,
                           dma_addr_t bd_chain_phys_addr,
                           dma_addr_t cqe_pbl_addr,
                           u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
                   p_cid->abs.vport_id, p_cid->sb_igu_id);

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
                bool b_legacy_vf = !!(p_cid->vf_legacy &
                                      ECORE_QCID_LEGACY_VF_RX_PROD);

                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u16 bd_max_bytes,
                            dma_addr_t bd_chain_phys_addr,
                            dma_addr_t cqe_pbl_addr,
                            u16 cqe_pbl_size,
                            void OSAL_IOMEM * *pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = (u8 OSAL_IOMEM *)
                    p_hwfn->regview +
                    GTT_BAR0_MAP_REG_MSDM_RAM +
                    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                          bd_max_bytes,
                                          bd_chain_phys_addr,
                                          cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         struct ecore_rxq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        /* Allocate a CID for the queue */
        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_NOMEM;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                                 bd_max_bytes,
                                                 bd_chain_phys_addr,
                                                 cqe_pbl_addr, cqe_pbl_size,
                                                 &p_ret_params->p_prod);
        else
                rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
                                           cqe_pbl_size,
                                           &p_ret_params->p_prod);

        /* Provide the caller with a handle referencing the queue */
        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}
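
/* Illustrative sketch (not part of the driver): the start/stop pairing
 * through the opaque handle returned above. `params`, `bd_ring_phys`,
 * `cqe_pbl_phys` and the sizes are placeholders for rings the caller has
 * already allocated and mapped.
 *
 *      struct ecore_rxq_start_ret_params ret;
 *
 *      OSAL_MEMSET(&ret, 0, sizeof(ret));
 *      rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
 *                                    bd_max_bytes, bd_ring_phys,
 *                                    cqe_pbl_phys, cqe_pbl_size, &ret);
 *      if (rc == ECORE_SUCCESS)
 *              rc = ecore_eth_rx_queue_stop(p_hwfn, ret.p_handle,
 *                                           false, false);
 */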

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
                              void **pp_rxq_handles,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
                              enum spq_mode comp_mode,
                              struct ecore_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 i;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_rxqs_update(p_hwfn,
                                               (struct ecore_queue_cid **)
                                               pp_rxq_handles,
                                               num_rxqs,
                                               complete_cqe_flg,
                                               complete_event_flg);

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                           ETH_RAMROD_RX_QUEUE_UPDATE,
                                           PROTOCOLID_ETH, &init_data);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           bool b_eq_completion_only,
                           bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to come as an EQE to the PF.
         */
        p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
                                       b_eq_completion_only;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
1107
1108 enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
1109                                              void *p_rxq,
1110                                              bool eq_completion_only,
1111                                              bool cqe_completion)
1112 {
1113         struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
1114         enum _ecore_status_t rc = ECORE_NOTIMPL;
1115
1116         if (IS_PF(p_hwfn->p_dev))
1117                 rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1118                                                 eq_completion_only,
1119                                                 cqe_completion);
1120         else
1121                 rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1122
1123         if (rc == ECORE_SUCCESS)
1124                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
1125         return rc;
1126 }
1127
1128 enum _ecore_status_t
1129 ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
1130                            struct ecore_queue_cid *p_cid,
1131                            dma_addr_t pbl_addr, u16 pbl_size,
1132                            u16 pq_id)
1133 {
1134         struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
1135         struct ecore_spq_entry *p_ent = OSAL_NULL;
1136         struct ecore_sp_init_data init_data;
1137         enum _ecore_status_t rc = ECORE_NOTIMPL;
1138
1139         /* Get SPQ entry */
1140         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1141         init_data.cid = p_cid->cid;
1142         init_data.opaque_fid = p_cid->opaque_fid;
1143         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1144
1145         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1146                                    ETH_RAMROD_TX_QUEUE_START,
1147                                    PROTOCOLID_ETH, &init_data);
1148         if (rc != ECORE_SUCCESS)
1149                 return rc;
1150
1151         p_ramrod = &p_ent->ramrod.tx_queue_start;
1152         p_ramrod->vport_id = p_cid->abs.vport_id;
1153
1154         p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
1155         p_ramrod->sb_index = p_cid->sb_idx;
1156         p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1157
1158         p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
1159         p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
1160
1161         p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
1162         DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1163
1164         p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
1165
1166         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1167 }
1168
1169 static enum _ecore_status_t
1170 ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
1171                             struct ecore_queue_cid *p_cid,
1172                             u8 tc,
1173                             dma_addr_t pbl_addr, u16 pbl_size,
1174                             void OSAL_IOMEM * *pp_doorbell)
1175 {
1176         enum _ecore_status_t rc;
1177
1178         /* TODO - set tc in the pq_params for multi-cos */
1179         rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
1180                                         pbl_addr, pbl_size,
1181                                         ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
1182         if (rc != ECORE_SUCCESS)
1183                 return rc;
1184
1185         /* Provide the caller with the necessary return values */
1186         *pp_doorbell = (u8 OSAL_IOMEM *)
1187                        p_hwfn->doorbells +
1188                        DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
1189
1190         return ECORE_SUCCESS;
1191 }
1192
1193 enum _ecore_status_t
1194 ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
1195                          struct ecore_queue_start_common_params *p_params,
1196                          u8 tc,
1197                          dma_addr_t pbl_addr, u16 pbl_size,
1198                          struct ecore_txq_start_ret_params *p_ret_params)
1199 {
1200         struct ecore_queue_cid *p_cid;
1201         enum _ecore_status_t rc;
1202
1203         p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
1204         if (p_cid == OSAL_NULL)
1205                 return ECORE_INVAL;
1206
1207         if (IS_PF(p_hwfn->p_dev))
1208                 rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1209                                                  pbl_addr, pbl_size,
1210                                                  &p_ret_params->p_doorbell);
1211         else
1212                 rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
1213                                            pbl_addr, pbl_size,
1214                                            &p_ret_params->p_doorbell);
1215
1216         if (rc != ECORE_SUCCESS)
1217                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
1218         else
1219                 p_ret_params->p_handle = (void *)p_cid;
1220
1221         return rc;
1222 }
1223
1224 static enum _ecore_status_t
1225 ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
1226                            struct ecore_queue_cid *p_cid)
1227 {
1228         struct ecore_spq_entry *p_ent = OSAL_NULL;
1229         struct ecore_sp_init_data init_data;
1230         enum _ecore_status_t rc;
1231
1232         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1233         init_data.cid = p_cid->cid;
1234         init_data.opaque_fid = p_cid->opaque_fid;
1235         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1236
1237         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1238                                    ETH_RAMROD_TX_QUEUE_STOP,
1239                                    PROTOCOLID_ETH, &init_data);
1240         if (rc != ECORE_SUCCESS)
1241                 return rc;
1242
1243         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1244 }
1245
1246 enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
1247                                              void *p_handle)
1248 {
1249         struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
1250         enum _ecore_status_t rc;
1251
1252         if (IS_PF(p_hwfn->p_dev))
1253                 rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1254         else
1255                 rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
1256
1257         if (rc == ECORE_SUCCESS)
1258                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
1259         return rc;
1260 }
1261
1262 static enum eth_filter_action
1263 ecore_filter_action(enum ecore_filter_opcode opcode)
1264 {
1265         enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1266
1267         switch (opcode) {
1268         case ECORE_FILTER_ADD:
1269                 action = ETH_FILTER_ACTION_ADD;
1270                 break;
1271         case ECORE_FILTER_REMOVE:
1272                 action = ETH_FILTER_ACTION_REMOVE;
1273                 break;
1274         case ECORE_FILTER_FLUSH:
1275                 action = ETH_FILTER_ACTION_REMOVE_ALL;
1276                 break;
1277         default:
1278                 action = MAX_ETH_FILTER_ACTION;
1279         }
1280
1281         return action;
1282 }
1283
1284 static enum _ecore_status_t
1285 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
1286                           u16 opaque_fid,
1287                           struct ecore_filter_ucast *p_filter_cmd,
1288                           struct vport_filter_update_ramrod_data **pp_ramrod,
1289                           struct ecore_spq_entry **pp_ent,
1290                           enum spq_mode comp_mode,
1291                           struct ecore_spq_comp_cb *p_comp_data)
1292 {
1293         u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1294         struct vport_filter_update_ramrod_data *p_ramrod;
1295         struct eth_filter_cmd *p_first_filter;
1296         struct eth_filter_cmd *p_second_filter;
1297         struct ecore_sp_init_data init_data;
1298         enum eth_filter_action action;
1299         enum _ecore_status_t rc;
1300
1301         rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1302                             &vport_to_remove_from);
1303         if (rc != ECORE_SUCCESS)
1304                 return rc;
1305
1306         rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1307                             &vport_to_add_to);
1308         if (rc != ECORE_SUCCESS)
1309                 return rc;
1310
1311         /* Get SPQ entry */
1312         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1313         init_data.cid = ecore_spq_get_cid(p_hwfn);
1314         init_data.opaque_fid = opaque_fid;
1315         init_data.comp_mode = comp_mode;
1316         init_data.p_comp_data = p_comp_data;
1317
1318         rc = ecore_sp_init_request(p_hwfn, pp_ent,
1319                                    ETH_RAMROD_FILTERS_UPDATE,
1320                                    PROTOCOLID_ETH, &init_data);
1321         if (rc != ECORE_SUCCESS)
1322                 return rc;
1323
1324         *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1325         p_ramrod = *pp_ramrod;
1326         p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1327         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1328
1329 #ifndef ASIC_ONLY
1330         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1331                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1332                            "Non-Asic - prevent Tx filters\n");
1333                 p_ramrod->filter_cmd_hdr.tx = 0;
1334         }
1335 #endif
1336
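        /* MOVE and REPLACE are composed of two filter commands (a remove
         * followed by an add), so they consume both entries of the ramrod's
         * filter_cmds[] array; every other opcode needs a single command.
         */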
1337         switch (p_filter_cmd->opcode) {
1338         case ECORE_FILTER_REPLACE:
1339         case ECORE_FILTER_MOVE:
1340                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
1341                 break;
1342         default:
1343                 p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
1344                 break;
1345         }
1346
1347         p_first_filter = &p_ramrod->filter_cmds[0];
1348         p_second_filter = &p_ramrod->filter_cmds[1];
1349
1350         switch (p_filter_cmd->type) {
1351         case ECORE_FILTER_MAC:
1352                 p_first_filter->type = ETH_FILTER_TYPE_MAC;
1353                 break;
1354         case ECORE_FILTER_VLAN:
1355                 p_first_filter->type = ETH_FILTER_TYPE_VLAN;
1356                 break;
1357         case ECORE_FILTER_MAC_VLAN:
1358                 p_first_filter->type = ETH_FILTER_TYPE_PAIR;
1359                 break;
1360         case ECORE_FILTER_INNER_MAC:
1361                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
1362                 break;
1363         case ECORE_FILTER_INNER_VLAN:
1364                 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
1365                 break;
1366         case ECORE_FILTER_INNER_PAIR:
1367                 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
1368                 break;
1369         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1370                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1371                 break;
1372         case ECORE_FILTER_MAC_VNI_PAIR:
1373                 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
1374                 break;
1375         case ECORE_FILTER_VNI:
1376                 p_first_filter->type = ETH_FILTER_TYPE_VNI;
1377                 break;
1378         case ECORE_FILTER_UNUSED: /* @DPDK */
1379                 p_first_filter->type = MAX_ETH_FILTER_TYPE;
1380                 break;
1381         }
1382
1383         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1384             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1385             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1386             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1387             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1388             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1389                 ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1390                                       &p_first_filter->mac_mid,
1391                                       &p_first_filter->mac_lsb,
1392                                       (u8 *)p_filter_cmd->mac);
1393
1394         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1395             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1396             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1397             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1398                 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1399
1400         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1401             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1402             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1403                 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1404
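        /* MOVE: command 0 removes the filter from its current vport and
         * command 1 adds the same filter on the destination vport.
         * REPLACE: command 0 flushes all filters on the vport (REMOVE_ALL)
         * and command 1 installs the new one.
         */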
1405         if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1406                 p_second_filter->type = p_first_filter->type;
1407                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1408                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1409                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1410                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1411                 p_second_filter->vni = p_first_filter->vni;
1412
1413                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1414
1415                 p_first_filter->vport_id = vport_to_remove_from;
1416
1417                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1418                 p_second_filter->vport_id = vport_to_add_to;
1419         } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1420                 p_first_filter->vport_id = vport_to_add_to;
1421                 OSAL_MEMCPY(p_second_filter, p_first_filter,
1422                             sizeof(*p_second_filter));
1423                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1424                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1425         } else {
1426                 action = ecore_filter_action(p_filter_cmd->opcode);
1427
1428                 if (action == MAX_ETH_FILTER_ACTION) {
1429                         DP_NOTICE(p_hwfn, true,
1430                                   "Filter opcode %d is not supported yet\n",
1431                                   p_filter_cmd->opcode);
1432                         return ECORE_NOTIMPL;
1433                 }
1434
1435                 p_first_filter->action = action;
1436                 p_first_filter->vport_id =
1437                     (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1438                     vport_to_remove_from : vport_to_add_to;
1439         }
1440
1441         return ECORE_SUCCESS;
1442 }
1443
1444 enum _ecore_status_t
1445 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1446                           u16 opaque_fid,
1447                           struct ecore_filter_ucast *p_filter_cmd,
1448                           enum spq_mode comp_mode,
1449                           struct ecore_spq_comp_cb *p_comp_data)
1450 {
1451         struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1452         struct ecore_spq_entry *p_ent = OSAL_NULL;
1453         struct eth_filter_cmd_header *p_header;
1454         enum _ecore_status_t rc;
1455
1456         rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1457                                        &p_ramrod, &p_ent,
1458                                        comp_mode, p_comp_data);
1459         if (rc != ECORE_SUCCESS) {
1460                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1461                 return rc;
1462         }
1463         p_header = &p_ramrod->filter_cmd_hdr;
1464         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1465
1466         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1467         if (rc != ECORE_SUCCESS) {
1468                 DP_ERR(p_hwfn, "Unicast filter ramrod post failed %d\n", rc);
1469                 return rc;
1470         }
1471
1472         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1473                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1474                    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1475                    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1476                     "REMOVE" :
1477                     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1478                      "MOVE" : "REPLACE")),
1479                    (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1480                    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1481                     "VLAN" : "MAC & VLAN"),
1482                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1483                    p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1484         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1485                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1486                    p_filter_cmd->vport_to_add_to,
1487                    p_filter_cmd->vport_to_remove_from,
1488                    p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1489                    p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1490                    p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1491                    p_filter_cmd->vlan);
1492
1493         return ECORE_SUCCESS;
1494 }
1495
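/* Illustrative sketch (not part of the driver): how a caller might use
 * ecore_sp_eth_filter_ucast() to install a MAC filter on relative vport 0
 * in blocking mode. Field names follow the ecore_filter_ucast usage visible
 * above; the 6-byte MAC length is an assumption.
 */
#if 0
static enum _ecore_status_t example_add_mac_filter(struct ecore_hwfn *p_hwfn,
                                                   const u8 *mac)
{
        struct ecore_filter_ucast ucast;

        OSAL_MEMSET(&ucast, 0, sizeof(ucast));
        ucast.opcode = ECORE_FILTER_ADD;        /* single ADD command */
        ucast.type = ECORE_FILTER_MAC;          /* match on outer MAC only */
        ucast.is_rx_filter = 1;
        ucast.is_tx_filter = 1;
        ucast.vport_to_add_to = 0;              /* relative vport index */
        OSAL_MEMCPY(ucast.mac, mac, 6);         /* assumed 6-byte MAC field */

        return ecore_sp_eth_filter_ucast(p_hwfn, p_hwfn->hw_info.opaque_fid,
                                         &ucast, ECORE_SPQ_MODE_EBLOCK,
                                         OSAL_NULL);
}
#endif
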
1496 /*******************************************************************************
1497  * Description:
1498  *         Calculates CRC32c over a buffer
1499  *         Note: crc32_length MUST be a multiple of 8
1500  * Return: the running CRC, or crc32_seed unchanged on invalid input
1501  ******************************************************************************/
1502 static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
1503 {
1504         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1505         u8 msb = 0, current_byte = 0;
1506
1507         if ((crc32_packet == OSAL_NULL) ||
1508             (crc32_length == 0) || ((crc32_length % 8) != 0)) {
1509                 return crc32_result;
1510         }
1511
1512         for (byte = 0; byte < crc32_length; byte++) {
1513                 current_byte = crc32_packet[byte];
1514                 for (bit = 0; bit < 8; bit++) {
1515                         msb = (u8)(crc32_result >> 31);
1516                         crc32_result = crc32_result << 1;
1517                         if (msb != (0x1 & (current_byte >> bit))) {
1518                                 crc32_result = crc32_result ^ CRC32_POLY;
1519                                 crc32_result |= 1;
1520                         }
1521                 }
1522         }
1523
1524         return crc32_result;
1525 }
1526
1527 static u32 ecore_crc32c_le(u32 seed, u8 *mac)
1528 {
1529         u32 packet_buf[2] = { 0 };
1530
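        /* Zero-pad the 6-byte MAC into an 8-byte buffer; the CRC routine
         * above only accepts lengths that are a multiple of 8.
         */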
1531         OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1532         return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
1533 }
1534
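/* Multicast filtering is approximate: each MAC is hashed into one of 256
 * bins (the low byte of its CRC32c), and ecore_sp_eth_filter_mcast() below
 * sets the matching bit in the vport's approx_mcast vector.
 */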
1535 u8 ecore_mcast_bin_from_mac(u8 *mac)
1536 {
1537         u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
1538
1539         return crc & 0xff;
1540 }
1541
1542 static enum _ecore_status_t
1543 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1544                           struct ecore_filter_mcast *p_filter_cmd,
1545                           enum spq_mode comp_mode,
1546                           struct ecore_spq_comp_cb *p_comp_data)
1547 {
1548         unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1549         struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1550         struct ecore_spq_entry *p_ent = OSAL_NULL;
1551         struct ecore_sp_init_data init_data;
1552         u8 abs_vport_id = 0;
1553         enum _ecore_status_t rc;
1554         int i;
1555
1556         if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1557                 rc = ecore_fw_vport(p_hwfn,
1558                                     p_filter_cmd->vport_to_add_to,
1559                                     &abs_vport_id);
1560         else
1561                 rc = ecore_fw_vport(p_hwfn,
1562                                     p_filter_cmd->vport_to_remove_from,
1563                                     &abs_vport_id);
1564         if (rc != ECORE_SUCCESS)
1565                 return rc;
1566
1567         /* Get SPQ entry */
1568         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1569         init_data.cid = ecore_spq_get_cid(p_hwfn);
1570         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1571         init_data.comp_mode = comp_mode;
1572         init_data.p_comp_data = p_comp_data;
1573
1574         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1575                                    ETH_RAMROD_VPORT_UPDATE,
1576                                    PROTOCOLID_ETH, &init_data);
1577         if (rc != ECORE_SUCCESS) {
1578                 DP_ERR(p_hwfn, "Multicast command failed %d\n", rc);
1579                 return rc;
1580         }
1581
1582         p_ramrod = &p_ent->ramrod.vport_update;
1583         p_ramrod->common.update_approx_mcast_flg = 1;
1584
1585         /* explicitly clear out the entire vector */
1586         OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1587                     0, sizeof(p_ramrod->approx_mcast.bins));
1588         OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
1589                     ETH_MULTICAST_MAC_BINS_IN_REGS);
1590         /* A filter ADD op is an explicit set op: it removes any
1591          * filters previously configured for the vport.
1592          */
1593         if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1594                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1595                         u32 bit;
1596
1597                         bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1598                         OSAL_SET_BIT(bit, bins);
1599                 }
1600
1601                 /* Convert to the correct endianness */
1602                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1603                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1604                         u32 *p_bins = (u32 *)bins;
1605
1606                         p_ramrod_bins = &p_ramrod->approx_mcast;
1607                         p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
1608                 }
1609         }
1610
1611         p_ramrod->common.vport_id = abs_vport_id;
1612
1613         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1614         if (rc != ECORE_SUCCESS)
1615                 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1616
1617         return rc;
1618 }
1619
1620 enum _ecore_status_t
1621 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1622                        struct ecore_filter_mcast *p_filter_cmd,
1623                        enum spq_mode comp_mode,
1624                        struct ecore_spq_comp_cb *p_comp_data)
1625 {
1626         enum _ecore_status_t rc = ECORE_SUCCESS;
1627         int i;
1628
1629         /* Only ADD and REMOVE operations are supported for multicast */
1630         if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1631              p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1632             (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1633                 return ECORE_INVAL;
1634         }
1635
1636         for_each_hwfn(p_dev, i) {
1637                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1638
1639                 if (IS_VF(p_dev)) {
1640                         ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1641                         continue;
1642                 }
1643
1644                 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1645                                                p_filter_cmd,
1646                                                comp_mode, p_comp_data);
1647                 if (rc != ECORE_SUCCESS)
1648                         break;
1649         }
1650
1651         return rc;
1652 }
1653
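/* Illustrative sketch (not part of the driver): replacing the multicast
 * configuration of relative vport 0 with a caller-supplied address list.
 * Field names follow the ecore_filter_mcast usage visible above; the
 * 6-byte MAC length and the mac[][] layout are assumptions.
 */
#if 0
static enum _ecore_status_t example_set_mcast_list(struct ecore_dev *p_dev,
                                                   const u8 macs[][6],
                                                   u8 count)
{
        struct ecore_filter_mcast mcast;
        u8 i;

        if (count > ECORE_MAX_MC_ADDRS)
                return ECORE_INVAL;

        OSAL_MEMSET(&mcast, 0, sizeof(mcast));
        mcast.opcode = ECORE_FILTER_ADD;        /* ADD replaces all bins */
        mcast.vport_to_add_to = 0;
        mcast.num_mc_addrs = count;
        for (i = 0; i < count; i++)
                OSAL_MEMCPY(mcast.mac[i], macs[i], 6);

        return ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_CB,
                                      OSAL_NULL);
}
#endif
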
1654 enum _ecore_status_t
1655 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1656                        struct ecore_filter_ucast *p_filter_cmd,
1657                        enum spq_mode comp_mode,
1658                        struct ecore_spq_comp_cb *p_comp_data)
1659 {
1660         enum _ecore_status_t rc = ECORE_SUCCESS;
1661         int i;
1662
1663         for_each_hwfn(p_dev, i) {
1664                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1665                 u16 opaque_fid;
1666
1667                 if (IS_VF(p_dev)) {
1668                         rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1669                         continue;
1670                 }
1671
1672                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1673                 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1674                                                opaque_fid,
1675                                                p_filter_cmd,
1676                                                comp_mode, p_comp_data);
1677                 if (rc != ECORE_SUCCESS)
1678                         break;
1679         }
1680
1681         return rc;
1682 }
1683
1684 /* Statistics related code */
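/* Per-queue counters live in four storm RAM blocks: PSTORM (Tx byte/packet
 * counters), USTORM (Rx byte/packet counters), MSTORM (Rx discards and TPA
 * counters) and TSTORM (per-port filter discards). Each helper below reads
 * one block and accumulates it into the caller's ecore_eth_stats.
 */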
1685 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1686                                              u32 *p_addr, u32 *p_len,
1687                                              u16 statistics_bin)
1688 {
1689         if (IS_PF(p_hwfn->p_dev)) {
1690                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1691                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1692                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1693         } else {
1694                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1695                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1696
1697                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1698                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1699         }
1700 }
1701
1702 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1703                                      struct ecore_ptt *p_ptt,
1704                                      struct ecore_eth_stats *p_stats,
1705                                      u16 statistics_bin)
1706 {
1707         struct eth_pstorm_per_queue_stat pstats;
1708         u32 pstats_addr = 0, pstats_len = 0;
1709
1710         __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1711                                          statistics_bin);
1712
1713         OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1714         ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1715
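        /* Storm RAM keeps each counter as a {hi, lo} pair of 32-bit
         * registers; HILO_64_REGPAIR folds such a pair into one u64
         * before it is accumulated.
         */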
1716         p_stats->common.tx_ucast_bytes +=
1717                 HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1718         p_stats->common.tx_mcast_bytes +=
1719                 HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1720         p_stats->common.tx_bcast_bytes +=
1721                 HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1722         p_stats->common.tx_ucast_pkts +=
1723                 HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1724         p_stats->common.tx_mcast_pkts +=
1725                 HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1726         p_stats->common.tx_bcast_pkts +=
1727                 HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1728         p_stats->common.tx_err_drop_pkts +=
1729                 HILO_64_REGPAIR(pstats.error_drop_pkts);
1730 }
1731
1732 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1733                                      struct ecore_ptt *p_ptt,
1734                                      struct ecore_eth_stats *p_stats)
1735 {
1736         struct tstorm_per_port_stat tstats;
1737         u32 tstats_addr, tstats_len;
1738
1739         if (IS_PF(p_hwfn->p_dev)) {
1740                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1741                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1742                 tstats_len = sizeof(struct tstorm_per_port_stat);
1743         } else {
1744                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1745                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1746
1747                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1748                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1749         }
1750
1751         OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1752         ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1753
1754         p_stats->common.mftag_filter_discards +=
1755                 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1756         p_stats->common.mac_filter_discards +=
1757                 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1758 }
1759
1760 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1761                                              u32 *p_addr, u32 *p_len,
1762                                              u16 statistics_bin)
1763 {
1764         if (IS_PF(p_hwfn->p_dev)) {
1765                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1766                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1767                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1768         } else {
1769                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1770                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1771
1772                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1773                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1774         }
1775 }
1776
1777 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1778                                      struct ecore_ptt *p_ptt,
1779                                      struct ecore_eth_stats *p_stats,
1780                                      u16 statistics_bin)
1781 {
1782         struct eth_ustorm_per_queue_stat ustats;
1783         u32 ustats_addr = 0, ustats_len = 0;
1784
1785         __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1786                                          statistics_bin);
1787
1788         OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1789         ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1790
1791         p_stats->common.rx_ucast_bytes +=
1792                 HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1793         p_stats->common.rx_mcast_bytes +=
1794                 HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1795         p_stats->common.rx_bcast_bytes +=
1796                 HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1797         p_stats->common.rx_ucast_pkts +=
1798                 HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1799         p_stats->common.rx_mcast_pkts +=
1800                 HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1801         p_stats->common.rx_bcast_pkts +=
1802                 HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1803 }
1804
1805 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1806                                              u32 *p_addr, u32 *p_len,
1807                                              u16 statistics_bin)
1808 {
1809         if (IS_PF(p_hwfn->p_dev)) {
1810                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1811                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1812                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1813         } else {
1814                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1815                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1816
1817                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1818                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1819         }
1820 }
1821
1822 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1823                                      struct ecore_ptt *p_ptt,
1824                                      struct ecore_eth_stats *p_stats,
1825                                      u16 statistics_bin)
1826 {
1827         struct eth_mstorm_per_queue_stat mstats;
1828         u32 mstats_addr = 0, mstats_len = 0;
1829
1830         __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1831                                          statistics_bin);
1832
1833         OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1834         ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1835
1836         p_stats->common.no_buff_discards +=
1837                 HILO_64_REGPAIR(mstats.no_buff_discard);
1838         p_stats->common.packet_too_big_discard +=
1839                 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1840         p_stats->common.ttl0_discard +=
1841                 HILO_64_REGPAIR(mstats.ttl0_discard);
1842         p_stats->common.tpa_coalesced_pkts +=
1843                 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1844         p_stats->common.tpa_coalesced_events +=
1845                 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1846         p_stats->common.tpa_aborts_num +=
1847                 HILO_64_REGPAIR(mstats.tpa_aborts_num);
1848         p_stats->common.tpa_coalesced_bytes +=
1849                 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1850 }
1851
1852 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1853                                          struct ecore_ptt *p_ptt,
1854                                          struct ecore_eth_stats *p_stats)
1855 {
1856         struct ecore_eth_stats_common *p_common = &p_stats->common;
1857         struct port_stats port_stats;
1858         int j;
1859
1860         OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1861
1862         ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1863                           p_hwfn->mcp_info->port_addr +
1864                           OFFSETOF(struct public_port, stats),
1865                           sizeof(port_stats));
1866
1867         p_common->rx_64_byte_packets += port_stats.eth.r64;
1868         p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1869         p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1870         p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1871         p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1872         p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1873         p_common->rx_crc_errors += port_stats.eth.rfcs;
1874         p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1875         p_common->rx_pause_frames += port_stats.eth.rxpf;
1876         p_common->rx_pfc_frames += port_stats.eth.rxpp;
1877         p_common->rx_align_errors += port_stats.eth.raln;
1878         p_common->rx_carrier_errors += port_stats.eth.rfcr;
1879         p_common->rx_oversize_packets += port_stats.eth.rovr;
1880         p_common->rx_jabbers += port_stats.eth.rjbr;
1881         p_common->rx_undersize_packets += port_stats.eth.rund;
1882         p_common->rx_fragments += port_stats.eth.rfrg;
1883         p_common->tx_64_byte_packets += port_stats.eth.t64;
1884         p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1885         p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1886         p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1887         p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1888         p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1889         p_common->tx_pause_frames += port_stats.eth.txpf;
1890         p_common->tx_pfc_frames += port_stats.eth.txpp;
1891         p_common->rx_mac_bytes += port_stats.eth.rbyte;
1892         p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1893         p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1894         p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1895         p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1896         p_common->tx_mac_bytes += port_stats.eth.tbyte;
1897         p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1898         p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1899         p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1900         p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1901         for (j = 0; j < 8; j++) {
1902                 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1903                 p_common->brb_discards += port_stats.brb.brb_discard[j];
1904         }
1905
1906         if (ECORE_IS_BB(p_hwfn->p_dev)) {
1907                 struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1908
1909                 p_bb->rx_1519_to_1522_byte_packets +=
1910                         port_stats.eth.u0.bb0.r1522;
1911                 p_bb->rx_1519_to_2047_byte_packets +=
1912                         port_stats.eth.u0.bb0.r2047;
1913                 p_bb->rx_2048_to_4095_byte_packets +=
1914                         port_stats.eth.u0.bb0.r4095;
1915                 p_bb->rx_4096_to_9216_byte_packets +=
1916                         port_stats.eth.u0.bb0.r9216;
1917                 p_bb->rx_9217_to_16383_byte_packets +=
1918                         port_stats.eth.u0.bb0.r16383;
1919                 p_bb->tx_1519_to_2047_byte_packets +=
1920                         port_stats.eth.u1.bb1.t2047;
1921                 p_bb->tx_2048_to_4095_byte_packets +=
1922                         port_stats.eth.u1.bb1.t4095;
1923                 p_bb->tx_4096_to_9216_byte_packets +=
1924                         port_stats.eth.u1.bb1.t9216;
1925                 p_bb->tx_9217_to_16383_byte_packets +=
1926                         port_stats.eth.u1.bb1.t16383;
1927                 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1928                 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1929         } else {
1930                 struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
1931
1932                 p_ah->rx_1519_to_max_byte_packets +=
1933                         port_stats.eth.u0.ah0.r1519_to_max;
1934                 p_ah->tx_1519_to_max_byte_packets +=
1935                         port_stats.eth.u1.ah1.t1519_to_max;
1936         }
1937 }
1938
1939 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1940                              struct ecore_ptt *p_ptt,
1941                              struct ecore_eth_stats *stats,
1942                              u16 statistics_bin, bool b_get_port_stats)
1943 {
1944         __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1945         __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1946         __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
1947         __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1948
1949 #ifndef ASIC_ONLY
1950         /* Avoid getting PORT stats for emulation. */
1951         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1952                 return;
1953 #endif
1954
1955         if (b_get_port_stats && p_hwfn->mcp_info)
1956                 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1957 }
1958
1959 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1960                                    struct ecore_eth_stats *stats)
1961 {
1962         u8 fw_vport = 0;
1963         int i;
1964
1965         OSAL_MEMSET(stats, 0, sizeof(*stats));
1966
1967         for_each_hwfn(p_dev, i) {
1968                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1969                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1970                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1971
1972                 if (IS_PF(p_dev)) {
1973                         /* Resolve the main vport (relative index 0) to its FW index */
1974                         if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
1975                                 DP_ERR(p_hwfn, "No vport available!\n");
1976                                 goto out;
1977                         }
1978                 }
1979
1980                 if (IS_PF(p_dev) && !p_ptt) {
1981                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1982                         continue;
1983                 }
1984
1985                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1986                                         IS_PF(p_dev) ? true : false);
1987
1988 out:
1989                 if (IS_PF(p_dev) && p_ptt)
1990                         ecore_ptt_release(p_hwfn, p_ptt);
1991         }
1992 }
1993
1994 void ecore_get_vport_stats(struct ecore_dev *p_dev,
1995                            struct ecore_eth_stats *stats)
1996 {
1997         u32 i;
1998
1999         if (!p_dev) {
2000                 OSAL_MEMSET(stats, 0, sizeof(*stats));
2001                 return;
2002         }
2003
2004         _ecore_get_vport_stats(p_dev, stats);
2005
2006         if (!p_dev->reset_stats)
2007                 return;
2008
2009         /* Reduce the statistics baseline */
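        /* Both structures are treated as flat arrays of u64 counters, so an
         * element-wise subtraction removes the baseline captured at the last
         * reset.
         */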
2010         for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2011                 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2012 }
2013
2014 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
2015 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2016 {
2017         int i;
2018
2019         for_each_hwfn(p_dev, i) {
2020                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2021                 struct eth_mstorm_per_queue_stat mstats;
2022                 struct eth_ustorm_per_queue_stat ustats;
2023                 struct eth_pstorm_per_queue_stat pstats;
2024                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2025                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2026                 u32 addr = 0, len = 0;
2027
2028                 if (IS_PF(p_dev) && !p_ptt) {
2029                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2030                         continue;
2031                 }
2032
2033                 OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2034                 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2035                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2036
2037                 OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2038                 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2039                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2040
2041                 OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2042                 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2043                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2044
2045                 if (IS_PF(p_dev))
2046                         ecore_ptt_release(p_hwfn, p_ptt);
2047         }
2048
2049         /* PORT statistics are not necessarily reset, so we need to
2050          * read and create a baseline for future statistics.
2051          */
2052         if (!p_dev->reset_stats)
2053                 DP_INFO(p_dev, "Reset stats not allocated\n");
2054         else
2055                 _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2056 }
2057
2058 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2059                                struct ecore_ptt *p_ptt,
2060                                struct ecore_arfs_config_params *p_cfg_params)
2061 {
2062         if (p_cfg_params->arfs_enable) {
2063                 ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2064                                           p_cfg_params->tcp,
2065                                           p_cfg_params->udp,
2066                                           p_cfg_params->ipv4,
2067                                           p_cfg_params->ipv6);
2068                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2069                            "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
2070                            p_cfg_params->tcp ? "Enable" : "Disable",
2071                            p_cfg_params->udp ? "Enable" : "Disable",
2072                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2073                            p_cfg_params->ipv6 ? "Enable" : "Disable");
2074         } else {
2075                 ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2076         }
2077         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
2078                    p_cfg_params->arfs_enable ? "Enable" : "Disable");
2079 }
2080
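/* Illustrative sketch (not part of the driver): enabling aRFS for TCP and
 * UDP flows over IPv4 only. Assumes the caller already holds a PTT window;
 * the bool field types are an assumption.
 */
#if 0
static void example_enable_arfs(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt)
{
        struct ecore_arfs_config_params params;

        OSAL_MEMSET(&params, 0, sizeof(params));
        params.arfs_enable = true;
        params.tcp = true;
        params.udp = true;
        params.ipv4 = true;
        params.ipv6 = false;

        ecore_arfs_mode_configure(p_hwfn, p_ptt, &params);
}
#endif
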
2081 enum _ecore_status_t
2082 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2083                                   struct ecore_spq_comp_cb *p_cb,
2084                                   dma_addr_t p_addr, u16 length,
2085                                   u16 qid, u8 vport_id,
2086                                   bool b_is_add)
2087 {
2088         struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2089         struct ecore_spq_entry *p_ent = OSAL_NULL;
2090         struct ecore_sp_init_data init_data;
2091         u16 abs_rx_q_id = 0;
2092         u8 abs_vport_id = 0;
2093         enum _ecore_status_t rc = ECORE_NOTIMPL;
2094
2095         rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2096         if (rc != ECORE_SUCCESS)
2097                 return rc;
2098
2099         rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2100         if (rc != ECORE_SUCCESS)
2101                 return rc;
2102
2103         /* Get SPQ entry */
2104         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2105         init_data.cid = ecore_spq_get_cid(p_hwfn);
2106
2107         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2108
2109         if (p_cb) {
2110                 init_data.comp_mode = ECORE_SPQ_MODE_CB;
2111                 init_data.p_comp_data = p_cb;
2112         } else {
2113                 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2114         }
2115
2116         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2117                                    ETH_RAMROD_GFT_UPDATE_FILTER,
2118                                    PROTOCOLID_ETH, &init_data);
2119         if (rc != ECORE_SUCCESS)
2120                 return rc;
2121
2122         p_ramrod = &p_ent->ramrod.rx_update_gft;
2123
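        /* p_addr is expected to point at a DMA-mapped template of the packet
         * header the GFT logic should match against; only its address and
         * length travel in the ramrod.
         */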
2124         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2125         p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2126         p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2127         p_ramrod->vport_id = abs_vport_id;
2128         p_ramrod->filter_type = RFS_FILTER_TYPE;
2129         p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2130                                            : GFT_DELETE_FILTER;
2131
2132         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2133                    "V[%02x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
2134                    abs_vport_id, abs_rx_q_id,
2135                    b_is_add ? "Adding" : "Removing",
2136                    (unsigned long)p_addr, length);
2137
2138         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2139 }