drivers/net/qede/base/ecore_l2.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
        u32 queues;
        unsigned long **pp_qid_usage;

        /* The lock is meant to synchronize access to the qid usage */
        osal_mutex_t lock;
};

enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_l2_info *p_l2_info;
        unsigned long **pp_qids;
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return ECORE_SUCCESS;

        p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
        if (!p_l2_info)
                return ECORE_NOMEM;
        p_hwfn->p_l2_info = p_l2_info;

        if (IS_PF(p_hwfn->p_dev)) {
                p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
        } else {
                u8 rx = 0, tx = 0;

                ecore_vf_get_num_rxqs(p_hwfn, &rx);
                ecore_vf_get_num_txqs(p_hwfn, &tx);

                p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
        }

        pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
                               sizeof(unsigned long *) *
                               p_l2_info->queues);
        if (pp_qids == OSAL_NULL)
                return ECORE_NOMEM;
        p_l2_info->pp_qid_usage = pp_qids;

        for (i = 0; i < p_l2_info->queues; i++) {
                pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
                                          MAX_QUEUES_PER_QZONE / 8);
                if (pp_qids[i] == OSAL_NULL)
                        return ECORE_NOMEM;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
                return ECORE_NOMEM;
#endif

        return ECORE_SUCCESS;
}

void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        if (p_hwfn->p_l2_info == OSAL_NULL)
                return;

        if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
                goto out_l2_info;

        /* Free until we hit the first uninitialized entry */
        for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
                if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
                        break;
                OSAL_VFREE(p_hwfn->p_dev,
                           p_hwfn->p_l2_info->pp_qid_usage[i]);
                p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        /* The lock is allocated last, so it exists only if everything
         * else was allocated successfully.
         */
        if (i == p_hwfn->p_l2_info->queues)
                OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
        p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;

out_l2_info:
        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
        p_hwfn->p_l2_info = OSAL_NULL;
}
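
/* Usage sketch (illustrative comment only, not driver code): the intended
 * lifecycle of the per-hwfn L2 info is alloc -> setup -> runtime -> free,
 * driven from the generic resource alloc/setup/free paths:
 *
 *      if (ecore_l2_alloc(p_hwfn) != ECORE_SUCCESS)
 *              return ECORE_NOMEM;     // partial allocs freed later
 *      ecore_l2_setup(p_hwfn);         // initializes the qid-usage mutex
 *      ...                             // queues started/stopped at runtime
 *      ecore_l2_free(p_hwfn);          // tolerates a partially-done alloc
 */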

/* Access to the qid-usage bitmaps is synchronized via p_l2_info->lock */
static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
        u16 queue_id = p_cid->rel.queue_id;
        bool b_rc = true;
        u8 first;

        OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

        if (queue_id >= p_l2_info->queues) {
                DP_NOTICE(p_hwfn, true,
                          "Requested to increase usage for qzone %04x out of %08x\n",
                          queue_id, p_l2_info->queues);
                b_rc = false;
                goto out;
        }

        first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
                                             MAX_QUEUES_PER_QZONE);
        if (first >= MAX_QUEUES_PER_QZONE) {
                b_rc = false;
                goto out;
        }

        OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
        p_cid->qid_usage_idx = first;

out:
        OSAL_MUTEX_RELEASE(&p_l2_info->lock);
        return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

        OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
                       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

        OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}
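
/* Illustrative note (sketch, not driver code): qid_usage_idx selects one of
 * up to MAX_QUEUES_PER_QZONE queues sharing a single queue-zone. PFs claim
 * the first free bit in the zone's bitmap as above; VFs instead pass their
 * own index in through ecore_queue_cid_vf_params, e.g.:
 *
 *      struct ecore_queue_cid_vf_params vf_params = { 0 };
 *
 *      vf_params.vfid = vfid;                  // hypothetical values,
 *      vf_params.vf_qid = vf_qid;              // normally taken from the
 *      vf_params.qid_usage_idx = qid_idx;      // VF's mailbox request
 */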

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
                                 struct ecore_queue_cid *p_cid)
{
        bool b_legacy_vf = !!(p_cid->vf_legacy &
                              ECORE_QCID_LEGACY_VF_CID);

        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
         * For legacy vf-queues, the CID doesn't go through here.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

        /* VFs maintain the index inside queue-zone on their own */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF)
                ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

        OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* The internal variant is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                        u16 opaque_fid, u32 cid,
                        struct ecore_queue_start_common_params *p_params,
                        bool b_is_rx,
                        struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
        if (p_cid == OSAL_NULL)
                return OSAL_NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->p_owner = p_hwfn;

        /* Fill in parameters */
        p_cid->rel.vport_id = p_params->vport_id;
        p_cid->rel.queue_id = p_params->queue_id;
        p_cid->rel.stats_id = p_params->stats_id;
        p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
        p_cid->b_is_rx = b_is_rx;
        p_cid->sb_idx = p_params->sb_idx;

        /* Fill-in bits related to VFs' queues if information was provided */
        if (p_vf_params != OSAL_NULL) {
                p_cid->vfid = p_vf_params->vfid;
                p_cid->vf_qid = p_vf_params->vf_qid;
                p_cid->vf_legacy = p_vf_params->vf_legacy;
        } else {
                p_cid->vfid = ECORE_QUEUE_CID_PF;
        }

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->p_dev)) {
                p_cid->abs = p_cid->rel;

                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
                               &p_cid->abs.queue_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
                rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                    &p_cid->abs.stats_id);
                if (rc != ECORE_SUCCESS)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

out:
        /* VF-images have provided the qid_usage_idx on their own.
         * Otherwise, we need to allocate a unique one.
         */
        if (!p_vf_params) {
                if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
                        goto fail;
        } else {
                p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->rel.vport_id, p_cid->abs.vport_id,
                   p_cid->rel.queue_id, p_cid->qid_usage_idx,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id, p_cid->abs.stats_id,
                   p_cid->sb_igu_id, p_cid->sb_idx);

        return p_cid;

fail:
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
        return OSAL_NULL;
}

struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                       struct ecore_queue_start_common_params *p_params,
                       bool b_is_rx,
                       struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        u8 vfid = ECORE_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;

        /* In case of legacy VFs, the CID can be derived from the additional
         * VF parameters - the VF assumes queue X uses CID X, so we can simply
         * use the vf_qid for this purpose as well.
         */
        if (p_vf_params) {
                vfid = p_vf_params->vfid;

                if (p_vf_params->vf_legacy &
                    ECORE_QCID_LEGACY_VF_CID) {
                        b_legacy_vf = true;
                        cid = p_vf_params->vf_qid;
                }
        }

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by PF.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
                if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                           &cid, vfid) != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
                        return OSAL_NULL;
                }
        }

        p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                        p_params, b_is_rx, p_vf_params);
        if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, cid, vfid);

        return p_cid;
}

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                          bool b_is_rx,
                          struct ecore_queue_start_common_params *p_params)
{
        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
                                      OSAL_NULL);
}
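
/* Usage sketch (illustrative, not driver code): a PF-owned Rx CID is built
 * from the common queue-start parameters and released once the queue is
 * torn down. 'p_sb' is assumed to be a status block acquired elsewhere:
 *
 *      struct ecore_queue_start_common_params qparams = { 0 };
 *      struct ecore_queue_cid *p_cid;
 *
 *      qparams.vport_id = 0;           // relative indices, hypothetical
 *      qparams.queue_id = 0;
 *      qparams.stats_id = 0;
 *      qparams.p_sb = p_sb;
 *      qparams.sb_idx = 0;
 *
 *      p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, &qparams,
 *                                     true, OSAL_NULL);  // true == Rx
 *      if (p_cid != OSAL_NULL)
 *              ecore_eth_queue_cid_release(p_hwfn, p_cid);
 */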

enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
                         struct ecore_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct eth_vport_tpa_param *p_tpa;
        u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;
        p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

        /* Handle requests for strict behavior on transmission errors */
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
                  p_params->b_err_illegal_vlan_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
                  p_params->b_err_small_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
                  p_params->b_err_anti_spoof ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
                  p_params->b_err_illegal_inband_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
                  p_params->b_err_vlan_insert_with_inband ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
                  p_params->b_err_big_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
                  p_params->b_err_ctrl_frame ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

        /* TPA related fields */
        p_tpa = &p_ramrod->tpa_param;
        OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case ECORE_TPA_MODE_GRO:
                p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_tpa->tpa_max_size = (u16)-1;
                p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
                p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
                p_tpa->tpa_ipv4_en_flg = 1;
                p_tpa->tpa_ipv6_en_flg = 1;
                p_tpa->tpa_ipv4_tunn_en_flg = 1;
                p_tpa->tpa_ipv6_tunn_en_flg = 1;
                p_tpa->tpa_pkt_split_flg = 1;
                p_tpa->tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                p_ramrod->tx_switching_en = 0;
#endif

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                               p_params->mtu,
                                               p_params->remove_inner_vlan,
                                               p_params->tpa_mode,
                                               p_params->max_buffers_per_cqe,
                                               p_params->only_untagged);

        return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
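
/* Usage sketch (illustrative, not driver code): minimal vport bring-up,
 * with FIDs assumed to be taken from hw_info the same way other callers in
 * this file take them:
 *
 *      struct ecore_sp_vport_start_params start = { 0 };
 *
 *      start.vport_id = 0;             // relative vport index
 *      start.mtu = 1500;
 *      start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *      start.max_buffers_per_cqe = 2;  // hypothetical value
 *
 *      if (ecore_sp_vport_start(p_hwfn, &start) != ECORE_SUCCESS)
 *              return ECORE_INVAL;     // ramrod could not be posted
 */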

static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        u16 capabilities = 0;
        int i, table_size;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
                          ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
            ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

        p_config->capabilities = 0;

        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;
        p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
                                1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return ECORE_INVAL;

                p_config->indirection_table[i] =
                                OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

        return rc;
}
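
/* Usage sketch (illustrative, not driver code): an RSS configuration is
 * handed to ecore_sp_vport_update() via its rss_params pointer, with the
 * indirection table referencing already-started queue cids ('p_cids' and
 * 'num_rxqs' are hypothetical caller state):
 *
 *      struct ecore_rss_params rss = { 0 };
 *      int i;
 *
 *      rss.update_rss_config = 1;
 *      rss.rss_enable = 1;
 *      rss.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP;
 *      rss.rss_table_size_log = 7;     // 2^7 == 128 table entries
 *      for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
 *              rss.rss_ind_table[i] = p_cids[i % num_rxqs];
 */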

static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct ecore_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                                        accept_flags.update_rx_mode_config;
        p_ramrod->common.update_tx_mode_flg =
                                        accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
        /* On B0 emulation we cannot enable Tx, since this would cause writes
         * to the PVFC HW block, which isn't implemented in emulation.
         */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx mode in vport update\n");
                p_ramrod->common.update_tx_mode_flg = 0;
        }
#endif

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
                           !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
                          !!(accept_filter & ECORE_ACCEPT_ANY_VNI));

                p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }
}

static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
                              struct ecore_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;
        u16 val;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        val = p_params->tpa_max_size;
        p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
        val = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
        val = p_params->tpa_min_size_to_cont;
        p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}

static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_sp_vport_update_params *p_params)
{
        int i;

        OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
                    sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = p_params->bins;

                p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
        }
}

enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
                      struct ecore_sp_vport_update_params *p_params,
                      enum spq_mode comp_mode,
                      struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct ecore_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u8 abs_vport_id = 0, val;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev)) {
                rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;

        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                if (p_ramrod->common.tx_switching_en ||
                    p_ramrod->common.update_tx_switching_en_flg) {
                        DP_NOTICE(p_hwfn, false,
                                  "FPGA - why are we seeing tx-switching? Overriding it\n");
                        p_ramrod->common.tx_switching_en = 0;
                        p_ramrod->common.update_tx_switching_en_flg = 1;
                }
#endif
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc != ECORE_SUCCESS) {
                /* Return spq entry which is taken in ecore_sp_init_request() */
                ecore_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        ecore_sp_update_mcast_bin(p_ramrod, p_params);

        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
        if (p_params->mtu) {
                p_ramrod->common.update_mtu_flg = 1;
                p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
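
/* Usage sketch (illustrative, not driver code): activating Rx/Tx on an
 * already-started vport through the update ramrod:
 *
 *      struct ecore_sp_vport_update_params update = { 0 };
 *
 *      update.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      update.vport_id = 0;
 *      update.update_vport_active_rx_flg = 1;
 *      update.vport_active_rx_flg = 1;
 *      update.update_vport_active_tx_flg = 1;
 *      update.vport_active_tx_flg = 1;
 *
 *      rc = ecore_sp_vport_update(p_hwfn, &update,
 *                                 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */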

enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
                                         u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct ecore_sp_init_data init_data;
        struct ecore_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_stop(p_hwfn);

        rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
                         struct ecore_filter_accept_flags *p_accept_flags)
{
        struct ecore_sp_vport_update_params s_params;

        OSAL_MEMSET(&s_params, 0, sizeof(s_params));
        OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
                    sizeof(struct ecore_filter_accept_flags));

        return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
                        u8 vport,
                        struct ecore_filter_accept_flags accept_flags,
                        u8 update_accept_any_vlan,
                        u8 accept_any_vlan,
                        enum spq_mode comp_mode,
                        struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_sp_vport_update_params vport_update_params;
        enum _ecore_status_t rc;
        int i;

        /* Prepare and send the vport rx_mode change */
        OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                        continue;
                }

                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           comp_mode, p_comp_data);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);

                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return ECORE_SUCCESS;
}
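
/* Usage sketch (illustrative, not driver code): accepting all matched
 * unicast/multicast plus broadcast on vport 0 of every hwfn:
 *
 *      struct ecore_filter_accept_flags flags = { 0 };
 *
 *      flags.update_rx_mode_config = 1;
 *      flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *                               ECORE_ACCEPT_MCAST_MATCHED |
 *                               ECORE_ACCEPT_BCAST;
 *
 *      rc = ecore_filter_accept_cmd(p_dev, 0, flags, 0, 0,
 *                                   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */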

enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           u16 bd_max_bytes,
                           dma_addr_t bd_chain_phys_addr,
                           dma_addr_t cqe_pbl_addr,
                           u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
                   p_cid->abs.vport_id, p_cid->sb_igu_id);

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
                bool b_legacy_vf = !!(p_cid->vf_legacy &
                                      ECORE_QCID_LEGACY_VF_RX_PROD);

                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u16 bd_max_bytes,
                            dma_addr_t bd_chain_phys_addr,
                            dma_addr_t cqe_pbl_addr,
                            u16 cqe_pbl_size,
                            void OSAL_IOMEM * *pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = (u8 OSAL_IOMEM *)
                    p_hwfn->regview +
                    GTT_BAR0_MAP_REG_MSDM_RAM +
                    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                          bd_max_bytes,
                                          bd_chain_phys_addr,
                                          cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         struct ecore_rxq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        /* Allocate a CID for the queue */
        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_NOMEM;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                                 bd_max_bytes,
                                                 bd_chain_phys_addr,
                                                 cqe_pbl_addr, cqe_pbl_size,
                                                 &p_ret_params->p_prod);
        else
                rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
                                           cqe_pbl_size,
                                           &p_ret_params->p_prod);

        /* Provide the caller with a reference to the cid as a handle */
        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}
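
/* Usage sketch (illustrative, not driver code): starting an Rx queue; the
 * BD-chain and CQE-PBL DMA addresses are assumed to come from chains the
 * caller allocated beforehand:
 *
 *      struct ecore_rxq_start_ret_params ret = { 0 };
 *
 *      rc = ecore_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *                                    &qparams, rx_buf_size,
 *                                    bd_chain_phys, cqe_pbl_phys,
 *                                    cqe_pbl_size, &ret);
 *      if (rc == ECORE_SUCCESS) {
 *              // ret.p_prod   - address for posting Rx producer updates
 *              // ret.p_handle - opaque handle for the stop/update calls
 *      }
 */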

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
                              void **pp_rxq_handles,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
                              enum spq_mode comp_mode,
                              struct ecore_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 i;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_rxqs_update(p_hwfn,
                                               (struct ecore_queue_cid **)
                                               pp_rxq_handles,
                                               num_rxqs,
                                               complete_cqe_flg,
                                               complete_event_flg);

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                           ETH_RAMROD_RX_QUEUE_UPDATE,
                                           PROTOCOLID_ETH, &init_data);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           bool b_eq_completion_only,
                           bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to arrive as an EQE on the PF.
         */
        p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
                                       b_eq_completion_only;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_rxq,
                                             bool eq_completion_only,
                                             bool cqe_completion)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                                eq_completion_only,
                                                cqe_completion);
        else
                rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           dma_addr_t pbl_addr, u16 pbl_size,
                           u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u8 tc,
                            dma_addr_t pbl_addr, u16 pbl_size,
                            void OSAL_IOMEM * *pp_doorbell)
{
        enum _ecore_status_t rc;
        u16 pq_id;

        /* TODO - set tc in the pq_params for multi-CoS.
         * If pacing is enabled, select the queue according to rate-limiter
         * availability; otherwise, select it based on multi-CoS.
         */
        if (IS_ECORE_PACING(p_hwfn))
                pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
        else
                pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);

        rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
                                        pbl_size, pq_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = (u8 OSAL_IOMEM *)
                       p_hwfn->doorbells +
                       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

        return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u8 tc,
                         dma_addr_t pbl_addr, u16 pbl_size,
                         struct ecore_txq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_INVAL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                                 pbl_addr, pbl_size,
                                                 &p_ret_params->p_doorbell);
        else
                rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
                                           pbl_addr, pbl_size,
                                           &p_ret_params->p_doorbell);

        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}
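
/* Usage sketch (illustrative, not driver code): starting a Tx queue on
 * traffic class 0; 'pbl_phys'/'pbl_size' describe a caller-allocated PBL:
 *
 *      struct ecore_txq_start_ret_params ret = { 0 };
 *
 *      rc = ecore_eth_tx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *                                    &qparams, 0, pbl_phys, pbl_size,
 *                                    &ret);
 *      if (rc == ECORE_SUCCESS) {
 *              // ret.p_doorbell - address to ring with producer updates
 *              // ret.p_handle   - pass to ecore_eth_tx_queue_stop()
 *      }
 */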

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_handle)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
        enum _ecore_status_t rc;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case ECORE_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case ECORE_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case ECORE_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
                          u16 opaque_fid,
                          struct ecore_filter_ucast *p_filter_cmd,
                          struct vport_filter_update_ramrod_data **pp_ramrod,
                          struct ecore_spq_entry **pp_ent,
                          enum spq_mode comp_mode,
                          struct ecore_spq_comp_cb *p_comp_data)
{
        u8 vport_to_add_to = 0, vport_to_remove_from = 0;
        struct vport_filter_update_ramrod_data *p_ramrod;
        struct eth_filter_cmd *p_first_filter;
        struct eth_filter_cmd *p_second_filter;
        struct ecore_sp_init_data init_data;
        enum eth_filter_action action;
        enum _ecore_status_t rc;

        rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                            &vport_to_remove_from);
        if (rc != ECORE_SUCCESS)
                return rc;

        rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                            &vport_to_add_to);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, pp_ent,
                                   ETH_RAMROD_FILTERS_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
        p_ramrod = *pp_ramrod;
        p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
        p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx filters\n");
                p_ramrod->filter_cmd_hdr.tx = 0;
        }
#endif

        switch (p_filter_cmd->opcode) {
        case ECORE_FILTER_REPLACE:
        case ECORE_FILTER_MOVE:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
                break;
        default:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
                break;
        }

        p_first_filter = &p_ramrod->filter_cmds[0];
        p_second_filter = &p_ramrod->filter_cmds[1];

        switch (p_filter_cmd->type) {
        case ECORE_FILTER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_MAC;
                break;
        case ECORE_FILTER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_VLAN;
                break;
        case ECORE_FILTER_MAC_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_PAIR;
                break;
        case ECORE_FILTER_INNER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
                break;
        case ECORE_FILTER_INNER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
                break;
        case ECORE_FILTER_INNER_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
                break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
                break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
                break;
        case ECORE_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI;
                break;
        case ECORE_FILTER_UNUSED: /* @DPDK */
                p_first_filter->type = MAX_ETH_FILTER_TYPE;
                break;
1405         }
1406
1407         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1408             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1409             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1410             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1411             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1412             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1413                 ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1414                                       &p_first_filter->mac_mid,
1415                                       &p_first_filter->mac_lsb,
1416                                       (u8 *)p_filter_cmd->mac);
1417
1418         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1419             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1420             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1421             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1422                 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1423
1424         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1425             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1426             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1427                 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1428
1429         if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1430                 p_second_filter->type = p_first_filter->type;
1431                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1432                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1433                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1434                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1435                 p_second_filter->vni = p_first_filter->vni;
1436
1437                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1438
1439                 p_first_filter->vport_id = vport_to_remove_from;
1440
1441                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1442                 p_second_filter->vport_id = vport_to_add_to;
1443         } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1444                 p_first_filter->vport_id = vport_to_add_to;
1445                 OSAL_MEMCPY(p_second_filter, p_first_filter,
1446                             sizeof(*p_second_filter));
1447                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1448                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1449         } else {
1450                 action = ecore_filter_action(p_filter_cmd->opcode);
1451
1452                 if (action == MAX_ETH_FILTER_ACTION) {
1453                         DP_NOTICE(p_hwfn, true,
1454                                   "Filter opcode %d is not supported yet\n",
1455                                   p_filter_cmd->opcode);
1456                         return ECORE_NOTIMPL;
1457                 }
1458
1459                 p_first_filter->action = action;
1460                 p_first_filter->vport_id =
1461                     (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1462                     vport_to_remove_from : vport_to_add_to;
1463         }
1464
1465         return ECORE_SUCCESS;
1466 }
1467
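/* Illustrative sketch (not part of the driver): a caller adding a unicast
 * MAC filter on both the Rx and Tx side of relative vport 0 could fill
 * the command roughly as follows, where mac_addr is an assumed u8[6]
 * supplied by the caller:
 *
 *      struct ecore_filter_ucast ucast;
 *
 *      OSAL_MEMSET(&ucast, 0, sizeof(ucast));
 *      ucast.opcode = ECORE_FILTER_ADD;
 *      ucast.type = ECORE_FILTER_MAC;
 *      ucast.vport_to_add_to = 0;
 *      ucast.is_rx_filter = 1;
 *      ucast.is_tx_filter = 1;
 *      OSAL_MEMCPY(ucast.mac, mac_addr, 6);
 *      rc = ecore_filter_ucast_cmd(p_dev, &ucast, ECORE_SPQ_MODE_EBLOCK,
 *                                  OSAL_NULL);
 */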
1468 enum _ecore_status_t
1469 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1470                           u16 opaque_fid,
1471                           struct ecore_filter_ucast *p_filter_cmd,
1472                           enum spq_mode comp_mode,
1473                           struct ecore_spq_comp_cb *p_comp_data)
1474 {
1475         struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1476         struct ecore_spq_entry *p_ent = OSAL_NULL;
1477         struct eth_filter_cmd_header *p_header;
1478         enum _ecore_status_t rc;
1479
1480         rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1481                                        &p_ramrod, &p_ent,
1482                                        comp_mode, p_comp_data);
1483         if (rc != ECORE_SUCCESS) {
1484                 DP_ERR(p_hwfn, "Unicast filter command preparation failed %d\n", rc);
1485                 return rc;
1486         }
1487         p_header = &p_ramrod->filter_cmd_hdr;
1488         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1489
1490         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1491         if (rc != ECORE_SUCCESS) {
1492                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1493                 return rc;
1494         }
1495
1496         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1497                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1498                    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1499                    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1500                     "REMOVE" :
1501                     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1502                      "MOVE" : "REPLACE")),
1503                    (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1504                    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1505                     "VLAN" : "MAC & VLAN"),
1506                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1507                    p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1508         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1509                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1510                    p_filter_cmd->vport_to_add_to,
1511                    p_filter_cmd->vport_to_remove_from,
1512                    p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1513                    p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1514                    p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1515                    p_filter_cmd->vlan);
1516
1517         return ECORE_SUCCESS;
1518 }
1519
1520 /*******************************************************************************
1521  * Description:
1522  *         Calculates CRC32c on a buffer, bit by bit
1523  *         Note: crc32_length MUST be aligned to 8
1524  * Return: the CRC32c result; the seed is returned unchanged on invalid input
1525  ******************************************************************************/
1526 static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
1527 {
1528         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1529         u8 msb = 0, current_byte = 0;
1530
1531         if ((crc32_packet == OSAL_NULL) ||
1532             (crc32_length == 0) || ((crc32_length % 8) != 0)) {
1533                 return crc32_result;
1534         }
1535
1536         for (byte = 0; byte < crc32_length; byte++) {
1537                 current_byte = crc32_packet[byte];
1538                 for (bit = 0; bit < 8; bit++) {
1539                         msb = (u8)(crc32_result >> 31);
1540                         crc32_result = crc32_result << 1;
1541                         if (msb != (0x1 & (current_byte >> bit))) {
1542                                 crc32_result = crc32_result ^ CRC32_POLY;
1543                                 crc32_result |= 1;
1544                         }
1545                 }
1546         }
1547
1548         return crc32_result;
1549 }
1550
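/* Zero-pad the 6-byte MAC into an 8-byte buffer so the length meets
 * ecore_calc_crc32c()'s multiple-of-8 requirement.
 */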
1551 static u32 ecore_crc32c_le(u32 seed, u8 *mac)
1552 {
1553         u32 packet_buf[2] = { 0 };
1554
1555         OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1556         return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
1557 }
1558
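/* Hash a multicast MAC into one of 256 approximate-match bins: the low
 * byte of the address's CRC32c selects the bin. The bins make up the
 * 256-bit approx_mcast vector programmed by the VPORT_UPDATE ramrod
 * below.
 */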
1559 u8 ecore_mcast_bin_from_mac(u8 *mac)
1560 {
1561         u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
1562
1563         return crc & 0xff;
1564 }
1565
1566 static enum _ecore_status_t
1567 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1568                           struct ecore_filter_mcast *p_filter_cmd,
1569                           enum spq_mode comp_mode,
1570                           struct ecore_spq_comp_cb *p_comp_data)
1571 {
1572         struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1573         u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1574         struct ecore_spq_entry *p_ent = OSAL_NULL;
1575         struct ecore_sp_init_data init_data;
1576         u8 abs_vport_id = 0;
1577         enum _ecore_status_t rc;
1578         int i;
1579
1580         if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1581                 rc = ecore_fw_vport(p_hwfn,
1582                                     p_filter_cmd->vport_to_add_to,
1583                                     &abs_vport_id);
1584         else
1585                 rc = ecore_fw_vport(p_hwfn,
1586                                     p_filter_cmd->vport_to_remove_from,
1587                                     &abs_vport_id);
1588         if (rc != ECORE_SUCCESS)
1589                 return rc;
1590
1591         /* Get SPQ entry */
1592         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1593         init_data.cid = ecore_spq_get_cid(p_hwfn);
1594         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1595         init_data.comp_mode = comp_mode;
1596         init_data.p_comp_data = p_comp_data;
1597
1598         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1599                                    ETH_RAMROD_VPORT_UPDATE,
1600                                    PROTOCOLID_ETH, &init_data);
1601         if (rc != ECORE_SUCCESS) {
1602                 DP_ERR(p_hwfn, "Multicast filter command preparation failed %d\n", rc);
1603                 return rc;
1604         }
1605
1606         p_ramrod = &p_ent->ramrod.vport_update;
1607         p_ramrod->common.update_approx_mcast_flg = 1;
1608
1609         /* explicitly clear out the entire vector */
1610         OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1611                     0, sizeof(p_ramrod->approx_mcast.bins));
1612         OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1613         /* The filter ADD op is an explicit set op: it removes
1614          * any existing filters for the vport.
1615          */
1616         if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1617                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1618                         u32 bit;
1619
1620                         bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1621                         bins[bit / 32] |= 1 << (bit % 32);
1622                 }
1623
1624                 /* Convert to correct endianness */
1625                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1626                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1627
1628                         p_ramrod_bins = &p_ramrod->approx_mcast;
1629                         p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
1630                 }
1631         }
1632
1633         p_ramrod->common.vport_id = abs_vport_id;
1634
1635         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1636         if (rc != ECORE_SUCCESS)
1637                 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1638
1639         return rc;
1640 }
1641
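/* Device-wide multicast filter configuration. Only ADD and REMOVE are
 * supported; ADD is an explicit set that replaces the vport's entire bin
 * vector. The request is applied on every hwfn, and a VF forwards it to
 * its PF instead of posting a ramrod itself.
 */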
1642 enum _ecore_status_t
1643 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1644                        struct ecore_filter_mcast *p_filter_cmd,
1645                        enum spq_mode comp_mode,
1646                        struct ecore_spq_comp_cb *p_comp_data)
1647 {
1648         enum _ecore_status_t rc = ECORE_SUCCESS;
1649         int i;
1650
1651         /* Only ADD and REMOVE operations are supported for multicast */
1652         if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1653              p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1654             (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1655                 return ECORE_INVAL;
1656         }
1657
1658         for_each_hwfn(p_dev, i) {
1659                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1660
1661                 if (IS_VF(p_dev)) {
1662                         ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1663                         continue;
1664                 }
1665
1666                 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1667                                                p_filter_cmd,
1668                                                comp_mode, p_comp_data);
1669                 if (rc != ECORE_SUCCESS)
1670                         break;
1671         }
1672
1673         return rc;
1674 }
1675
1676 enum _ecore_status_t
1677 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1678                        struct ecore_filter_ucast *p_filter_cmd,
1679                        enum spq_mode comp_mode,
1680                        struct ecore_spq_comp_cb *p_comp_data)
1681 {
1682         enum _ecore_status_t rc = ECORE_SUCCESS;
1683         int i;
1684
1685         for_each_hwfn(p_dev, i) {
1686                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1687                 u16 opaque_fid;
1688
1689                 if (IS_VF(p_dev)) {
1690                         rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1691                         continue;
1692                 }
1693
1694                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1695                 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1696                                                opaque_fid,
1697                                                p_filter_cmd,
1698                                                comp_mode, p_comp_data);
1699                 if (rc != ECORE_SUCCESS)
1700                         break;
1701         }
1702
1703         return rc;
1704 }
1705
1706 /* Statistics related code */
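/* The statistics below live in the storm RAMs: PSTORM holds Tx counters,
 * USTORM Rx counters, MSTORM buffer-discard/TPA counters and TSTORM the
 * per-port discard counters. A PF reads them straight out of BAR0 SDM
 * RAM, while a VF uses the addresses its PF advertised in the acquire
 * response - see the *_addrlen() helpers.
 */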
1707 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1708                                              u32 *p_addr, u32 *p_len,
1709                                              u16 statistics_bin)
1710 {
1711         if (IS_PF(p_hwfn->p_dev)) {
1712                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1713                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1714                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1715         } else {
1716                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1717                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1718
1719                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1720                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1721         }
1722 }
1723
1724 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1725                                      struct ecore_ptt *p_ptt,
1726                                      struct ecore_eth_stats *p_stats,
1727                                      u16 statistics_bin)
1728 {
1729         struct eth_pstorm_per_queue_stat pstats;
1730         u32 pstats_addr = 0, pstats_len = 0;
1731
1732         __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1733                                          statistics_bin);
1734
1735         OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1736         ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1737
1738         p_stats->common.tx_ucast_bytes +=
1739                 HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1740         p_stats->common.tx_mcast_bytes +=
1741                 HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1742         p_stats->common.tx_bcast_bytes +=
1743                 HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1744         p_stats->common.tx_ucast_pkts +=
1745                 HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1746         p_stats->common.tx_mcast_pkts +=
1747                 HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1748         p_stats->common.tx_bcast_pkts +=
1749                 HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1750         p_stats->common.tx_err_drop_pkts +=
1751                 HILO_64_REGPAIR(pstats.error_drop_pkts);
1752 }
1753
1754 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1755                                      struct ecore_ptt *p_ptt,
1756                                      struct ecore_eth_stats *p_stats)
1757 {
1758         struct tstorm_per_port_stat tstats;
1759         u32 tstats_addr, tstats_len;
1760
1761         if (IS_PF(p_hwfn->p_dev)) {
1762                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1763                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1764                 tstats_len = sizeof(struct tstorm_per_port_stat);
1765         } else {
1766                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1767                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1768
1769                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1770                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1771         }
1772
1773         OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1774         ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1775
1776         p_stats->common.mftag_filter_discards +=
1777                 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1778         p_stats->common.mac_filter_discards +=
1779                 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1780 }
1781
1782 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1783                                              u32 *p_addr, u32 *p_len,
1784                                              u16 statistics_bin)
1785 {
1786         if (IS_PF(p_hwfn->p_dev)) {
1787                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1788                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1789                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1790         } else {
1791                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1792                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1793
1794                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1795                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1796         }
1797 }
1798
1799 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1800                                      struct ecore_ptt *p_ptt,
1801                                      struct ecore_eth_stats *p_stats,
1802                                      u16 statistics_bin)
1803 {
1804         struct eth_ustorm_per_queue_stat ustats;
1805         u32 ustats_addr = 0, ustats_len = 0;
1806
1807         __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1808                                          statistics_bin);
1809
1810         OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1811         ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1812
1813         p_stats->common.rx_ucast_bytes +=
1814                 HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1815         p_stats->common.rx_mcast_bytes +=
1816                 HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1817         p_stats->common.rx_bcast_bytes +=
1818                 HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1819         p_stats->common.rx_ucast_pkts +=
1820                 HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1821         p_stats->common.rx_mcast_pkts +=
1822                 HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1823         p_stats->common.rx_bcast_pkts +=
1824                 HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1825 }
1826
1827 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1828                                              u32 *p_addr, u32 *p_len,
1829                                              u16 statistics_bin)
1830 {
1831         if (IS_PF(p_hwfn->p_dev)) {
1832                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1833                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1834                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1835         } else {
1836                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1837                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1838
1839                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1840                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1841         }
1842 }
1843
1844 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1845                                      struct ecore_ptt *p_ptt,
1846                                      struct ecore_eth_stats *p_stats,
1847                                      u16 statistics_bin)
1848 {
1849         struct eth_mstorm_per_queue_stat mstats;
1850         u32 mstats_addr = 0, mstats_len = 0;
1851
1852         __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1853                                          statistics_bin);
1854
1855         OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1856         ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1857
1858         p_stats->common.no_buff_discards +=
1859                 HILO_64_REGPAIR(mstats.no_buff_discard);
1860         p_stats->common.packet_too_big_discard +=
1861                 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1862         p_stats->common.ttl0_discard +=
1863                 HILO_64_REGPAIR(mstats.ttl0_discard);
1864         p_stats->common.tpa_coalesced_pkts +=
1865                 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1866         p_stats->common.tpa_coalesced_events +=
1867                 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1868         p_stats->common.tpa_aborts_num +=
1869                 HILO_64_REGPAIR(mstats.tpa_aborts_num);
1870         p_stats->common.tpa_coalesced_bytes +=
1871                 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1872 }
1873
1874 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1875                                          struct ecore_ptt *p_ptt,
1876                                          struct ecore_eth_stats *p_stats)
1877 {
1878         struct ecore_eth_stats_common *p_common = &p_stats->common;
1879         struct port_stats port_stats;
1880         int j;
1881
1882         OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1883
1884         ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1885                           p_hwfn->mcp_info->port_addr +
1886                           OFFSETOF(struct public_port, stats),
1887                           sizeof(port_stats));
1888
1889         p_common->rx_64_byte_packets += port_stats.eth.r64;
1890         p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1891         p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1892         p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1893         p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1894         p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1895         p_common->rx_crc_errors += port_stats.eth.rfcs;
1896         p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1897         p_common->rx_pause_frames += port_stats.eth.rxpf;
1898         p_common->rx_pfc_frames += port_stats.eth.rxpp;
1899         p_common->rx_align_errors += port_stats.eth.raln;
1900         p_common->rx_carrier_errors += port_stats.eth.rfcr;
1901         p_common->rx_oversize_packets += port_stats.eth.rovr;
1902         p_common->rx_jabbers += port_stats.eth.rjbr;
1903         p_common->rx_undersize_packets += port_stats.eth.rund;
1904         p_common->rx_fragments += port_stats.eth.rfrg;
1905         p_common->tx_64_byte_packets += port_stats.eth.t64;
1906         p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1907         p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1908         p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1909         p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1910         p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1911         p_common->tx_pause_frames += port_stats.eth.txpf;
1912         p_common->tx_pfc_frames += port_stats.eth.txpp;
1913         p_common->rx_mac_bytes += port_stats.eth.rbyte;
1914         p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1915         p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1916         p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1917         p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1918         p_common->tx_mac_bytes += port_stats.eth.tbyte;
1919         p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1920         p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1921         p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1922         p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1923         for (j = 0; j < 8; j++) {
1924                 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1925                 p_common->brb_discards += port_stats.brb.brb_discard[j];
1926         }
1927
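        /* The MAC extended counters differ per chip family: BB exposes
         * fine-grained 1519..16383 size buckets, while AH collapses them
         * into a single 1519-to-max bucket.
         */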
1928         if (ECORE_IS_BB(p_hwfn->p_dev)) {
1929                 struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1930
1931                 p_bb->rx_1519_to_1522_byte_packets +=
1932                         port_stats.eth.u0.bb0.r1522;
1933                 p_bb->rx_1519_to_2047_byte_packets +=
1934                         port_stats.eth.u0.bb0.r2047;
1935                 p_bb->rx_2048_to_4095_byte_packets +=
1936                         port_stats.eth.u0.bb0.r4095;
1937                 p_bb->rx_4096_to_9216_byte_packets +=
1938                         port_stats.eth.u0.bb0.r9216;
1939                 p_bb->rx_9217_to_16383_byte_packets +=
1940                         port_stats.eth.u0.bb0.r16383;
1941                 p_bb->tx_1519_to_2047_byte_packets +=
1942                         port_stats.eth.u1.bb1.t2047;
1943                 p_bb->tx_2048_to_4095_byte_packets +=
1944                         port_stats.eth.u1.bb1.t4095;
1945                 p_bb->tx_4096_to_9216_byte_packets +=
1946                         port_stats.eth.u1.bb1.t9216;
1947                 p_bb->tx_9217_to_16383_byte_packets +=
1948                         port_stats.eth.u1.bb1.t16383;
1949                 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1950                 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1951         } else {
1952                 struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
1953
1954                 p_ah->rx_1519_to_max_byte_packets +=
1955                         port_stats.eth.u0.ah0.r1519_to_max;
1956                 p_ah->tx_1519_to_max_byte_packets +=
1957                         port_stats.eth.u1.ah1.t1519_to_max;
1958         }
1959
1960         p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
1961                                                p_hwfn->mcp_info->port_addr +
1962                                                OFFSETOF(struct public_port,
1963                                                         link_change_count));
1964 }
1965
1966 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1967                              struct ecore_ptt *p_ptt,
1968                              struct ecore_eth_stats *stats,
1969                              u16 statistics_bin, bool b_get_port_stats)
1970 {
1971         __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1972         __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1973         __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
1974         __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1975
1976 #ifndef ASIC_ONLY
1977         /* Avoid getting PORT stats for emulation. */
1978         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1979                 return;
1980 #endif
1981
1982         if (b_get_port_stats && p_hwfn->mcp_info)
1983                 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1984 }
1985
1986 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1987                                    struct ecore_eth_stats *stats)
1988 {
1989         u8 fw_vport = 0;
1990         int i;
1991
1992         OSAL_MEMSET(stats, 0, sizeof(*stats));
1993
1994         for_each_hwfn(p_dev, i) {
1995                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1996                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1997                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1998                 bool b_get_port_stats;
1999
2000                 if (IS_PF(p_dev)) {
2001                         /* The main vport is at relative index 0 */
2002                         if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
2003                                 DP_ERR(p_hwfn, "No vport available!\n");
2004                                 goto out;
2005                         }
2006                 }
2007
2008                 if (IS_PF(p_dev) && !p_ptt) {
2009                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2010                         continue;
2011                 }
2012
2013                 b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
2014                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
2015                                         b_get_port_stats);
2016
2017 out:
2018                 if (IS_PF(p_dev) && p_ptt)
2019                         ecore_ptt_release(p_hwfn, p_ptt);
2020         }
2021 }
2022
2023 void ecore_get_vport_stats(struct ecore_dev *p_dev,
2024                            struct ecore_eth_stats *stats)
2025 {
2026         u32 i;
2027
2028         if (!p_dev) {
2029                 OSAL_MEMSET(stats, 0, sizeof(*stats));
2030                 return;
2031         }
2032
2033         _ecore_get_vport_stats(p_dev, stats);
2034
2035         if (!p_dev->reset_stats)
2036                 return;
2037
2038         /* Reduce the statistics baseline (stats treated as a flat u64 array) */
2039         for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2040                 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2041 }
2042
2043 /* Zeroes the V-PORT specific portion of the stats (port stats remain untouched) */
2044 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2045 {
2046         int i;
2047
2048         for_each_hwfn(p_dev, i) {
2049                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2050                 struct eth_mstorm_per_queue_stat mstats;
2051                 struct eth_ustorm_per_queue_stat ustats;
2052                 struct eth_pstorm_per_queue_stat pstats;
2053                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2054                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2055                 u32 addr = 0, len = 0;
2056
2057                 if (IS_PF(p_dev) && !p_ptt) {
2058                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2059                         continue;
2060                 }
2061
2062                 OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2063                 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2064                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2065
2066                 OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2067                 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2068                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2069
2070                 OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2071                 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2072                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2073
2074                 if (IS_PF(p_dev))
2075                         ecore_ptt_release(p_hwfn, p_ptt);
2076         }
2077
2078         /* PORT statistics are not necessarily reset, so we need to
2079          * read and create a baseline for future statistics.
2080          * The link change stat is maintained by the MFW; return its value as is.
2081          */
2082         if (!p_dev->reset_stats)
2083                 DP_INFO(p_dev, "Reset stats not allocated\n");
2084         else {
2085                 _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2086                 p_dev->reset_stats->common.link_change_count = 0;
2087         }
2088 }
2089
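/* Translate the ecore ARFS/GFT filtering mode into the HSI profile type.
 * Note the naming difference: ecore's 5-tuple mode maps onto the
 * firmware's GFT_PROFILE_TYPE_4_TUPLE profile.
 */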
2090 static enum gft_profile_type
2091 ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
2092 {
2093         if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
2094                 return GFT_PROFILE_TYPE_4_TUPLE;
2095
2096         if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
2097                 return GFT_PROFILE_TYPE_IP_DST_ADDR;
2098
2099         if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE)
2100                 return GFT_PROFILE_TYPE_TUNNEL_TYPE;
2101
2102         if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC)
2103                 return GFT_PROFILE_TYPE_IP_SRC_ADDR;
2104
2105         return GFT_PROFILE_TYPE_L4_DST_PORT;
2106 }
2107
2108 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2109                                struct ecore_ptt *p_ptt,
2110                                struct ecore_arfs_config_params *p_cfg_params)
2111 {
2112         if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
2113                 return;
2114
2115         if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
2116                 ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2117                                  p_cfg_params->tcp,
2118                                  p_cfg_params->udp,
2119                                  p_cfg_params->ipv4,
2120                                  p_cfg_params->ipv6,
2121                                  ecore_arfs_mode_to_hsi(p_cfg_params->mode));
2122                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2123                            "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
2124                            p_cfg_params->tcp ? "Enable" : "Disable",
2125                            p_cfg_params->udp ? "Enable" : "Disable",
2126                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2127                            p_cfg_params->ipv6 ? "Enable" : "Disable");
2128         } else {
2129                 ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2130         }
2131         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n",
2132                    (int)p_cfg_params->mode);
2133 }
2134
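/* Add or remove an RFS n-tuple (GFT) filter. The caller supplies a DMA
 * buffer of `length' bytes holding a template packet header; firmware
 * matches ingress traffic against it according to the profile configured
 * through ecore_arfs_mode_configure() and steers hits to Rx queue `qid'
 * of `vport_id'. Completion is via callback when p_cb is provided,
 * otherwise the call blocks (EBLOCK).
 */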
2135 enum _ecore_status_t
2136 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2137                                   struct ecore_spq_comp_cb *p_cb,
2138                                   dma_addr_t p_addr, u16 length,
2139                                   u16 qid, u8 vport_id,
2140                                   bool b_is_add)
2141 {
2142         struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2143         struct ecore_spq_entry *p_ent = OSAL_NULL;
2144         struct ecore_sp_init_data init_data;
2145         u16 abs_rx_q_id = 0;
2146         u8 abs_vport_id = 0;
2147         enum _ecore_status_t rc = ECORE_NOTIMPL;
2148
2149         rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2150         if (rc != ECORE_SUCCESS)
2151                 return rc;
2152
2153         rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2154         if (rc != ECORE_SUCCESS)
2155                 return rc;
2156
2157         /* Get SPQ entry */
2158         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2159         init_data.cid = ecore_spq_get_cid(p_hwfn);
2160
2161         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2162
2163         if (p_cb) {
2164                 init_data.comp_mode = ECORE_SPQ_MODE_CB;
2165                 init_data.p_comp_data = p_cb;
2166         } else {
2167                 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2168         }
2169
2170         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2171                                    ETH_RAMROD_GFT_UPDATE_FILTER,
2172                                    PROTOCOLID_ETH, &init_data);
2173         if (rc != ECORE_SUCCESS)
2174                 return rc;
2175
2176         p_ramrod = &p_ent->ramrod.rx_update_gft;
2177
2178         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2179         p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2180
2181         p_ramrod->action_icid_valid = 0;
2182         p_ramrod->action_icid = 0;
2183
2184         p_ramrod->rx_qid_valid = 1;
2185         p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2186
2187         p_ramrod->flow_id_valid = 0;
2188         p_ramrod->flow_id = 0;
2189
2190         p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
2191         p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2192                                            : GFT_DELETE_FILTER;
2193
2194         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2195                    "V[%02x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
2196                    abs_vport_id, abs_rx_q_id,
2197                    b_is_add ? "Adding" : "Removing",
2198                    (unsigned long)p_addr, length);
2199
2200         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2201 }
2202
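/* Read back a queue's interrupt coalescing value: the stored timeset is
 * fetched from the queue zone (USTORM for Rx, XSTORM for Tx) and scaled
 * by the timer resolution recorded in the CAU status block entry, i.e.
 * coalesce = timeset << timer_res.
 */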
2203 int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
2204                            struct ecore_ptt *p_ptt,
2205                            struct ecore_queue_cid *p_cid,
2206                            u16 *p_rx_coal)
2207 {
2208         u32 coalesce, address, is_valid;
2209         struct cau_sb_entry sb_entry;
2210         u8 timer_res;
2211         enum _ecore_status_t rc;
2212
2213         rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2214                                  p_cid->sb_igu_id * sizeof(u64),
2215                                  (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2216         if (rc != ECORE_SUCCESS) {
2217                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2218                 return rc;
2219         }
2220
2221         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2222
2223         address = BAR0_MAP_REG_USDM_RAM +
2224                   USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2225         coalesce = ecore_rd(p_hwfn, p_ptt, address);
2226
2227         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2228         if (!is_valid)
2229                 return ECORE_INVAL;
2230
2231         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2232         *p_rx_coal = (u16)(coalesce << timer_res);
2233
2234         return ECORE_SUCCESS;
2235 }
2236
2237 int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
2238                            struct ecore_ptt *p_ptt,
2239                            struct ecore_queue_cid *p_cid,
2240                            u16 *p_tx_coal)
2241 {
2242         u32 coalesce, address, is_valid;
2243         struct cau_sb_entry sb_entry;
2244         u8 timer_res;
2245         enum _ecore_status_t rc;
2246
2247         rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2248                                  p_cid->sb_igu_id * sizeof(u64),
2249                                  (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2250         if (rc != ECORE_SUCCESS) {
2251                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2252                 return rc;
2253         }
2254
2255         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2256
2257         address = BAR0_MAP_REG_XSDM_RAM +
2258                   XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2259         coalesce = ecore_rd(p_hwfn, p_ptt, address);
2260
2261         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2262         if (!is_valid)
2263                 return ECORE_INVAL;
2264
2265         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2266         *p_tx_coal = (u16)(coalesce << timer_res);
2267
2268         return ECORE_SUCCESS;
2269 }
2270
2271 enum _ecore_status_t
2272 ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
2273                          void *handle)
2274 {
2275         struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
2276         enum _ecore_status_t rc = ECORE_SUCCESS;
2277         struct ecore_ptt *p_ptt;
2278
2279         if (IS_VF(p_hwfn->p_dev)) {
2280                 rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2281                 if (rc != ECORE_SUCCESS)
2282                         DP_NOTICE(p_hwfn, false,
2283                                   "Unable to read queue coalescing\n");
2284
2285                 return rc;
2286         }
2287
2288         p_ptt = ecore_ptt_acquire(p_hwfn);
2289         if (!p_ptt)
2290                 return ECORE_AGAIN;
2291
2292         if (p_cid->b_is_rx) {
2293                 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2294                 if (rc != ECORE_SUCCESS)
2295                         goto out;
2296         } else {
2297                 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2298                 if (rc != ECORE_SUCCESS)
2299                         goto out;
2300         }
2301
2302 out:
2303         ecore_ptt_release(p_hwfn, p_ptt);
2304
2305         return rc;
2306 }
2307
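/* Configure a per-queue Tx rate limit by programming the QM rate limiter
 * that backs the queue's vport; the current link speed of the leading
 * hwfn serves as the upper bound.
 */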
2308 enum _ecore_status_t
2309 ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
2310                            struct ecore_ptt *p_ptt,
2311                            struct ecore_queue_cid *p_cid, u32 rate)
2312 {
2313         struct ecore_mcp_link_state *p_link;
2314         u8 vport;
2315
2316         vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
2317         p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
2318
2319         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2320                    "About to rate limit qm vport %d for queue %d with rate %d\n",
2321                    vport, p_cid->rel.queue_id, rate);
2322
2323         return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
2324                                    p_link->speed);
2325 }