703c1e81f0f239643621711909a3282076b5d6b0
dpdk.git: drivers/net/qede/base/ecore_sriov.c
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "reg_addr.h"
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_l2.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_vf.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"

const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_NONE",     /* ends tlv sequence */
        "CHANNEL_TLV_ACQUIRE",
        "CHANNEL_TLV_VPORT_START",
        "CHANNEL_TLV_VPORT_UPDATE",
        "CHANNEL_TLV_VPORT_TEARDOWN",
        "CHANNEL_TLV_START_RXQ",
        "CHANNEL_TLV_START_TXQ",
        "CHANNEL_TLV_STOP_RXQ",
        "CHANNEL_TLV_STOP_TXQ",
        "CHANNEL_TLV_UPDATE_RXQ",
        "CHANNEL_TLV_INT_CLEANUP",
        "CHANNEL_TLV_CLOSE",
        "CHANNEL_TLV_RELEASE",
        "CHANNEL_TLV_LIST_END",
        "CHANNEL_TLV_UCAST_FILTER",
        "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
        "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
        "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
        "CHANNEL_TLV_VPORT_UPDATE_MCAST",
        "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
        "CHANNEL_TLV_VPORT_UPDATE_RSS",
        "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
        "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
        "CHANNEL_TLV_UPDATE_TUNN_PARAM",
        "CHANNEL_TLV_MAX"
};
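
/* Note (added for clarity): the table above is indexed directly by TLV type,
 * so its entries must stay in the same order as the channel TLV enumeration
 * (see ecore_vfpf_if.h), e.g.:
 *
 *      ecore_channel_tlvs_string[CHANNEL_TLV_ACQUIRE] -> "CHANNEL_TLV_ACQUIRE"
 *
 * ecore_iov_tlv_supported() below guards such lookups against out-of-range
 * types.
 */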

/* IOV ramrods */
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
                                              struct ecore_vf_info *p_vf)
{
        struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 fp_minor;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_vf->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_VF_START,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case ECORE_PCI_ETH_ROCE:
        case ECORE_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
                return ECORE_INVAL;
        }

        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
        if (fp_minor > ETH_HSI_VER_MINOR &&
            fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is"
                           " slightly newer than PF's %02x.%02x; Configuring"
                           " PF's version\n",
                           p_vf->abs_vf_id,
                           ETH_HSI_VER_MAJOR, fp_minor,
                           ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
                fp_minor = ETH_HSI_VER_MINOR;
        }

        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[%d] - Starting using HSI %02x.%02x\n",
                   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
                                             u32 concrete_vfid,
                                             u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_VF_STOP,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
                             bool b_enabled_only, bool b_non_malicious)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
            b_non_malicious)
                return false;

        return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
                                            u16 relative_vf_id,
                                            bool b_enabled_only)
{
        struct ecore_vf_info *vf = OSAL_NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
                return OSAL_NULL;
        }

        if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
                                    b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *p_vf,
                                   u16 rx_qid)
{
        if (rx_qid >= p_vf->num_rxqs)
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x];"
                           " Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
        return rx_qid < p_vf->num_rxqs;
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *p_vf,
                                   u16 tx_qid)
{
        if (tx_qid >= p_vf->num_txqs)
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x];"
                           " Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
        return tx_qid < p_vf->num_txqs;
}

static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
                                  struct ecore_vf_info *p_vf,
                                  u16 sb_idx)
{
        int i;

        for (i = 0; i < p_vf->num_sbs; i++)
                if (p_vf->igu_sbs[i] == sb_idx)
                        return true;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
                   " one of its 0x%02x SBs\n",
                   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

        return false;
}

static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
                                          struct ecore_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_rxqs; i++)
                if (p_vf->vf_queues[i].p_rx_cid)
                        return true;

        return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
                                          struct ecore_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_txqs; i++)
                if (p_vf->vf_queues[i].p_tx_cid)
                        return true;

        return false;
}

/* TODO - this is the Linux crc32 implementation; need a way to #ifdef it out
 * on Linux builds.
 */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
        int i;

        while (length--) {
                crc ^= *ptr++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
}
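
/* Usage sketch (illustrative, not part of the original source): the bulletin
 * producer in ecore_iov_post_vf_bulletin() below seeds the CRC with 0 and
 * skips the crc field itself, so a consumer holding a `size`-byte bulletin
 * snapshot (`p_copy` is a hypothetical local copy) could validate it with:
 *
 *      int crc_size = sizeof(p_copy->crc);
 *
 *      if (ecore_crc32(0, (u8 *)p_copy + crc_size, size - crc_size) !=
 *          p_copy->crc)
 *              return ECORE_AGAIN;     // torn or stale copy; re-read
 *
 * This is the reflected (LSB-first) CRC-32 over polynomial 0xedb88320, i.e.
 * the same value Linux's crc32_le() computes for seed 0.
 */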

enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
                                                int vfid,
                                                struct ecore_ptt *p_ptt)
{
        struct ecore_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
        struct ecore_dmae_params params;
        struct ecore_vf_info *p_vf;

        p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf)
                return ECORE_INVAL;

        /* TODO - check VF is in a state where it can accept message */
        if (!p_vf->vf_bulletin)
                return ECORE_INVAL;

        p_bulletin = p_vf->bulletin.p_virt;

        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
        p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
                                      p_vf->bulletin.size - crc_size);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
                   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

        /* propagate bulletin board via dmae to vm memory */
        OSAL_MEMSET(&params, 0, sizeof(params));
        params.flags = ECORE_DMAE_FLAG_VF_DST;
        params.dst_vfid = p_vf->abs_vf_id;
        return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                    &params);
}

static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
        struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
        OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        OSAL_PCI_READ_CONFIG_WORD(p_dev,
                                  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        OSAL_PCI_READ_CONFIG_WORD(p_dev,
                                  pos + PCI_SRIOV_INITIAL_VF,
                                  &iov->initial_vfs);

        OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                /* @@@TODO - in future we might want to add an OSAL here to
                 * allow each OS to decide on its own how to act.
                 */
                DP_VERBOSE(p_dev, ECORE_MSG_IOV,
                           "Number of VFs is already set to a non-zero value;"
                           " ignoring PCI configuration value\n");
                iov->num_vfs = 0;
        }

        OSAL_PCI_READ_CONFIG_WORD(p_dev,
                                  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        OSAL_PCI_READ_CONFIG_WORD(p_dev,
                                  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        OSAL_PCI_READ_CONFIG_WORD(p_dev,
                                  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        OSAL_PCI_READ_CONFIG_DWORD(p_dev,
                                   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

        OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
                   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
                   " stride %d, page size 0x%x\n",
                   iov->nres, iov->cap, iov->ctrl,
                   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
                   iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
            iov->total_vfs > NUM_OF_VFS(p_dev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes max number of vfs
                 */
                DP_NOTICE(p_dev, false,
                          "IOV: Unexpected number of vfs set: %d;"
                          " setting num_vfs to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return ECORE_SUCCESS;
}

static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt)
{
        struct ecore_igu_block *p_sb;
        u16 sb_id;
        u32 val;

        if (!p_hwfn->hw_info.p_igu_info) {
                DP_ERR(p_hwfn,
                       "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
                return;
        }

        for (sb_id = 0;
             sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
                p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
                if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
                    !(p_sb->status & ECORE_IGU_STATUS_PF)) {
                        val = ecore_rd(p_hwfn, p_ptt,
                                       IGU_REG_MAPPING_MEMORY + sb_id * 4);
                        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                        ecore_wr(p_hwfn, p_ptt,
                                 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
                }
        }
}

static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
        struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
        struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct ecore_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "ecore_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        p_iov_info->base_vport_id = 1;  /* @@@TBD resource allocation */

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
                vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
                vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
                vf->state = VF_STOPPED;
                vf->b_init = false;

                vf->bulletin.phys = idx *
                    sizeof(struct ecore_bulletin_content) + bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct ecore_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                /* TODO - need to devise a better way of getting opaque */
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                    (vf->abs_vf_id << 8);
                /* @@TBD MichalK - add base vport_id of VFs to equation */
                vf->vport_id = p_iov_info->base_vport_id + idx;

                vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
                vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
        }
}
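
/* Worked example (illustrative): with the opaque-fid composition above, a PF
 * whose hw_info.opaque_fid low byte is 0x00 gives the VF with abs_vf_id 5 an
 * opaque_fid of (0x00 | (5 << 8)) = 0x0500 - the VF index travels in the high
 * byte while the low byte preserves the parent PF's fid.
 */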

static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
        struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
                                            &p_iov_info->mbx_msg_phys_addr,
                                            p_iov_info->mbx_msg_size);
        if (!*p_v_addr)
                return ECORE_NOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
                                            &p_iov_info->mbx_reply_phys_addr,
                                            p_iov_info->mbx_reply_size);
        if (!*p_v_addr)
                return ECORE_NOMEM;

        p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
            num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
                                            &p_iov_info->bulletins_phys,
                                            p_iov_info->bulletins_size);
        if (!*p_v_addr)
                return ECORE_NOMEM;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%lx phys], "
                   "Response mailbox [%p virt 0x%lx phys] Bulletins"
                   " [%p virt 0x%lx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (unsigned long)p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (unsigned long)p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins,
                   (unsigned long)p_iov_info->bulletins_phys);

        return ECORE_SUCCESS;
}

static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
        struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_iov_info->mbx_msg_virt_addr,
                                       p_iov_info->mbx_msg_phys_addr,
                                       p_iov_info->mbx_msg_size);

        if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_iov_info->mbx_reply_virt_addr,
                                       p_iov_info->mbx_reply_phys_addr,
                                       p_iov_info->mbx_reply_size);

        if (p_iov_info->p_bulletins)
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_iov_info->p_bulletins,
                                       p_iov_info->bulletins_phys,
                                       p_iov_info->bulletins_size);
}

enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return ECORE_SUCCESS;
        }

        p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
        if (!p_sriov) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_pf_iov'\n");
                return ECORE_NOMEM;
        }

        p_hwfn->pf_iov_info = p_sriov;

        return ecore_iov_allocate_vfdb(p_hwfn);
}

void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        ecore_iov_setup_vfdb(p_hwfn);
        ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                ecore_iov_free_vfdb(p_hwfn);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
        }
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
        OSAL_FREE(p_dev, p_dev->p_iov_info);
}

enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        int pos;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ECORE_SUCCESS;

        /* Learn the PCI configuration */
        pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
                                           PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
                return ECORE_SUCCESS;
        }

        /* Allocate a new struct for IOV information */
        /* TODO - can change to VALLOC when it's available */
        p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
                                        sizeof(*p_dev->p_iov_info));
        if (!p_dev->p_iov_info) {
                DP_NOTICE(p_hwfn, true,
                          "Can't support IOV due to lack of memory\n");
                return ECORE_NOMEM;
        }
        p_dev->p_iov_info->pos = pos;

        rc = ecore_iov_pci_cfg_info(p_dev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * In case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!p_dev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                OSAL_FREE(p_dev, p_dev->p_iov_info);
                return ECORE_SUCCESS;
        }

        /* First VF index based on offset is tricky:
         *  - If ARI is supported [likely], offset - (16 - pf_id) would
         *    provide the number for eng0. 2nd engine VFs would begin
         *    after the first engine's VFs.
         *  - If !ARI, VFs would start on the next device.
         *    So offset - (256 - pf_id) would provide the number.
         * Utilize the fact that (256 - pf_id) is achieved only by the
         * latter to differentiate between the two.
         */

        if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
                u32 first = p_hwfn->p_dev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 16;

                p_dev->p_iov_info->first_vf_in_pf = first;

                if (ECORE_PATH_ID(p_hwfn))
                        p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
        } else {
                u32 first = p_hwfn->p_dev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 256;

                p_dev->p_iov_info->first_vf_in_pf = first;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   p_dev->p_iov_info->first_vf_in_pf);

        return ECORE_SUCCESS;
}
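
/* Worked example (illustrative): in the ARI branch above, a PF with
 * abs_pf_id 0 whose SR-IOV capability reports offset 16 gets
 * first_vf_in_pf = 16 + 0 - 16 = 0; on the second engine
 * (ECORE_PATH_ID != 0) the first engine's BB VF range (MAX_NUM_VFS_BB) is
 * subtracted as well. Without ARI the offset is at least (256 - pf_id), so
 * e.g. offset 256 with abs_pf_id 0 takes the else branch and also yields
 * first_vf_in_pf = 0.
 */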

static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
                                       bool b_fail_malicious)
{
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;

        return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
        return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
                                 u16 rel_vf_id, u8 to_disable)
{
        struct ecore_vf_info *vf;
        int i;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
                                  u8 to_disable)
{
        u16 i;

        if (!IS_ECORE_SRIOV(p_dev))
                return;

        for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
                ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}

#ifndef LINUX_REMOVE
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
                                          u16               vf_id,
                                          void              *ctx)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

        if (vf != OSAL_NULL) {
                vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
                vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
        } else {
                rc = ECORE_UNKNOWN_ERROR;
        }
        return rc;
}
#endif

static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn      *p_hwfn,
                                         struct ecore_ptt       *p_ptt,
                                         u8                     abs_vfid)
{
        ecore_wr(p_hwfn, p_ptt,
                 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
                 1 << (abs_vfid & 0x1f));
}
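
/* Illustrative arithmetic for the write above: each WAS_ERROR clear register
 * covers 32 VFs, so e.g. abs_vfid 37 selects the second 32-VF window at byte
 * offset (37 >> 5) * 4 = 4 and clears bit (37 & 0x1f) = 5 within it.
 */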

static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   struct ecore_vf_info *vf)
{
        int i;

        /* Set VF masks and configuration - pretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

        ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        /* unpretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
                ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                  vf->igu_sbs[i],
                                                  vf->opaque_fid, true);
}

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt,
                                     struct ecore_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

        igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}

static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        enum _ecore_status_t rc;

        if (vf->to_disable)
                return ECORE_SUCCESS;

        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
                   ECORE_VF_ABS_ID(p_hwfn, vf));

        ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
                                     ECORE_VF_ABS_ID(p_hwfn, vf));

        ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        /* It's possible VF was previously considered malicious */
        vf->b_malicious = false;

        rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
                                      vf->abs_vf_id, vf->num_sbs);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                       p_hwfn->hw_info.hw_mode);

        /* unpretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

        vf->state = VF_FREE;

        return rc;
}

/**
 *
 * @brief ecore_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                  &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
                ecore_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}
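
/* Illustrative encoding of the {Valid, VF[7:0]} entry format documented
 * above: enabling a VF with abs_vf_id 3 writes val = 3 | (1 << 8) = 0x103 to
 * that queue zone's line, while disable simply zeroes the entry.
 */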

static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

        /* Permission Table */
        ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt,
                                     struct ecore_vf_info *vf,
                                     u16 num_rx_queues)
{
        struct ecore_igu_block *igu_blocks;
        int qid = 0, igu_id = 0;
        u32 val = 0;

        igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;

        p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        while ((qid < num_rx_queues) &&
               (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
                if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
                        struct cau_sb_entry sb_entry;

                        vf->igu_sbs[qid] = (u16)igu_id;
                        igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;

                        SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                        ecore_wr(p_hwfn, p_ptt,
                                 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
                                 val);

                        /* Configure in CAU the igu sbs that were marked
                         * valid.
                         */
                        ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
                                                p_hwfn->rel_pf_id,
                                                vf->abs_vf_id, 1);
                        ecore_dmae_host2grc(p_hwfn, p_ptt,
                                            (u64)(osal_uintptr_t)&sb_entry,
                                            CAU_REG_SB_VAR_MEMORY +
                                            igu_id * sizeof(u64), 2, 0);
                        qid++;
                }
                igu_id++;
        }

        vf->num_sbs = (u8)num_rx_queues;

        return vf->num_sbs;
}
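
/* Illustrative composition of the IGU mapping line built above: for a VF
 * with abs_vf_id 2 claiming its first queue (qid 0), `val` carries
 * FUNCTION_NUMBER = 2, VALID = 1, PF_VALID = 0 and VECTOR_NUMBER = 0, and is
 * written at IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id for the CAM line
 * taken from the free pool.
 */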

/**
 *
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanliness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt,
                                      struct ecore_vf_info *vf)
{
        struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = ecore_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                ecore_wr(p_hwfn, p_ptt, addr, val);

                p_info->igu_map.igu_blocks[igu_id].status |=
                    ECORE_IGU_STATUS_FREE;

                p_hwfn->hw_info.p_igu_info->free_blks++;
        }

        vf->num_sbs = 0;
}

void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
                        u16 vfid,
                        struct ecore_mcp_link_params *params,
                        struct ecore_mcp_link_state *link,
                        struct ecore_mcp_link_capabilities *p_caps)
{
        struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
        struct ecore_bulletin_content *p_bulletin;

        if (!p_vf)
                return;

        p_bulletin = p_vf->bulletin.p_virt;
        p_bulletin->req_autoneg = params->speed.autoneg;
        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
        p_bulletin->req_forced_speed = params->speed.forced_speed;
        p_bulletin->req_autoneg_pause = params->pause.autoneg;
        p_bulletin->req_forced_rx = params->pause.forced_rx;
        p_bulletin->req_forced_tx = params->pause.forced_tx;
        p_bulletin->req_loopback = params->loopback_mode;

        p_bulletin->link_up = link->link_up;
        p_bulletin->speed = link->speed;
        p_bulletin->full_duplex = link->full_duplex;
        p_bulletin->autoneg = link->an;
        p_bulletin->autoneg_complete = link->an_complete;
        p_bulletin->parallel_detection = link->parallel_detection;
        p_bulletin->pfc_enabled = link->pfc_enabled;
        p_bulletin->partner_adv_speed = link->partner_adv_speed;
        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
        p_bulletin->partner_adv_pause = link->partner_adv_pause;
        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

        p_bulletin->capability_speed = p_caps->speed_capabilities;
}

enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
                         struct ecore_ptt *p_ptt,
                         struct ecore_iov_vf_init_params *p_params)
{
        struct ecore_mcp_link_capabilities link_caps;
        struct ecore_mcp_link_params link_params;
        struct ecore_mcp_link_state link_state;
        u8 num_of_vf_available_chains = 0;
        struct ecore_vf_info *vf = OSAL_NULL;
        u16 qid, num_irqs;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 cids;
        u8 i;

        vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
                return ECORE_UNKNOWN_ERROR;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
                          p_params->rel_vf_id);
                return ECORE_INVAL;
        }

        /* Perform sanity checking on the requested queue_id */
        for (i = 0; i < p_params->num_queues; i++) {
                u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
                u16 max_vf_qzone = min_vf_qzone +
                                   FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;

                qid = p_params->req_rx_queue[i];
                if (qid < min_vf_qzone || qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn, true,
                                  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
                                  qid, p_params->rel_vf_id,
                                  min_vf_qzone, max_vf_qzone);
                        return ECORE_INVAL;
                }

                qid = p_params->req_tx_queue[i];
                if (qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn, true,
                                  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
                                  qid, p_params->rel_vf_id, max_vf_qzone);
                        return ECORE_INVAL;
                }

                /* If client *really* wants, Tx qid can be shared with PF */
                if (qid < min_vf_qzone)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
                                   p_params->rel_vf_id, qid, i);
        }

        /* Limit number of queues according to number of CIDs */
        ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues"
                   " [0x%04x CIDs available]\n",
                   vf->relative_vf_id, p_params->num_queues, (u16)cids);
        num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

        num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
                                                                p_ptt,
                                                                vf,
                                                                num_irqs);
        if (num_of_vf_available_chains == 0) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return ECORE_NOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_available_chains;
        vf->num_txqs = num_of_vf_available_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];

                p_queue->fw_rx_qid = p_params->req_rx_queue[i];
                p_queue->fw_tx_qid = p_params->req_tx_queue[i];

                /* CIDs are per-VF, so no problem having them 0-based. */
                p_queue->fw_cid = i;

                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]  CID %04x\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i],
                           p_queue->fw_rx_qid, p_queue->fw_tx_qid,
                           p_queue->fw_cid);
        }

        /* Update the link configuration in bulletin. */
        OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
                    sizeof(link_params));
        OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
                    sizeof(link_state));
        OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
                    sizeof(link_caps));
        ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
                           &link_params, &link_state, &link_caps);

        rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

        if (rc == ECORE_SUCCESS) {
                vf->b_init = true;
                p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
                        (1ULL << (vf->relative_vf_id % 64));

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->p_dev->p_iov_info->num_vfs++;
        }

        return rc;
}
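
/* Illustrative bitmap arithmetic for the active_vfs marking above: VF 70
 * lands in word 70 / 64 = 1, bit 70 % 64 = 6, i.e.
 * active_vfs[1] |= (1ULL << 6). The release path below clears the same
 * word/bit pair.
 */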

enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 u16 rel_vf_id)
{
        struct ecore_mcp_link_capabilities caps;
        struct ecore_mcp_link_params params;
        struct ecore_mcp_link_state link;
        struct ecore_vf_info *vf = OSAL_NULL;

        vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
                DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
                return ECORE_UNKNOWN_ERROR;
        }

        if (vf->bulletin.p_virt)
                OSAL_MEMSET(vf->bulletin.p_virt, 0,
                            sizeof(*vf->bulletin.p_virt));

        OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

        /* Get the link configuration back in bulletin so
         * that when VFs are re-enabled they get the actual
         * link configuration.
         */
        OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
        OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
        OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
                    sizeof(caps));
        ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

        /* Forget the VF's acquisition message */
        OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

        /* Disabling interrupts and resetting the permission table were done
         * during vf-close; however, we could get here without going through
         * vf_close.
         */
        /* Disable Interrupts for VF */
        ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

        /* Reset Permission table */
        ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

        if (vf->b_init) {
                vf->b_init = false;
                p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
                                        ~(1ULL << (vf->relative_vf_id % 64));

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->p_dev->p_iov_info->num_vfs--;
        }

        return ECORE_SUCCESS;
}

static bool ecore_iov_tlv_supported(u16 tlvtype)
{
        return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
                                         struct ecore_vf_info *vf, u16 tlv)
{
        /* lock the channel */
        /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

        /* record the locking op */
        /* vf->op_current = tlv; @@@TBD MichalK */

        /* log the lock */
        if (ecore_iov_tlv_supported(tlv))
                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_IOV,
                           "VF[%d]: vf pf channel locked by %s\n",
                           vf->abs_vf_id,
                           ecore_channel_tlvs_string[tlv]);
        else
                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_IOV,
                           "VF[%d]: vf pf channel locked by %04x\n",
                           vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
                                           struct ecore_vf_info *vf,
                                           u16 expected_tlv)
{
        /* log the unlock */
        if (ecore_iov_tlv_supported(expected_tlv))
                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_IOV,
                           "VF[%d]: vf pf channel unlocked by %s\n",
                           vf->abs_vf_id,
                           ecore_channel_tlvs_string[expected_tlv]);
        else
                DP_VERBOSE(p_hwfn,
                           ECORE_MSG_IOV,
                           "VF[%d]: vf pf channel unlocked by %04x\n",
                           vf->abs_vf_id, expected_tlv);

        /* record the locking op */
        /* vf->op_current = CHANNEL_TLV_NONE; */
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
                    u8 **offset, u16 type, u16 length)
{
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;

        /* Offset should keep pointing to next TLV (the end of the last) */
        *offset += length;

        /* Return a pointer to the start of the added tlv */
        return *offset - length;
}
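
/* Usage sketch (illustrative): replies are built as a TLV chain by repeatedly
 * advancing `offset`, exactly as ecore_iov_prepare_resp() does below:
 *
 *      mbx->offset = (u8 *)mbx->reply_virt;
 *      ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
 *      ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *                    sizeof(struct channel_list_end_tlv));
 *
 * The returned pointer addresses the newly placed TLV, so a caller can fill
 * in the body behind the header it just wrote.
 */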

/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
        u16 i = 1, total_length = 0;
        struct channel_tlv *tlv;

        do {
                /* cast current tlv list entry to channel tlv header */
                tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

                /* output tlv */
                if (ecore_iov_tlv_supported(tlv->type))
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "TLV number %d: type %s, length %d\n",
                                   i, ecore_channel_tlvs_string[tlv->type],
                                   tlv->length);
                else
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                                   "TLV number %d: type %d, length %d\n",
                                   i, tlv->type, tlv->length);

                if (tlv->type == CHANNEL_TLV_LIST_END)
                        return;

                /* Validate entry - protect against malicious VFs */
                if (!tlv->length) {
                        DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
                        return;
                }
                total_length += tlv->length;
                if (total_length >= sizeof(struct tlv_buffer_size)) {
                        DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
                        return;
                }

                i++;
        } while (1);
}

static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    struct ecore_vf_info *p_vf,
                                    u16 length, u8 status)
{
        struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct ecore_dmae_params params;
        u8 eng_vf_id;

        mbx->reply_virt->default_resp.hdr.status = status;

        ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
        mbx->sw_mbx.response_size =
            length + sizeof(struct channel_list_end_tlv);

        if (!p_hwfn->p_dev->b_hw_channel)
                return;
#endif

        eng_vf_id = p_vf->abs_vf_id;

        OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
        params.flags = ECORE_DMAE_FLAG_VF_DST;
        params.dst_vfid = eng_vf_id;

        ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
                             mbx->req_virt->first_tlv.reply_address +
                             sizeof(u64),
                             (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                             &params);

        ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                             mbx->req_virt->first_tlv.reply_address,
                             sizeof(u64) / 4, &params);

        REG_WR(p_hwfn,
               GTT_BAR0_MAP_REG_USDM_RAM +
               USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}

static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
                                  enum ecore_iov_vport_update_flag flag)
{
        switch (flag) {
        case ECORE_IOV_VP_UPDATE_ACTIVATE:
                return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
        case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
                return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
        case ECORE_IOV_VP_UPDATE_TX_SWITCH:
                return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
        case ECORE_IOV_VP_UPDATE_MCAST:
                return CHANNEL_TLV_VPORT_UPDATE_MCAST;
        case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
        case ECORE_IOV_VP_UPDATE_RSS:
                return CHANNEL_TLV_VPORT_UPDATE_RSS;
        case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
                return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
        case ECORE_IOV_VP_UPDATE_SGE_TPA:
                return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
        default:
                return 0;
        }
}

static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
                                              struct ecore_vf_info *p_vf,
                                              struct ecore_iov_vf_mbx *p_mbx,
                                              u8 status, u16 tlvs_mask,
                                              u16 tlvs_accepted)
{
        struct pfvf_def_resp_tlv *resp;
        u16 size, total_len, i;

        OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
        p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;

        ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & (1 << i)))
                        continue;

                resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
                                     ecore_iov_vport_to_tlv(p_hwfn, i), size);

                if (tlvs_accepted & (1 << i))
                        resp->hdr.status = status;
                else
                        resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[%d] - vport_update resp: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
                           ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

                total_len += size;
        }

        ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        return total_len;
}

static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   struct ecore_vf_info *vf_info,
                                   u16 type, u16 length, u8 status)
{
        struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

        mbx->offset = (u8 *)mbx->reply_virt;

        ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
        ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));

        ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

        OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}

struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
                              u16 relative_vf_id,
                              bool b_enabled_only)
{
        struct ecore_vf_info *vf = OSAL_NULL;

        vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
        if (!vf)
                return OSAL_NULL;

        return &vf->p_vf_info;
}

static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
                                 struct ecore_vf_info *p_vf)
{
        u32 i;

        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
        p_vf->configured_features = 0;

        /* If VF previously requested fewer resources, go back to default */
        p_vf->num_rxqs = p_vf->num_sbs;
        p_vf->num_txqs = p_vf->num_sbs;

        p_vf->num_active_rxqs = 0;

        for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
                struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];

                if (p_queue->p_rx_cid) {
                        ecore_eth_queue_cid_release(p_hwfn,
                                                    p_queue->p_rx_cid);
                        p_queue->p_rx_cid = OSAL_NULL;
                }

                if (p_queue->p_tx_cid) {
                        ecore_eth_queue_cid_release(p_hwfn,
                                                    p_queue->p_tx_cid);
                        p_queue->p_tx_cid = OSAL_NULL;
                }
        }

        OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
        OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
        OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}

static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1454                                         struct ecore_ptt *p_ptt,
1455                                         struct ecore_vf_info *p_vf,
1456                                         struct vf_pf_resc_request *p_req,
1457                                         struct pf_vf_resc *p_resp)
1458 {
1459         int i;
1460
1461         /* Queue related information */
1462         p_resp->num_rxqs = p_vf->num_rxqs;
1463         p_resp->num_txqs = p_vf->num_txqs;
1464         p_resp->num_sbs = p_vf->num_sbs;
1465
1466         for (i = 0; i < p_resp->num_sbs; i++) {
1467                 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1468                 /* TODO - what's this sb_qid field? Is it deprecated?
1469                  * or is there an ecore_client that looks at this?
1470                  */
1471                 p_resp->hw_sbs[i].sb_qid = 0;
1472         }
1473
1474         /* These fields are filled for backward compatibility.
1475          * Unused by modern VFs.
1476          */
1477         for (i = 0; i < p_resp->num_rxqs; i++) {
1478                 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1479                                   (u16 *)&p_resp->hw_qid[i]);
1480                 p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1481         }
1482
1483         /* Filter related information */
1484         p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1485                                              p_req->num_mac_filters);
1486         p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1487                                               p_req->num_vlan_filters);
1488
1489         /* This isn't really needed/enforced, but some legacy VFs might depend
1490          * on the correct filling of this field.
1491          */
1492         p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1493
1494         /* Validate sufficient resources for VF */
1495         if (p_resp->num_rxqs < p_req->num_rxqs ||
1496             p_resp->num_txqs < p_req->num_txqs ||
1497             p_resp->num_sbs < p_req->num_sbs ||
1498             p_resp->num_mac_filters < p_req->num_mac_filters ||
1499             p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1500             p_resp->num_mc_filters < p_req->num_mc_filters) {
1501                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1502                            "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
1503                            " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
1504                            " vlan [%02x/%02x] mc [%02x/%02x]\n",
1505                            p_vf->abs_vf_id,
1506                            p_req->num_rxqs, p_resp->num_rxqs,
1507                            p_req->num_txqs, p_resp->num_txqs,
1508                            p_req->num_sbs, p_resp->num_sbs,
1509                            p_req->num_mac_filters, p_resp->num_mac_filters,
1510                            p_req->num_vlan_filters, p_resp->num_vlan_filters,
1511                            p_req->num_mc_filters, p_resp->num_mc_filters);
1512
1513                 /* Some legacy OSes are incapable of correctly handling this
1514                  * failure.
1515                  */
1516                 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1517                      ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1518                     (p_vf->acquire.vfdev_info.os_type ==
1519                      VFPF_ACQUIRE_OS_WINDOWS))
1520                         return PFVF_STATUS_SUCCESS;
1521
1522                 return PFVF_STATUS_NO_RESOURCE;
1523         }
1524
1525         return PFVF_STATUS_SUCCESS;
1526 }
1527
1528 static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
1529                                            struct pfvf_stats_info *p_stats)
1530 {
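        /* The addresses below are the VF BAR0 zone-B locations from which
         * the VF reads its per-queue MSTORM/USTORM/PSTORM statistics.
         * TSTORM per-queue statistics are not exposed to VFs, hence the
         * zeroed address and length.
         */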
1531         p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1532                                   OFFSETOF(struct mstorm_vf_zone,
1533                                            non_trigger.eth_queue_stat);
1534         p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1535         p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1536                                   OFFSETOF(struct ustorm_vf_zone,
1537                                            non_trigger.eth_queue_stat);
1538         p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1539         p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1540                                   OFFSETOF(struct pstorm_vf_zone,
1541                                            non_trigger.eth_queue_stat);
1542         p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1543         p_stats->tstats.address = 0;
1544         p_stats->tstats.len = 0;
1545 }
1546
1547 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
1548                                      struct ecore_ptt        *p_ptt,
1549                                      struct ecore_vf_info    *vf)
1550 {
1551         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1552         struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1553         struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1554         struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1555         u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1556         struct pf_vf_resc *resc = &resp->resc;
1557         enum _ecore_status_t rc;
1558
1559         OSAL_MEMSET(resp, 0, sizeof(*resp));
1560
1561         /* Write the PF version so that the VF knows which version is
1562          * supported - might be overridden later. This guarantees that the
1563          * VF can recognize a legacy PF by the lack of versions in the reply.
1564          */
1565         pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1566         pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1567
1568         /* TODO - not doing anything is bad since we'll assert, but this isn't
1569          * necessarily the right behavior - perhaps we should have allowed some
1570          * versatility here.
1571          */
1572         if (vf->state != VF_FREE &&
1573             vf->state != VF_STOPPED) {
1574                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1575                            "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1576                            vf->abs_vf_id, vf->state);
1577                 goto out;
1578         }
1579
1580         /* Validate FW compatibility */
1581         if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1582                 if (req->vfdev_info.capabilities &
1583                     VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1584                         struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1585
1586                         /* This legacy support would need to be removed once
1587                          * the major has changed.
1588                          */
1589                         OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1590
1591                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1592                                    "VF[%d] is pre-fastpath HSI\n",
1593                                    vf->abs_vf_id);
1594                         p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1595                         p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1596                 } else {
1597                         DP_INFO(p_hwfn,
1598                                 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1599                                 " incompatible with loaded FW's faspath"
1600                                 " HSI %02x.%02x\n",
1601                                 vf->abs_vf_id,
1602                                 req->vfdev_info.eth_fp_hsi_major,
1603                                 req->vfdev_info.eth_fp_hsi_minor,
1604                                 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1605
1606                         goto out;
1607                 }
1608         }
1609
1610         /* On 100g PFs, prevent old VFs from loading */
1611         if ((p_hwfn->p_dev->num_hwfns > 1) &&
1612             !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1613                 DP_INFO(p_hwfn,
1614                         "VF[%d] is running an old driver that doesn't support"
1615                         " 100g\n",
1616                         vf->abs_vf_id);
1617                 goto out;
1618         }
1619
1620 #ifndef __EXTRACT__LINUX__
1621         if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1622                 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1623                 goto out;
1624         }
1625 #endif
1626
1627         /* Store the acquire message */
1628         OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1629
1630         vf->opaque_fid = req->vfdev_info.opaque_fid;
1631
1632         vf->vf_bulletin = req->bulletin_addr;
1633         vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1634             vf->bulletin.size : req->bulletin_size;
1635
1636         /* fill in pfdev info */
1637         pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1638         pfdev_info->db_size = 0;        /* @@@ TBD MichalK Vf Doorbells */
1639         pfdev_info->indices_per_sb = PIS_PER_SB;
1640
1641         pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1642                                    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1643         if (p_hwfn->p_dev->num_hwfns > 1)
1644                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1645
1646         ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1647
1648         OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1649                     ETH_ALEN);
1650
1651         pfdev_info->fw_major = FW_MAJOR_VERSION;
1652         pfdev_info->fw_minor = FW_MINOR_VERSION;
1653         pfdev_info->fw_rev = FW_REVISION_VERSION;
1654         pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1655
1656         /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1657          * this field.
1658          */
1659         pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1660                                               req->vfdev_info.eth_fp_hsi_minor);
1661         pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1662         ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1663                               OSAL_NULL);
1664
1665         pfdev_info->dev_type = p_hwfn->p_dev->type;
1666         pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1667
1668         /* Fill resources available to VF; Make sure there are enough to
1669          * satisfy the VF's request.
1670          */
1671         vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1672                                                     &req->resc_request, resc);
1673         if (vfpf_status != PFVF_STATUS_SUCCESS)
1674                 goto out;
1675
1676         /* Start the VF in FW */
1677         rc = ecore_sp_vf_start(p_hwfn, vf);
1678         if (rc != ECORE_SUCCESS) {
1679                 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1680                           vf->abs_vf_id);
1681                 vfpf_status = PFVF_STATUS_FAILURE;
1682                 goto out;
1683         }
1684
1685         /* Fill agreed size of bulletin board in response, and post
1686          * an initial image to the bulletin board.
1687          */
1688         resp->bulletin_size = vf->bulletin.size;
1689         ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1690
1691         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1692                    "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1693                    " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1694                    "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1695                    " n_vlans-%d\n",
1696                    vf->abs_vf_id, resp->pfdev_info.chip_num,
1697                    resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1698                    (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1699                    resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1700                    resc->num_vlan_filters);
1701
1702         vf->state = VF_ACQUIRED;
1703
1704 out:
1705         /* Prepare Response */
1706         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1707                                sizeof(struct pfvf_acquire_resp_tlv),
1708                                vfpf_status);
1709 }
1710
1711 static enum _ecore_status_t
1712 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1713                          struct ecore_vf_info *p_vf, bool val)
1714 {
1715         struct ecore_sp_vport_update_params params;
1716         enum _ecore_status_t rc;
1717
1718         if (val == p_vf->spoof_chk) {
1719                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1720                            "Spoofchk value[%d] is already configured\n", val);
1721                 return ECORE_SUCCESS;
1722         }
1723
1724         OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1725         params.opaque_fid = p_vf->opaque_fid;
1726         params.vport_id = p_vf->vport_id;
1727         params.update_anti_spoofing_en_flg = 1;
1728         params.anti_spoofing_en = val;
1729
1730         rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1731                                    OSAL_NULL);
1732         if (rc == ECORE_SUCCESS) {
1733                 p_vf->spoof_chk = val;
1734                 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1735                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1736                            "Spoofchk val[%d] configured\n", val);
1737         } else {
1738                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1739                            "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1740                            val, p_vf->relative_vf_id);
1741         }
1742
1743         return rc;
1744 }
1745
1746 static enum _ecore_status_t
1747 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1748                                    struct ecore_vf_info *p_vf)
1749 {
1750         struct ecore_filter_ucast filter;
1751         enum _ecore_status_t rc = ECORE_SUCCESS;
1752         int i;
1753
1754         OSAL_MEMSET(&filter, 0, sizeof(filter));
1755         filter.is_rx_filter = 1;
1756         filter.is_tx_filter = 1;
1757         filter.vport_to_add_to = p_vf->vport_id;
1758         filter.opcode = ECORE_FILTER_ADD;
1759
1760         /* Reconfigure vlans */
1761         for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1762                 if (!p_vf->shadow_config.vlans[i].used)
1763                         continue;
1764
1765                 filter.type = ECORE_FILTER_VLAN;
1766                 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1767                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1768                            "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1769                            filter.vlan, p_vf->relative_vf_id);
1770                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1771                                                &filter, ECORE_SPQ_MODE_CB,
1772                                                OSAL_NULL);
1773                 if (rc) {
1774                         DP_NOTICE(p_hwfn, true,
1775                                   "Failed to configure VLAN [%04x]"
1776                                   " to VF [%04x]\n",
1777                                   filter.vlan, p_vf->relative_vf_id);
1778                         break;
1779                 }
1780         }
1781
1782         return rc;
1783 }
1784
1785 static enum _ecore_status_t
1786 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1787                                      struct ecore_vf_info *p_vf, u64 events)
1788 {
1789         enum _ecore_status_t rc = ECORE_SUCCESS;
1790
1791         /* TODO - what about MACs? */
1792
1793         if ((events & (1 << VLAN_ADDR_FORCED)) &&
1794             !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1795                 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1796
1797         return rc;
1798 }
1799
1800 static enum _ecore_status_t
1801 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1802                                  struct ecore_vf_info *p_vf,
1803                                  u64 events)
1804 {
1805         enum _ecore_status_t rc = ECORE_SUCCESS;
1806         struct ecore_filter_ucast filter;
1807
1808         if (!p_vf->vport_instance)
1809                 return ECORE_INVAL;
1810
1811         if (events & (1 << MAC_ADDR_FORCED)) {
1812                 /* Since there's no way [currently] of removing the MAC,
1813                  * we can always assume this means we need to force it.
1814                  */
1815                 OSAL_MEMSET(&filter, 0, sizeof(filter));
1816                 filter.type = ECORE_FILTER_MAC;
1817                 filter.opcode = ECORE_FILTER_REPLACE;
1818                 filter.is_rx_filter = 1;
1819                 filter.is_tx_filter = 1;
1820                 filter.vport_to_add_to = p_vf->vport_id;
1821                 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1822
1823                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1824                                                &filter,
1825                                                ECORE_SPQ_MODE_CB, OSAL_NULL);
1826                 if (rc) {
1827                         DP_NOTICE(p_hwfn, true,
1828                                   "PF failed to configure MAC for VF\n");
1829                         return rc;
1830                 }
1831
1832                 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1833         }
1834
1835         if (events & (1 << VLAN_ADDR_FORCED)) {
1836                 struct ecore_sp_vport_update_params vport_update;
1837                 u8 removal;
1838                 int i;
1839
1840                 OSAL_MEMSET(&filter, 0, sizeof(filter));
1841                 filter.type = ECORE_FILTER_VLAN;
1842                 filter.is_rx_filter = 1;
1843                 filter.is_tx_filter = 1;
1844                 filter.vport_to_add_to = p_vf->vport_id;
1845                 filter.vlan = p_vf->bulletin.p_virt->pvid;
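                /* A non-zero forced pvid is installed via REPLACE; a pvid of
                 * zero means the forced VLAN is being removed, so FLUSH is
                 * used to drop the vport's existing VLAN filters instead.
                 */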
1846                 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
1847                     ECORE_FILTER_FLUSH;
1848
1849                 /* Send the ramrod */
1850                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1851                                                &filter,
1852                                                ECORE_SPQ_MODE_CB, OSAL_NULL);
1853                 if (rc) {
1854                         DP_NOTICE(p_hwfn, true,
1855                                   "PF failed to configure VLAN for VF\n");
1856                         return rc;
1857                 }
1858
1859                 /* Update the default-vlan & silent vlan stripping */
1860                 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
1861                 vport_update.opaque_fid = p_vf->opaque_fid;
1862                 vport_update.vport_id = p_vf->vport_id;
1863                 vport_update.update_default_vlan_enable_flg = 1;
1864                 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1865                 vport_update.update_default_vlan_flg = 1;
1866                 vport_update.default_vlan = filter.vlan;
1867
1868                 vport_update.update_inner_vlan_removal_flg = 1;
1869                 removal = filter.vlan ?
1870                     1 : p_vf->shadow_config.inner_vlan_removal;
1871                 vport_update.inner_vlan_removal_flg = removal;
1872                 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1873                 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
1874                                            ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
1875                 if (rc) {
1876                         DP_NOTICE(p_hwfn, true,
1877                                   "PF failed to configure VF vport for vlan\n");
1878                         return rc;
1879                 }
1880
1881                 /* Update all the Rx queues */
1882                 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1883                         struct ecore_queue_cid *p_cid;
1884
1885                         p_cid = p_vf->vf_queues[i].p_rx_cid;
1886                         if (p_cid == OSAL_NULL)
1887                                 continue;
1888
1889                         rc = ecore_sp_eth_rx_queues_update(p_hwfn,
1890                                                            (void **)&p_cid,
1891                                                            1, 0, 1,
1892                                                            ECORE_SPQ_MODE_EBLOCK,
1893                                                            OSAL_NULL);
1894                         if (rc) {
1895                                 DP_NOTICE(p_hwfn, true,
1896                                           "Failed to send Rx update"
1897                                           " fo queue[0x%04x]\n",
1898                                           p_cid->rel.queue_id);
1899                                 return rc;
1900                         }
1901                 }
1902
1903                 if (filter.vlan)
1904                         p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1905                 else
1906                         p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
1907         }
1908
1909         /* If forced features are terminated, we need to configure the shadow
1910          * configuration back again.
1911          */
1912         if (events)
1913                 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1914
1915         return rc;
1916 }
1917
1918 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
1919                                          struct ecore_ptt *p_ptt,
1920                                          struct ecore_vf_info *vf)
1921 {
1922         struct ecore_sp_vport_start_params params = { 0 };
1923         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1924         struct vfpf_vport_start_tlv *start;
1925         u8 status = PFVF_STATUS_SUCCESS;
1926         struct ecore_vf_info *vf_info;
1927         u64 *p_bitmap;
1928         int sb_id;
1929         enum _ecore_status_t rc;
1930
1931         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
1932         if (!vf_info) {
1933                 DP_NOTICE(p_hwfn->p_dev, true,
1934                           "Failed to get VF info, invalid vfid [%d]\n",
1935                           vf->relative_vf_id);
1936                 return;
1937         }
1938
1939         vf->state = VF_ENABLED;
1940         start = &mbx->req_virt->start_vport;
1941
1942         /* Initialize Status block in CAU */
1943         for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1944                 if (!start->sb_addr[sb_id]) {
1945                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1946                                    "VF[%d] did not fill the address of SB %d\n",
1947                                    vf->relative_vf_id, sb_id);
1948                         break;
1949                 }
1950
1951                 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
1952                                       start->sb_addr[sb_id],
1953                                       vf->igu_sbs[sb_id],
1954                                       vf->abs_vf_id, 1);
1955         }
1956         ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1957
1958         vf->mtu = start->mtu;
1959         vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1960
1961         /* Take into consideration configuration forced by hypervisor;
1962          * if none is configured, use the supplied VF values [still fine
1963          * for old VFs, since they passed '0' as padding].
1964          */
1965         p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1966         if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1967                 u8 vf_req = start->only_untagged;
1968
1969                 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1970                 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1971         }
1972
1973         params.tpa_mode = start->tpa_mode;
1974         params.remove_inner_vlan = start->inner_vlan_removal;
1975         params.tx_switching = true;
1976
1977 #ifndef ASIC_ONLY
1978         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1979                 DP_NOTICE(p_hwfn, false,
1980                           "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
1981                 params.tx_switching = false;
1982         }
1983 #endif
1984
1985         params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1986         params.drop_ttl0 = false;
1987         params.concrete_fid = vf->concrete_fid;
1988         params.opaque_fid = vf->opaque_fid;
1989         params.vport_id = vf->vport_id;
1990         params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1991         params.mtu = vf->mtu;
1992         params.check_mac = true;
1993
1994         rc = ecore_sp_eth_vport_start(p_hwfn, &params);
1995         if (rc != ECORE_SUCCESS) {
1996                 DP_ERR(p_hwfn,
1997                        "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
1998                 status = PFVF_STATUS_FAILURE;
1999         } else {
2000                 vf->vport_instance++;
2001
2002                 /* Force configuration if needed on the newly opened vport */
2003                 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2004                 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2005                                           vf->vport_id, vf->opaque_fid);
2006                 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2007         }
2008
2009         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2010                                sizeof(struct pfvf_def_resp_tlv), status);
2011 }
2012
2013 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2014                                         struct ecore_ptt *p_ptt,
2015                                         struct ecore_vf_info *vf)
2016 {
2017         u8 status = PFVF_STATUS_SUCCESS;
2018         enum _ecore_status_t rc;
2019
2020         vf->vport_instance--;
2021         vf->spoof_chk = false;
2022
2023         if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
2024             (ecore_iov_validate_active_txq(p_hwfn, vf))) {
2025                 vf->b_malicious = true;
2026                 DP_NOTICE(p_hwfn, false,
2027                           "VF [%02x] - considered malicious;"
2028                           " Unable to stop RX/TX queuess\n",
2029                           vf->abs_vf_id);
2030         }
2031
2032         rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2033         if (rc != ECORE_SUCCESS) {
2034                 DP_ERR(p_hwfn,
2035                        "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2036                 status = PFVF_STATUS_FAILURE;
2037         }
2038
2039         /* Forget the configuration on the vport */
2040         vf->configured_features = 0;
2041         OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2042
2043         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2044                                sizeof(struct pfvf_def_resp_tlv), status);
2045 }
2046
2047 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2048                                             struct ecore_ptt *p_ptt,
2049                                             struct ecore_vf_info *vf,
2050                                             u8 status, bool b_legacy)
2051 {
2052         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2053         struct pfvf_start_queue_resp_tlv *p_tlv;
2054         struct vfpf_start_rxq_tlv *req;
2055         u16 length;
2056
2057         mbx->offset = (u8 *)mbx->reply_virt;
2058
2059         /* Taking a bigger struct instead of adding a TLV to list was a
2060          * mistake, but one which we're now stuck with, as some older
2061          * clients assume the size of the previous response.
2062          */
2063         if (!b_legacy)
2064                 length = sizeof(*p_tlv);
2065         else
2066                 length = sizeof(struct pfvf_def_resp_tlv);
2067
2068         p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2069                               length);
2070         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2071                       sizeof(struct channel_list_end_tlv));
2072
2073         /* Update the TLV with the response */
2074         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2075                 req = &mbx->req_virt->start_rxq;
2076                 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2077                                 OFFSETOF(struct mstorm_vf_zone,
2078                                          non_trigger.eth_rx_queue_producers) +
2079                                 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2080         }
2081
2082         ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2083 }
2084
2085 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2086                                        struct ecore_ptt *p_ptt,
2087                                        struct ecore_vf_info *vf)
2088 {
2089         struct ecore_queue_start_common_params params;
2090         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2091         u8 status = PFVF_STATUS_NO_RESOURCE;
2092         struct ecore_vf_q_info *p_queue;
2093         struct vfpf_start_rxq_tlv *req;
2094         bool b_legacy_vf = false;
2095         enum _ecore_status_t rc;
2096
2097         req = &mbx->req_virt->start_rxq;
2098
2099         if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
2100             !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2101                 goto out;
2102
2103         /* Acquire a new queue-cid */
2104         p_queue = &vf->vf_queues[req->rx_qid];
2105
2106         OSAL_MEMSET(&params, 0, sizeof(params));
2107         params.queue_id = (u8)p_queue->fw_rx_qid;
2108         params.vport_id = vf->vport_id;
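        /* The first 0x10 statistics counters are apparently reserved for
         * the PFs; VF counters are indexed from abs_vf_id + 0x10.
         */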
2109         params.stats_id = vf->abs_vf_id + 0x10;
2110         params.sb = req->hw_sb;
2111         params.sb_idx = req->sb_index;
2112
2113         p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
2114                                                     vf->opaque_fid,
2115                                                     p_queue->fw_cid,
2116                                                     (u8)req->rx_qid,
2117                                                     &params);
2118         if (p_queue->p_rx_cid == OSAL_NULL)
2119                 goto out;
2120
2121         /* Legacy VFs have their Producers in a different location, which they
2122          * calculate on their own and clean the producer prior to this.
2123          */
2124         if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2125             ETH_HSI_VER_NO_PKT_LEN_TUNN)
2126                 b_legacy_vf = true;
2127         else
2128                 REG_WR(p_hwfn,
2129                        GTT_BAR0_MAP_REG_MSDM_RAM +
2130                        MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2131                        0);
2132         p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
2133
2135         rc = ecore_eth_rxq_start_ramrod(p_hwfn,
2136                                         p_queue->p_rx_cid,
2137                                         req->bd_max_bytes,
2138                                         req->rxq_addr,
2139                                         req->cqe_pbl_addr,
2140                                         req->cqe_pbl_size);
2141         if (rc != ECORE_SUCCESS) {
2142                 status = PFVF_STATUS_FAILURE;
2143                 ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
2144                 p_queue->p_rx_cid = OSAL_NULL;
2145         } else {
2146                 status = PFVF_STATUS_SUCCESS;
2147                 vf->num_active_rxqs++;
2148         }
2149
2150 out:
2151         ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2152                                         b_legacy_vf);
2153 }
2154
2155 static void
2156 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2157                                  struct ecore_tunnel_info *p_tun,
2158                                  u16 tunn_feature_mask)
2159 {
2160         p_resp->tunn_feature_mask = tunn_feature_mask;
2161         p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2162         p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2163         p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2164         p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2165         p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2166         p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2167         p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2168         p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2169         p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2170         p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2171         p_resp->geneve_udp_port = p_tun->geneve_port.port;
2172         p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2173 }
2174
2175 static void
2176 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2177                                 struct ecore_tunn_update_type *p_tun,
2178                                 enum ecore_tunn_mode mask, u8 tun_cls)
2179 {
2180         if (p_req->tun_mode_update_mask & (1 << mask)) {
2181                 p_tun->b_update_mode = true;
2182
2183                 if (p_req->tunn_mode & (1 << mask))
2184                         p_tun->b_mode_enabled = true;
2185         }
2186
2187         p_tun->tun_cls = tun_cls;
2188 }
2189
2190 static void
2191 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2192                               struct ecore_tunn_update_type *p_tun,
2193                               struct ecore_tunn_update_udp_port *p_port,
2194                               enum ecore_tunn_mode mask,
2195                               u8 tun_cls, u8 update_port, u16 port)
2196 {
2197         if (update_port) {
2198                 p_port->b_update_port = true;
2199                 p_port->port = port;
2200         }
2201
2202         __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2203 }
2204
2205 static bool
2206 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2207 {
2208         bool b_update_requested = false;
2209
2210         if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2211             p_req->update_geneve_port || p_req->update_vxlan_port)
2212                 b_update_requested = true;
2213
2214         return b_update_requested;
2215 }
2216
2217 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2218                                                struct ecore_ptt *p_ptt,
2219                                                struct ecore_vf_info *p_vf)
2220 {
2221         struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2222         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2223         struct pfvf_update_tunn_param_tlv *p_resp;
2224         struct vfpf_update_tunn_param_tlv *p_req;
2225         enum _ecore_status_t rc = ECORE_SUCCESS;
2226         u8 status = PFVF_STATUS_SUCCESS;
2227         bool b_update_required = false;
2228         struct ecore_tunnel_info tunn;
2229         u16 tunn_feature_mask = 0;
2230
2231         mbx->offset = (u8 *)mbx->reply_virt;
2232
2233         OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2234         p_req = &mbx->req_virt->tunn_param_update;
2235
2236         if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2237                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2238                            "No tunnel update requested by VF\n");
2239                 status = PFVF_STATUS_FAILURE;
2240                 goto send_resp;
2241         }
2242
2243         tunn.b_update_rx_cls = p_req->update_tun_cls;
2244         tunn.b_update_tx_cls = p_req->update_tun_cls;
2245
2246         ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2247                                       ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2248                                       p_req->update_vxlan_port,
2249                                       p_req->vxlan_port);
2250         ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2251                                       ECORE_MODE_L2GENEVE_TUNN,
2252                                       p_req->l2geneve_clss,
2253                                       p_req->update_geneve_port,
2254                                       p_req->geneve_port);
2255         __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2256                                         ECORE_MODE_IPGENEVE_TUNN,
2257                                         p_req->ipgeneve_clss);
2258         __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2259                                         ECORE_MODE_L2GRE_TUNN,
2260                                         p_req->l2gre_clss);
2261         __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2262                                         ECORE_MODE_IPGRE_TUNN,
2263                                         p_req->ipgre_clss);
2264
2265         /* Even if the PF modifies the VF's request, it should still
2266          * return an error for a partial or modified configuration,
2267          * as opposed to the one originally requested.
2268          */
2269         rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2270                                                  &b_update_required, &tunn);
2271
2272         if (rc != ECORE_SUCCESS)
2273                 status = PFVF_STATUS_FAILURE;
2274
2275         /* Apply the configuration only if the ECORE client requires an update */
2276         if (b_update_required) {
2277                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2278                                                  ECORE_SPQ_MODE_EBLOCK,
2279                                                  OSAL_NULL);
2280                 if (rc != ECORE_SUCCESS)
2281                         status = PFVF_STATUS_FAILURE;
2282         }
2283
2284 send_resp:
2285         p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
2286                                CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2287
2288         ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2289         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2290                       sizeof(struct channel_list_end_tlv));
2291
2292         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2293 }
2294
2295 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2296                                             struct ecore_ptt *p_ptt,
2297                                             struct ecore_vf_info *p_vf,
2298                                             u8 status)
2299 {
2300         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2301         struct pfvf_start_queue_resp_tlv *p_tlv;
2302         bool b_legacy = false;
2303         u16 length;
2304
2305         mbx->offset = (u8 *)mbx->reply_virt;
2306
2307         /* Taking a bigger struct instead of adding a TLV to list was a
2308          * mistake, but one which we're now stuck with, as some older
2309          * clients assume the size of the previous response.
2310          */
2311         if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2312             ETH_HSI_VER_NO_PKT_LEN_TUNN)
2313                 b_legacy = true;
2314
2315         if (!b_legacy)
2316                 length = sizeof(*p_tlv);
2317         else
2318                 length = sizeof(struct pfvf_def_resp_tlv);
2319
2320         p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2321                               length);
2322         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2323                       sizeof(struct channel_list_end_tlv));
2324
2325         /* Update the TLV with the response */
2326         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2327                 u16 qid = mbx->req_virt->start_txq.tx_qid;
2328
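                /* The offset returned to the VF is the doorbell-BAR address
                 * to ring for this Tx queue, derived from the queue's FW CID.
                 */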
2329                 p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
2330                                            DQ_DEMS_LEGACY);
2331         }
2332
2333         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2334 }
2335
2336 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2337                                        struct ecore_ptt *p_ptt,
2338                                        struct ecore_vf_info *vf)
2339 {
2340         struct ecore_queue_start_common_params params;
2341         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2342         u8 status = PFVF_STATUS_NO_RESOURCE;
2343         struct ecore_vf_q_info *p_queue;
2344         struct vfpf_start_txq_tlv *req;
2345         enum _ecore_status_t rc;
2346         u16 pq;
2347
2348         OSAL_MEMSET(&params, 0, sizeof(params));
2349         req = &mbx->req_virt->start_txq;
2350
2351         if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
2352             !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2353                 goto out;
2354
2355         /* Acquire a new queue-cid */
2356         p_queue = &vf->vf_queues[req->tx_qid];
2357
2358         params.queue_id = p_queue->fw_tx_qid;
2359         params.vport_id = vf->vport_id;
2360         params.stats_id = vf->abs_vf_id + 0x10;
2361         params.sb = req->hw_sb;
2362         params.sb_idx = req->sb_index;
2363
2364         p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
2365                                                     vf->opaque_fid,
2366                                                     p_queue->fw_cid,
2367                                                     (u8)req->tx_qid,
2368                                                     &params);
2369         if (p_queue->p_tx_cid == OSAL_NULL)
2370                 goto out;
2371
2372         pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2373                                     vf->relative_vf_id);
2374         rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
2375                                         req->pbl_addr, req->pbl_size, pq);
2376         if (rc != ECORE_SUCCESS) {
2377                 status = PFVF_STATUS_FAILURE;
2378                 ecore_eth_queue_cid_release(p_hwfn,
2379                                             p_queue->p_tx_cid);
2380                 p_queue->p_tx_cid = OSAL_NULL;
2381         } else {
2382                 status = PFVF_STATUS_SUCCESS;
2383         }
2384
2385 out:
2386         ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
2387 }
2388
2389 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2390                                                    struct ecore_vf_info *vf,
2391                                                    u16 rxq_id,
2392                                                    u8 num_rxqs,
2393                                                    bool cqe_completion)
2394 {
2395         struct ecore_vf_q_info *p_queue;
2396         enum _ecore_status_t rc = ECORE_SUCCESS;
2397         int qid;
2398
2399         if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2400                 return ECORE_INVAL;
2401
2402         for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2403                 p_queue = &vf->vf_queues[qid];
2404
2405                 if (!p_queue->p_rx_cid)
2406                         continue;
2407
2408                 rc = ecore_eth_rx_queue_stop(p_hwfn,
2409                                              p_queue->p_rx_cid,
2410                                              false, cqe_completion);
2411                 if (rc != ECORE_SUCCESS)
2412                         return rc;
2413
2414                 vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
2415                 vf->num_active_rxqs--;
2416         }
2417
2418         return rc;
2419 }
2420
2421 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2422                                                    struct ecore_vf_info *vf,
2423                                                    u16 txq_id, u8 num_txqs)
2424 {
2425         enum _ecore_status_t rc = ECORE_SUCCESS;
2426         struct ecore_vf_q_info *p_queue;
2427         int qid;
2428
2429         if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2430                 return ECORE_INVAL;
2431
2432         for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2433                 p_queue = &vf->vf_queues[qid];
2434                 if (!p_queue->p_tx_cid)
2435                         continue;
2436
2437                 rc = ecore_eth_tx_queue_stop(p_hwfn,
2438                                              p_queue->p_tx_cid);
2439                 if (rc != ECORE_SUCCESS)
2440                         return rc;
2441
2442                 p_queue->p_tx_cid = OSAL_NULL;
2443         }
2444         return rc;
2445 }
2446
2447 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2448                                        struct ecore_ptt *p_ptt,
2449                                        struct ecore_vf_info *vf)
2450 {
2451         u16 length = sizeof(struct pfvf_def_resp_tlv);
2452         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2453         u8 status = PFVF_STATUS_SUCCESS;
2454         struct vfpf_stop_rxqs_tlv *req;
2455         enum _ecore_status_t rc;
2456
2457         /* We allow starting from qid != 0; in that case we need to make
2458          * sure that qid + num_qs doesn't exceed the actual number of
2459          * queues that exist.
2460          */
2461         req = &mbx->req_virt->stop_rxqs;
2462         rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2463                                     req->num_rxqs, req->cqe_completion);
2464         if (rc)
2465                 status = PFVF_STATUS_FAILURE;
2466
2467         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2468                                length, status);
2469 }
2470
2471 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2472                                        struct ecore_ptt *p_ptt,
2473                                        struct ecore_vf_info *vf)
2474 {
2475         u16 length = sizeof(struct pfvf_def_resp_tlv);
2476         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2477         u8 status = PFVF_STATUS_SUCCESS;
2478         struct vfpf_stop_txqs_tlv *req;
2479         enum _ecore_status_t rc;
2480
2481         /* We allow starting from qid != 0; in that case we need to make
2482          * sure that qid + num_qs doesn't exceed the actual number of
2483          * queues that exist.
2484          */
2485         req = &mbx->req_virt->stop_txqs;
2486         rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2487         if (rc)
2488                 status = PFVF_STATUS_FAILURE;
2489
2490         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2491                                length, status);
2492 }
2493
2494 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2495                                          struct ecore_ptt *p_ptt,
2496                                          struct ecore_vf_info *vf)
2497 {
2498         struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2499         u16 length = sizeof(struct pfvf_def_resp_tlv);
2500         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2501         struct vfpf_update_rxq_tlv *req;
2502         u8 status = PFVF_STATUS_FAILURE;
2503         u8 complete_event_flg;
2504         u8 complete_cqe_flg;
2505         u16 qid;
2506         enum _ecore_status_t rc;
2507         u8 i;
2508
2509         req = &mbx->req_virt->update_rxq;
2510         complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2511         complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2512
2513         /* Validate inputs */
2514         if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
2515             !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
2516                 DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2517                         vf->relative_vf_id, req->rx_qid, req->num_rxqs);
2518                 goto out;
2519         }
2520
2521         for (i = 0; i < req->num_rxqs; i++) {
2522                 qid = req->rx_qid + i;
2523
2524                 if (!vf->vf_queues[qid].p_rx_cid) {
2525                         DP_INFO(p_hwfn,
2526                                 "VF[%d] rx_qid = %d isn`t active!\n",
2527                                 vf->relative_vf_id, qid);
2528                         goto out;
2529                 }
2530
2531                 handlers[i] = vf->vf_queues[qid].p_rx_cid;
2532         }
2533
2534         rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2535                                            req->num_rxqs,
2536                                            complete_cqe_flg,
2537                                            complete_event_flg,
2538                                            ECORE_SPQ_MODE_EBLOCK,
2539                                            OSAL_NULL);
2540         if (rc)
2541                 goto out;
2542
2543         status = PFVF_STATUS_SUCCESS;
2544 out:
2545         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2546                                length, status);
2547 }
2548
2549 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2550                                  void *p_tlvs_list, u16 req_type)
2551 {
2552         struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2553         int len = 0;
2554
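        /* Walk the TLV chain looking for req_type; stop at
         * CHANNEL_TLV_LIST_END, guarding against zero-length TLVs and
         * against running past the end of the channel buffer.
         */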
2555         do {
2556                 if (!p_tlv->length) {
2557                         DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2558                         return OSAL_NULL;
2559                 }
2560
2561                 if (p_tlv->type == req_type) {
2562                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2563                                    "Extended tlv type %s, length %d found\n",
2564                                    ecore_channel_tlvs_string[p_tlv->type],
2565                                    p_tlv->length);
2566                         return p_tlv;
2567                 }
2568
2569                 len += p_tlv->length;
2570                 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2571
2572                 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2573                         DP_NOTICE(p_hwfn, true,
2574                                   "TLVs has overrun the buffer size\n");
2575                         return OSAL_NULL;
2576                 }
2577         } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2578
2579         return OSAL_NULL;
2580 }
2581
2582 static void
2583 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2584                               struct ecore_sp_vport_update_params *p_data,
2585                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2586 {
2587         struct vfpf_vport_update_activate_tlv *p_act_tlv;
2588         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2589
2590         p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2591             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2592         if (!p_act_tlv)
2593                 return;
2594
2595         p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2596         p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2597         p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2598         p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2599         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2600 }
2601
2602 static void
2603 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2604                                struct ecore_sp_vport_update_params *p_data,
2605                                struct ecore_vf_info *p_vf,
2606                                struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2607 {
2608         struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2609         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2610
2611         p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2612             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2613         if (!p_vlan_tlv)
2614                 return;
2615
2616         p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2617
2618         /* Ignore the VF request if we're forcing a vlan */
2619         if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2620                 p_data->update_inner_vlan_removal_flg = 1;
2621                 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2622         }
2623
2624         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2625 }
2626
2627 static void
2628 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2629                               struct ecore_sp_vport_update_params *p_data,
2630                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2631 {
2632         struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2633         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2634
2635         p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2636             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2637         if (!p_tx_switch_tlv)
2638                 return;
2639
2640 #ifndef ASIC_ONLY
2641         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2642                 DP_NOTICE(p_hwfn, false,
2643                           "FPGA: Ignore tx-switching configuration originating"
2644                           " from VFs\n");
2645                 return;
2646         }
2647 #endif
2648
2649         p_data->update_tx_switching_flg = 1;
2650         p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2651         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2652 }
2653
2654 static void
2655 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2656                                     struct ecore_sp_vport_update_params *p_data,
2657                                     struct ecore_iov_vf_mbx *p_mbx,
2658                                     u16 *tlvs_mask)
2659 {
2660         struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2661         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2662
2663         p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2664             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2665         if (!p_mcast_tlv)
2666                 return;
2667
2668         p_data->update_approx_mcast_flg = 1;
2669         OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2670                     sizeof(unsigned long) *
2671                     ETH_MULTICAST_MAC_BINS_IN_REGS);
2672         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2673 }
2674
2675 static void
2676 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2677                                 struct ecore_sp_vport_update_params *p_data,
2678                                 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2679 {
2680         struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2681         struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2682         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2683
2684         p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2685             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2686         if (!p_accept_tlv)
2687                 return;
2688
2689         p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2690         p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2691         p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2692         p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2693         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2694 }
2695
2696 static void
2697 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2698                                     struct ecore_sp_vport_update_params *p_data,
2699                                     struct ecore_iov_vf_mbx *p_mbx,
2700                                     u16 *tlvs_mask)
2701 {
2702         struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2703         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2704
2705         p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2706             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2707         if (!p_accept_any_vlan)
2708                 return;
2709
2710         p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2711         p_data->update_accept_any_vlan_flg =
2712                         p_accept_any_vlan->update_accept_any_vlan_flg;
2713         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2714 }
2715
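/* Unlike the simpler helpers above, the RSS TLV must be validated before
 * being accepted - every indirection table entry has to reference a valid
 * and active VF rx-queue. On any violation the TLV is still reported in
 * *tlvs_mask but omitted from *tlvs_accepted, which signals the rejection
 * back to the VF.
 */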
2716 static void
2717 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2718                               struct ecore_vf_info *vf,
2719                               struct ecore_sp_vport_update_params *p_data,
2720                               struct ecore_rss_params *p_rss,
2721                               struct ecore_iov_vf_mbx *p_mbx,
2722                               u16 *tlvs_mask, u16 *tlvs_accepted)
2723 {
2724         struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2725         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2726         bool b_reject = false;
2727         u16 table_size;
2728         u16 i, q_idx;
2729
2730         p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2731             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2732         if (!p_rss_tlv) {
2733                 p_data->rss_params = OSAL_NULL;
2734                 return;
2735         }
2736
2737         OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2738
2739         p_rss->update_rss_config =
2740             !!(p_rss_tlv->update_rss_flags &
2741                 VFPF_UPDATE_RSS_CONFIG_FLAG);
2742         p_rss->update_rss_capabilities =
2743             !!(p_rss_tlv->update_rss_flags &
2744                 VFPF_UPDATE_RSS_CAPS_FLAG);
2745         p_rss->update_rss_ind_table =
2746             !!(p_rss_tlv->update_rss_flags &
2747                 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2748         p_rss->update_rss_key =
2749             !!(p_rss_tlv->update_rss_flags &
2750                 VFPF_UPDATE_RSS_KEY_FLAG);
2751
2752         p_rss->rss_enable = p_rss_tlv->rss_enable;
2753         p_rss->rss_eng_id = vf->relative_vf_id + 1;
2754         p_rss->rss_caps = p_rss_tlv->rss_caps;
2755         p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2756         OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2757                     sizeof(p_rss->rss_key));
2758
2759         table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2760                                 (1 << p_rss_tlv->rss_table_size_log));
2761
2762         for (i = 0; i < table_size; i++) {
2763                 q_idx = p_rss_tlv->rss_ind_table[i];
2764                 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
2765                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2766                                    "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2767                                    vf->relative_vf_id, q_idx);
2768                         b_reject = true;
2769                         goto out;
2770                 }
2771
2772                 if (!vf->vf_queues[q_idx].p_rx_cid) {
2773                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2774                                    "VF[%d]: Omitting RSS due to inactive queue %08x\n",
2775                                    vf->relative_vf_id, q_idx);
2776                         b_reject = true;
2777                         goto out;
2778                 }
2779
2780                 p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
2781         }
2782
2783         p_data->rss_params = p_rss;
2784 out:
2785         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2786         if (!b_reject)
2787                 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2788 }
2789
2790 static void
2791 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2792                                   struct ecore_vf_info *vf,
2793                                   struct ecore_sp_vport_update_params *p_data,
2794                                   struct ecore_sge_tpa_params *p_sge_tpa,
2795                                   struct ecore_iov_vf_mbx *p_mbx,
2796                                   u16 *tlvs_mask)
2797 {
2798         struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2799         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2800
2801         p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2802             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2803
2804         if (!p_sge_tpa_tlv) {
2805                 p_data->sge_tpa_params = OSAL_NULL;
2806                 return;
2807         }
2808
2809         OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
2810
2811         p_sge_tpa->update_tpa_en_flg =
2812             !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2813         p_sge_tpa->update_tpa_param_flg =
2814             !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2815                 VFPF_UPDATE_TPA_PARAM_FLAG);
2816
2817         p_sge_tpa->tpa_ipv4_en_flg =
2818             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2819         p_sge_tpa->tpa_ipv6_en_flg =
2820             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2821         p_sge_tpa->tpa_pkt_split_flg =
2822             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2823         p_sge_tpa->tpa_hdr_data_split_flg =
2824             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2825         p_sge_tpa->tpa_gro_consistent_flg =
2826             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2827
2828         p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2829         p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2830         p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2831         p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2832         p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2833
2834         p_data->sge_tpa_params = p_sge_tpa;
2835
2836         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
2837 }
2838
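/* Top-level handler of CHANNEL_TLV_VPORT_UPDATE: aggregate all extended
 * TLVs found in the request into a single ecore_sp_vport_update_params,
 * give the upper layer a chance to veto via OSAL_IOV_VF_VPORT_UPDATE(),
 * and then issue one vport-update ramrod. The response carries both the
 * processed (tlvs_mask) and accepted (tlvs_accepted) bitmasks.
 */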
2839 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
2840                                           struct ecore_ptt *p_ptt,
2841                                           struct ecore_vf_info *vf)
2842 {
2843         struct ecore_rss_params *p_rss_params = OSAL_NULL;
2844         struct ecore_sp_vport_update_params params;
2845         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2846         struct ecore_sge_tpa_params sge_tpa_params;
2847         u16 tlvs_mask = 0, tlvs_accepted = 0;
2848         u8 status = PFVF_STATUS_SUCCESS;
2849         u16 length;
2850         enum _ecore_status_t rc;
2851
2852         /* Validate the VF can send such a request */
2853         if (!vf->vport_instance) {
2854                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2855                            "No VPORT instance available for VF[%d],"
2856                            " failing vport update\n",
2857                            vf->abs_vf_id);
2858                 status = PFVF_STATUS_FAILURE;
2859                 goto out;
2860         }
2861
2862         p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
2863         if (p_rss_params == OSAL_NULL) {
2864                 status = PFVF_STATUS_FAILURE;
2865                 goto out;
2866         }
2867
2868         OSAL_MEMSET(&params, 0, sizeof(params));
2869         params.opaque_fid = vf->opaque_fid;
2870         params.vport_id = vf->vport_id;
2871         params.rss_params = OSAL_NULL;
2872
2873         /* Search for extended tlvs list and update values
2874          * from VF in struct ecore_sp_vport_update_params.
2875          */
2876         ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2877         ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2878         ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2879         ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2880         ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2881         ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2882         ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2883                                           &sge_tpa_params, mbx, &tlvs_mask);
2884
2885         tlvs_accepted = tlvs_mask;
2886
2887         /* Some of the extended TLVs need to be validated first; in that case
2888          * they update the mask without updating the accepted bitmap, so the
2889          * PF can communicate to the VF that it has rejected the request.
2890          */
2891         ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
2892                                       mbx, &tlvs_mask, &tlvs_accepted);
2893
2894         /* Just log a message if there is no extended TLV in the buffer.
2895          * Once every vport-update ramrod feature is requested by the VF
2896          * as an extended TLV, an empty buffer can instead be treated as
2897          * an error in the response.
2898          */
2899         if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
2900                                      &params, &tlvs_accepted) !=
2901             ECORE_SUCCESS) {
2902                 tlvs_accepted = 0;
2903                 status = PFVF_STATUS_NOT_SUPPORTED;
2904                 goto out;
2905         }
2906
2907         if (!tlvs_accepted) {
2908                 if (tlvs_mask)
2909                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2910                                    "Upper-layer prevents the requested"
2911                                    " VF configuration\n");
2912                 else
2913                         DP_NOTICE(p_hwfn, true,
2914                                   "No feature tlvs found for vport update\n");
2915                 status = PFVF_STATUS_NOT_SUPPORTED;
2916                 goto out;
2917         }
2918
2919         rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
2920                                    OSAL_NULL);
2921
2922         if (rc)
2923                 status = PFVF_STATUS_FAILURE;
2924
2925 out:
2926         OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
2927         length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2928                                                     tlvs_mask, tlvs_accepted);
2929         ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2930 }
2931
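/* Maintain the PF-side shadow of the VF's vlan filter configuration, so
 * requests can be validated and the configuration re-traced later. The
 * arrays hold ECORE_ETH_VF_NUM_VLAN_FILTERS + 1 entries; the extra slot
 * presumably covers the default vlan 0 entry.
 */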
2932 static enum _ecore_status_t
2933 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
2934                                 struct ecore_vf_info *p_vf,
2935                                 struct ecore_filter_ucast *p_params)
2936 {
2937         int i;
2938
2939         /* First remove entries and then add new ones */
2940         if (p_params->opcode == ECORE_FILTER_REMOVE) {
2941                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2942                         if (p_vf->shadow_config.vlans[i].used &&
2943                             p_vf->shadow_config.vlans[i].vid ==
2944                             p_params->vlan) {
2945                                 p_vf->shadow_config.vlans[i].used = false;
2946                                 break;
2947                         }
2948                 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2949                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2950                                    "VF [%d] - Tries to remove a non-existent"
2951                                    " vlan\n",
2952                                    p_vf->relative_vf_id);
2953                         return ECORE_INVAL;
2954                 }
2955         } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
2956                    p_params->opcode == ECORE_FILTER_FLUSH) {
2957                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2958                         p_vf->shadow_config.vlans[i].used = false;
2959         }
2960
2961         /* In forced mode, we're willing to remove entries - but we don't add
2962          * new ones.
2963          */
2964         if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
2965                 return ECORE_SUCCESS;
2966
2967         if (p_params->opcode == ECORE_FILTER_ADD ||
2968             p_params->opcode == ECORE_FILTER_REPLACE) {
2969                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2970                         if (p_vf->shadow_config.vlans[i].used)
2971                                 continue;
2972
2973                         p_vf->shadow_config.vlans[i].used = true;
2974                         p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2975                         break;
2976                 }
2977
2978                 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2979                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2980                                    "VF [%d] - Tries to configure more than %d"
2981                                    " vlan filters\n",
2982                                    p_vf->relative_vf_id,
2983                                    ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
2984                         return ECORE_INVAL;
2985                 }
2986         }
2987
2988         return ECORE_SUCCESS;
2989 }
2990
2991 static enum _ecore_status_t
2992 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
2993                                struct ecore_vf_info *p_vf,
2994                                struct ecore_filter_ucast *p_params)
2995 {
2996         char empty_mac[ETH_ALEN];
2997         int i;
2998
2999         OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3000
3001         /* If we're in forced-mode, we don't allow any change.
3002          * TODO - this would change if we were ever to implement logic for
3003          * removing a forced MAC altogether [in which case, like for vlans,
3004          * we should be able to re-trace the previous configuration].
3005          */
3006         if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3007                 return ECORE_SUCCESS;
3008
3009         /* First remove entries and then add new ones */
3010         if (p_params->opcode == ECORE_FILTER_REMOVE) {
3011                 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3012                         if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3013                                          p_params->mac, ETH_ALEN)) {
3014                                 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3015                                               ETH_ALEN);
3016                                 break;
3017                         }
3018                 }
3019
3020                 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3021                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3022                                    "MAC isn't configured\n");
3023                         return ECORE_INVAL;
3024                 }
3025         } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3026                    p_params->opcode == ECORE_FILTER_FLUSH) {
3027                 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3028                         OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3029         }
3030
3031         /* Add the new MAC address to the shadow config */
3032         if (p_params->opcode != ECORE_FILTER_ADD &&
3033             p_params->opcode != ECORE_FILTER_REPLACE)
3034                 return ECORE_SUCCESS;
3035
3036         for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3037                 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3038                                  empty_mac, ETH_ALEN)) {
3039                         OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3040                                     p_params->mac, ETH_ALEN);
3041                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3042                                    "Added MAC at entry %d in shadow\n", i);
3043                         break;
3044                 }
3045         }
3046
3047         if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3048                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3049                            "No available place for MAC\n");
3050                 return ECORE_INVAL;
3051         }
3052
3053         return ECORE_SUCCESS;
3054 }
3055
3056 static enum _ecore_status_t
3057 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3058                                    struct ecore_vf_info *p_vf,
3059                                    struct ecore_filter_ucast *p_params)
3060 {
3061         enum _ecore_status_t rc = ECORE_SUCCESS;
3062
3063         if (p_params->type == ECORE_FILTER_MAC) {
3064                 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3065                 if (rc != ECORE_SUCCESS)
3066                         return rc;
3067         }
3068
3069         if (p_params->type == ECORE_FILTER_VLAN)
3070                 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3071
3072         return rc;
3073 }
3074
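/* Handler of CHANNEL_TLV_UCAST_FILTER: mirror the request into the PF's
 * shadow configuration, enforce the forced-MAC/forced-VLAN policy from
 * the bulletin board, let the upper layer veto via OSAL_IOV_CHK_UCAST(),
 * and only then issue the unicast filter ramrod.
 */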
3075 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3076                                           struct ecore_ptt *p_ptt,
3077                                           struct ecore_vf_info *vf)
3078 {
3079         struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3080         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3081         struct vfpf_ucast_filter_tlv *req;
3082         u8 status = PFVF_STATUS_SUCCESS;
3083         struct ecore_filter_ucast params;
3084         enum _ecore_status_t rc;
3085
3086         /* Prepare the unicast filter params */
3087         OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3088         req = &mbx->req_virt->ucast_filter;
3089         params.opcode = (enum ecore_filter_opcode)req->opcode;
3090         params.type = (enum ecore_filter_ucast_type)req->type;
3091
3092         /* @@@TBD - We might need logic on HV side in determining this */
3093         params.is_rx_filter = 1;
3094         params.is_tx_filter = 1;
3095         params.vport_to_remove_from = vf->vport_id;
3096         params.vport_to_add_to = vf->vport_id;
3097         OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3098         params.vlan = req->vlan;
3099
3100         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3101                    "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3102                    " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3103                    vf->abs_vf_id, params.opcode, params.type,
3104                    params.is_rx_filter ? "RX" : "",
3105                    params.is_tx_filter ? "TX" : "",
3106                    params.vport_to_add_to,
3107                    params.mac[0], params.mac[1], params.mac[2],
3108                    params.mac[3], params.mac[4], params.mac[5], params.vlan);
3109
3110         if (!vf->vport_instance) {
3111                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3112                            "No VPORT instance available for VF[%d],"
3113                            " failing ucast MAC configuration\n",
3114                            vf->abs_vf_id);
3115                 status = PFVF_STATUS_FAILURE;
3116                 goto out;
3117         }
3118
3119         /* Update shadow copy of the VF configuration */
3120         if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3121             ECORE_SUCCESS) {
3122                 status = PFVF_STATUS_FAILURE;
3123                 goto out;
3124         }
3125
3126         /* Determine if the unicast filtering is acceptable to the PF */
3127         if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3128             (params.type == ECORE_FILTER_VLAN ||
3129              params.type == ECORE_FILTER_MAC_VLAN)) {
3130                 /* Once VLAN is forced or PVID is set, do not allow
3131                  * to add/replace any further VLANs.
3132                  */
3133                 if (params.opcode == ECORE_FILTER_ADD ||
3134                     params.opcode == ECORE_FILTER_REPLACE)
3135                         status = PFVF_STATUS_FORCED;
3136                 goto out;
3137         }
3138
3139         if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3140             (params.type == ECORE_FILTER_MAC ||
3141              params.type == ECORE_FILTER_MAC_VLAN)) {
3142                 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3143                     (params.opcode != ECORE_FILTER_ADD &&
3144                      params.opcode != ECORE_FILTER_REPLACE))
3145                         status = PFVF_STATUS_FORCED;
3146                 goto out;
3147         }
3148
3149         rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3150         if (rc == ECORE_EXISTS) {
3151                 goto out;
3152         } else if (rc == ECORE_INVAL) {
3153                 status = PFVF_STATUS_FAILURE;
3154                 goto out;
3155         }
3156
3157         rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3158                                        ECORE_SPQ_MODE_CB, OSAL_NULL);
3159         if (rc)
3160                 status = PFVF_STATUS_FAILURE;
3161
3162 out:
3163         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3164                                sizeof(struct pfvf_def_resp_tlv), status);
3165 }
3166
3167 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3168                                          struct ecore_ptt *p_ptt,
3169                                          struct ecore_vf_info *vf)
3170 {
3171         int i;
3172
3173         /* Reset the SBs */
3174         for (i = 0; i < vf->num_sbs; i++)
3175                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3176                                                   vf->igu_sbs[i],
3177                                                   vf->opaque_fid, false);
3178
3179         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3180                                sizeof(struct pfvf_def_resp_tlv),
3181                                PFVF_STATUS_SUCCESS);
3182 }
3183
3184 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3185                                    struct ecore_ptt *p_ptt,
3186                                    struct ecore_vf_info *vf)
3187 {
3188         u16 length = sizeof(struct pfvf_def_resp_tlv);
3189         u8 status = PFVF_STATUS_SUCCESS;
3190
3191         /* Disable Interrupts for VF */
3192         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3193
3194         /* Reset Permission table */
3195         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3196
3197         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3198                                length, status);
3199 }
3200
3201 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3202                                      struct ecore_ptt *p_ptt,
3203                                      struct ecore_vf_info *p_vf)
3204 {
3205         u16 length = sizeof(struct pfvf_def_resp_tlv);
3206         u8 status = PFVF_STATUS_SUCCESS;
3207         enum _ecore_status_t rc = ECORE_SUCCESS;
3208
3209         ecore_iov_vf_cleanup(p_hwfn, p_vf);
3210
3211         if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3212                 /* Stopping the VF */
3213                 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3214                                       p_vf->opaque_fid);
3215
3216                 if (rc != ECORE_SUCCESS) {
3217                         DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3218                                rc);
3219                         status = PFVF_STATUS_FAILURE;
3220                 }
3221
3222                 p_vf->state = VF_STOPPED;
3223         }
3224
3225         ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3226                                length, status);
3227 }
3228
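/* FLR cleanup has to wait for the HW to drain in-flight VF work before
 * the VF may be re-enabled. Each of the pollers below retries up to
 * 50 times with a 20ms sleep, i.e. waits roughly one second for its
 * block to go idle.
 */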
3229 static enum _ecore_status_t
3230 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3231                            struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3232 {
3233         int cnt;
3234         u32 val;
3235
3236         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3237
3238         for (cnt = 0; cnt < 50; cnt++) {
3239                 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3240                 if (!val)
3241                         break;
3242                 OSAL_MSLEEP(20);
3243         }
3244         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3245
3246         if (cnt == 50) {
3247                 DP_ERR(p_hwfn,
3248                        "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3249                        p_vf->abs_vf_id, val);
3250                 return ECORE_TIMEOUT;
3251         }
3252
3253         return ECORE_SUCCESS;
3254 }
3255
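/* PBF idleness is detected per-VOQ: snapshot the producer/consumer block
 * counters, then wait until each consumer has advanced by at least the
 * initial prod - cons distance. Computing distances as u32 differences
 * keeps the check correct even if the counters wrap around.
 */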
3256 static enum _ecore_status_t
3257 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3258                           struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3259 {
3260         u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3261         int i, cnt;
3262
3263         /* Read initial consumers & producers */
3264         for (i = 0; i < MAX_NUM_VOQS; i++) {
3265                 u32 prod;
3266
3267                 cons[i] = ecore_rd(p_hwfn, p_ptt,
3268                                    PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3269                                    i * 0x40);
3270                 prod = ecore_rd(p_hwfn, p_ptt,
3271                                 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3272                                 i * 0x40);
3273                 distance[i] = prod - cons[i];
3274         }
3275
3276         /* Wait for consumers to pass the producers */
3277         i = 0;
3278         for (cnt = 0; cnt < 50; cnt++) {
3279                 for (; i < MAX_NUM_VOQS; i++) {
3280                         u32 tmp;
3281
3282                         tmp = ecore_rd(p_hwfn, p_ptt,
3283                                        PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3284                                        i * 0x40);
3285                         if (distance[i] > tmp - cons[i])
3286                                 break;
3287                 }
3288
3289                 if (i == MAX_NUM_VOQS)
3290                         break;
3291
3292                 OSAL_MSLEEP(20);
3293         }
3294
3295         if (cnt == 50) {
3296                 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3297                        p_vf->abs_vf_id, i);
3298                 return ECORE_TIMEOUT;
3299         }
3300
3301         return ECORE_SUCCESS;
3302 }
3303
3304 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3305                                                   struct ecore_vf_info *p_vf,
3306                                                   struct ecore_ptt *p_ptt)
3307 {
3308         enum _ecore_status_t rc;
3309
3310         /* TODO - add SRC and TM polling once we add storage IOV */
3311
3312         rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3313         if (rc)
3314                 return rc;
3315
3316         rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3317         if (rc)
3318                 return rc;
3319
3320         return ECORE_SUCCESS;
3321 }
3322
3323 static enum _ecore_status_t
3324 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3325                                  struct ecore_ptt *p_ptt,
3326                                  u16 rel_vf_id, u32 *ack_vfs)
3327 {
3328         struct ecore_vf_info *p_vf;
3329         enum _ecore_status_t rc = ECORE_SUCCESS;
3330
3331         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3332         if (!p_vf)
3333                 return ECORE_SUCCESS;
3334
3335         if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3336             (1ULL << (rel_vf_id % 64))) {
3337                 u16 vfid = p_vf->abs_vf_id;
3338
3339                 /* TODO - should we lock channel? */
3340
3341                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3342                            "VF[%d] - Handling FLR\n", vfid);
3343
3344                 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3345
3346                 /* If VF isn't active, no need for anything but SW */
3347                 if (!p_vf->b_init)
3348                         goto cleanup;
3349
3350                 /* TODO - what to do in case of failure? */
3351                 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3352                 if (rc != ECORE_SUCCESS)
3353                         goto cleanup;
3354
3355                 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3356                 if (rc) {
3357                         /* TODO - define a proper recovery flow */
3358                         DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3359                         return rc;
3360                 }
3361
3362                 /* Workaround to make VF-PF channel ready, as FW
3363                  * doesn't do that as a part of FLR.
3364                  */
3365                 REG_WR(p_hwfn,
3366                        GTT_BAR0_MAP_REG_USDM_RAM +
3367                        USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3368
3369                 /* VF_STOPPED has to be set only after final cleanup
3370                  * but prior to re-enabling the VF.
3371                  */
3372                 p_vf->state = VF_STOPPED;
3373
3374                 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3375                 if (rc) {
3376                         /* TODO - define a proper recovery flow */
3377                         DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3378                                vfid);
3379                         return rc;
3380                 }
3381 cleanup:
3382                 /* Mark VF for ack and clean pending state */
3383                 if (p_vf->state == VF_RESET)
3384                         p_vf->state = VF_STOPPED;
3385                 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3386                 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3387                     ~(1ULL << (rel_vf_id % 64));
3388                 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3389                     ~(1ULL << (rel_vf_id % 64));
3390         }
3391
3392         return rc;
3393 }
3394
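/* The MFW expects the FLR acknowledgment as a bitmap of absolute VF ids,
 * one u32 per 32 VFs; it is filled by the per-VF cleanup above via
 * ack_vfs[vfid / 32] |= 1 << (vfid % 32).
 */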
3395 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3396                                               struct ecore_ptt *p_ptt)
3397 {
3398         u32 ack_vfs[VF_MAX_STATIC / 32];
3399         enum _ecore_status_t rc = ECORE_SUCCESS;
3400         u16 i;
3401
3402         OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3403
3404         /* Since BRB <-> PRS interface can't be tested as part of the flr
3405          * polling due to HW limitations, simply sleep a bit. And since
3406          * there's no need to wait per-vf, do it before looping.
3407          */
3408         OSAL_MSLEEP(100);
3409
3410         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3411                 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3412
3413         rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3414         return rc;
3415 }
3416
3417 enum _ecore_status_t
3418 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3419                                 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3420 {
3421         u32 ack_vfs[VF_MAX_STATIC / 32];
3422         enum _ecore_status_t rc = ECORE_SUCCESS;
3423
3424         OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3425
3426         /* Wait instead of polling the BRB <-> PRS interface */
3427         OSAL_MSLEEP(100);
3428
3429         ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3430
3431         rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3432         return rc;
3433 }
3434
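/* Called with the MFW's disabled-VF bitmap; translates absolute VF ids
 * into this PF's relative ids and marks them in the 64-bit pending_flr
 * bitmap. E.g., rel_vf_id 70 maps to pending_flr[1], bit 6. Returns true
 * if at least one VF was marked.
 */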
3435 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3436 {
3437         bool found = false;
3438         u16 i;
3439
3440         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3441         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3442                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3443                            "[%08x,...,%08x]: %08x\n",
3444                            i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3445
3446         if (!p_hwfn->p_dev->p_iov_info) {
3447                 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3448                 return false;
3449         }
3450
3451         /* Mark VFs */
3452         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3453                 struct ecore_vf_info *p_vf;
3454                 u8 vfid;
3455
3456                 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3457                 if (!p_vf)
3458                         continue;
3459
3460                 vfid = p_vf->abs_vf_id;
3461                 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3462                         u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3463                         u16 rel_vf_id = p_vf->relative_vf_id;
3464
3465                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3466                                    "VF[%d] [rel %d] got FLR-ed\n",
3467                                    vfid, rel_vf_id);
3468
3469                         p_vf->state = VF_RESET;
3470
3471                         /* No need to lock here, since pending_flr should
3472                          * only change here and before ACKing the MFW. Since
3473                          * the MFW will not trigger an additional attention
3474                          * for VF FLR until we ACK, we're safe.
3475                          */
3476                         p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3477                         found = true;
3478                 }
3479         }
3480
3481         return found;
3482 }
3483
3484 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3485                         u16 vfid,
3486                         struct ecore_mcp_link_params *p_params,
3487                         struct ecore_mcp_link_state *p_link,
3488                         struct ecore_mcp_link_capabilities *p_caps)
3489 {
3490         struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3491         struct ecore_bulletin_content *p_bulletin;
3492
3493         if (!p_vf)
3494                 return;
3495
3496         p_bulletin = p_vf->bulletin.p_virt;
3497
3498         if (p_params)
3499                 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3500         if (p_link)
3501                 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3502         if (p_caps)
3503                 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3504 }
3505
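/* Main dispatcher of a VF->PF mailbox request: snapshot the first TLV,
 * take the per-VF channel lock, route known TLV types to their handlers,
 * and answer malicious or unknown senders with a default response when a
 * valid reply address is available.
 */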
3506 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3507                                struct ecore_ptt *p_ptt, int vfid)
3508 {
3509         struct ecore_iov_vf_mbx *mbx;
3510         struct ecore_vf_info *p_vf;
3511
3512         p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3513         if (!p_vf)
3514                 return;
3515
3516         mbx = &p_vf->vf_mbx;
3517
3518         /* ecore_iov_process_mbx_request */
3519         DP_VERBOSE(p_hwfn,
3520                    ECORE_MSG_IOV,
3521                    "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3522
3523         mbx->first_tlv = mbx->req_virt->first_tlv;
3524
3525         OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3526                              p_vf->relative_vf_id,
3527                              mbx->first_tlv.tl.type);
3528
3529         /* Lock the per vf op mutex and note the locker's identity.
3530          * The unlock will take place in mbx response.
3531          */
3532         ecore_iov_lock_vf_pf_channel(p_hwfn,
3533                                      p_vf, mbx->first_tlv.tl.type);
3534
3535         /* check if tlv type is known */
3536         if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3537             !p_vf->b_malicious) {
3538                 /* switch on the opcode */
3539                 switch (mbx->first_tlv.tl.type) {
3540                 case CHANNEL_TLV_ACQUIRE:
3541                         ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3542                         break;
3543                 case CHANNEL_TLV_VPORT_START:
3544                         ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3545                         break;
3546                 case CHANNEL_TLV_VPORT_TEARDOWN:
3547                         ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3548                         break;
3549                 case CHANNEL_TLV_START_RXQ:
3550                         ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3551                         break;
3552                 case CHANNEL_TLV_START_TXQ:
3553                         ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3554                         break;
3555                 case CHANNEL_TLV_STOP_RXQS:
3556                         ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3557                         break;
3558                 case CHANNEL_TLV_STOP_TXQS:
3559                         ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3560                         break;
3561                 case CHANNEL_TLV_UPDATE_RXQ:
3562                         ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3563                         break;
3564                 case CHANNEL_TLV_VPORT_UPDATE:
3565                         ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3566                         break;
3567                 case CHANNEL_TLV_UCAST_FILTER:
3568                         ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3569                         break;
3570                 case CHANNEL_TLV_CLOSE:
3571                         ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3572                         break;
3573                 case CHANNEL_TLV_INT_CLEANUP:
3574                         ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3575                         break;
3576                 case CHANNEL_TLV_RELEASE:
3577                         ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3578                         break;
3579                 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3580                         ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3581                         break;
3582                 }
3583         } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3584                 /* If we've received a message from a VF we consider malicious,
3585                  * we ignore the message unless it's a RELEASE, in which case
3586                  * we give it the benefit of the doubt, allowing the next
3587                  * loaded driver to start over.
3588                  */
3589                 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
3590                         /* TODO - initiate FLR, remove malicious indication */
3591                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3592                                    "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
3593                                    p_vf->abs_vf_id);
3594                 } else {
3595                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3596                                    "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3597                                    p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3598                 }
3599
3600                 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3601                                        mbx->first_tlv.tl.type,
3602                                        sizeof(struct pfvf_def_resp_tlv),
3603                                        PFVF_STATUS_MALICIOUS);
3604         } else {
3605                 /* Unknown TLV - this may belong to a VF driver from the
3606                  * future, i.e., a version written after this PF driver,
3607                  * supporting features we don't yet know of - too bad we
3608                  * can't support them. Or it may simply be a broken VF
3609                  * driver sending garbage over the channel.
3610                  */
3611                 DP_NOTICE(p_hwfn, false,
3612                           "VF[%02x]: unknown TLV. type %04x length %04x"
3613                           " padding %08x reply address %lu\n",
3614                           p_vf->abs_vf_id,
3615                           mbx->first_tlv.tl.type,
3616                           mbx->first_tlv.tl.length,
3617                           mbx->first_tlv.padding,
3618                           (unsigned long)mbx->first_tlv.reply_address);
3619
3620                 /* Try replying in case reply address matches the acquisition's
3621                  * posted address.
3622                  */
3623                 if (p_vf->acquire.first_tlv.reply_address &&
3624                     (mbx->first_tlv.reply_address ==
3625                      p_vf->acquire.first_tlv.reply_address))
3626                         ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3627                                                mbx->first_tlv.tl.type,
3628                                                sizeof(struct pfvf_def_resp_tlv),
3629                                                PFVF_STATUS_NOT_SUPPORTED);
3630                 else
3631                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3632                                    "VF[%02x]: Can't respond to TLV -"
3633                                    " no valid reply address\n",
3634                                    p_vf->abs_vf_id);
3635         }
3636
3637         ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
3638                                        mbx->first_tlv.tl.type);
3639
3640 #ifdef CONFIG_ECORE_SW_CHANNEL
3641         mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
3642         mbx->sw_mbx.response_offset = 0;
3643 #endif
3644 }
3645
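/* Pending VF events are tracked in a u64-per-64-VFs bitmap that mirrors
 * the pending_flr layout: vfid / 64 selects the word and vfid % 64 the
 * bit within it.
 */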
3646 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
3647 {
3648         u64 add_bit = 1ULL << (vfid % 64);
3649
3650         /* TODO - add locking mechanisms [no atomics in ecore, so we can't
3651          * add the lock inside the ecore_pf_iov struct].
3652          */
3653         p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3654 }
3655
3656 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
3657                                                u64 *events)
3658 {
3659         u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3660
3661         /* TODO - Take a lock */
3662         OSAL_MEMCPY(events, p_pending_events,
3663                     sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3664         OSAL_MEMSET(p_pending_events, 0,
3665                     sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3666 }
3667
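/* EQE events carry absolute VF ids; translate them into an index into
 * this PF's vfs_array by subtracting first_vf_in_pf, after checking that
 * the id really belongs to one of this PF's VFs.
 */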
3668 static struct ecore_vf_info *
3669 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
3670 {
3671         u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
3672
3673         if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3674                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3675                            "Got indication for VF [abs 0x%08x] that cannot be"
3676                            " handled by PF\n",
3677                            abs_vfid);
3678                 return OSAL_NULL;
3679         }
3680
3681         return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3682 }
3683
3684 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
3685                                                  u16 abs_vfid,
3686                                                  struct regpair *vf_msg)
3687 {
3688         struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
3689                                                                    abs_vfid);
3690
3691         if (!p_vf)
3692                 return ECORE_SUCCESS;
3693
3694         /* Record the physical address of the request so that the handler
3695          * can later copy the message from it.
3696          */
3697         p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3698
3699         return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
3700 }
3701
3702 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
3703                                        struct malicious_vf_eqe_data *p_data)
3704 {
3705         struct ecore_vf_info *p_vf;
3706
3707         p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
3708
3709         if (!p_vf)
3710                 return;
3711
3712         DP_INFO(p_hwfn,
3713                 "VF [%d] - Malicious behavior [%02x]\n",
3714                 p_vf->abs_vf_id, p_data->errId);
3715
3716         p_vf->b_malicious = true;
3717
3718         OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
3719 }
3720
3721 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
3722                                            u8 opcode,
3723                                            __le16 echo,
3724                                            union event_ring_data *data)
3725 {
3726         switch (opcode) {
3727         case COMMON_EVENT_VF_PF_CHANNEL:
3728                 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
3729                                             &data->vf_pf_channel.msg_addr);
3730         case COMMON_EVENT_VF_FLR:
3731                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3732                            "VF-FLR is still not supported\n");
3733                 return ECORE_SUCCESS;
3734         case COMMON_EVENT_MALICIOUS_VF:
3735                 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3736                 return ECORE_SUCCESS;
3737         default:
3738                 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
3739                         opcode);
3740                 return ECORE_INVAL;
3741         }
3742 }
3743
3744 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3745 {
3746         return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3747                    (1ULL << (rel_vf_id % 64)));
3748 }
3749
3750 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3751 {
3752         struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
3753         u16 i;
3754
3755         if (!p_iov)
3756                 goto out;
3757
3758         for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3759                 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
3760                         return i;
3761
3762 out:
3763         return E4_MAX_NUM_VFS;
3764 }
3765
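/* Copy the VF's request from the address the VF posted (pending_req)
 * into the PF's own request buffer using a DMAE host-to-host transfer.
 * ecore_dmae_host2host() takes the size in dwords, hence the
 * sizeof(union vfpf_tlvs) / 4.
 */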
3766 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
3767                                            struct ecore_ptt *ptt, int vfid)
3768 {
3769         struct ecore_dmae_params params;
3770         struct ecore_vf_info *vf_info;
3771
3772         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3773         if (!vf_info)
3774                 return ECORE_INVAL;
3775
3776         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
3777         params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
3778         params.src_vfid = vf_info->abs_vf_id;
3779
3780         if (ecore_dmae_host2host(p_hwfn, ptt,
3781                                  vf_info->vf_mbx.pending_req,
3782                                  vf_info->vf_mbx.req_phys,
3783                                  sizeof(union vfpf_tlvs) / 4, &params)) {
3784                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3785                            "Failed to copy message from VF 0x%02x\n", vfid);
3786
3787                 return ECORE_IO;
3788         }
3789
3790         return ECORE_SUCCESS;
3791 }
3792
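/* Bulletin-board "forced" MAC: publish the MAC to the VF, invalidate any
 * VF-chosen VFPF_BULLETIN_MAC_ADDR, and immediately apply the forced
 * configuration to the vport.
 */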
3793 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
3794                                        u8 *mac, int vfid)
3795 {
3796         struct ecore_vf_info *vf_info;
3797         u64 feature;
3798
3799         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3800         if (!vf_info) {
3801                 DP_NOTICE(p_hwfn->p_dev, true,
3802                           "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3803                 return;
3804         }
3805         if (vf_info->b_malicious) {
3806                 DP_NOTICE(p_hwfn->p_dev, false,
3807                           "Can't set forced MAC to malicious VF [%d]\n",
3808                           vfid);
3809                 return;
3810         }
3811
3812         feature = 1 << MAC_ADDR_FORCED;
3813         OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3814
3815         vf_info->bulletin.p_virt->valid_bitmap |= feature;
3816         /* Forced MAC will disable MAC_ADDR */
3817         vf_info->bulletin.p_virt->valid_bitmap &=
3818             ~(1 << VFPF_BULLETIN_MAC_ADDR);
3819
3820         ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3821 }
3822
3823 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
3824                                                 u8 *mac, int vfid)
3825 {
3826         struct ecore_vf_info *vf_info;
3827         u64 feature;
3828
3829         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3830         if (!vf_info) {
3831                 DP_NOTICE(p_hwfn->p_dev, true,
3832                           "Can not set MAC, invalid vfid [%d]\n", vfid);
3833                 return ECORE_INVAL;
3834         }
3835         if (vf_info->b_malicious) {
3836                 DP_NOTICE(p_hwfn->p_dev, false,
3837                           "Can't set MAC to malicious VF [%d]\n",
3838                           vfid);
3839                 return ECORE_INVAL;
3840         }
3841
3842         if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
3843                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3844                            "Can not set MAC, Forced MAC is configured\n");
3845                 return ECORE_INVAL;
3846         }
3847
3848         feature = 1 << VFPF_BULLETIN_MAC_ADDR;
3849         OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3850
3851         vf_info->bulletin.p_virt->valid_bitmap |= feature;
3852
3853         return ECORE_SUCCESS;
3854 }
3855
3856 enum _ecore_status_t
3857 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
3858                                                bool b_untagged_only, int vfid)
3859 {
3860         struct ecore_vf_info *vf_info;
3861         u64 feature;
3862
3863         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3864         if (!vf_info) {
3865                 DP_NOTICE(p_hwfn->p_dev, true,
3866                           "Can not set untagged default, invalid vfid [%d]\n",
3867                           vfid);
3868                 return ECORE_INVAL;
3869         }
3870         if (vf_info->b_malicious) {
3871                 DP_NOTICE(p_hwfn->p_dev, false,
3872                           "Can't set untagged default to malicious VF [%d]\n",
3873                           vfid);
3874                 return ECORE_INVAL;
3875         }
3876
3877         /* Since this is configurable only during vport-start, don't take it
3878          * if we're past that point.
3879          */
3880         if (vf_info->state == VF_ENABLED) {
3881                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3882                            "Can't support untagged change for vfid[%d] -"
3883                            " VF is already active\n",
3884                            vfid);
3885                 return ECORE_INVAL;
3886         }
3887
3888         /* Set configuration; This will later be taken into account during the
3889          * VF initialization.
3890          */
3891         feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
3892             (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
3893         vf_info->bulletin.p_virt->valid_bitmap |= feature;
3894
3895         vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
3896             : 0;
3897
3898         return ECORE_SUCCESS;
3899 }
3900
3901 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
3902                                   u16 *opaque_fid)
3903 {
3904         struct ecore_vf_info *vf_info;
3905
3906         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3907         if (!vf_info)
3908                 return;
3909
3910         *opaque_fid = vf_info->opaque_fid;
3911 }
3912
3913 void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
3914                                 u8 *p_vport_id)
3915 {
3916         struct ecore_vf_info *vf_info;
3917
3918         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3919         if (!vf_info)
3920                 return;
3921
3922         *p_vport_id = vf_info->vport_id;
3923 }
3924
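/* Publish a forced VLAN (pvid) on the VF's bulletin board and push the
 * forced configuration to the vport; a pvid of zero removes the forced
 * VLAN instead.
 */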
3925 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
3926                                         u16 pvid, int vfid)
3927 {
3928         struct ecore_vf_info *vf_info;
3929         u64 feature;
3930
3931         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3932         if (!vf_info) {
3933                 DP_NOTICE(p_hwfn->p_dev, true,
3934                           "Can not set forced VLAN, invalid vfid [%d]\n",
3935                           vfid);
3936                 return;
3937         }
3938         if (vf_info->b_malicious) {
3939                 DP_NOTICE(p_hwfn->p_dev, false,
3940                           "Can't set forced vlan to malicious VF [%d]\n",
3941                           vfid);
3942                 return;
3943         }
3944
3945         feature = 1 << VLAN_ADDR_FORCED;
3946         vf_info->bulletin.p_virt->pvid = pvid;
3947         if (pvid)
3948                 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3949         else
3950                 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3951
3952         ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3953 }
3954
3955 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
3956 {
3957         struct ecore_vf_info *p_vf_info;
3958
3959         p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3960         if (!p_vf_info)
3961                 return false;
3962
3963         return !!p_vf_info->vport_instance;
3964 }
3965
3966 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
3967 {
3968         struct ecore_vf_info *p_vf_info;
3969
3970         p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3971         if (!p_vf_info)
3972                 return true;
3973
3974         return p_vf_info->state == VF_STOPPED;
3975 }
3976
3977 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
3978 {
3979         struct ecore_vf_info *vf_info;
3980
3981         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3982         if (!vf_info)
3983                 return false;
3984
3985         return vf_info->spoof_chk;
3986 }
3987
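/* Enable or disable Tx spoof-checking for a VF. If the VF has not started
 * a vport yet, the request is only latched in req_spoofchk_val and gets
 * applied when the PF processes the VF's vport-start.
 */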
3988 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
3989                                             int vfid, bool val)
3990 {
3991         struct ecore_vf_info *vf;
3992         enum _ecore_status_t rc = ECORE_INVAL;
3993
3994         if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
3995                 DP_NOTICE(p_hwfn, true,
3996                           "SR-IOV sanity check failed, can't set spoofchk\n");
3997                 goto out;
3998         }
3999
4000         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4001         if (!vf)
4002                 goto out;
4003
4004         if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4005                 /* After VF vport-start, the PF will configure the spoof check */
4006                 vf->req_spoofchk_val = val;
4007                 rc = ECORE_SUCCESS;
4008                 goto out;
4009         }
4010
4011         rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4012
4013 out:
4014         return rc;
4015 }
4016
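/* Number of queue chains a VF may use - taken from HW info when set,
 * otherwise the ECORE_MAX_VF_CHAINS_PER_PF default.
 */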
4017 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4018 {
4019         u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4020
4021         max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4022             : ECORE_MAX_VF_CHAINS_PER_PF;
4023
4024         return max_chains_per_vf;
4025 }
4026
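/* Expose the VF's request mailbox (virtual address and buffer size) to the
 * caller; either output pointer may be passed as OSAL_NULL if not needed.
 */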
4027 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4028                                           u16 rel_vf_id,
4029                                           void **pp_req_virt_addr,
4030                                           u16 *p_req_virt_size)
4031 {
4032         struct ecore_vf_info *vf_info =
4033             ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4034
4035         if (!vf_info)
4036                 return;
4037
4038         if (pp_req_virt_addr)
4039                 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4040
4041         if (p_req_virt_size)
4042                 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4043 }
4044
4045 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4046                                             u16 rel_vf_id,
4047                                             void **pp_reply_virt_addr,
4048                                             u16 *p_reply_virt_size)
4049 {
4050         struct ecore_vf_info *vf_info =
4051             ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4052
4053         if (!vf_info)
4054                 return;
4055
4056         if (pp_reply_virt_addr)
4057                 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4058
4059         if (p_reply_virt_size)
4060                 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4061 }
4062
4063 #ifdef CONFIG_ECORE_SW_CHANNEL
4064 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4065                                                  u16 rel_vf_id)
4066 {
4067         struct ecore_vf_info *vf_info =
4068             ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4069
4070         if (!vf_info)
4071                 return OSAL_NULL;
4072
4073         return &vf_info->vf_mbx.sw_mbx;
4074 }
4075 #endif
4076
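/* A VF->PF channel message must hold at least a first TLV and must not
 * exceed the largest request in the vfpf union.
 */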
4077 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4078 {
4079         return (length >= sizeof(struct vfpf_first_tlv) &&
4080                 length <= sizeof(union vfpf_tlvs));
4081 }
4082
4083 u32 ecore_iov_pfvf_msg_length(void)
4084 {
4085         return sizeof(union pfvf_tlvs);
4086 }
4087
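/* Return the forced MAC published on the VF's bulletin board, or OSAL_NULL
 * when no forced MAC is configured.
 */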
4088 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4089 {
4090         struct ecore_vf_info *p_vf;
4091
4092         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4093         if (!p_vf || !p_vf->bulletin.p_virt)
4094                 return OSAL_NULL;
4095
4096         if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4097                 return OSAL_NULL;
4098
4099         return p_vf->bulletin.p_virt->mac;
4100 }
4101
4102 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4103                                        u16 rel_vf_id)
4104 {
4105         struct ecore_vf_info *p_vf;
4106
4107         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4108         if (!p_vf || !p_vf->bulletin.p_virt)
4109                 return 0;
4110
4111         if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4112                 return 0;
4113
4114         return p_vf->bulletin.p_virt->pvid;
4115 }
4116
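/* Cap the VF's Tx bandwidth by programming a rate limit on its vport
 * (val is the rate, presumably in Mb/s, as consumed by ecore_init_vport_rl);
 * the VF's relative vport-id is translated to its absolute id first.
 */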
4117 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4118                                                  struct ecore_ptt *p_ptt,
4119                                                  int vfid, int val)
4120 {
4121         struct ecore_vf_info *vf;
4122         u8 abs_vp_id = 0;
4123         enum _ecore_status_t rc;
4124
4125         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4126
4127         if (!vf)
4128                 return ECORE_INVAL;
4129
4130         rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4131         if (rc != ECORE_SUCCESS)
4132                 return rc;
4133
4134         return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
4135 }
4136
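/* Guarantee a minimum Tx rate to the VF by configuring WFQ on its vport.
 * The vfid is sanity-checked on every hwfn, but the vport itself is taken
 * from the leading hwfn. An illustrative call from an OSAL hook (names
 * hypothetical):
 *	rc = ecore_iov_configure_min_tx_rate(p_dev, vf_idx, min_rate_mbps);
 */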
4137 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
4138                                                      int vfid, u32 rate)
4139 {
4140         struct ecore_vf_info *vf;
4141         u8 vport_id;
4142         int i;
4143
4144         for_each_hwfn(p_dev, i) {
4145                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
4146
4147                 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4148                         DP_NOTICE(p_hwfn, true,
4149                                   "SR-IOV sanity check failed,"
4150                                   " can't set min rate\n");
4151                         return ECORE_INVAL;
4152                 }
4153         }
4154
4155         vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
             if (!vf) {
                     DP_NOTICE(p_dev, true,
                               "Min rate requested for unknown VF %d\n", vfid);
                     return ECORE_INVAL;
             }
4156         vport_id = vf->vport_id;
4157
4158         return ecore_configure_vport_wfq(p_dev, vport_id, rate);
4159 }
4160
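/* Read the HW vport statistics of an enabled VF. The statistics index is
 * the absolute VF id offset by 0x10, which presumably skips the indices
 * reserved for the PFs.
 */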
4161 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4162                                             struct ecore_ptt *p_ptt,
4163                                             int vfid,
4164                                             struct ecore_eth_stats *p_stats)
4165 {
4166         struct ecore_vf_info *vf;
4167
4168         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4169         if (!vf)
4170                 return ECORE_INVAL;
4171
4172         if (vf->state != VF_ENABLED)
4173                 return ECORE_INVAL;
4174
4175         __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4176                                 vf->abs_vf_id + 0x10, false);
4177
4178         return ECORE_SUCCESS;
4179 }
4180
4181 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4182 {
4183         struct ecore_vf_info *p_vf;
4184
4185         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4186         if (!p_vf)
4187                 return 0;
4188
4189         return p_vf->num_rxqs;
4190 }
4191
4192 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4193 {
4194         struct ecore_vf_info *p_vf;
4195
4196         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4197         if (!p_vf)
4198                 return 0;
4199
4200         return p_vf->num_active_rxqs;
4201 }
4202
4203 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4204 {
4205         struct ecore_vf_info *p_vf;
4206
4207         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4208         if (!p_vf)
4209                 return OSAL_NULL;
4210
4211         return p_vf->ctx;
4212 }
4213
4214 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4215 {
4216         struct ecore_vf_info *p_vf;
4217
4218         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4219         if (!p_vf)
4220                 return 0;
4221
4222         return p_vf->num_sbs;
4223 }
4224
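/* VF lifecycle predicates: VF_FREE means the VF still awaits ACQUIRE,
 * VF_ACQUIRED means it was acquired but not yet initialized, VF_ENABLED
 * means it is fully initialized, and "started" covers any state past
 * VF_FREE that isn't VF_STOPPED.
 */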
4225 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4226 {
4227         struct ecore_vf_info *p_vf;
4228
4229         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4230         if (!p_vf)
4231                 return false;
4232
4233         return (p_vf->state == VF_FREE);
4234 }
4235
4236 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4237                                               u16 rel_vf_id)
4238 {
4239         struct ecore_vf_info *p_vf;
4240
4241         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4242         if (!p_vf)
4243                 return false;
4244
4245         return (p_vf->state == VF_ACQUIRED);
4246 }
4247
4248 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4249 {
4250         struct ecore_vf_info *p_vf;
4251
4252         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4253         if (!p_vf)
4254                 return false;
4255
4256         return (p_vf->state == VF_ENABLED);
4257 }
4258
4259 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4260                              u16 rel_vf_id)
4261 {
4262         struct ecore_vf_info *p_vf;
4263
4264         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4265         if (!p_vf)
4266                 return false;
4267
4268         return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
4269 }
4270
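/* Return the minimum (WFQ) Tx rate configured on the VF's vport, or 0 if
 * no minimum rate is configured.
 */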
4271 int
4272 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4273 {
4274         struct ecore_wfq_data *vf_vp_wfq;
4275         struct ecore_vf_info *vf_info;
4276
4277         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4278         if (!vf_info)
4279                 return 0;
4280
4281         vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4282
4283         if (vf_vp_wfq->configured)
4284                 return vf_vp_wfq->min_speed;
4285         else
4286                 return 0;
4287 }