/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
/* TEMPORARY until we implement print_enums... */
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE",	/* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
/* TODO - this is linux crc32; need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
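/* Note: the byte-consuming outer loop of this helper is elided above; in
 * the standard reflected CRC-32 form this TODO refers to, each input byte
 * is XORed into the low bits of 'crc' before the 8-step polynomial loop
 * shown runs, roughly:
 *	while (length--) {
 *		crc ^= *ptr++;
 *		... 8-step loop ...
 *	}
 */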
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
	struct ecore_bulletin_content *p_bulletin;
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;
	int crc_size = sizeof(p_bulletin->crc);

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
				      p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
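/* Note: ecore_dmae_host2host() takes its length in 32-bit dwords, hence
 * the bulletin.size / 4 above; the CRC itself is computed over everything
 * in the bulletin that follows the crc field (the '+ crc_size' offset).
 */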
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
	struct ecore_hw_sriov_info *iov = &p_dev->sriov_info;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);

	/* @@@TODO - in future we might want to add an OSAL here to
	 * allow each OS to decide on its own how to act.
	 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs is already set to a non-zero value."
			   " Ignoring PCI configuration value\n");

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n", 0,
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d;"
			  " setting num_vf to zero\n",

	return ECORE_SUCCESS;
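/* For reference: per the PCIe SR-IOV spec, the offset/stride values read
 * above determine each VF's routing ID relative to its PF, roughly:
 *	vf_rid = pf_rid + offset + stride * vf_index
 * (illustrative formula only; it is not used verbatim in this file).
 */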
static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
	struct ecore_igu_block *p_sb;

	if (!p_hwfn->hw_info.p_igu_info) {
		   "ecore_iov_clear_vf_igu_blocks: IGU info not initialized\n");

	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
		    !(p_sb->status & ECORE_IGU_STATUS_PF)) {
			val = ecore_rd(p_hwfn, p_ptt,
				       IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);

static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
	u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	struct ecore_bulletin_content *p_bulletin_virt;
	struct ecore_pf_iov *p_iov_info;
	dma_addr_t req_p, rply_p, bulletin_p;

	p_iov_info = p_hwfn->pf_iov_info;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		   "ecore_iov_setup_vfdb called without allocating memory first\n");

	p_iov_info->base_vport_id = 1;	/* @@@TBD resource allocation */

	for (idx = 0; idx < num_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);
		/* @@TBD MichalK - add base vport_id of VFs to equation */
		vf->vport_id = p_iov_info->base_vport_id + idx;
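		/* Per-VF layout note: each VF owns one fixed-size slot in the
		 * shared request/reply/bulletin buffers, so e.g. VF idx's
		 * request mailbox sits at mbx_msg_phys_addr +
		 * idx * sizeof(union vfpf_tlvs), mirroring the virtual
		 * pointer arithmetic above.
		 */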
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], Response"
		   " mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);

	/* @@@TBD MichalK - statistics / RSS */

	return ECORE_SUCCESS;
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);

	/* @@@TBD MichalK - statistics / RSS */
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");

	p_hwfn->pf_iov_info = p_sriov;

	rc = ecore_iov_allocate_vfdb(p_hwfn);

void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	if (!IS_PF_SRIOV(p_hwfn) || !p_hwfn->pf_iov_info)

	ecore_iov_setup_vfdb(p_hwfn);
	ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
	if (p_hwfn->pf_iov_info) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt)
	enum _ecore_status_t rc;

	/* @@@ TBD get this information from shmem / pci cfg */
	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* First hwfn should learn the PCI configuration */
	if (IS_LEAD_HWFN(p_hwfn)) {
		struct ecore_dev *p_dev = p_hwfn->p_dev;
		int *pos = &p_hwfn->p_dev->sriov_info.pos;

		*pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
						    PCI_EXT_CAP_ID_SRIOV);
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "No PCIe IOV support\n");
			return ECORE_SUCCESS;

		rc = ecore_iov_pci_cfg_info(p_dev);
	} else if (!p_hwfn->p_dev->sriov_info.pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;

	/* Calculate the first VF index - this is a bit tricky; basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	p_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset +
	    p_hwfn->abs_pf_id - 16;
	if (ECORE_PATH_ID(p_hwfn))
		p_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB;
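	/* Illustrative example: with sriov_info.offset == 16 and
	 * abs_pf_id == 0 on engine 0, first_vf_in_pf == 16 + 0 - 16 == 0,
	 * i.e. this PF's VFs start at the very beginning of the VF range.
	 */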
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n", p_hwfn->hw_info.first_vf_in_pf);

	return ECORE_SUCCESS;

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",

void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
				 u16 rel_vf_id, u8 to_disable)
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);

	vf->to_disable = to_disable;

void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)
	for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_hwfn, i, to_disable);

/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id, void *ctx)
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
		rc = ECORE_UNKNOWN_ERROR;
/*
 * VF enable primitives
 *
 * When pretend is required, the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* clears vf error in all semi blocks
 * Assumption: called under VF pretend...
 */
static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
	ecore_wr(p_hwfn, p_ptt, TSEM_REG_VF_ERROR, 1);
	ecore_wr(p_hwfn, p_ptt, USEM_REG_VF_ERROR, 1);
	ecore_wr(p_hwfn, p_ptt, MSEM_REG_VF_ERROR, 1);
	ecore_wr(p_hwfn, p_ptt, XSEM_REG_VF_ERROR, 1);
	ecore_wr(p_hwfn, p_ptt, YSEM_REG_VF_ERROR, 1);
	ecore_wr(p_hwfn, p_ptt, PSEM_REG_VF_ERROR, 1);
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt, u8 abs_vfid)
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
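/* Addressing note: the WAS_ERROR_VF clear registers hold 32 VFs apiece,
 * so (abs_vfid >> 5) selects the 32-bit register and (abs_vfid & 0x1f)
 * selects the VF's bit within it.
 */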
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "value in VF_CONFIGURATION of vf %d after write %x\n",
		   ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++) {
		igu_sb_id = vf->igu_sbs[i];
		/* Set then clear... */
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc;

		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
				      vf->abs_vf_id, vf->num_sbs);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	if (vf->state != VF_STOPPED) {
		DP_NOTICE(p_hwfn, true, "VF[%02x] is already started\n",

	rc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
/**
 * @brief ecore_iov_config_perm_table - configure the permission
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
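		/* Each 9-bit entry written above is thus
		 * {valid (bit 8), vf number (bits 7:0)}: enable stores
		 * abs_vf_id with bit 8 set, disable clears the line.
		 */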
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1 /* enable */);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true /* enable */);

static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
	struct ecore_igu_block *igu_blocks =
	    p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;

	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
		if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,

			/* Configure the igu sb in CAU which was marked valid */
			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
			ecore_dmae_host2grc(p_hwfn, p_ptt,
					    (u64)(osal_uintptr_t)&sb_entry,
					    CAU_REG_SB_VAR_MEMORY +
					    igu_id * sizeof(u64), 2, 0);

	vf->num_sbs = (u8)num_rx_queues;

/**
 * @brief The function invalidates all the VF entries;
 * technically this isn't required, but it is added for
 * cleanliness and ease of debugging in case a VF attempts to
 * produce an interrupt after it has been taken down.
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    ECORE_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;

enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u16 rel_vf_id, u16 num_rx_queues)
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = OSAL_NULL;
	u8 num_of_vf_available_chains = 0;

	if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16)cids);
	num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,

		if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "VF[%d] will require utilizing"
				  " out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			/* TODO - cleanup the already allocated SBs */

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
		u16 vf_id = vf->relative_vf_id;

		p_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64));
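		/* active_vfs is an array of u64 bitmap words: vf_id / 64
		 * picks the word and (1ULL << (vf_id % 64)) the bit, matching
		 * the clear operation in ecore_iov_release_hw_for_vf() below.
		 */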
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
	struct ecore_vf_info *vf = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;

	if (vf->state != VF_STOPPED) {
		/* Stopping the VF */
		rc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);

		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",

		vf->state = VF_STOPPED;

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);

	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
		struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
		u16 vf_id = vf->relative_vf_id;

		p_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64));

	return ECORE_SUCCESS;

static bool ecore_iov_tlv_supported(u16 tlvtype)
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
	/* we don't lock the channel for unsupported tlvs */
	if (!ecore_iov_tlv_supported(tlv))

	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

		   "VF[%d]: vf pf channel locked by %s\n",
		   vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
	/* we don't unlock the channel for unsupported tlvs */
	if (!ecore_iov_tlv_supported(expected_tlv))

	/* WARN(expected_tlv != vf->op_current,
	 * "lock mismatch: expected %s found %s",
	 * channel_tlvs_string[expected_tlv],
	 * channel_tlvs_string[vf->op_current]);

	/* unlock the channel */
	/* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */

		   "VF[%d]: vf pf channel unlocked by %s\n",
		   vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
		    u8 **offset, u16 type, u16 length)
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	/* Offset should keep pointing to next TLV (the end of the last) */

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
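/* Typical usage (see ecore_iov_prepare_resp() below): the caller points
 * 'offset' at the start of the reply buffer, adds each response TLV in
 * turn, and terminates the chain with a CHANNEL_TLV_LIST_END TLV.
 */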
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)

		/* Validate entry - protect against malicious VFs */
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");

		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
				    u16 length, u8 status)
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;

	mbx->reply_virt->default_resp.hdr.status = status;

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

	if (!p_hwfn->p_dev->sriov_info.b_hw_channel)

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
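	/* The first u64 of the reply (the header holding 'status') is
	 * copied separately below, after the body, so the VF cannot observe
	 * a completed status before the rest of the response has landed.
	 */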
	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

		 GTT_BAR0_MAP_REG_USDM_RAM +
		 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
				  enum ecore_iov_vport_update_flag flag)
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)(p_mbx->reply_virt);
	size = sizeof(struct pfvf_def_resp_tlv);

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))

		resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
				     ecore_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)(mbx->reply_virt);

	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->num_mac_filters = 0;
	p_vf->num_vlan_filters = 0;
	p_vf->num_mc_filters = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested fewer resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	u8 i, vfpf_status = PFVF_STATUS_SUCCESS;

	/* Validate FW compatibility */
	if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
	    req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
	    req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
	    req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
			"VF[%d] is running an incompatible driver [VF needs"
			" FW %02x:%02x:%02x:%02x but Hypervisor is"
			" using %02x:%02x:%02x:%02x]\n",
			vf->abs_vf_id, req->vfdev_info.fw_major,
			req->vfdev_info.fw_minor, req->vfdev_info.fw_revision,
			req->vfdev_info.fw_engineering, FW_MAJOR_VERSION,
			FW_MINOR_VERSION, FW_REVISION_VERSION,
			FW_ENGINEERING_VERSION);
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... */
	vf->opaque_fid = req->vfdev_info.opaque_fid;
	vf->num_mac_filters = 1;
	vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	vf->num_mc_filters = ECORE_MAX_MC_ADDRS;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;
	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED;

	pfdev_info->stats_info.mstats.address =
	    PXP_VF_BAR0_START_MSDM_ZONE_B +
	    OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.mstats.len =
	    sizeof(struct eth_mstorm_per_queue_stat);

	pfdev_info->stats_info.ustats.address =
	    PXP_VF_BAR0_START_USDM_ZONE_B +
	    OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.ustats.len =
	    sizeof(struct eth_ustorm_per_queue_stat);

	pfdev_info->stats_info.pstats.address =
	    PXP_VF_BAR0_START_PSDM_ZONE_B +
	    OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
	pfdev_info->stats_info.pstats.len =
	    sizeof(struct eth_pstorm_per_queue_stat);

	pfdev_info->stats_info.tstats.address = 0;
	pfdev_info->stats_info.tstats.len = 0;

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver,

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill in resc : @@@TBD MichalK Hard Coded for now... */
	resc->num_rxqs = vf->num_rxqs;
	resc->num_txqs = vf->num_txqs;
	resc->num_sbs = vf->num_sbs;
	for (i = 0; i < resc->num_sbs; i++) {
		resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
		resc->hw_sbs[i].sb_qid = 0;

	for (i = 0; i < resc->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&resc->hw_qid[i]);
		resc->cid[i] = vf->vf_queues[i].fw_cid;

	resc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,
					   req->resc_request.num_mac_filters);
	resc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,
					    req->resc_request.num_vlan_filters);
	resc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,
					  req->resc_request.num_mc_filters);

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d, n_mcs-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters, resc->num_mc_filters);

	vf->state = VF_ACQUIRED;

	/* Prepare Response */
	length = sizeof(struct pfvf_acquire_resp_tlv);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       length, vfpf_status);

	/* @@@TBD Bulletin */
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (p_vf->shadow_config.vlans[i].used) {
			filter.type = ECORE_FILTER_VLAN;
			filter.vlan = p_vf->shadow_config.vlans[i].vid;
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Reconfig VLAN [0x%04x] for VF [%04x]\n",
				   filter.vlan, p_vf->relative_vf_id);
			rc = ecore_sp_eth_filter_ucast(p_hwfn,
				DP_NOTICE(p_hwfn, true,
					  "Failed to configure VLAN [%04x]"
					  filter.vlan, p_vf->relative_vf_id);

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
					    struct ecore_vf_info *p_vf,
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
		    1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			if (!p_vf->vf_queues[i].rxq_active)

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
							   ECORE_SPQ_MODE_EBLOCK,
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"

		p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport;
	struct ecore_sp_vport_start_params params = { 0 };
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);

	vf->state = VF_ENABLED;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],
				      vf->abs_vf_id, 1 /* VF Valid */);

	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * if none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't configure VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != ECORE_SUCCESS) {
		       "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			       sizeof(struct pfvf_def_resp_tlv), status);
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					   vf->vf_queues[req->rx_qid].fw_cid,
					   vf->vf_queues[req->rx_qid].fw_rx_qid,
					   vf->abs_vf_id + 0x10,
		status = PFVF_STATUS_FAILURE;
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,

static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	union ecore_qm_pq_params pq_params;
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	/* Prepare the parameters which would choose the right PQ */
	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
	pq_params.eth.is_vf = 1;
	pq_params.eth.vf_id = vf->relative_vf_id;

	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
					   vf->vf_queues[req->tx_qid].fw_tx_qid,
					   vf->vf_queues[req->tx_qid].fw_cid,
					   vf->abs_vf_id + 0x10,
					   req->pbl_size, &pq_params);

		status = PFVF_STATUS_FAILURE;
		vf->vf_queues[req->tx_qid].txq_active = true;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
						   struct ecore_vf_info *vf,
						   bool cqe_completion)
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = ecore_sp_eth_rx_queue_stop(p_hwfn,

		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;

static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
						   struct ecore_vf_info *vf,
						   u16 txq_id, u8 num_txqs)
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = ecore_sp_eth_tx_queue_stop(p_hwfn,

		vf->vf_queues[qid].txq_active = false;

static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_stop_rxqs_tlv *req = &mbx->req_virt->stop_rxqs;
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	/* We give the option of starting from qid != 0; in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				    req->num_rxqs, req->cqe_completion);
		status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,

static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_stop_txqs_tlv *req = &mbx->req_virt->stop_txqs;
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	/* We give the option of starting from qid != 0; in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
		status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq;
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	enum _ecore_status_t rc;

	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, true,
				  "VF rx_qid = %d isn't active!\n", qid);
			status = PFVF_STATUS_FAILURE;

		rc = ecore_sp_eth_rx_queues_update(p_hwfn,
						   vf->vf_queues[qid].fw_rx_qid,
						   ECORE_SPQ_MODE_EBLOCK,
			status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type)
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;

		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Extended tlv type %s, length %d found\n",
				   ecore_channel_tlvs_string[p_tlv->type],

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, true,
				  "TLVs have overrun the buffer size\n");
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
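/* Usage sketch: callers cast the returned pointer to the concrete
 * extended-TLV struct, e.g. (see ecore_iov_vp_update_act_param() below):
 *	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
 *	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
 * and treat OSAL_NULL as "TLV not present in the request".
 */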
ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;

ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
			       struct ecore_sp_vport_update_params *p_data,
			       struct ecore_vf_info *p_vf,
			       struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;

ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Ignore tx-switching configuration originating from VFs\n");

	if (p_tx_switch_tlv) {
		p_data->update_tx_switching_flg = 1;
		p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
		*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;

ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_data->update_approx_mcast_flg = 1;
	OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
		    sizeof(unsigned long) *
		    ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
				struct ecore_sp_vport_update_params *p_data,
				struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	p_data->accept_flags.update_rx_mode_config =
	    p_accept_tlv->update_rx_mode;
	p_data->accept_flags.rx_accept_filter =
	    p_accept_tlv->rx_accept_filter;
	p_data->accept_flags.update_tx_mode_config =
	    p_accept_tlv->update_tx_mode;
	p_data->accept_flags.tx_accept_filter =
	    p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;

ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (p_accept_any_vlan) {
		p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
		p_data->update_accept_any_vlan_flg =
		    p_accept_any_vlan->update_accept_any_vlan_flg;
		*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;

ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_vf_info *vf,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_rss_params *p_rss,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));

	p_rss->update_rss_config =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table =
	    !!(p_rss_tlv->update_rss_flags &
	       VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
		    sizeof(p_rss->rss_ind_table));
	OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
		    sizeof(p_rss->rss_key));

	table_size = OSAL_MIN_T(u16,
				OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
				(1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx) {
			DP_NOTICE(p_hwfn, true,
				  "rss_ind_table[%d] = %d, rxq is out of range\n",
			/* TBD: fail the request and mark VF as malicious */
			p_rss->rss_ind_table[i] =
			    vf->vf_queues[0].fw_rx_qid;
		} else if (!vf->vf_queues[q_idx].rxq_active) {
			DP_NOTICE(p_hwfn, true,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
			/* TBD: fail the request and mark VF as malicious */
			p_rss->rss_ind_table[i] =
			    vf->vf_queues[0].fw_rx_qid;
			p_rss->rss_ind_table[i] =
			    vf->vf_queues[q_idx].fw_rx_qid;

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
	p_data->rss_params = OSAL_NULL;
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *vf,
				  struct ecore_sp_vport_update_params *p_data,
				  struct ecore_sge_tpa_params *p_sge_tpa,
				  struct ecore_iov_vf_mbx *p_mbx,
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = OSAL_NULL;

	OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_update_params params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct ecore_sge_tpa_params sge_tpa_params;
	struct ecore_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;
	u16 tlvs_mask = 0, tlvs_accepted;
	u16 length;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = OSAL_NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct ecore_sp_vport_update_params.
	 */
	ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				      mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					  &sge_tpa_params, mbx, &tlvs_mask);

	/* For now, just log a message if the buffer contains no extended TLV.
	 * Once every feature of the vport-update ramrod is requested by the VF
	 * as an extended TLV, a request without any extended TLV can instead
	 * be failed in the response.
	 */
	tlvs_accepted = tlvs_mask;

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted) !=
	    ECORE_SUCCESS) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Upper-layer prevents said VF configuration\n");
		else
			DP_NOTICE(p_hwfn, true,
				  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						    tlvs_mask, tlvs_accepted);
	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
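/* A minimal sketch (hypothetical helper, not driver code) of the mask
 * bookkeeping used above: tlvs_mask records every extended TLV found in the
 * request, while tlvs_accepted starts as a copy and may be pruned by the
 * upper layer. The response reports both, so the VF can tell "not present"
 * apart from "present but rejected".
 */
static inline bool example_tlv_was_rejected(u16 tlvs_mask, u16 tlvs_accepted,
					    u16 feature_bit)
{
	/* Rejected means requested (set in the mask) but not accepted */
	return !!(tlvs_mask & (1 << feature_bit)) &&
	    !(tlvs_accepted & (1 << feature_bit));
}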
static enum _ecore_status_t
ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   struct ecore_filter_ucast *p_params)
{
	int i;

	/* TODO - do we need a MAC shadow registry? */
	if (p_params->type == ECORE_FILTER_MAC)
		return ECORE_SUCCESS;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return ECORE_SUCCESS;

	if (p_params->opcode == ECORE_FILTER_ADD ||
	    p_params->opcode == ECORE_FILTER_REPLACE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (!p_vf->shadow_config.vlans[i].used) {
				p_vf->shadow_config.vlans[i].used = true;
				p_vf->shadow_config.vlans[i].vid =
				    p_params->vlan;
				break;
			}

		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
			return ECORE_INVAL;
		}
	}

	return ECORE_SUCCESS;
}
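/* Illustrative only: the shadow bookkeeping above is a linear scan over
 * ECORE_ETH_VF_NUM_VLAN_FILTERS + 1 slots. A slot is "used" when it holds a
 * configured vid; ADD takes the first free slot, REMOVE clears a matching
 * one. This hypothetical helper shows the ADD path in isolation.
 */
struct example_vlan_slot {
	bool used;
	u16 vid;
};

static inline int example_vlan_shadow_add(struct example_vlan_slot *slots,
					  int num_slots, u16 vid)
{
	int i;

	for (i = 0; i < num_slots; i++)
		if (!slots[i].used) {
			slots[i].used = true;
			slots[i].vid = vid;
			return 0;	/* stored */
		}

	return -1;	/* table full - the request would be refused */
}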
static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;
	struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct ecore_filter_ucast params;
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	/* Prepare the unicast filter params */
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
	params.opcode = (enum ecore_filter_opcode)req->opcode;
	params.type = (enum ecore_filter_ucast_type)req->type;

	/* @@@TBD - We might need logic on HV side in determining this */
	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1], params.mac[2],
		   params.mac[3], params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
	    ECORE_SUCCESS) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_VLAN ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == ECORE_FILTER_ADD ||
		    params.opcode == ECORE_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_MAC ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
		    (params.opcode != ECORE_FILTER_ADD &&
		     params.opcode != ECORE_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
	if (rc == ECORE_EXISTS) {
		goto out;
	} else if (rc == ECORE_INVAL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				       ECORE_SPQ_MODE_CB, OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, false);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			       sizeof(struct pfvf_def_resp_tlv),
			       PFVF_STATUS_SUCCESS);
}
static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			       length, status);
}
static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);

	ecore_iov_vf_cleanup(p_hwfn, p_vf);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			       length, PFVF_STATUS_SUCCESS);
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
			   struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	int cnt;
	u32 val;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		OSAL_MSLEEP(20);
	}
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = ecore_rd(p_hwfn, p_ptt,
				   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				   i * 0x40);
		prod = ecore_rd(p_hwfn, p_ptt,
				PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
				i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = ecore_rd(p_hwfn, p_ptt,
				       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				       i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		OSAL_MSLEEP(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
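/* Sketch of the wraparound-safe progress test used by the polling loop
 * above, as a hypothetical standalone helper: with unsigned arithmetic,
 * (cons_now - cons_start) is the distance travelled modulo 2^32 even if the
 * counter wrapped, so the consumer is known to have passed the producer once
 * it covers the initially-sampled distance.
 */
static inline bool example_consumer_caught_up(u32 cons_start, u32 cons_now,
					      u32 distance)
{
	/* u32 subtraction is modulo 2^32, so wraparound is handled */
	return (cons_now - cons_start) >= distance;
}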
static enum _ecore_status_t
ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn,
			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	u16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS];
	u16 prod[NUM_OF_TCS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < NUM_OF_TCS; i++) {
		tc_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					   PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
		tc_lb_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					      PRS_REG_MSG_CT_LB_0 + i * 0x4);
		prod[i] = (u16)ecore_rd(p_hwfn, p_ptt,
					BRB_REG_PER_TC_COUNTERS +
					p_hwfn->port_id * 0x20 + i * 0x4);
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < NUM_OF_TCS; i++) {
			u16 cons;

			cons = (u16)ecore_rd(p_hwfn, p_ptt,
					     PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
			if (prod[i] - tc_cons[i] > cons - tc_cons[i])
				break;

			cons = (u16)ecore_rd(p_hwfn, p_ptt,
					     PRS_REG_MSG_CT_LB_0 + i * 0x4);
			if (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i])
				break;
		}

		if (i == NUM_OF_TCS)
			break;

		/* 16-bit counters; Delay instead of sleep... */
		OSAL_UDELAY(10);
	}

	/* This is only optional polling for BB, since registers are only
	 * 16-bit wide and the guarantee is not good enough. Don't fail things
	 * if polling didn't return the expected results.
	 */
	if (cnt == 50)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - prs polling failed on TC %d\n",
			   p_vf->abs_vf_id, i);

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
						  struct ecore_vf_info *p_vf,
						  struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	/* TODO - add SRC and TM polling once we add storage IOV */

	rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 rel_vf_id, u32 *ack_vfs)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return ECORE_SUCCESS;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		/* TODO - should we lock channel? */

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		ecore_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id))
			goto cleanup;

		/* TODO - what to do in case of failure? */
		rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc != ECORE_SUCCESS)
			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n",
			       vfid);

		rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			/* TODO - what now? What a mess.... */
			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n",
			       vfid);
			return rc;
		}

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			/* TODO - again, a mess... */
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}
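/* The FLR bookkeeping above mixes two bitmap widths: the MFW ack array is
 * built from u32 words indexed by absolute vfid, while pending_flr and
 * pending_events are u64 words indexed by relative vfid. A minimal sketch of
 * the word/bit split, with hypothetical helper names:
 */
static inline void example_set_vf_bit32(u32 *bitmap, u16 vfid)
{
	/* word = vfid / 32, bit inside the word = vfid % 32 */
	bitmap[vfid / 32] |= 1U << (vfid % 32);
}

static inline void example_clear_vf_bit64(u64 *bitmap, u16 rel_vf_id)
{
	/* same split, but over 64-bit words */
	bitmap[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64));
}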
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 i;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
		ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	u16 i, found = 0;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	/* Mark VFs */
	for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) {
		struct ecore_vf_info *p_vf;
		u8 vfid;

		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing the MFW. Since
			 * the MFW will not trigger an additional attention
			 * for VF FLR until the ACK, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = 1;
		}
	}

	return found;
}
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *p_params,
			struct ecore_mcp_link_state *p_link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;
	int i;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		ecore_iov_lock_vf_pf_channel(p_hwfn,
					     p_vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}

		ecore_iov_unlock_vf_pf_channel(p_hwfn,
					       p_vf, mbx->first_tlv.tl.type);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver, which supports
		 * features unknown as of yet. Too bad since we don't support
		 * them. Or this may be because someone wrote a crappy VF
		 * driver and is sending garbage over the channel.
		 */
		DP_ERR(p_hwfn,
		       "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);

		for (i = 0; i < 20; i++) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "%x ",
				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
		}

		/* test whether we can respond to the VF (do we have an address
		 * for it?)
		 */
		if (p_vf->state == VF_ACQUIRED)
			DP_ERR(p_hwfn, "UNKNOWN TLV Not supported yet\n");
	}

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}
static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf;
	u8 min, max;

	if (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got a message from VF while PF is not initialized for IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Find the VF record - message comes with relative [engine] vfid */
	min = (u8)p_hwfn->hw_info.first_vf_in_pf;
	max = min + p_hwfn->p_dev->sriov_info.total_vfs;
	/* @@@TBD - for BE machines, should echo field be reversed? */
	if ((u8)vfid < min || (u8)vfid >= max) {
		DP_INFO(p_hwfn,
			"Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\n",
			(u8)vfid, min, max);
		return ECORE_INVAL;
	}
	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)vfid - min];

	/* List the physical address of the request so that the handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
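/* The EQE carries the request's host physical address as two 32-bit halves;
 * the assignment above re-assembles them into a u64 before the DMAE copy is
 * scheduled. A standalone sketch of that assembly (hypothetical helper):
 */
static inline u64 example_regpair_to_u64(u32 hi, u32 lo)
{
	/* Promote before shifting so the high half is not truncated */
	return (((u64)hi) << 32) | lo;
}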
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
					   u8 opcode,
					   __le16 echo,
					   union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, echo,
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	return b_enabled_only ? ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id) :
	    (rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs);
}
struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
							  *p_hwfn,
							  u16 relative_vf_id,
							  bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}
void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	/* TODO - add locking mechanisms [no atomics in ecore, so we can't
	 * add the lock inside the ecore_pf_iov struct].
	 */
	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}
void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
					       u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	/* TODO - Take a lock */
	OSAL_MEMCPY(events, p_pending_events,
		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
	OSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
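/* Sketch of the valid_bitmap transition performed above, in isolation:
 * forcing a MAC sets MAC_ADDR_FORCED and clears the non-forced
 * VFPF_BULLETIN_MAC_ADDR bit, so the VF sees exactly one valid MAC source.
 * The helper name is hypothetical.
 */
static inline u64 example_force_mac_bitmap(u64 valid_bitmap)
{
	valid_bitmap |= 1ULL << MAC_ADDR_FORCED;
	valid_bitmap &= ~(1ULL << VFPF_BULLETIN_MAC_ADDR);
	return valid_bitmap;
}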
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	return ECORE_SUCCESS;
}
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] - VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
	    : 0;

	return ECORE_SUCCESS;
}
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
				u8 *p_vort_id)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*p_vort_id = vf_info->vport_id;
}
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}
bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}
bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn) ||
	    !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid))
		return false;
	else
		return true;
}
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_vf_info *vf;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
	    : ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif
bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}
u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc;
	u8 abs_vp_id = 0;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);

	return rc;
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc;
	u8 vport_id;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	vport_id = vf->vport_id;

	rc = ecore_configure_vport_wfq(p_dev, vport_id, rate);

	return rc;
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}
u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}
void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}
u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}
bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}
bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}