2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
12 #include "ecore_sriov.h"
13 #include "ecore_status.h"
15 #include "ecore_hw_defs.h"
16 #include "ecore_int.h"
17 #include "ecore_hsi_eth.h"
19 #include "ecore_vfpf_if.h"
20 #include "ecore_rt_defs.h"
21 #include "ecore_init_ops.h"
22 #include "ecore_gtt_reg_addr.h"
23 #include "ecore_iro.h"
24 #include "ecore_mcp.h"
25 #include "ecore_cxt.h"
27 #include "ecore_init_fw_funcs.h"
28 #include "ecore_sp_commands.h"
30 const char *ecore_channel_tlvs_string[] = {
31 "CHANNEL_TLV_NONE", /* ends tlv sequence */
32 "CHANNEL_TLV_ACQUIRE",
33 "CHANNEL_TLV_VPORT_START",
34 "CHANNEL_TLV_VPORT_UPDATE",
35 "CHANNEL_TLV_VPORT_TEARDOWN",
36 "CHANNEL_TLV_START_RXQ",
37 "CHANNEL_TLV_START_TXQ",
38 "CHANNEL_TLV_STOP_RXQ",
39 "CHANNEL_TLV_STOP_TXQ",
40 "CHANNEL_TLV_UPDATE_RXQ",
41 "CHANNEL_TLV_INT_CLEANUP",
43 "CHANNEL_TLV_RELEASE",
44 "CHANNEL_TLV_LIST_END",
45 "CHANNEL_TLV_UCAST_FILTER",
46 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
47 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
48 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
49 "CHANNEL_TLV_VPORT_UPDATE_MCAST",
50 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
51 "CHANNEL_TLV_VPORT_UPDATE_RSS",
52 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
53 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
54 "CHANNEL_TLV_UPDATE_TUNN_PARAM",
55 "CHANNEL_TLV_COALESCE_UPDATE",
60 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
61 struct ecore_vf_info *p_vf)
63 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
64 struct ecore_spq_entry *p_ent = OSAL_NULL;
65 struct ecore_sp_init_data init_data;
66 enum _ecore_status_t rc = ECORE_NOTIMPL;
70 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
71 init_data.cid = ecore_spq_get_cid(p_hwfn);
72 init_data.opaque_fid = p_vf->opaque_fid;
73 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
75 rc = ecore_sp_init_request(p_hwfn, &p_ent,
76 COMMON_RAMROD_VF_START,
77 PROTOCOLID_COMMON, &init_data);
78 if (rc != ECORE_SUCCESS)
81 p_ramrod = &p_ent->ramrod.vf_start;
83 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
84 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
86 switch (p_hwfn->hw_info.personality) {
88 p_ramrod->personality = PERSONALITY_ETH;
90 case ECORE_PCI_ETH_ROCE:
91 case ECORE_PCI_ETH_IWARP:
92 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
95 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
96 p_hwfn->hw_info.personality);
100 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
101 if (fp_minor > ETH_HSI_VER_MINOR &&
102 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
103 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
104 "VF [%d] - Requested fp hsi %02x.%02x which is"
105 " slightly newer than PF's %02x.%02x; Configuring"
108 ETH_HSI_VER_MAJOR, fp_minor,
109 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
110 fp_minor = ETH_HSI_VER_MINOR;
113 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
114 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
116 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
117 "VF[%d] - Starting using HSI %02x.%02x\n",
118 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
120 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
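/* Counterpart of ecore_sp_vf_start(): posts a COMMON_RAMROD_VF_STOP slowpath
 * request carrying the VF-id extracted from the concrete FID.
 */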
123 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
127 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
128 struct ecore_spq_entry *p_ent = OSAL_NULL;
129 struct ecore_sp_init_data init_data;
130 enum _ecore_status_t rc = ECORE_NOTIMPL;
133 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
134 init_data.cid = ecore_spq_get_cid(p_hwfn);
135 init_data.opaque_fid = opaque_vfid;
136 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
138 rc = ecore_sp_init_request(p_hwfn, &p_ent,
139 COMMON_RAMROD_VF_STOP,
140 PROTOCOLID_COMMON, &init_data);
141 if (rc != ECORE_SUCCESS)
144 p_ramrod = &p_ent->ramrod.vf_stop;
146 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
148 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
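/* Sanity helper used throughout this file: verifies the relative VF index is
 * within total_vfs, and optionally that the VF is initialized (b_enabled_only)
 * and not flagged as malicious (b_non_malicious).
 */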
151 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
152 bool b_enabled_only, bool b_non_malicious)
154 if (!p_hwfn->pf_iov_info) {
155 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
159 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
163 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
167 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
174 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
178 struct ecore_vf_info *vf = OSAL_NULL;
180 if (!p_hwfn->pf_iov_info) {
181 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
185 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
186 b_enabled_only, false))
187 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
189 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
195 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
196 struct ecore_vf_info *p_vf,
199 if (rx_qid >= p_vf->num_rxqs)
200 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
201 "VF[0x%02x] - can't touch Rx queue[%04x];"
202 " Only 0x%04x are allocated\n",
203 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
204 return rx_qid < p_vf->num_rxqs;
207 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
208 struct ecore_vf_info *p_vf,
211 if (tx_qid >= p_vf->num_txqs)
212 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
213 "VF[0x%02x] - can't touch Tx queue[%04x];"
214 " Only 0x%04x are allocated\n",
215 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
216 return tx_qid < p_vf->num_txqs;
219 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
220 struct ecore_vf_info *p_vf,
225 for (i = 0; i < p_vf->num_sbs; i++)
226 if (p_vf->igu_sbs[i] == sb_idx)
229 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
230 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
231 " one of its 0x%02x SBs\n",
232 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
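/* The two validators below are used on VPORT_TEARDOWN: a VF that still has
 * active Rx/Tx queue cids at that point is treated as malicious (see
 * ecore_iov_vf_mbx_stop_vport() further below).
 */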
237 static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
238 struct ecore_vf_info *p_vf)
242 for (i = 0; i < p_vf->num_rxqs; i++)
243 if (p_vf->vf_queues[i].p_rx_cid)
249 static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
250 struct ecore_vf_info *p_vf)
254 for (i = 0; i < p_vf->num_txqs; i++)
255 if (p_vf->vf_queues[i].p_tx_cid)
261 /* TODO - this is linux crc32; Need a way to ifdef it out for linux */
262 u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
268 for (i = 0; i < 8; i++)
269 crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
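/* The PF publishes link/filter state to each VF through a per-VF bulletin
 * board: the version is bumped, a CRC32 is computed over everything that
 * follows the crc field, and the buffer is DMAed into the address the VF
 * registered at ACQUIRE time.
 */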
274 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
276 struct ecore_ptt *p_ptt)
278 struct ecore_bulletin_content *p_bulletin;
279 int crc_size = sizeof(p_bulletin->crc);
280 struct ecore_dmae_params params;
281 struct ecore_vf_info *p_vf;
283 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
287 /* TODO - check VF is in a state where it can accept message */
288 if (!p_vf->vf_bulletin)
291 p_bulletin = p_vf->bulletin.p_virt;
293 /* Increment bulletin board version and compute crc */
294 p_bulletin->version++;
295 p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
296 p_vf->bulletin.size - crc_size);
298 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
299 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
300 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
302 /* propagate bulletin board via dmae to vm memory */
303 OSAL_MEMSET(&params, 0, sizeof(params));
304 params.flags = ECORE_DMAE_FLAG_VF_DST;
305 params.dst_vfid = p_vf->abs_vf_id;
306 return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
307 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
311 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
313 struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
316 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
317 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
319 OSAL_PCI_READ_CONFIG_WORD(p_dev,
320 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
321 OSAL_PCI_READ_CONFIG_WORD(p_dev,
322 pos + PCI_SRIOV_INITIAL_VF,
325 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
327 /* @@@TODO - in future we might want to add an OSAL here to
328 * allow each OS to decide on its own how to act.
330 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
331 "Number of VFs are already set to non-zero value."
332 " Ignoring PCI configuration value\n");
336 OSAL_PCI_READ_CONFIG_WORD(p_dev,
337 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
339 OSAL_PCI_READ_CONFIG_WORD(p_dev,
340 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
342 OSAL_PCI_READ_CONFIG_WORD(p_dev,
343 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
345 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
346 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
348 OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
350 OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
352 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
353 "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
354 " stride %d, page size 0x%x\n",
355 iov->nres, iov->cap, iov->ctrl,
356 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
357 iov->offset, iov->stride, iov->pgsz);
359 /* Some sanity checks */
360 if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
361 iov->total_vfs > NUM_OF_VFS(p_dev)) {
362 /* This can happen only due to a bug. In this case we set
363 * num_vfs to zero to avoid memory corruption in the code that
364 * assumes max number of vfs
366 DP_NOTICE(p_dev, false,
367 "IOV: Unexpected number of vfs set: %d"
368 " setting num_vf to zero\n",
375 return ECORE_SUCCESS;
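/* Walk the IGU mapping memory and clear the VALID bit on every line that is
 * marked free and not owned by the PF.
 */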
378 static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
379 struct ecore_ptt *p_ptt)
381 struct ecore_igu_block *p_sb;
385 if (!p_hwfn->hw_info.p_igu_info) {
387 "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
392 sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
393 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
394 if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
395 !(p_sb->status & ECORE_IGU_STATUS_PF)) {
396 val = ecore_rd(p_hwfn, p_ptt,
397 IGU_REG_MAPPING_MEMORY + sb_id * 4);
398 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
399 ecore_wr(p_hwfn, p_ptt,
400 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
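/* Carve the pre-allocated request/reply mailboxes and bulletin boards into
 * per-VF slices and derive each VF's relative/absolute/concrete/opaque ids.
 */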
405 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
407 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
408 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
409 struct ecore_bulletin_content *p_bulletin_virt;
410 dma_addr_t req_p, rply_p, bulletin_p;
411 union pfvf_tlvs *p_reply_virt_addr;
412 union vfpf_tlvs *p_req_virt_addr;
415 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
417 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
418 req_p = p_iov_info->mbx_msg_phys_addr;
419 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
420 rply_p = p_iov_info->mbx_reply_phys_addr;
421 p_bulletin_virt = p_iov_info->p_bulletins;
422 bulletin_p = p_iov_info->bulletins_phys;
423 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
425 "ecore_iov_setup_vfdb called without alloc mem first\n");
429 for (idx = 0; idx < p_iov->total_vfs; idx++) {
430 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
433 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
434 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
435 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
436 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
438 #ifdef CONFIG_ECORE_SW_CHANNEL
439 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
440 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
442 vf->state = VF_STOPPED;
445 vf->bulletin.phys = idx *
446 sizeof(struct ecore_bulletin_content) + bulletin_p;
447 vf->bulletin.p_virt = p_bulletin_virt + idx;
448 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
450 vf->relative_vf_id = idx;
451 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
452 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
453 vf->concrete_fid = concrete;
454 /* TODO - need to devise a better way of getting opaque */
455 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
456 (vf->abs_vf_id << 8);
458 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
459 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
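/* Allocate the three DMA-coherent arrays (mailbox requests, mailbox replies
 * and bulletin boards), each sized for the total number of VFs reported by
 * the PCI configuration space.
 */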
463 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
465 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
469 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
471 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
472 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
474 /* Allocate PF Mailbox buffer (per-VF) */
475 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
476 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
477 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
478 &p_iov_info->mbx_msg_phys_addr,
479 p_iov_info->mbx_msg_size);
483 /* Allocate PF Mailbox Reply buffer (per-VF) */
484 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
485 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
486 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
487 &p_iov_info->mbx_reply_phys_addr,
488 p_iov_info->mbx_reply_size);
492 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
494 p_v_addr = &p_iov_info->p_bulletins;
495 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
496 &p_iov_info->bulletins_phys,
497 p_iov_info->bulletins_size);
501 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
502 "PF's Requests mailbox [%p virt 0x%lx phys], "
503 "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
504 " [%p virt 0x%lx phys]\n",
505 p_iov_info->mbx_msg_virt_addr,
506 (unsigned long)p_iov_info->mbx_msg_phys_addr,
507 p_iov_info->mbx_reply_virt_addr,
508 (unsigned long)p_iov_info->mbx_reply_phys_addr,
509 p_iov_info->p_bulletins,
510 (unsigned long)p_iov_info->bulletins_phys);
512 return ECORE_SUCCESS;
515 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
517 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
519 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
520 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
521 p_iov_info->mbx_msg_virt_addr,
522 p_iov_info->mbx_msg_phys_addr,
523 p_iov_info->mbx_msg_size);
525 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
526 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
527 p_iov_info->mbx_reply_virt_addr,
528 p_iov_info->mbx_reply_phys_addr,
529 p_iov_info->mbx_reply_size);
531 if (p_iov_info->p_bulletins)
532 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
533 p_iov_info->p_bulletins,
534 p_iov_info->bulletins_phys,
535 p_iov_info->bulletins_size);
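/* PF-side IOV lifecycle: ecore_iov_alloc() creates pf_iov_info and the VF
 * database, ecore_iov_setup() fills it and scrubs the IGU, and
 * ecore_iov_free()/ecore_iov_free_hw_info() release everything.
 */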
538 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
540 struct ecore_pf_iov *p_sriov;
542 if (!IS_PF_SRIOV(p_hwfn)) {
543 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
544 "No SR-IOV - no need for IOV db\n");
545 return ECORE_SUCCESS;
548 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
550 DP_NOTICE(p_hwfn, true,
551 "Failed to allocate `struct ecore_sriov'\n");
555 p_hwfn->pf_iov_info = p_sriov;
557 return ecore_iov_allocate_vfdb(p_hwfn);
560 void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
562 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
565 ecore_iov_setup_vfdb(p_hwfn);
566 ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
569 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
571 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
572 ecore_iov_free_vfdb(p_hwfn);
573 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
577 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
579 OSAL_FREE(p_dev, p_dev->p_iov_info);
582 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
584 struct ecore_dev *p_dev = p_hwfn->p_dev;
586 enum _ecore_status_t rc;
588 if (IS_VF(p_hwfn->p_dev))
589 return ECORE_SUCCESS;
591 /* Learn the PCI configuration */
592 pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
593 PCI_EXT_CAP_ID_SRIOV);
595 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
596 return ECORE_SUCCESS;
599 /* Allocate a new struct for IOV information */
600 /* TODO - can change to VALLOC when its available */
601 p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
602 sizeof(*p_dev->p_iov_info));
603 if (!p_dev->p_iov_info) {
604 DP_NOTICE(p_hwfn, true,
605 "Can't support IOV due to lack of memory\n");
608 p_dev->p_iov_info->pos = pos;
610 rc = ecore_iov_pci_cfg_info(p_dev);
614 /* We want PF IOV to be synonymous with the existence of p_iov_info;
615 * In case the capability is published but there are no VFs, simply
616 * de-allocate the struct.
618 if (!p_dev->p_iov_info->total_vfs) {
619 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
620 "IOV capabilities, but no VFs are published\n");
621 OSAL_FREE(p_dev, p_dev->p_iov_info);
622 return ECORE_SUCCESS;
625 /* First VF index based on offset is tricky:
626 * - If ARI is supported [likely], offset - (16 - pf_id) would
627 * provide the number for eng0. 2nd engine Vfs would begin
628 * after the first engine's VFs.
629 * - If !ARI, VFs would start on next device.
630 * so offset - (256 - pf_id) would provide the number.
631 * Utilize the fact that (256 - pf_id) is reached only in the latter
632 * case to differentiate between the two.
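/* Purely illustrative numbers (not taken from any specific board): with ARI
 * and abs_pf_id 0, an offset of 16 satisfies 16 < (256 - 0), giving
 * first_vf_in_pf = 16 + 0 - 16 = 0; without ARI, an offset of 256 takes the
 * else branch and gives 256 + 0 - 256 = 0 as well.
 */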
635 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
636 u32 first = p_hwfn->p_dev->p_iov_info->offset +
637 p_hwfn->abs_pf_id - 16;
639 p_dev->p_iov_info->first_vf_in_pf = first;
641 if (ECORE_PATH_ID(p_hwfn))
642 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
644 u32 first = p_hwfn->p_dev->p_iov_info->offset +
645 p_hwfn->abs_pf_id - 256;
647 p_dev->p_iov_info->first_vf_in_pf = first;
650 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
651 "First VF in hwfn 0x%08x\n",
652 p_dev->p_iov_info->first_vf_in_pf);
654 return ECORE_SUCCESS;
657 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
658 bool b_fail_malicious)
660 /* Check PF supports sriov */
661 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
662 !IS_PF_SRIOV_ALLOC(p_hwfn))
665 /* Check VF validity */
666 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
672 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
674 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
677 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
678 u16 rel_vf_id, u8 to_disable)
680 struct ecore_vf_info *vf;
683 for_each_hwfn(p_dev, i) {
684 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
686 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
690 vf->to_disable = to_disable;
694 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
699 if (!IS_ECORE_SRIOV(p_dev))
702 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
703 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
707 /* @@@TBD Consider taking outside of ecore... */
708 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
712 enum _ecore_status_t rc = ECORE_SUCCESS;
713 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
715 if (vf != OSAL_NULL) {
717 #ifdef CONFIG_ECORE_SW_CHANNEL
718 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
721 rc = ECORE_UNKNOWN_ERROR;
727 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
728 struct ecore_ptt *p_ptt,
731 ecore_wr(p_hwfn, p_ptt,
732 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
733 1 << (abs_vfid & 0x1f));
736 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
737 struct ecore_ptt *p_ptt,
738 struct ecore_vf_info *vf)
742 /* Set VF masks and configuration - pretend */
743 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
745 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
748 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
750 /* iterate over all queues, clear sb consumer */
751 for (i = 0; i < vf->num_sbs; i++)
752 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
754 vf->opaque_fid, true);
757 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
758 struct ecore_ptt *p_ptt,
759 struct ecore_vf_info *vf, bool enable)
763 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
765 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
768 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
770 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
772 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
775 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
778 static enum _ecore_status_t
779 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
780 struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
782 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
783 enum _ecore_status_t rc;
786 return ECORE_SUCCESS;
788 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
789 "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
790 ECORE_VF_ABS_ID(p_hwfn, vf));
792 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
793 ECORE_VF_ABS_ID(p_hwfn, vf));
795 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
797 /* It's possible VF was previously considered malicious */
798 vf->b_malicious = false;
800 rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
801 vf->abs_vf_id, vf->num_sbs);
802 if (rc != ECORE_SUCCESS)
805 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
807 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
808 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
810 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
811 p_hwfn->hw_info.hw_mode);
814 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
823 * @brief ecore_iov_config_perm_table - configure the permission
825 * In E4, queue zone permission table size is 320x9. There
826 * are 320 VF queues for single engine device (256 for dual
827 * engine device), and each entry has the following format:
834 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
835 struct ecore_ptt *p_ptt,
836 struct ecore_vf_info *vf, u8 enable)
842 for (qid = 0; qid < vf->num_rxqs; qid++) {
843 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
846 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
847 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
848 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
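/* Each iteration above writes one queue-zone permission entry: the absolute
 * VF-id with bit 8 set when enabling, or 0 to revoke the VF's access to that
 * queue zone.
 */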
852 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
853 struct ecore_ptt *p_ptt,
854 struct ecore_vf_info *vf)
856 /* Reset vf in IGU - interrupts are still disabled */
857 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
859 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
861 /* Permission Table */
862 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
865 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
866 struct ecore_ptt *p_ptt,
867 struct ecore_vf_info *vf,
870 struct ecore_igu_block *igu_blocks;
871 int qid = 0, igu_id = 0;
874 igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
876 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
877 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
879 p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
881 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
882 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
883 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
885 while ((qid < num_rx_queues) &&
886 (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
887 if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
888 struct cau_sb_entry sb_entry;
890 vf->igu_sbs[qid] = (u16)igu_id;
891 igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
893 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
895 ecore_wr(p_hwfn, p_ptt,
896 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
899 /* Configure igu sb in CAU which were marked valid */
900 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
903 ecore_dmae_host2grc(p_hwfn, p_ptt,
904 (u64)(osal_uintptr_t)&sb_entry,
905 CAU_REG_SB_VAR_MEMORY +
906 igu_id * sizeof(u64), 2, 0);
912 vf->num_sbs = (u8)num_rx_queues;
919 * @brief The function invalidates all the VF entries,
920 * technically this isn't required, but added for
921 * cleanness and ease of debugging in case a VF attempts to
922 * produce an interrupt after it has been taken down.
928 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
929 struct ecore_ptt *p_ptt,
930 struct ecore_vf_info *vf)
932 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
936 /* Invalidate igu CAM lines and mark them as free */
937 for (idx = 0; idx < vf->num_sbs; idx++) {
938 igu_id = vf->igu_sbs[idx];
939 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
941 val = ecore_rd(p_hwfn, p_ptt, addr);
942 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
943 ecore_wr(p_hwfn, p_ptt, addr, val);
945 p_info->igu_map.igu_blocks[igu_id].status |=
946 ECORE_IGU_STATUS_FREE;
948 p_hwfn->hw_info.p_igu_info->free_blks++;
954 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
956 struct ecore_mcp_link_params *params,
957 struct ecore_mcp_link_state *link,
958 struct ecore_mcp_link_capabilities *p_caps)
960 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
961 struct ecore_bulletin_content *p_bulletin;
966 p_bulletin = p_vf->bulletin.p_virt;
967 p_bulletin->req_autoneg = params->speed.autoneg;
968 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
969 p_bulletin->req_forced_speed = params->speed.forced_speed;
970 p_bulletin->req_autoneg_pause = params->pause.autoneg;
971 p_bulletin->req_forced_rx = params->pause.forced_rx;
972 p_bulletin->req_forced_tx = params->pause.forced_tx;
973 p_bulletin->req_loopback = params->loopback_mode;
975 p_bulletin->link_up = link->link_up;
976 p_bulletin->speed = link->speed;
977 p_bulletin->full_duplex = link->full_duplex;
978 p_bulletin->autoneg = link->an;
979 p_bulletin->autoneg_complete = link->an_complete;
980 p_bulletin->parallel_detection = link->parallel_detection;
981 p_bulletin->pfc_enabled = link->pfc_enabled;
982 p_bulletin->partner_adv_speed = link->partner_adv_speed;
983 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
984 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
985 p_bulletin->partner_adv_pause = link->partner_adv_pause;
986 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
988 p_bulletin->capability_speed = p_caps->speed_capabilities;
992 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
993 struct ecore_ptt *p_ptt,
994 struct ecore_iov_vf_init_params *p_params)
996 struct ecore_mcp_link_capabilities link_caps;
997 struct ecore_mcp_link_params link_params;
998 struct ecore_mcp_link_state link_state;
999 u8 num_of_vf_available_chains = 0;
1000 struct ecore_vf_info *vf = OSAL_NULL;
1002 enum _ecore_status_t rc = ECORE_SUCCESS;
1006 vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1008 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1009 return ECORE_UNKNOWN_ERROR;
1013 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1014 p_params->rel_vf_id);
1018 /* Perform sanity checking on the requested vport/rss */
1019 if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1020 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1021 p_params->rel_vf_id, p_params->vport_id);
1025 if ((p_params->num_queues > 1) &&
1026 (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1027 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1028 p_params->rel_vf_id, p_params->rss_eng_id);
1032 /* TODO - remove this once we get confidence of change */
1033 if (!p_params->vport_id) {
1034 DP_NOTICE(p_hwfn, false,
1035 "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1036 p_params->rel_vf_id);
1038 if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1039 DP_NOTICE(p_hwfn, false,
1040 "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1041 p_params->rel_vf_id);
1043 vf->vport_id = p_params->vport_id;
1044 vf->rss_eng_id = p_params->rss_eng_id;
1046 /* Perform sanity checking on the requested queue_id */
1047 for (i = 0; i < p_params->num_queues; i++) {
1048 u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
1049 u16 max_vf_qzone = min_vf_qzone +
1050 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
1052 qid = p_params->req_rx_queue[i];
1053 if (qid < min_vf_qzone || qid > max_vf_qzone) {
1054 DP_NOTICE(p_hwfn, true,
1055 "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
1056 qid, p_params->rel_vf_id,
1057 min_vf_qzone, max_vf_qzone);
1061 qid = p_params->req_tx_queue[i];
1062 if (qid > max_vf_qzone) {
1063 DP_NOTICE(p_hwfn, true,
1064 "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
1065 qid, p_params->rel_vf_id, max_vf_qzone);
1069 /* If client *really* wants, Tx qid can be shared with PF */
1070 if (qid < min_vf_qzone)
1071 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1072 "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
1073 p_params->rel_vf_id, qid, i);
1076 /* Limit number of queues according to number of CIDs */
1077 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1078 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1079 "VF[%d] - requesting to initialize for 0x%04x queues"
1080 " [0x%04x CIDs available]\n",
1081 vf->relative_vf_id, p_params->num_queues, (u16)cids);
1082 num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1084 num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1088 if (num_of_vf_available_chains == 0) {
1089 DP_ERR(p_hwfn, "no available igu sbs\n");
1093 /* Choose queue number and index ranges */
1094 vf->num_rxqs = num_of_vf_available_chains;
1095 vf->num_txqs = num_of_vf_available_chains;
1097 for (i = 0; i < vf->num_rxqs; i++) {
1098 struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];
1100 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1101 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1103 /* CIDs are per-VF, so no problem having them 0-based. */
1104 p_queue->fw_cid = i;
1106 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1107 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
1108 vf->relative_vf_id, i, vf->igu_sbs[i],
1109 p_queue->fw_rx_qid, p_queue->fw_tx_qid,
1113 /* Update the link configuration in bulletin.
1115 OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1116 sizeof(link_params));
1117 OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1118 sizeof(link_state));
1119 OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1121 ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1122 &link_params, &link_state, &link_caps);
1124 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1126 if (rc == ECORE_SUCCESS) {
1128 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1129 (1ULL << (vf->relative_vf_id % 64));
1131 if (IS_LEAD_HWFN(p_hwfn))
1132 p_hwfn->p_dev->p_iov_info->num_vfs++;
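/* Tear-down counterpart of ecore_iov_init_hw_for_vf(): re-posts the actual
 * link configuration in the bulletin, disables the VF's interrupts and
 * permission-table entries, frees its IGU SBs and clears its active bit.
 */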
1138 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1139 struct ecore_ptt *p_ptt,
1142 struct ecore_mcp_link_capabilities caps;
1143 struct ecore_mcp_link_params params;
1144 struct ecore_mcp_link_state link;
1145 struct ecore_vf_info *vf = OSAL_NULL;
1147 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1149 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1150 return ECORE_UNKNOWN_ERROR;
1153 if (vf->bulletin.p_virt)
1154 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1155 sizeof(*vf->bulletin.p_virt));
1157 OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1159 /* Get the link configuration back in bulletin so
1160 * that when VFs are re-enabled they get the actual
1161 * link configuration.
1163 OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1164 OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1165 OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1167 ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1169 /* Forget the VF's acquisition message */
1170 OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1172 /* Disabling interrupts and resetting the permission table was done during
1173 * vf-close; however, we could get here without going through vf_close
1175 /* Disable Interrupts for VF */
1176 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1178 /* Reset Permission table */
1179 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1183 ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1187 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1188 ~(1ULL << (vf->relative_vf_id % 64));
1190 if (IS_LEAD_HWFN(p_hwfn))
1191 p_hwfn->p_dev->p_iov_info->num_vfs--;
1194 return ECORE_SUCCESS;
1197 static bool ecore_iov_tlv_supported(u16 tlvtype)
1199 return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
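/* The channel lock/unlock helpers below currently only log which TLV owns
 * the channel; the actual mutex is still a TBD (see the commented-out
 * mutex_lock/op_current lines).
 */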
1202 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1203 struct ecore_vf_info *vf, u16 tlv)
1205 /* lock the channel */
1206 /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1208 /* record the locking op */
1209 /* vf->op_current = tlv; @@@TBD MichalK */
1212 if (ecore_iov_tlv_supported(tlv))
1215 "VF[%d]: vf pf channel locked by %s\n",
1217 ecore_channel_tlvs_string[tlv]);
1221 "VF[%d]: vf pf channel locked by %04x\n",
1222 vf->abs_vf_id, tlv);
1225 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1226 struct ecore_vf_info *vf,
1229 /* log the unlock */
1230 if (ecore_iov_tlv_supported(expected_tlv))
1233 "VF[%d]: vf pf channel unlocked by %s\n",
1235 ecore_channel_tlvs_string[expected_tlv]);
1239 "VF[%d]: vf pf channel unlocked by %04x\n",
1240 vf->abs_vf_id, expected_tlv);
1242 /* record the locking op */
1243 /* vf->op_current = CHANNEL_TLV_NONE; */
1246 /* place a given tlv on the tlv buffer, continuing current tlv list */
1247 void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
1248 u8 **offset, u16 type, u16 length)
1250 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1253 tl->length = length;
1255 /* Offset should keep pointing to next TLV (the end of the last) */
1258 /* Return a pointer to the start of the added tlv */
1259 return *offset - length;
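/* Illustrative sketch of how responses are built with ecore_add_tlv()
 * (mirrors ecore_iov_prepare_resp() further below):
 *
 *   mbx->offset = (u8 *)mbx->reply_virt;
 *   ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
 *   ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *                 sizeof(struct channel_list_end_tlv));
 */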
1262 /* list the types and lengths of the tlvs on the buffer */
1263 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1265 u16 i = 1, total_length = 0;
1266 struct channel_tlv *tlv;
1269 /* cast current tlv list entry to channel tlv header */
1270 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1273 if (ecore_iov_tlv_supported(tlv->type))
1274 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1275 "TLV number %d: type %s, length %d\n",
1276 i, ecore_channel_tlvs_string[tlv->type],
1279 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1280 "TLV number %d: type %d, length %d\n",
1281 i, tlv->type, tlv->length);
1283 if (tlv->type == CHANNEL_TLV_LIST_END)
1286 /* Validate entry - protect against malicious VFs */
1288 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1291 total_length += tlv->length;
1292 if (total_length >= sizeof(struct tlv_buffer_size)) {
1293 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1301 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1302 struct ecore_ptt *p_ptt,
1303 struct ecore_vf_info *p_vf,
1304 u16 length, u8 status)
1306 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1307 struct ecore_dmae_params params;
1310 mbx->reply_virt->default_resp.hdr.status = status;
1312 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1314 #ifdef CONFIG_ECORE_SW_CHANNEL
1315 mbx->sw_mbx.response_size =
1316 length + sizeof(struct channel_list_end_tlv);
1318 if (!p_hwfn->p_dev->b_hw_channel)
1322 eng_vf_id = p_vf->abs_vf_id;
1324 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1325 params.flags = ECORE_DMAE_FLAG_VF_DST;
1326 params.dst_vfid = eng_vf_id;
1328 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1329 mbx->req_virt->first_tlv.reply_address +
1331 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1334 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1335 mbx->req_virt->first_tlv.reply_address,
1336 sizeof(u64) / 4, &params);
1339 GTT_BAR0_MAP_REG_USDM_RAM +
1340 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1343 static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
1344 enum ecore_iov_vport_update_flag flag)
1347 case ECORE_IOV_VP_UPDATE_ACTIVATE:
1348 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1349 case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1350 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1351 case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1352 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1353 case ECORE_IOV_VP_UPDATE_MCAST:
1354 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1355 case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1356 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1357 case ECORE_IOV_VP_UPDATE_RSS:
1358 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1359 case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1360 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1361 case ECORE_IOV_VP_UPDATE_SGE_TPA:
1362 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1368 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1369 struct ecore_vf_info *p_vf,
1370 struct ecore_iov_vf_mbx *p_mbx,
1371 u8 status, u16 tlvs_mask,
1374 struct pfvf_def_resp_tlv *resp;
1375 u16 size, total_len, i;
1377 OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1378 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1379 size = sizeof(struct pfvf_def_resp_tlv);
1382 ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1384 /* Prepare response for all extended tlvs if they are found by PF */
1385 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1386 if (!(tlvs_mask & (1 << i)))
1389 resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
1390 ecore_iov_vport_to_tlv(p_hwfn, i), size);
1392 if (tlvs_accepted & (1 << i))
1393 resp->hdr.status = status;
1395 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1397 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1398 "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1399 p_vf->relative_vf_id,
1400 ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1405 ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1406 sizeof(struct channel_list_end_tlv));
1411 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1412 struct ecore_ptt *p_ptt,
1413 struct ecore_vf_info *vf_info,
1414 u16 type, u16 length, u8 status)
1416 struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1418 mbx->offset = (u8 *)mbx->reply_virt;
1420 ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
1421 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1422 sizeof(struct channel_list_end_tlv));
1424 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1426 OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
1429 struct ecore_public_vf_info
1430 *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1432 bool b_enabled_only)
1434 struct ecore_vf_info *vf = OSAL_NULL;
1436 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1440 return &vf->p_vf_info;
1443 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1444 struct ecore_vf_info *p_vf)
1447 p_vf->vf_bulletin = 0;
1448 p_vf->vport_instance = 0;
1449 p_vf->configured_features = 0;
1451 /* If VF previously requested fewer resources, go back to default */
1452 p_vf->num_rxqs = p_vf->num_sbs;
1453 p_vf->num_txqs = p_vf->num_sbs;
1455 p_vf->num_active_rxqs = 0;
1457 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1458 struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];
1460 if (p_queue->p_rx_cid) {
1461 ecore_eth_queue_cid_release(p_hwfn,
1463 p_queue->p_rx_cid = OSAL_NULL;
1466 if (p_queue->p_tx_cid) {
1467 ecore_eth_queue_cid_release(p_hwfn,
1469 p_queue->p_tx_cid = OSAL_NULL;
1473 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1474 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1475 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1478 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1479 struct ecore_ptt *p_ptt,
1480 struct ecore_vf_info *p_vf,
1481 struct vf_pf_resc_request *p_req,
1482 struct pf_vf_resc *p_resp)
1486 /* Queue related information */
1487 p_resp->num_rxqs = p_vf->num_rxqs;
1488 p_resp->num_txqs = p_vf->num_txqs;
1489 p_resp->num_sbs = p_vf->num_sbs;
1491 for (i = 0; i < p_resp->num_sbs; i++) {
1492 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1493 /* TODO - what's this sb_qid field? Is it deprecated?
1494 * or is there an ecore_client that looks at this?
1496 p_resp->hw_sbs[i].sb_qid = 0;
1499 /* These fields are filled for backward compatibility.
1500 * Unused by modern vfs.
1502 for (i = 0; i < p_resp->num_rxqs; i++) {
1503 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1504 (u16 *)&p_resp->hw_qid[i]);
1505 p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1508 /* Filter related information */
1509 p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1510 p_req->num_mac_filters);
1511 p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1512 p_req->num_vlan_filters);
1514 /* This isn't really needed/enforced, but some legacy VFs might depend
1515 * on the correct filling of this field.
1517 p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1519 /* Validate sufficient resources for VF */
1520 if (p_resp->num_rxqs < p_req->num_rxqs ||
1521 p_resp->num_txqs < p_req->num_txqs ||
1522 p_resp->num_sbs < p_req->num_sbs ||
1523 p_resp->num_mac_filters < p_req->num_mac_filters ||
1524 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1525 p_resp->num_mc_filters < p_req->num_mc_filters) {
1526 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1527 "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
1528 " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
1529 " vlan [%02x/%02x] mc [%02x/%02x]\n",
1531 p_req->num_rxqs, p_resp->num_rxqs,
1532 p_req->num_txqs, p_resp->num_txqs,
1533 p_req->num_sbs, p_resp->num_sbs,
1534 p_req->num_mac_filters, p_resp->num_mac_filters,
1535 p_req->num_vlan_filters, p_resp->num_vlan_filters,
1536 p_req->num_mc_filters, p_resp->num_mc_filters);
1538 /* Some legacy OSes are incapable of correctly handling this
1541 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1542 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1543 (p_vf->acquire.vfdev_info.os_type ==
1544 VFPF_ACQUIRE_OS_WINDOWS))
1545 return PFVF_STATUS_SUCCESS;
1547 return PFVF_STATUS_NO_RESOURCE;
1550 return PFVF_STATUS_SUCCESS;
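/* Report to the VF where its per-queue statistics live: the MSTORM, USTORM
 * and PSTORM zone-B offsets within the VF BAR0, plus the size of each
 * per-queue statistics structure (TSTORM is not exposed).
 */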
1553 static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
1554 struct pfvf_stats_info *p_stats)
1556 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1557 OFFSETOF(struct mstorm_vf_zone,
1558 non_trigger.eth_queue_stat);
1559 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1560 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1561 OFFSETOF(struct ustorm_vf_zone,
1562 non_trigger.eth_queue_stat);
1563 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1564 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1565 OFFSETOF(struct pstorm_vf_zone,
1566 non_trigger.eth_queue_stat);
1567 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1568 p_stats->tstats.address = 0;
1569 p_stats->tstats.len = 0;
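/* ACQUIRE handling: validate the VF state and fastpath-HSI compatibility,
 * store the request, fill in PF/device capabilities and the resources granted
 * by ecore_iov_vf_mbx_acquire_resc(), start the VF in FW and post an initial
 * bulletin before replying.
 */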
1572 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
1573 struct ecore_ptt *p_ptt,
1574 struct ecore_vf_info *vf)
1576 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1577 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1578 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1579 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1580 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1581 struct pf_vf_resc *resc = &resp->resc;
1582 enum _ecore_status_t rc;
1584 OSAL_MEMSET(resp, 0, sizeof(*resp));
1586 /* Write the PF version so that VF would know which version
1587 * is supported - might be later overridden. This guarantees that
1588 * VF could recognize legacy PF based on lack of versions in reply.
1590 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1591 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1593 /* TODO - not doing anything is bad since we'll assert, but this isn't
1594 * necessarily the right behavior - perhaps we should have allowed some
1597 if (vf->state != VF_FREE &&
1598 vf->state != VF_STOPPED) {
1599 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1600 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1601 vf->abs_vf_id, vf->state);
1605 /* Validate FW compatibility */
1606 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1607 if (req->vfdev_info.capabilities &
1608 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1609 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1611 /* This legacy support would need to be removed once
1612 * the major has changed.
1614 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1616 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1617 "VF[%d] is pre-fastpath HSI\n",
1619 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1620 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1623 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1624 " incompatible with loaded FW's faspath"
1627 req->vfdev_info.eth_fp_hsi_major,
1628 req->vfdev_info.eth_fp_hsi_minor,
1629 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1635 /* On 100g PFs, prevent old VFs from loading */
1636 if ((p_hwfn->p_dev->num_hwfns > 1) &&
1637 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1639 "VF[%d] is running an old driver that doesn't support"
1645 #ifndef __EXTRACT__LINUX__
1646 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1647 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1652 /* Store the acquire message */
1653 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1655 vf->opaque_fid = req->vfdev_info.opaque_fid;
1657 vf->vf_bulletin = req->bulletin_addr;
1658 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1659 vf->bulletin.size : req->bulletin_size;
1661 /* fill in pfdev info */
1662 pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1663 pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
1664 pfdev_info->indices_per_sb = PIS_PER_SB;
1666 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1667 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1668 if (p_hwfn->p_dev->num_hwfns > 1)
1669 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1671 ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1673 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1676 pfdev_info->fw_major = FW_MAJOR_VERSION;
1677 pfdev_info->fw_minor = FW_MINOR_VERSION;
1678 pfdev_info->fw_rev = FW_REVISION_VERSION;
1679 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1681 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1684 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1685 req->vfdev_info.eth_fp_hsi_minor);
1686 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1687 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1690 pfdev_info->dev_type = p_hwfn->p_dev->type;
1691 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1693 /* Fill resources available to VF; Make sure there are enough to
1694 * satisfy the VF's request.
1696 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1697 &req->resc_request, resc);
1698 if (vfpf_status != PFVF_STATUS_SUCCESS)
1701 /* Start the VF in FW */
1702 rc = ecore_sp_vf_start(p_hwfn, vf);
1703 if (rc != ECORE_SUCCESS) {
1704 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1706 vfpf_status = PFVF_STATUS_FAILURE;
1710 /* Fill agreed size of bulletin board in response, and post
1711 * an initial image to the bulletin board.
1713 resp->bulletin_size = vf->bulletin.size;
1714 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1716 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1717 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1718 " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1719 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1721 vf->abs_vf_id, resp->pfdev_info.chip_num,
1722 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1723 (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1724 resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1725 resc->num_vlan_filters);
1727 vf->state = VF_ACQUIRED;
1730 /* Prepare Response */
1731 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1732 sizeof(struct pfvf_acquire_resp_tlv),
1736 static enum _ecore_status_t
1737 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1738 struct ecore_vf_info *p_vf, bool val)
1740 struct ecore_sp_vport_update_params params;
1741 enum _ecore_status_t rc;
1743 if (val == p_vf->spoof_chk) {
1744 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1745 "Spoofchk value[%d] is already configured\n", val);
1746 return ECORE_SUCCESS;
1749 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_update_params));
1750 params.opaque_fid = p_vf->opaque_fid;
1751 params.vport_id = p_vf->vport_id;
1752 params.update_anti_spoofing_en_flg = 1;
1753 params.anti_spoofing_en = val;
1755 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1757 if (rc == ECORE_SUCCESS) {
1758 p_vf->spoof_chk = val;
1759 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1760 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1761 "Spoofchk val[%d] configured\n", val);
1763 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1764 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1765 val, p_vf->relative_vf_id);
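/* Re-add the VLAN filters recorded in the VF's shadow configuration; used
 * when a forced-VLAN setting is removed and the VF's own configuration has to
 * be restored.
 */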
1771 static enum _ecore_status_t
1772 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1773 struct ecore_vf_info *p_vf)
1775 struct ecore_filter_ucast filter;
1776 enum _ecore_status_t rc = ECORE_SUCCESS;
1779 OSAL_MEMSET(&filter, 0, sizeof(filter));
1780 filter.is_rx_filter = 1;
1781 filter.is_tx_filter = 1;
1782 filter.vport_to_add_to = p_vf->vport_id;
1783 filter.opcode = ECORE_FILTER_ADD;
1785 /* Reconfigure vlans */
1786 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1787 if (!p_vf->shadow_config.vlans[i].used)
1790 filter.type = ECORE_FILTER_VLAN;
1791 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1792 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1793 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1794 filter.vlan, p_vf->relative_vf_id);
1795 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1796 &filter, ECORE_SPQ_MODE_CB,
1799 DP_NOTICE(p_hwfn, true,
1800 "Failed to configure VLAN [%04x]"
1802 filter.vlan, p_vf->relative_vf_id);
1810 static enum _ecore_status_t
1811 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1812 struct ecore_vf_info *p_vf, u64 events)
1814 enum _ecore_status_t rc = ECORE_SUCCESS;
1816 /*TODO - what about MACs? */
1818 if ((events & (1 << VLAN_ADDR_FORCED)) &&
1819 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1820 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1825 static enum _ecore_status_t
1826 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1827 struct ecore_vf_info *p_vf,
1830 enum _ecore_status_t rc = ECORE_SUCCESS;
1831 struct ecore_filter_ucast filter;
1833 if (!p_vf->vport_instance)
1836 if (events & (1 << MAC_ADDR_FORCED)) {
1837 /* Since there's no way [currently] of removing the MAC,
1838 * we can always assume this means we need to force it.
1840 OSAL_MEMSET(&filter, 0, sizeof(filter));
1841 filter.type = ECORE_FILTER_MAC;
1842 filter.opcode = ECORE_FILTER_REPLACE;
1843 filter.is_rx_filter = 1;
1844 filter.is_tx_filter = 1;
1845 filter.vport_to_add_to = p_vf->vport_id;
1846 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1848 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1850 ECORE_SPQ_MODE_CB, OSAL_NULL);
1852 DP_NOTICE(p_hwfn, true,
1853 "PF failed to configure MAC for VF\n");
1857 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1860 if (events & (1 << VLAN_ADDR_FORCED)) {
1861 struct ecore_sp_vport_update_params vport_update;
1865 OSAL_MEMSET(&filter, 0, sizeof(filter));
1866 filter.type = ECORE_FILTER_VLAN;
1867 filter.is_rx_filter = 1;
1868 filter.is_tx_filter = 1;
1869 filter.vport_to_add_to = p_vf->vport_id;
1870 filter.vlan = p_vf->bulletin.p_virt->pvid;
1871 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
1874 /* Send the ramrod */
1875 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1877 ECORE_SPQ_MODE_CB, OSAL_NULL);
1879 DP_NOTICE(p_hwfn, true,
1880 "PF failed to configure VLAN for VF\n");
1884 /* Update the default-vlan & silent vlan stripping */
1885 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
1886 vport_update.opaque_fid = p_vf->opaque_fid;
1887 vport_update.vport_id = p_vf->vport_id;
1888 vport_update.update_default_vlan_enable_flg = 1;
1889 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1890 vport_update.update_default_vlan_flg = 1;
1891 vport_update.default_vlan = filter.vlan;
1893 vport_update.update_inner_vlan_removal_flg = 1;
1894 removal = filter.vlan ?
1895 1 : p_vf->shadow_config.inner_vlan_removal;
1896 vport_update.inner_vlan_removal_flg = removal;
1897 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1898 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
1899 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
1901 DP_NOTICE(p_hwfn, true,
1902 "PF failed to configure VF vport for vlan\n");
1906 /* Update all the Rx queues */
1907 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1908 struct ecore_queue_cid *p_cid;
1910 p_cid = p_vf->vf_queues[i].p_rx_cid;
1911 if (p_cid == OSAL_NULL)
1914 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
1917 ECORE_SPQ_MODE_EBLOCK,
1920 DP_NOTICE(p_hwfn, true,
1921 "Failed to send Rx update"
1922 " fo queue[0x%04x]\n",
1923 p_cid->rel.queue_id);
1929 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1931 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
1934 /* If forced features are terminated, we need to configure the shadow
1935 * configuration back again.
1938 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
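/* VPORT_START handling: program the VF's status blocks in CAU, honour any
 * hypervisor-forced untagged/VLAN settings from the bulletin, start the vport
 * in FW and re-apply forced features and spoof-checking.
 */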
1943 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
1944 struct ecore_ptt *p_ptt,
1945 struct ecore_vf_info *vf)
1947 struct ecore_sp_vport_start_params params = { 0 };
1948 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1949 struct vfpf_vport_start_tlv *start;
1950 u8 status = PFVF_STATUS_SUCCESS;
1951 struct ecore_vf_info *vf_info;
1954 enum _ecore_status_t rc;
1956 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
1958 DP_NOTICE(p_hwfn->p_dev, true,
1959 "Failed to get VF info, invalid vfid [%d]\n",
1960 vf->relative_vf_id);
1964 vf->state = VF_ENABLED;
1965 start = &mbx->req_virt->start_vport;
1967 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1969 /* Initialize Status block in CAU */
1970 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1971 if (!start->sb_addr[sb_id]) {
1972 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1973 "VF[%d] did not fill the address of SB %d\n",
1974 vf->relative_vf_id, sb_id);
1978 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
1979 start->sb_addr[sb_id],
1984 vf->mtu = start->mtu;
1985 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1987 /* Take into consideration configuration forced by hypervisor;
1988 * If none is configured, use the supplied VF values [for old
1989 * vfs that would still be fine, since they passed '0' as padding].
1991 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1992 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1993 u8 vf_req = start->only_untagged;
1995 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1996 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1999 params.tpa_mode = start->tpa_mode;
2000 params.remove_inner_vlan = start->inner_vlan_removal;
2001 params.tx_switching = true;
2004 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2005 DP_NOTICE(p_hwfn, false,
2006 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2007 params.tx_switching = false;
2011 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2012 params.drop_ttl0 = false;
2013 params.concrete_fid = vf->concrete_fid;
2014 params.opaque_fid = vf->opaque_fid;
2015 params.vport_id = vf->vport_id;
2016 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2017 params.mtu = vf->mtu;
2018 params.check_mac = true;
2020 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2021 if (rc != ECORE_SUCCESS) {
2023 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2024 status = PFVF_STATUS_FAILURE;
2026 vf->vport_instance++;
2028 /* Force configuration if needed on the newly opened vport */
2029 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2030 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2031 vf->vport_id, vf->opaque_fid);
2032 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2035 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2036 sizeof(struct pfvf_def_resp_tlv), status);
2039 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2040 struct ecore_ptt *p_ptt,
2041 struct ecore_vf_info *vf)
2043 u8 status = PFVF_STATUS_SUCCESS;
2044 enum _ecore_status_t rc;
2046 vf->vport_instance--;
2047 vf->spoof_chk = false;
2049 if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
2050 (ecore_iov_validate_active_txq(p_hwfn, vf))) {
2051 vf->b_malicious = true;
2052 DP_NOTICE(p_hwfn, false,
2053 "VF [%02x] - considered malicious;"
2054 " Unable to stop RX/TX queuess\n",
2058 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2059 if (rc != ECORE_SUCCESS) {
2061 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2062 status = PFVF_STATUS_FAILURE;
2065 /* Forget the configuration on the vport */
2066 vf->configured_features = 0;
2067 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2069 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2070 sizeof(struct pfvf_def_resp_tlv), status);
2073 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2074 struct ecore_ptt *p_ptt,
2075 struct ecore_vf_info *vf,
2076 u8 status, bool b_legacy)
2078 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2079 struct pfvf_start_queue_resp_tlv *p_tlv;
2080 struct vfpf_start_rxq_tlv *req;
2083 mbx->offset = (u8 *)mbx->reply_virt;
2085 /* Taking a bigger struct instead of adding a TLV to the list was a
2086 * mistake, but one which we're now stuck with, as some older
2087 * clients assume the size of the previous response.
2090 length = sizeof(*p_tlv);
2092 length = sizeof(struct pfvf_def_resp_tlv);
2094 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2096 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2097 sizeof(struct channel_list_end_tlv));
2099 /* Update the TLV with the response */
2100 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2101 req = &mbx->req_virt->start_rxq;
2102 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2103 OFFSETOF(struct mstorm_vf_zone,
2104 non_trigger.eth_rx_queue_producers) +
2105 sizeof(struct eth_rx_prod_data) * req->rx_qid;
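/* The returned offset points at this queue's Rx producer inside the
 * MSDM zone of the VF's BAR0; the VF writes its producer there when
 * posting new Rx buffers.
 */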
2108 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2111 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2112 struct ecore_ptt *p_ptt,
2113 struct ecore_vf_info *vf)
2115 struct ecore_queue_start_common_params params;
2116 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2117 u8 status = PFVF_STATUS_NO_RESOURCE;
2118 struct ecore_vf_q_info *p_queue;
2119 struct vfpf_start_rxq_tlv *req;
2120 bool b_legacy_vf = false;
2121 enum _ecore_status_t rc;
2123 req = &mbx->req_virt->start_rxq;
2125 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
2126 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2129 /* Acquire a new queue-cid */
2130 p_queue = &vf->vf_queues[req->rx_qid];
2132 OSAL_MEMSET(&params, 0, sizeof(params));
2133 params.queue_id = (u8)p_queue->fw_rx_qid;
2134 params.vport_id = vf->vport_id;
2135 params.stats_id = vf->abs_vf_id + 0x10;
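/* VF statistics counters follow the per-PF counters, hence the 0x10
 * offset on the absolute VF id.
 */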
2136 params.sb = req->hw_sb;
2137 params.sb_idx = req->sb_index;
2139 p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
2144 if (p_queue->p_rx_cid == OSAL_NULL)
2147 /* Legacy VFs have their producers in a different location, which they
2148 * calculate on their own; they also clean the producer prior to this.
2150 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2151 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2155 GTT_BAR0_MAP_REG_MSDM_RAM +
2156 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2158 p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
2161 rc = ecore_eth_rxq_start_ramrod(p_hwfn,
2167 if (rc != ECORE_SUCCESS) {
2168 status = PFVF_STATUS_FAILURE;
2169 ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
2170 p_queue->p_rx_cid = OSAL_NULL;
2172 status = PFVF_STATUS_SUCCESS;
2173 vf->num_active_rxqs++;
2177 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2182 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2183 struct ecore_tunnel_info *p_tun,
2184 u16 tunn_feature_mask)
2186 p_resp->tunn_feature_mask = tunn_feature_mask;
2187 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2188 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2189 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2190 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2191 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2192 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2193 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2194 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2195 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2196 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2197 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2198 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
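/* The response echoes the PF's current tunnel configuration so the VF
 * can cache the actually-applied settings.
 */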
2202 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2203 struct ecore_tunn_update_type *p_tun,
2204 enum ecore_tunn_mode mask, u8 tun_cls)
2206 if (p_req->tun_mode_update_mask & (1 << mask)) {
2207 p_tun->b_update_mode = true;
2209 if (p_req->tunn_mode & (1 << mask))
2210 p_tun->b_mode_enabled = true;
2213 p_tun->tun_cls = tun_cls;
2217 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2218 struct ecore_tunn_update_type *p_tun,
2219 struct ecore_tunn_update_udp_port *p_port,
2220 enum ecore_tunn_mode mask,
2221 u8 tun_cls, u8 update_port, u16 port)
2224 p_port->b_update_port = true;
2225 p_port->port = port;
2228 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2232 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2234 bool b_update_requested = false;
2236 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2237 p_req->update_geneve_port || p_req->update_vxlan_port)
2238 b_update_requested = true;
2240 return b_update_requested;
2243 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2244 struct ecore_ptt *p_ptt,
2245 struct ecore_vf_info *p_vf)
2247 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2248 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2249 struct pfvf_update_tunn_param_tlv *p_resp;
2250 struct vfpf_update_tunn_param_tlv *p_req;
2251 enum _ecore_status_t rc = ECORE_SUCCESS;
2252 u8 status = PFVF_STATUS_SUCCESS;
2253 bool b_update_required = false;
2254 struct ecore_tunnel_info tunn;
2255 u16 tunn_feature_mask = 0;
2258 mbx->offset = (u8 *)mbx->reply_virt;
2260 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2261 p_req = &mbx->req_virt->tunn_param_update;
2263 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2264 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2265 "No tunnel update requested by VF\n");
2266 status = PFVF_STATUS_FAILURE;
2270 tunn.b_update_rx_cls = p_req->update_tun_cls;
2271 tunn.b_update_tx_cls = p_req->update_tun_cls;
2273 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2274 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2275 p_req->update_vxlan_port,
2277 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2278 ECORE_MODE_L2GENEVE_TUNN,
2279 p_req->l2geneve_clss,
2280 p_req->update_geneve_port,
2281 p_req->geneve_port);
2282 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2283 ECORE_MODE_IPGENEVE_TUNN,
2284 p_req->ipgeneve_clss);
2285 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2286 ECORE_MODE_L2GRE_TUNN,
2288 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2289 ECORE_MODE_IPGRE_TUNN,
2292 /* If PF modifies VF's req then it should
2293 * still return an error in case of partial configuration
2294 * or modified configuration as opposed to requested one.
2296 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2297 &b_update_required, &tunn);
2299 if (rc != ECORE_SUCCESS)
2300 status = PFVF_STATUS_FAILURE;
2302 /* Check whether the ECORE client is willing to update anything */
2303 if (b_update_required) {
2306 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2307 ECORE_SPQ_MODE_EBLOCK,
2309 if (rc != ECORE_SUCCESS)
2310 status = PFVF_STATUS_FAILURE;
2312 geneve_port = p_tun->geneve_port.port;
2313 ecore_for_each_vf(p_hwfn, i) {
2314 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2315 p_tun->vxlan_port.port,
2321 p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
2322 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2324 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2325 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2326 sizeof(struct channel_list_end_tlv));
2328 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2331 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2332 struct ecore_ptt *p_ptt,
2333 struct ecore_vf_info *p_vf,
2336 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2337 struct pfvf_start_queue_resp_tlv *p_tlv;
2338 bool b_legacy = false;
2341 mbx->offset = (u8 *)mbx->reply_virt;
2343 /* Taking a bigger struct instead of adding a TLV to the list was a
2344 * mistake, but one which we're now stuck with, as some older
2345 * clients assume the size of the previous response.
2347 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2348 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2352 length = sizeof(*p_tlv);
2354 length = sizeof(struct pfvf_def_resp_tlv);
2356 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2358 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2359 sizeof(struct channel_list_end_tlv));
2361 /* Update the TLV with the response */
2362 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2363 u16 qid = mbx->req_virt->start_txq.tx_qid;
2365 p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
2369 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2372 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2373 struct ecore_ptt *p_ptt,
2374 struct ecore_vf_info *vf)
2376 struct ecore_queue_start_common_params params;
2377 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2378 u8 status = PFVF_STATUS_NO_RESOURCE;
2379 struct ecore_vf_q_info *p_queue;
2380 struct vfpf_start_txq_tlv *req;
2381 enum _ecore_status_t rc;
2384 OSAL_MEMSET(&params, 0, sizeof(params));
2385 req = &mbx->req_virt->start_txq;
2387 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
2388 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2391 /* Acquire a new queue-cid */
2392 p_queue = &vf->vf_queues[req->tx_qid];
2394 params.queue_id = p_queue->fw_tx_qid;
2395 params.vport_id = vf->vport_id;
2396 params.stats_id = vf->abs_vf_id + 0x10;
2397 params.sb = req->hw_sb;
2398 params.sb_idx = req->sb_index;
2400 p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
2405 if (p_queue->p_tx_cid == OSAL_NULL)
2408 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2409 vf->relative_vf_id);
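/* Tx queues of a VF are serviced by the VF's dedicated physical queue
 * (PQ) in the queue manager; fetch its index for the ramrod.
 */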
2410 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
2411 req->pbl_addr, req->pbl_size, pq);
2412 if (rc != ECORE_SUCCESS) {
2413 status = PFVF_STATUS_FAILURE;
2414 ecore_eth_queue_cid_release(p_hwfn,
2416 p_queue->p_tx_cid = OSAL_NULL;
2418 status = PFVF_STATUS_SUCCESS;
2422 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
2425 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2426 struct ecore_vf_info *vf,
2429 bool cqe_completion)
2431 struct ecore_vf_q_info *p_queue;
2432 enum _ecore_status_t rc = ECORE_SUCCESS;
2435 if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2438 for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2439 p_queue = &vf->vf_queues[qid];
2441 if (!p_queue->p_rx_cid)
2444 rc = ecore_eth_rx_queue_stop(p_hwfn,
2446 false, cqe_completion);
2447 if (rc != ECORE_SUCCESS)
2450 vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
2451 vf->num_active_rxqs--;
2457 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2458 struct ecore_vf_info *vf,
2459 u16 txq_id, u8 num_txqs)
2461 enum _ecore_status_t rc = ECORE_SUCCESS;
2462 struct ecore_vf_q_info *p_queue;
2465 if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2468 for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2469 p_queue = &vf->vf_queues[qid];
2470 if (!p_queue->p_tx_cid)
2473 rc = ecore_eth_tx_queue_stop(p_hwfn,
2475 if (rc != ECORE_SUCCESS)
2478 p_queue->p_tx_cid = OSAL_NULL;
2483 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2484 struct ecore_ptt *p_ptt,
2485 struct ecore_vf_info *vf)
2487 u16 length = sizeof(struct pfvf_def_resp_tlv);
2488 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2489 u8 status = PFVF_STATUS_SUCCESS;
2490 struct vfpf_stop_rxqs_tlv *req;
2491 enum _ecore_status_t rc;
2493 /* We give the option of starting from qid != 0; in this case we
2494 * need to make sure that qid + num_qs doesn't exceed the actual
2495 * number of queues that exist.
2497 req = &mbx->req_virt->stop_rxqs;
2498 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2499 req->num_rxqs, req->cqe_completion);
2501 status = PFVF_STATUS_FAILURE;
2503 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2507 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2508 struct ecore_ptt *p_ptt,
2509 struct ecore_vf_info *vf)
2511 u16 length = sizeof(struct pfvf_def_resp_tlv);
2512 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2513 u8 status = PFVF_STATUS_SUCCESS;
2514 struct vfpf_stop_txqs_tlv *req;
2515 enum _ecore_status_t rc;
2517 /* We give the option of starting from qid != 0; in this case we
2518 * need to make sure that qid + num_qs doesn't exceed the actual
2519 * number of queues that exist.
2521 req = &mbx->req_virt->stop_txqs;
2522 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2524 status = PFVF_STATUS_FAILURE;
2526 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2530 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2531 struct ecore_ptt *p_ptt,
2532 struct ecore_vf_info *vf)
2534 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2535 u16 length = sizeof(struct pfvf_def_resp_tlv);
2536 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2537 struct vfpf_update_rxq_tlv *req;
2538 u8 status = PFVF_STATUS_FAILURE;
2539 u8 complete_event_flg;
2540 u8 complete_cqe_flg;
2542 enum _ecore_status_t rc;
2545 req = &mbx->req_virt->update_rxq;
2546 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2547 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
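/* These flags tell the FW whether to post a completion on the CQE
 * and/or the event ring once the Rx queue update ramrod is done.
 */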
2549 /* Validate inputs */
2550 if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
2551 !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
2552 DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2553 vf->relative_vf_id, req->rx_qid, req->num_rxqs);
2557 for (i = 0; i < req->num_rxqs; i++) {
2558 qid = req->rx_qid + i;
2560 if (!vf->vf_queues[qid].p_rx_cid) {
2562 "VF[%d] rx_qid = %d isn`t active!\n",
2563 vf->relative_vf_id, qid);
2567 handlers[i] = vf->vf_queues[qid].p_rx_cid;
2570 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2574 ECORE_SPQ_MODE_EBLOCK,
2579 status = PFVF_STATUS_SUCCESS;
2581 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2585 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2586 void *p_tlvs_list, u16 req_type)
2588 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
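/* Walk the TLV chain until CHANNEL_TLV_LIST_END, guarding against
 * zero-length entries and against running past TLV_BUFFER_SIZE.
 */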
2592 if (!p_tlv->length) {
2593 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2597 if (p_tlv->type == req_type) {
2598 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2599 "Extended tlv type %s, length %d found\n",
2600 ecore_channel_tlvs_string[p_tlv->type],
2605 len += p_tlv->length;
2606 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2608 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2609 DP_NOTICE(p_hwfn, true,
2610 "TLVs has overrun the buffer size\n");
2613 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2619 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2620 struct ecore_sp_vport_update_params *p_data,
2621 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2623 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2624 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2626 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2627 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2631 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2632 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2633 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2634 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2635 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2639 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2640 struct ecore_sp_vport_update_params *p_data,
2641 struct ecore_vf_info *p_vf,
2642 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2644 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2645 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2647 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2648 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2652 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2654 /* Ignore the VF request if we're forcing a vlan */
2655 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2656 p_data->update_inner_vlan_removal_flg = 1;
2657 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2660 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2664 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2665 struct ecore_sp_vport_update_params *p_data,
2666 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2668 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2669 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2671 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2672 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2673 if (!p_tx_switch_tlv)
2677 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2678 DP_NOTICE(p_hwfn, false,
2679 "FPGA: Ignore tx-switching configuration originating"
2685 p_data->update_tx_switching_flg = 1;
2686 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2687 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2691 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2692 struct ecore_sp_vport_update_params *p_data,
2693 struct ecore_iov_vf_mbx *p_mbx,
2696 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2697 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2699 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2700 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2704 p_data->update_approx_mcast_flg = 1;
2705 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2706 sizeof(unsigned long) *
2707 ETH_MULTICAST_MAC_BINS_IN_REGS);
2708 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2712 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2713 struct ecore_sp_vport_update_params *p_data,
2714 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2716 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2717 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2718 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2720 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2721 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2725 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2726 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2727 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2728 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2729 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2733 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2734 struct ecore_sp_vport_update_params *p_data,
2735 struct ecore_iov_vf_mbx *p_mbx,
2738 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2739 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2741 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2742 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2743 if (!p_accept_any_vlan)
2746 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2747 p_data->update_accept_any_vlan_flg =
2748 p_accept_any_vlan->update_accept_any_vlan_flg;
2749 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2753 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2754 struct ecore_vf_info *vf,
2755 struct ecore_sp_vport_update_params *p_data,
2756 struct ecore_rss_params *p_rss,
2757 struct ecore_iov_vf_mbx *p_mbx,
2758 u16 *tlvs_mask, u16 *tlvs_accepted)
2760 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2761 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2762 bool b_reject = false;
2766 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2767 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2769 p_data->rss_params = OSAL_NULL;
2773 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2775 p_rss->update_rss_config =
2776 !!(p_rss_tlv->update_rss_flags &
2777 VFPF_UPDATE_RSS_CONFIG_FLAG);
2778 p_rss->update_rss_capabilities =
2779 !!(p_rss_tlv->update_rss_flags &
2780 VFPF_UPDATE_RSS_CAPS_FLAG);
2781 p_rss->update_rss_ind_table =
2782 !!(p_rss_tlv->update_rss_flags &
2783 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2784 p_rss->update_rss_key =
2785 !!(p_rss_tlv->update_rss_flags &
2786 VFPF_UPDATE_RSS_KEY_FLAG);
2788 p_rss->rss_enable = p_rss_tlv->rss_enable;
2789 p_rss->rss_eng_id = vf->rss_eng_id;
2790 p_rss->rss_caps = p_rss_tlv->rss_caps;
2791 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2792 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2793 sizeof(p_rss->rss_key));
2795 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2796 (1 << p_rss_tlv->rss_table_size_log));
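/* Clamp the indirection table size to what the PF-side table can hold,
 * even if the VF requested a larger log2 size.
 */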
2798 for (i = 0; i < table_size; i++) {
2799 q_idx = p_rss_tlv->rss_ind_table[i];
2800 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
2801 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2802 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2803 vf->relative_vf_id, q_idx);
2808 if (!vf->vf_queues[q_idx].p_rx_cid) {
2809 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2810 "VF[%d]: Omitting RSS due to inactive queue %08x\n",
2811 vf->relative_vf_id, q_idx);
2816 p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
2819 p_data->rss_params = p_rss;
2821 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2823 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2827 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2828 struct ecore_vf_info *vf,
2829 struct ecore_sp_vport_update_params *p_data,
2830 struct ecore_sge_tpa_params *p_sge_tpa,
2831 struct ecore_iov_vf_mbx *p_mbx,
2834 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2835 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2837 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2838 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2840 if (!p_sge_tpa_tlv) {
2841 p_data->sge_tpa_params = OSAL_NULL;
2845 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
2847 p_sge_tpa->update_tpa_en_flg =
2848 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2849 p_sge_tpa->update_tpa_param_flg =
2850 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2851 VFPF_UPDATE_TPA_PARAM_FLAG);
2853 p_sge_tpa->tpa_ipv4_en_flg =
2854 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2855 p_sge_tpa->tpa_ipv6_en_flg =
2856 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2857 p_sge_tpa->tpa_pkt_split_flg =
2858 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2859 p_sge_tpa->tpa_hdr_data_split_flg =
2860 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2861 p_sge_tpa->tpa_gro_consistent_flg =
2862 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2864 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2865 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2866 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2867 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2868 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2870 p_data->sge_tpa_params = p_sge_tpa;
2872 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
2875 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
2876 struct ecore_ptt *p_ptt,
2877 struct ecore_vf_info *vf)
2879 struct ecore_rss_params *p_rss_params = OSAL_NULL;
2880 struct ecore_sp_vport_update_params params;
2881 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2882 struct ecore_sge_tpa_params sge_tpa_params;
2883 u16 tlvs_mask = 0, tlvs_accepted = 0;
2884 u8 status = PFVF_STATUS_SUCCESS;
2886 enum _ecore_status_t rc;
2888 /* Validate that the VF has an active vport before processing the request */
2889 if (!vf->vport_instance) {
2890 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2891 "No VPORT instance available for VF[%d],"
2892 " failing vport update\n",
2894 status = PFVF_STATUS_FAILURE;
2898 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
2899 if (p_rss_params == OSAL_NULL) {
2900 status = PFVF_STATUS_FAILURE;
2904 OSAL_MEMSET(&params, 0, sizeof(params));
2905 params.opaque_fid = vf->opaque_fid;
2906 params.vport_id = vf->vport_id;
2907 params.rss_params = OSAL_NULL;
2909 /* Search for extended tlvs list and update values
2910 * from VF in struct ecore_sp_vport_update_params.
2912 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2913 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2914 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2915 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2916 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2917 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2918 ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2919 &sge_tpa_params, mbx, &tlvs_mask);
2921 tlvs_accepted = tlvs_mask;
2923 /* Some of the extended TLVs need to be validated first; In that case,
2924 * they can update the mask without updating the accepted [so that
2925 * PF could communicate to VF it has rejected request].
2927 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
2928 mbx, &tlvs_mask, &tlvs_accepted);
2930 /* Just log a message if no extended TLV is present in the buffer.
2931 * Once all features of the vport update ramrod are requested by the VF
2932 * as extended TLVs, an error can be returned in the response when no
2933 * extended TLV is present in the buffer.
2935 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
2936 &params, &tlvs_accepted) !=
2939 status = PFVF_STATUS_NOT_SUPPORTED;
2943 if (!tlvs_accepted) {
2945 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2946 "Upper-layer prevents said VF"
2947 " configuration\n");
2949 DP_NOTICE(p_hwfn, true,
2950 "No feature tlvs found for vport update\n");
2951 status = PFVF_STATUS_NOT_SUPPORTED;
2955 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
2959 status = PFVF_STATUS_FAILURE;
2962 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
2963 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2964 tlvs_mask, tlvs_accepted);
2965 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2968 static enum _ecore_status_t
2969 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
2970 struct ecore_vf_info *p_vf,
2971 struct ecore_filter_ucast *p_params)
2975 /* First remove entries and then add new ones */
2976 if (p_params->opcode == ECORE_FILTER_REMOVE) {
2977 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2978 if (p_vf->shadow_config.vlans[i].used &&
2979 p_vf->shadow_config.vlans[i].vid ==
2981 p_vf->shadow_config.vlans[i].used = false;
2984 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2985 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2986 "VF [%d] - Tries to remove a non-existing"
2988 p_vf->relative_vf_id);
2991 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
2992 p_params->opcode == ECORE_FILTER_FLUSH) {
2993 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2994 p_vf->shadow_config.vlans[i].used = false;
2997 /* In forced mode, we're willing to remove entries - but we don't add
3000 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3001 return ECORE_SUCCESS;
3003 if (p_params->opcode == ECORE_FILTER_ADD ||
3004 p_params->opcode == ECORE_FILTER_REPLACE) {
3005 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3006 if (p_vf->shadow_config.vlans[i].used)
3009 p_vf->shadow_config.vlans[i].used = true;
3010 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3014 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3015 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3016 "VF [%d] - Tries to configure more than %d"
3018 p_vf->relative_vf_id,
3019 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3024 return ECORE_SUCCESS;
3027 static enum _ecore_status_t
3028 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3029 struct ecore_vf_info *p_vf,
3030 struct ecore_filter_ucast *p_params)
3032 char empty_mac[ETH_ALEN];
3035 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3037 /* If we're in forced-mode, we don't allow any change */
3038 /* TODO - this would change if we were ever to implement logic for
3039 * removing a forced MAC altogether [in which case, like for vlans,
3040 * we should be able to re-trace previous configuration.
3042 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3043 return ECORE_SUCCESS;
3045 /* First remove entries and then add new ones */
3046 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3047 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3048 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3049 p_params->mac, ETH_ALEN)) {
3050 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3056 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3057 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3058 "MAC isn't configured\n");
3061 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3062 p_params->opcode == ECORE_FILTER_FLUSH) {
3063 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3064 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3067 /* List the new MAC address */
3068 if (p_params->opcode != ECORE_FILTER_ADD &&
3069 p_params->opcode != ECORE_FILTER_REPLACE)
3070 return ECORE_SUCCESS;
3072 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3073 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3074 empty_mac, ETH_ALEN)) {
3075 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3076 p_params->mac, ETH_ALEN);
3077 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3078 "Added MAC at %d entry in shadow\n", i);
3083 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3084 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3085 "No available place for MAC\n");
3089 return ECORE_SUCCESS;
3092 static enum _ecore_status_t
3093 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3094 struct ecore_vf_info *p_vf,
3095 struct ecore_filter_ucast *p_params)
3097 enum _ecore_status_t rc = ECORE_SUCCESS;
3099 if (p_params->type == ECORE_FILTER_MAC) {
3100 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3101 if (rc != ECORE_SUCCESS)
3105 if (p_params->type == ECORE_FILTER_VLAN)
3106 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3111 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3112 struct ecore_ptt *p_ptt,
3113 struct ecore_vf_info *vf)
3115 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3116 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3117 struct vfpf_ucast_filter_tlv *req;
3118 u8 status = PFVF_STATUS_SUCCESS;
3119 struct ecore_filter_ucast params;
3120 enum _ecore_status_t rc;
3122 /* Prepare the unicast filter params */
3123 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3124 req = &mbx->req_virt->ucast_filter;
3125 params.opcode = (enum ecore_filter_opcode)req->opcode;
3126 params.type = (enum ecore_filter_ucast_type)req->type;
3128 /* @@@TBD - We might need logic on HV side in determining this */
3129 params.is_rx_filter = 1;
3130 params.is_tx_filter = 1;
3131 params.vport_to_remove_from = vf->vport_id;
3132 params.vport_to_add_to = vf->vport_id;
3133 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3134 params.vlan = req->vlan;
3136 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3137 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3138 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3139 vf->abs_vf_id, params.opcode, params.type,
3140 params.is_rx_filter ? "RX" : "",
3141 params.is_tx_filter ? "TX" : "",
3142 params.vport_to_add_to,
3143 params.mac[0], params.mac[1], params.mac[2],
3144 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3146 if (!vf->vport_instance) {
3147 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3148 "No VPORT instance available for VF[%d],"
3149 " failing ucast MAC configuration\n",
3151 status = PFVF_STATUS_FAILURE;
3155 /* Update shadow copy of the VF configuration */
3156 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3158 status = PFVF_STATUS_FAILURE;
3162 /* Determine if the unicast filtering is acceptable to the PF */
3163 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3164 (params.type == ECORE_FILTER_VLAN ||
3165 params.type == ECORE_FILTER_MAC_VLAN)) {
3166 /* Once a VLAN is forced or a PVID is set, do not allow
3167 * any further VLANs to be added or replaced.
3169 if (params.opcode == ECORE_FILTER_ADD ||
3170 params.opcode == ECORE_FILTER_REPLACE)
3171 status = PFVF_STATUS_FORCED;
3175 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3176 (params.type == ECORE_FILTER_MAC ||
3177 params.type == ECORE_FILTER_MAC_VLAN)) {
3178 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3179 (params.opcode != ECORE_FILTER_ADD &&
3180 params.opcode != ECORE_FILTER_REPLACE))
3181 status = PFVF_STATUS_FORCED;
3185 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3186 if (rc == ECORE_EXISTS) {
3188 } else if (rc == ECORE_INVAL) {
3189 status = PFVF_STATUS_FAILURE;
3193 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3194 ECORE_SPQ_MODE_CB, OSAL_NULL);
3196 status = PFVF_STATUS_FAILURE;
3199 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3200 sizeof(struct pfvf_def_resp_tlv), status);
3203 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3204 struct ecore_ptt *p_ptt,
3205 struct ecore_vf_info *vf)
3210 for (i = 0; i < vf->num_sbs; i++)
3211 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3213 vf->opaque_fid, false);
3215 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3216 sizeof(struct pfvf_def_resp_tlv),
3217 PFVF_STATUS_SUCCESS);
3220 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3221 struct ecore_ptt *p_ptt,
3222 struct ecore_vf_info *vf)
3224 u16 length = sizeof(struct pfvf_def_resp_tlv);
3225 u8 status = PFVF_STATUS_SUCCESS;
3227 /* Disable Interrupts for VF */
3228 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3230 /* Reset Permission table */
3231 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3233 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3237 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3238 struct ecore_ptt *p_ptt,
3239 struct ecore_vf_info *p_vf)
3241 u16 length = sizeof(struct pfvf_def_resp_tlv);
3242 u8 status = PFVF_STATUS_SUCCESS;
3243 enum _ecore_status_t rc = ECORE_SUCCESS;
3245 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3247 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3248 /* Stopping the VF */
3249 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3252 if (rc != ECORE_SUCCESS) {
3253 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3255 status = PFVF_STATUS_FAILURE;
3258 p_vf->state = VF_STOPPED;
3261 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3265 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3266 struct ecore_ptt *p_ptt,
3267 struct ecore_vf_info *vf)
3269 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3270 enum _ecore_status_t rc = ECORE_SUCCESS;
3271 struct vfpf_update_coalesce *req;
3272 u8 status = PFVF_STATUS_FAILURE;
3273 struct ecore_queue_cid *p_cid;
3274 u16 rx_coal, tx_coal;
3277 req = &mbx->req_virt->update_coalesce;
3279 rx_coal = req->rx_coal;
3280 tx_coal = req->tx_coal;
3282 p_cid = vf->vf_queues[qid].p_rx_cid;
3284 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid)) {
3285 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3286 vf->abs_vf_id, qid);
3290 if (!ecore_iov_validate_txq(p_hwfn, vf, qid)) {
3291 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3292 vf->abs_vf_id, qid);
3296 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3297 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3298 vf->abs_vf_id, rx_coal, tx_coal, qid);
3300 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3301 if (rc != ECORE_SUCCESS) {
3302 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3303 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3304 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3309 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
3310 if (rc != ECORE_SUCCESS) {
3311 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3312 "VF[%d]: Unable to set tx queue = %d coalesce\n",
3313 vf->abs_vf_id, vf->vf_queues[qid].fw_tx_qid);
3318 status = PFVF_STATUS_SUCCESS;
3320 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3321 sizeof(struct pfvf_def_resp_tlv), status);
3324 static enum _ecore_status_t
3325 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3326 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3331 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
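/* Pretend to be the VF so the DORQ usage counter read below reflects
 * that VF's outstanding doorbells.
 */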
3333 for (cnt = 0; cnt < 50; cnt++) {
3334 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3339 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3343 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3344 p_vf->abs_vf_id, val);
3345 return ECORE_TIMEOUT;
3348 return ECORE_SUCCESS;
3351 static enum _ecore_status_t
3352 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3353 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3355 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3358 /* Read initial consumers & producers */
3359 for (i = 0; i < MAX_NUM_VOQS; i++) {
3362 cons[i] = ecore_rd(p_hwfn, p_ptt,
3363 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3365 prod = ecore_rd(p_hwfn, p_ptt,
3366 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3368 distance[i] = prod - cons[i];
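/* distance[] snapshots how many blocks were still in flight per VOQ;
 * the polling below waits until the consumers advance by at least that
 * much.
 */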
3371 /* Wait for consumers to pass the producers */
3373 for (cnt = 0; cnt < 50; cnt++) {
3374 for (; i < MAX_NUM_VOQS; i++) {
3377 tmp = ecore_rd(p_hwfn, p_ptt,
3378 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3380 if (distance[i] > tmp - cons[i])
3384 if (i == MAX_NUM_VOQS)
3391 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3392 p_vf->abs_vf_id, i);
3393 return ECORE_TIMEOUT;
3396 return ECORE_SUCCESS;
3399 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3400 struct ecore_vf_info *p_vf,
3401 struct ecore_ptt *p_ptt)
3403 enum _ecore_status_t rc;
3405 /* TODO - add SRC and TM polling once we add storage IOV */
3407 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3411 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3415 return ECORE_SUCCESS;
3418 static enum _ecore_status_t
3419 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3420 struct ecore_ptt *p_ptt,
3421 u16 rel_vf_id, u32 *ack_vfs)
3423 struct ecore_vf_info *p_vf;
3424 enum _ecore_status_t rc = ECORE_SUCCESS;
3426 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3428 return ECORE_SUCCESS;
3430 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3431 (1ULL << (rel_vf_id % 64))) {
3432 u16 vfid = p_vf->abs_vf_id;
3434 /* TODO - should we lock channel? */
3436 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3437 "VF[%d] - Handling FLR\n", vfid);
3439 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3441 /* If VF isn't active, no need for anything but SW */
3445 /* TODO - what to do in case of failure? */
3446 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3447 if (rc != ECORE_SUCCESS)
3450 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3452 /* TODO - what's now? What a mess.... */
3453 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3457 /* Workaround to make VF-PF channel ready, as FW
3458 * doesn't do that as a part of FLR.
3461 GTT_BAR0_MAP_REG_USDM_RAM +
3462 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3464 /* VF_STOPPED has to be set only after final cleanup
3465 * but prior to re-enabling the VF.
3467 p_vf->state = VF_STOPPED;
3469 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3471 /* TODO - again, a mess... */
3472 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3477 /* Mark VF for ack and clean pending state */
3478 if (p_vf->state == VF_RESET)
3479 p_vf->state = VF_STOPPED;
3480 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
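/* The ack bitmap is indexed by absolute VF id, 32 bits per word; it is
 * later handed to the MFW via ecore_mcp_ack_vf_flr().
 */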
3481 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3482 ~(1ULL << (rel_vf_id % 64));
3483 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3484 ~(1ULL << (rel_vf_id % 64));
3490 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3491 struct ecore_ptt *p_ptt)
3493 u32 ack_vfs[VF_MAX_STATIC / 32];
3494 enum _ecore_status_t rc = ECORE_SUCCESS;
3497 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3499 /* Since BRB <-> PRS interface can't be tested as part of the flr
3500 * polling due to HW limitations, simply sleep a bit. And since
3501 * there's no need to wait per-vf, do it before looping.
3505 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3506 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3508 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3512 enum _ecore_status_t
3513 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3514 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3516 u32 ack_vfs[VF_MAX_STATIC / 32];
3517 enum _ecore_status_t rc = ECORE_SUCCESS;
3519 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3521 /* Wait instead of polling the BRB <-> PRS interface */
3524 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3526 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3530 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3535 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3536 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3537 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3538 "[%08x,...,%08x]: %08x\n",
3539 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3541 if (!p_hwfn->p_dev->p_iov_info) {
3542 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3547 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3548 struct ecore_vf_info *p_vf;
3551 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3555 vfid = p_vf->abs_vf_id;
3556 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3557 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3558 u16 rel_vf_id = p_vf->relative_vf_id;
3560 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3561 "VF[%d] [rel %d] got FLR-ed\n",
3564 p_vf->state = VF_RESET;
3566 /* No need to lock here, since pending_flr should
3567 * only change here and before ACKing MFW. Since
3568 * MFW will not trigger an additional attention for
3569 * VF flr until ACKs, we're safe.
3571 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3579 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3581 struct ecore_mcp_link_params *p_params,
3582 struct ecore_mcp_link_state *p_link,
3583 struct ecore_mcp_link_capabilities *p_caps)
3585 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3586 struct ecore_bulletin_content *p_bulletin;
3591 p_bulletin = p_vf->bulletin.p_virt;
3594 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3596 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3598 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3601 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3602 struct ecore_ptt *p_ptt, int vfid)
3604 struct ecore_iov_vf_mbx *mbx;
3605 struct ecore_vf_info *p_vf;
3607 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3611 mbx = &p_vf->vf_mbx;
3613 /* ecore_iov_process_mbx_request */
3616 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3618 mbx->first_tlv = mbx->req_virt->first_tlv;
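/* Keep a local copy of the first TLV; the opcode and reply address used
 * below are taken from this copy rather than re-read from the request
 * buffer.
 */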
3620 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3621 p_vf->relative_vf_id,
3622 mbx->first_tlv.tl.type);
3624 /* Lock the per vf op mutex and note the locker's identity.
3625 * The unlock will take place in mbx response.
3627 ecore_iov_lock_vf_pf_channel(p_hwfn,
3628 p_vf, mbx->first_tlv.tl.type);
3630 /* check if tlv type is known */
3631 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3632 !p_vf->b_malicious) {
3633 /* switch on the opcode */
3634 switch (mbx->first_tlv.tl.type) {
3635 case CHANNEL_TLV_ACQUIRE:
3636 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3638 case CHANNEL_TLV_VPORT_START:
3639 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3641 case CHANNEL_TLV_VPORT_TEARDOWN:
3642 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3644 case CHANNEL_TLV_START_RXQ:
3645 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3647 case CHANNEL_TLV_START_TXQ:
3648 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3650 case CHANNEL_TLV_STOP_RXQS:
3651 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3653 case CHANNEL_TLV_STOP_TXQS:
3654 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3656 case CHANNEL_TLV_UPDATE_RXQ:
3657 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3659 case CHANNEL_TLV_VPORT_UPDATE:
3660 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3662 case CHANNEL_TLV_UCAST_FILTER:
3663 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3665 case CHANNEL_TLV_CLOSE:
3666 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3668 case CHANNEL_TLV_INT_CLEANUP:
3669 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3671 case CHANNEL_TLV_RELEASE:
3672 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3674 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3675 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3677 case CHANNEL_TLV_COALESCE_UPDATE:
3678 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3681 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3682 /* If we've received a message from a VF we consider malicious
3683 * we ignore the message unless it's one for RELEASE, in which
3684 * case we'll let it have the benefit of the doubt, allowing the
3685 * next loaded driver to start again.
3687 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
3688 /* TODO - initiate FLR, remove malicious indication */
3689 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3690 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
3693 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3694 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3695 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3698 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3699 mbx->first_tlv.tl.type,
3700 sizeof(struct pfvf_def_resp_tlv),
3701 PFVF_STATUS_MALICIOUS);
3703 /* unknown TLV - this may belong to a VF driver from the future
3704 * - a version written after this PF driver was written, which
3705 * supports features unknown as of yet. Too bad since we don't
3706 * support them. Or this may be because someone wrote a crappy
3707 * VF driver and is sending garbage over the channel.
3709 DP_NOTICE(p_hwfn, false,
3710 "VF[%02x]: unknown TLV. type %04x length %04x"
3711 " padding %08x reply address %lu\n",
3713 mbx->first_tlv.tl.type,
3714 mbx->first_tlv.tl.length,
3715 mbx->first_tlv.padding,
3716 (unsigned long)mbx->first_tlv.reply_address);
3718 /* Try replying in case reply address matches the acquisition's
3721 if (p_vf->acquire.first_tlv.reply_address &&
3722 (mbx->first_tlv.reply_address ==
3723 p_vf->acquire.first_tlv.reply_address))
3724 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3725 mbx->first_tlv.tl.type,
3726 sizeof(struct pfvf_def_resp_tlv),
3727 PFVF_STATUS_NOT_SUPPORTED);
3729 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3730 "VF[%02x]: Can't respond to TLV -"
3731 " no valid reply address\n",
3735 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
3736 mbx->first_tlv.tl.type);
3738 #ifdef CONFIG_ECORE_SW_CHANNEL
3739 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
3740 mbx->sw_mbx.response_offset = 0;
3744 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
3746 u64 add_bit = 1ULL << (vfid % 64);
3748 /* TODO - add locking mechanisms [no atomics in ecore, so we can't
3749 * add the lock inside the ecore_pf_iov struct].
3751 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3754 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
3757 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3759 /* TODO - Take a lock */
3760 OSAL_MEMCPY(events, p_pending_events,
3761 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3762 OSAL_MEMSET(p_pending_events, 0,
3763 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3766 static struct ecore_vf_info *
3767 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
3769 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
3771 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3772 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3773 "Got indication for VF [abs 0x%08x] that cannot be"
3779 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3782 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
3784 struct regpair *vf_msg)
3786 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
3790 return ECORE_SUCCESS;
3792 /* List the physical address of the request so that handler
3793 * could later on copy the message from it.
3795 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3797 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
3800 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
3801 struct malicious_vf_eqe_data *p_data)
3803 struct ecore_vf_info *p_vf;
3805 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
3811 "VF [%d] - Malicious behavior [%02x]\n",
3812 p_vf->abs_vf_id, p_data->errId);
3814 p_vf->b_malicious = true;
3816 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
3819 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
3822 union event_ring_data *data)
3825 case COMMON_EVENT_VF_PF_CHANNEL:
3826 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
3827 &data->vf_pf_channel.msg_addr);
3828 case COMMON_EVENT_VF_FLR:
3829 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3830 "VF-FLR is still not supported\n");
3831 return ECORE_SUCCESS;
3832 case COMMON_EVENT_MALICIOUS_VF:
3833 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3834 return ECORE_SUCCESS;
3836 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
3842 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3844 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3845 (1ULL << (rel_vf_id % 64)));
3848 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3850 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
3856 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3857 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
3861 return E4_MAX_NUM_VFS;
3864 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
3865 struct ecore_ptt *ptt, int vfid)
3867 struct ecore_dmae_params params;
3868 struct ecore_vf_info *vf_info;
3870 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3874 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
3875 params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
3876 params.src_vfid = vf_info->abs_vf_id;
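/* DMA the request out of the VF's mailbox (host memory owned by the VF)
 * into the PF's local request buffer for processing.
 */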
3878 if (ecore_dmae_host2host(p_hwfn, ptt,
3879 vf_info->vf_mbx.pending_req,
3880 vf_info->vf_mbx.req_phys,
3881 sizeof(union vfpf_tlvs) / 4, ¶ms)) {
3882 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3883 "Failed to copy message from VF 0x%02x\n", vfid);
3888 return ECORE_SUCCESS;
3891 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
3894 struct ecore_vf_info *vf_info;
3897 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3899 DP_NOTICE(p_hwfn->p_dev, true,
3900 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3903 if (vf_info->b_malicious) {
3904 DP_NOTICE(p_hwfn->p_dev, false,
3905 "Can't set forced MAC to malicious VF [%d]\n",
3910 feature = 1 << MAC_ADDR_FORCED;
3911 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3913 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3914 /* Forced MAC will disable MAC_ADDR */
3915 vf_info->bulletin.p_virt->valid_bitmap &=
3916 ~(1 << VFPF_BULLETIN_MAC_ADDR);
3918 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3921 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
3924 struct ecore_vf_info *vf_info;
3927 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3929 DP_NOTICE(p_hwfn->p_dev, true,
3930 "Can not set MAC, invalid vfid [%d]\n", vfid);
3933 if (vf_info->b_malicious) {
3934 DP_NOTICE(p_hwfn->p_dev, false,
3935 "Can't set MAC to malicious VF [%d]\n",
3940 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
3941 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3942 "Can not set MAC, Forced MAC is configured\n");
3946 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
3947 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3949 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3951 return ECORE_SUCCESS;
3954 enum _ecore_status_t
3955 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
3956 bool b_untagged_only, int vfid)
3958 struct ecore_vf_info *vf_info;
3961 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3963 DP_NOTICE(p_hwfn->p_dev, true,
3964 "Can not set untagged default, invalid vfid [%d]\n",
3968 if (vf_info->b_malicious) {
3969 DP_NOTICE(p_hwfn->p_dev, false,
3970 "Can't set untagged default to malicious VF [%d]\n",
3975 /* Since this is configurable only during vport-start, don't take it
3976 * if we're past that point.
3978 if (vf_info->state == VF_ENABLED) {
3979 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3980 "Can't support untagged change for vfid[%d] -"
3981 " VF is already active\n",
3986 /* Set configuration; This will later be taken into account during the
3987 * VF initialization.
3989 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
3990 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
3991 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3993 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
3996 return ECORE_SUCCESS;
3999 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4002 struct ecore_vf_info *vf_info;
4004 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4008 *opaque_fid = vf_info->opaque_fid;
4011 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4014 struct ecore_vf_info *vf_info;
4017 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4019 DP_NOTICE(p_hwfn->p_dev, true,
4020 "Can not set forced MAC, invalid vfid [%d]\n",
4024 if (vf_info->b_malicious) {
4025 DP_NOTICE(p_hwfn->p_dev, false,
4026 "Can't set forced vlan to malicious VF [%d]\n",
4031 feature = 1 << VLAN_ADDR_FORCED;
4032 vf_info->bulletin.p_virt->pvid = pvid;
4034 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4036 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4038 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4041 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
4042 int vfid, u16 vxlan_port, u16 geneve_port)
4044 struct ecore_vf_info *vf_info;
4046 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4048 DP_NOTICE(p_hwfn->p_dev, true,
4049 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4053 if (vf_info->b_malicious) {
4054 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4055 "Can not set udp ports to malicious VF [%d]\n",
4060 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4061 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4064 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
4066 struct ecore_vf_info *p_vf_info;
4068 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4072 return !!p_vf_info->vport_instance;
4075 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
4077 struct ecore_vf_info *p_vf_info;
4079 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4083 return p_vf_info->state == VF_STOPPED;
4086 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
4088 struct ecore_vf_info *vf_info;
4090 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4094 return vf_info->spoof_chk;
4097 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
4100 struct ecore_vf_info *vf;
4101 enum _ecore_status_t rc = ECORE_INVAL;
4103 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4104 DP_NOTICE(p_hwfn, true,
4105 "SR-IOV sanity check failed, can't set spoofchk\n");
4109 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4113 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4114 /* After VF VPORT start PF will configure spoof check */
4115 vf->req_spoofchk_val = val;
4120 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4126 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4128 u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4130 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4131 : ECORE_MAX_VF_CHAINS_PER_PF;
4133 return max_chains_per_vf;
4136 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4138 void **pp_req_virt_addr,
4139 u16 *p_req_virt_size)
4141 struct ecore_vf_info *vf_info =
4142 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4147 if (pp_req_virt_addr)
4148 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4150 if (p_req_virt_size)
4151 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4154 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4156 void **pp_reply_virt_addr,
4157 u16 *p_reply_virt_size)
4159 struct ecore_vf_info *vf_info =
4160 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4165 if (pp_reply_virt_addr)
4166 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4168 if (p_reply_virt_size)
4169 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4172 #ifdef CONFIG_ECORE_SW_CHANNEL
4173 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4176 struct ecore_vf_info *vf_info =
4177 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4182 return &vf_info->vf_mbx.sw_mbx;
4186 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4188 return (length >= sizeof(struct vfpf_first_tlv) &&
4189 (length <= sizeof(union vfpf_tlvs)));
4192 u32 ecore_iov_pfvf_msg_length(void)
4194 return sizeof(union pfvf_tlvs);
4197 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4199 struct ecore_vf_info *p_vf;
4201 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4202 if (!p_vf || !p_vf->bulletin.p_virt)
4205 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4208 return p_vf->bulletin.p_virt->mac;
4211 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4214 struct ecore_vf_info *p_vf;
4216 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4217 if (!p_vf || !p_vf->bulletin.p_virt)
4220 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4223 return p_vf->bulletin.p_virt->pvid;
4226 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4227 struct ecore_ptt *p_ptt,
4230 struct ecore_vf_info *vf;
4232 enum _ecore_status_t rc;
4234 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4239 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4240 if (rc != ECORE_SUCCESS)
4243 return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
4246 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
4249 struct ecore_vf_info *vf;
4253 for_each_hwfn(p_dev, i) {
4254 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
4256 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4257 DP_NOTICE(p_hwfn, true,
4258 "SR-IOV sanity check failed,"
4259 " can't set min rate\n");
4264 vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
4265 vport_id = vf->vport_id;
4267 return ecore_configure_vport_wfq(p_dev, vport_id, rate);
4270 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4271 struct ecore_ptt *p_ptt,
4273 struct ecore_eth_stats *p_stats)
4275 struct ecore_vf_info *vf;
4277 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4281 if (vf->state != VF_ENABLED)
4284 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4285 vf->abs_vf_id + 0x10, false);
4287 return ECORE_SUCCESS;
4290 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4292 struct ecore_vf_info *p_vf;
4294 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4298 return p_vf->num_rxqs;
4301 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4303 struct ecore_vf_info *p_vf;
4305 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4309 return p_vf->num_active_rxqs;
4312 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4314 struct ecore_vf_info *p_vf;
4316 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4323 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4325 struct ecore_vf_info *p_vf;
4327 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4331 return p_vf->num_sbs;
4334 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4336 struct ecore_vf_info *p_vf;
4338 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4342 return (p_vf->state == VF_FREE);
4345 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4348 struct ecore_vf_info *p_vf;
4350 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4354 return (p_vf->state == VF_ACQUIRED);
4357 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4359 struct ecore_vf_info *p_vf;
4361 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4365 return (p_vf->state == VF_ENABLED);
4368 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4371 struct ecore_vf_info *p_vf;
4373 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4377 return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
4380 enum _ecore_status_t
4381 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4383 struct ecore_wfq_data *vf_vp_wfq;
4384 struct ecore_vf_info *vf_info;
4386 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4390 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4392 if (vf_vp_wfq->configured)
4393 return vf_vp_wfq->min_speed;