2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
12 #include "ecore_sriov.h"
13 #include "ecore_status.h"
15 #include "ecore_hw_defs.h"
16 #include "ecore_int.h"
17 #include "ecore_hsi_eth.h"
19 #include "ecore_vfpf_if.h"
20 #include "ecore_rt_defs.h"
21 #include "ecore_init_ops.h"
22 #include "ecore_gtt_reg_addr.h"
23 #include "ecore_iro.h"
24 #include "ecore_mcp.h"
25 #include "ecore_cxt.h"
27 #include "ecore_init_fw_funcs.h"
28 #include "ecore_sp_commands.h"
30 const char *ecore_channel_tlvs_string[] = {
31 "CHANNEL_TLV_NONE", /* ends tlv sequence */
32 "CHANNEL_TLV_ACQUIRE",
33 "CHANNEL_TLV_VPORT_START",
34 "CHANNEL_TLV_VPORT_UPDATE",
35 "CHANNEL_TLV_VPORT_TEARDOWN",
36 "CHANNEL_TLV_START_RXQ",
37 "CHANNEL_TLV_START_TXQ",
38 "CHANNEL_TLV_STOP_RXQ",
39 "CHANNEL_TLV_STOP_TXQ",
40 "CHANNEL_TLV_UPDATE_RXQ",
41 "CHANNEL_TLV_INT_CLEANUP",
43 "CHANNEL_TLV_RELEASE",
44 "CHANNEL_TLV_LIST_END",
45 "CHANNEL_TLV_UCAST_FILTER",
46 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
47 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
48 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
49 "CHANNEL_TLV_VPORT_UPDATE_MCAST",
50 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
51 "CHANNEL_TLV_VPORT_UPDATE_RSS",
52 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
53 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
58 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
59 struct ecore_vf_info *p_vf)
61 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
62 struct ecore_spq_entry *p_ent = OSAL_NULL;
63 struct ecore_sp_init_data init_data;
64 enum _ecore_status_t rc = ECORE_NOTIMPL;
68 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
69 init_data.cid = ecore_spq_get_cid(p_hwfn);
70 init_data.opaque_fid = p_vf->opaque_fid;
71 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
73 rc = ecore_sp_init_request(p_hwfn, &p_ent,
74 COMMON_RAMROD_VF_START,
75 PROTOCOLID_COMMON, &init_data);
76 if (rc != ECORE_SUCCESS)
79 p_ramrod = &p_ent->ramrod.vf_start;
81 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
82 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
84 switch (p_hwfn->hw_info.personality) {
86 p_ramrod->personality = PERSONALITY_ETH;
88 case ECORE_PCI_ETH_ROCE:
89 case ECORE_PCI_ETH_IWARP:
90 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
93 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
94 p_hwfn->hw_info.personality);
98 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
99 if (fp_minor > ETH_HSI_VER_MINOR &&
100 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
101 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
102 "VF [%d] - Requested fp hsi %02x.%02x which is"
103 " slightly newer than PF's %02x.%02x; Configuring"
106 ETH_HSI_VER_MAJOR, fp_minor,
107 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
108 fp_minor = ETH_HSI_VER_MINOR;
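/* Illustrative example with hypothetical version numbers: if the PF was
 * built against fastpath HSI 3.10 and a VF requests 3.11, the minor is
 * clamped here and the VF is started with 3.10; only the special
 * ETH_HSI_VER_NO_PKT_LEN_TUNN minor bypasses this clamping.
 */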
111 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
112 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
114 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
115 "VF[%d] - Starting using HSI %02x.%02x\n",
116 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
118 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
121 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
125 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
126 struct ecore_spq_entry *p_ent = OSAL_NULL;
127 struct ecore_sp_init_data init_data;
128 enum _ecore_status_t rc = ECORE_NOTIMPL;
131 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
132 init_data.cid = ecore_spq_get_cid(p_hwfn);
133 init_data.opaque_fid = opaque_vfid;
134 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
136 rc = ecore_sp_init_request(p_hwfn, &p_ent,
137 COMMON_RAMROD_VF_STOP,
138 PROTOCOLID_COMMON, &init_data);
139 if (rc != ECORE_SUCCESS)
142 p_ramrod = &p_ent->ramrod.vf_stop;
144 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
146 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
149 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
150 bool b_enabled_only, bool b_non_malicious)
152 if (!p_hwfn->pf_iov_info) {
153 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
157 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
161 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
165 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
172 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
176 struct ecore_vf_info *vf = OSAL_NULL;
178 if (!p_hwfn->pf_iov_info) {
179 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
183 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
184 b_enabled_only, false))
185 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
187 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
193 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
194 struct ecore_vf_info *p_vf,
197 if (rx_qid >= p_vf->num_rxqs)
198 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
199 "VF[0x%02x] - can't touch Rx queue[%04x];"
200 " Only 0x%04x are allocated\n",
201 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
202 return rx_qid < p_vf->num_rxqs;
205 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
206 struct ecore_vf_info *p_vf,
209 if (tx_qid >= p_vf->num_txqs)
210 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
211 "VF[0x%02x] - can't touch Tx queue[%04x];"
212 " Only 0x%04x are allocated\n",
213 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
214 return tx_qid < p_vf->num_txqs;
217 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
218 struct ecore_vf_info *p_vf,
223 for (i = 0; i < p_vf->num_sbs; i++)
224 if (p_vf->igu_sbs[i] == sb_idx)
227 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
228 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
229 " one of its 0x%02x SBs\n",
230 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
235 static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
236 struct ecore_vf_info *p_vf)
240 for (i = 0; i < p_vf->num_rxqs; i++)
241 if (p_vf->vf_queues[i].rxq_active)
247 static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
248 struct ecore_vf_info *p_vf)
252 for (i = 0; i < p_vf->num_txqs; i++)
253 if (p_vf->vf_queues[i].txq_active)
259 /* TODO - this is linux crc32; Need a way to ifdef it out for linux */
260 u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
266 for (i = 0; i < 8; i++)
267 crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
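/* Implementation note: this is the classic LSB-first CRC-32 using the
 * reflected IEEE 802.3 polynomial 0xEDB88320, processing one bit per
 * iteration of the inner loop. ecore_iov_post_vf_bulletin() below relies
 * on it to checksum the bulletin contents, skipping the crc field itself.
 */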
272 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
274 struct ecore_ptt *p_ptt)
276 struct ecore_bulletin_content *p_bulletin;
277 int crc_size = sizeof(p_bulletin->crc);
278 struct ecore_dmae_params params;
279 struct ecore_vf_info *p_vf;
281 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
285 /* TODO - check VF is in a state where it can accept message */
286 if (!p_vf->vf_bulletin)
289 p_bulletin = p_vf->bulletin.p_virt;
291 /* Increment bulletin board version and compute crc */
292 p_bulletin->version++;
293 p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
294 p_vf->bulletin.size - crc_size);
296 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
297 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
298 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
300 /* propagate bulletin board via dmae to vm memory */
301 OSAL_MEMSET(&params, 0, sizeof(params));
302 params.flags = ECORE_DMAE_FLAG_VF_DST;
303 params.dst_vfid = p_vf->abs_vf_id;
304 return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
305 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
309 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
311 struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
314 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
315 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
317 OSAL_PCI_READ_CONFIG_WORD(p_dev,
318 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
319 OSAL_PCI_READ_CONFIG_WORD(p_dev,
320 pos + PCI_SRIOV_INITIAL_VF,
323 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
325 /* @@@TODO - in future we might want to add an OSAL here to
326 * allow each OS to decide on its own how to act.
328 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
329 "Number of VFs are already set to non-zero value."
330 " Ignoring PCI configuration value\n");
334 OSAL_PCI_READ_CONFIG_WORD(p_dev,
335 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
337 OSAL_PCI_READ_CONFIG_WORD(p_dev,
338 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
340 OSAL_PCI_READ_CONFIG_WORD(p_dev,
341 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
343 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
344 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
346 OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
348 OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
350 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
351 "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
352 " stride %d, page size 0x%x\n",
353 iov->nres, iov->cap, iov->ctrl,
354 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
355 iov->offset, iov->stride, iov->pgsz);
357 /* Some sanity checks */
358 if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
359 iov->total_vfs > NUM_OF_VFS(p_dev)) {
360 /* This can happen only due to a bug. In this case we set
361 * num_vfs to zero to avoid memory corruption in the code that
362 * assumes max number of vfs
364 DP_NOTICE(p_dev, false,
365 "IOV: Unexpected number of vfs set: %d"
366 " setting num_vf to zero\n",
373 return ECORE_SUCCESS;
376 static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
377 struct ecore_ptt *p_ptt)
379 struct ecore_igu_block *p_sb;
383 if (!p_hwfn->hw_info.p_igu_info) {
385 "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
390 sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
391 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
392 if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
393 !(p_sb->status & ECORE_IGU_STATUS_PF)) {
394 val = ecore_rd(p_hwfn, p_ptt,
395 IGU_REG_MAPPING_MEMORY + sb_id * 4);
396 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
397 ecore_wr(p_hwfn, p_ptt,
398 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
403 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
405 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
406 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
407 struct ecore_bulletin_content *p_bulletin_virt;
408 dma_addr_t req_p, rply_p, bulletin_p;
409 union pfvf_tlvs *p_reply_virt_addr;
410 union vfpf_tlvs *p_req_virt_addr;
413 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
415 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
416 req_p = p_iov_info->mbx_msg_phys_addr;
417 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
418 rply_p = p_iov_info->mbx_reply_phys_addr;
419 p_bulletin_virt = p_iov_info->p_bulletins;
420 bulletin_p = p_iov_info->bulletins_phys;
421 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
423 "ecore_iov_setup_vfdb called without alloc mem first\n");
427 p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
429 for (idx = 0; idx < p_iov->total_vfs; idx++) {
430 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
433 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
434 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
435 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
436 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
438 #ifdef CONFIG_ECORE_SW_CHANNEL
439 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
440 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
442 vf->state = VF_STOPPED;
445 vf->bulletin.phys = idx *
446 sizeof(struct ecore_bulletin_content) + bulletin_p;
447 vf->bulletin.p_virt = p_bulletin_virt + idx;
448 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
450 vf->relative_vf_id = idx;
451 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
452 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
453 vf->concrete_fid = concrete;
454 /* TODO - need to devise a better way of getting opaque */
455 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
456 (vf->abs_vf_id << 8);
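/* e.g. (hypothetical values) abs_vf_id 5 with a PF opaque_fid of 0x00b0
 * yields a VF opaque_fid of 0x05b0 - the VF number travels in bits 15:8,
 * the PF's low byte in bits 7:0.
 */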
457 /* @@TBD MichalK - add base vport_id of VFs to equation */
458 vf->vport_id = p_iov_info->base_vport_id + idx;
460 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
461 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
465 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
467 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
471 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
473 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
474 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
476 /* Allocate PF Mailbox buffer (per-VF) */
477 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
478 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
479 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
480 &p_iov_info->mbx_msg_phys_addr,
481 p_iov_info->mbx_msg_size);
485 /* Allocate PF Mailbox Reply buffer (per-VF) */
486 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
487 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
488 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
489 &p_iov_info->mbx_reply_phys_addr,
490 p_iov_info->mbx_reply_size);
494 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
496 p_v_addr = &p_iov_info->p_bulletins;
497 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
498 &p_iov_info->bulletins_phys,
499 p_iov_info->bulletins_size);
503 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
504 "PF's Requests mailbox [%p virt 0x%lx phys], "
505 "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
506 " [%p virt 0x%lx phys]\n",
507 p_iov_info->mbx_msg_virt_addr,
508 (unsigned long)p_iov_info->mbx_msg_phys_addr,
509 p_iov_info->mbx_reply_virt_addr,
510 (unsigned long)p_iov_info->mbx_reply_phys_addr,
511 p_iov_info->p_bulletins,
512 (unsigned long)p_iov_info->bulletins_phys);
514 return ECORE_SUCCESS;
517 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
519 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
521 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
522 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
523 p_iov_info->mbx_msg_virt_addr,
524 p_iov_info->mbx_msg_phys_addr,
525 p_iov_info->mbx_msg_size);
527 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
528 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
529 p_iov_info->mbx_reply_virt_addr,
530 p_iov_info->mbx_reply_phys_addr,
531 p_iov_info->mbx_reply_size);
533 if (p_iov_info->p_bulletins)
534 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
535 p_iov_info->p_bulletins,
536 p_iov_info->bulletins_phys,
537 p_iov_info->bulletins_size);
540 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
542 struct ecore_pf_iov *p_sriov;
544 if (!IS_PF_SRIOV(p_hwfn)) {
545 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
546 "No SR-IOV - no need for IOV db\n");
547 return ECORE_SUCCESS;
550 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
552 DP_NOTICE(p_hwfn, true,
553 "Failed to allocate `struct ecore_sriov'\n");
557 p_hwfn->pf_iov_info = p_sriov;
559 return ecore_iov_allocate_vfdb(p_hwfn);
562 void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
564 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
567 ecore_iov_setup_vfdb(p_hwfn);
568 ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
571 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
573 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
574 ecore_iov_free_vfdb(p_hwfn);
575 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
579 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
581 OSAL_FREE(p_dev, p_dev->p_iov_info);
584 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
586 struct ecore_dev *p_dev = p_hwfn->p_dev;
588 enum _ecore_status_t rc;
590 if (IS_VF(p_hwfn->p_dev))
591 return ECORE_SUCCESS;
593 /* Learn the PCI configuration */
594 pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
595 PCI_EXT_CAP_ID_SRIOV);
597 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
598 return ECORE_SUCCESS;
601 /* Allocate a new struct for IOV information */
602 /* TODO - can change to VALLOC when its available */
603 p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
604 sizeof(*p_dev->p_iov_info));
605 if (!p_dev->p_iov_info) {
606 DP_NOTICE(p_hwfn, true,
607 "Can't support IOV due to lack of memory\n");
610 p_dev->p_iov_info->pos = pos;
612 rc = ecore_iov_pci_cfg_info(p_dev);
616 /* We want PF IOV to be synonymous with the existence of p_iov_info;
617 * In case the capability is published but there are no VFs, simply
618 * de-allocate the struct.
620 if (!p_dev->p_iov_info->total_vfs) {
621 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
622 "IOV capabilities, but no VFs are published\n");
623 OSAL_FREE(p_dev, p_dev->p_iov_info);
624 return ECORE_SUCCESS;
627 /* First VF index based on offset is tricky:
628 * - If ARI is supported [likely], offset - (16 - pf_id) would
629 * provide the number for eng0. 2nd engine VFs would begin
630 * after the first engine's VFs.
631 * - If !ARI, VFs would start on next device.
632 * so offset - (256 - pf_id) would provide the number.
633 * Utilize the fact that (256 - pf_id) is achieved only by the latter
634 * to differentiate between the two.
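* Worked example (hypothetical values): with abs_pf_id 2, an offset of
* 16 satisfies 16 < (256 - 2) and takes the ARI branch, giving
* first_vf_in_pf = 16 + 2 - 16 = 2; an offset of 256 fails the check
* and takes the !ARI branch, giving 256 + 2 - 256 = 2.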
637 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
638 u32 first = p_hwfn->p_dev->p_iov_info->offset +
639 p_hwfn->abs_pf_id - 16;
641 p_dev->p_iov_info->first_vf_in_pf = first;
643 if (ECORE_PATH_ID(p_hwfn))
644 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
646 u32 first = p_hwfn->p_dev->p_iov_info->offset +
647 p_hwfn->abs_pf_id - 256;
649 p_dev->p_iov_info->first_vf_in_pf = first;
652 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
653 "First VF in hwfn 0x%08x\n",
654 p_dev->p_iov_info->first_vf_in_pf);
656 return ECORE_SUCCESS;
659 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
660 bool b_fail_malicious)
662 /* Check PF supports sriov */
663 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
664 !IS_PF_SRIOV_ALLOC(p_hwfn))
667 /* Check VF validity */
668 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
674 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
676 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
679 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
680 u16 rel_vf_id, u8 to_disable)
682 struct ecore_vf_info *vf;
685 for_each_hwfn(p_dev, i) {
686 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
688 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
692 vf->to_disable = to_disable;
696 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
701 if (!IS_ECORE_SRIOV(p_dev))
704 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
705 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
709 /* @@@TBD Consider taking outside of ecore... */
710 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
714 enum _ecore_status_t rc = ECORE_SUCCESS;
715 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
717 if (vf != OSAL_NULL) {
719 #ifdef CONFIG_ECORE_SW_CHANNEL
720 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
723 rc = ECORE_UNKNOWN_ERROR;
729 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
730 struct ecore_ptt *p_ptt,
733 ecore_wr(p_hwfn, p_ptt,
734 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
735 1 << (abs_vfid & 0x1f));
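/* e.g. abs_vfid 37: 37 >> 5 = 1 selects the next 32-VF window (byte
 * offset +4) and 37 & 0x1f = 5 picks the bit, so a single write clears
 * that VF's sticky PGLUE error flag.
 */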
738 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
739 struct ecore_ptt *p_ptt,
740 struct ecore_vf_info *vf)
744 /* Set VF masks and configuration - pretend */
745 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
747 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
750 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
752 /* iterate over all queues, clear sb consumer */
753 for (i = 0; i < vf->num_sbs; i++)
754 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
756 vf->opaque_fid, true);
759 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
760 struct ecore_ptt *p_ptt,
761 struct ecore_vf_info *vf, bool enable)
765 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
767 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
770 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
772 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
774 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
777 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
780 static enum _ecore_status_t
781 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
782 struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
784 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
785 enum _ecore_status_t rc;
788 return ECORE_SUCCESS;
790 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
791 "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
792 ECORE_VF_ABS_ID(p_hwfn, vf));
794 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
795 ECORE_VF_ABS_ID(p_hwfn, vf));
797 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
799 /* It's possible VF was previously considered malicious */
800 vf->b_malicious = false;
802 rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
803 vf->abs_vf_id, vf->num_sbs);
804 if (rc != ECORE_SUCCESS)
807 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
809 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
810 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
812 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
813 p_hwfn->hw_info.hw_mode);
816 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
825 * @brief ecore_iov_config_perm_table - configure the permission
827 * In E4, queue zone permission table size is 320x9. There
828 * are 320 VF queues for single engine device (256 for dual
829 * engine device), and each entry has the following format:
836 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
837 struct ecore_ptt *p_ptt,
838 struct ecore_vf_info *vf, u8 enable)
844 for (qid = 0; qid < vf->num_rxqs; qid++) {
845 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
848 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
849 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
850 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
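/* As the value written above implies, each permission entry packs the
 * VF number in bits 7:0 plus a valid bit at bit 8; writing 0 revokes
 * the VF's access to that queue zone.
 */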
854 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
855 struct ecore_ptt *p_ptt,
856 struct ecore_vf_info *vf)
858 /* Reset vf in IGU - interrupts are still disabled */
859 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
861 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
863 /* Permission Table */
864 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
867 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
868 struct ecore_ptt *p_ptt,
869 struct ecore_vf_info *vf,
872 struct ecore_igu_block *igu_blocks;
873 int qid = 0, igu_id = 0;
876 igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
878 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
879 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
881 p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
883 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
884 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
885 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
887 while ((qid < num_rx_queues) &&
888 (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
889 if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
890 struct cau_sb_entry sb_entry;
892 vf->igu_sbs[qid] = (u16)igu_id;
893 igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
895 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
897 ecore_wr(p_hwfn, p_ptt,
898 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
901 /* Configure igu sb in CAU which were marked valid */
902 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
905 ecore_dmae_host2grc(p_hwfn, p_ptt,
906 (u64)(osal_uintptr_t)&sb_entry,
907 CAU_REG_SB_VAR_MEMORY +
908 igu_id * sizeof(u64), 2, 0);
914 vf->num_sbs = (u8)num_rx_queues;
921 * @brief The function invalidates all the VF entries,
922 * technically this isn't required, but added for
923 * cleanliness and ease of debugging in case a VF attempts to
924 * produce an interrupt after it has been taken down.
930 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
931 struct ecore_ptt *p_ptt,
932 struct ecore_vf_info *vf)
934 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
938 /* Invalidate igu CAM lines and mark them as free */
939 for (idx = 0; idx < vf->num_sbs; idx++) {
940 igu_id = vf->igu_sbs[idx];
941 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
943 val = ecore_rd(p_hwfn, p_ptt, addr);
944 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
945 ecore_wr(p_hwfn, p_ptt, addr, val);
947 p_info->igu_map.igu_blocks[igu_id].status |=
948 ECORE_IGU_STATUS_FREE;
950 p_hwfn->hw_info.p_igu_info->free_blks++;
956 enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
957 struct ecore_ptt *p_ptt,
958 u16 rel_vf_id, u16 num_rx_queues)
960 u8 num_of_vf_available_chains = 0;
961 struct ecore_vf_info *vf = OSAL_NULL;
962 enum _ecore_status_t rc = ECORE_SUCCESS;
966 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
968 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
969 return ECORE_UNKNOWN_ERROR;
973 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
978 /* Limit number of queues according to number of CIDs */
979 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
980 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
981 "VF[%d] - requesting to initialize for 0x%04x queues"
982 " [0x%04x CIDs available]\n",
983 vf->relative_vf_id, num_rx_queues, (u16)cids);
984 num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
986 num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
990 if (num_of_vf_available_chains == 0) {
991 DP_ERR(p_hwfn, "no available igu sbs\n");
995 /* Choose queue number and index ranges */
996 vf->num_rxqs = num_of_vf_available_chains;
997 vf->num_txqs = num_of_vf_available_chains;
999 for (i = 0; i < vf->num_rxqs; i++) {
1000 u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
1003 if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
1004 DP_NOTICE(p_hwfn, true,
1005 "VF[%d] will require utilizing of"
1006 " out-of-bounds queues - %04x\n",
1007 vf->relative_vf_id, queue_id);
1008 /* TODO - cleanup the already allocated SBs */
1012 /* CIDs are per-VF, so no problem having them 0-based. */
1013 vf->vf_queues[i].fw_rx_qid = queue_id;
1014 vf->vf_queues[i].fw_tx_qid = queue_id;
1015 vf->vf_queues[i].fw_cid = i;
1017 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1018 "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
1019 vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
1022 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1024 if (rc == ECORE_SUCCESS) {
1026 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1027 (1ULL << (vf->relative_vf_id % 64));
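/* active_vfs packs 64 VFs per u64 word, e.g. relative id 70 lands in
 * word 70 / 64 = 1, bit 70 % 64 = 6; the release path must clear it
 * with the same % 64 arithmetic.
 */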
1029 if (IS_LEAD_HWFN(p_hwfn))
1030 p_hwfn->p_dev->p_iov_info->num_vfs++;
1036 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
1038 struct ecore_mcp_link_params *params,
1039 struct ecore_mcp_link_state *link,
1040 struct ecore_mcp_link_capabilities *p_caps)
1042 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1043 struct ecore_bulletin_content *p_bulletin;
1048 p_bulletin = p_vf->bulletin.p_virt;
1049 p_bulletin->req_autoneg = params->speed.autoneg;
1050 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1051 p_bulletin->req_forced_speed = params->speed.forced_speed;
1052 p_bulletin->req_autoneg_pause = params->pause.autoneg;
1053 p_bulletin->req_forced_rx = params->pause.forced_rx;
1054 p_bulletin->req_forced_tx = params->pause.forced_tx;
1055 p_bulletin->req_loopback = params->loopback_mode;
1057 p_bulletin->link_up = link->link_up;
1058 p_bulletin->speed = link->speed;
1059 p_bulletin->full_duplex = link->full_duplex;
1060 p_bulletin->autoneg = link->an;
1061 p_bulletin->autoneg_complete = link->an_complete;
1062 p_bulletin->parallel_detection = link->parallel_detection;
1063 p_bulletin->pfc_enabled = link->pfc_enabled;
1064 p_bulletin->partner_adv_speed = link->partner_adv_speed;
1065 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1066 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1067 p_bulletin->partner_adv_pause = link->partner_adv_pause;
1068 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1070 p_bulletin->capability_speed = p_caps->speed_capabilities;
1073 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1074 struct ecore_ptt *p_ptt,
1077 struct ecore_mcp_link_capabilities caps;
1078 struct ecore_mcp_link_params params;
1079 struct ecore_mcp_link_state link;
1080 struct ecore_vf_info *vf = OSAL_NULL;
1082 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1084 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1085 return ECORE_UNKNOWN_ERROR;
1088 if (vf->bulletin.p_virt)
1089 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1090 sizeof(*vf->bulletin.p_virt));
1092 OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1094 /* Get the link configuration back in bulletin so
1095 * that when VFs are re-enabled they get the actual
1096 * link configuration.
1098 OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1099 OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1100 OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1102 ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1104 /* Forget the VF's acquisition message */
1105 OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1107 /* disabling interrupts and resetting permission table was done during
1108 * vf-close, however, we could get here without going through vf_close
1110 /* Disable Interrupts for VF */
1111 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1113 /* Reset Permission table */
1114 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1118 ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1122 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1123 ~(1ULL << (vf->relative_vf_id % 64));
1125 if (IS_LEAD_HWFN(p_hwfn))
1126 p_hwfn->p_dev->p_iov_info->num_vfs--;
1129 return ECORE_SUCCESS;
1132 static bool ecore_iov_tlv_supported(u16 tlvtype)
1134 return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
1137 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1138 struct ecore_vf_info *vf, u16 tlv)
1140 /* lock the channel */
1141 /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1143 /* record the locking op */
1144 /* vf->op_current = tlv; @@@TBD MichalK */
1147 if (ecore_iov_tlv_supported(tlv))
1150 "VF[%d]: vf pf channel locked by %s\n",
1152 ecore_channel_tlvs_string[tlv]);
1156 "VF[%d]: vf pf channel locked by %04x\n",
1157 vf->abs_vf_id, tlv);
1160 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1161 struct ecore_vf_info *vf,
1164 /* log the unlock */
1165 if (ecore_iov_tlv_supported(expected_tlv))
1168 "VF[%d]: vf pf channel unlocked by %s\n",
1170 ecore_channel_tlvs_string[expected_tlv]);
1174 "VF[%d]: vf pf channel unlocked by %04x\n",
1175 vf->abs_vf_id, expected_tlv);
1177 /* record the locking op */
1178 /* vf->op_current = CHANNEL_TLV_NONE; */
1181 /* place a given tlv on the tlv buffer, continuing current tlv list */
1182 void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
1183 u8 **offset, u16 type, u16 length)
1185 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1188 tl->length = length;
1190 /* Offset should keep pointing to next TLV (the end of the last) */
1193 /* Return a pointer to the start of the added tlv */
1194 return *offset - length;
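/* Usage sketch (mirroring ecore_iov_prepare_resp() below) - callers chain
 * TLVs by advancing one shared offset cursor:
 *
 *	offset = (u8 *)mbx->reply_virt;
 *	ecore_add_tlv(p_hwfn, &offset, type, length);
 *	ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *
 * with CHANNEL_TLV_LIST_END always terminating the list.
 */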
1197 /* list the types and lengths of the tlvs on the buffer */
1198 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1200 u16 i = 1, total_length = 0;
1201 struct channel_tlv *tlv;
1204 /* cast current tlv list entry to channel tlv header */
1205 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1208 if (ecore_iov_tlv_supported(tlv->type))
1209 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1210 "TLV number %d: type %s, length %d\n",
1211 i, ecore_channel_tlvs_string[tlv->type],
1214 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1215 "TLV number %d: type %d, length %d\n",
1216 i, tlv->type, tlv->length);
1218 if (tlv->type == CHANNEL_TLV_LIST_END)
1221 /* Validate entry - protect against malicious VFs */
1223 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1226 total_length += tlv->length;
1227 if (total_length >= sizeof(struct tlv_buffer_size)) {
1228 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1236 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1237 struct ecore_ptt *p_ptt,
1238 struct ecore_vf_info *p_vf,
1239 u16 length, u8 status)
1241 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1242 struct ecore_dmae_params params;
1245 mbx->reply_virt->default_resp.hdr.status = status;
1247 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1249 #ifdef CONFIG_ECORE_SW_CHANNEL
1250 mbx->sw_mbx.response_size =
1251 length + sizeof(struct channel_list_end_tlv);
1253 if (!p_hwfn->p_dev->b_hw_channel)
1257 eng_vf_id = p_vf->abs_vf_id;
1259 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1260 params.flags = ECORE_DMAE_FLAG_VF_DST;
1261 params.dst_vfid = eng_vf_id;
1263 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1264 mbx->req_virt->first_tlv.reply_address +
1266 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1269 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1270 mbx->req_virt->first_tlv.reply_address,
1271 sizeof(u64) / 4, &params);
1274 GTT_BAR0_MAP_REG_USDM_RAM +
1275 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
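/* This final register write raises the per-VF "channel ready" flag in
 * USTORM RAM; a VF polling that flag treats it as the signal that the
 * DMAE-copied reply above is now valid to read.
 */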
1278 static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
1279 enum ecore_iov_vport_update_flag flag)
1282 case ECORE_IOV_VP_UPDATE_ACTIVATE:
1283 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1284 case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1285 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1286 case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1287 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1288 case ECORE_IOV_VP_UPDATE_MCAST:
1289 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1290 case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1291 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1292 case ECORE_IOV_VP_UPDATE_RSS:
1293 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1294 case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1295 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1296 case ECORE_IOV_VP_UPDATE_SGE_TPA:
1297 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1303 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1304 struct ecore_vf_info *p_vf,
1305 struct ecore_iov_vf_mbx *p_mbx,
1306 u8 status, u16 tlvs_mask,
1309 struct pfvf_def_resp_tlv *resp;
1310 u16 size, total_len, i;
1312 OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1313 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1314 size = sizeof(struct pfvf_def_resp_tlv);
1317 ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1319 /* Prepare response for all extended tlvs if they are found by PF */
1320 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1321 if (!(tlvs_mask & (1 << i)))
1324 resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
1325 ecore_iov_vport_to_tlv(p_hwfn, i), size);
1327 if (tlvs_accepted & (1 << i))
1328 resp->hdr.status = status;
1330 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1332 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1333 "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1334 p_vf->relative_vf_id,
1335 ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1340 ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1341 sizeof(struct channel_list_end_tlv));
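/* Example: if tlvs_mask has the ACTIVATE and RSS bits set but
 * tlvs_accepted only has ACTIVATE, the reply carries an ACTIVATE TLV
 * with the real status plus an RSS TLV marked PFVF_STATUS_NOT_SUPPORTED.
 */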
1346 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1347 struct ecore_ptt *p_ptt,
1348 struct ecore_vf_info *vf_info,
1349 u16 type, u16 length, u8 status)
1351 struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1353 mbx->offset = (u8 *)mbx->reply_virt;
1355 ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
1356 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1357 sizeof(struct channel_list_end_tlv));
1359 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1361 OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
1364 struct ecore_public_vf_info
1365 *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1367 bool b_enabled_only)
1369 struct ecore_vf_info *vf = OSAL_NULL;
1371 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1375 return &vf->p_vf_info;
1378 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1379 struct ecore_vf_info *p_vf)
1382 p_vf->vf_bulletin = 0;
1383 p_vf->vport_instance = 0;
1384 p_vf->configured_features = 0;
1386 /* If VF previously requested less resources, go back to default */
1387 p_vf->num_rxqs = p_vf->num_sbs;
1388 p_vf->num_txqs = p_vf->num_sbs;
1390 p_vf->num_active_rxqs = 0;
1392 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1393 p_vf->vf_queues[i].rxq_active = 0;
1394 p_vf->vf_queues[i].txq_active = 0;
1397 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1398 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1399 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1402 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1403 struct ecore_ptt *p_ptt,
1404 struct ecore_vf_info *p_vf,
1405 struct vf_pf_resc_request *p_req,
1406 struct pf_vf_resc *p_resp)
1410 /* Queue related information */
1411 p_resp->num_rxqs = p_vf->num_rxqs;
1412 p_resp->num_txqs = p_vf->num_txqs;
1413 p_resp->num_sbs = p_vf->num_sbs;
1415 for (i = 0; i < p_resp->num_sbs; i++) {
1416 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1417 /* TODO - what's this sb_qid field? Is it deprecated?
1418 * or is there an ecore_client that looks at this?
1420 p_resp->hw_sbs[i].sb_qid = 0;
1423 /* These fields are filled for backward compatibility.
1424 * Unused by modern vfs.
1426 for (i = 0; i < p_resp->num_rxqs; i++) {
1427 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1428 (u16 *)&p_resp->hw_qid[i]);
1429 p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
1432 /* Filter related information */
1433 p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1434 p_req->num_mac_filters);
1435 p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1436 p_req->num_vlan_filters);
1438 /* This isn't really needed/enforced, but some legacy VFs might depend
1439 * on the correct filling of this field.
1441 p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1443 /* Validate sufficient resources for VF */
1444 if (p_resp->num_rxqs < p_req->num_rxqs ||
1445 p_resp->num_txqs < p_req->num_txqs ||
1446 p_resp->num_sbs < p_req->num_sbs ||
1447 p_resp->num_mac_filters < p_req->num_mac_filters ||
1448 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1449 p_resp->num_mc_filters < p_req->num_mc_filters) {
1450 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1451 "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
1452 " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
1453 " vlan [%02x/%02x] mc [%02x/%02x]\n",
1455 p_req->num_rxqs, p_resp->num_rxqs,
1456 p_req->num_txqs, p_resp->num_txqs,
1457 p_req->num_sbs, p_resp->num_sbs,
1458 p_req->num_mac_filters, p_resp->num_mac_filters,
1459 p_req->num_vlan_filters, p_resp->num_vlan_filters,
1460 p_req->num_mc_filters, p_resp->num_mc_filters);
1462 /* Some legacy OSes are incapable of correctly handling this
1465 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1466 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1467 (p_vf->acquire.vfdev_info.os_type ==
1468 VFPF_ACQUIRE_OS_WINDOWS))
1469 return PFVF_STATUS_SUCCESS;
1471 return PFVF_STATUS_NO_RESOURCE;
1474 return PFVF_STATUS_SUCCESS;
1477 static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
1478 struct pfvf_stats_info *p_stats)
1480 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1481 OFFSETOF(struct mstorm_vf_zone,
1482 non_trigger.eth_queue_stat);
1483 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1484 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1485 OFFSETOF(struct ustorm_vf_zone,
1486 non_trigger.eth_queue_stat);
1487 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1488 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1489 OFFSETOF(struct pstorm_vf_zone,
1490 non_trigger.eth_queue_stat);
1491 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1492 p_stats->tstats.address = 0;
1493 p_stats->tstats.len = 0;
1496 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
1497 struct ecore_ptt *p_ptt,
1498 struct ecore_vf_info *vf)
1500 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1501 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1502 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1503 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1504 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1505 struct pf_vf_resc *resc = &resp->resc;
1506 enum _ecore_status_t rc;
1508 OSAL_MEMSET(resp, 0, sizeof(*resp));
1510 /* Write the PF version so that VF would know which version
1511 * is supported - might be later overridden. This guarantees that
1512 * VF could recognize legacy PF based on lack of versions in reply.
1514 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1515 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1517 /* TODO - not doing anything is bad since we'll assert, but this isn't
1518 * necessarily the right behavior - perhaps we should have allowed some
1521 if (vf->state != VF_FREE &&
1522 vf->state != VF_STOPPED) {
1523 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1524 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1525 vf->abs_vf_id, vf->state);
1529 /* Validate FW compatibility */
1530 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1531 if (req->vfdev_info.capabilities &
1532 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1533 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1535 /* This legacy support would need to be removed once
1536 * the major has changed.
1538 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1540 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1541 "VF[%d] is pre-fastpath HSI\n",
1543 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1544 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1547 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1548 " incompatible with loaded FW's faspath"
1551 req->vfdev_info.eth_fp_hsi_major,
1552 req->vfdev_info.eth_fp_hsi_minor,
1553 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1559 /* On 100g PFs, prevent old VFs from loading */
1560 if ((p_hwfn->p_dev->num_hwfns > 1) &&
1561 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1563 "VF[%d] is running an old driver that doesn't support"
1569 #ifndef __EXTRACT__LINUX__
1570 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1571 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1576 /* Store the acquire message */
1577 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1579 vf->opaque_fid = req->vfdev_info.opaque_fid;
1581 vf->vf_bulletin = req->bulletin_addr;
1582 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1583 vf->bulletin.size : req->bulletin_size;
1585 /* fill in pfdev info */
1586 pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1587 pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
1588 pfdev_info->indices_per_sb = PIS_PER_SB;
1590 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1591 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1592 if (p_hwfn->p_dev->num_hwfns > 1)
1593 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1595 ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1597 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1600 pfdev_info->fw_major = FW_MAJOR_VERSION;
1601 pfdev_info->fw_minor = FW_MINOR_VERSION;
1602 pfdev_info->fw_rev = FW_REVISION_VERSION;
1603 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1605 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1608 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1609 req->vfdev_info.eth_fp_hsi_minor);
1610 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1611 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1614 pfdev_info->dev_type = p_hwfn->p_dev->type;
1615 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1617 /* Fill resources available to VF; Make sure there are enough to
1618 * satisfy the VF's request.
1620 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1621 &req->resc_request, resc);
1622 if (vfpf_status != PFVF_STATUS_SUCCESS)
1625 /* Start the VF in FW */
1626 rc = ecore_sp_vf_start(p_hwfn, vf);
1627 if (rc != ECORE_SUCCESS) {
1628 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1630 vfpf_status = PFVF_STATUS_FAILURE;
1634 /* Fill agreed size of bulletin board in response, and post
1635 * an initial image to the bulletin board.
1637 resp->bulletin_size = vf->bulletin.size;
1638 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1640 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1641 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1642 " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1643 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1645 vf->abs_vf_id, resp->pfdev_info.chip_num,
1646 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1647 (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1648 resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1649 resc->num_vlan_filters);
1651 vf->state = VF_ACQUIRED;
1654 /* Prepare Response */
1655 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1656 sizeof(struct pfvf_acquire_resp_tlv),
1660 static enum _ecore_status_t
1661 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1662 struct ecore_vf_info *p_vf, bool val)
1664 struct ecore_sp_vport_update_params params;
1665 enum _ecore_status_t rc;
1667 if (val == p_vf->spoof_chk) {
1668 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1669 "Spoofchk value[%d] is already configured\n", val);
1670 return ECORE_SUCCESS;
1673 OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1674 params.opaque_fid = p_vf->opaque_fid;
1675 params.vport_id = p_vf->vport_id;
1676 params.update_anti_spoofing_en_flg = 1;
1677 params.anti_spoofing_en = val;
1679 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1681 if (rc == ECORE_SUCCESS) {
1682 p_vf->spoof_chk = val;
1683 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1684 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1685 "Spoofchk val[%d] configured\n", val);
1687 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1688 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1689 val, p_vf->relative_vf_id);
1695 static enum _ecore_status_t
1696 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1697 struct ecore_vf_info *p_vf)
1699 struct ecore_filter_ucast filter;
1700 enum _ecore_status_t rc = ECORE_SUCCESS;
1703 OSAL_MEMSET(&filter, 0, sizeof(filter));
1704 filter.is_rx_filter = 1;
1705 filter.is_tx_filter = 1;
1706 filter.vport_to_add_to = p_vf->vport_id;
1707 filter.opcode = ECORE_FILTER_ADD;
1709 /* Reconfigure vlans */
1710 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1711 if (!p_vf->shadow_config.vlans[i].used)
1714 filter.type = ECORE_FILTER_VLAN;
1715 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1716 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1717 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1718 filter.vlan, p_vf->relative_vf_id);
1719 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1720 &filter, ECORE_SPQ_MODE_CB,
1723 DP_NOTICE(p_hwfn, true,
1724 "Failed to configure VLAN [%04x]"
1726 filter.vlan, p_vf->relative_vf_id);
1734 static enum _ecore_status_t
1735 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1736 struct ecore_vf_info *p_vf, u64 events)
1738 enum _ecore_status_t rc = ECORE_SUCCESS;
1740 /* TODO - what about MACs? */
1742 if ((events & (1 << VLAN_ADDR_FORCED)) &&
1743 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1744 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1749 static enum _ecore_status_t
1750 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1751 struct ecore_vf_info *p_vf,
1754 enum _ecore_status_t rc = ECORE_SUCCESS;
1755 struct ecore_filter_ucast filter;
1757 if (!p_vf->vport_instance)
1760 if (events & (1 << MAC_ADDR_FORCED)) {
1761 /* Since there's no way [currently] of removing the MAC,
1762 * we can always assume this means we need to force it.
1764 OSAL_MEMSET(&filter, 0, sizeof(filter));
1765 filter.type = ECORE_FILTER_MAC;
1766 filter.opcode = ECORE_FILTER_REPLACE;
1767 filter.is_rx_filter = 1;
1768 filter.is_tx_filter = 1;
1769 filter.vport_to_add_to = p_vf->vport_id;
1770 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1772 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1774 ECORE_SPQ_MODE_CB, OSAL_NULL);
1776 DP_NOTICE(p_hwfn, true,
1777 "PF failed to configure MAC for VF\n");
1781 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1784 if (events & (1 << VLAN_ADDR_FORCED)) {
1785 struct ecore_sp_vport_update_params vport_update;
1789 OSAL_MEMSET(&filter, 0, sizeof(filter));
1790 filter.type = ECORE_FILTER_VLAN;
1791 filter.is_rx_filter = 1;
1792 filter.is_tx_filter = 1;
1793 filter.vport_to_add_to = p_vf->vport_id;
1794 filter.vlan = p_vf->bulletin.p_virt->pvid;
1795 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
1798 /* Send the ramrod */
1799 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1801 ECORE_SPQ_MODE_CB, OSAL_NULL);
1803 DP_NOTICE(p_hwfn, true,
1804 "PF failed to configure VLAN for VF\n");
1808 /* Update the default-vlan & silent vlan stripping */
1809 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
1810 vport_update.opaque_fid = p_vf->opaque_fid;
1811 vport_update.vport_id = p_vf->vport_id;
1812 vport_update.update_default_vlan_enable_flg = 1;
1813 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1814 vport_update.update_default_vlan_flg = 1;
1815 vport_update.default_vlan = filter.vlan;
1817 vport_update.update_inner_vlan_removal_flg = 1;
1818 removal = filter.vlan ?
1819 1 : p_vf->shadow_config.inner_vlan_removal;
1820 vport_update.inner_vlan_removal_flg = removal;
1821 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1822 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
1823 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
1825 DP_NOTICE(p_hwfn, true,
1826 "PF failed to configure VF vport for vlan\n");
1830 /* Update all the Rx queues */
1831 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1834 if (!p_vf->vf_queues[i].rxq_active)
1837 qid = p_vf->vf_queues[i].fw_rx_qid;
1839 rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
1841 ECORE_SPQ_MODE_EBLOCK,
1844 DP_NOTICE(p_hwfn, true,
1845 "Failed to send Rx update"
1846 " fo queue[0x%04x]\n",
1853 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1855 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
1858 /* If forced features are terminated, we need to configure the shadow
1859 * configuration back again.
1862 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
1867 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
1868 struct ecore_ptt *p_ptt,
1869 struct ecore_vf_info *vf)
1871 struct ecore_sp_vport_start_params params = { 0 };
1872 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1873 struct vfpf_vport_start_tlv *start;
1874 u8 status = PFVF_STATUS_SUCCESS;
1875 struct ecore_vf_info *vf_info;
1878 enum _ecore_status_t rc;
1880 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
1882 DP_NOTICE(p_hwfn->p_dev, true,
1883 "Failed to get VF info, invalid vfid [%d]\n",
1884 vf->relative_vf_id);
1888 vf->state = VF_ENABLED;
1889 start = &mbx->req_virt->start_vport;
1891 /* Initialize Status block in CAU */
1892 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
1893 if (!start->sb_addr[sb_id]) {
1894 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1895 "VF[%d] did not fill the address of SB %d\n",
1896 vf->relative_vf_id, sb_id);
1900 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
1901 start->sb_addr[sb_id],
1905 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
1907 vf->mtu = start->mtu;
1908 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
1910 /* Take into consideration configuration forced by hypervisor;
1911 * If none is configured, use the supplied VF values [for old
1912 * vfs that would still be fine, since they passed '0' as padding].
1914 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
1915 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
1916 u8 vf_req = start->only_untagged;
1918 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
1919 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
1922 params.tpa_mode = start->tpa_mode;
1923 params.remove_inner_vlan = start->inner_vlan_removal;
1924 params.tx_switching = true;
1927 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1928 DP_NOTICE(p_hwfn, false,
1929 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
1930 params.tx_switching = false;
1934 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
1935 params.drop_ttl0 = false;
1936 params.concrete_fid = vf->concrete_fid;
1937 params.opaque_fid = vf->opaque_fid;
1938 params.vport_id = vf->vport_id;
1939 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
1940 params.mtu = vf->mtu;
1941 params.check_mac = true;
1943 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
1944 if (rc != ECORE_SUCCESS) {
1946 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
1947 status = PFVF_STATUS_FAILURE;
1949 vf->vport_instance++;
1951 /* Force configuration if needed on the newly opened vport */
1952 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
1953 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
1954 vf->vport_id, vf->opaque_fid);
1955 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
1958 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
1959 sizeof(struct pfvf_def_resp_tlv), status);
1962 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
1963 struct ecore_ptt *p_ptt,
1964 struct ecore_vf_info *vf)
1966 u8 status = PFVF_STATUS_SUCCESS;
1967 enum _ecore_status_t rc;
1969 vf->vport_instance--;
1970 vf->spoof_chk = false;
1972 if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
1973 (ecore_iov_validate_active_txq(p_hwfn, vf))) {
1974 vf->b_malicious = true;
1975 DP_NOTICE(p_hwfn, false,
1976 "VF [%02x] - considered malicious;"
1977 " Unable to stop RX/TX queuess\n",
1981 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
1982 if (rc != ECORE_SUCCESS) {
1984 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
1985 status = PFVF_STATUS_FAILURE;
1988 /* Forget the configuration on the vport */
1989 vf->configured_features = 0;
1990 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
1992 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
1993 sizeof(struct pfvf_def_resp_tlv), status);
1996 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
1997 struct ecore_ptt *p_ptt,
1998 struct ecore_vf_info *vf,
1999 u8 status, bool b_legacy)
2001 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2002 struct pfvf_start_queue_resp_tlv *p_tlv;
2003 struct vfpf_start_rxq_tlv *req;
2006 mbx->offset = (u8 *)mbx->reply_virt;
2008 /* Taking a bigger struct instead of adding a TLV to list was a
2009 * mistake, but one which we're now stuck with, as some older
2010 * clients assume the size of the previous response.
2013 length = sizeof(*p_tlv);
2015 length = sizeof(struct pfvf_def_resp_tlv);
2017 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2019 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2020 sizeof(struct channel_list_end_tlv));
2022 /* Update the TLV with the response */
2023 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2024 req = &mbx->req_virt->start_rxq;
2025 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2026 OFFSETOF(struct mstorm_vf_zone,
2027 non_trigger.eth_rx_queue_producers) +
2028 sizeof(struct eth_rx_prod_data) * req->rx_qid;
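/* i.e. the PF hands the VF the address of its per-queue Rx producer
 * inside the MSTORM zone-B BAR0 window; legacy VFs compute their own
 * producer address and ignore this field (see the note below).
 */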
2031 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2034 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2035 struct ecore_ptt *p_ptt,
2036 struct ecore_vf_info *vf)
2038 struct ecore_queue_start_common_params params;
2039 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2040 u8 status = PFVF_STATUS_NO_RESOURCE;
2041 struct vfpf_start_rxq_tlv *req;
2042 bool b_legacy_vf = false;
2043 enum _ecore_status_t rc;
2045 req = &mbx->req_virt->start_rxq;
2047 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
2048 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2051 OSAL_MEMSET(&params, 0, sizeof(params));
2052 params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
2053 params.vf_qid = req->rx_qid;
2054 params.vport_id = vf->vport_id;
2055 params.stats_id = vf->abs_vf_id + 0x10;
2056 params.sb = req->hw_sb;
2057 params.sb_idx = req->sb_index;
2059 /* Legacy VFs have their Producers in a different location, which they
2060 * calculate on their own and clean the producer prior to this.
2062 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2063 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2067 GTT_BAR0_MAP_REG_MSDM_RAM +
2068 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2071 rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
2072 vf->vf_queues[req->rx_qid].fw_cid,
2081 status = PFVF_STATUS_FAILURE;
2083 status = PFVF_STATUS_SUCCESS;
2084 vf->vf_queues[req->rx_qid].rxq_active = true;
2085 vf->num_active_rxqs++;
2089 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
2090 status, b_legacy_vf);
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *p_vf,
					    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			      length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		u16 qid = mbx->req_virt->start_txq.tx_qid;

		p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
					   DQ_DEMS_LEGACY);
	}

	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
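/* Handle a VF's START_TXQ request: validate the queue and status-block
 * indices, then post the tx-queue start ramrod using the PQ assigned to
 * this VF.
 */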
static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	struct ecore_queue_start_common_params params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_txq_tlv *req;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
	params.qzone_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
					   vf->opaque_fid,
					   vf->vf_queues[req->tx_qid].fw_cid,
					   &params,
					   req->pbl_addr,
					   req->pbl_size,
					   ecore_get_cm_pq_idx_vf(p_hwfn,
							vf->relative_vf_id));

	if (rc)
		status = PFVF_STATUS_FAILURE;
	else {
		status = PFVF_STATUS_SUCCESS;
		vf->vf_queues[req->tx_qid].txq_active = true;
	}

out:
	ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
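/* Stop a contiguous range of VF rx/tx queues, making sure the requested
 * range does not exceed the VF's queue array.
 */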
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
						   struct ecore_vf_info *vf,
						   u16 rxq_id,
						   u8 num_rxqs,
						   bool cqe_completion)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int qid;

	if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
		return ECORE_INVAL;

	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
		if (vf->vf_queues[qid].rxq_active) {
			rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
							vf->vf_queues[qid].
							fw_rx_qid, false,
							cqe_completion);
			if (rc)
				return rc;
		}
		vf->vf_queues[qid].rxq_active = false;
		vf->num_active_rxqs--;
	}

	return rc;
}

static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
						   struct ecore_vf_info *vf,
						   u16 txq_id, u8 num_txqs)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int qid;

	if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
		return ECORE_INVAL;

	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
		if (vf->vf_queues[qid].txq_active) {
			rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
							vf->vf_queues[qid].
							fw_tx_qid);
			if (rc)
				return rc;
		}
		vf->vf_queues[qid].txq_active = false;
	}

	return rc;
}
static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_rxqs_tlv *req;
	enum _ecore_status_t rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_rxqs;
	rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				    req->num_rxqs, req->cqe_completion);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			       length, status);
}

static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_SUCCESS;
	struct vfpf_stop_txqs_tlv *req;
	enum _ecore_status_t rc;

	/* We give the option of starting from qid != 0, in this case we
	 * need to make sure that qid + num_qs doesn't exceed the actual
	 * amount of queues that exist.
	 */
	req = &mbx->req_virt->stop_txqs;
	rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
	if (rc)
		status = PFVF_STATUS_FAILURE;

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			       length, status);
}
static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u16 qid;
	enum _ecore_status_t rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	for (i = 0; i < req->num_rxqs; i++) {
		qid = req->rx_qid + i;

		if (!vf->vf_queues[qid].rxq_active) {
			DP_NOTICE(p_hwfn, true,
				  "VF rx_qid = %d isn't active!\n", qid);
			status = PFVF_STATUS_FAILURE;
			break;
		}

		rc = ecore_sp_eth_rx_queues_update(p_hwfn,
						   vf->vf_queues[qid].fw_rx_qid,
						   1,
						   complete_cqe_flg,
						   complete_event_flg,
						   ECORE_SPQ_MODE_EBLOCK,
						   OSAL_NULL);
		if (rc) {
			status = PFVF_STATUS_FAILURE;
			break;
		}
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			       length, status);
}
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
				 void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
			return OSAL_NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Extended tlv type %s, length %d found\n",
				   ecore_channel_tlvs_string[p_tlv->type],
				   p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, true,
				  "TLVs have overrun the buffer size\n");
			return OSAL_NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return OSAL_NULL;
}
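/* Each of the following ecore_iov_vp_update_*() helpers searches the VF's
 * VPORT_UPDATE request for one specific extended TLV; when found, its
 * values are copied into the vport-update ramrod parameters and the
 * matching bit is set in the caller's tlvs_mask.
 */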
static void
ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
}
static void
ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
			       struct ecore_sp_vport_update_params *p_data,
			       struct ecore_vf_info *p_vf,
			       struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
}
static void
ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_tx_switch_tlv)
		return;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Ignore tx-switching configuration originating"
			  " from VFs\n");
		return;
	}
#endif

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
}
static void
ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
				    u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
		    sizeof(unsigned long) *
		    ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
static void
ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
				struct ecore_sp_vport_update_params *p_data,
				struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    struct ecore_iov_vf_mbx *p_mbx,
				    u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
			      struct ecore_vf_info *vf,
			      struct ecore_sp_vport_update_params *p_data,
			      struct ecore_rss_params *p_rss,
			      struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	u16 i, q_idx, max_q_idx;
	u16 table_size;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = OSAL_NULL;
		return;
	}

	OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));

	p_rss->update_rss_config =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key =
	    !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
		    sizeof(p_rss->rss_ind_table));
	OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
		    sizeof(p_rss->rss_key));

	table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
				(1 << p_rss_tlv->rss_table_size_log));

	max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);

	for (i = 0; i < table_size; i++) {
		u16 index = vf->vf_queues[0].fw_rx_qid;

		q_idx = p_rss->rss_ind_table[i];
		if (q_idx >= max_q_idx)
			DP_NOTICE(p_hwfn, true,
				  "rss_ind_table[%d] = %d,"
				  " rxq is out of range\n",
				  i, q_idx);
		else if (!vf->vf_queues[q_idx].rxq_active)
			DP_NOTICE(p_hwfn, true,
				  "rss_ind_table[%d] = %d, rxq is not active\n",
				  i, q_idx);
		else
			index = vf->vf_queues[q_idx].fw_rx_qid;
		p_rss->rss_ind_table[i] = index;
	}

	p_data->rss_params = p_rss;
	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *vf,
				  struct ecore_sp_vport_update_params *p_data,
				  struct ecore_sge_tpa_params *p_sge_tpa,
				  struct ecore_iov_vf_mbx *p_mbx,
				  u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = OSAL_NULL;
		return;
	}

	OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
}
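/* Handle a VF's VPORT_UPDATE request: gather all extended TLVs into a
 * single vport-update ramrod, allow the upper layer to veto individual
 * features, and report back both the TLVs found and those accepted.
 */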
static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_update_params params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct ecore_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	struct ecore_rss_params rss_params;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	enum _ecore_status_t rc;

	/* Validate that the VF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d],"
			   " failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = OSAL_NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct ecore_sp_vport_update_params.
	 */
	ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
				      mbx, &tlvs_mask);
	ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					  &sge_tpa_params, mbx, &tlvs_mask);

	/* Just log a message if there is no single extended tlv in buffer.
	 * When all features of vport update ramrod would be requested by VF
	 * as extended TLVs in buffer then an error can be returned in response
	 * if there is no extended TLV present in buffer.
	 */
	tlvs_accepted = tlvs_mask;

#ifndef LINUX_REMOVE
	if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted) !=
	    ECORE_SUCCESS) {
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Upper-layer prevents said VF"
				   " configuration\n");
		else
			DP_NOTICE(p_hwfn, true,
				  "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						    tlvs_mask, tlvs_accepted);
	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
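/* The shadow-config helpers below track the VF's unicast MAC/VLAN filter
 * requests on the PF side, so forced configurations can be enforced and
 * the VF's filters re-traced later if needed.
 */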
static enum _ecore_status_t
ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
				struct ecore_vf_info *p_vf,
				struct ecore_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing"
				   " vlan\n",
				   p_vf->relative_vf_id);
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
		return ECORE_SUCCESS;

	if (p_params->opcode == ECORE_FILTER_ADD ||
	    p_params->opcode == ECORE_FILTER_REPLACE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d"
				   " vlan filters\n",
				   p_vf->relative_vf_id,
				   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
			return ECORE_INVAL;
		}
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
			       struct ecore_vf_info *p_vf,
			       struct ecore_filter_ucast *p_params)
{
	char empty_mac[ETH_ALEN];
	int i;

	OSAL_MEM_ZERO(empty_mac, ETH_ALEN);

	/* If we're in forced-mode, we don't allow any change */
	/* TODO - this would change if we were ever to implement logic for
	 * removing a forced MAC altogether [in which case, like for vlans,
	 * we should be able to re-trace previous configuration.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
		return ECORE_SUCCESS;

	/* First remove entries and then add new ones */
	if (p_params->opcode == ECORE_FILTER_REMOVE) {
		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
					 p_params->mac, ETH_ALEN)) {
				OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
					      ETH_ALEN);
				break;
			}
		}

		if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "MAC isn't configured\n");
			return ECORE_INVAL;
		}
	} else if (p_params->opcode == ECORE_FILTER_REPLACE ||
		   p_params->opcode == ECORE_FILTER_FLUSH) {
		for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
			OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
	}

	/* List the new MAC address */
	if (p_params->opcode != ECORE_FILTER_ADD &&
	    p_params->opcode != ECORE_FILTER_REPLACE)
		return ECORE_SUCCESS;

	for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
				 empty_mac, ETH_ALEN)) {
			OSAL_MEMCPY(p_vf->shadow_config.macs[i],
				    p_params->mac, ETH_ALEN);
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No available place for MAC\n");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   struct ecore_filter_ucast *p_params)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (p_params->type == ECORE_FILTER_MAC) {
		rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	if (p_params->type == ECORE_FILTER_VLAN)
		rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}
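/* Handle a VF's UCAST_FILTER request: update the PF-side shadow copy
 * first, then refuse additions that conflict with a forced MAC/VLAN
 * before posting the actual filter ramrod.
 */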
static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct ecore_vf_info *vf)
{
	struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_filter_ucast params;
	enum _ecore_status_t rc;

	/* Prepare the unicast filter params */
	OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum ecore_filter_opcode)req->opcode;
	params.type = (enum ecore_filter_ucast_type)req->type;

	/* @@@TBD - We might need logic on HV side in determining this */
	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
		   " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac[0], params.mac[1], params.mac[2],
		   params.mac[3], params.mac[4], params.mac[5], params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No VPORT instance available for VF[%d],"
			   " failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
	    ECORE_SUCCESS) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_VLAN ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == ECORE_FILTER_ADD ||
		    params.opcode == ECORE_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
	    (params.type == ECORE_FILTER_MAC ||
	     params.type == ECORE_FILTER_MAC_VLAN)) {
		if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
		    (params.opcode != ECORE_FILTER_ADD &&
		     params.opcode != ECORE_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
	if (rc == ECORE_EXISTS) {
		goto out;
	} else if (rc == ECORE_INVAL) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				       ECORE_SPQ_MODE_CB, OSAL_NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, false);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			       sizeof(struct pfvf_def_resp_tlv),
			       PFVF_STATUS_SUCCESS);
}
static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			       length, status);
}
static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				      p_vf->opaque_fid);

		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			       length, status);
}
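/* FLR polling helpers - after a VF FLR the PF waits for the DORQ usage
 * counter to drain to zero and for the PBF consumers to pass the
 * producers sampled on entry before running the final cleanup.
 */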
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
			   struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	int cnt;
	u32 val;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		OSAL_MSLEEP(20);
	}
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
			  struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS; i++) {
		u32 prod;

		cons[i] = ecore_rd(p_hwfn, p_ptt,
				   PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				   i * 0x40);
		prod = ecore_rd(p_hwfn, p_ptt,
				PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
				i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS; i++) {
			u32 tmp;

			tmp = ecore_rd(p_hwfn, p_ptt,
				       PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				       i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS)
			break;

		OSAL_MSLEEP(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return ECORE_TIMEOUT;
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
						  struct ecore_vf_info *p_vf,
						  struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	/* TODO - add SRC and TM polling once we add storage IOV */

	rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt,
				 u16 rel_vf_id, u32 *ack_vfs)
{
	struct ecore_vf_info *p_vf;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return ECORE_SUCCESS;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		/* TODO - should we lock channel? */

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		ecore_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		/* TODO - what to do in case of failure? */
		rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc != ECORE_SUCCESS)
			goto cleanup;

		rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			/* TODO - what's now? What a mess.... */
			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n",
			       vfid);
			return rc;
		}

		/* Workaround to make VF-PF channel ready, as FW
		 * doesn't do that as a part of FLR.
		 */
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_USDM_RAM +
		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			/* TODO - again, a mess... */
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
	}

	return rc;
}
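/* FLR cleanup entry points - sweep either all VFs or a single VF, then
 * ACK the handled VFs towards the management FW.
 */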
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 i;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	OSAL_MSLEEP(100);

	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Wait instead of polling the BRB <-> PRS interface */
	OSAL_MSLEEP(100);

	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		struct ecore_vf_info *p_vf;
		u8 vfid;

		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *p_params,
			struct ecore_mcp_link_state *p_link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
}
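/* Main PF-side mailbox dispatcher - validates the first TLV of a VF
 * message and routes it to the matching handler. Requests from VFs
 * considered malicious are answered with PFVF_STATUS_MALICIOUS, and
 * unknown TLVs are answered only if a valid reply address is available.
 */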
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);

	mbx->first_tlv = mbx->req_virt->first_tlv;

	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
			     p_vf->relative_vf_id,
			     mbx->first_tlv.tl.type);

	/* Lock the per vf op mutex and note the locker's identity.
	 * The unlock will take place in mbx response.
	 */
	ecore_iov_lock_vf_pf_channel(p_hwfn,
				     p_vf, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* If we've received a message from a VF we consider malicious
		 * we ignore the message unless it's one for RELEASE, in which
		 * case we'll let it have the benefit of doubt, allowing the
		 * next loaded driver to start again.
		 */
		if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
			/* TODO - initiate FLR, remove malicious indication */
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
				   p_vf->abs_vf_id);
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
				   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
		}

		ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				       mbx->first_tlv.tl.type,
				       sizeof(struct pfvf_def_resp_tlv),
				       PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn, false,
			  "VF[%02x]: unknown TLV. type %04x length %04x"
			  " padding %08x reply address %lu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding,
			  (unsigned long)mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address))
			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					       mbx->first_tlv.tl.type,
					       sizeof(struct pfvf_def_resp_tlv),
					       PFVF_STATUS_NOT_SUPPORTED);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV -"
				   " no valid reply address\n",
				   p_vf->abs_vf_id);
	}

	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
				       mbx->first_tlv.tl.type);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}
void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
{
	u64 add_bit = 1ULL << (vfid % 64);

	/* TODO - add locking mechanisms [no atomics in ecore, so we can't
	 * add the lock inside the ecore_pf_iov struct].
	 */
	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
}
void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
					       u64 *events)
{
	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;

	/* TODO - Take a lock */
	OSAL_MEMCPY(events, p_pending_events,
		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
	OSAL_MEMSET(p_pending_events, 0,
		    sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
}
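/* EQE handling - translate the absolute VF id carried by an event-ring
 * entry into the PF's VF-info array, and act on VF->PF channel, FLR and
 * malicious-VF events.
 */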
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be"
			   " handled by PF\n",
			   abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}
static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
	if (!p_vf)
		return;

	DP_INFO(p_hwfn,
		"VF [%d] - Malicious behavior [%02x]\n",
		p_vf->abs_vf_id, p_data->errId);

	p_vf->b_malicious = true;

	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
					   u8 opcode,
					   __le16 echo,
					   union event_ring_data *data)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return E4_MAX_NUM_VFS;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct ecore_dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
	params.src_vfid = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);
		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}
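/* Bulletin-board setters - the PF publishes forced MAC/VLAN and related
 * configuration through the VF's bulletin board, where the VF driver can
 * pick it up on its next bulletin poll.
 */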
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << MAC_ADDR_FORCED;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;
	/* Forced MAC will disable MAC_ADDR */
	vf_info->bulletin.p_virt->valid_bitmap &=
	    ~(1 << VFPF_BULLETIN_MAC_ADDR);

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
	    (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
	    : 0;

	return ECORE_SUCCESS;
}
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}

void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
				u8 *p_vport_id)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*p_vport_id = vf_info->vport_id;
}
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced vlan, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
	    : ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif
bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed,"
				  " can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return ecore_configure_vport_wfq(p_dev, vport_id, rate);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}
enum _ecore_status_t
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}