/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_MAX"
};
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid)
{
	if (rx_qid >= p_vf->num_rxqs)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
	return rx_qid < p_vf->num_rxqs;
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid)
{
	if (tx_qid >= p_vf->num_txqs)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
	return tx_qid < p_vf->num_txqs;
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
					  struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (p_vf->vf_queues[i].p_rx_cid)
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
					  struct ecore_vf_info *p_vf)
{
	u8 i;

	/* Iterate over the Tx queue count (was num_rxqs - a copy/paste slip;
	 * harmless while the two counts are equal, but wrong in principle).
	 */
	for (i = 0; i < p_vf->num_txqs; i++)
		if (p_vf->vf_queues[i].p_tx_cid)
			return true;

	return false;
}
/* TODO - this is the Linux crc32; need a way to ifdef it out for Linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return crc;
}
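
/* Editorial note (illustrative, not part of the driver): 0xedb88320 is the
 * reflected CRC-32 polynomial also used by Linux crc32_le(). The bulletin
 * code below seeds it with 0 and runs it over everything past the crc field,
 * e.g.:
 *
 *	crc = ecore_crc32(0, (u8 *)p_bulletin + sizeof(p_bulletin->crc),
 *			  p_vf->bulletin.size - sizeof(p_bulletin->crc));
 */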
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
				      p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs is already set to a non-zero value."
			   " Ignoring PCI configuration value\n");
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x, "
		   "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d;"
			  " setting num_vfs to zero\n",
			  iov->num_vfs);
		iov->num_vfs = 0;
	}

	return ECORE_SUCCESS;
}
static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "ecore_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0;
	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
		    !(p_sb->status & ECORE_IGU_STATUS_PF)) {
			val = ecore_rd(p_hwfn, p_ptt,
				       IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	p_iov_info->base_vport_id = 1;	/* @@@TBD resource allocation */

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);
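
		/* Illustrative only (example values are assumptions): a PF
		 * whose opaque_fid low byte is 0x00 and a VF with abs_vf_id 5
		 * yields opaque_fid 0x0500 - the VF index travels in the
		 * high byte.
		 */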
		/* @@TBD MichalK - add base vport_id of VFs to equation */
		vf->vport_id = p_iov_info->base_vport_id + idx;

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
	    num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return ecore_iov_allocate_vfdb(p_hwfn);
}

void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
	ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}

	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
	 *    provide the number for eng0. 2nd engine VFs would begin
	 *    after the first engine's VFs.
	 *  - If !ARI, VFs would start on next device.
	 *    so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the
	 * latter to differentiate between the two.
	 */
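	/* Worked example (illustrative; the values are assumptions, not from
	 * the source): an ARI-capable device with abs_pf_id 2 and SR-IOV
	 * offset 16 takes the first branch below (16 < 254) and gets
	 * first_vf_in_pf = 16 + 2 - 16 = 2, i.e. VFs start right after the
	 * engine's PFs. A !ARI device would publish offset >= 254 here and
	 * take the (offset + pf_id - 256) branch instead.
	 */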
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
		    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
		    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}
static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}
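
/* Illustrative arithmetic note (not part of the driver): each WAS_ERROR
 * register covers 32 VFs, so e.g. abs_vfid 37 selects the second register
 * (37 >> 5 == 1, at base + 4) and clears bit 37 & 0x1f == 5 in it.
 */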
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	/* It's possible VF was previously considered malicious */
	vf->b_malicious = false;

	rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
				      vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	return rc;
}
/**
 * @brief ecore_iov_config_perm_table - configure the permission zone table
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
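
/* Illustrative note (not part of the driver): per the format above, bit 8 is
 * the Valid bit and bits [7:0] carry the VF id, so enabling queue-zone access
 * for abs_vf_id 5 writes 0x105, while disabling writes 0.
 */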
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;

	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
		if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
				 val);

			/* Configure igu sb in CAU which were marked valid */
			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
						p_hwfn->rel_pf_id,
						vf->abs_vf_id, 1);
			ecore_dmae_host2grc(p_hwfn, p_ptt,
					    (u64)(osal_uintptr_t)&sb_entry,
					    CAU_REG_SB_VAR_MEMORY +
					    igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}
/**
 * @brief The function invalidates all the VF entries;
 *        technically this isn't required, but it's added for
 *        cleanness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    ECORE_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
{
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	u16 qid, num_irqs;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return ECORE_INVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
		    FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return ECORE_INVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return ECORE_INVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_irqs);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		/* CIDs are per-VF, so no problem having them 0-based. */
		p_queue->fw_cid = i;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid,
			   p_queue->fw_cid);
	}

	/* Update the link configuration in bulletin. */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
		    (1ULL << (vf->relative_vf_id % 64));
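
		/* Illustrative note (not part of the driver): active_vfs is
		 * an array of u64 bitmap words, so e.g. relative_vf_id 70
		 * lands in word 70 / 64 == 1, bit 70 % 64 == 6. The release
		 * path below clears the same word/bit pair.
		 */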
		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		/* Clear word vf_id / 64, bit vf_id % 64 (the original cleared
		 * bit vf_id / 64 - a shift typo that corrupted the bitmap).
		 */
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
		    ~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}
static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the unlocking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
		    u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
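
/* Illustrative usage sketch (not part of the driver): replies are built by
 * chaining TLVs and terminating with CHANNEL_TLV_LIST_END, as done in
 * ecore_iov_prepare_resp() below:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_ACQUIRE,
 *		      sizeof(struct pfvf_acquire_resp_tlv));
 *	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */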
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}
		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
				    u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);
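
	/* Editorial note (inferred from the two copies above): the reply is
	 * DMAed in two steps - the body first, then the first 8 bytes that
	 * hold the status header - so a VF polling the header never observes
	 * a valid status for a partially written reply.
	 */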
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
}
static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
				  enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
				     ecore_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
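
	/* Illustrative note (not part of the driver): if the VF requested
	 * e.g. ACTIVATE and RSS but the PF accepted only ACTIVATE, tlvs_mask
	 * has both bits set while tlvs_accepted has just the ACTIVATE bit,
	 * so the RSS sub-TLV above carries PFVF_STATUS_NOT_SUPPORTED.
	 */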
	return total_len;
}

static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      u16 relative_vf_id,
			      bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];

		if (p_queue->p_rx_cid) {
			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->p_rx_cid);
			p_queue->p_rx_cid = OSAL_NULL;
		}

		if (p_queue->p_tx_cid) {
			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->p_tx_cid);
			p_queue->p_tx_cid = OSAL_NULL;
		}
	}

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
			   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
			   " vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
					   struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
	    OFFSETOF(struct mstorm_vf_zone,
		     non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
	    OFFSETOF(struct ustorm_vf_zone,
		     non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
	    OFFSETOF(struct pstorm_vf_zone,
		     non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 * versatility here.
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->p_dev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
	    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->p_dev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static enum _ecore_status_t
ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf,
				 u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)
		return ECORE_INVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
		    ECORE_FILTER_FLUSH;

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
		    1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			struct ecore_queue_cid *p_cid;

			p_cid = p_vf->vf_queues[i].p_rx_cid;
			if (p_cid == OSAL_NULL)
				continue;

			rc = ecore_sp_eth_rx_queues_update(p_hwfn,
							   &p_cid,
							   1, 0, 1,
							   ECORE_SPQ_MODE_EBLOCK,
							   OSAL_NULL);
			if (rc) {
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_start_params params = { 0 };
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],
				      vf->igu_sbs[sb_id],
				      vf->abs_vf_id, 1);
	}

	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;
	}
#endif

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (ecore_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn, false,
			  "VF [%02x] - considered malicious;"
			  " Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
	}

	rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *vf,
					    u8 status, bool b_legacy)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to the list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			      length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
		    OFFSETOF(struct mstorm_vf_zone,
			     non_trigger.eth_rx_queue_producers) +
		    sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}
2082 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
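/* Editor's sketch (not driver code): the producer offset handed back to a
 * non-legacy VF is plain array arithmetic inside the MSDM zone-B aperture
 * of BAR0. E.g. for rx_qid = 2:
 *
 *	offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
 *		 OFFSETOF(struct mstorm_vf_zone,
 *			  non_trigger.eth_rx_queue_producers) +
 *		 2 * sizeof(struct eth_rx_prod_data);
 *
 * i.e. the VF's third Rx-producer slot. Legacy VFs compute their producer
 * location on their own and only get the default-sized response above.
 */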
2085 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2086 struct ecore_ptt *p_ptt,
2087 struct ecore_vf_info *vf)
2089 struct ecore_queue_start_common_params params;
2090 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2091 u8 status = PFVF_STATUS_NO_RESOURCE;
2092 struct ecore_vf_q_info *p_queue;
2093 struct vfpf_start_rxq_tlv *req;
2094 bool b_legacy_vf = false;
2095 enum _ecore_status_t rc;
2097 req = &mbx->req_virt->start_rxq;
2099 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
2100 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2103 /* Acquire a new queue-cid */
2104 p_queue = &vf->vf_queues[req->rx_qid];
2106 OSAL_MEMSET(&params, 0, sizeof(params));
2107 params.queue_id = (u8)p_queue->fw_rx_qid;
2108 params.vport_id = vf->vport_id;
2109 params.stats_id = vf->abs_vf_id + 0x10;
2110 params.sb = req->hw_sb;
2111 params.sb_idx = req->sb_index;
2113 p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
2118 if (p_queue->p_rx_cid == OSAL_NULL)
2121 /* Legacy VFs keep their producers in a different location, which they
2122 * calculate on their own; they clean the producer prior to this.
2124 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2125 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2129 GTT_BAR0_MAP_REG_MSDM_RAM +
2130 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2132 p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
2135 rc = ecore_eth_rxq_start_ramrod(p_hwfn,
2141 if (rc != ECORE_SUCCESS) {
2142 status = PFVF_STATUS_FAILURE;
2143 ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
2144 p_queue->p_rx_cid = OSAL_NULL;
2146 status = PFVF_STATUS_SUCCESS;
2147 vf->num_active_rxqs++;
2151 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2156 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2157 struct ecore_tunnel_info *p_tun,
2158 u16 tunn_feature_mask)
2160 p_resp->tunn_feature_mask = tunn_feature_mask;
2161 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2162 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2163 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2164 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2165 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2166 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2167 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2168 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2169 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2170 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2171 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2172 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2176 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2177 struct ecore_tunn_update_type *p_tun,
2178 enum ecore_tunn_mode mask, u8 tun_cls)
2180 if (p_req->tun_mode_update_mask & (1 << mask)) {
2181 p_tun->b_update_mode = true;
2183 if (p_req->tunn_mode & (1 << mask))
2184 p_tun->b_mode_enabled = true;
2187 p_tun->tun_cls = tun_cls;
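/* Editor's illustration of the decode above, assuming a well-formed request:
 * a VF enabling VXLAN sets both bits,
 *
 *	p_req->tun_mode_update_mask |= 1 << ECORE_MODE_VXLAN_TUNN;
 *	p_req->tunn_mode |= 1 << ECORE_MODE_VXLAN_TUNN;
 *
 * yielding b_update_mode = true and b_mode_enabled = true, while a VF
 * disabling VXLAN sets only the update-mask bit, so b_update_mode becomes
 * true but b_mode_enabled stays false.
 */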
2191 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2192 struct ecore_tunn_update_type *p_tun,
2193 struct ecore_tunn_update_udp_port *p_port,
2194 enum ecore_tunn_mode mask,
2195 u8 tun_cls, u8 update_port, u16 port)
2198 p_port->b_update_port = true;
2199 p_port->port = port;
2202 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2206 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2208 bool b_update_requested = false;
2210 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2211 p_req->update_geneve_port || p_req->update_vxlan_port)
2212 b_update_requested = true;
2214 return b_update_requested;
2217 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2218 struct ecore_ptt *p_ptt,
2219 struct ecore_vf_info *p_vf)
2221 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2222 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2223 struct pfvf_update_tunn_param_tlv *p_resp;
2224 struct vfpf_update_tunn_param_tlv *p_req;
2225 enum _ecore_status_t rc = ECORE_SUCCESS;
2226 u8 status = PFVF_STATUS_SUCCESS;
2227 bool b_update_required = false;
2228 struct ecore_tunnel_info tunn;
2229 u16 tunn_feature_mask = 0;
2231 mbx->offset = (u8 *)mbx->reply_virt;
2233 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2234 p_req = &mbx->req_virt->tunn_param_update;
2236 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2237 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2238 "No tunnel update requested by VF\n");
2239 status = PFVF_STATUS_FAILURE;
2243 tunn.b_update_rx_cls = p_req->update_tun_cls;
2244 tunn.b_update_tx_cls = p_req->update_tun_cls;
2246 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2247 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2248 p_req->update_vxlan_port,
2250 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2251 ECORE_MODE_L2GENEVE_TUNN,
2252 p_req->l2geneve_clss,
2253 p_req->update_geneve_port,
2254 p_req->geneve_port);
2255 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2256 ECORE_MODE_IPGENEVE_TUNN,
2257 p_req->ipgeneve_clss);
2258 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2259 ECORE_MODE_L2GRE_TUNN,
2261 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2262 ECORE_MODE_IPGRE_TUNN,
2265 /* If the PF modifies the VF's request, it should still
2266 * return an error in case of a partial or modified
2267 * configuration, as opposed to the requested one.
2269 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2270 &b_update_required, &tunn);
2272 if (rc != ECORE_SUCCESS)
2273 status = PFVF_STATUS_FAILURE;
2275 /* Check whether the ECORE client is willing to update anything */
2276 if (b_update_required) {
2277 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2278 ECORE_SPQ_MODE_EBLOCK,
2280 if (rc != ECORE_SUCCESS)
2281 status = PFVF_STATUS_FAILURE;
2285 p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
2286 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2288 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2289 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2290 sizeof(struct channel_list_end_tlv));
2292 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2295 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2296 struct ecore_ptt *p_ptt,
2297 struct ecore_vf_info *p_vf,
2300 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2301 struct pfvf_start_queue_resp_tlv *p_tlv;
2302 bool b_legacy = false;
2305 mbx->offset = (u8 *)mbx->reply_virt;
2307 /* Taking a bigger struct instead of adding a TLV to list was a
2308 * mistake, but one which we're now stuck with, as some older
2309 * clients assume the size of the previous response.
2311 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2312 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2316 length = sizeof(*p_tlv);
2318 length = sizeof(struct pfvf_def_resp_tlv);
2320 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2322 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2323 sizeof(struct channel_list_end_tlv));
2325 /* Update the TLV with the response */
2326 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2327 u16 qid = mbx->req_virt->start_txq.tx_qid;
2329 p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
2333 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2336 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2337 struct ecore_ptt *p_ptt,
2338 struct ecore_vf_info *vf)
2340 struct ecore_queue_start_common_params params;
2341 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2342 u8 status = PFVF_STATUS_NO_RESOURCE;
2343 struct ecore_vf_q_info *p_queue;
2344 struct vfpf_start_txq_tlv *req;
2345 enum _ecore_status_t rc;
2348 OSAL_MEMSET(&params, 0, sizeof(params));
2349 req = &mbx->req_virt->start_txq;
2351 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
2352 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2355 /* Acquire a new queue-cid */
2356 p_queue = &vf->vf_queues[req->tx_qid];
2358 params.queue_id = p_queue->fw_tx_qid;
2359 params.vport_id = vf->vport_id;
2360 params.stats_id = vf->abs_vf_id + 0x10;
2361 params.sb = req->hw_sb;
2362 params.sb_idx = req->sb_index;
2364 p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
2369 if (p_queue->p_tx_cid == OSAL_NULL)
2372 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2373 vf->relative_vf_id);
2374 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
2375 req->pbl_addr, req->pbl_size, pq);
2376 if (rc != ECORE_SUCCESS) {
2377 status = PFVF_STATUS_FAILURE;
2378 ecore_eth_queue_cid_release(p_hwfn,
2380 p_queue->p_tx_cid = OSAL_NULL;
2382 status = PFVF_STATUS_SUCCESS;
2386 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
2389 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2390 struct ecore_vf_info *vf,
2393 bool cqe_completion)
2395 struct ecore_vf_q_info *p_queue;
2396 enum _ecore_status_t rc = ECORE_SUCCESS;
2399 if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2402 for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2403 p_queue = &vf->vf_queues[qid];
2405 if (!p_queue->p_rx_cid)
2408 rc = ecore_eth_rx_queue_stop(p_hwfn,
2410 false, cqe_completion);
2411 if (rc != ECORE_SUCCESS)
2414 vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
2415 vf->num_active_rxqs--;
2421 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2422 struct ecore_vf_info *vf,
2423 u16 txq_id, u8 num_txqs)
2425 enum _ecore_status_t rc = ECORE_SUCCESS;
2426 struct ecore_vf_q_info *p_queue;
2429 if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2432 for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2433 p_queue = &vf->vf_queues[qid];
2434 if (!p_queue->p_tx_cid)
2437 rc = ecore_eth_tx_queue_stop(p_hwfn,
2439 if (rc != ECORE_SUCCESS)
2442 p_queue->p_tx_cid = OSAL_NULL;
2447 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2448 struct ecore_ptt *p_ptt,
2449 struct ecore_vf_info *vf)
2451 u16 length = sizeof(struct pfvf_def_resp_tlv);
2452 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2453 u8 status = PFVF_STATUS_SUCCESS;
2454 struct vfpf_stop_rxqs_tlv *req;
2455 enum _ecore_status_t rc;
2457 /* We give the option of starting from qid != 0; in this case we
2458 * need to make sure that qid + num_qs doesn't exceed the actual
2459 * number of queues that exist.
2461 req = &mbx->req_virt->stop_rxqs;
2462 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2463 req->num_rxqs, req->cqe_completion);
2465 status = PFVF_STATUS_FAILURE;
2467 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2471 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2472 struct ecore_ptt *p_ptt,
2473 struct ecore_vf_info *vf)
2475 u16 length = sizeof(struct pfvf_def_resp_tlv);
2476 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2477 u8 status = PFVF_STATUS_SUCCESS;
2478 struct vfpf_stop_txqs_tlv *req;
2479 enum _ecore_status_t rc;
2481 /* We give the option of starting from qid != 0; in this case we
2482 * need to make sure that qid + num_qs doesn't exceed the actual
2483 * number of queues that exist.
2485 req = &mbx->req_virt->stop_txqs;
2486 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2488 status = PFVF_STATUS_FAILURE;
2490 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2494 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2495 struct ecore_ptt *p_ptt,
2496 struct ecore_vf_info *vf)
2498 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2499 u16 length = sizeof(struct pfvf_def_resp_tlv);
2500 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2501 struct vfpf_update_rxq_tlv *req;
2502 u8 status = PFVF_STATUS_FAILURE;
2503 u8 complete_event_flg;
2504 u8 complete_cqe_flg;
2506 enum _ecore_status_t rc;
2509 req = &mbx->req_virt->update_rxq;
2510 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2511 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2513 /* Validate inputs */
2514 if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
2515 !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
2516 DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2517 vf->relative_vf_id, req->rx_qid, req->num_rxqs);
2521 for (i = 0; i < req->num_rxqs; i++) {
2522 qid = req->rx_qid + i;
2524 if (!vf->vf_queues[qid].p_rx_cid) {
2526 "VF[%d] rx_qid = %d isn`t active!\n",
2527 vf->relative_vf_id, qid);
2531 handlers[i] = vf->vf_queues[qid].p_rx_cid;
2534 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2538 ECORE_SPQ_MODE_EBLOCK,
2543 status = PFVF_STATUS_SUCCESS;
2545 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
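/* Editor's sketch (not driver code): the VF->PF mailbox buffer is a chain of
 * TLVs, each headed by struct channel_tlv { type; length; } and terminated
 * by a CHANNEL_TLV_LIST_END entry. A minimal walk over a well-formed buffer:
 *
 *	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
 *
 *	while (p_tlv->type != CHANNEL_TLV_LIST_END)
 *		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
 *
 * The search routine below does the same walk while also rejecting
 * zero-length TLVs and chains that overrun TLV_BUFFER_SIZE.
 */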
2549 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2550 void *p_tlvs_list, u16 req_type)
2552 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2556 if (!p_tlv->length) {
2557 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2561 if (p_tlv->type == req_type) {
2562 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2563 "Extended tlv type %s, length %d found\n",
2564 ecore_channel_tlvs_string[p_tlv->type],
2569 len += p_tlv->length;
2570 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2572 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2573 DP_NOTICE(p_hwfn, true,
2574 "TLVs has overrun the buffer size\n");
2577 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2583 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2584 struct ecore_sp_vport_update_params *p_data,
2585 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2587 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2588 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2590 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2591 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2595 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2596 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2597 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2598 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2599 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2603 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2604 struct ecore_sp_vport_update_params *p_data,
2605 struct ecore_vf_info *p_vf,
2606 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2608 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2609 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2611 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2612 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2616 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2618 /* Ignore the VF request if we're forcing a vlan */
2619 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2620 p_data->update_inner_vlan_removal_flg = 1;
2621 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2624 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2628 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2629 struct ecore_sp_vport_update_params *p_data,
2630 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2632 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2633 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2635 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2636 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2637 if (!p_tx_switch_tlv)
2641 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2642 DP_NOTICE(p_hwfn, false,
2643 "FPGA: Ignore tx-switching configuration originating"
2649 p_data->update_tx_switching_flg = 1;
2650 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2651 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2655 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2656 struct ecore_sp_vport_update_params *p_data,
2657 struct ecore_iov_vf_mbx *p_mbx,
2660 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2661 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2663 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2664 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2668 p_data->update_approx_mcast_flg = 1;
2669 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2670 sizeof(unsigned long) *
2671 ETH_MULTICAST_MAC_BINS_IN_REGS);
2672 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2676 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2677 struct ecore_sp_vport_update_params *p_data,
2678 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2680 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2681 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2682 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2684 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2685 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2689 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2690 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2691 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2692 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2693 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2697 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2698 struct ecore_sp_vport_update_params *p_data,
2699 struct ecore_iov_vf_mbx *p_mbx,
2702 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2703 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2705 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2706 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2707 if (!p_accept_any_vlan)
2710 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2711 p_data->update_accept_any_vlan_flg =
2712 p_accept_any_vlan->update_accept_any_vlan_flg;
2713 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2717 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2718 struct ecore_vf_info *vf,
2719 struct ecore_sp_vport_update_params *p_data,
2720 struct ecore_rss_params *p_rss,
2721 struct ecore_iov_vf_mbx *p_mbx,
2722 u16 *tlvs_mask, u16 *tlvs_accepted)
2724 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2725 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2726 bool b_reject = false;
2730 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2731 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2733 p_data->rss_params = OSAL_NULL;
2737 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2739 p_rss->update_rss_config =
2740 !!(p_rss_tlv->update_rss_flags &
2741 VFPF_UPDATE_RSS_CONFIG_FLAG);
2742 p_rss->update_rss_capabilities =
2743 !!(p_rss_tlv->update_rss_flags &
2744 VFPF_UPDATE_RSS_CAPS_FLAG);
2745 p_rss->update_rss_ind_table =
2746 !!(p_rss_tlv->update_rss_flags &
2747 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2748 p_rss->update_rss_key =
2749 !!(p_rss_tlv->update_rss_flags &
2750 VFPF_UPDATE_RSS_KEY_FLAG);
2752 p_rss->rss_enable = p_rss_tlv->rss_enable;
2753 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2754 p_rss->rss_caps = p_rss_tlv->rss_caps;
2755 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2756 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2757 sizeof(p_rss->rss_key));
2759 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2760 (1 << p_rss_tlv->rss_table_size_log));
2762 for (i = 0; i < table_size; i++) {
2763 q_idx = p_rss_tlv->rss_ind_table[i];
2764 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx)) {
2765 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2766 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2767 vf->relative_vf_id, q_idx);
2772 if (!vf->vf_queues[q_idx].p_rx_cid) {
2773 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2774 "VF[%d]: Omitting RSS due to inactive queue %08x\n",
2775 vf->relative_vf_id, q_idx);
2780 p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
2783 p_data->rss_params = p_rss;
2785 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2787 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
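/* Editor's note: the VF's indirection table arrives as VF-relative Rx queue
 * indices; after validation each entry is replaced by the matching queue-cid
 * pointer, e.g.
 *
 *	p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
 *
 * A single invalid or inactive qid rejects the whole RSS TLV (b_reject),
 * which is why this TLV updates tlvs_mask but not necessarily tlvs_accepted.
 */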
2791 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2792 struct ecore_vf_info *vf,
2793 struct ecore_sp_vport_update_params *p_data,
2794 struct ecore_sge_tpa_params *p_sge_tpa,
2795 struct ecore_iov_vf_mbx *p_mbx,
2798 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2799 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2801 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2802 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2804 if (!p_sge_tpa_tlv) {
2805 p_data->sge_tpa_params = OSAL_NULL;
2809 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
2811 p_sge_tpa->update_tpa_en_flg =
2812 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2813 p_sge_tpa->update_tpa_param_flg =
2814 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2815 VFPF_UPDATE_TPA_PARAM_FLAG);
2817 p_sge_tpa->tpa_ipv4_en_flg =
2818 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2819 p_sge_tpa->tpa_ipv6_en_flg =
2820 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2821 p_sge_tpa->tpa_pkt_split_flg =
2822 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2823 p_sge_tpa->tpa_hdr_data_split_flg =
2824 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2825 p_sge_tpa->tpa_gro_consistent_flg =
2826 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2828 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2829 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2830 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2831 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2832 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2834 p_data->sge_tpa_params = p_sge_tpa;
2836 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
2839 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
2840 struct ecore_ptt *p_ptt,
2841 struct ecore_vf_info *vf)
2843 struct ecore_rss_params *p_rss_params = OSAL_NULL;
2844 struct ecore_sp_vport_update_params params;
2845 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2846 struct ecore_sge_tpa_params sge_tpa_params;
2847 u16 tlvs_mask = 0, tlvs_accepted = 0;
2848 u8 status = PFVF_STATUS_SUCCESS;
2850 enum _ecore_status_t rc;
2852 /* Validate the PF can send such a request */
2853 if (!vf->vport_instance) {
2854 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2855 "No VPORT instance available for VF[%d],"
2856 " failing vport update\n",
2858 status = PFVF_STATUS_FAILURE;
2862 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
2863 if (p_rss_params == OSAL_NULL) {
2864 status = PFVF_STATUS_FAILURE;
2868 OSAL_MEMSET(&params, 0, sizeof(params));
2869 params.opaque_fid = vf->opaque_fid;
2870 params.vport_id = vf->vport_id;
2871 params.rss_params = OSAL_NULL;
2873 /* Search for extended tlvs list and update values
2874 * from VF in struct ecore_sp_vport_update_params.
2876 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2877 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2878 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2879 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2880 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2881 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2882 ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2883 &sge_tpa_params, mbx, &tlvs_mask);
2885 tlvs_accepted = tlvs_mask;
2887 /* Some of the extended TLVs need to be validated first; in that case,
2888 * they can update the mask without updating the accepted [so that the
2889 * PF can communicate to the VF that it has rejected its request].
2891 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
2892 mbx, &tlvs_mask, &tlvs_accepted);
2894 /* Just log a message if there isn't a single extended TLV in the buffer.
2895 * Once all features of the vport-update ramrod are requested by the VF
2896 * as extended TLVs in the buffer, an error can be returned in the
2897 * response if no extended TLV is present in the buffer.
2899 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
2900 &params, &tlvs_accepted) !=
2903 status = PFVF_STATUS_NOT_SUPPORTED;
2907 if (!tlvs_accepted) {
2909 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2910 "Upper-layer prevents said VF"
2911 " configuration\n");
2913 DP_NOTICE(p_hwfn, true,
2914 "No feature tlvs found for vport update\n");
2915 status = PFVF_STATUS_NOT_SUPPORTED;
2919 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
2923 status = PFVF_STATUS_FAILURE;
2926 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
2927 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2928 tlvs_mask, tlvs_accepted);
2929 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2932 static enum _ecore_status_t
2933 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
2934 struct ecore_vf_info *p_vf,
2935 struct ecore_filter_ucast *p_params)
2939 /* First remove entries and then add new ones */
2940 if (p_params->opcode == ECORE_FILTER_REMOVE) {
2941 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2942 if (p_vf->shadow_config.vlans[i].used &&
2943 p_vf->shadow_config.vlans[i].vid ==
2945 p_vf->shadow_config.vlans[i].used = false;
2948 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2949 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2950 "VF [%d] - Tries to remove a non-existing"
2952 p_vf->relative_vf_id);
2955 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
2956 p_params->opcode == ECORE_FILTER_FLUSH) {
2957 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2958 p_vf->shadow_config.vlans[i].used = false;
2961 /* In forced mode, we're willing to remove entries - but we don't add
2964 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
2965 return ECORE_SUCCESS;
2967 if (p_params->opcode == ECORE_FILTER_ADD ||
2968 p_params->opcode == ECORE_FILTER_REPLACE) {
2969 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2970 if (p_vf->shadow_config.vlans[i].used)
2973 p_vf->shadow_config.vlans[i].used = true;
2974 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2978 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2979 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2980 "VF [%d] - Tries to configure more than %d"
2982 p_vf->relative_vf_id,
2983 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
2988 return ECORE_SUCCESS;
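/* Editor's sketch: the shadow VLAN table is a fixed array of
 * ECORE_ETH_VF_NUM_VLAN_FILTERS + 1 entries, each carrying { used, vid }.
 * REMOVE clears the matching entry, REPLACE/FLUSH clear all entries, and
 * ADD/REPLACE then claim the first free slot - unless a forced VLAN is
 * active, in which case only removals are honored, as seen above.
 */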
2991 static enum _ecore_status_t
2992 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
2993 struct ecore_vf_info *p_vf,
2994 struct ecore_filter_ucast *p_params)
2996 char empty_mac[ETH_ALEN];
2999 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3001 /* If we're in forced-mode, we don't allow any change */
3002 /* TODO - this would change if we were ever to implement logic for
3003 * removing a forced MAC altogether [in which case, like for vlans,
3004 * we should be able to re-trace previous configuration.
3006 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3007 return ECORE_SUCCESS;
3009 /* First remove entries and then add new ones */
3010 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3011 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3012 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3013 p_params->mac, ETH_ALEN)) {
3014 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3020 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3021 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3022 "MAC isn't configured\n");
3025 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3026 p_params->opcode == ECORE_FILTER_FLUSH) {
3027 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3028 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3031 /* List the new MAC address */
3032 if (p_params->opcode != ECORE_FILTER_ADD &&
3033 p_params->opcode != ECORE_FILTER_REPLACE)
3034 return ECORE_SUCCESS;
3036 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3037 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3038 empty_mac, ETH_ALEN)) {
3039 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3040 p_params->mac, ETH_ALEN);
3041 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3042 "Added MAC at %d entry in shadow\n", i);
3047 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3048 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3049 "No available place for MAC\n");
3053 return ECORE_SUCCESS;
3056 static enum _ecore_status_t
3057 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3058 struct ecore_vf_info *p_vf,
3059 struct ecore_filter_ucast *p_params)
3061 enum _ecore_status_t rc = ECORE_SUCCESS;
3063 if (p_params->type == ECORE_FILTER_MAC) {
3064 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3065 if (rc != ECORE_SUCCESS)
3069 if (p_params->type == ECORE_FILTER_VLAN)
3070 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3075 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3076 struct ecore_ptt *p_ptt,
3077 struct ecore_vf_info *vf)
3079 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3080 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3081 struct vfpf_ucast_filter_tlv *req;
3082 u8 status = PFVF_STATUS_SUCCESS;
3083 struct ecore_filter_ucast params;
3084 enum _ecore_status_t rc;
3086 /* Prepare the unicast filter params */
3087 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3088 req = &mbx->req_virt->ucast_filter;
3089 params.opcode = (enum ecore_filter_opcode)req->opcode;
3090 params.type = (enum ecore_filter_ucast_type)req->type;
3092 /* @@@TBD - We might need logic on HV side in determining this */
3093 params.is_rx_filter = 1;
3094 params.is_tx_filter = 1;
3095 params.vport_to_remove_from = vf->vport_id;
3096 params.vport_to_add_to = vf->vport_id;
3097 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3098 params.vlan = req->vlan;
3100 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3101 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3102 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3103 vf->abs_vf_id, params.opcode, params.type,
3104 params.is_rx_filter ? "RX" : "",
3105 params.is_tx_filter ? "TX" : "",
3106 params.vport_to_add_to,
3107 params.mac[0], params.mac[1], params.mac[2],
3108 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3110 if (!vf->vport_instance) {
3111 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3112 "No VPORT instance available for VF[%d],"
3113 " failing ucast MAC configuration\n",
3115 status = PFVF_STATUS_FAILURE;
3119 /* Update shadow copy of the VF configuration */
3120 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3122 status = PFVF_STATUS_FAILURE;
3126 /* Determine if the unicast filtering is acceptable to the PF */
3127 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3128 (params.type == ECORE_FILTER_VLAN ||
3129 params.type == ECORE_FILTER_MAC_VLAN)) {
3130 /* Once VLAN is forced or PVID is set, do not allow
3131 * to add/replace any further VLANs.
3133 if (params.opcode == ECORE_FILTER_ADD ||
3134 params.opcode == ECORE_FILTER_REPLACE)
3135 status = PFVF_STATUS_FORCED;
3139 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3140 (params.type == ECORE_FILTER_MAC ||
3141 params.type == ECORE_FILTER_MAC_VLAN)) {
3142 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3143 (params.opcode != ECORE_FILTER_ADD &&
3144 params.opcode != ECORE_FILTER_REPLACE))
3145 status = PFVF_STATUS_FORCED;
3149 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3150 if (rc == ECORE_EXISTS) {
3152 } else if (rc == ECORE_INVAL) {
3153 status = PFVF_STATUS_FAILURE;
3157 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3158 ECORE_SPQ_MODE_CB, OSAL_NULL);
3160 status = PFVF_STATUS_FAILURE;
3163 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3164 sizeof(struct pfvf_def_resp_tlv), status);
3167 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3168 struct ecore_ptt *p_ptt,
3169 struct ecore_vf_info *vf)
3174 for (i = 0; i < vf->num_sbs; i++)
3175 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3177 vf->opaque_fid, false);
3179 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3180 sizeof(struct pfvf_def_resp_tlv),
3181 PFVF_STATUS_SUCCESS);
3184 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3185 struct ecore_ptt *p_ptt,
3186 struct ecore_vf_info *vf)
3188 u16 length = sizeof(struct pfvf_def_resp_tlv);
3189 u8 status = PFVF_STATUS_SUCCESS;
3191 /* Disable Interrupts for VF */
3192 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3194 /* Reset Permission table */
3195 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3197 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3201 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3202 struct ecore_ptt *p_ptt,
3203 struct ecore_vf_info *p_vf)
3205 u16 length = sizeof(struct pfvf_def_resp_tlv);
3206 u8 status = PFVF_STATUS_SUCCESS;
3207 enum _ecore_status_t rc = ECORE_SUCCESS;
3209 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3211 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3212 /* Stopping the VF */
3213 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3216 if (rc != ECORE_SUCCESS) {
3217 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3219 status = PFVF_STATUS_FAILURE;
3222 p_vf->state = VF_STOPPED;
3225 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3229 static enum _ecore_status_t
3230 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3231 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3236 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3238 for (cnt = 0; cnt < 50; cnt++) {
3239 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3244 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3248 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3249 p_vf->abs_vf_id, val);
3250 return ECORE_TIMEOUT;
3253 return ECORE_SUCCESS;
3256 static enum _ecore_status_t
3257 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3258 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3260 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3263 /* Read initial consumers & producers */
3264 for (i = 0; i < MAX_NUM_VOQS; i++) {
3267 cons[i] = ecore_rd(p_hwfn, p_ptt,
3268 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3270 prod = ecore_rd(p_hwfn, p_ptt,
3271 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3273 distance[i] = prod - cons[i];
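/* Editor's note: both counters are free-running u32s, so this distance
 * math relies on modular arithmetic. E.g. cons = 0xfffffff0 and
 * prod = 0x00000010 give prod - cons = 0x20, the correct number of
 * outstanding blocks; the 'distance[i] > tmp - cons[i]' check below stays
 * valid across wraparound for the same reason.
 */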
3276 /* Wait for consumers to pass the producers */
3278 for (cnt = 0; cnt < 50; cnt++) {
3279 for (; i < MAX_NUM_VOQS; i++) {
3282 tmp = ecore_rd(p_hwfn, p_ptt,
3283 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3285 if (distance[i] > tmp - cons[i])
3289 if (i == MAX_NUM_VOQS)
3296 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3297 p_vf->abs_vf_id, i);
3298 return ECORE_TIMEOUT;
3301 return ECORE_SUCCESS;
3304 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3305 struct ecore_vf_info *p_vf,
3306 struct ecore_ptt *p_ptt)
3308 enum _ecore_status_t rc;
3310 /* TODO - add SRC and TM polling once we add storage IOV */
3312 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3316 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3320 return ECORE_SUCCESS;
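/* Editor's sketch: pending FLRs are tracked per relative VF id in a u64
 * bitmap, manipulated with the usual index/bit split:
 *
 *	set:   p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
 *	test:  p_flr[rel_vf_id / 64] & (1ULL << (rel_vf_id % 64));
 *	clear: p_flr[rel_vf_id / 64] &= ~(1ULL << (rel_vf_id % 64));
 *
 * The ack bitmap later handed to the MFW uses the same scheme with u32
 * words and the absolute VF id (vfid / 32, vfid % 32).
 */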
3323 static enum _ecore_status_t
3324 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3325 struct ecore_ptt *p_ptt,
3326 u16 rel_vf_id, u32 *ack_vfs)
3328 struct ecore_vf_info *p_vf;
3329 enum _ecore_status_t rc = ECORE_SUCCESS;
3331 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3333 return ECORE_SUCCESS;
3335 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3336 (1ULL << (rel_vf_id % 64))) {
3337 u16 vfid = p_vf->abs_vf_id;
3339 /* TODO - should we lock channel? */
3341 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3342 "VF[%d] - Handling FLR\n", vfid);
3344 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3346 /* If VF isn't active, no need for anything but SW */
3350 /* TODO - what to do in case of failure? */
3351 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3352 if (rc != ECORE_SUCCESS)
3355 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3357 /* TODO - what now? What a mess... */
3358 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3362 /* Workaround to make VF-PF channel ready, as FW
3363 * doesn't do that as a part of FLR.
3366 GTT_BAR0_MAP_REG_USDM_RAM +
3367 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3369 /* VF_STOPPED has to be set only after final cleanup
3370 * but prior to re-enabling the VF.
3372 p_vf->state = VF_STOPPED;
3374 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3376 /* TODO - again, a mess... */
3377 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
3382 /* Mark VF for ack and clean pending state */
3383 if (p_vf->state == VF_RESET)
3384 p_vf->state = VF_STOPPED;
3385 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3386 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3387 ~(1ULL << (rel_vf_id % 64));
3388 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3389 ~(1ULL << (rel_vf_id % 64));
3395 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3396 struct ecore_ptt *p_ptt)
3398 u32 ack_vfs[VF_MAX_STATIC / 32];
3399 enum _ecore_status_t rc = ECORE_SUCCESS;
3402 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3404 /* Since BRB <-> PRS interface can't be tested as part of the flr
3405 * polling due to HW limitations, simply sleep a bit. And since
3406 * there's no need to wait per-vf, do it before looping.
3410 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3411 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3413 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3417 enum _ecore_status_t
3418 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3419 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3421 u32 ack_vfs[VF_MAX_STATIC / 32];
3422 enum _ecore_status_t rc = ECORE_SUCCESS;
3424 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3426 /* Wait instead of polling the BRB <-> PRS interface */
3429 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3431 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3435 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3440 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3441 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3442 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3443 "[%08x,...,%08x]: %08x\n",
3444 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3446 if (!p_hwfn->p_dev->p_iov_info) {
3447 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3452 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3453 struct ecore_vf_info *p_vf;
3456 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3460 vfid = p_vf->abs_vf_id;
3461 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3462 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3463 u16 rel_vf_id = p_vf->relative_vf_id;
3465 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3466 "VF[%d] [rel %d] got FLR-ed\n",
3469 p_vf->state = VF_RESET;
3471 /* No need to lock here, since pending_flr should
3472 * only change here and before ACKing the MFW. Since
3473 * the MFW will not trigger an additional attention for
3474 * VF FLR until we ACK, we're safe.
3476 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3484 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3486 struct ecore_mcp_link_params *p_params,
3487 struct ecore_mcp_link_state *p_link,
3488 struct ecore_mcp_link_capabilities *p_caps)
3490 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3491 struct ecore_bulletin_content *p_bulletin;
3496 p_bulletin = p_vf->bulletin.p_virt;
3499 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3501 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3503 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3506 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3507 struct ecore_ptt *p_ptt, int vfid)
3509 struct ecore_iov_vf_mbx *mbx;
3510 struct ecore_vf_info *p_vf;
3512 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3516 mbx = &p_vf->vf_mbx;
3518 /* ecore_iov_process_mbx_request */
3521 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3523 mbx->first_tlv = mbx->req_virt->first_tlv;
3525 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3526 p_vf->relative_vf_id,
3527 mbx->first_tlv.tl.type);
3529 /* Lock the per vf op mutex and note the locker's identity.
3530 * The unlock will take place in mbx response.
3532 ecore_iov_lock_vf_pf_channel(p_hwfn,
3533 p_vf, mbx->first_tlv.tl.type);
3535 /* check if tlv type is known */
3536 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3537 !p_vf->b_malicious) {
3538 /* switch on the opcode */
3539 switch (mbx->first_tlv.tl.type) {
3540 case CHANNEL_TLV_ACQUIRE:
3541 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3543 case CHANNEL_TLV_VPORT_START:
3544 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3546 case CHANNEL_TLV_VPORT_TEARDOWN:
3547 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3549 case CHANNEL_TLV_START_RXQ:
3550 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3552 case CHANNEL_TLV_START_TXQ:
3553 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3555 case CHANNEL_TLV_STOP_RXQS:
3556 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3558 case CHANNEL_TLV_STOP_TXQS:
3559 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3561 case CHANNEL_TLV_UPDATE_RXQ:
3562 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3564 case CHANNEL_TLV_VPORT_UPDATE:
3565 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3567 case CHANNEL_TLV_UCAST_FILTER:
3568 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3570 case CHANNEL_TLV_CLOSE:
3571 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3573 case CHANNEL_TLV_INT_CLEANUP:
3574 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3576 case CHANNEL_TLV_RELEASE:
3577 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3579 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3580 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3583 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3584 /* If we've received a message from a VF we consider malicious
3585 * we ignore the message unless it's one for RELEASE, in which
3586 * case we'll give it the benefit of the doubt, allowing the
3587 * next loaded driver to start again.
3589 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
3590 /* TODO - initiate FLR, remove malicious indication */
3591 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3592 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
3595 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3596 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3597 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3600 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3601 mbx->first_tlv.tl.type,
3602 sizeof(struct pfvf_def_resp_tlv),
3603 PFVF_STATUS_MALICIOUS);
3605 /* unknown TLV - this may belong to a VF driver from the future
3606 * - a version written after this PF driver was written, which
3607 * supports features unknown as of yet. Too bad since we don't
3608 * support them. Or this may be because someone wrote a crappy
3609 * VF driver and is sending garbage over the channel.
3611 DP_NOTICE(p_hwfn, false,
3612 "VF[%02x]: unknown TLV. type %04x length %04x"
3613 " padding %08x reply address %lu\n",
3615 mbx->first_tlv.tl.type,
3616 mbx->first_tlv.tl.length,
3617 mbx->first_tlv.padding,
3618 (unsigned long)mbx->first_tlv.reply_address);
3620 /* Try replying in case reply address matches the acquisition's
3623 if (p_vf->acquire.first_tlv.reply_address &&
3624 (mbx->first_tlv.reply_address ==
3625 p_vf->acquire.first_tlv.reply_address))
3626 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3627 mbx->first_tlv.tl.type,
3628 sizeof(struct pfvf_def_resp_tlv),
3629 PFVF_STATUS_NOT_SUPPORTED);
3631 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3632 "VF[%02x]: Can't respond to TLV -"
3633 " no valid reply address\n",
3637 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
3638 mbx->first_tlv.tl.type);
3640 #ifdef CONFIG_ECORE_SW_CHANNEL
3641 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
3642 mbx->sw_mbx.response_offset = 0;
3646 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
3648 u64 add_bit = 1ULL << (vfid % 64);
3650 /* TODO - add locking mechanisms [no atomics in ecore, so we can't
3651 * add the lock inside the ecore_pf_iov struct].
3653 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3656 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
3659 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3661 /* TODO - Take a lock */
3662 OSAL_MEMCPY(events, p_pending_events,
3663 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3664 OSAL_MEMSET(p_pending_events, 0,
3665 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3668 static struct ecore_vf_info *
3669 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
3671 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
3673 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3674 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3675 "Got indication for VF [abs 0x%08x] that cannot be"
3681 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3684 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
3686 struct regpair *vf_msg)
3688 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
3692 return ECORE_SUCCESS;
3694 /* Record the physical address of the request so that the handler
3695 * can later copy the message from it.
3697 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3699 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
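/* Editor's sketch of the request flow, assuming the OSAL callback ends up
 * invoking ecore_iov_copy_vf_msg(): the EQE carries only the guest-physical
 * address of the VF's request, recorded above in pending_req; the handler
 * later DMAEs the actual TLV buffer into the PF-side mirror before parsing:
 *
 *	ecore_sriov_vfpf_msg()           - records vf_mbx.pending_req
 *	-> OSAL_PF_VF_MSG()              - schedules PF-side handling
 *	-> ecore_iov_copy_vf_msg()       - DMAE pending_req -> req_phys
 *	-> ecore_iov_process_mbx_req()   - parses req_virt and responds
 */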
3702 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
3703 struct malicious_vf_eqe_data *p_data)
3705 struct ecore_vf_info *p_vf;
3707 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
3713 "VF [%d] - Malicious behavior [%02x]\n",
3714 p_vf->abs_vf_id, p_data->errId);
3716 p_vf->b_malicious = true;
3718 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
3721 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
3724 union event_ring_data *data)
3727 case COMMON_EVENT_VF_PF_CHANNEL:
3728 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
3729 &data->vf_pf_channel.msg_addr);
3730 case COMMON_EVENT_VF_FLR:
3731 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3732 "VF-FLR is still not supported\n");
3733 return ECORE_SUCCESS;
3734 case COMMON_EVENT_MALICIOUS_VF:
3735 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3736 return ECORE_SUCCESS;
3738 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
3744 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3746 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3747 (1ULL << (rel_vf_id % 64)));
3750 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3752 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
3758 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3759 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
3763 return E4_MAX_NUM_VFS;
3766 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
3767 struct ecore_ptt *ptt, int vfid)
3769 struct ecore_dmae_params params;
3770 struct ecore_vf_info *vf_info;
3772 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3776 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
3777 params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
3778 params.src_vfid = vf_info->abs_vf_id;
3780 if (ecore_dmae_host2host(p_hwfn, ptt,
3781 vf_info->vf_mbx.pending_req,
3782 vf_info->vf_mbx.req_phys,
3783 sizeof(union vfpf_tlvs) / 4, &params)) {
3784 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3785 "Failed to copy message from VF 0x%02x\n", vfid);
3790 return ECORE_SUCCESS;
3793 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
3796 struct ecore_vf_info *vf_info;
3799 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3801 DP_NOTICE(p_hwfn->p_dev, true,
3802 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3805 if (vf_info->b_malicious) {
3806 DP_NOTICE(p_hwfn->p_dev, false,
3807 "Can't set forced MAC to malicious VF [%d]\n",
3812 feature = 1 << MAC_ADDR_FORCED;
3813 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3815 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3816 /* Forced MAC will disable MAC_ADDR */
3817 vf_info->bulletin.p_virt->valid_bitmap &=
3818 ~(1 << VFPF_BULLETIN_MAC_ADDR);
3820 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
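/* Editor's note: the bulletin's valid_bitmap distinguishes a forced MAC
 * (MAC_ADDR_FORCED, which the VF must adopt; the non-forced bit is cleared
 * above) from a merely suggested one (VFPF_BULLETIN_MAC_ADDR, set by
 * ecore_iov_bulletin_set_mac() below and refused while a forced MAC is in
 * effect). A minimal VF-side consumer, with dev_mac as a hypothetical
 * destination buffer, would do:
 *
 *	if (p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))
 *		OSAL_MEMCPY(dev_mac, p_bulletin->mac, ETH_ALEN);
 */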
3823 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
3826 struct ecore_vf_info *vf_info;
3829 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3831 DP_NOTICE(p_hwfn->p_dev, true,
3832 "Can not set MAC, invalid vfid [%d]\n", vfid);
3835 if (vf_info->b_malicious) {
3836 DP_NOTICE(p_hwfn->p_dev, false,
3837 "Can't set MAC to malicious VF [%d]\n",
3842 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
3843 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3844 "Can not set MAC, Forced MAC is configured\n");
3848 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
3849 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3851 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3853 return ECORE_SUCCESS;
3856 enum _ecore_status_t
3857 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
3858 bool b_untagged_only, int vfid)
3860 struct ecore_vf_info *vf_info;
3863 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3865 DP_NOTICE(p_hwfn->p_dev, true,
3866 "Can not set untagged default, invalid vfid [%d]\n",
3870 if (vf_info->b_malicious) {
3871 DP_NOTICE(p_hwfn->p_dev, false,
3872 "Can't set untagged default to malicious VF [%d]\n",
3877 /* Since this is configurable only during vport-start, don't take it
3878 * if we're past that point.
3880 if (vf_info->state == VF_ENABLED) {
3881 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3882 "Can't support untagged change for vfid[%d] -"
3883 " VF is already active\n",
3888 /* Set configuration; this will later be taken into account during the
3889 * VF initialization.
3891 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
3892 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
3893 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3895 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
3898 return ECORE_SUCCESS;
3901 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
3904 struct ecore_vf_info *vf_info;
3906 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3910 *opaque_fid = vf_info->opaque_fid;
3913 void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
3916 struct ecore_vf_info *vf_info;
3918 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3922 *p_vort_id = vf_info->vport_id;
3925 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
3928 struct ecore_vf_info *vf_info;
3931 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3933 DP_NOTICE(p_hwfn->p_dev, true,
3934 "Can not set forced MAC, invalid vfid [%d]\n",
3938 if (vf_info->b_malicious) {
3939 DP_NOTICE(p_hwfn->p_dev, false,
3940 "Can't set forced vlan to malicious VF [%d]\n",
3945 feature = 1 << VLAN_ADDR_FORCED;
3946 vf_info->bulletin.p_virt->pvid = pvid;
3948 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3950 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3952 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3955 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
3957 struct ecore_vf_info *p_vf_info;
3959 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3963 return !!p_vf_info->vport_instance;
3966 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
3968 struct ecore_vf_info *p_vf_info;
3970 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3974 return p_vf_info->state == VF_STOPPED;
3977 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
3979 struct ecore_vf_info *vf_info;
3981 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3985 return vf_info->spoof_chk;
3988 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
3991 struct ecore_vf_info *vf;
3992 enum _ecore_status_t rc = ECORE_INVAL;
3994 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
3995 DP_NOTICE(p_hwfn, true,
3996 "SR-IOV sanity check failed, can't set spoofchk\n");
4000 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4004 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4005 /* After VF VPORT start PF will configure spoof check */
4006 vf->req_spoofchk_val = val;
4011 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4017 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4019 u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4021 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4022 : ECORE_MAX_VF_CHAINS_PER_PF;
4024 return max_chains_per_vf;
4027 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4029 void **pp_req_virt_addr,
4030 u16 *p_req_virt_size)
4032 struct ecore_vf_info *vf_info =
4033 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4038 if (pp_req_virt_addr)
4039 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4041 if (p_req_virt_size)
4042 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4045 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4047 void **pp_reply_virt_addr,
4048 u16 *p_reply_virt_size)
4050 struct ecore_vf_info *vf_info =
4051 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4056 if (pp_reply_virt_addr)
4057 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4059 if (p_reply_virt_size)
4060 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4063 #ifdef CONFIG_ECORE_SW_CHANNEL
4064 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4067 struct ecore_vf_info *vf_info =
4068 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4073 return &vf_info->vf_mbx.sw_mbx;
4077 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4079 return (length >= sizeof(struct vfpf_first_tlv) &&
4080 (length <= sizeof(union vfpf_tlvs)));
4083 u32 ecore_iov_pfvf_msg_length(void)
4085 return sizeof(union pfvf_tlvs);
4088 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4090 struct ecore_vf_info *p_vf;
4092 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4093 if (!p_vf || !p_vf->bulletin.p_virt)
4096 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4099 return p_vf->bulletin.p_virt->mac;
4102 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4105 struct ecore_vf_info *p_vf;
4107 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4108 if (!p_vf || !p_vf->bulletin.p_virt)
4111 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4114 return p_vf->bulletin.p_virt->pvid;
4117 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4118 struct ecore_ptt *p_ptt,
4121 struct ecore_vf_info *vf;
4123 enum _ecore_status_t rc;
4125 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4130 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4131 if (rc != ECORE_SUCCESS)
4134 return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
4137 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
4140 struct ecore_vf_info *vf;
4144 for_each_hwfn(p_dev, i) {
4145 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
4147 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4148 DP_NOTICE(p_hwfn, true,
4149 "SR-IOV sanity check failed,"
4150 " can't set min rate\n");
4155 vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
4156 vport_id = vf->vport_id;
4158 return ecore_configure_vport_wfq(p_dev, vport_id, rate);
4161 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4162 struct ecore_ptt *p_ptt,
4164 struct ecore_eth_stats *p_stats)
4166 struct ecore_vf_info *vf;
4168 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4172 if (vf->state != VF_ENABLED)
4175 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4176 vf->abs_vf_id + 0x10, false);
4178 return ECORE_SUCCESS;
4181 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4183 struct ecore_vf_info *p_vf;
4185 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4189 return p_vf->num_rxqs;
4192 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4194 struct ecore_vf_info *p_vf;
4196 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4200 return p_vf->num_active_rxqs;
4203 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4205 struct ecore_vf_info *p_vf;
4207 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4214 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4216 struct ecore_vf_info *p_vf;
4218 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4222 return p_vf->num_sbs;
4225 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4227 struct ecore_vf_info *p_vf;
4229 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4233 return (p_vf->state == VF_FREE);
4236 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4239 struct ecore_vf_info *p_vf;
4241 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4245 return (p_vf->state == VF_ACQUIRED);
4248 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4250 struct ecore_vf_info *p_vf;
4252 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4256 return (p_vf->state == VF_ENABLED);
4259 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4262 struct ecore_vf_info *p_vf;
4264 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4268 return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
4271 enum _ecore_status_t
4272 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4274 struct ecore_wfq_data *vf_vp_wfq;
4275 struct ecore_vf_info *vf_info;
4277 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4281 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4283 if (vf_vp_wfq->configured)
4284 return vf_vp_wfq->min_speed;