2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
12 #include "ecore_sriov.h"
13 #include "ecore_status.h"
15 #include "ecore_hw_defs.h"
16 #include "ecore_int.h"
17 #include "ecore_hsi_eth.h"
19 #include "ecore_vfpf_if.h"
20 #include "ecore_rt_defs.h"
21 #include "ecore_init_ops.h"
22 #include "ecore_gtt_reg_addr.h"
23 #include "ecore_iro.h"
24 #include "ecore_mcp.h"
25 #include "ecore_cxt.h"
27 #include "ecore_init_fw_funcs.h"
28 #include "ecore_sp_commands.h"
30 const char *ecore_channel_tlvs_string[] = {
31 "CHANNEL_TLV_NONE", /* ends tlv sequence */
32 "CHANNEL_TLV_ACQUIRE",
33 "CHANNEL_TLV_VPORT_START",
34 "CHANNEL_TLV_VPORT_UPDATE",
35 "CHANNEL_TLV_VPORT_TEARDOWN",
36 "CHANNEL_TLV_START_RXQ",
37 "CHANNEL_TLV_START_TXQ",
38 "CHANNEL_TLV_STOP_RXQ",
39 "CHANNEL_TLV_STOP_TXQ",
40 "CHANNEL_TLV_UPDATE_RXQ",
41 "CHANNEL_TLV_INT_CLEANUP",
43 "CHANNEL_TLV_RELEASE",
44 "CHANNEL_TLV_LIST_END",
45 "CHANNEL_TLV_UCAST_FILTER",
46 "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
47 "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
48 "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
49 "CHANNEL_TLV_VPORT_UPDATE_MCAST",
50 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
51 "CHANNEL_TLV_VPORT_UPDATE_RSS",
52 "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
53 "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
54 "CHANNEL_TLV_UPDATE_TUNN_PARAM",
55 "CHANNEL_TLV_COALESCE_UPDATE",
60 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
61 struct ecore_vf_info *p_vf)
63 struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
64 struct ecore_spq_entry *p_ent = OSAL_NULL;
65 struct ecore_sp_init_data init_data;
66 enum _ecore_status_t rc = ECORE_NOTIMPL;
70 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
71 init_data.cid = ecore_spq_get_cid(p_hwfn);
72 init_data.opaque_fid = p_vf->opaque_fid;
73 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
75 rc = ecore_sp_init_request(p_hwfn, &p_ent,
76 COMMON_RAMROD_VF_START,
77 PROTOCOLID_COMMON, &init_data);
78 if (rc != ECORE_SUCCESS)
81 p_ramrod = &p_ent->ramrod.vf_start;
83 p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
84 p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
86 switch (p_hwfn->hw_info.personality) {
88 p_ramrod->personality = PERSONALITY_ETH;
90 case ECORE_PCI_ETH_ROCE:
91 case ECORE_PCI_ETH_IWARP:
92 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
95 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
96 p_hwfn->hw_info.personality);
100 fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
101 if (fp_minor > ETH_HSI_VER_MINOR &&
102 fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
103 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
104 "VF [%d] - Requested fp hsi %02x.%02x which is"
105 " slightly newer than PF's %02x.%02x; Configuring"
108 ETH_HSI_VER_MAJOR, fp_minor,
109 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
110 fp_minor = ETH_HSI_VER_MINOR;
113 p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
114 p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
116 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
117 "VF[%d] - Starting using HSI %02x.%02x\n",
118 p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
120 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
123 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
127 struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
128 struct ecore_spq_entry *p_ent = OSAL_NULL;
129 struct ecore_sp_init_data init_data;
130 enum _ecore_status_t rc = ECORE_NOTIMPL;
133 OSAL_MEMSET(&init_data, 0, sizeof(init_data));
134 init_data.cid = ecore_spq_get_cid(p_hwfn);
135 init_data.opaque_fid = opaque_vfid;
136 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
138 rc = ecore_sp_init_request(p_hwfn, &p_ent,
139 COMMON_RAMROD_VF_STOP,
140 PROTOCOLID_COMMON, &init_data);
141 if (rc != ECORE_SUCCESS)
144 p_ramrod = &p_ent->ramrod.vf_stop;
146 p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
148 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
151 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
152 bool b_enabled_only, bool b_non_malicious)
154 if (!p_hwfn->pf_iov_info) {
155 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
159 if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
163 if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
167 if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
174 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
178 struct ecore_vf_info *vf = OSAL_NULL;
180 if (!p_hwfn->pf_iov_info) {
181 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
185 if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
186 b_enabled_only, false))
187 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
189 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
195 static struct ecore_queue_cid *
196 ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
197 struct ecore_vf_info *p_vf,
198 struct ecore_vf_queue *p_queue)
202 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
203 if (p_queue->cids[i].p_cid &&
204 !p_queue->cids[i].b_is_tx)
205 return p_queue->cids[i].p_cid;
211 enum ecore_iov_validate_q_mode {
212 ECORE_IOV_VALIDATE_Q_NA,
213 ECORE_IOV_VALIDATE_Q_ENABLE,
214 ECORE_IOV_VALIDATE_Q_DISABLE,
217 static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
218 struct ecore_vf_info *p_vf,
220 enum ecore_iov_validate_q_mode mode,
225 if (mode == ECORE_IOV_VALIDATE_Q_NA)
228 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
229 struct ecore_vf_queue_cid *p_qcid;
231 p_qcid = &p_vf->vf_queues[qid].cids[i];
233 if (p_qcid->p_cid == OSAL_NULL)
236 if (p_qcid->b_is_tx != b_is_tx)
239 /* Found. It's enabled. */
240 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
244 /* In case we haven't found any valid cid, then it's disabled */
244 return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
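/* Reader's summary of the mode semantics (not upstream text):
 * ECORE_IOV_VALIDATE_Q_NA      - any state is acceptable,
 * ECORE_IOV_VALIDATE_Q_ENABLE  - a matching cid must exist,
 * ECORE_IOV_VALIDATE_Q_DISABLE - no matching cid may exist.
 */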
247 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
248 struct ecore_vf_info *p_vf,
250 enum ecore_iov_validate_q_mode mode)
252 if (rx_qid >= p_vf->num_rxqs) {
253 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
254 "VF[0x%02x] - can't touch Rx queue[%04x];"
255 " Only 0x%04x are allocated\n",
256 p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
260 return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
264 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
265 struct ecore_vf_info *p_vf,
267 enum ecore_iov_validate_q_mode mode)
269 if (tx_qid >= p_vf->num_txqs) {
270 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
271 "VF[0x%02x] - can't touch Tx queue[%04x];"
272 " Only 0x%04x are allocated\n",
273 p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
277 return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
281 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
282 struct ecore_vf_info *p_vf,
287 for (i = 0; i < p_vf->num_sbs; i++)
288 if (p_vf->igu_sbs[i] == sb_idx)
291 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
292 "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
293 " one of its 0x%02x SBs\n",
294 p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
299 /* Is there at least 1 queue open? */
300 static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
301 struct ecore_vf_info *p_vf)
305 for (i = 0; i < p_vf->num_rxqs; i++)
306 if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
307 ECORE_IOV_VALIDATE_Q_ENABLE,
314 static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
315 struct ecore_vf_info *p_vf)
319 for (i = 0; i < p_vf->num_txqs; i++)
320 if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
321 ECORE_IOV_VALIDATE_Q_ENABLE,
328 /* TODO - this is linux crc32; Need a way to ifdef it out for linux */
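/* Reader's note (not upstream text): this is the table-less, bit-at-a-time
 * form of the reflected CRC-32 - 0xedb88320 is the bit-reversed CRC-32
 * polynomial 0x04c11db7 - and it is used below to checksum the VF
 * bulletin board before posting it.
 */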
329 u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
335 for (i = 0; i < 8; i++)
336 crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
341 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
343 struct ecore_ptt *p_ptt)
345 struct ecore_bulletin_content *p_bulletin;
346 int crc_size = sizeof(p_bulletin->crc);
347 struct ecore_dmae_params params;
348 struct ecore_vf_info *p_vf;
350 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
354 /* TODO - check VF is in a state where it can accept message */
355 if (!p_vf->vf_bulletin)
358 p_bulletin = p_vf->bulletin.p_virt;
360 /* Increment bulletin board version and compute crc */
361 p_bulletin->version++;
362 p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
363 p_vf->bulletin.size - crc_size);
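/* Note: the CRC deliberately covers everything past its own leading
 * field (the '+ crc_size' above), so the VF side can recompute it over
 * the same span and reject a partially-updated board.
 */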
365 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
366 "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
367 p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
369 /* propagate bulletin board via dmae to vm memory */
370 OSAL_MEMSET(&params, 0, sizeof(params));
371 params.flags = ECORE_DMAE_FLAG_VF_DST;
372 params.dst_vfid = p_vf->abs_vf_id;
373 return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
374 p_vf->vf_bulletin, p_vf->bulletin.size / 4,
378 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
380 struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
383 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
384 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
386 OSAL_PCI_READ_CONFIG_WORD(p_dev,
387 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
388 OSAL_PCI_READ_CONFIG_WORD(p_dev,
389 pos + PCI_SRIOV_INITIAL_VF,
392 OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
394 /* @@@TODO - in future we might want to add an OSAL here to
395 * allow each OS to decide on its own how to act.
397 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
398 "Number of VFs are already set to non-zero value."
399 " Ignoring PCI configuration value\n");
403 OSAL_PCI_READ_CONFIG_WORD(p_dev,
404 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
406 OSAL_PCI_READ_CONFIG_WORD(p_dev,
407 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
409 OSAL_PCI_READ_CONFIG_WORD(p_dev,
410 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
412 OSAL_PCI_READ_CONFIG_DWORD(p_dev,
413 pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
415 OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
417 OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
419 DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
420 "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
421 " stride %d, page size 0x%x\n",
422 iov->nres, iov->cap, iov->ctrl,
423 iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
424 iov->offset, iov->stride, iov->pgsz);
426 /* Some sanity checks */
427 if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
428 iov->total_vfs > NUM_OF_VFS(p_dev)) {
429 /* This can happen only due to a bug. In this case we set
430 * num_vfs to zero to avoid memory corruption in the code that
431 * assumes max number of vfs
433 DP_NOTICE(p_dev, false,
434 "IOV: Unexpected number of vfs set: %d"
435 " setting num_vf to zero\n",
442 return ECORE_SUCCESS;
445 static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
446 struct ecore_ptt *p_ptt)
448 struct ecore_igu_block *p_sb;
452 if (!p_hwfn->hw_info.p_igu_info) {
454 "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
459 sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
460 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
461 if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
462 !(p_sb->status & ECORE_IGU_STATUS_PF)) {
463 val = ecore_rd(p_hwfn, p_ptt,
464 IGU_REG_MAPPING_MEMORY + sb_id * 4);
465 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
466 ecore_wr(p_hwfn, p_ptt,
467 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
472 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
474 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
475 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
476 struct ecore_bulletin_content *p_bulletin_virt;
477 dma_addr_t req_p, rply_p, bulletin_p;
478 union pfvf_tlvs *p_reply_virt_addr;
479 union vfpf_tlvs *p_req_virt_addr;
482 OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
484 p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
485 req_p = p_iov_info->mbx_msg_phys_addr;
486 p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
487 rply_p = p_iov_info->mbx_reply_phys_addr;
488 p_bulletin_virt = p_iov_info->p_bulletins;
489 bulletin_p = p_iov_info->bulletins_phys;
490 if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
492 "ecore_iov_setup_vfdb called without alloc mem first\n");
496 for (idx = 0; idx < p_iov->total_vfs; idx++) {
497 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
500 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
501 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
502 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
503 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
505 #ifdef CONFIG_ECORE_SW_CHANNEL
506 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
507 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
509 vf->state = VF_STOPPED;
512 vf->bulletin.phys = idx *
513 sizeof(struct ecore_bulletin_content) + bulletin_p;
514 vf->bulletin.p_virt = p_bulletin_virt + idx;
515 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
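/* Reader's note: req/reply/bulletin pointers are fixed-size slots carved
 * out of three contiguous DMA regions, e.g. VF idx's request slot lives
 * at mbx_msg_phys_addr + idx * sizeof(union vfpf_tlvs), as computed above.
 */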
517 vf->relative_vf_id = idx;
518 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
519 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
520 vf->concrete_fid = concrete;
521 /* TODO - need to devise a better way of getting opaque */
522 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
523 (vf->abs_vf_id << 8);
525 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
526 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
530 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
532 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
536 num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
538 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
539 "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
541 /* Allocate PF Mailbox buffer (per-VF) */
542 p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
543 p_v_addr = &p_iov_info->mbx_msg_virt_addr;
544 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
545 &p_iov_info->mbx_msg_phys_addr,
546 p_iov_info->mbx_msg_size);
550 /* Allocate PF Mailbox Reply buffer (per-VF) */
551 p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
552 p_v_addr = &p_iov_info->mbx_reply_virt_addr;
553 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
554 &p_iov_info->mbx_reply_phys_addr,
555 p_iov_info->mbx_reply_size);
559 p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
561 p_v_addr = &p_iov_info->p_bulletins;
562 *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
563 &p_iov_info->bulletins_phys,
564 p_iov_info->bulletins_size);
568 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
569 "PF's Requests mailbox [%p virt 0x%lx phys], "
570 "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
571 " [%p virt 0x%lx phys]\n",
572 p_iov_info->mbx_msg_virt_addr,
573 (unsigned long)p_iov_info->mbx_msg_phys_addr,
574 p_iov_info->mbx_reply_virt_addr,
575 (unsigned long)p_iov_info->mbx_reply_phys_addr,
576 p_iov_info->p_bulletins,
577 (unsigned long)p_iov_info->bulletins_phys);
579 return ECORE_SUCCESS;
582 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
584 struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
586 if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
587 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
588 p_iov_info->mbx_msg_virt_addr,
589 p_iov_info->mbx_msg_phys_addr,
590 p_iov_info->mbx_msg_size);
592 if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
593 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
594 p_iov_info->mbx_reply_virt_addr,
595 p_iov_info->mbx_reply_phys_addr,
596 p_iov_info->mbx_reply_size);
598 if (p_iov_info->p_bulletins)
599 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
600 p_iov_info->p_bulletins,
601 p_iov_info->bulletins_phys,
602 p_iov_info->bulletins_size);
605 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
607 struct ecore_pf_iov *p_sriov;
609 if (!IS_PF_SRIOV(p_hwfn)) {
610 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
611 "No SR-IOV - no need for IOV db\n");
612 return ECORE_SUCCESS;
615 p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
617 DP_NOTICE(p_hwfn, true,
618 "Failed to allocate `struct ecore_sriov'\n");
622 p_hwfn->pf_iov_info = p_sriov;
624 return ecore_iov_allocate_vfdb(p_hwfn);
627 void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
629 if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
632 ecore_iov_setup_vfdb(p_hwfn);
633 ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
636 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
638 if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
639 ecore_iov_free_vfdb(p_hwfn);
640 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
644 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
646 OSAL_FREE(p_dev, p_dev->p_iov_info);
649 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
651 struct ecore_dev *p_dev = p_hwfn->p_dev;
653 enum _ecore_status_t rc;
655 if (IS_VF(p_hwfn->p_dev))
656 return ECORE_SUCCESS;
658 /* Learn the PCI configuration */
659 pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
660 PCI_EXT_CAP_ID_SRIOV);
662 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
663 return ECORE_SUCCESS;
666 /* Allocate a new struct for IOV information */
667 /* TODO - can change to VALLOC when its available */
668 p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
669 sizeof(*p_dev->p_iov_info));
670 if (!p_dev->p_iov_info) {
671 DP_NOTICE(p_hwfn, true,
672 "Can't support IOV due to lack of memory\n");
675 p_dev->p_iov_info->pos = pos;
677 rc = ecore_iov_pci_cfg_info(p_dev);
681 /* We want PF IOV to be synonymous with the existence of p_iov_info;
682 * In case the capability is published but there are no VFs, simply
683 * de-allocate the struct.
685 if (!p_dev->p_iov_info->total_vfs) {
686 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
687 "IOV capabilities, but no VFs are published\n");
688 OSAL_FREE(p_dev, p_dev->p_iov_info);
689 return ECORE_SUCCESS;
692 /* First VF index based on offset is tricky:
693 * - If ARI is supported [likely], offset - (16 - pf_id) would
694 * provide the number for eng0. 2nd engine VFs would begin
695 * after the first engine's VFs.
696 * - If !ARI, VFs would start on next device.
697 * so offset - (256 - pf_id) would provide the number.
698 * Utilize the fact that (256 - pf_id) can only be reached in the
699 * non-ARI case to differentiate between the two.
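 *
 * Illustrative example (values hypothetical): with ARI, abs_pf_id = 2
 * and offset = 16 give first_vf_in_pf = 16 + 2 - 16 = 2; a non-ARI
 * device would report offset >= (256 - pf_id) and take the 256-based
 * branch below instead.
 */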
702 if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
703 u32 first = p_hwfn->p_dev->p_iov_info->offset +
704 p_hwfn->abs_pf_id - 16;
706 p_dev->p_iov_info->first_vf_in_pf = first;
708 if (ECORE_PATH_ID(p_hwfn))
709 p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
711 u32 first = p_hwfn->p_dev->p_iov_info->offset +
712 p_hwfn->abs_pf_id - 256;
714 p_dev->p_iov_info->first_vf_in_pf = first;
717 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
718 "First VF in hwfn 0x%08x\n",
719 p_dev->p_iov_info->first_vf_in_pf);
721 return ECORE_SUCCESS;
724 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
725 bool b_fail_malicious)
727 /* Check PF supports sriov */
728 if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
729 !IS_PF_SRIOV_ALLOC(p_hwfn))
732 /* Check VF validity */
733 if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
739 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
741 return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
744 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
745 u16 rel_vf_id, u8 to_disable)
747 struct ecore_vf_info *vf;
750 for_each_hwfn(p_dev, i) {
751 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
753 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
757 vf->to_disable = to_disable;
761 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
766 if (!IS_ECORE_SRIOV(p_dev))
769 for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
770 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
774 /* @@@TBD Consider taking outside of ecore... */
775 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
779 enum _ecore_status_t rc = ECORE_SUCCESS;
780 struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
782 if (vf != OSAL_NULL) {
784 #ifdef CONFIG_ECORE_SW_CHANNEL
785 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
788 rc = ECORE_UNKNOWN_ERROR;
794 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
795 struct ecore_ptt *p_ptt,
798 ecore_wr(p_hwfn, p_ptt,
799 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
800 1 << (abs_vfid & 0x1f));
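/* Reader's note: the WAS_ERROR registers are banked 32 VFs apiece, hence
 * (abs_vfid >> 5) selects the 32-bit register and (abs_vfid & 0x1f) the
 * bit within it.
 */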
803 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
804 struct ecore_ptt *p_ptt,
805 struct ecore_vf_info *vf)
809 /* Set VF masks and configuration - pretend */
810 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
812 ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
815 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
817 /* iterate over all queues, clear sb consumer */
818 for (i = 0; i < vf->num_sbs; i++)
819 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
821 vf->opaque_fid, true);
824 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
825 struct ecore_ptt *p_ptt,
826 struct ecore_vf_info *vf, bool enable)
830 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
832 igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
835 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
837 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
839 ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
842 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
845 static enum _ecore_status_t
846 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
847 struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
849 u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
850 enum _ecore_status_t rc;
853 return ECORE_SUCCESS;
855 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
856 "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
857 ECORE_VF_ABS_ID(p_hwfn, vf));
859 ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
860 ECORE_VF_ABS_ID(p_hwfn, vf));
862 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
864 /* It's possible VF was previously considered malicious */
865 vf->b_malicious = false;
867 rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
868 vf->abs_vf_id, vf->num_sbs);
869 if (rc != ECORE_SUCCESS)
872 ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
874 SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
875 STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
877 ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
878 p_hwfn->hw_info.hw_mode);
881 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
890 * @brief ecore_iov_config_perm_table - configure the permission zone table.
892 * In E4, queue zone permission table size is 320x9. There
893 * are 320 VF queues for single engine device (256 for dual
894 * engine device), and each entry has the following format:
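 *      {valid, VF[7:0]} - i.e., judging from the write in the function
 *      body below, bit 8 is the enable/valid bit and bits [7:0] carry
 *      the absolute VF id (a reader's inference, not upstream text).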
901 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
902 struct ecore_ptt *p_ptt,
903 struct ecore_vf_info *vf, u8 enable)
909 for (qid = 0; qid < vf->num_rxqs; qid++) {
910 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
913 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
914 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
915 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
919 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
920 struct ecore_ptt *p_ptt,
921 struct ecore_vf_info *vf)
923 /* Reset vf in IGU - interrupts are still disabled */
924 ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
926 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
928 /* Permission Table */
929 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
932 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
933 struct ecore_ptt *p_ptt,
934 struct ecore_vf_info *vf,
937 struct ecore_igu_block *igu_blocks;
938 int qid = 0, igu_id = 0;
941 igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
943 if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
944 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
946 p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
948 SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
949 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
950 SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
952 while ((qid < num_rx_queues) &&
953 (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
954 if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
955 struct cau_sb_entry sb_entry;
957 vf->igu_sbs[qid] = (u16)igu_id;
958 igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
960 SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
962 ecore_wr(p_hwfn, p_ptt,
963 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
966 /* Configure the igu sbs in CAU which were marked valid */
967 ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
970 ecore_dmae_host2grc(p_hwfn, p_ptt,
971 (u64)(osal_uintptr_t)&sb_entry,
972 CAU_REG_SB_VAR_MEMORY +
973 igu_id * sizeof(u64), 2, 0);
979 vf->num_sbs = (u8)num_rx_queues;
986 * @brief The function invalidates all the VF entries;
987 * technically this isn't required, but it is added for
988 * cleanness and ease of debugging in case a VF attempts to
989 * produce an interrupt after it has been taken down.
995 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
996 struct ecore_ptt *p_ptt,
997 struct ecore_vf_info *vf)
999 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1003 /* Invalidate igu CAM lines and mark them as free */
1004 for (idx = 0; idx < vf->num_sbs; idx++) {
1005 igu_id = vf->igu_sbs[idx];
1006 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
1008 val = ecore_rd(p_hwfn, p_ptt, addr);
1009 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
1010 ecore_wr(p_hwfn, p_ptt, addr, val);
1012 p_info->igu_map.igu_blocks[igu_id].status |=
1013 ECORE_IGU_STATUS_FREE;
1015 p_hwfn->hw_info.p_igu_info->free_blks++;
1021 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
1023 struct ecore_mcp_link_params *params,
1024 struct ecore_mcp_link_state *link,
1025 struct ecore_mcp_link_capabilities *p_caps)
1027 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1028 struct ecore_bulletin_content *p_bulletin;
1033 p_bulletin = p_vf->bulletin.p_virt;
1034 p_bulletin->req_autoneg = params->speed.autoneg;
1035 p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1036 p_bulletin->req_forced_speed = params->speed.forced_speed;
1037 p_bulletin->req_autoneg_pause = params->pause.autoneg;
1038 p_bulletin->req_forced_rx = params->pause.forced_rx;
1039 p_bulletin->req_forced_tx = params->pause.forced_tx;
1040 p_bulletin->req_loopback = params->loopback_mode;
1042 p_bulletin->link_up = link->link_up;
1043 p_bulletin->speed = link->speed;
1044 p_bulletin->full_duplex = link->full_duplex;
1045 p_bulletin->autoneg = link->an;
1046 p_bulletin->autoneg_complete = link->an_complete;
1047 p_bulletin->parallel_detection = link->parallel_detection;
1048 p_bulletin->pfc_enabled = link->pfc_enabled;
1049 p_bulletin->partner_adv_speed = link->partner_adv_speed;
1050 p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1051 p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1052 p_bulletin->partner_adv_pause = link->partner_adv_pause;
1053 p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1055 p_bulletin->capability_speed = p_caps->speed_capabilities;
1058 enum _ecore_status_t
1059 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
1060 struct ecore_ptt *p_ptt,
1061 struct ecore_iov_vf_init_params *p_params)
1063 struct ecore_mcp_link_capabilities link_caps;
1064 struct ecore_mcp_link_params link_params;
1065 struct ecore_mcp_link_state link_state;
1066 u8 num_of_vf_available_chains = 0;
1067 struct ecore_vf_info *vf = OSAL_NULL;
1069 enum _ecore_status_t rc = ECORE_SUCCESS;
1073 vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1075 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1076 return ECORE_UNKNOWN_ERROR;
1080 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1081 p_params->rel_vf_id);
1085 /* Perform sanity checking on the requested vport/rss */
1086 if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1087 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1088 p_params->rel_vf_id, p_params->vport_id);
1092 if ((p_params->num_queues > 1) &&
1093 (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1094 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1095 p_params->rel_vf_id, p_params->rss_eng_id);
1099 /* TODO - remove this once we get confidence of change */
1100 if (!p_params->vport_id) {
1101 DP_NOTICE(p_hwfn, false,
1102 "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1103 p_params->rel_vf_id);
1105 if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1106 DP_NOTICE(p_hwfn, false,
1107 "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1108 p_params->rel_vf_id);
1110 vf->vport_id = p_params->vport_id;
1111 vf->rss_eng_id = p_params->rss_eng_id;
1113 /* Perform sanity checking on the requested queue_id */
1114 for (i = 0; i < p_params->num_queues; i++) {
1115 u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
1116 u16 max_vf_qzone = min_vf_qzone +
1117 FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
1119 qid = p_params->req_rx_queue[i];
1120 if (qid < min_vf_qzone || qid > max_vf_qzone) {
1121 DP_NOTICE(p_hwfn, true,
1122 "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
1123 qid, p_params->rel_vf_id,
1124 min_vf_qzone, max_vf_qzone);
1128 qid = p_params->req_tx_queue[i];
1129 if (qid > max_vf_qzone) {
1130 DP_NOTICE(p_hwfn, true,
1131 "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
1132 qid, p_params->rel_vf_id, max_vf_qzone);
1136 /* If client *really* wants, Tx qid can be shared with PF */
1137 if (qid < min_vf_qzone)
1138 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1139 "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
1140 p_params->rel_vf_id, qid, i);
1143 /* Limit number of queues according to number of CIDs */
1144 ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1145 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1146 "VF[%d] - requesting to initialize for 0x%04x queues"
1147 " [0x%04x CIDs available]\n",
1148 vf->relative_vf_id, p_params->num_queues, (u16)cids);
1149 num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1151 num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1155 if (num_of_vf_available_chains == 0) {
1156 DP_ERR(p_hwfn, "no available igu sbs\n");
1160 /* Choose queue number and index ranges */
1161 vf->num_rxqs = num_of_vf_available_chains;
1162 vf->num_txqs = num_of_vf_available_chains;
1164 for (i = 0; i < vf->num_rxqs; i++) {
1165 struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
1167 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1168 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1170 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1171 "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1172 vf->relative_vf_id, i, vf->igu_sbs[i],
1173 p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1176 /* Update the link configuration in bulletin.
1178 OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1179 sizeof(link_params));
1180 OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1181 sizeof(link_state));
1182 OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1184 ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1185 &link_params, &link_state, &link_caps);
1187 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1189 if (rc == ECORE_SUCCESS) {
1191 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1192 (1ULL << (vf->relative_vf_id % 64));
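/* Reader's note: active_vfs is an array of u64 bitmap words - word index
 * relative_vf_id / 64, bit index relative_vf_id % 64; the clear in the
 * release path must use the same % 64 for its shift.
 */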
1194 if (IS_LEAD_HWFN(p_hwfn))
1195 p_hwfn->p_dev->p_iov_info->num_vfs++;
1201 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1202 struct ecore_ptt *p_ptt,
1205 struct ecore_mcp_link_capabilities caps;
1206 struct ecore_mcp_link_params params;
1207 struct ecore_mcp_link_state link;
1208 struct ecore_vf_info *vf = OSAL_NULL;
1210 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1212 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf: vf is NULL\n");
1213 return ECORE_UNKNOWN_ERROR;
1216 if (vf->bulletin.p_virt)
1217 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1218 sizeof(*vf->bulletin.p_virt));
1220 OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1222 /* Get the link configuration back in bulletin so
1223 * that when VFs are re-enabled they get the actual
1224 * link configuration.
1226 OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1227 OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1228 OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1230 ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1232 /* Forget the VF's acquisition message */
1233 OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1235 /* disabling interrupts and resetting the permission table were done during
1236 * vf-close; however, we could get here without going through vf_close
1238 /* Disable Interrupts for VF */
1239 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1241 /* Reset Permission table */
1242 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1246 ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1250 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1251 ~(1ULL << (vf->relative_vf_id % 64));
1253 if (IS_LEAD_HWFN(p_hwfn))
1254 p_hwfn->p_dev->p_iov_info->num_vfs--;
1257 return ECORE_SUCCESS;
1260 static bool ecore_iov_tlv_supported(u16 tlvtype)
1262 return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
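/* Reader's note: this bounds check is what keeps the
 * ecore_channel_tlvs_string[] lookups in the logging helpers below inside
 * the array, so it must be applied before any such lookup.
 */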
1265 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1266 struct ecore_vf_info *vf, u16 tlv)
1268 /* lock the channel */
1269 /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1271 /* record the locking op */
1272 /* vf->op_current = tlv; @@@TBD MichalK */
1275 if (ecore_iov_tlv_supported(tlv))
1278 "VF[%d]: vf pf channel locked by %s\n",
1280 ecore_channel_tlvs_string[tlv]);
1284 "VF[%d]: vf pf channel locked by %04x\n",
1285 vf->abs_vf_id, tlv);
1288 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1289 struct ecore_vf_info *vf,
1292 /* log the unlock */
1293 if (ecore_iov_tlv_supported(expected_tlv))
1296 "VF[%d]: vf pf channel unlocked by %s\n",
1298 ecore_channel_tlvs_string[expected_tlv]);
1302 "VF[%d]: vf pf channel unlocked by %04x\n",
1303 vf->abs_vf_id, expected_tlv);
1305 /* record the locking op */
1306 /* vf->op_current = CHANNEL_TLV_NONE; */
1309 /* place a given tlv on the tlv buffer, continuing current tlv list */
1310 void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
1311 u8 **offset, u16 type, u16 length)
1313 struct channel_tlv *tl = (struct channel_tlv *)*offset;
1316 tl->length = length;
1318 /* Offset should keep pointing to next TLV (the end of the last) */
1321 /* Return a pointer to the start of the added tlv */
1322 return *offset - length;
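/* Typical usage - a sketch mirroring ecore_iov_prepare_resp() below:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *
 * i.e. callers chain TLVs into the reply buffer and always terminate the
 * chain with CHANNEL_TLV_LIST_END.
 */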
1325 /* list the types and lengths of the tlvs on the buffer */
1326 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1328 u16 i = 1, total_length = 0;
1329 struct channel_tlv *tlv;
1332 /* cast current tlv list entry to channel tlv header */
1333 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1336 if (ecore_iov_tlv_supported(tlv->type))
1337 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1338 "TLV number %d: type %s, length %d\n",
1339 i, ecore_channel_tlvs_string[tlv->type],
1342 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1343 "TLV number %d: type %d, length %d\n",
1344 i, tlv->type, tlv->length);
1346 if (tlv->type == CHANNEL_TLV_LIST_END)
1349 /* Validate entry - protect against malicious VFs */
1351 DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1354 total_length += tlv->length;
1355 if (total_length >= sizeof(struct tlv_buffer_size)) {
1356 DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1364 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1365 struct ecore_ptt *p_ptt,
1366 struct ecore_vf_info *p_vf,
1367 u16 length, u8 status)
1369 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1370 struct ecore_dmae_params params;
1373 mbx->reply_virt->default_resp.hdr.status = status;
1375 ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1377 #ifdef CONFIG_ECORE_SW_CHANNEL
1378 mbx->sw_mbx.response_size =
1379 length + sizeof(struct channel_list_end_tlv);
1381 if (!p_hwfn->p_dev->b_hw_channel)
1385 eng_vf_id = p_vf->abs_vf_id;
1387 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1388 params.flags = ECORE_DMAE_FLAG_VF_DST;
1389 params.dst_vfid = eng_vf_id;
1391 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1392 mbx->req_virt->first_tlv.reply_address +
1394 (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1397 ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1398 mbx->req_virt->first_tlv.reply_address,
1399 sizeof(u64) / 4, &params);
1402 GTT_BAR0_MAP_REG_USDM_RAM +
1403 USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1406 static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
1407 enum ecore_iov_vport_update_flag flag)
1410 case ECORE_IOV_VP_UPDATE_ACTIVATE:
1411 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1412 case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1413 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1414 case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1415 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1416 case ECORE_IOV_VP_UPDATE_MCAST:
1417 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1418 case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1419 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1420 case ECORE_IOV_VP_UPDATE_RSS:
1421 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1422 case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1423 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1424 case ECORE_IOV_VP_UPDATE_SGE_TPA:
1425 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1431 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1432 struct ecore_vf_info *p_vf,
1433 struct ecore_iov_vf_mbx *p_mbx,
1434 u8 status, u16 tlvs_mask,
1437 struct pfvf_def_resp_tlv *resp;
1438 u16 size, total_len, i;
1440 OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1441 p_mbx->offset = (u8 *)p_mbx->reply_virt;
1442 size = sizeof(struct pfvf_def_resp_tlv);
1445 ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1447 /* Prepare response for all extended tlvs if they are found by PF */
1448 for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1449 if (!(tlvs_mask & (1 << i)))
1452 resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
1453 ecore_iov_vport_to_tlv(p_hwfn, i), size);
1455 if (tlvs_accepted & (1 << i))
1456 resp->hdr.status = status;
1458 resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1460 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1461 "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1462 p_vf->relative_vf_id,
1463 ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1468 ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1469 sizeof(struct channel_list_end_tlv));
1474 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1475 struct ecore_ptt *p_ptt,
1476 struct ecore_vf_info *vf_info,
1477 u16 type, u16 length, u8 status)
1479 struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1481 mbx->offset = (u8 *)mbx->reply_virt;
1483 ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
1484 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1485 sizeof(struct channel_list_end_tlv));
1487 ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1489 OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
1492 struct ecore_public_vf_info
1493 *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1495 bool b_enabled_only)
1497 struct ecore_vf_info *vf = OSAL_NULL;
1499 vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1503 return &vf->p_vf_info;
1506 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1507 struct ecore_vf_info *p_vf)
1510 p_vf->vf_bulletin = 0;
1511 p_vf->vport_instance = 0;
1512 p_vf->configured_features = 0;
1514 /* If VF previously requested fewer resources, go back to default */
1515 p_vf->num_rxqs = p_vf->num_sbs;
1516 p_vf->num_txqs = p_vf->num_sbs;
1518 p_vf->num_active_rxqs = 0;
1520 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1521 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1523 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1524 if (!p_queue->cids[j].p_cid)
1527 ecore_eth_queue_cid_release(p_hwfn,
1528 p_queue->cids[j].p_cid);
1529 p_queue->cids[j].p_cid = OSAL_NULL;
1533 OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1534 OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1535 OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1538 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1539 struct ecore_ptt *p_ptt,
1540 struct ecore_vf_info *p_vf,
1541 struct vf_pf_resc_request *p_req,
1542 struct pf_vf_resc *p_resp)
1546 /* Queue related information */
1547 p_resp->num_rxqs = p_vf->num_rxqs;
1548 p_resp->num_txqs = p_vf->num_txqs;
1549 p_resp->num_sbs = p_vf->num_sbs;
1551 for (i = 0; i < p_resp->num_sbs; i++) {
1552 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1553 /* TODO - what's this sb_qid field? Is it deprecated?
1554 * or is there an ecore_client that looks at this?
1556 p_resp->hw_sbs[i].sb_qid = 0;
1559 /* These fields are filled for backward compatibility.
1560 * Unused by modern vfs.
1562 for (i = 0; i < p_resp->num_rxqs; i++) {
1563 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1564 (u16 *)&p_resp->hw_qid[i]);
1568 /* Filter related information */
1569 p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1570 p_req->num_mac_filters);
1571 p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1572 p_req->num_vlan_filters);
1574 /* This isn't really needed/enforced, but some legacy VFs might depend
1575 * on the correct filling of this field.
1577 p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1579 /* Validate sufficient resources for VF */
1580 if (p_resp->num_rxqs < p_req->num_rxqs ||
1581 p_resp->num_txqs < p_req->num_txqs ||
1582 p_resp->num_sbs < p_req->num_sbs ||
1583 p_resp->num_mac_filters < p_req->num_mac_filters ||
1584 p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1585 p_resp->num_mc_filters < p_req->num_mc_filters) {
1586 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1587 "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
1588 " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
1589 " vlan [%02x/%02x] mc [%02x/%02x]\n",
1591 p_req->num_rxqs, p_resp->num_rxqs,
1592 p_req->num_txqs, p_resp->num_txqs,
1593 p_req->num_sbs, p_resp->num_sbs,
1594 p_req->num_mac_filters, p_resp->num_mac_filters,
1595 p_req->num_vlan_filters, p_resp->num_vlan_filters,
1596 p_req->num_mc_filters, p_resp->num_mc_filters);
1598 /* Some legacy OSes are incapable of correctly handling this
1601 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1602 ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1603 (p_vf->acquire.vfdev_info.os_type ==
1604 VFPF_ACQUIRE_OS_WINDOWS))
1605 return PFVF_STATUS_SUCCESS;
1607 return PFVF_STATUS_NO_RESOURCE;
1610 return PFVF_STATUS_SUCCESS;
1613 static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
1614 struct pfvf_stats_info *p_stats)
1616 p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1617 OFFSETOF(struct mstorm_vf_zone,
1618 non_trigger.eth_queue_stat);
1619 p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1620 p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1621 OFFSETOF(struct ustorm_vf_zone,
1622 non_trigger.eth_queue_stat);
1623 p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1624 p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1625 OFFSETOF(struct pstorm_vf_zone,
1626 non_trigger.eth_queue_stat);
1627 p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1628 p_stats->tstats.address = 0;
1629 p_stats->tstats.len = 0;
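/* Reader's note: only the M/U/P storm zone-B addresses are handed to the
 * VF; tstats is deliberately zeroed, as no tstorm per-queue zone is
 * exposed through this interface (an inference from the code above).
 */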
1632 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
1633 struct ecore_ptt *p_ptt,
1634 struct ecore_vf_info *vf)
1636 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1637 struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1638 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1639 struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1640 u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1641 struct pf_vf_resc *resc = &resp->resc;
1642 enum _ecore_status_t rc;
1644 OSAL_MEMSET(resp, 0, sizeof(*resp));
1646 /* Write the PF version so that VF would know which version
1647 * is supported - might be later overridden. This guarantees that
1648 * VF could recognize legacy PF based on lack of versions in reply.
1650 pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1651 pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1653 /* TODO - not doing anything is bad since we'll assert, but this isn't
1654 * necessarily the right behavior - perhaps we should have allowed some
1657 if (vf->state != VF_FREE &&
1658 vf->state != VF_STOPPED) {
1659 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1660 "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1661 vf->abs_vf_id, vf->state);
1665 /* Validate FW compatibility */
1666 if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1667 if (req->vfdev_info.capabilities &
1668 VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1669 struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1671 /* This legacy support would need to be removed once
1672 * the major has changed.
1674 OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1676 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1677 "VF[%d] is pre-fastpath HSI\n",
1679 p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1680 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1683 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1684 " incompatible with loaded FW's faspath"
1687 req->vfdev_info.eth_fp_hsi_major,
1688 req->vfdev_info.eth_fp_hsi_minor,
1689 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1695 /* On 100g PFs, prevent old VFs from loading */
1696 if ((p_hwfn->p_dev->num_hwfns > 1) &&
1697 !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1699 "VF[%d] is running an old driver that doesn't support"
1705 #ifndef __EXTRACT__LINUX__
1706 if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1707 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1712 /* Store the acquire message */
1713 OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1715 vf->opaque_fid = req->vfdev_info.opaque_fid;
1717 vf->vf_bulletin = req->bulletin_addr;
1718 vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1719 vf->bulletin.size : req->bulletin_size;
1721 /* fill in pfdev info */
1722 pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1723 pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
1724 pfdev_info->indices_per_sb = PIS_PER_SB;
1726 pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1727 PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1728 if (p_hwfn->p_dev->num_hwfns > 1)
1729 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1731 ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1733 OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1736 pfdev_info->fw_major = FW_MAJOR_VERSION;
1737 pfdev_info->fw_minor = FW_MINOR_VERSION;
1738 pfdev_info->fw_rev = FW_REVISION_VERSION;
1739 pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1741 /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1744 pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1745 req->vfdev_info.eth_fp_hsi_minor);
1746 pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1747 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1750 pfdev_info->dev_type = p_hwfn->p_dev->type;
1751 pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1753 /* Fill resources available to VF; Make sure there are enough to
1754 * satisfy the VF's request.
1756 vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1757 &req->resc_request, resc);
1758 if (vfpf_status != PFVF_STATUS_SUCCESS)
1761 /* Start the VF in FW */
1762 rc = ecore_sp_vf_start(p_hwfn, vf);
1763 if (rc != ECORE_SUCCESS) {
1764 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1766 vfpf_status = PFVF_STATUS_FAILURE;
1770 /* Fill agreed size of bulletin board in response, and post
1771 * an initial image to the bulletin board.
1773 resp->bulletin_size = vf->bulletin.size;
1774 ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1776 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1777 "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1778 " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1779 "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1781 vf->abs_vf_id, resp->pfdev_info.chip_num,
1782 resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1783 (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1784 resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1785 resc->num_vlan_filters);
1787 vf->state = VF_ACQUIRED;
1790 /* Prepare Response */
1791 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1792 sizeof(struct pfvf_acquire_resp_tlv),
1796 static enum _ecore_status_t
1797 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1798 struct ecore_vf_info *p_vf, bool val)
1800 struct ecore_sp_vport_update_params params;
1801 enum _ecore_status_t rc;
1803 if (val == p_vf->spoof_chk) {
1804 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1805 "Spoofchk value[%d] is already configured\n", val);
1806 return ECORE_SUCCESS;
1809 OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1810 params.opaque_fid = p_vf->opaque_fid;
1811 params.vport_id = p_vf->vport_id;
1812 params.update_anti_spoofing_en_flg = 1;
1813 params.anti_spoofing_en = val;
1815 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1817 if (rc == ECORE_SUCCESS) {
1818 p_vf->spoof_chk = val;
1819 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1820 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1821 "Spoofchk val[%d] configured\n", val);
1823 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1824 "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1825 val, p_vf->relative_vf_id);
1831 static enum _ecore_status_t
1832 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1833 struct ecore_vf_info *p_vf)
1835 struct ecore_filter_ucast filter;
1836 enum _ecore_status_t rc = ECORE_SUCCESS;
1839 OSAL_MEMSET(&filter, 0, sizeof(filter));
1840 filter.is_rx_filter = 1;
1841 filter.is_tx_filter = 1;
1842 filter.vport_to_add_to = p_vf->vport_id;
1843 filter.opcode = ECORE_FILTER_ADD;
1845 /* Reconfigure vlans */
1846 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1847 if (!p_vf->shadow_config.vlans[i].used)
1850 filter.type = ECORE_FILTER_VLAN;
1851 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1852 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1853 "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1854 filter.vlan, p_vf->relative_vf_id);
1855 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1856 &filter, ECORE_SPQ_MODE_CB,
1859 DP_NOTICE(p_hwfn, true,
1860 "Failed to configure VLAN [%04x]"
1862 filter.vlan, p_vf->relative_vf_id);
1870 static enum _ecore_status_t
1871 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1872 struct ecore_vf_info *p_vf, u64 events)
1874 enum _ecore_status_t rc = ECORE_SUCCESS;
1876 /*TODO - what about MACs? */
1878 if ((events & (1 << VLAN_ADDR_FORCED)) &&
1879 !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1880 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1885 static enum _ecore_status_t
1886 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1887 struct ecore_vf_info *p_vf,
1890 enum _ecore_status_t rc = ECORE_SUCCESS;
1891 struct ecore_filter_ucast filter;
1893 if (!p_vf->vport_instance)
1896 if (events & (1 << MAC_ADDR_FORCED)) {
1897 /* Since there's no way [currently] of removing the MAC,
1898 * we can always assume this means we need to force it.
1900 OSAL_MEMSET(&filter, 0, sizeof(filter));
1901 filter.type = ECORE_FILTER_MAC;
1902 filter.opcode = ECORE_FILTER_REPLACE;
1903 filter.is_rx_filter = 1;
1904 filter.is_tx_filter = 1;
1905 filter.vport_to_add_to = p_vf->vport_id;
1906 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1908 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1910 ECORE_SPQ_MODE_CB, OSAL_NULL);
1912 DP_NOTICE(p_hwfn, true,
1913 "PF failed to configure MAC for VF\n");
1917 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1920 if (events & (1 << VLAN_ADDR_FORCED)) {
1921 struct ecore_sp_vport_update_params vport_update;
1925 OSAL_MEMSET(&filter, 0, sizeof(filter));
1926 filter.type = ECORE_FILTER_VLAN;
1927 filter.is_rx_filter = 1;
1928 filter.is_tx_filter = 1;
1929 filter.vport_to_add_to = p_vf->vport_id;
1930 filter.vlan = p_vf->bulletin.p_virt->pvid;
1931 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
1934 /* Send the ramrod */
1935 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1937 ECORE_SPQ_MODE_CB, OSAL_NULL);
1939 DP_NOTICE(p_hwfn, true,
1940 "PF failed to configure VLAN for VF\n");
1944 /* Update the default-vlan & silent vlan stripping */
1945 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
1946 vport_update.opaque_fid = p_vf->opaque_fid;
1947 vport_update.vport_id = p_vf->vport_id;
1948 vport_update.update_default_vlan_enable_flg = 1;
1949 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1950 vport_update.update_default_vlan_flg = 1;
1951 vport_update.default_vlan = filter.vlan;
1953 vport_update.update_inner_vlan_removal_flg = 1;
1954 removal = filter.vlan ?
1955 1 : p_vf->shadow_config.inner_vlan_removal;
1956 vport_update.inner_vlan_removal_flg = removal;
1957 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1958 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
1959 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
1961 DP_NOTICE(p_hwfn, true,
1962 "PF failed to configure VF vport for vlan\n");
1966 /* Update all the Rx queues */
1967 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1968 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1969 struct ecore_queue_cid *p_cid = OSAL_NULL;
1971 /* There can be at most 1 Rx queue on a qzone. Find it */
1972 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
1974 if (p_cid == OSAL_NULL)
1977 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
1980 ECORE_SPQ_MODE_EBLOCK,
1983 DP_NOTICE(p_hwfn, true,
1984 "Failed to send Rx update"
1985 " fo queue[0x%04x]\n",
1986 p_cid->rel.queue_id);
1992 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1994 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
1997 /* If forced features are terminated, we need to configure the shadow
1998 * configuration back again.
2001 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2006 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2007 struct ecore_ptt *p_ptt,
2008 struct ecore_vf_info *vf)
2010 struct ecore_sp_vport_start_params params = { 0 };
2011 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2012 struct vfpf_vport_start_tlv *start;
2013 u8 status = PFVF_STATUS_SUCCESS;
2014 struct ecore_vf_info *vf_info;
2017 enum _ecore_status_t rc;
2019 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2021 DP_NOTICE(p_hwfn->p_dev, true,
2022 "Failed to get VF info, invalid vfid [%d]\n",
2023 vf->relative_vf_id);
2027 vf->state = VF_ENABLED;
2028 start = &mbx->req_virt->start_vport;
2030 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2032 /* Initialize Status block in CAU */
2033 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2034 if (!start->sb_addr[sb_id]) {
2035 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2036 "VF[%d] did not fill the address of SB %d\n",
2037 vf->relative_vf_id, sb_id);
2041 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2042 start->sb_addr[sb_id],
2047 vf->mtu = start->mtu;
2048 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2050 /* Take into consideration configuration forced by hypervisor;
2051 * If none is configured, use the supplied VF values [for old
2052 * vfs that would still be fine, since they passed '0' as padding].
2054 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2055 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2056 u8 vf_req = start->only_untagged;
2058 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2059 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2062 params.tpa_mode = start->tpa_mode;
2063 params.remove_inner_vlan = start->inner_vlan_removal;
2064 params.tx_switching = true;
2067 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2068 DP_NOTICE(p_hwfn, false,
2069 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2070 params.tx_switching = false;
2074 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2075 params.drop_ttl0 = false;
2076 params.concrete_fid = vf->concrete_fid;
2077 params.opaque_fid = vf->opaque_fid;
2078 params.vport_id = vf->vport_id;
2079 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2080 params.mtu = vf->mtu;
2081 params.check_mac = true;
2083 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2084 if (rc != ECORE_SUCCESS) {
2086 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2087 status = PFVF_STATUS_FAILURE;
2089 vf->vport_instance++;
2091 /* Force configuration if needed on the newly opened vport */
2092 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2093 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2094 vf->vport_id, vf->opaque_fid);
2095 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2098 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2099 sizeof(struct pfvf_def_resp_tlv), status);
2102 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2103 struct ecore_ptt *p_ptt,
2104 struct ecore_vf_info *vf)
2106 u8 status = PFVF_STATUS_SUCCESS;
2107 enum _ecore_status_t rc;
2109 vf->vport_instance--;
2110 vf->spoof_chk = false;
2112 if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
2113 (ecore_iov_validate_active_txq(p_hwfn, vf))) {
2114 vf->b_malicious = true;
2115 DP_NOTICE(p_hwfn, false,
2116 "VF [%02x] - considered malicious;"
2117 " Unable to stop RX/TX queuess\n",
2121 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2122 if (rc != ECORE_SUCCESS) {
2124 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2125 status = PFVF_STATUS_FAILURE;
2128 /* Forget the configuration on the vport */
2129 vf->configured_features = 0;
2130 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2132 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2133 sizeof(struct pfvf_def_resp_tlv), status);
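/* Response builder for START_RXQ. For non-legacy VFs the reply carries
 * the Rx producer offset within the VF BAR, computed as a fixed zone
 * base plus a per-queue stride:
 *
 *   offset = PXP_VF_BAR0_START_MSDM_ZONE_B
 *          + OFFSETOF(struct mstorm_vf_zone,
 *                     non_trigger.eth_rx_queue_producers)
 *          + sizeof(struct eth_rx_prod_data) * rx_qid;
 */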
2136 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2137 struct ecore_ptt *p_ptt,
2138 struct ecore_vf_info *vf,
2139 u8 status, bool b_legacy)
2141 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2142 struct pfvf_start_queue_resp_tlv *p_tlv;
2143 struct vfpf_start_rxq_tlv *req;
2146 mbx->offset = (u8 *)mbx->reply_virt;
2148 /* Taking a bigger struct instead of adding a TLV to the list was a
2149 * mistake, but one which we're now stuck with, as some older
2150 * clients assume the size of the previous response.
2153 length = sizeof(*p_tlv);
2155 length = sizeof(struct pfvf_def_resp_tlv);
2157 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2159 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2160 sizeof(struct channel_list_end_tlv));
2162 /* Update the TLV with the response */
2163 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2164 req = &mbx->req_virt->start_rxq;
2165 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2166 OFFSETOF(struct mstorm_vf_zone,
2167 non_trigger.eth_rx_queue_producers) +
2168 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2171 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
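/* Handler for the VF's START_RXQ request: validates the queue index and
 * status block, detects legacy [pre pkt-len/tunn HSI] VFs, acquires a
 * queue-cid and posts the Rx-queue-start ramrod. For legacy VFs the Rx
 * producer in MSDM RAM is cleared here, since such VFs compute its
 * location on their own.
 */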
2174 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2175 struct ecore_ptt *p_ptt,
2176 struct ecore_vf_info *vf)
2178 struct ecore_queue_start_common_params params;
2179 struct ecore_queue_cid_vf_params vf_params;
2180 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2181 u8 status = PFVF_STATUS_NO_RESOURCE;
2182 struct ecore_vf_queue *p_queue;
2183 struct vfpf_start_rxq_tlv *req;
2184 struct ecore_queue_cid *p_cid;
2185 bool b_legacy_vf = false;
2187 enum _ecore_status_t rc;
2189 req = &mbx->req_virt->start_rxq;
2191 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2192 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2193 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2196 /* Legacy VFs made assumptions about the CID their queues connected to,
2197 * assuming queue X used CID X.
2198 * TODO - need to validate that there was no official release post
2199 * the current legacy scheme that still made that assumption.
2201 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2202 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2205 /* Acquire a new queue-cid */
2206 p_queue = &vf->vf_queues[req->rx_qid];
2208 OSAL_MEMSET(¶ms, 0, sizeof(params));
2209 params.queue_id = (u8)p_queue->fw_rx_qid;
2210 params.vport_id = vf->vport_id;
2211 params.stats_id = vf->abs_vf_id + 0x10;
2212 params.sb = req->hw_sb;
2213 params.sb_idx = req->sb_index;
2215 /* TODO - set qid_usage_idx according to extended TLV. For now, use
2220 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2221 vf_params.vfid = vf->relative_vf_id;
2222 vf_params.vf_qid = (u8)req->rx_qid;
2223 vf_params.b_legacy = b_legacy_vf;
2224 vf_params.qid_usage_idx = qid_usage_idx;
2226 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2227 ¶ms, &vf_params);
2228 if (p_cid == OSAL_NULL)
2231 /* Legacy VFs keep their producers in a different location, which they
2232 * calculate on their own and clear prior to this.
2236 GTT_BAR0_MAP_REG_MSDM_RAM +
2237 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2240 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2245 if (rc != ECORE_SUCCESS) {
2246 status = PFVF_STATUS_FAILURE;
2247 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2249 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2250 p_queue->cids[qid_usage_idx].b_is_tx = false;
2251 status = PFVF_STATUS_SUCCESS;
2252 vf->num_active_rxqs++;
2256 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2261 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2262 struct ecore_tunnel_info *p_tun,
2263 u16 tunn_feature_mask)
2265 p_resp->tunn_feature_mask = tunn_feature_mask;
2266 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2267 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2268 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2269 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2270 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2271 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2272 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2273 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2274 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2275 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2276 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2277 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2281 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2282 struct ecore_tunn_update_type *p_tun,
2283 enum ecore_tunn_mode mask, u8 tun_cls)
2285 if (p_req->tun_mode_update_mask & (1 << mask)) {
2286 p_tun->b_update_mode = true;
2288 if (p_req->tunn_mode & (1 << mask))
2289 p_tun->b_mode_enabled = true;
2292 p_tun->tun_cls = tun_cls;
2296 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2297 struct ecore_tunn_update_type *p_tun,
2298 struct ecore_tunn_update_udp_port *p_port,
2299 enum ecore_tunn_mode mask,
2300 u8 tun_cls, u8 update_port, u16 port)
2303 p_port->b_update_port = true;
2304 p_port->port = port;
2307 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2311 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2313 bool b_update_requested = false;
2315 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2316 p_req->update_geneve_port || p_req->update_vxlan_port)
2317 b_update_requested = true;
2319 return b_update_requested;
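/* Handler for the VF's UPDATE_TUNN_PARAM request: merges the requested
 * per-tunnel-type updates into a local ecore_tunnel_info, lets the OSAL
 * client validate [and possibly modify] the request, posts the PF
 * tunnel-configuration ramrod if anything changed, propagates the
 * resulting UDP ports to every VF bulletin, and replies with the
 * configuration actually in effect.
 */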
2322 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2323 struct ecore_ptt *p_ptt,
2324 struct ecore_vf_info *p_vf)
2326 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2327 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2328 struct pfvf_update_tunn_param_tlv *p_resp;
2329 struct vfpf_update_tunn_param_tlv *p_req;
2330 enum _ecore_status_t rc = ECORE_SUCCESS;
2331 u8 status = PFVF_STATUS_SUCCESS;
2332 bool b_update_required = false;
2333 struct ecore_tunnel_info tunn;
2334 u16 tunn_feature_mask = 0;
2337 mbx->offset = (u8 *)mbx->reply_virt;
2339 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2340 p_req = &mbx->req_virt->tunn_param_update;
2342 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2343 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2344 "No tunnel update requested by VF\n");
2345 status = PFVF_STATUS_FAILURE;
2349 tunn.b_update_rx_cls = p_req->update_tun_cls;
2350 tunn.b_update_tx_cls = p_req->update_tun_cls;
2352 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2353 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2354 p_req->update_vxlan_port,
2356 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2357 ECORE_MODE_L2GENEVE_TUNN,
2358 p_req->l2geneve_clss,
2359 p_req->update_geneve_port,
2360 p_req->geneve_port);
2361 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2362 ECORE_MODE_IPGENEVE_TUNN,
2363 p_req->ipgeneve_clss);
2364 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2365 ECORE_MODE_L2GRE_TUNN,
2367 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2368 ECORE_MODE_IPGRE_TUNN,
2371 /* If the PF modifies the VF's request, it should still
2372 * return an error in case of a partial or modified
2373 * configuration, as opposed to the requested one.
2375 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2376 &b_update_required, &tunn);
2378 if (rc != ECORE_SUCCESS)
2379 status = PFVF_STATUS_FAILURE;
2381 /* Check whether the ECORE client is willing to update anything */
2382 if (b_update_required) {
2385 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2386 ECORE_SPQ_MODE_EBLOCK,
2388 if (rc != ECORE_SUCCESS)
2389 status = PFVF_STATUS_FAILURE;
2391 geneve_port = p_tun->geneve_port.port;
2392 ecore_for_each_vf(p_hwfn, i) {
2393 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2394 p_tun->vxlan_port.port,
2400 p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
2401 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2403 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2404 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2405 sizeof(struct channel_list_end_tlv));
2407 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
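/* Response builder for START_TXQ. For non-legacy VFs the reply carries
 * the doorbell offset for the new queue, derived from its CID via
 * DB_ADDR_VF(cid, DQ_DEMS_LEGACY); legacy VFs get the smaller default
 * response they expect.
 */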
2410 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2411 struct ecore_ptt *p_ptt,
2412 struct ecore_vf_info *p_vf,
2416 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2417 struct pfvf_start_queue_resp_tlv *p_tlv;
2418 bool b_legacy = false;
2421 mbx->offset = (u8 *)mbx->reply_virt;
2423 /* Taking a bigger struct instead of adding a TLV to the list was a
2424 * mistake, but one which we're now stuck with, as some older
2425 * clients assume the size of the previous response.
2427 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2428 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2432 length = sizeof(*p_tlv);
2434 length = sizeof(struct pfvf_def_resp_tlv);
2436 p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2438 ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2439 sizeof(struct channel_list_end_tlv));
2441 /* Update the TLV with the response */
2442 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2443 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2445 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
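/* Handler for the VF's START_TXQ request: mirrors the Rx flow -
 * validate the queue and status block, detect legacy VFs, acquire a
 * queue-cid, pick the VF's PQ and post the Tx-queue-start ramrod.
 */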
2448 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2449 struct ecore_ptt *p_ptt,
2450 struct ecore_vf_info *vf)
2452 struct ecore_queue_start_common_params params;
2453 struct ecore_queue_cid_vf_params vf_params;
2454 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2455 u8 status = PFVF_STATUS_NO_RESOURCE;
2456 struct ecore_vf_queue *p_queue;
2457 struct vfpf_start_txq_tlv *req;
2458 struct ecore_queue_cid *p_cid;
2459 bool b_legacy_vf = false;
2462 enum _ecore_status_t rc;
2465 OSAL_MEMSET(¶ms, 0, sizeof(params));
2466 req = &mbx->req_virt->start_txq;
2468 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2469 ECORE_IOV_VALIDATE_Q_NA) ||
2470 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2473 /* In case this is a legacy VF, we need to know so we can use the right CIDs.
2474 * TODO - need to validate that there was no official release post
2475 * the current legacy scheme that still made that assumption.
2477 if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2478 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2481 /* Acquire a new queue-cid */
2482 p_queue = &vf->vf_queues[req->tx_qid];
2484 params.queue_id = p_queue->fw_tx_qid;
2485 params.vport_id = vf->vport_id;
2486 params.stats_id = vf->abs_vf_id + 0x10;
2487 params.sb = req->hw_sb;
2488 params.sb_idx = req->sb_index;
2490 /* TODO - set qid_usage_idx according to extended TLV. For now, use
2495 if (p_queue->cids[qid_usage_idx].p_cid)
2498 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2499 vf_params.vfid = vf->relative_vf_id;
2500 vf_params.vf_qid = (u8)req->tx_qid;
2501 vf_params.b_legacy = b_legacy_vf;
2502 vf_params.qid_usage_idx = qid_usage_idx;
2504 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2505 ¶ms, &vf_params);
2506 if (p_cid == OSAL_NULL)
2509 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2510 vf->relative_vf_id);
2511 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2512 req->pbl_addr, req->pbl_size, pq);
2513 if (rc != ECORE_SUCCESS) {
2514 status = PFVF_STATUS_FAILURE;
2515 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2517 status = PFVF_STATUS_SUCCESS;
2518 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2519 p_queue->cids[qid_usage_idx].b_is_tx = true;
2524 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
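/* Closes a contiguous range of VF Rx queues. Per qzone at most one Rx
 * cid can exist, so each iteration searches the qzone's cid array for
 * the Rx entry before issuing the queue-stop.
 */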
2528 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2529 struct ecore_vf_info *vf,
2532 bool cqe_completion)
2534 enum _ecore_status_t rc = ECORE_SUCCESS;
2537 /* TODO - improve validation [wrap around] */
2538 if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2541 for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2542 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
2543 struct ecore_queue_cid **pp_cid = OSAL_NULL;
2545 /* There can be at most a single Rx per qzone. Find it */
2546 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
2547 if (p_queue->cids[i].p_cid &&
2548 !p_queue->cids[i].b_is_tx) {
2549 pp_cid = &p_queue->cids[i].p_cid;
2553 if (pp_cid == OSAL_NULL) {
2554 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2555 "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
2556 vf->relative_vf_id, qid);
2560 rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
2561 false, cqe_completion);
2562 if (rc != ECORE_SUCCESS)
2565 *pp_cid = OSAL_NULL;
2566 vf->num_active_rxqs--;
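/* Closes a contiguous range of VF Tx queues; unlike Rx, a qzone may
 * hold several Tx cids, so every Tx entry in the range is stopped.
 */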
2572 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2573 struct ecore_vf_info *vf,
2574 u16 txq_id, u8 num_txqs)
2576 enum _ecore_status_t rc = ECORE_SUCCESS;
2577 struct ecore_vf_queue *p_queue;
2580 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2581 ECORE_IOV_VALIDATE_Q_NA) ||
2582 !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
2583 ECORE_IOV_VALIDATE_Q_NA))
2586 for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2587 p_queue = &vf->vf_queues[qid];
2588 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
2589 if (p_queue->cids[j].p_cid == OSAL_NULL)
2592 if (!p_queue->cids[j].b_is_tx)
2595 rc = ecore_eth_tx_queue_stop(p_hwfn,
2596 p_queue->cids[j].p_cid);
2597 if (rc != ECORE_SUCCESS)
2600 p_queue->cids[j].p_cid = OSAL_NULL;
2607 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2608 struct ecore_ptt *p_ptt,
2609 struct ecore_vf_info *vf)
2611 u16 length = sizeof(struct pfvf_def_resp_tlv);
2612 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2613 u8 status = PFVF_STATUS_SUCCESS;
2614 struct vfpf_stop_rxqs_tlv *req;
2615 enum _ecore_status_t rc;
2617 /* We allow starting from a qid != 0; in this case we
2618 * need to make sure that qid + num_qs doesn't exceed the
2619 * actual number of queues that exist.
2621 req = &mbx->req_virt->stop_rxqs;
2622 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2623 req->num_rxqs, req->cqe_completion);
2625 status = PFVF_STATUS_FAILURE;
2627 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2631 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2632 struct ecore_ptt *p_ptt,
2633 struct ecore_vf_info *vf)
2635 u16 length = sizeof(struct pfvf_def_resp_tlv);
2636 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2637 u8 status = PFVF_STATUS_SUCCESS;
2638 struct vfpf_stop_txqs_tlv *req;
2639 enum _ecore_status_t rc;
2641 /* We allow starting from a qid != 0; in this case we
2642 * need to make sure that qid + num_qs doesn't exceed the
2643 * actual number of queues that exist.
2645 req = &mbx->req_virt->stop_txqs;
2646 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2648 status = PFVF_STATUS_FAILURE;
2650 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2654 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2655 struct ecore_ptt *p_ptt,
2656 struct ecore_vf_info *vf)
2658 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2659 u16 length = sizeof(struct pfvf_def_resp_tlv);
2660 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2661 struct vfpf_update_rxq_tlv *req;
2662 u8 status = PFVF_STATUS_FAILURE;
2663 u8 complete_event_flg;
2664 u8 complete_cqe_flg;
2665 enum _ecore_status_t rc;
2668 req = &mbx->req_virt->update_rxq;
2669 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2670 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2672 /* Validate inputs */
2673 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2674 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2675 ECORE_IOV_VALIDATE_Q_ENABLE)) {
2676 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2677 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2678 vf->relative_vf_id, req->rx_qid,
2684 for (i = 0; i < req->num_rxqs; i++) {
2685 struct ecore_vf_queue *p_queue;
2686 u16 qid = req->rx_qid + i;
2688 p_queue = &vf->vf_queues[qid];
2689 handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
2693 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2697 ECORE_SPQ_MODE_EBLOCK,
2702 status = PFVF_STATUS_SUCCESS;
2704 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
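/* The VF request buffer is a flat sequence of TLVs terminated by
 * CHANNEL_TLV_LIST_END:
 *
 *   [type|length][payload] [type|length][payload] ... [LIST_END]
 *
 * The search below walks the list looking for req_type, advancing by
 * each TLV's length and bailing out on a zero-length TLV or on
 * overrunning TLV_BUFFER_SIZE.
 */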
2708 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2709 void *p_tlvs_list, u16 req_type)
2711 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2715 if (!p_tlv->length) {
2716 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2720 if (p_tlv->type == req_type) {
2721 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2722 "Extended tlv type %s, length %d found\n",
2723 ecore_channel_tlvs_string[p_tlv->type],
2728 len += p_tlv->length;
2729 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2731 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2732 DP_NOTICE(p_hwfn, true,
2733 "TLVs has overrun the buffer size\n");
2736 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
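/* Each of the ecore_iov_vp_update_*_param() helpers below handles one
 * optional extended TLV of the VPORT_UPDATE request: it searches the
 * request for its TLV, copies the fields into the common
 * ecore_sp_vport_update_params, and records the TLV's presence in
 * tlvs_mask so the response can echo what was processed.
 */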
2742 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2743 struct ecore_sp_vport_update_params *p_data,
2744 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2746 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2747 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2749 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2750 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2754 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2755 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2756 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2757 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2758 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2762 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2763 struct ecore_sp_vport_update_params *p_data,
2764 struct ecore_vf_info *p_vf,
2765 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2767 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2768 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2770 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2771 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2775 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2777 /* Ignore the VF request if we're forcing a vlan */
2778 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2779 p_data->update_inner_vlan_removal_flg = 1;
2780 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2783 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2787 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2788 struct ecore_sp_vport_update_params *p_data,
2789 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2791 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2792 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2794 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2795 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2796 if (!p_tx_switch_tlv)
2800 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2801 DP_NOTICE(p_hwfn, false,
2802 "FPGA: Ignore tx-switching configuration originating"
2808 p_data->update_tx_switching_flg = 1;
2809 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2810 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2814 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2815 struct ecore_sp_vport_update_params *p_data,
2816 struct ecore_iov_vf_mbx *p_mbx,
2819 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2820 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2822 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2823 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2827 p_data->update_approx_mcast_flg = 1;
2828 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2829 sizeof(unsigned long) *
2830 ETH_MULTICAST_MAC_BINS_IN_REGS);
2831 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2835 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2836 struct ecore_sp_vport_update_params *p_data,
2837 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2839 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2840 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2841 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2843 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2844 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2848 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2849 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2850 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2851 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2852 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2856 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2857 struct ecore_sp_vport_update_params *p_data,
2858 struct ecore_iov_vf_mbx *p_mbx,
2861 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2862 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2864 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2865 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2866 if (!p_accept_any_vlan)
2869 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2870 p_data->update_accept_any_vlan_flg =
2871 p_accept_any_vlan->update_accept_any_vlan_flg;
2872 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2876 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2877 struct ecore_vf_info *vf,
2878 struct ecore_sp_vport_update_params *p_data,
2879 struct ecore_rss_params *p_rss,
2880 struct ecore_iov_vf_mbx *p_mbx,
2881 u16 *tlvs_mask, u16 *tlvs_accepted)
2883 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2884 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2885 bool b_reject = false;
2889 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2890 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2892 p_data->rss_params = OSAL_NULL;
2896 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2898 p_rss->update_rss_config =
2899 !!(p_rss_tlv->update_rss_flags &
2900 VFPF_UPDATE_RSS_CONFIG_FLAG);
2901 p_rss->update_rss_capabilities =
2902 !!(p_rss_tlv->update_rss_flags &
2903 VFPF_UPDATE_RSS_CAPS_FLAG);
2904 p_rss->update_rss_ind_table =
2905 !!(p_rss_tlv->update_rss_flags &
2906 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2907 p_rss->update_rss_key =
2908 !!(p_rss_tlv->update_rss_flags &
2909 VFPF_UPDATE_RSS_KEY_FLAG);
2911 p_rss->rss_enable = p_rss_tlv->rss_enable;
2912 p_rss->rss_eng_id = vf->rss_eng_id;
2913 p_rss->rss_caps = p_rss_tlv->rss_caps;
2914 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2915 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2916 sizeof(p_rss->rss_key));
2918 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2919 (1 << p_rss_tlv->rss_table_size_log));
2921 for (i = 0; i < table_size; i++) {
2922 struct ecore_queue_cid *p_cid;
2924 q_idx = p_rss_tlv->rss_ind_table[i];
2925 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
2926 ECORE_IOV_VALIDATE_Q_ENABLE)) {
2927 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2928 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2929 vf->relative_vf_id, q_idx);
2934 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
2935 &vf->vf_queues[q_idx]);
2936 p_rss->rss_ind_table[i] = p_cid;
2939 p_data->rss_params = p_rss;
2941 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2943 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2947 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2948 struct ecore_vf_info *vf,
2949 struct ecore_sp_vport_update_params *p_data,
2950 struct ecore_sge_tpa_params *p_sge_tpa,
2951 struct ecore_iov_vf_mbx *p_mbx,
2954 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2955 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2957 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2958 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2960 if (!p_sge_tpa_tlv) {
2961 p_data->sge_tpa_params = OSAL_NULL;
2965 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
2967 p_sge_tpa->update_tpa_en_flg =
2968 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2969 p_sge_tpa->update_tpa_param_flg =
2970 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2971 VFPF_UPDATE_TPA_PARAM_FLAG);
2973 p_sge_tpa->tpa_ipv4_en_flg =
2974 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2975 p_sge_tpa->tpa_ipv6_en_flg =
2976 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2977 p_sge_tpa->tpa_pkt_split_flg =
2978 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2979 p_sge_tpa->tpa_hdr_data_split_flg =
2980 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2981 p_sge_tpa->tpa_gro_consistent_flg =
2982 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2984 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2985 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2986 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2987 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2988 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2990 p_data->sge_tpa_params = p_sge_tpa;
2992 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
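/* Top-level VPORT_UPDATE handler: collects all extended TLVs into a
 * single sp_vport_update_params, gives the upper layer a chance to
 * veto them [tlvs_accepted may end up a subset of tlvs_mask], and
 * posts one vport-update ramrod for whatever survived.
 */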
2995 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
2996 struct ecore_ptt *p_ptt,
2997 struct ecore_vf_info *vf)
2999 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3000 struct ecore_sp_vport_update_params params;
3001 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3002 struct ecore_sge_tpa_params sge_tpa_params;
3003 u16 tlvs_mask = 0, tlvs_accepted = 0;
3004 u8 status = PFVF_STATUS_SUCCESS;
3006 enum _ecore_status_t rc;
3008 /* Validate PF can send such a request */
3009 if (!vf->vport_instance) {
3010 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3011 "No VPORT instance available for VF[%d],"
3012 " failing vport update\n",
3014 status = PFVF_STATUS_FAILURE;
3018 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3019 if (p_rss_params == OSAL_NULL) {
3020 status = PFVF_STATUS_FAILURE;
3024 OSAL_MEMSET(¶ms, 0, sizeof(params));
3025 params.opaque_fid = vf->opaque_fid;
3026 params.vport_id = vf->vport_id;
3027 params.rss_params = OSAL_NULL;
3029 /* Search for extended tlvs list and update values
3030 * from VF in struct ecore_sp_vport_update_params.
3032 ecore_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
3033 ecore_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask);
3034 ecore_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask);
3035 ecore_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
3036 ecore_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask);
3037 ecore_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask);
3038 ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms,
3039 &sge_tpa_params, mbx, &tlvs_mask);
3041 tlvs_accepted = tlvs_mask;
3043 /* Some of the extended TLVs need to be validated first; in that case,
3044 * they can update the mask without updating the accepted [so that
3045 * the PF can communicate to the VF that it has rejected the request].
3047 ecore_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params,
3048 mbx, &tlvs_mask, &tlvs_accepted);
3050 /* Just log a message if there isn't a single extended TLV in the buffer.
3051 * Once all features of the vport-update ramrod can be requested by the VF
3052 * as extended TLVs in the buffer, an error can be returned in the response
3053 * instead when no extended TLV is present.
3055 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3056 ¶ms, &tlvs_accepted) !=
3059 status = PFVF_STATUS_NOT_SUPPORTED;
3063 if (!tlvs_accepted) {
3065 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3066 "Upper-layer prevents said VF"
3067 " configuration\n");
3069 DP_NOTICE(p_hwfn, true,
3070 "No feature tlvs found for vport update\n");
3071 status = PFVF_STATUS_NOT_SUPPORTED;
3075 rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK,
3079 status = PFVF_STATUS_FAILURE;
3082 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3083 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3084 tlvs_mask, tlvs_accepted);
3085 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
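/* The PF keeps a shadow of the VF's unicast VLAN configuration so it
 * can be replayed once a forced VLAN is removed. Note the shadow holds
 * ECORE_ETH_VF_NUM_VLAN_FILTERS + 1 entries - presumably the extra
 * entry accounts for VLAN 0.
 */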
3088 static enum _ecore_status_t
3089 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3090 struct ecore_vf_info *p_vf,
3091 struct ecore_filter_ucast *p_params)
3095 /* First remove entries and then add new ones */
3096 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3097 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3098 if (p_vf->shadow_config.vlans[i].used &&
3099 p_vf->shadow_config.vlans[i].vid ==
3101 p_vf->shadow_config.vlans[i].used = false;
3104 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3105 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3106 "VF [%d] - Tries to remove a non-existing"
3108 p_vf->relative_vf_id);
3111 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3112 p_params->opcode == ECORE_FILTER_FLUSH) {
3113 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3114 p_vf->shadow_config.vlans[i].used = false;
3117 /* In forced mode, we're willing to remove entries - but we don't add
3120 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3121 return ECORE_SUCCESS;
3123 if (p_params->opcode == ECORE_FILTER_ADD ||
3124 p_params->opcode == ECORE_FILTER_REPLACE) {
3125 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3126 if (p_vf->shadow_config.vlans[i].used)
3129 p_vf->shadow_config.vlans[i].used = true;
3130 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3134 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3135 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3136 "VF [%d] - Tries to configure more than %d"
3138 p_vf->relative_vf_id,
3139 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3144 return ECORE_SUCCESS;
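/* Same idea for MACs: maintain a shadow of the VF's unicast MACs in
 * ECORE_ETH_VF_NUM_MAC_FILTERS slots, with an all-zero entry meaning
 * 'free'. While a forced MAC is in place no change is accepted.
 */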
3147 static enum _ecore_status_t
3148 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3149 struct ecore_vf_info *p_vf,
3150 struct ecore_filter_ucast *p_params)
3152 char empty_mac[ETH_ALEN];
3155 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3157 /* If we're in forced-mode, we don't allow any change */
3158 /* TODO - this would change if we were ever to implement logic for
3159 * removing a forced MAC altogether [in which case, like for vlans,
3160 * we should be able to re-trace previous configuration.
3162 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3163 return ECORE_SUCCESS;
3165 /* First remove entries and then add new ones */
3166 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3167 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3168 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3169 p_params->mac, ETH_ALEN)) {
3170 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3176 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3177 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3178 "MAC isn't configured\n");
3181 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3182 p_params->opcode == ECORE_FILTER_FLUSH) {
3183 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3184 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3187 /* List the new MAC address */
3188 if (p_params->opcode != ECORE_FILTER_ADD &&
3189 p_params->opcode != ECORE_FILTER_REPLACE)
3190 return ECORE_SUCCESS;
3192 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3193 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3194 empty_mac, ETH_ALEN)) {
3195 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3196 p_params->mac, ETH_ALEN);
3197 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3198 "Added MAC at %d entry in shadow\n", i);
3203 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3204 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3205 "No available place for MAC\n");
3209 return ECORE_SUCCESS;
3212 static enum _ecore_status_t
3213 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3214 struct ecore_vf_info *p_vf,
3215 struct ecore_filter_ucast *p_params)
3217 enum _ecore_status_t rc = ECORE_SUCCESS;
3219 if (p_params->type == ECORE_FILTER_MAC) {
3220 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3221 if (rc != ECORE_SUCCESS)
3225 if (p_params->type == ECORE_FILTER_VLAN)
3226 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3231 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3232 struct ecore_ptt *p_ptt,
3233 struct ecore_vf_info *vf)
3235 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3236 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3237 struct vfpf_ucast_filter_tlv *req;
3238 u8 status = PFVF_STATUS_SUCCESS;
3239 struct ecore_filter_ucast params;
3240 enum _ecore_status_t rc;
3242 /* Prepare the unicast filter params */
3243 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_filter_ucast));
3244 req = &mbx->req_virt->ucast_filter;
3245 params.opcode = (enum ecore_filter_opcode)req->opcode;
3246 params.type = (enum ecore_filter_ucast_type)req->type;
3248 /* @@@TBD - We might need logic on the HV side to determine this */
3249 params.is_rx_filter = 1;
3250 params.is_tx_filter = 1;
3251 params.vport_to_remove_from = vf->vport_id;
3252 params.vport_to_add_to = vf->vport_id;
3253 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3254 params.vlan = req->vlan;
3256 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3257 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3258 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3259 vf->abs_vf_id, params.opcode, params.type,
3260 params.is_rx_filter ? "RX" : "",
3261 params.is_tx_filter ? "TX" : "",
3262 params.vport_to_add_to,
3263 params.mac[0], params.mac[1], params.mac[2],
3264 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3266 if (!vf->vport_instance) {
3267 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3268 "No VPORT instance available for VF[%d],"
3269 " failing ucast MAC configuration\n",
3271 status = PFVF_STATUS_FAILURE;
3275 /* Update shadow copy of the VF configuration */
3276 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) !=
3278 status = PFVF_STATUS_FAILURE;
3282 /* Determine whether the unicast filtering is acceptable to the PF */
3283 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3284 (params.type == ECORE_FILTER_VLAN ||
3285 params.type == ECORE_FILTER_MAC_VLAN)) {
3286 /* Once a VLAN is forced or a PVID is set, do not allow
3287 * adding/replacing any further VLANs.
3289 if (params.opcode == ECORE_FILTER_ADD ||
3290 params.opcode == ECORE_FILTER_REPLACE)
3291 status = PFVF_STATUS_FORCED;
3295 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3296 (params.type == ECORE_FILTER_MAC ||
3297 params.type == ECORE_FILTER_MAC_VLAN)) {
3298 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3299 (params.opcode != ECORE_FILTER_ADD &&
3300 params.opcode != ECORE_FILTER_REPLACE))
3301 status = PFVF_STATUS_FORCED;
3305 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, ¶ms);
3306 if (rc == ECORE_EXISTS) {
3308 } else if (rc == ECORE_INVAL) {
3309 status = PFVF_STATUS_FAILURE;
3313 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms,
3314 ECORE_SPQ_MODE_CB, OSAL_NULL);
3316 status = PFVF_STATUS_FAILURE;
3319 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3320 sizeof(struct pfvf_def_resp_tlv), status);
3323 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3324 struct ecore_ptt *p_ptt,
3325 struct ecore_vf_info *vf)
3330 for (i = 0; i < vf->num_sbs; i++)
3331 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3333 vf->opaque_fid, false);
3335 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3336 sizeof(struct pfvf_def_resp_tlv),
3337 PFVF_STATUS_SUCCESS);
3340 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3341 struct ecore_ptt *p_ptt,
3342 struct ecore_vf_info *vf)
3344 u16 length = sizeof(struct pfvf_def_resp_tlv);
3345 u8 status = PFVF_STATUS_SUCCESS;
3347 /* Disable Interrupts for VF */
3348 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3350 /* Reset Permission table */
3351 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3353 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3357 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3358 struct ecore_ptt *p_ptt,
3359 struct ecore_vf_info *p_vf)
3361 u16 length = sizeof(struct pfvf_def_resp_tlv);
3362 u8 status = PFVF_STATUS_SUCCESS;
3363 enum _ecore_status_t rc = ECORE_SUCCESS;
3365 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3367 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3368 /* Stopping the VF */
3369 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3372 if (rc != ECORE_SUCCESS) {
3373 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3375 status = PFVF_STATUS_FAILURE;
3378 p_vf->state = VF_STOPPED;
3381 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3385 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3386 struct ecore_ptt *p_ptt,
3387 struct ecore_vf_info *vf)
3389 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3390 enum _ecore_status_t rc = ECORE_SUCCESS;
3391 struct vfpf_update_coalesce *req;
3392 u8 status = PFVF_STATUS_FAILURE;
3393 struct ecore_queue_cid *p_cid;
3394 u16 rx_coal, tx_coal;
3398 req = &mbx->req_virt->update_coalesce;
3400 rx_coal = req->rx_coal;
3401 tx_coal = req->tx_coal;
3404 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3405 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3407 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3408 vf->abs_vf_id, qid);
3412 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3413 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3415 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3416 vf->abs_vf_id, qid);
3420 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3421 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3422 vf->abs_vf_id, rx_coal, tx_coal, qid);
3425 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
3426 &vf->vf_queues[qid]);
3428 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3429 if (rc != ECORE_SUCCESS) {
3430 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3431 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3432 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3437 /* TODO - in future, it might be possible to pass this in a per-cid
3438 * granularity. For now, do this for all Tx queues.
3441 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3443 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3444 if (p_queue->cids[i].p_cid == OSAL_NULL)
3447 if (!p_queue->cids[i].b_is_tx)
3450 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3451 p_queue->cids[i].p_cid);
3452 if (rc != ECORE_SUCCESS) {
3453 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3454 "VF[%d]: Unable to set tx queue coalesce\n",
3461 status = PFVF_STATUS_SUCCESS;
3463 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3464 sizeof(struct pfvf_def_resp_tlv), status);
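/* FLR helper: pretends to be the VF and polls DORQ_REG_VF_USAGE_CNT,
 * for up to 50 iterations, until the doorbell-queue usage counter
 * drains to zero, then restores the PF's own fid. A counter that never
 * drains yields ECORE_TIMEOUT.
 */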
3467 static enum _ecore_status_t
3468 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3469 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3474 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3476 for (cnt = 0; cnt < 50; cnt++) {
3477 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3482 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3486 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3487 p_vf->abs_vf_id, val);
3488 return ECORE_TIMEOUT;
3491 return ECORE_SUCCESS;
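/* FLR helper: snapshots the PBF consumer/producer pair of every VOQ,
 * then waits until each consumer has advanced by at least the initial
 * prod - cons distance, i.e. until all blocks in flight at FLR time
 * have been consumed.
 */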
3494 static enum _ecore_status_t
3495 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3496 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3498 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3501 /* Read initial consumers & producers */
3502 for (i = 0; i < MAX_NUM_VOQS; i++) {
3505 cons[i] = ecore_rd(p_hwfn, p_ptt,
3506 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3508 prod = ecore_rd(p_hwfn, p_ptt,
3509 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3511 distance[i] = prod - cons[i];
3514 /* Wait for consumers to pass the producers */
3516 for (cnt = 0; cnt < 50; cnt++) {
3517 for (; i < MAX_NUM_VOQS; i++) {
3520 tmp = ecore_rd(p_hwfn, p_ptt,
3521 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3523 if (distance[i] > tmp - cons[i])
3527 if (i == MAX_NUM_VOQS)
3534 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3535 p_vf->abs_vf_id, i);
3536 return ECORE_TIMEOUT;
3539 return ECORE_SUCCESS;
3542 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3543 struct ecore_vf_info *p_vf,
3544 struct ecore_ptt *p_ptt)
3546 enum _ecore_status_t rc;
3548 /* TODO - add SRC and TM polling once we add storage IOV */
3550 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3554 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3558 return ECORE_SUCCESS;
3561 static enum _ecore_status_t
3562 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3563 struct ecore_ptt *p_ptt,
3564 u16 rel_vf_id, u32 *ack_vfs)
3566 struct ecore_vf_info *p_vf;
3567 enum _ecore_status_t rc = ECORE_SUCCESS;
3569 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3571 return ECORE_SUCCESS;
3573 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3574 (1ULL << (rel_vf_id % 64))) {
3575 u16 vfid = p_vf->abs_vf_id;
3577 /* TODO - should we lock channel? */
3579 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3580 "VF[%d] - Handling FLR\n", vfid);
3582 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3584 /* If VF isn't active, no need for anything but SW */
3588 /* TODO - what to do in case of failure? */
3589 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3590 if (rc != ECORE_SUCCESS)
3593 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3595 /* TODO - what now? What a mess... */
3596 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3600 /* Workaround to make VF-PF channel ready, as FW
3601 * doesn't do that as a part of FLR.
3604 GTT_BAR0_MAP_REG_USDM_RAM +
3605 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3607 /* VF_STOPPED has to be set only after final cleanup
3608 * but prior to re-enabling the VF.
3610 p_vf->state = VF_STOPPED;
3612 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3614 /* TODO - again, a mess... */
3615 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
3620 /* Mark VF for ack and clean pending state */
3621 if (p_vf->state == VF_RESET)
3622 p_vf->state = VF_STOPPED;
3623 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3624 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3625 ~(1ULL << (rel_vf_id % 64));
3626 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3627 ~(1ULL << (rel_vf_id % 64));
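/* Walks all VFs with a pending FLR indication: cleans up SW state,
 * polls HW [DORQ/PBF] until the VF quiesces, requests final cleanup
 * from FW, re-opens the VF-PF channel and re-enables VF access. Acked
 * VFs are collected into a bitmap - bit (vfid % 32) of
 * ack_vfs[vfid / 32] - which is later passed to the MFW.
 */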
3633 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3634 struct ecore_ptt *p_ptt)
3636 u32 ack_vfs[VF_MAX_STATIC / 32];
3637 enum _ecore_status_t rc = ECORE_SUCCESS;
3640 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3642 /* Since BRB <-> PRS interface can't be tested as part of the flr
3643 * polling due to HW limitations, simply sleep a bit. And since
3644 * there's no need to wait per-VF, do it before looping.
3648 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3649 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3651 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3655 enum _ecore_status_t
3656 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3657 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3659 u32 ack_vfs[VF_MAX_STATIC / 32];
3660 enum _ecore_status_t rc = ECORE_SUCCESS;
3662 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3664 /* Wait instead of polling the BRB <-> PRS interface */
3667 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3669 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3673 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3678 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3679 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3680 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3681 "[%08x,...,%08x]: %08x\n",
3682 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3684 if (!p_hwfn->p_dev->p_iov_info) {
3685 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3690 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3691 struct ecore_vf_info *p_vf;
3694 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3698 vfid = p_vf->abs_vf_id;
3699 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3700 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3701 u16 rel_vf_id = p_vf->relative_vf_id;
3703 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3704 "VF[%d] [rel %d] got FLR-ed\n",
3707 p_vf->state = VF_RESET;
3709 /* No need to lock here, since pending_flr should
3710 * only change here and before ACKing the MFW. Since
3711 * the MFW will not trigger an additional attention for
3712 * VF FLR until ACKed, we're safe.
3714 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3722 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3724 struct ecore_mcp_link_params *p_params,
3725 struct ecore_mcp_link_state *p_link,
3726 struct ecore_mcp_link_capabilities *p_caps)
3728 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3729 struct ecore_bulletin_content *p_bulletin;
3734 p_bulletin = p_vf->bulletin.p_virt;
3737 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3739 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3741 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
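/* Central dispatcher for VF->PF mailbox requests. The first TLV's type
 * selects the handler; requests from VFs marked malicious are not
 * processed but answered with PFVF_STATUS_MALICIOUS, and unknown TLVs
 * are answered with NOT_SUPPORTED only when the reply address matches
 * the one given at acquisition time.
 */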
3744 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3745 struct ecore_ptt *p_ptt, int vfid)
3747 struct ecore_iov_vf_mbx *mbx;
3748 struct ecore_vf_info *p_vf;
3750 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3754 mbx = &p_vf->vf_mbx;
3756 /* ecore_iov_process_mbx_request */
3759 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3761 mbx->first_tlv = mbx->req_virt->first_tlv;
3763 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3764 p_vf->relative_vf_id,
3765 mbx->first_tlv.tl.type);
3767 /* Lock the per vf op mutex and note the locker's identity.
3768 * The unlock will take place in mbx response.
3770 ecore_iov_lock_vf_pf_channel(p_hwfn,
3771 p_vf, mbx->first_tlv.tl.type);
3773 /* check if tlv type is known */
3774 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3775 !p_vf->b_malicious) {
3776 /* switch on the opcode */
3777 switch (mbx->first_tlv.tl.type) {
3778 case CHANNEL_TLV_ACQUIRE:
3779 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3781 case CHANNEL_TLV_VPORT_START:
3782 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3784 case CHANNEL_TLV_VPORT_TEARDOWN:
3785 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3787 case CHANNEL_TLV_START_RXQ:
3788 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3790 case CHANNEL_TLV_START_TXQ:
3791 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3793 case CHANNEL_TLV_STOP_RXQS:
3794 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3796 case CHANNEL_TLV_STOP_TXQS:
3797 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3799 case CHANNEL_TLV_UPDATE_RXQ:
3800 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3802 case CHANNEL_TLV_VPORT_UPDATE:
3803 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3805 case CHANNEL_TLV_UCAST_FILTER:
3806 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3808 case CHANNEL_TLV_CLOSE:
3809 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3811 case CHANNEL_TLV_INT_CLEANUP:
3812 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3814 case CHANNEL_TLV_RELEASE:
3815 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3817 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3818 ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3820 case CHANNEL_TLV_COALESCE_UPDATE:
3821 ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3824 } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3825 /* If we've received a message from a VF we consider malicious,
3826 * we ignore the message unless it's one for RELEASE, in which
3827 * case we'll give it the benefit of the doubt, allowing the
3828 * next loaded driver to start again.
3830 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
3831 /* TODO - initiate FLR, remove malicious indication */
3832 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3833 "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
3836 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3837 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3838 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3841 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3842 mbx->first_tlv.tl.type,
3843 sizeof(struct pfvf_def_resp_tlv),
3844 PFVF_STATUS_MALICIOUS);
3846 /* unknown TLV - this may belong to a VF driver from the future
3847 * - a version written after this PF driver was written, which
3848 * supports features unknown as of yet. Too bad since we don't
3849 * support them. Or this may be because someone wrote a crappy
3850 * VF driver and is sending garbage over the channel.
3852 DP_NOTICE(p_hwfn, false,
3853 "VF[%02x]: unknown TLV. type %04x length %04x"
3854 " padding %08x reply address %lu\n",
3856 mbx->first_tlv.tl.type,
3857 mbx->first_tlv.tl.length,
3858 mbx->first_tlv.padding,
3859 (unsigned long)mbx->first_tlv.reply_address);
3861 /* Try replying in case reply address matches the acquisition's
3864 if (p_vf->acquire.first_tlv.reply_address &&
3865 (mbx->first_tlv.reply_address ==
3866 p_vf->acquire.first_tlv.reply_address))
3867 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3868 mbx->first_tlv.tl.type,
3869 sizeof(struct pfvf_def_resp_tlv),
3870 PFVF_STATUS_NOT_SUPPORTED);
3872 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3873 "VF[%02x]: Can't respond to TLV -"
3874 " no valid reply address\n",
3878 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
3879 mbx->first_tlv.tl.type);
3881 #ifdef CONFIG_ECORE_SW_CHANNEL
3882 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
3883 mbx->sw_mbx.response_offset = 0;
3887 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
3889 u64 add_bit = 1ULL << (vfid % 64);
3891 /* TODO - add locking mechanisms [no atomics in ecore, so we can't
3892 * add the lock inside the ecore_pf_iov struct].
3894 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3897 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
3900 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3902 /* TODO - Take a lock */
3903 OSAL_MEMCPY(events, p_pending_events,
3904 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3905 OSAL_MEMSET(p_pending_events, 0,
3906 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
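/* Translates an absolute VF id [as reported by HW events] into this
 * hwfn's vfs_array entry, using first_vf_in_pf as the base; the sanity
 * check rejects ids that don't belong to this PF.
 */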
3909 static struct ecore_vf_info *
3910 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
3912 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
3914 if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3915 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3916 "Got indication for VF [abs 0x%08x] that cannot be"
3922 return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3925 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
3927 struct regpair *vf_msg)
3929 struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
3933 return ECORE_SUCCESS;
3935 /* Record the physical address of the request so that the handler
3936 * can later copy the message from it.
3938 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3940 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
3943 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
3944 struct malicious_vf_eqe_data *p_data)
3946 struct ecore_vf_info *p_vf;
3948 p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
3954 "VF [%d] - Malicious behavior [%02x]\n",
3955 p_vf->abs_vf_id, p_data->errId);
3957 p_vf->b_malicious = true;
3959 OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
3962 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
3965 union event_ring_data *data)
3968 case COMMON_EVENT_VF_PF_CHANNEL:
3969 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
3970 &data->vf_pf_channel.msg_addr);
3971 case COMMON_EVENT_VF_FLR:
3972 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3973 "VF-FLR is still not supported\n");
3974 return ECORE_SUCCESS;
3975 case COMMON_EVENT_MALICIOUS_VF:
3976 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3977 return ECORE_SUCCESS;
3979 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
3985 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3987 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3988 (1ULL << (rel_vf_id % 64)));
3991 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3993 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
3999 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4000 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
4004 return E4_MAX_NUM_VFS;
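/* Copies the pending VF request from the VF's physical mailbox into
 * the PF's request buffer via DMAE; the size parameter is given in
 * dwords, hence sizeof(union vfpf_tlvs) / 4.
 */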
4007 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
4008 struct ecore_ptt *ptt, int vfid)
4010 struct ecore_dmae_params params;
4011 struct ecore_vf_info *vf_info;
4013 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4017 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params));
4018 params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
4019 params.src_vfid = vf_info->abs_vf_id;
4021 if (ecore_dmae_host2host(p_hwfn, ptt,
4022 vf_info->vf_mbx.pending_req,
4023 vf_info->vf_mbx.req_phys,
4024 sizeof(union vfpf_tlvs) / 4, ¶ms)) {
4025 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4026 "Failed to copy message from VF 0x%02x\n", vfid);
4031 return ECORE_SUCCESS;
4034 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
4037 struct ecore_vf_info *vf_info;
4040 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4042 DP_NOTICE(p_hwfn->p_dev, true,
4043 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4046 if (vf_info->b_malicious) {
4047 DP_NOTICE(p_hwfn->p_dev, false,
4048 "Can't set forced MAC to malicious VF [%d]\n",
4053 feature = 1 << MAC_ADDR_FORCED;
4054 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4056 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4057 /* Forced MAC will disable MAC_ADDR */
4058 vf_info->bulletin.p_virt->valid_bitmap &=
4059 ~(1 << VFPF_BULLETIN_MAC_ADDR);
4061 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4064 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
4067 struct ecore_vf_info *vf_info;
4070 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4072 DP_NOTICE(p_hwfn->p_dev, true,
4073 "Can not set MAC, invalid vfid [%d]\n", vfid);
4076 if (vf_info->b_malicious) {
4077 DP_NOTICE(p_hwfn->p_dev, false,
4078 "Can't set MAC to malicious VF [%d]\n",
4083 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
4084 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4085 "Can not set MAC, Forced MAC is configured\n");
4089 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4090 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4092 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4094 return ECORE_SUCCESS;
4097 enum _ecore_status_t
4098 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
4099 bool b_untagged_only, int vfid)
4101 struct ecore_vf_info *vf_info;
4104 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4106 DP_NOTICE(p_hwfn->p_dev, true,
4107 "Can not set untagged default, invalid vfid [%d]\n",
4111 if (vf_info->b_malicious) {
4112 DP_NOTICE(p_hwfn->p_dev, false,
4113 "Can't set untagged default to malicious VF [%d]\n",
4118 /* Since this is configurable only during vport-start, don't accept it
4119 * if we're past that point.
4121 if (vf_info->state == VF_ENABLED) {
4122 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4123 "Can't support untagged change for vfid[%d] -"
4124 " VF is already active\n",
4129 /* Set configuration; This will later be taken into account during the
4130 * VF initialization.
4132 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
4133 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
4134 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4136 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
4139 return ECORE_SUCCESS;
4142 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4145 struct ecore_vf_info *vf_info;
4147 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4151 *opaque_fid = vf_info->opaque_fid;
4154 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4157 struct ecore_vf_info *vf_info;
4160 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4162 DP_NOTICE(p_hwfn->p_dev, true,
4163 "Can not set forced MAC, invalid vfid [%d]\n",
4167 if (vf_info->b_malicious) {
4168 DP_NOTICE(p_hwfn->p_dev, false,
4169 "Can't set forced vlan to malicious VF [%d]\n",
4174 feature = 1 << VLAN_ADDR_FORCED;
4175 vf_info->bulletin.p_virt->pvid = pvid;
4177 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4179 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4181 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
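
/* Usage sketch (illustrative only, compiled out): force a PVID on VF 0 and
 * later remove it. As the code above shows, pvid == 0 clears
 * VLAN_ADDR_FORCED from the bulletin instead of setting it. The values and
 * the example_ helper name are hypothetical.
 */
#if 0
static void example_force_vf_pvid(struct ecore_hwfn *p_hwfn)
{
	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, 0);	/* force PVID 100 */
	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 0, 0);	/* remove PVID */
}
#endif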
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}
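
/* Usage sketch (illustrative only, compiled out): request spoof checking on
 * VF 0. If the VF's vport is not up yet, the value is only latched in
 * req_spoofchk_val and applied once the vport starts. The vfid and the
 * example_ helper name are hypothetical.
 */
#if 0
static void example_enable_spoofchk(struct ecore_hwfn *p_hwfn)
{
	if (ecore_iov_spoofchk_set(p_hwfn, 0, true) != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false,
			  "Failed to request spoofchk for VF 0\n");
}
#endif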
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
	    : ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}
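
/* Usage sketch (illustrative only, compiled out): fetch VF 0's request
 * mailbox for debug inspection. The example_ helper name is hypothetical.
 */
#if 0
static void example_dump_vf_mbx(struct ecore_hwfn *p_hwfn)
{
	void *req_virt = OSAL_NULL;
	u16 req_size = 0;

	ecore_iov_get_vf_req_virt_mbx_params(p_hwfn, 0, &req_virt, &req_size);
	if (req_virt)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0] request mailbox at %p, %u bytes\n",
			   req_virt, req_size);
}
#endif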
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif
bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed,"
				  " can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return ecore_configure_vport_wfq(p_dev, vport_id, rate);
}
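
/* Usage sketch (illustrative only, compiled out): cap VF 0's transmit rate
 * per-hwfn and guarantee it a device-wide minimum. The numeric rates are
 * example values in whatever unit ecore_init_vport_rl() and
 * ecore_configure_vport_wfq() expect (Mb/s in the equivalent Linux qed
 * driver); the example_ helper name is hypothetical.
 */
#if 0
static void example_set_vf_rates(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	ecore_iov_configure_tx_rate(p_hwfn, p_ptt, 0, 100);	/* max rate */
	ecore_iov_configure_min_tx_rate(p_hwfn->p_dev, 0, 25);	/* min rate */
}
#endif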
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}
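
/* Usage sketch (illustrative only, compiled out): read VF 0's vport
 * statistics; as shown above, the call fails unless the VF is in
 * VF_ENABLED state. The example_ helper name is hypothetical.
 */
#if 0
static void example_read_vf_stats(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt)
{
	struct ecore_eth_stats stats;

	OSAL_MEMSET(&stats, 0, sizeof(stats));
	if (ecore_iov_get_vf_stats(p_hwfn, p_ptt, 0, &stats) != ECORE_SUCCESS)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0] statistics are not available\n");
}
#endif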
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}
enum _ecore_status_t
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;