/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_l2.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_vf.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 fw_return_code);

const char *qede_ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE",	/* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_CLOSE",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_QID",
	"CHANNEL_TLV_COALESCE_READ",
	"CHANNEL_TLV_BULLETIN_UPDATE_MAC",
	"CHANNEL_TLV_UPDATE_MTU",
	"CHANNEL_TLV_RDMA_ACQUIRE",
	"CHANNEL_TLV_RDMA_START",
	"CHANNEL_TLV_RDMA_STOP",
	"CHANNEL_TLV_RDMA_ADD_USER",
	"CHANNEL_TLV_RDMA_REMOVE_USER",
	"CHANNEL_TLV_RDMA_QUERY_COUNTERS",
	"CHANNEL_TLV_RDMA_ALLOC_TID",
	"CHANNEL_TLV_RDMA_REGISTER_TID",
	"CHANNEL_TLV_RDMA_DEREGISTER_TID",
	"CHANNEL_TLV_RDMA_FREE_TID",
	"CHANNEL_TLV_RDMA_CREATE_CQ",
	"CHANNEL_TLV_RDMA_RESIZE_CQ",
	"CHANNEL_TLV_RDMA_DESTROY_CQ",
	"CHANNEL_TLV_RDMA_CREATE_QP",
	"CHANNEL_TLV_RDMA_MODIFY_QP",
	"CHANNEL_TLV_RDMA_QUERY_QP",
	"CHANNEL_TLV_RDMA_DESTROY_QP",
	"CHANNEL_TLV_RDMA_CREATE_SRQ",
	"CHANNEL_TLV_RDMA_MODIFY_SRQ",
	"CHANNEL_TLV_RDMA_DESTROY_SRQ",
	"CHANNEL_TLV_RDMA_QUERY_PORT",
	"CHANNEL_TLV_RDMA_QUERY_DEVICE",
	"CHANNEL_TLV_RDMA_IWARP_CONNECT",
	"CHANNEL_TLV_RDMA_IWARP_ACCEPT",
	"CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN",
	"CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN",
	"CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN",
	"CHANNEL_TLV_RDMA_IWARP_REJECT",
	"CHANNEL_TLV_RDMA_IWARP_SEND_RTR",
	"CHANNEL_TLV_ESTABLISH_LL2_CONN",
	"CHANNEL_TLV_TERMINATE_LL2_CONN",
	"CHANNEL_TLV_ASYNC_EVENT",
	"CHANNEL_TLV_SOFT_FLR",
	"CHANNEL_TLV_MAX"
};

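/* Note: a TLV's numeric type doubles as its index into the string table
 * above, which is what lets the channel lock/unlock helpers and the
 * TLV-list dumper further down print a symbolic name for any supported TLV.
 */
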
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;

	return legacy;
}

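/* Illustration: a VF whose ACQUIRE carried eth_fp_hsi_minor ==
 * ETH_HSI_VER_NO_PKT_LEN_TUNN is served the legacy Rx-producer layout,
 * while one that never requested VFPF_ACQUIRE_CAP_QUEUE_QIDS keeps the
 * legacy per-queue CID numbering; both bits may be set at once.
 */
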
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn,
		       "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
	u32 i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return OSAL_NULL;
}

enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,
};

static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  u16 qid,
					  enum ecore_iov_validate_q_mode mode,
					  bool b_is_tx)
{
	u32 i;

	if (mode == ECORE_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
}

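/* Usage sketch: the Rx/Tx validators below pass b_is_tx accordingly, while
 * callers that don't care about a queue's current state pass
 * ECORE_IOV_VALIDATE_Q_NA, which short-circuits the scan to success.
 */
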
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}

static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  false))
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  true))
			return true;

	return false;
}

enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vf_id = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}

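/* The CRC above intentionally excludes itself: it is computed over
 * [p_bulletin + crc_size, bulletin.size), so the VF side can recompute the
 * same OSAL_CRC32 over a snapshot of its copy and accept it only when both
 * the version advanced and the CRC matches (the reader lives in the VF
 * counterpart, ecore_vf.c).
 */
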
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_TOTAL_VF,
				  &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + RTE_PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_NUM_VF,
				  &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs are already set to non-zero value."
			   " Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + RTE_PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + RTE_PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + RTE_PCI_SRIOV_VF_DID,
				  &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + RTE_PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + RTE_PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + RTE_PCI_SRIOV_FUNC_LINK,
				  &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d"
			  " setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return ECORE_SUCCESS;
}

static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without alloc mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}

static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}

static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}

enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				    ecore_sriov_eqe_event);

	return ecore_iov_allocate_vfdb(p_hwfn);
}

void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
}

enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   RTE_PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, false,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}

	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device.
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}

static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}

/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}

static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}

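/* Each WAS_ERROR clear register covers 32 VFs: (abs_vfid >> 5) selects the
 * 4-byte register and (abs_vfid & 0x1f) the bit within it - e.g. abs_vfid
 * 37 clears bit 5 of the second register.
 */
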
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}

static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 abs_vf_id,
				u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* If client overrides this, don't do anything */
	if (p_hwfn->p_dev->b_dont_override_vf_msix)
		return ECORE_SUCCESS;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_for_each_vf(p_hwfn, i) {
			struct ecore_vf_info *p_vf;

			p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = OSAL_MAX_T(u8, current_max,
						 p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
						abs_vf_id, num_sbs);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					     vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief ecore_iov_config_perm_table - configure the permission
 *      zone table.
 *      The queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

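/* Entry encoding follows the {Valid, VF[7:0]} format documented above:
 * e.g. enabling queue-zone access for abs_vf_id 3 writes 0x103 (valid bit 8
 * set, VF id in bits 7:0), while disabling writes 0.
 */
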
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues =
		    (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		if (!p_block)
			continue;

		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
					p_hwfn->rel_pf_id,
					vf->abs_vf_id, 1);

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2,
				    OSAL_NULL /* default parameters */);
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}

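/* Note: the same 'val' is reused across loop iterations above; only
 * IGU_MAPPING_LINE_VECTOR_NUMBER changes per CAM line, so every line owned
 * by the VF maps {function = abs_vf_id, valid = 1, vector = qid}.
 */
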
/**
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}

void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

#ifndef ASIC_ONLY
static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	/* Increase the maximum number of DORQ FIFO entries used by child VFs */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
}
#endif

enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
{
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	u16 qid, num_irqs;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return ECORE_INVAL;
	}

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);
		return ECORE_INVAL;
	}

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);
		return ECORE_INVAL;
	}

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_irqs);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin. */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (rc != ECORE_SUCCESS)
		return rc;

	vf->b_init = true;
	p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
	    (1ULL << (vf->relative_vf_id % 64));

	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->p_iov_info->num_vfs++;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
#endif

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	if (!ecore_mcp_is_init(p_hwfn)) {
		u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
					 PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);

		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
			 sriov_dis);
	}
}
#endif

enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
		    ~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
#endif

	return ECORE_SUCCESS;
}

static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   qede_ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   qede_ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the unlocking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}

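/* The resulting buffer is a simple forward chain - e.g. after two calls:
 * [type|length|payload][type|length|payload]... - terminated by a
 * CHANNEL_TLV_LIST_END entry (see ecore_iov_prepare_resp() below).
 */
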
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, qede_ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}
		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 length,
#else
				    u16 OSAL_UNUSED length,
#endif
				    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_vf->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vf_id = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	/* Once PF copies the rc to the VF, the latter can continue and
	 * send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}

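/* Ordering matters above: the reply body (everything past the first 8
 * bytes) is DMAed first, the channel-ready flag is raised, and only then is
 * the 8-byte header carrying the status copied - so a polling VF can never
 * observe a valid status before its payload has landed.
 */
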
static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
				     size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),
			   resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      u16 relative_vf_id,
			      bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}

static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;
		}
	}

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

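/* The register holds a log2-encoded size; callers expand it to bytes with
 * (1 << ecore_iov_vf_db_bar_size(...)), as done when clamping VF CIDs below.
 */
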
static void
ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *p_vf,
				   struct vf_pf_resc_request *p_req,
				   struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (ECORE_IS_CMT(p_hwfn->p_dev))
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
					      (u8)(bar_size / db_size));
}

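/* Worked example (illustrative): db_size is the stride between two
 * consecutive legacy DEMS doorbells. With a 4KB doorbell bar and an 8-byte
 * stride, 512 doorbells fit and num_cids is left alone; a 1KB bar fits only
 * 128 doorbells, so num_cids would be capped at 128.
 */
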
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}

static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 * versatility here.
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
								 p_ptt);

	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field anyway.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}

static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

2034 static enum _ecore_status_t
2035 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
2036 struct ecore_vf_info *p_vf,
2039 enum _ecore_status_t rc = ECORE_SUCCESS;
2040 struct ecore_filter_ucast filter;
2042 if (!p_vf->vport_instance)
2045 if ((events & (1 << MAC_ADDR_FORCED)) ||
2046 p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
2047 p_vf->p_vf_info.is_trusted_configured) {
2048 /* Since there's no way [currently] of removing the MAC,
2049 * we can always assume this means we need to force it.
2051 OSAL_MEMSET(&filter, 0, sizeof(filter));
2052 filter.type = ECORE_FILTER_MAC;
2053 filter.opcode = ECORE_FILTER_REPLACE;
2054 filter.is_rx_filter = 1;
2055 filter.is_tx_filter = 1;
2056 filter.vport_to_add_to = p_vf->vport_id;
2057 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
2059 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2061 ECORE_SPQ_MODE_CB, OSAL_NULL);
2063 DP_NOTICE(p_hwfn, true,
2064 "PF failed to configure MAC for VF\n");
2068 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
2069 p_vf->p_vf_info.is_trusted_configured)
2070 p_vf->configured_features |=
2071 1 << VFPF_BULLETIN_MAC_ADDR;
2073 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
2076 if (events & (1 << VLAN_ADDR_FORCED)) {
2077 struct ecore_sp_vport_update_params vport_update;
2081 OSAL_MEMSET(&filter, 0, sizeof(filter));
2082 filter.type = ECORE_FILTER_VLAN;
2083 filter.is_rx_filter = 1;
2084 filter.is_tx_filter = 1;
2085 filter.vport_to_add_to = p_vf->vport_id;
2086 filter.vlan = p_vf->bulletin.p_virt->pvid;
2087 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2090 /* Send the ramrod */
2091 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2093 ECORE_SPQ_MODE_CB, OSAL_NULL);
2095 DP_NOTICE(p_hwfn, true,
2096 "PF failed to configure VLAN for VF\n");
2100 /* Update the default-vlan & silent vlan stripping */
2101 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2102 vport_update.opaque_fid = p_vf->opaque_fid;
2103 vport_update.vport_id = p_vf->vport_id;
2104 vport_update.update_default_vlan_enable_flg = 1;
2105 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2106 vport_update.update_default_vlan_flg = 1;
2107 vport_update.default_vlan = filter.vlan;
2109 vport_update.update_inner_vlan_removal_flg = 1;
2110 removal = filter.vlan ?
2111 1 : p_vf->shadow_config.inner_vlan_removal;
2112 vport_update.inner_vlan_removal_flg = removal;
2113 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2114 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2115 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
2117 DP_NOTICE(p_hwfn, true,
2118 "PF failed to configure VF vport for vlan\n");
2122 /* Update all the Rx queues */
2123 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2124 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2125 struct ecore_queue_cid *p_cid = OSAL_NULL;
2127 /* There can be at most 1 Rx queue on qzone. Find it */
2128 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2129 if (p_cid == OSAL_NULL)
2132 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2135 ECORE_SPQ_MODE_EBLOCK,
2138 DP_NOTICE(p_hwfn, true,
2139 "Failed to send Rx update"
2140 " fo queue[0x%04x]\n",
2141 p_cid->rel.queue_id);
2147 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2149 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2152 /* If forced features are terminated, we need to configure the shadow
2153 * configuration back again.
2156 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
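/* Handler for CHANNEL_TLV_VPORT_START: enables VF traffic, initializes the
 * VF status blocks in CAU, starts the vport with the VF-supplied parameters
 * [subject to forced configuration] and responds over the mailbox.
 */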
2161 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2162 struct ecore_ptt *p_ptt,
2163 struct ecore_vf_info *vf)
2165 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2166 struct ecore_sp_vport_start_params params;
2167 struct vfpf_vport_start_tlv *start;
2168 u8 status = PFVF_STATUS_SUCCESS;
2169 struct ecore_vf_info *vf_info;
2172 enum _ecore_status_t rc;
2174 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2175 if (!vf_info) {
2176 DP_NOTICE(p_hwfn->p_dev, true,
2177 "Failed to get VF info, invalid vfid [%d]\n",
2178 vf->relative_vf_id);
2182 vf->state = VF_ENABLED;
2183 start = &mbx->req_virt->start_vport;
2185 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2187 /* Initialize Status block in CAU */
2188 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2189 if (!start->sb_addr[sb_id]) {
2190 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2191 "VF[%d] did not fill the address of SB %d\n",
2192 vf->relative_vf_id, sb_id);
2196 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2197 start->sb_addr[sb_id],
2202 vf->mtu = start->mtu;
2203 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2205 /* Take into consideration configuration forced by hypervisor;
2206 * If none is configured, use the supplied VF values [for old
2207 * vfs that would still be fine, since they passed '0' as padding].
2209 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2210 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2211 u8 vf_req = start->only_untagged;
2213 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2214 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2217 OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_start_params));
2218 params.tpa_mode = start->tpa_mode;
2219 params.remove_inner_vlan = start->inner_vlan_removal;
2220 params.tx_switching = true;
2223 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2224 DP_NOTICE(p_hwfn, false,
2225 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2226 params.tx_switching = false;
2230 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2231 params.drop_ttl0 = false;
2232 params.concrete_fid = vf->concrete_fid;
2233 params.opaque_fid = vf->opaque_fid;
2234 params.vport_id = vf->vport_id;
2235 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2236 params.mtu = vf->mtu;
2238 /* Non-trusted VFs should enable control frame filtering */
2239 params.check_mac = !vf->p_vf_info.is_trusted_configured;
2241 rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2242 if (rc != ECORE_SUCCESS) {
2243 DP_ERR(p_hwfn,
2244 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2245 status = PFVF_STATUS_FAILURE;
2247 vf->vport_instance++;
2249 /* Force configuration if needed on the newly opened vport */
2250 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2251 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2252 vf->vport_id, vf->opaque_fid);
2253 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2256 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2257 sizeof(struct pfvf_def_resp_tlv), status);
2260 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2261 struct ecore_ptt *p_ptt,
2262 struct ecore_vf_info *vf)
2264 u8 status = PFVF_STATUS_SUCCESS;
2265 enum _ecore_status_t rc;
2267 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2268 vf->vport_instance--;
2269 vf->spoof_chk = false;
2271 if ((ecore_iov_validate_active_rxq(vf)) ||
2272 (ecore_iov_validate_active_txq(vf))) {
2273 vf->b_malicious = true;
2274 DP_NOTICE(p_hwfn, false,
2275 "VF [%02x] - considered malicious;"
2276 " Unable to stop RX/TX queuess\n",
2278 status = PFVF_STATUS_MALICIOUS;
2282 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2283 if (rc != ECORE_SUCCESS) {
2285 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2286 status = PFVF_STATUS_FAILURE;
2289 /* Forget the configuration on the vport */
2290 vf->configured_features = 0;
2291 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2294 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2295 sizeof(struct pfvf_def_resp_tlv), status);
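/* Build the START_RXQ response. Legacy clients expect the shorter default
 * response, while newer ones also receive the Rx producer offset.
 */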
2298 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2299 struct ecore_ptt *p_ptt,
2300 struct ecore_vf_info *vf,
2301 u8 status, bool b_legacy)
2303 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2304 struct pfvf_start_queue_resp_tlv *p_tlv;
2305 struct vfpf_start_rxq_tlv *req;
2308 mbx->offset = (u8 *)mbx->reply_virt;
2310 /* Taking a bigger struct instead of adding a TLV to list was a
2311 * mistake, but one which we're now stuck with, as some older
2312 * clients assume the size of the previous response.
2315 length = sizeof(*p_tlv);
2317 length = sizeof(struct pfvf_def_resp_tlv);
2319 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2320 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2321 sizeof(struct channel_list_end_tlv));
2323 /* Update the TLV with the response.
2324 * The VF Rx producers are located in the vf zone.
2326 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2327 req = &mbx->req_virt->start_rxq;
2329 p_tlv->offset =
2330 PXP_VF_BAR0_START_MSDM_ZONE_B +
2331 OFFSETOF(struct mstorm_vf_zone,
2332 non_trigger.eth_rx_queue_producers) +
2333 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2336 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
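/* Extract the queue-id index from the request's qid TLV; VFs that did not
 * advertise VFPF_ACQUIRE_CAP_QUEUE_QIDS get the fixed legacy indices.
 */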
2339 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2340 struct ecore_vf_info *p_vf, bool b_is_tx)
2342 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2343 struct vfpf_qid_tlv *p_qid_tlv;
2345 /* Search for the qid the VF published, if it's going to provide one */
2346 if (!(p_vf->acquire.vfdev_info.capabilities &
2347 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2349 return ECORE_IOV_LEGACY_QID_TX;
2351 return ECORE_IOV_LEGACY_QID_RX;
2354 p_qid_tlv = (struct vfpf_qid_tlv *)
2355 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2357 if (p_qid_tlv == OSAL_NULL) {
2358 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2359 "VF[%2x]: Failed to provide qid\n",
2360 p_vf->relative_vf_id);
2362 return ECORE_IOV_QID_INVALID;
2365 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2366 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2367 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2368 p_vf->relative_vf_id, p_qid_tlv->qid);
2369 return ECORE_IOV_QID_INVALID;
2372 return p_qid_tlv->qid;
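/* Handler for CHANNEL_TLV_START_RXQ: validates the requested queue and SB,
 * acquires a queue-cid, programs the Rx producer for non-legacy VFs and
 * issues the Rx-queue start ramrod.
 */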
2375 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2376 struct ecore_ptt *p_ptt,
2377 struct ecore_vf_info *vf)
2379 struct ecore_queue_start_common_params params;
2380 struct ecore_queue_cid_vf_params vf_params;
2381 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2382 u8 status = PFVF_STATUS_NO_RESOURCE;
2383 u8 qid_usage_idx, vf_legacy = 0;
2384 struct ecore_vf_queue *p_queue;
2385 struct vfpf_start_rxq_tlv *req;
2386 struct ecore_queue_cid *p_cid;
2387 struct ecore_sb_info sb_dummy;
2388 enum _ecore_status_t rc;
2390 req = &mbx->req_virt->start_rxq;
2392 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2393 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2394 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2397 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2398 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2401 p_queue = &vf->vf_queues[req->rx_qid];
2402 if (p_queue->cids[qid_usage_idx].p_cid)
2405 vf_legacy = ecore_vf_calculate_legacy(vf);
2407 /* Acquire a new queue-cid */
2408 OSAL_MEMSET(&params, 0, sizeof(params));
2409 params.queue_id = (u8)p_queue->fw_rx_qid;
2410 params.vport_id = vf->vport_id;
2411 params.stats_id = vf->abs_vf_id + 0x10;
2413 /* Since IGU index is passed via sb_info, construct a dummy one */
2414 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2415 sb_dummy.igu_sb_id = req->hw_sb;
2416 params.p_sb = &sb_dummy;
2417 params.sb_idx = req->sb_index;
2419 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2420 vf_params.vfid = vf->relative_vf_id;
2421 vf_params.vf_qid = (u8)req->rx_qid;
2422 vf_params.vf_legacy = vf_legacy;
2423 vf_params.qid_usage_idx = qid_usage_idx;
2425 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2426 &params, true, &vf_params);
2427 if (p_cid == OSAL_NULL)
2430 /* The VF Rx producers are located in the vf zone.
2431 * Legacy VFs have their producers in the queue zone, but they
2432 * calculate the location on their own and clean them prior to this.
2434 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2436 GTT_BAR0_MAP_REG_MSDM_RAM +
2437 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
2441 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2442 req->bd_max_bytes,
2443 req->rxq_addr,
2444 req->cqe_pbl_addr,
2445 req->cqe_pbl_size);
2446 if (rc != ECORE_SUCCESS) {
2447 status = PFVF_STATUS_FAILURE;
2448 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2450 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2451 p_queue->cids[qid_usage_idx].b_is_tx = false;
2452 status = PFVF_STATUS_SUCCESS;
2453 vf->num_active_rxqs++;
2457 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2459 ECORE_QCID_LEGACY_VF_RX_PROD));
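/* Tunnel-update helpers: the response always reflects the tunnel modes,
 * classifications and UDP ports currently configured on the PF.
 */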
2463 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2464 struct ecore_tunnel_info *p_tun,
2465 u16 tunn_feature_mask)
2467 p_resp->tunn_feature_mask = tunn_feature_mask;
2468 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2469 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2470 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2471 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2472 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2473 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2474 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2475 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2476 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2477 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2478 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2479 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2483 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2484 struct ecore_tunn_update_type *p_tun,
2485 enum ecore_tunn_mode mask, u8 tun_cls)
2487 if (p_req->tun_mode_update_mask & (1 << mask)) {
2488 p_tun->b_update_mode = true;
2490 if (p_req->tunn_mode & (1 << mask))
2491 p_tun->b_mode_enabled = true;
2494 p_tun->tun_cls = tun_cls;
2498 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2499 struct ecore_tunn_update_type *p_tun,
2500 struct ecore_tunn_update_udp_port *p_port,
2501 enum ecore_tunn_mode mask,
2502 u8 tun_cls, u8 update_port, u16 port)
2505 p_port->b_update_port = true;
2506 p_port->port = port;
2509 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2513 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2515 bool b_update_requested = false;
2517 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2518 p_req->update_geneve_port || p_req->update_vxlan_port)
2519 b_update_requested = true;
2521 return b_update_requested;
2524 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2525 struct ecore_ptt *p_ptt,
2526 struct ecore_vf_info *p_vf)
2528 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2529 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2530 struct pfvf_update_tunn_param_tlv *p_resp;
2531 struct vfpf_update_tunn_param_tlv *p_req;
2532 enum _ecore_status_t rc = ECORE_SUCCESS;
2533 u8 status = PFVF_STATUS_SUCCESS;
2534 bool b_update_required = false;
2535 struct ecore_tunnel_info tunn;
2536 u16 tunn_feature_mask = 0;
2539 mbx->offset = (u8 *)mbx->reply_virt;
2541 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2542 p_req = &mbx->req_virt->tunn_param_update;
2544 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2545 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2546 "No tunnel update requested by VF\n");
2547 status = PFVF_STATUS_FAILURE;
2551 tunn.b_update_rx_cls = p_req->update_tun_cls;
2552 tunn.b_update_tx_cls = p_req->update_tun_cls;
2554 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2555 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2556 p_req->update_vxlan_port,
2558 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2559 ECORE_MODE_L2GENEVE_TUNN,
2560 p_req->l2geneve_clss,
2561 p_req->update_geneve_port,
2562 p_req->geneve_port);
2563 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2564 ECORE_MODE_IPGENEVE_TUNN,
2565 p_req->ipgeneve_clss);
2566 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2567 ECORE_MODE_L2GRE_TUNN,
2569 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2570 ECORE_MODE_IPGRE_TUNN,
2573 /* If PF modifies VF's req then it should
2574 * still return an error in case of partial configuration
2575 * or modified configuration as opposed to requested one.
2577 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2578 &b_update_required, &tunn);
2580 if (rc != ECORE_SUCCESS)
2581 status = PFVF_STATUS_FAILURE;
2583 /* Check whether the ECORE client is willing to update anything */
2584 if (b_update_required) {
2587 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2588 ECORE_SPQ_MODE_EBLOCK,
2590 if (rc != ECORE_SUCCESS)
2591 status = PFVF_STATUS_FAILURE;
2593 geneve_port = p_tun->geneve_port.port;
2594 ecore_for_each_vf(p_hwfn, i) {
2595 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2596 p_tun->vxlan_port.port,
2602 p_resp = ecore_add_tlv(&mbx->offset,
2603 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2605 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2606 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2607 sizeof(struct channel_list_end_tlv));
2609 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2612 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2613 struct ecore_ptt *p_ptt,
2614 struct ecore_vf_info *p_vf,
2618 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2619 struct pfvf_start_queue_resp_tlv *p_tlv;
2620 bool b_legacy = false;
2623 mbx->offset = (u8 *)mbx->reply_virt;
2625 /* Taking a bigger struct instead of adding a TLV to list was a
2626 * mistake, but one which we're now stuck with, as some older
2627 * clients assume the size of the previous response.
2629 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2630 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2634 length = sizeof(*p_tlv);
2636 length = sizeof(struct pfvf_def_resp_tlv);
2638 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2639 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2640 sizeof(struct channel_list_end_tlv));
2642 /* Update the TLV with the response */
2643 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2644 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2646 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
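/* Handler for CHANNEL_TLV_START_TXQ: mirrors the Rx flow - validate the
 * queue and SB, acquire a queue-cid and issue the Tx-queue start ramrod
 * on the physical queue assigned to the VF.
 */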
2649 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2650 struct ecore_ptt *p_ptt,
2651 struct ecore_vf_info *vf)
2653 struct ecore_queue_start_common_params params;
2654 struct ecore_queue_cid_vf_params vf_params;
2655 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2656 u8 status = PFVF_STATUS_NO_RESOURCE;
2657 struct ecore_vf_queue *p_queue;
2658 struct vfpf_start_txq_tlv *req;
2659 struct ecore_queue_cid *p_cid;
2660 struct ecore_sb_info sb_dummy;
2661 u8 qid_usage_idx, vf_legacy;
2663 enum _ecore_status_t rc;
2666 OSAL_MEMSET(&params, 0, sizeof(params));
2667 req = &mbx->req_virt->start_txq;
2669 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2670 ECORE_IOV_VALIDATE_Q_NA) ||
2671 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2674 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2675 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2678 p_queue = &vf->vf_queues[req->tx_qid];
2679 if (p_queue->cids[qid_usage_idx].p_cid)
2682 vf_legacy = ecore_vf_calculate_legacy(vf);
2684 /* Acquire a new queue-cid */
2685 params.queue_id = p_queue->fw_tx_qid;
2686 params.vport_id = vf->vport_id;
2687 params.stats_id = vf->abs_vf_id + 0x10;
2689 /* Since IGU index is passed via sb_info, construct a dummy one */
2690 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2691 sb_dummy.igu_sb_id = req->hw_sb;
2692 params.p_sb = &sb_dummy;
2693 params.sb_idx = req->sb_index;
2695 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2696 vf_params.vfid = vf->relative_vf_id;
2697 vf_params.vf_qid = (u8)req->tx_qid;
2698 vf_params.vf_legacy = vf_legacy;
2699 vf_params.qid_usage_idx = qid_usage_idx;
2701 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2702 &params, false, &vf_params);
2703 if (p_cid == OSAL_NULL)
2706 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2707 vf->relative_vf_id);
2708 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2709 req->pbl_addr, req->pbl_size, pq);
2710 if (rc != ECORE_SUCCESS) {
2711 status = PFVF_STATUS_FAILURE;
2712 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2714 status = PFVF_STATUS_SUCCESS;
2715 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2716 p_queue->cids[qid_usage_idx].b_is_tx = true;
2721 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2725 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2726 struct ecore_vf_info *vf,
2729 bool cqe_completion)
2731 struct ecore_vf_queue *p_queue;
2732 enum _ecore_status_t rc = ECORE_SUCCESS;
2734 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2735 ECORE_IOV_VALIDATE_Q_NA)) {
2736 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2737 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2738 vf->relative_vf_id, rxq_id, qid_usage_idx);
2742 p_queue = &vf->vf_queues[rxq_id];
2744 /* We've validated the index and the existence of the active RXQ -
2745 * now we need to make sure that it's using the correct qid.
2747 if (!p_queue->cids[qid_usage_idx].p_cid ||
2748 p_queue->cids[qid_usage_idx].b_is_tx) {
2749 struct ecore_queue_cid *p_cid;
2751 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2752 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2753 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2754 vf->relative_vf_id, rxq_id, qid_usage_idx,
2755 rxq_id, p_cid->qid_usage_idx);
2759 /* Now that we know we have a valid Rx-queue - close it */
2760 rc = ecore_eth_rx_queue_stop(p_hwfn,
2761 p_queue->cids[qid_usage_idx].p_cid,
2762 false, cqe_completion);
2763 if (rc != ECORE_SUCCESS)
2766 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2767 vf->num_active_rxqs--;
2769 return ECORE_SUCCESS;
2772 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2773 struct ecore_vf_info *vf,
2777 struct ecore_vf_queue *p_queue;
2778 enum _ecore_status_t rc = ECORE_SUCCESS;
2780 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2781 ECORE_IOV_VALIDATE_Q_NA))
2784 p_queue = &vf->vf_queues[txq_id];
2785 if (!p_queue->cids[qid_usage_idx].p_cid ||
2786 !p_queue->cids[qid_usage_idx].b_is_tx)
2789 rc = ecore_eth_tx_queue_stop(p_hwfn,
2790 p_queue->cids[qid_usage_idx].p_cid);
2791 if (rc != ECORE_SUCCESS)
2794 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2795 return ECORE_SUCCESS;
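/* The stop-queue mailbox handlers below accept exactly one queue per
 * request; since the addition of CHANNEL_TLV_QID no client sends more.
 */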
2798 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2799 struct ecore_ptt *p_ptt,
2800 struct ecore_vf_info *vf)
2802 u16 length = sizeof(struct pfvf_def_resp_tlv);
2803 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2804 u8 status = PFVF_STATUS_FAILURE;
2805 struct vfpf_stop_rxqs_tlv *req;
2807 enum _ecore_status_t rc;
2809 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2810 * would be one. Since no older ecore passed multiple queues
2811 * using this API, sanitize the value.
2812 */
2813 req = &mbx->req_virt->stop_rxqs;
2814 if (req->num_rxqs != 1) {
2815 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2816 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2817 vf->relative_vf_id);
2818 status = PFVF_STATUS_NOT_SUPPORTED;
2822 /* Find which qid-index is associated with the queue */
2823 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2824 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2827 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2828 qid_usage_idx, req->cqe_completion);
2829 if (rc == ECORE_SUCCESS)
2830 status = PFVF_STATUS_SUCCESS;
2832 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2833 length, status);
2836 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2837 struct ecore_ptt *p_ptt,
2838 struct ecore_vf_info *vf)
2840 u16 length = sizeof(struct pfvf_def_resp_tlv);
2841 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2842 u8 status = PFVF_STATUS_FAILURE;
2843 struct vfpf_stop_txqs_tlv *req;
2845 enum _ecore_status_t rc;
2847 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2848 * would be one. Since no older ecore passed multiple queues
2849 * using this API, sanitize the value.
2850 */
2851 req = &mbx->req_virt->stop_txqs;
2852 if (req->num_txqs != 1) {
2853 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2854 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2855 vf->relative_vf_id);
2856 status = PFVF_STATUS_NOT_SUPPORTED;
2860 /* Find which qid-index is associated with the queue */
2861 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2862 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2865 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2867 if (rc == ECORE_SUCCESS)
2868 status = PFVF_STATUS_SUCCESS;
2871 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2872 length, status);
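/* Handler for CHANNEL_TLV_UPDATE_RXQ: collects the queue-cids referenced
 * by the request and issues a single Rx-queues update ramrod for them.
 */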
2875 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2876 struct ecore_ptt *p_ptt,
2877 struct ecore_vf_info *vf)
2879 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2880 u16 length = sizeof(struct pfvf_def_resp_tlv);
2881 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2882 struct vfpf_update_rxq_tlv *req;
2883 u8 status = PFVF_STATUS_FAILURE;
2884 u8 complete_event_flg;
2885 u8 complete_cqe_flg;
2887 enum _ecore_status_t rc;
2890 req = &mbx->req_virt->update_rxq;
2891 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2892 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2894 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2895 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2898 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2899 * expecting a single queue at a time. Validate this.
2901 if ((vf->acquire.vfdev_info.capabilities &
2902 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2903 req->num_rxqs != 1) {
2904 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2905 "VF[%d] supports QIDs but sends multiple queues\n",
2906 vf->relative_vf_id);
2910 /* Validate inputs - for the legacy case this is still true since
2911 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2913 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2914 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2915 ECORE_IOV_VALIDATE_Q_NA) ||
2916 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2917 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2918 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2919 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2920 vf->relative_vf_id, req->rx_qid,
2926 for (i = 0; i < req->num_rxqs; i++) {
2927 u16 qid = req->rx_qid + i;
2929 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2932 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2933 req->num_rxqs,
2934 complete_cqe_flg,
2935 complete_event_flg,
2936 ECORE_SPQ_MODE_EBLOCK,
2937 OSAL_NULL);
2938 if (rc != ECORE_SUCCESS)
2941 status = PFVF_STATUS_SUCCESS;
2943 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2947 static enum _ecore_status_t
2948 ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
2949 struct ecore_ptt *p_ptt,
2950 struct ecore_vf_info *p_vf)
2952 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2953 struct ecore_sp_vport_update_params params;
2954 enum _ecore_status_t rc = ECORE_SUCCESS;
2955 struct vfpf_update_mtu_tlv *p_req;
2956 u8 status = PFVF_STATUS_SUCCESS;
2958 /* Validate that the VF has an active vport */
2959 if (!p_vf->vport_instance) {
2960 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2961 "No VPORT instance available for VF[%d], failing MTU update\n",
2963 status = PFVF_STATUS_FAILURE;
2967 p_req = &mbx->req_virt->update_mtu;
2969 OSAL_MEMSET(&params, 0, sizeof(params));
2970 params.opaque_fid = p_vf->opaque_fid;
2971 params.vport_id = p_vf->vport_id;
2972 params.mtu = p_req->mtu;
2973 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
2977 status = PFVF_STATUS_FAILURE;
2979 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
2980 CHANNEL_TLV_UPDATE_MTU,
2981 sizeof(struct pfvf_def_resp_tlv),
2982 status);
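/* Walk the TLV chain of a VF request looking for a specific extended TLV
 * type, guarding against zero-length entries and buffer overruns.
 */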
2986 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2987 void *p_tlvs_list, u16 req_type)
2989 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2990 int len = 0;
2992 do {
2993 if (!p_tlv->length) {
2994 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2998 if (p_tlv->type == req_type) {
2999 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3000 "Extended tlv type %s, length %d found\n",
3001 qede_ecore_channel_tlvs_string[p_tlv->type],
3002 p_tlv->length);
3003 return p_tlv;
3004 }
3006 len += p_tlv->length;
3007 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
3009 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
3010 DP_NOTICE(p_hwfn, true,
3011 "TLVs has overrun the buffer size\n");
3014 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
3020 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
3021 struct ecore_sp_vport_update_params *p_data,
3022 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3024 struct vfpf_vport_update_activate_tlv *p_act_tlv;
3025 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
3027 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
3028 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3032 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
3033 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
3034 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
3035 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
3036 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
3040 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
3041 struct ecore_sp_vport_update_params *p_data,
3042 struct ecore_vf_info *p_vf,
3043 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3045 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
3046 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
3048 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
3049 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3053 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
3055 /* Ignore the VF request if we're forcing a vlan */
3056 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
3057 p_data->update_inner_vlan_removal_flg = 1;
3058 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
3061 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
3065 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
3066 struct ecore_sp_vport_update_params *p_data,
3067 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3069 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
3070 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
3072 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
3073 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3074 if (!p_tx_switch_tlv)
3078 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
3079 DP_NOTICE(p_hwfn, false,
3080 "FPGA: Ignore tx-switching configuration originating"
3086 p_data->update_tx_switching_flg = 1;
3087 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
3088 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
3092 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
3093 struct ecore_sp_vport_update_params *p_data,
3094 struct ecore_iov_vf_mbx *p_mbx,
3097 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
3098 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
3100 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
3101 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3105 p_data->update_approx_mcast_flg = 1;
3106 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
3107 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
3108 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
3112 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
3113 struct ecore_sp_vport_update_params *p_data,
3114 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3116 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
3117 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
3118 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
3120 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
3121 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3125 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3126 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3127 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3128 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3129 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3133 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3134 struct ecore_sp_vport_update_params *p_data,
3135 struct ecore_iov_vf_mbx *p_mbx,
3138 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3139 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3141 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3142 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3143 if (!p_accept_any_vlan)
3146 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3147 p_data->update_accept_any_vlan_flg =
3148 p_accept_any_vlan->update_accept_any_vlan_flg;
3149 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3153 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3154 struct ecore_vf_info *vf,
3155 struct ecore_sp_vport_update_params *p_data,
3156 struct ecore_rss_params *p_rss,
3157 struct ecore_iov_vf_mbx *p_mbx,
3158 u16 *tlvs_mask, u16 *tlvs_accepted)
3160 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3161 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3162 bool b_reject = false;
3166 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3167 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3169 p_data->rss_params = OSAL_NULL;
3173 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3175 p_rss->update_rss_config =
3176 !!(p_rss_tlv->update_rss_flags &
3177 VFPF_UPDATE_RSS_CONFIG_FLAG);
3178 p_rss->update_rss_capabilities =
3179 !!(p_rss_tlv->update_rss_flags &
3180 VFPF_UPDATE_RSS_CAPS_FLAG);
3181 p_rss->update_rss_ind_table =
3182 !!(p_rss_tlv->update_rss_flags &
3183 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3184 p_rss->update_rss_key =
3185 !!(p_rss_tlv->update_rss_flags &
3186 VFPF_UPDATE_RSS_KEY_FLAG);
3188 p_rss->rss_enable = p_rss_tlv->rss_enable;
3189 p_rss->rss_eng_id = vf->rss_eng_id;
3190 p_rss->rss_caps = p_rss_tlv->rss_caps;
3191 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3192 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3193 sizeof(p_rss->rss_key));
3195 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3196 (1 << p_rss_tlv->rss_table_size_log));
3198 for (i = 0; i < table_size; i++) {
3199 struct ecore_queue_cid *p_cid;
3201 q_idx = p_rss_tlv->rss_ind_table[i];
3202 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3203 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3204 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3205 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3206 vf->relative_vf_id, q_idx);
3211 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3212 p_rss->rss_ind_table[i] = p_cid;
3215 p_data->rss_params = p_rss;
3217 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3218 if (!b_reject)
3219 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3223 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3224 struct ecore_sp_vport_update_params *p_data,
3225 struct ecore_sge_tpa_params *p_sge_tpa,
3226 struct ecore_iov_vf_mbx *p_mbx,
3229 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3230 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3232 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3233 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3235 if (!p_sge_tpa_tlv) {
3236 p_data->sge_tpa_params = OSAL_NULL;
3240 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3242 p_sge_tpa->update_tpa_en_flg =
3243 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3244 p_sge_tpa->update_tpa_param_flg =
3245 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3246 VFPF_UPDATE_TPA_PARAM_FLAG);
3248 p_sge_tpa->tpa_ipv4_en_flg =
3249 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3250 p_sge_tpa->tpa_ipv6_en_flg =
3251 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3252 p_sge_tpa->tpa_pkt_split_flg =
3253 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3254 p_sge_tpa->tpa_hdr_data_split_flg =
3255 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3256 p_sge_tpa->tpa_gro_consistent_flg =
3257 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3259 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3260 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3261 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3262 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3263 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3265 p_data->sge_tpa_params = p_sge_tpa;
3267 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
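/* Handler for CHANNEL_TLV_VPORT_UPDATE: gathers all extended TLVs into a
 * single ecore_sp_vport_update_params, lets the upper layer veto specific
 * features and reports the accepted TLVs back to the VF.
 */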
3270 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3271 struct ecore_ptt *p_ptt,
3272 struct ecore_vf_info *vf)
3274 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3275 struct ecore_sp_vport_update_params params;
3276 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3277 struct ecore_sge_tpa_params sge_tpa_params;
3278 u16 tlvs_mask = 0, tlvs_accepted = 0;
3279 u8 status = PFVF_STATUS_SUCCESS;
3281 enum _ecore_status_t rc;
3283 /* Validate that the VF has an active vport */
3284 if (!vf->vport_instance) {
3285 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3286 "No VPORT instance available for VF[%d],"
3287 " failing vport update\n",
3289 status = PFVF_STATUS_FAILURE;
3293 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3294 if (p_rss_params == OSAL_NULL) {
3295 status = PFVF_STATUS_FAILURE;
3299 OSAL_MEMSET(&params, 0, sizeof(params));
3300 params.opaque_fid = vf->opaque_fid;
3301 params.vport_id = vf->vport_id;
3302 params.rss_params = OSAL_NULL;
3304 /* Search for extended tlvs list and update values
3305 * from VF in struct ecore_sp_vport_update_params.
3307 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3308 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3309 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3310 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3311 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3312 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3313 ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
3314 &sge_tpa_params, mbx, &tlvs_mask);
3316 tlvs_accepted = tlvs_mask;
3318 /* Some of the extended TLVs need to be validated first; In that case,
3319 * they can update the mask without updating the accepted [so that
3320 * PF could communicate to VF it has rejected request].
3322 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3323 mbx, &tlvs_mask, &tlvs_accepted);
3325 /* Just log a message if there is no single extended tlv in buffer.
3326 * When all features of vport update ramrod would be requested by VF
3327 * as extended TLVs in buffer then an error can be returned in response
3328 * if there is no extended TLV present in buffer.
3330 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3331 &params, &tlvs_accepted) !=
3334 status = PFVF_STATUS_NOT_SUPPORTED;
3338 if (!tlvs_accepted) {
3339 if (tlvs_mask)
3340 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3341 "Upper-layer prevents said VF"
3342 " configuration\n");
3343 else
3344 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3345 "No feature tlvs found for vport update\n");
3346 status = PFVF_STATUS_NOT_SUPPORTED;
3350 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3354 status = PFVF_STATUS_FAILURE;
3357 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3358 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3359 tlvs_mask, tlvs_accepted);
3360 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3363 static enum _ecore_status_t
3364 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3365 struct ecore_vf_info *p_vf,
3366 struct ecore_filter_ucast *p_params)
3370 /* First remove entries and then add new ones */
3371 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3372 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3373 if (p_vf->shadow_config.vlans[i].used &&
3374 p_vf->shadow_config.vlans[i].vid ==
3376 p_vf->shadow_config.vlans[i].used = false;
3379 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3380 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3381 "VF [%d] - Tries to remove a non-existing"
3383 p_vf->relative_vf_id);
3386 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3387 p_params->opcode == ECORE_FILTER_FLUSH) {
3388 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3389 p_vf->shadow_config.vlans[i].used = false;
3392 /* In forced mode, we're willing to remove entries - but we don't add
3395 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3396 return ECORE_SUCCESS;
3398 if (p_params->opcode == ECORE_FILTER_ADD ||
3399 p_params->opcode == ECORE_FILTER_REPLACE) {
3400 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3401 if (p_vf->shadow_config.vlans[i].used)
3404 p_vf->shadow_config.vlans[i].used = true;
3405 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3409 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3410 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3411 "VF [%d] - Tries to configure more than %d"
3413 p_vf->relative_vf_id,
3414 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3419 return ECORE_SUCCESS;
3422 static enum _ecore_status_t
3423 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3424 struct ecore_vf_info *p_vf,
3425 struct ecore_filter_ucast *p_params)
3427 char empty_mac[ETH_ALEN];
3430 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3432 /* If we're in forced-mode, we don't allow any change */
3433 /* TODO - this would change if we were ever to implement logic for
3434 * removing a forced MAC altogether [in which case, like for vlans,
3435 * we should be able to re-trace previous configuration.
3437 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3438 return ECORE_SUCCESS;
3440 /* Since we don't have the implementation of the logic for removing
3441 * a forced MAC and restoring shadow MAC, let's not worry about
3442 * processing shadow copies of MAC as long as VF trust mode is ON,
3443 * to keep things simple.
3445 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
3446 p_vf->p_vf_info.is_trusted_configured)
3447 return ECORE_SUCCESS;
3449 /* First remove entries and then add new ones */
3450 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3451 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3452 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3453 p_params->mac, ETH_ALEN)) {
3454 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3460 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3461 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3462 "MAC isn't configured\n");
3465 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3466 p_params->opcode == ECORE_FILTER_FLUSH) {
3467 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3468 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3471 /* List the new MAC address */
3472 if (p_params->opcode != ECORE_FILTER_ADD &&
3473 p_params->opcode != ECORE_FILTER_REPLACE)
3474 return ECORE_SUCCESS;
3476 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3477 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3478 empty_mac, ETH_ALEN)) {
3479 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3480 p_params->mac, ETH_ALEN);
3481 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3482 "Added MAC at %d entry in shadow\n", i);
3487 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3488 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3489 "No available place for MAC\n");
3493 return ECORE_SUCCESS;
3496 static enum _ecore_status_t
3497 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3498 struct ecore_vf_info *p_vf,
3499 struct ecore_filter_ucast *p_params)
3501 enum _ecore_status_t rc = ECORE_SUCCESS;
3503 if (p_params->type == ECORE_FILTER_MAC) {
3504 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3505 if (rc != ECORE_SUCCESS)
3509 if (p_params->type == ECORE_FILTER_VLAN)
3510 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3515 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3516 struct ecore_ptt *p_ptt,
3517 struct ecore_vf_info *vf)
3519 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3520 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3521 struct vfpf_ucast_filter_tlv *req;
3522 u8 status = PFVF_STATUS_SUCCESS;
3523 struct ecore_filter_ucast params;
3524 enum _ecore_status_t rc;
3526 /* Prepare the unicast filter params */
3527 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3528 req = &mbx->req_virt->ucast_filter;
3529 params.opcode = (enum ecore_filter_opcode)req->opcode;
3530 params.type = (enum ecore_filter_ucast_type)req->type;
3532 /* @@@TBD - We might need logic on HV side in determining this */
3533 params.is_rx_filter = 1;
3534 params.is_tx_filter = 1;
3535 params.vport_to_remove_from = vf->vport_id;
3536 params.vport_to_add_to = vf->vport_id;
3537 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3538 params.vlan = req->vlan;
3540 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3541 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3542 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3543 vf->abs_vf_id, params.opcode, params.type,
3544 params.is_rx_filter ? "RX" : "",
3545 params.is_tx_filter ? "TX" : "",
3546 params.vport_to_add_to,
3547 params.mac[0], params.mac[1], params.mac[2],
3548 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3550 if (!vf->vport_instance) {
3551 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3552 "No VPORT instance available for VF[%d],"
3553 " failing ucast MAC configuration\n",
3555 status = PFVF_STATUS_FAILURE;
3559 /* Update shadow copy of the VF configuration. In case shadow indicates
3560 * the action should be blocked, return success to the VF to imitate
3561 * the firmware behaviour in such a case.
3563 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) !=
3567 /* Determine if the unicast filtering is acceptable to the PF */
3568 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3569 (params.type == ECORE_FILTER_VLAN ||
3570 params.type == ECORE_FILTER_MAC_VLAN)) {
3571 /* Once VLAN is forced or PVID is set, do not allow
3572 * to add/replace any further VLANs.
3574 if (params.opcode == ECORE_FILTER_ADD ||
3575 params.opcode == ECORE_FILTER_REPLACE)
3576 status = PFVF_STATUS_FORCED;
3580 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3581 (params.type == ECORE_FILTER_MAC ||
3582 params.type == ECORE_FILTER_MAC_VLAN)) {
3583 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3584 (params.opcode != ECORE_FILTER_ADD &&
3585 params.opcode != ECORE_FILTER_REPLACE))
3586 status = PFVF_STATUS_FORCED;
3590 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3591 if (rc == ECORE_EXISTS) {
3592 goto out;
3593 } else if (rc == ECORE_INVAL) {
3594 status = PFVF_STATUS_FAILURE;
3598 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3599 ECORE_SPQ_MODE_CB, OSAL_NULL);
3601 status = PFVF_STATUS_FAILURE;
3604 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3605 sizeof(struct pfvf_def_resp_tlv), status);
3608 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3609 struct ecore_ptt *p_ptt,
3610 struct ecore_vf_info *vf)
3615 for (i = 0; i < vf->num_sbs; i++)
3616 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3618 vf->opaque_fid, false);
3620 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3621 sizeof(struct pfvf_def_resp_tlv),
3622 PFVF_STATUS_SUCCESS);
3625 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3626 struct ecore_ptt *p_ptt,
3627 struct ecore_vf_info *vf)
3629 u16 length = sizeof(struct pfvf_def_resp_tlv);
3630 u8 status = PFVF_STATUS_SUCCESS;
3632 /* Disable Interrupts for VF */
3633 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3635 /* Reset Permission table */
3636 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3638 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3639 length, status);
3642 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3643 struct ecore_ptt *p_ptt,
3644 struct ecore_vf_info *p_vf)
3646 u16 length = sizeof(struct pfvf_def_resp_tlv);
3647 u8 status = PFVF_STATUS_SUCCESS;
3648 enum _ecore_status_t rc = ECORE_SUCCESS;
3650 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3652 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3653 /* Stopping the VF */
3654 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3657 if (rc != ECORE_SUCCESS) {
3658 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3660 status = PFVF_STATUS_FAILURE;
3663 p_vf->state = VF_STOPPED;
3666 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3667 length, status);
3670 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3671 struct ecore_ptt *p_ptt,
3672 struct ecore_vf_info *p_vf)
3674 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3675 struct pfvf_read_coal_resp_tlv *p_resp;
3676 struct vfpf_read_coal_req_tlv *req;
3677 u8 status = PFVF_STATUS_FAILURE;
3678 struct ecore_vf_queue *p_queue;
3679 struct ecore_queue_cid *p_cid;
3680 enum _ecore_status_t rc = ECORE_SUCCESS;
3681 u16 coal = 0, qid, i;
3684 mbx->offset = (u8 *)mbx->reply_virt;
3685 req = &mbx->req_virt->read_coal_req;
3688 b_is_rx = req->is_rx ? true : false;
3691 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3692 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3693 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3694 "VF[%d]: Invalid Rx queue_id = %d\n",
3695 p_vf->abs_vf_id, qid);
3699 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3700 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3701 if (rc != ECORE_SUCCESS)
3704 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3705 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3706 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3707 "VF[%d]: Invalid Tx queue_id = %d\n",
3708 p_vf->abs_vf_id, qid);
3711 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3712 p_queue = &p_vf->vf_queues[qid];
3713 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3714 (!p_queue->cids[i].b_is_tx))
3717 p_cid = p_queue->cids[i].p_cid;
3719 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3721 if (rc != ECORE_SUCCESS)
3727 status = PFVF_STATUS_SUCCESS;
3730 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3732 p_resp->coal = coal;
3734 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3735 sizeof(struct channel_list_end_tlv));
3737 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3740 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3741 struct ecore_ptt *p_ptt,
3742 struct ecore_vf_info *vf)
3744 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3745 enum _ecore_status_t rc = ECORE_SUCCESS;
3746 struct vfpf_update_coalesce *req;
3747 u8 status = PFVF_STATUS_FAILURE;
3748 struct ecore_queue_cid *p_cid;
3749 u16 rx_coal, tx_coal;
3753 req = &mbx->req_virt->update_coalesce;
3755 rx_coal = req->rx_coal;
3756 tx_coal = req->tx_coal;
3759 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3760 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3762 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3763 vf->abs_vf_id, qid);
3767 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3768 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3770 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3771 vf->abs_vf_id, qid);
3775 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3776 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3777 vf->abs_vf_id, rx_coal, tx_coal, qid);
3780 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3782 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3783 if (rc != ECORE_SUCCESS) {
3784 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3785 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3786 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3789 vf->rx_coal = rx_coal;
3792 /* TODO - in future, it might be possible to pass this in a per-cid
3793 * granularity. For now, do this for all Tx queues.
3796 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3798 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3799 if (p_queue->cids[i].p_cid == OSAL_NULL)
3802 if (!p_queue->cids[i].b_is_tx)
3805 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3806 p_queue->cids[i].p_cid);
3807 if (rc != ECORE_SUCCESS) {
3808 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3809 "VF[%d]: Unable to set tx queue coalesce\n",
3814 vf->tx_coal = tx_coal;
3817 status = PFVF_STATUS_SUCCESS;
3819 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3820 sizeof(struct pfvf_def_resp_tlv), status);
3823 enum _ecore_status_t
3824 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3825 u16 rx_coal, u16 tx_coal,
3828 struct ecore_queue_cid *p_cid;
3829 struct ecore_vf_info *vf;
3830 struct ecore_ptt *p_ptt;
3834 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3835 DP_NOTICE(p_hwfn, true,
3836 "VF[%d] - Can not set coalescing: VF is not active\n",
3841 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3842 p_ptt = ecore_ptt_acquire(p_hwfn);
3846 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3847 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3849 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3850 vf->abs_vf_id, qid);
3854 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3855 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3857 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3858 vf->abs_vf_id, qid);
3862 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3863 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3864 vf->abs_vf_id, rx_coal, tx_coal, qid);
3867 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3869 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3870 if (rc != ECORE_SUCCESS) {
3871 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3872 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3873 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3876 vf->rx_coal = rx_coal;
3879 /* TODO - in future, it might be possible to pass this in a per-cid
3880 * granularity. For now, do this for all Tx queues.
3883 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3885 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3886 if (p_queue->cids[i].p_cid == OSAL_NULL)
3889 if (!p_queue->cids[i].b_is_tx)
3892 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3893 p_queue->cids[i].p_cid);
3894 if (rc != ECORE_SUCCESS) {
3895 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3896 "VF[%d]: Unable to set tx queue coalesce\n",
3901 vf->tx_coal = tx_coal;
3905 ecore_ptt_release(p_hwfn, p_ptt);
3910 static enum _ecore_status_t
3911 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3912 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3917 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3919 for (cnt = 0; cnt < 50; cnt++) {
3920 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3921 if (!val)
3922 break;
3923 OSAL_MSLEEP(20);
3924 }
3925 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3927 if (cnt == 50) {
3928 DP_ERR(p_hwfn,
3929 "VF[%d] - dorq failed to clean up [usage 0x%08x]\n",
3930 p_vf->abs_vf_id, val);
3931 return ECORE_TIMEOUT;
3932 }
3934 return ECORE_SUCCESS;
3937 #define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
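/* Wait for the PBF to drain the VF's in-flight traffic: sample each VOQ's
 * producer/consumer pair and poll until every consumer has advanced past
 * the initially observed distance.
 */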
3939 static enum _ecore_status_t
3940 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3941 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3943 u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
3944 u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
3945 u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
3946 u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
3947 u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
3948 u8 port_id, tc, tc_id = 0, voq = 0;
3951 /* Read initial consumers & producers */
3952 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
3953 /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
3954 for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
3955 tc_id = (tc < max_phys_tcs_per_port) ?
3958 voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
3959 cons[voq] = ecore_rd(p_hwfn, p_ptt,
3960 cons_voq0_addr + voq * 0x40);
3961 prod = ecore_rd(p_hwfn, p_ptt,
3962 prod_voq0_addr + voq * 0x40);
3963 distance[voq] = prod - cons[voq];
3967 /* Wait for consumers to pass the producers */
3968 port_id = 0;
3969 tc = 0;
3970 for (cnt = 0; cnt < 50; cnt++) {
3971 for (; port_id < max_ports_per_engine; port_id++) {
3972 /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
3973 for (; tc < max_phys_tcs_per_port + 1; tc++) {
3974 tc_id = (tc < max_phys_tcs_per_port) ?
3977 voq = VOQ(port_id, tc_id,
3978 max_phys_tcs_per_port);
3979 tmp = ecore_rd(p_hwfn, p_ptt,
3980 cons_voq0_addr + voq * 0x40);
3981 if (distance[voq] > tmp - cons[voq])
3985 if (tc == max_phys_tcs_per_port + 1)
3986 tc = 0;
3987 else
3988 break;
3989 }
3991 if (port_id == max_ports_per_engine)
3992 break;
3994 OSAL_MSLEEP(20);
3995 }
3997 if (cnt == 50) {
3998 DP_ERR(p_hwfn,
3999 "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
4000 p_vf->abs_vf_id, voq, port_id, tc_id);
4001 return ECORE_TIMEOUT;
4002 }
4004 return ECORE_SUCCESS;
4007 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
4008 struct ecore_vf_info *p_vf,
4009 struct ecore_ptt *p_ptt)
4011 enum _ecore_status_t rc;
4013 /* TODO - add SRC and TM polling once we add storage IOV */
4015 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
4019 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
4023 return ECORE_SUCCESS;
4026 static enum _ecore_status_t
4027 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4028 struct ecore_ptt *p_ptt,
4029 u16 rel_vf_id, u32 *ack_vfs)
4031 struct ecore_vf_info *p_vf;
4032 enum _ecore_status_t rc = ECORE_SUCCESS;
4034 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
4035 if (!p_vf)
4036 return ECORE_SUCCESS;
4038 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4039 (1ULL << (rel_vf_id % 64))) {
4040 u16 vfid = p_vf->abs_vf_id;
4042 /* TODO - should we lock channel? */
4044 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4045 "VF[%d] - Handling FLR\n", vfid);
4047 ecore_iov_vf_cleanup(p_hwfn, p_vf);
4049 /* If VF isn't active, no need for anything but SW */
4053 /* TODO - what to do in case of failure? */
4054 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
4055 if (rc != ECORE_SUCCESS)
4058 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
4060 /* TODO - decide how to handle a failed final cleanup */
4061 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
4065 /* Workaround to make VF-PF channel ready, as FW
4066 * doesn't do that as a part of FLR.
4069 GTT_BAR0_MAP_REG_USDM_RAM +
4070 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
4072 /* VF_STOPPED has to be set only after final cleanup
4073 * but prior to re-enabling the VF.
4075 p_vf->state = VF_STOPPED;
4077 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
4079 /* TODO - handle a failure to re-enable VF access */
4080 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
4085 /* Mark VF for ack and clean pending state */
4086 if (p_vf->state == VF_RESET)
4087 p_vf->state = VF_STOPPED;
4088 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
4089 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
4090 ~(1ULL << (rel_vf_id % 64));
4091 p_vf->vf_mbx.b_pending_msg = false;
4097 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4098 struct ecore_ptt *p_ptt)
4100 u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
4101 enum _ecore_status_t rc = ECORE_SUCCESS;
4104 OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
4106 /* Since BRB <-> PRS interface can't be tested as part of the flr
4107 * polling due to HW limitations, simply sleep a bit. And since
4108 * there's no need to wait per-vf, do it before looping.
4112 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
4113 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
4115 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
	u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);

	/* Wait instead of polling the BRB <-> PRS interface */
	OSAL_MSLEEP(100);

	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");

	for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		struct ecore_vf_info *p_vf;
		u8 vfid;

		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing MFw. Since
			 * MFW will not trigger an additional attention for
			 * VF flr until ACKs, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}

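/* Illustrative PF flow (sketch; assumes an attention handler that receives
 * the MFW's disabled-VF bitmap, which is not part of this file):
 *
 *	if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
 *		ecore_iov_vf_flr_cleanup(p_hwfn, p_ptt);
 *
 * ecore_iov_mark_vf_flr() only latches VF_RESET and the pending_flr bits;
 * the actual polling, final cleanup and MFW ack all happen later in
 * ecore_iov_vf_flr_cleanup().
 */
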
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *p_params,
			struct ecore_mcp_link_state *p_link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__ecore_vf_get_link_params(p_params, p_bulletin);
	if (p_link)
		__ecore_vf_get_link_state(p_link, p_bulletin);
	if (p_caps)
		__ecore_vf_get_link_caps(p_caps, p_bulletin);
}

void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
#ifndef CONFIG_ECORE_SW_CHANNEL
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn, true,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;
#endif

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
			     p_vf->relative_vf_id,
			     mbx->first_tlv.tl.type);

	/* Lock the per vf op mutex and note the locker's identity.
	 * The unlock will take place in mbx response.
	 */
	ecore_iov_lock_vf_pf_channel(p_hwfn,
				     p_vf, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
			ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_UPDATE:
			ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_READ:
			ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_MTU:
			ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* If we've received a message from a VF we consider malicious
		 * we ignore the message unless it's one for RELEASE, in which
		 * case we'll let it have the benefit of the doubt, allowing
		 * the next loaded driver to start again.
		 */
		if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
			/* TODO - initiate FLR, remove malicious indication */
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
				   p_vf->abs_vf_id);
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
				   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
		}

		ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				       mbx->first_tlv.tl.type,
				       sizeof(struct pfvf_def_resp_tlv),
				       PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a buggy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn, false,
			  "VF[%02x]: unknown TLV. type %04x length %04x"
			  " padding %08x reply address %lu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding,
			  (unsigned long)mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address))
			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					       mbx->first_tlv.tl.type,
					       sizeof(struct pfvf_def_resp_tlv),
					       PFVF_STATUS_NOT_SUPPORTED);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV -"
				   " no valid reply address\n",
				   p_vf->abs_vf_id);
	}

	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
				       mbx->first_tlv.tl.type);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}

void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
				     u64 *events)
{
	u16 i;

	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);

	ecore_for_each_vf(p_hwfn, i) {
		struct ecore_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

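/* Example consumer (sketch only): a caller owning a
 * u64 events[ECORE_VF_ARRAY_LENGTH] array can scan it to service each VF
 * with a pending mailbox message:
 *
 *	ecore_iov_pf_get_pending_events(p_hwfn, events);
 *	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
 *		if (events[i / 64] & (1ULL << (i % 64)))
 *			ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
 *
 * The bit index is the VF id relative to this PF, matching what
 * ecore_iov_process_mbx_req() expects.
 */
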
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be"
			   " handled by PF\n",
			   abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}

static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	p_vf->vf_mbx.b_pending_msg = true;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}

static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn, false,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}

	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 OSAL_UNUSED fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}

bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}

u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS_K2;
}

enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
	SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
	params.src_vf_id = vf_info->abs_vf_id;

	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}

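/* The VF->PF request path, end to end (sketch): the EQE handler only latches
 * the guest-side address of the request (ecore_sriov_vfpf_msg() sets
 * vf_mbx.pending_req), after which the OSAL layer is expected to run, in PF
 * context:
 *
 *	ecore_iov_copy_vf_msg(p_hwfn, p_ptt, vfid);      -- DMA the TLVs in
 *	ecore_iov_process_mbx_req(p_hwfn, p_ptt, vfid);  -- parse + respond
 *
 * The intent of DMAE_PARAMS_SRC_VF_VALID/src_vf_id above is that the source
 * read is issued with the VF's function id, so a malicious VF cannot point
 * pending_req at memory it doesn't own.
 */
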
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
	    vf_info->p_vf_info.is_trusted_configured) {
		feature = 1 << VFPF_BULLETIN_MAC_ADDR;
		/* Trust mode will disable Forced MAC */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~(1 << MAC_ADDR_FORCED);
	} else {
		feature = 1 << MAC_ADDR_FORCED;
		/* Forced MAC will disable MAC_ADDR */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~(1 << VFPF_BULLETIN_MAC_ADDR);
	}

	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
		    mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
	    vf_info->p_vf_info.is_trusted_configured)
		ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return ECORE_SUCCESS;
}

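/* MAC precedence, as implemented by the two setters above (clarifying
 * note): MAC_ADDR_FORCED and VFPF_BULLETIN_MAC_ADDR are mutually exclusive
 * bulletin bits -
 *
 *	forced MAC:   valid_bitmap |= (1 << MAC_ADDR_FORCED);
 *	regular MAC:  valid_bitmap |= (1 << VFPF_BULLETIN_MAC_ADDR);
 *
 * A forced MAC blocks later regular-MAC updates, while trust mode (or
 * allow_vf_mac_change) clears the forced bit and lets the regular MAC win.
 */
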
#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
		  (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
									  : 0;

	return ECORE_SUCCESS;
}

void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
#endif

void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced vlan, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced vlan to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
						: ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}

void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}

#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif

bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}

u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
			       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      (1 << VFPF_BULLETIN_MAC_ADDR)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	u16 rl_id;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
	return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}

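/* Example (sketch; assumes the caller passes the rate in Mb/s, as the
 * ethdev flows do): capping VF 3 to 10Gb/s from a PF flow -
 *
 *	rc = ecore_iov_configure_tx_rate(p_hwfn, p_ptt, 3, 10000);
 *
 * The rate is programmed into the global rate limiter whose id equals the
 * VF's absolute vport id.
 */
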
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	if (!vf) {
		DP_NOTICE(p_dev, true,
			  "Getting vf info failed, can't set min rate\n");
		return ECORE_INVAL;
	}

	return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
}

enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}

u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}

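/* The predicates above partition the VF state machine (clarifying note):
 * VF_FREE (awaiting ACQUIRE) -> VF_ACQUIRED (resources negotiated, vport
 * not yet started) -> VF_ENABLED (datapath ready), with VF_RESET and
 * VF_STOPPED covering the FLR/teardown path. "Started" therefore means
 * any state that is neither FREE nor STOPPED.
 */
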
u32
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

#ifdef CONFIG_ECORE_SW_CHANNEL
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
				 bool b_is_hw)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	vf_info->b_hw_channel = b_is_hw;
}
#endif