/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 */

#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 fw_return_code);

const char *qede_ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_CLOSE",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_COALESCE_UPDATE",
	"CHANNEL_TLV_QID",
	"CHANNEL_TLV_COALESCE_READ",
	"CHANNEL_TLV_BULLETIN_UPDATE_MAC",
	"CHANNEL_TLV_UPDATE_MTU",
	"CHANNEL_TLV_RDMA_ACQUIRE",
	"CHANNEL_TLV_RDMA_START",
	"CHANNEL_TLV_RDMA_STOP",
	"CHANNEL_TLV_RDMA_ADD_USER",
	"CHANNEL_TLV_RDMA_REMOVE_USER",
	"CHANNEL_TLV_RDMA_QUERY_COUNTERS",
	"CHANNEL_TLV_RDMA_ALLOC_TID",
	"CHANNEL_TLV_RDMA_REGISTER_TID",
	"CHANNEL_TLV_RDMA_DEREGISTER_TID",
	"CHANNEL_TLV_RDMA_FREE_TID",
	"CHANNEL_TLV_RDMA_CREATE_CQ",
	"CHANNEL_TLV_RDMA_RESIZE_CQ",
	"CHANNEL_TLV_RDMA_DESTROY_CQ",
	"CHANNEL_TLV_RDMA_CREATE_QP",
	"CHANNEL_TLV_RDMA_MODIFY_QP",
	"CHANNEL_TLV_RDMA_QUERY_QP",
	"CHANNEL_TLV_RDMA_DESTROY_QP",
	"CHANNEL_TLV_RDMA_CREATE_SRQ",
	"CHANNEL_TLV_RDMA_MODIFY_SRQ",
	"CHANNEL_TLV_RDMA_DESTROY_SRQ",
	"CHANNEL_TLV_RDMA_QUERY_PORT",
	"CHANNEL_TLV_RDMA_QUERY_DEVICE",
	"CHANNEL_TLV_RDMA_IWARP_CONNECT",
	"CHANNEL_TLV_RDMA_IWARP_ACCEPT",
	"CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN",
	"CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN",
	"CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN",
	"CHANNEL_TLV_RDMA_IWARP_REJECT",
	"CHANNEL_TLV_RDMA_IWARP_SEND_RTR",
	"CHANNEL_TLV_ESTABLISH_LL2_CONN",
	"CHANNEL_TLV_TERMINATE_LL2_CONN",
	"CHANNEL_TLV_ASYNC_EVENT",
	"CHANNEL_TLV_SOFT_FLR",
	"CHANNEL_TLV_MAX"
};
static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
{
	u8 legacy = 0;

	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= ECORE_QCID_LEGACY_VF_CID;

	return legacy;
}
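
/* Posts a COMMON_RAMROD_VF_START ramrod for the VF. This is also where
 * the fastpath HSI minor version is negotiated: a VF requesting a newer
 * minor than the PF supports is capped to the PF's minor.
 */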
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	int fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
	case ECORE_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
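
/* A VF index is valid only if it falls within the PCI-reported total;
 * b_enabled_only additionally requires the VF to have been initialized
 * via ecore_iov_init_hw_for_vf(), and b_non_malicious rejects VFs that
 * were previously flagged as malicious.
 */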
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				    b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid &&
		    !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
	}

	return OSAL_NULL;
}

enum ecore_iov_validate_q_mode {
	ECORE_IOV_VALIDATE_Q_NA,
	ECORE_IOV_VALIDATE_Q_ENABLE,
	ECORE_IOV_VALIDATE_Q_DISABLE,
};

static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
					  u16 qid,
					  enum ecore_iov_validate_q_mode mode,
					  bool b_is_tx)
{
	int i;

	if (mode == ECORE_IOV_VALIDATE_Q_NA)
		return true;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct ecore_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];

		if (p_qcid->p_cid == OSAL_NULL)
			continue;

		if (p_qcid->b_is_tx != b_is_tx)
			continue;

		/* Found. It's enabled. */
		return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
	}

	/* In case we haven't found any valid cid, then it's disabled */
	return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
}
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}

static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid,
				   enum ecore_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}

static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}

/* Is there at least 1 queue open? */
static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  false))
			return true;

	return false;
}

static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (ecore_iov_validate_queue_mode(p_vf, i,
						  ECORE_IOV_VALIDATE_Q_ENABLE,
						  true))
			return true;

	return false;
}

enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
				     p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vf_id = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}

static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs are already set to non-zero value."
			   " Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d"
			  " setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
	}

	return ECORE_SUCCESS;
}

static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u16 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without alloc mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
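
/* The per-VF mailbox request/reply buffers and bulletin slots wired up
 * above live in three flat DMA arrays, allocated below and indexed by
 * relative VF id; e.g., VF k's request TLVs sit at mbx_msg_phys_addr +
 * k * sizeof(union vfpf_tlvs).
 */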
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}

static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}

enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, false,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				    ecore_sriov_eqe_event);

	return ecore_iov_allocate_vfdb(p_hwfn);
}

void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
}

enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, false,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}

	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		return ECORE_SUCCESS;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the
	 * later VFs to differentiate between the two.
	 */
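	/* E.g., with ARI and abs_pf_id 0, an SR-IOV offset of 16 yields
	 * first_vf_in_pf = 16 + 0 - 16 = 0, i.e. this PF's VFs start at
	 * engine VF index 0.
	 */
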
	if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		p_dev->p_iov_info->first_vf_in_pf = first;

		if (ECORE_PATH_ID(p_hwfn))
			p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->p_dev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		p_dev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}

static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
				       bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
}

void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}

/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id, void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
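
/* Each PGLUE_B WAS_ERROR bank holds one bit per VF, 32 VFs per 32-bit
 * register: (abs_vfid >> 5) selects the register and (abs_vfid & 0x1f)
 * the bit to clear within it.
 */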
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}

static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}

static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}

static enum _ecore_status_t
ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				u8 abs_vf_id,
				u8 num_sbs)
{
	u8 current_max = 0;
	int i;

	/* If client overrides this, don't do anything */
	if (p_hwfn->p_dev->b_dont_override_vf_msix)
		return ECORE_SUCCESS;

	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!ECORE_IS_BB(p_hwfn->p_dev)) {
		ecore_for_each_vf(p_hwfn, i) {
			struct ecore_vf_info *p_vf;

			p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
			if (!p_vf)
				continue;

			current_max = OSAL_MAX_T(u8, current_max,
						 p_vf->num_sbs);
		}
	}

	if (num_sbs > current_max)
		return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
						abs_vf_id, num_sbs);

	return ECORE_SUCCESS;
}

static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					     vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief ecore_iov_config_perm_table - configure the permission
 *        zone table.
 *
 * The queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *p_block;
	struct cau_sb_entry sb_entry;
	int qid = 0;
	u32 val = 0;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues =
		    (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = ecore_get_igu_free_sb(p_hwfn, false);
		if (!p_block)
			continue;

		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~ECORE_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		ecore_wr(p_hwfn, p_ptt,
			 IGU_REG_MAPPING_MEMORY +
			 sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure igu sb in CAU which were marked valid */
		ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
					p_hwfn->rel_pf_id,
					vf->abs_vf_id, 1);

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    p_block->igu_sb_id * sizeof(u64), 2,
				    OSAL_NULL /* default parameters */);
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}

/**
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
	}

	vf->num_sbs = 0;
}

void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}

#ifndef ASIC_ONLY
static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	/* Increase the maximum number of DORQ FIFO entries used by child VFs */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
}
#endif

enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
			 struct ecore_ptt *p_ptt,
			 struct ecore_iov_vf_init_params *p_params)
{
	struct ecore_mcp_link_capabilities link_caps;
	struct ecore_mcp_link_params link_params;
	struct ecore_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	u16 qid, num_irqs;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return ECORE_INVAL;
	}

	/* Perform sanity checking on the requested vport/rss */
	if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
			  p_params->rel_vf_id, p_params->vport_id);
		return ECORE_INVAL;
	}

	if ((p_params->num_queues > 1) &&
	    (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
		DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
			  p_params->rel_vf_id, p_params->rss_eng_id);
		return ECORE_INVAL;
	}

	/* TODO - remove this once we get confidence of change */
	if (!p_params->vport_id) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
		DP_NOTICE(p_hwfn, false,
			  "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
			  p_params->rel_vf_id);
	}
	vf->vport_id = p_params->vport_id;
	vf->rss_eng_id = p_params->rss_eng_id;

	/* Since it's possible to relocate SBs, it's a bit difficult to check
	 * things here. Simply check whether the index falls in the range
	 * belonging to the PF.
	 */
	for (i = 0; i < p_params->num_queues; i++) {
		qid = p_params->req_rx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
				  qid, p_params->rel_vf_id,
				  (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
			return ECORE_INVAL;
		}
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_irqs);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct ecore_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);
	}

	/* Update the link configuration in bulletin. */
	OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
		    sizeof(link_params));
	OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
		    sizeof(link_state));
	OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(link_caps));
	ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
			   &link_params, &link_state, &link_caps);

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (rc != ECORE_SUCCESS)
		return rc;

	vf->b_init = true;
	p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
		(1ULL << (vf->relative_vf_id % 64));

	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->p_iov_info->num_vfs++;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
#endif

	return ECORE_SUCCESS;
}

#ifndef ASIC_ONLY
static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	if (!ecore_mcp_is_init(p_hwfn)) {
		u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
					 PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);

		ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
			 sriov_dis);
	}
}
#endif

enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
		    ~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
#endif

	return ECORE_SUCCESS;
}

static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   qede_ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   qede_ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the unlocking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
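
/* Typical usage when building a reply (see ecore_iov_prepare_resp()
 * below): point the offset at the start of the reply buffer, add the
 * response TLV, then terminate the list with a CHANNEL_TLV_LIST_END TLV.
 */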
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, qede_ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
#ifdef CONFIG_ECORE_SW_CHANNEL
				    u16 length,
#else
				    u16 OSAL_UNUSED length,
#endif
				    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_vf->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vf_id = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	/* Once PF copies the rc to the VF, the latter can continue and
	 * send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}

static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
				     size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(i),
			   resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(&mbx->offset, type, length);
	ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      u16 relative_vf_id,
			      bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}

static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
		struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			ecore_eth_queue_cid_release(p_hwfn,
						    p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = OSAL_NULL;
		}
	}

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log(size) */
static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt)
{
	u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

static void
ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *p_vf,
				   struct vf_pf_resc_request *p_req,
				   struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
		     DB_ADDR_VF(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);

	/* If VF didn't bother asking for QIDs then don't bother limiting
	 * number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (ECORE_IS_CMT(p_hwfn->p_dev))
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

	if (bar_size / db_size < 256)
		p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
					      (u8)(bar_size / db_size));
}
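
/* Worked example of the clamp above (illustrative numbers only): with a
 * 32 KB doorbell view and 128 B between consecutive doorbells, 32768 /
 * 128 = 256 doorbells fit, so up to 256 CIDs can be granted; a smaller
 * BAR caps num_cids at bar_size / db_size.
 */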
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
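
/* ACQUIRE is the first message a VF sends: the flow below validates the
 * VF's state and fastpath HSI compatibility, stores the request, fills in
 * PF/device capabilities and the granted resources, starts the VF in FW
 * via ecore_sp_vf_start() and finally posts an initial bulletin image.
 */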
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* TODO - not doing anything is bad since we'll assert, but this isn't
	 * necessarily the right behavior - perhaps we should have allowed some
	 * versatility here.
	 */
	if (vf->state != VF_FREE &&
	    vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if (ECORE_IS_CMT(p_hwfn->p_dev) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (ECORE_IS_CMT(p_hwfn->p_dev))
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
								 p_ptt);

	ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}

static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter, ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
2032 static enum _ecore_status_t
2033 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
2034 struct ecore_vf_info *p_vf,
2037 enum _ecore_status_t rc = ECORE_SUCCESS;
2038 struct ecore_filter_ucast filter;
2040 if (!p_vf->vport_instance)
2043 if ((events & (1 << MAC_ADDR_FORCED)) ||
2044 p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
2045 p_vf->p_vf_info.is_trusted_configured) {
2046 /* Since there's no way [currently] of removing the MAC,
2047 * we can always assume this means we need to force it.
2049 OSAL_MEMSET(&filter, 0, sizeof(filter));
2050 filter.type = ECORE_FILTER_MAC;
2051 filter.opcode = ECORE_FILTER_REPLACE;
2052 filter.is_rx_filter = 1;
2053 filter.is_tx_filter = 1;
2054 filter.vport_to_add_to = p_vf->vport_id;
2055 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
2057 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2059 ECORE_SPQ_MODE_CB, OSAL_NULL);
2061 DP_NOTICE(p_hwfn, true,
2062 "PF failed to configure MAC for VF\n");
2066 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
2067 p_vf->p_vf_info.is_trusted_configured)
2068 p_vf->configured_features |=
2069 1 << VFPF_BULLETIN_MAC_ADDR;
2071 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
2074 if (events & (1 << VLAN_ADDR_FORCED)) {
2075 struct ecore_sp_vport_update_params vport_update;
2079 OSAL_MEMSET(&filter, 0, sizeof(filter));
2080 filter.type = ECORE_FILTER_VLAN;
2081 filter.is_rx_filter = 1;
2082 filter.is_tx_filter = 1;
2083 filter.vport_to_add_to = p_vf->vport_id;
2084 filter.vlan = p_vf->bulletin.p_virt->pvid;
2085 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
2088 /* Send the ramrod */
2089 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
2091 ECORE_SPQ_MODE_CB, OSAL_NULL);
2093 DP_NOTICE(p_hwfn, true,
2094 "PF failed to configure VLAN for VF\n");
2098 /* Update the default-vlan & silent vlan stripping */
2099 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
2100 vport_update.opaque_fid = p_vf->opaque_fid;
2101 vport_update.vport_id = p_vf->vport_id;
2102 vport_update.update_default_vlan_enable_flg = 1;
2103 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
2104 vport_update.update_default_vlan_flg = 1;
2105 vport_update.default_vlan = filter.vlan;
2107 vport_update.update_inner_vlan_removal_flg = 1;
2108 removal = filter.vlan ?
2109 1 : p_vf->shadow_config.inner_vlan_removal;
2110 vport_update.inner_vlan_removal_flg = removal;
2111 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
2112 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
2113 ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
2115 DP_NOTICE(p_hwfn, true,
2116 "PF failed to configure VF vport for vlan\n");
2120 /* Update all the Rx queues */
2121 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
2122 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
2123 struct ecore_queue_cid *p_cid = OSAL_NULL;
2125 /* There can be at most one Rx queue per qzone. Find it */
2126 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2127 if (p_cid == OSAL_NULL)
2130 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2133 ECORE_SPQ_MODE_EBLOCK,
2136 DP_NOTICE(p_hwfn, true,
2137 "Failed to send Rx update"
2138 " fo queue[0x%04x]\n",
2139 p_cid->rel.queue_id);
2145 p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
2147 p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
2150 /* If forced features are terminated, we need to re-apply the shadow
2151 * configuration.
2154 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
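2155 /* Handler for the CHANNEL_TLV_VPORT_START request - initializes the
2156 * VF status blocks in CAU, starts the vport in FW and applies any
2157 * configuration forced by the hypervisor.
2158 */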
2159 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2160 struct ecore_ptt *p_ptt,
2161 struct ecore_vf_info *vf)
2163 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2164 struct ecore_sp_vport_start_params params;
2165 struct vfpf_vport_start_tlv *start;
2166 u8 status = PFVF_STATUS_SUCCESS;
2167 struct ecore_vf_info *vf_info;
2170 enum _ecore_status_t rc;
2172 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2174 DP_NOTICE(p_hwfn->p_dev, true,
2175 "Failed to get VF info, invalid vfid [%d]\n",
2176 vf->relative_vf_id);
2180 vf->state = VF_ENABLED;
2181 start = &mbx->req_virt->start_vport;
2183 ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2185 /* Initialize Status block in CAU */
2186 for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2187 if (!start->sb_addr[sb_id]) {
2188 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2189 "VF[%d] did not fill the address of SB %d\n",
2190 vf->relative_vf_id, sb_id);
2194 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2195 start->sb_addr[sb_id],
2200 vf->mtu = start->mtu;
2201 vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2203 /* Take into consideration configuration forced by hypervisor;
2204 * If none is configured, use the supplied VF values [for old
2205 * vfs that would still be fine, since they passed '0' as padding].
2207 p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2208 if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2209 u8 vf_req = start->only_untagged;
2211 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2212 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2215 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_start_params));
2216 params.tpa_mode = start->tpa_mode;
2217 params.remove_inner_vlan = start->inner_vlan_removal;
2218 params.tx_switching = true;
2221 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2222 DP_NOTICE(p_hwfn, false,
2223 "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2224 params.tx_switching = false;
2228 params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2229 params.drop_ttl0 = false;
2230 params.concrete_fid = vf->concrete_fid;
2231 params.opaque_fid = vf->opaque_fid;
2232 params.vport_id = vf->vport_id;
2233 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2234 params.mtu = vf->mtu;
2236 /* Non-trusted VFs should enable control frame filtering */
2237 params.check_mac = !vf->p_vf_info.is_trusted_configured;
2239 rc = ecore_sp_eth_vport_start(p_hwfn, ¶ms);
2240 if (rc != ECORE_SUCCESS) {
2242 "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2243 status = PFVF_STATUS_FAILURE;
2245 vf->vport_instance++;
2247 /* Force configuration if needed on the newly opened vport */
2248 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2249 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2250 vf->vport_id, vf->opaque_fid);
2251 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2254 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2255 sizeof(struct pfvf_def_resp_tlv), status);
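2256 /* Handler for the CHANNEL_TLV_VPORT_TEARDOWN request; a VF that still
2257 * has active queues is marked malicious. */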
2258 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2259 struct ecore_ptt *p_ptt,
2260 struct ecore_vf_info *vf)
2262 u8 status = PFVF_STATUS_SUCCESS;
2263 enum _ecore_status_t rc;
2265 OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
2266 vf->vport_instance--;
2267 vf->spoof_chk = false;
2269 if ((ecore_iov_validate_active_rxq(vf)) ||
2270 (ecore_iov_validate_active_txq(vf))) {
2271 vf->b_malicious = true;
2272 DP_NOTICE(p_hwfn, false,
2273 "VF [%02x] - considered malicious;"
2274 " Unable to stop RX/TX queuess\n",
2276 status = PFVF_STATUS_MALICIOUS;
2280 rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2281 if (rc != ECORE_SUCCESS) {
2283 "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2284 status = PFVF_STATUS_FAILURE;
2287 /* Forget the configuration on the vport */
2288 vf->configured_features = 0;
2289 OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2292 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2293 sizeof(struct pfvf_def_resp_tlv), status);
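2294 /* Send the response for a START_RXQ request; legacy VFs get the
2295 * shorter default response format they expect. */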
2296 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2297 struct ecore_ptt *p_ptt,
2298 struct ecore_vf_info *vf,
2299 u8 status, bool b_legacy)
2301 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2302 struct pfvf_start_queue_resp_tlv *p_tlv;
2303 struct vfpf_start_rxq_tlv *req;
2306 mbx->offset = (u8 *)mbx->reply_virt;
2308 /* Taking a bigger struct instead of adding a TLV to list was a
2309 * mistake, but one which we're now stuck with, as some older
2310 * clients assume the size of the previous response.
2313 length = sizeof(*p_tlv);
2315 length = sizeof(struct pfvf_def_resp_tlv);
2317 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
2318 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2319 sizeof(struct channel_list_end_tlv));
2321 /* Update the TLV with the response.
2322 * The VF Rx producers are located in the vf zone.
2324 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2325 req = &mbx->req_virt->start_rxq;
2328 PXP_VF_BAR0_START_MSDM_ZONE_B +
2329 OFFSETOF(struct mstorm_vf_zone,
2330 non_trigger.eth_rx_queue_producers) +
2331 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2334 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
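2335 /* Return the qid provided via CHANNEL_TLV_QID, or the fixed legacy
2336 * index for VFs that don't support queue qids. */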
2337 static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
2338 struct ecore_vf_info *p_vf, bool b_is_tx)
2340 struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
2341 struct vfpf_qid_tlv *p_qid_tlv;
2343 /* Search for the qid TLV if the VF indicated it's going to provide it */
2344 if (!(p_vf->acquire.vfdev_info.capabilities &
2345 VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
2347 return ECORE_IOV_LEGACY_QID_TX;
2349 return ECORE_IOV_LEGACY_QID_RX;
2352 p_qid_tlv = (struct vfpf_qid_tlv *)
2353 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2355 if (p_qid_tlv == OSAL_NULL) {
2356 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2357 "VF[%2x]: Failed to provide qid\n",
2358 p_vf->relative_vf_id);
2360 return ECORE_IOV_QID_INVALID;
2363 if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
2364 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2365 "VF[%02x]: Provided qid out-of-bounds %02x\n",
2366 p_vf->relative_vf_id, p_qid_tlv->qid);
2367 return ECORE_IOV_QID_INVALID;
2370 return p_qid_tlv->qid;
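2371 /* Handler for the CHANNEL_TLV_START_RXQ request - validates the
2372 * request, acquires a queue-cid and sends the Rx start ramrod. */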
2373 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2374 struct ecore_ptt *p_ptt,
2375 struct ecore_vf_info *vf)
2377 struct ecore_queue_start_common_params params;
2378 struct ecore_queue_cid_vf_params vf_params;
2379 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2380 u8 status = PFVF_STATUS_NO_RESOURCE;
2381 u8 qid_usage_idx, vf_legacy = 0;
2382 struct ecore_vf_queue *p_queue;
2383 struct vfpf_start_rxq_tlv *req;
2384 struct ecore_queue_cid *p_cid;
2385 struct ecore_sb_info sb_dummy;
2386 enum _ecore_status_t rc;
2388 req = &mbx->req_virt->start_rxq;
2390 if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2391 ECORE_IOV_VALIDATE_Q_DISABLE) ||
2392 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2395 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2396 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2399 p_queue = &vf->vf_queues[req->rx_qid];
2400 if (p_queue->cids[qid_usage_idx].p_cid)
2403 vf_legacy = ecore_vf_calculate_legacy(vf);
2405 /* Acquire a new queue-cid */
2406 OSAL_MEMSET(¶ms, 0, sizeof(params));
2407 params.queue_id = (u8)p_queue->fw_rx_qid;
2408 params.vport_id = vf->vport_id;
2409 params.stats_id = vf->abs_vf_id + 0x10;
2411 /* Since IGU index is passed via sb_info, construct a dummy one */
2412 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2413 sb_dummy.igu_sb_id = req->hw_sb;
2414 params.p_sb = &sb_dummy;
2415 params.sb_idx = req->sb_index;
2417 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2418 vf_params.vfid = vf->relative_vf_id;
2419 vf_params.vf_qid = (u8)req->rx_qid;
2420 vf_params.vf_legacy = vf_legacy;
2421 vf_params.qid_usage_idx = qid_usage_idx;
2423 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2424 ¶ms, true, &vf_params);
2425 if (p_cid == OSAL_NULL)
2428 /* The VF Rx producers are located in the vf zone.
2429 * Legacy VFs have their producers in the queue zone, but they
2430 * calculate the location on their own and clean them prior to this.
2432 if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
2434 GTT_BAR0_MAP_REG_MSDM_RAM +
2435 MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
2439 rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2444 if (rc != ECORE_SUCCESS) {
2445 status = PFVF_STATUS_FAILURE;
2446 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2448 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2449 p_queue->cids[qid_usage_idx].b_is_tx = false;
2450 status = PFVF_STATUS_SUCCESS;
2451 vf->num_active_rxqs++;
2455 ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2457 ECORE_QCID_LEGACY_VF_RX_PROD));
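2458 /* Fill the tunnel-update response with the currently configured tunnel
2459 * modes, classifications and UDP ports. */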
2461 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2462 struct ecore_tunnel_info *p_tun,
2463 u16 tunn_feature_mask)
2465 p_resp->tunn_feature_mask = tunn_feature_mask;
2466 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2467 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2468 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2469 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2470 p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
2471 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2472 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2473 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2474 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2475 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2476 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2477 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2481 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2482 struct ecore_tunn_update_type *p_tun,
2483 enum ecore_tunn_mode mask, u8 tun_cls)
2485 if (p_req->tun_mode_update_mask & (1 << mask)) {
2486 p_tun->b_update_mode = true;
2488 if (p_req->tunn_mode & (1 << mask))
2489 p_tun->b_mode_enabled = true;
2492 p_tun->tun_cls = tun_cls;
2496 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2497 struct ecore_tunn_update_type *p_tun,
2498 struct ecore_tunn_update_udp_port *p_port,
2499 enum ecore_tunn_mode mask,
2500 u8 tun_cls, u8 update_port, u16 port)
2503 p_port->b_update_port = true;
2504 p_port->port = port;
2507 __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2511 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2513 bool b_update_requested = false;
2515 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2516 p_req->update_geneve_port || p_req->update_vxlan_port)
2517 b_update_requested = true;
2519 return b_update_requested;
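2520 /* Handler for the CHANNEL_TLV_UPDATE_TUNN_PARAM request - the ecore
2521 * client may validate/modify the request before the ramrod is sent. */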
2522 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2523 struct ecore_ptt *p_ptt,
2524 struct ecore_vf_info *p_vf)
2526 struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2527 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2528 struct pfvf_update_tunn_param_tlv *p_resp;
2529 struct vfpf_update_tunn_param_tlv *p_req;
2530 enum _ecore_status_t rc = ECORE_SUCCESS;
2531 u8 status = PFVF_STATUS_SUCCESS;
2532 bool b_update_required = false;
2533 struct ecore_tunnel_info tunn;
2534 u16 tunn_feature_mask = 0;
2537 mbx->offset = (u8 *)mbx->reply_virt;
2539 OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2540 p_req = &mbx->req_virt->tunn_param_update;
2542 if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2543 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2544 "No tunnel update requested by VF\n");
2545 status = PFVF_STATUS_FAILURE;
2549 tunn.b_update_rx_cls = p_req->update_tun_cls;
2550 tunn.b_update_tx_cls = p_req->update_tun_cls;
2552 ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2553 ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2554 p_req->update_vxlan_port,
2556 ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2557 ECORE_MODE_L2GENEVE_TUNN,
2558 p_req->l2geneve_clss,
2559 p_req->update_geneve_port,
2560 p_req->geneve_port);
2561 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2562 ECORE_MODE_IPGENEVE_TUNN,
2563 p_req->ipgeneve_clss);
2564 __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2565 ECORE_MODE_L2GRE_TUNN,
2567 __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2568 ECORE_MODE_IPGRE_TUNN,
2571 /* If the PF modifies the VF's request, it should still return an
2572 * error in case of a partial or modified configuration, as opposed
2573 * to the requested one.
2575 rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2576 &b_update_required, &tunn);
2578 if (rc != ECORE_SUCCESS)
2579 status = PFVF_STATUS_FAILURE;
2581 /* Check whether the ECORE client is willing to update anything */
2582 if (b_update_required) {
2585 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2586 ECORE_SPQ_MODE_EBLOCK,
2588 if (rc != ECORE_SUCCESS)
2589 status = PFVF_STATUS_FAILURE;
2591 geneve_port = p_tun->geneve_port.port;
2592 ecore_for_each_vf(p_hwfn, i) {
2593 ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2594 p_tun->vxlan_port.port,
2600 p_resp = ecore_add_tlv(&mbx->offset,
2601 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2603 ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2604 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2605 sizeof(struct channel_list_end_tlv));
2607 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2610 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2611 struct ecore_ptt *p_ptt,
2612 struct ecore_vf_info *p_vf,
2616 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2617 struct pfvf_start_queue_resp_tlv *p_tlv;
2618 bool b_legacy = false;
2621 mbx->offset = (u8 *)mbx->reply_virt;
2623 /* Taking a bigger struct instead of adding a TLV to list was a
2624 * mistake, but one which we're now stuck with, as some older
2625 * clients assume the size of the previous response.
2627 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2628 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2632 length = sizeof(*p_tlv);
2634 length = sizeof(struct pfvf_def_resp_tlv);
2636 p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
2637 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
2638 sizeof(struct channel_list_end_tlv));
2640 /* Update the TLV with the response */
2641 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2642 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2644 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
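2645 /* Handler for the CHANNEL_TLV_START_TXQ request - mirrors the Rx flow:
2646 * validate, acquire a queue-cid and send the Tx start ramrod. */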
2647 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2648 struct ecore_ptt *p_ptt,
2649 struct ecore_vf_info *vf)
2651 struct ecore_queue_start_common_params params;
2652 struct ecore_queue_cid_vf_params vf_params;
2653 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2654 u8 status = PFVF_STATUS_NO_RESOURCE;
2655 struct ecore_vf_queue *p_queue;
2656 struct vfpf_start_txq_tlv *req;
2657 struct ecore_queue_cid *p_cid;
2658 struct ecore_sb_info sb_dummy;
2659 u8 qid_usage_idx, vf_legacy;
2661 enum _ecore_status_t rc;
2664 OSAL_MEMSET(¶ms, 0, sizeof(params));
2665 req = &mbx->req_virt->start_txq;
2667 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2668 ECORE_IOV_VALIDATE_Q_NA) ||
2669 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2672 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2673 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2676 p_queue = &vf->vf_queues[req->tx_qid];
2677 if (p_queue->cids[qid_usage_idx].p_cid)
2680 vf_legacy = ecore_vf_calculate_legacy(vf);
2682 /* Acquire a new queue-cid */
2683 params.queue_id = p_queue->fw_tx_qid;
2684 params.vport_id = vf->vport_id;
2685 params.stats_id = vf->abs_vf_id + 0x10;
2687 /* Since IGU index is passed via sb_info, construct a dummy one */
2688 OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
2689 sb_dummy.igu_sb_id = req->hw_sb;
2690 params.p_sb = &sb_dummy;
2691 params.sb_idx = req->sb_index;
2693 OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2694 vf_params.vfid = vf->relative_vf_id;
2695 vf_params.vf_qid = (u8)req->tx_qid;
2696 vf_params.vf_legacy = vf_legacy;
2697 vf_params.qid_usage_idx = qid_usage_idx;
2699 p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2700 ¶ms, false, &vf_params);
2701 if (p_cid == OSAL_NULL)
2704 pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2705 vf->relative_vf_id);
2706 rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2707 req->pbl_addr, req->pbl_size, pq);
2708 if (rc != ECORE_SUCCESS) {
2709 status = PFVF_STATUS_FAILURE;
2710 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2712 status = PFVF_STATUS_SUCCESS;
2713 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2714 p_queue->cids[qid_usage_idx].b_is_tx = true;
2719 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2723 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2724 struct ecore_vf_info *vf,
2727 bool cqe_completion)
2729 struct ecore_vf_queue *p_queue;
2730 enum _ecore_status_t rc = ECORE_SUCCESS;
2732 if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
2733 ECORE_IOV_VALIDATE_Q_NA)) {
2734 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2735 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2736 vf->relative_vf_id, rxq_id, qid_usage_idx);
2740 p_queue = &vf->vf_queues[rxq_id];
2742 /* We've validated the index and the existence of the active RXQ -
2743 * now we need to make sure that it's using the correct qid.
2745 if (!p_queue->cids[qid_usage_idx].p_cid ||
2746 p_queue->cids[qid_usage_idx].b_is_tx) {
2747 struct ecore_queue_cid *p_cid;
2749 p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
2750 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2751 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2752 vf->relative_vf_id, rxq_id, qid_usage_idx,
2753 rxq_id, p_cid->qid_usage_idx);
2757 /* Now that we know we have a valid Rx-queue - close it */
2758 rc = ecore_eth_rx_queue_stop(p_hwfn,
2759 p_queue->cids[qid_usage_idx].p_cid,
2760 false, cqe_completion);
2761 if (rc != ECORE_SUCCESS)
2764 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2765 vf->num_active_rxqs--;
2767 return ECORE_SUCCESS;
2770 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2771 struct ecore_vf_info *vf,
2775 struct ecore_vf_queue *p_queue;
2776 enum _ecore_status_t rc = ECORE_SUCCESS;
2778 if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2779 ECORE_IOV_VALIDATE_Q_NA))
2782 p_queue = &vf->vf_queues[txq_id];
2783 if (!p_queue->cids[qid_usage_idx].p_cid ||
2784 !p_queue->cids[qid_usage_idx].b_is_tx)
2787 rc = ecore_eth_tx_queue_stop(p_hwfn,
2788 p_queue->cids[qid_usage_idx].p_cid);
2789 if (rc != ECORE_SUCCESS)
2792 p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
2793 return ECORE_SUCCESS;
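2794 /* Handler for the CHANNEL_TLV_STOP_RXQS request; only a single queue
2795 * per request is supported. */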
2796 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2797 struct ecore_ptt *p_ptt,
2798 struct ecore_vf_info *vf)
2800 u16 length = sizeof(struct pfvf_def_resp_tlv);
2801 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2802 u8 status = PFVF_STATUS_FAILURE;
2803 struct vfpf_stop_rxqs_tlv *req;
2805 enum _ecore_status_t rc;
2807 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
2808 * would be one. Since no older ecore passed multiple queues
2809 * using this API, sanitize the value.
2811 req = &mbx->req_virt->stop_rxqs;
2812 if (req->num_rxqs != 1) {
2813 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2814 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2815 vf->relative_vf_id);
2816 status = PFVF_STATUS_NOT_SUPPORTED;
2820 /* Find which qid-index is associated with the queue */
2821 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2822 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2825 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2826 qid_usage_idx, req->cqe_completion);
2827 if (rc == ECORE_SUCCESS)
2828 status = PFVF_STATUS_SUCCESS;
2830 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2834 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2835 struct ecore_ptt *p_ptt,
2836 struct ecore_vf_info *vf)
2838 u16 length = sizeof(struct pfvf_def_resp_tlv);
2839 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2840 u8 status = PFVF_STATUS_FAILURE;
2841 struct vfpf_stop_txqs_tlv *req;
2843 enum _ecore_status_t rc;
2845 /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
2846 * would be one. Since no older ecore passed multiple queues
2847 * using this API, sanitize the value.
2849 req = &mbx->req_virt->stop_txqs;
2850 if (req->num_txqs != 1) {
2851 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2852 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2853 vf->relative_vf_id);
2854 status = PFVF_STATUS_NOT_SUPPORTED;
2858 /* Find which qid-index is associated with the queue */
2859 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
2860 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2863 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
2865 if (rc == ECORE_SUCCESS)
2866 status = PFVF_STATUS_SUCCESS;
2869 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2873 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2874 struct ecore_ptt *p_ptt,
2875 struct ecore_vf_info *vf)
2877 struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2878 u16 length = sizeof(struct pfvf_def_resp_tlv);
2879 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2880 struct vfpf_update_rxq_tlv *req;
2881 u8 status = PFVF_STATUS_FAILURE;
2882 u8 complete_event_flg;
2883 u8 complete_cqe_flg;
2885 enum _ecore_status_t rc;
2888 req = &mbx->req_virt->update_rxq;
2889 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2890 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2892 qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
2893 if (qid_usage_idx == ECORE_IOV_QID_INVALID)
2896 /* Starting with the addition of CHANNEL_TLV_QID, this API started
2897 * expecting a single queue at a time. Validate this.
2899 if ((vf->acquire.vfdev_info.capabilities &
2900 VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
2901 req->num_rxqs != 1) {
2902 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2903 "VF[%d] supports QIDs but sends multiple queues\n",
2904 vf->relative_vf_id);
2908 /* Validate inputs - for the legacy case this is still true since
2909 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2911 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2912 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2913 ECORE_IOV_VALIDATE_Q_NA) ||
2914 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2915 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2916 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2917 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2918 vf->relative_vf_id, req->rx_qid,
2924 for (i = 0; i < req->num_rxqs; i++) {
2925 u16 qid = req->rx_qid + i;
2927 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2930 rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2934 ECORE_SPQ_MODE_EBLOCK,
2936 if (rc != ECORE_SUCCESS)
2939 status = PFVF_STATUS_SUCCESS;
2941 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2945 static enum _ecore_status_t
2946 ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
2947 struct ecore_ptt *p_ptt,
2948 struct ecore_vf_info *p_vf)
2950 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2951 struct ecore_sp_vport_update_params params;
2952 enum _ecore_status_t rc = ECORE_SUCCESS;
2953 struct vfpf_update_mtu_tlv *p_req;
2954 u8 status = PFVF_STATUS_SUCCESS;
2956 /* Validate that the VF can send such a request */
2957 if (!p_vf->vport_instance) {
2958 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2959 "No VPORT instance available for VF[%d], failing MTU update\n",
2961 status = PFVF_STATUS_FAILURE;
2965 p_req = &mbx->req_virt->update_mtu;
2967 OSAL_MEMSET(¶ms, 0, sizeof(params));
2968 params.opaque_fid = p_vf->opaque_fid;
2969 params.vport_id = p_vf->vport_id;
2970 params.mtu = p_req->mtu;
2971 rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK,
2975 status = PFVF_STATUS_FAILURE;
2977 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
2978 CHANNEL_TLV_UPDATE_MTU,
2979 sizeof(struct pfvf_def_resp_tlv),
2984 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2985 void *p_tlvs_list, u16 req_type)
2987 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2991 if (!p_tlv->length) {
2992 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2996 if (p_tlv->type == req_type) {
2997 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2998 "Extended tlv type %s, length %d found\n",
2999 qede_ecore_channel_tlvs_string[p_tlv->type],
3004 len += p_tlv->length;
3005 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
3007 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
3008 DP_NOTICE(p_hwfn, true,
3009 "TLVs has overrun the buffer size\n");
3012 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
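3014 /* The ecore_iov_vp_update_*() helpers below each search the request
3015 * for their extended TLV and, when found, copy its contents into the
3016 * vport-update params and set the matching bit in tlvs_mask.
3017 */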
3018 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
3019 struct ecore_sp_vport_update_params *p_data,
3020 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3022 struct vfpf_vport_update_activate_tlv *p_act_tlv;
3023 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
3025 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
3026 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3030 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
3031 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
3032 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
3033 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
3034 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
3038 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
3039 struct ecore_sp_vport_update_params *p_data,
3040 struct ecore_vf_info *p_vf,
3041 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3043 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
3044 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
3046 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
3047 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3051 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
3053 /* Ignore the VF request if we're forcing a vlan */
3054 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
3055 p_data->update_inner_vlan_removal_flg = 1;
3056 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
3059 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
3063 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
3064 struct ecore_sp_vport_update_params *p_data,
3065 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3067 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
3068 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
3070 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
3071 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3072 if (!p_tx_switch_tlv)
3076 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
3077 DP_NOTICE(p_hwfn, false,
3078 "FPGA: Ignore tx-switching configuration originating"
3084 p_data->update_tx_switching_flg = 1;
3085 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
3086 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
3090 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
3091 struct ecore_sp_vport_update_params *p_data,
3092 struct ecore_iov_vf_mbx *p_mbx,
3095 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
3096 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
3098 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
3099 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3103 p_data->update_approx_mcast_flg = 1;
3104 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
3105 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
3106 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
3110 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
3111 struct ecore_sp_vport_update_params *p_data,
3112 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
3114 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
3115 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
3116 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
3118 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
3119 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3123 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
3124 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
3125 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
3126 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
3127 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
3131 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
3132 struct ecore_sp_vport_update_params *p_data,
3133 struct ecore_iov_vf_mbx *p_mbx,
3136 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
3137 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
3139 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
3140 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3141 if (!p_accept_any_vlan)
3144 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
3145 p_data->update_accept_any_vlan_flg =
3146 p_accept_any_vlan->update_accept_any_vlan_flg;
3147 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
3151 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
3152 struct ecore_vf_info *vf,
3153 struct ecore_sp_vport_update_params *p_data,
3154 struct ecore_rss_params *p_rss,
3155 struct ecore_iov_vf_mbx *p_mbx,
3156 u16 *tlvs_mask, u16 *tlvs_accepted)
3158 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
3159 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
3160 bool b_reject = false;
3164 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
3165 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3167 p_data->rss_params = OSAL_NULL;
3171 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
3173 p_rss->update_rss_config =
3174 !!(p_rss_tlv->update_rss_flags &
3175 VFPF_UPDATE_RSS_CONFIG_FLAG);
3176 p_rss->update_rss_capabilities =
3177 !!(p_rss_tlv->update_rss_flags &
3178 VFPF_UPDATE_RSS_CAPS_FLAG);
3179 p_rss->update_rss_ind_table =
3180 !!(p_rss_tlv->update_rss_flags &
3181 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
3182 p_rss->update_rss_key =
3183 !!(p_rss_tlv->update_rss_flags &
3184 VFPF_UPDATE_RSS_KEY_FLAG);
3186 p_rss->rss_enable = p_rss_tlv->rss_enable;
3187 p_rss->rss_eng_id = vf->rss_eng_id;
3188 p_rss->rss_caps = p_rss_tlv->rss_caps;
3189 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
3190 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
3191 sizeof(p_rss->rss_key));
3193 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
3194 (1 << p_rss_tlv->rss_table_size_log));
3196 for (i = 0; i < table_size; i++) {
3197 struct ecore_queue_cid *p_cid;
3199 q_idx = p_rss_tlv->rss_ind_table[i];
3200 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
3201 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3202 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3203 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
3204 vf->relative_vf_id, q_idx);
3209 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
3210 p_rss->rss_ind_table[i] = p_cid;
3213 p_data->rss_params = p_rss;
3215 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3217 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
3221 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
3222 struct ecore_sp_vport_update_params *p_data,
3223 struct ecore_sge_tpa_params *p_sge_tpa,
3224 struct ecore_iov_vf_mbx *p_mbx,
3227 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
3228 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
3230 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
3231 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
3233 if (!p_sge_tpa_tlv) {
3234 p_data->sge_tpa_params = OSAL_NULL;
3238 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
3240 p_sge_tpa->update_tpa_en_flg =
3241 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
3242 p_sge_tpa->update_tpa_param_flg =
3243 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
3244 VFPF_UPDATE_TPA_PARAM_FLAG);
3246 p_sge_tpa->tpa_ipv4_en_flg =
3247 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
3248 p_sge_tpa->tpa_ipv6_en_flg =
3249 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
3250 p_sge_tpa->tpa_pkt_split_flg =
3251 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
3252 p_sge_tpa->tpa_hdr_data_split_flg =
3253 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
3254 p_sge_tpa->tpa_gro_consistent_flg =
3255 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
3257 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
3258 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
3259 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
3260 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
3261 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
3263 p_data->sge_tpa_params = p_sge_tpa;
3265 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
3268 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
3269 struct ecore_ptt *p_ptt,
3270 struct ecore_vf_info *vf)
3272 struct ecore_rss_params *p_rss_params = OSAL_NULL;
3273 struct ecore_sp_vport_update_params params;
3274 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3275 struct ecore_sge_tpa_params sge_tpa_params;
3276 u16 tlvs_mask = 0, tlvs_accepted = 0;
3277 u8 status = PFVF_STATUS_SUCCESS;
3279 enum _ecore_status_t rc;
3281 /* Validate that the VF can send such a request */
3282 if (!vf->vport_instance) {
3283 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3284 "No VPORT instance available for VF[%d],"
3285 " failing vport update\n",
3287 status = PFVF_STATUS_FAILURE;
3291 p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3292 if (p_rss_params == OSAL_NULL) {
3293 status = PFVF_STATUS_FAILURE;
3297 OSAL_MEMSET(¶ms, 0, sizeof(params));
3298 params.opaque_fid = vf->opaque_fid;
3299 params.vport_id = vf->vport_id;
3300 params.rss_params = OSAL_NULL;
3302 /* Search for extended tlvs list and update values
3303 * from VF in struct ecore_sp_vport_update_params.
3305 ecore_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
3306 ecore_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask);
3307 ecore_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask);
3308 ecore_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
3309 ecore_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask);
3310 ecore_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask);
3311 ecore_iov_vp_update_sge_tpa_param(p_hwfn, ¶ms,
3312 &sge_tpa_params, mbx, &tlvs_mask);
3314 tlvs_accepted = tlvs_mask;
3316 /* Some of the extended TLVs need to be validated first; in that case,
3317 * they can update the mask without updating the accepted one [so that
3318 * the PF can communicate to the VF that it has rejected the request].
3320 ecore_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params,
3321 mbx, &tlvs_mask, &tlvs_accepted);
3323 /* Just log a message if no extended TLV is present in the buffer.
3324 * Once all features of the vport-update ramrod are requested by the
3325 * VF as extended TLVs, an error can be returned in the response when
3326 * no extended TLV is present in the buffer.
3328 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3329 ¶ms, &tlvs_accepted) !=
3332 status = PFVF_STATUS_NOT_SUPPORTED;
3336 if (!tlvs_accepted) {
3338 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3339 "Upper-layer prevents said VF"
3340 " configuration\n");
3342 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3343 "No feature tlvs found for vport update\n");
3344 status = PFVF_STATUS_NOT_SUPPORTED;
3348 rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK,
3352 status = PFVF_STATUS_FAILURE;
3355 OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3356 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3357 tlvs_mask, tlvs_accepted);
3358 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3361 static enum _ecore_status_t
3362 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3363 struct ecore_vf_info *p_vf,
3364 struct ecore_filter_ucast *p_params)
3368 /* First remove entries and then add new ones */
3369 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3370 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3371 if (p_vf->shadow_config.vlans[i].used &&
3372 p_vf->shadow_config.vlans[i].vid ==
3374 p_vf->shadow_config.vlans[i].used = false;
3377 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3378 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3379 "VF [%d] - Tries to remove a non-existing"
3381 p_vf->relative_vf_id);
3384 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3385 p_params->opcode == ECORE_FILTER_FLUSH) {
3386 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3387 p_vf->shadow_config.vlans[i].used = false;
3390 /* In forced mode, we're willing to remove entries - but we don't add
3393 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3394 return ECORE_SUCCESS;
3396 if (p_params->opcode == ECORE_FILTER_ADD ||
3397 p_params->opcode == ECORE_FILTER_REPLACE) {
3398 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3399 if (p_vf->shadow_config.vlans[i].used)
3402 p_vf->shadow_config.vlans[i].used = true;
3403 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3407 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3408 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3409 "VF [%d] - Tries to configure more than %d"
3411 p_vf->relative_vf_id,
3412 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3417 return ECORE_SUCCESS;
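3418 /* Track the VF's unicast MACs in the shadow configuration, unless a
3419 * MAC is forced or the VF is in trusted mode. */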
3420 static enum _ecore_status_t
3421 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3422 struct ecore_vf_info *p_vf,
3423 struct ecore_filter_ucast *p_params)
3425 char empty_mac[ETH_ALEN];
3428 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3430 /* If we're in forced-mode, we don't allow any change */
3431 /* TODO - this would change if we were ever to implement logic for
3432 * removing a forced MAC altogether [in which case, like for vlans,
3433 * we should be able to re-trace the previous configuration].
3435 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3436 return ECORE_SUCCESS;
3438 /* Since we don't have the implementation of the logic for removing
3439 * a forced MAC and restoring shadow MAC, let's not worry about
3440 * processing shadow copies of MAC as long as VF trust mode is ON,
3441 * to keep things simple.
3443 if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
3444 p_vf->p_vf_info.is_trusted_configured)
3445 return ECORE_SUCCESS;
3447 /* First remove entries and then add new ones */
3448 if (p_params->opcode == ECORE_FILTER_REMOVE) {
3449 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3450 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3451 p_params->mac, ETH_ALEN)) {
3452 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3458 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3459 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3460 "MAC isn't configured\n");
3463 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3464 p_params->opcode == ECORE_FILTER_FLUSH) {
3465 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3466 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3469 /* List the new MAC address */
3470 if (p_params->opcode != ECORE_FILTER_ADD &&
3471 p_params->opcode != ECORE_FILTER_REPLACE)
3472 return ECORE_SUCCESS;
3474 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3475 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3476 empty_mac, ETH_ALEN)) {
3477 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3478 p_params->mac, ETH_ALEN);
3479 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3480 "Added MAC at %d entry in shadow\n", i);
3485 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3486 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3487 "No available place for MAC\n");
3491 return ECORE_SUCCESS;
3494 static enum _ecore_status_t
3495 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3496 struct ecore_vf_info *p_vf,
3497 struct ecore_filter_ucast *p_params)
3499 enum _ecore_status_t rc = ECORE_SUCCESS;
3501 if (p_params->type == ECORE_FILTER_MAC) {
3502 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3503 if (rc != ECORE_SUCCESS)
3507 if (p_params->type == ECORE_FILTER_VLAN)
3508 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3513 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3514 struct ecore_ptt *p_ptt,
3515 struct ecore_vf_info *vf)
3517 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3518 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3519 struct vfpf_ucast_filter_tlv *req;
3520 u8 status = PFVF_STATUS_SUCCESS;
3521 struct ecore_filter_ucast params;
3522 enum _ecore_status_t rc;
3524 /* Prepare the unicast filter params */
3525 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_filter_ucast));
3526 req = &mbx->req_virt->ucast_filter;
3527 params.opcode = (enum ecore_filter_opcode)req->opcode;
3528 params.type = (enum ecore_filter_ucast_type)req->type;
3530 /* @@@TBD - We might need logic on the HV side for determining this */
3531 params.is_rx_filter = 1;
3532 params.is_tx_filter = 1;
3533 params.vport_to_remove_from = vf->vport_id;
3534 params.vport_to_add_to = vf->vport_id;
3535 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3536 params.vlan = req->vlan;
3538 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3539 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3540 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3541 vf->abs_vf_id, params.opcode, params.type,
3542 params.is_rx_filter ? "RX" : "",
3543 params.is_tx_filter ? "TX" : "",
3544 params.vport_to_add_to,
3545 params.mac[0], params.mac[1], params.mac[2],
3546 params.mac[3], params.mac[4], params.mac[5], params.vlan);
3548 if (!vf->vport_instance) {
3549 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3550 "No VPORT instance available for VF[%d],"
3551 " failing ucast MAC configuration\n",
3553 status = PFVF_STATUS_FAILURE;
3557 /* Update the shadow copy of the VF configuration. In case the shadow
3558 * indicates the action should be blocked, return success to the VF to
3559 * imitate the firmware behaviour in such a case.
3561 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) !=
3565 /* Determine whether the unicast filtering is acceptable to the PF */
3566 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3567 (params.type == ECORE_FILTER_VLAN ||
3568 params.type == ECORE_FILTER_MAC_VLAN)) {
3569 /* Once VLAN is forced or PVID is set, do not allow
3570 * to add/replace any further VLANs.
3572 if (params.opcode == ECORE_FILTER_ADD ||
3573 params.opcode == ECORE_FILTER_REPLACE)
3574 status = PFVF_STATUS_FORCED;
3578 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3579 (params.type == ECORE_FILTER_MAC ||
3580 params.type == ECORE_FILTER_MAC_VLAN)) {
3581 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3582 (params.opcode != ECORE_FILTER_ADD &&
3583 params.opcode != ECORE_FILTER_REPLACE))
3584 status = PFVF_STATUS_FORCED;
3588 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, ¶ms);
3589 if (rc == ECORE_EXISTS) {
3591 } else if (rc == ECORE_INVAL) {
3592 status = PFVF_STATUS_FAILURE;
3596 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms,
3597 ECORE_SPQ_MODE_CB, OSAL_NULL);
3599 status = PFVF_STATUS_FAILURE;
3602 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3603 sizeof(struct pfvf_def_resp_tlv), status);
3606 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3607 struct ecore_ptt *p_ptt,
3608 struct ecore_vf_info *vf)
3613 for (i = 0; i < vf->num_sbs; i++)
3614 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3616 vf->opaque_fid, false);
3618 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3619 sizeof(struct pfvf_def_resp_tlv),
3620 PFVF_STATUS_SUCCESS);
3623 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3624 struct ecore_ptt *p_ptt,
3625 struct ecore_vf_info *vf)
3627 u16 length = sizeof(struct pfvf_def_resp_tlv);
3628 u8 status = PFVF_STATUS_SUCCESS;
3630 /* Disable Interrupts for VF */
3631 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3633 /* Reset Permission table */
3634 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3636 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3640 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3641 struct ecore_ptt *p_ptt,
3642 struct ecore_vf_info *p_vf)
3644 u16 length = sizeof(struct pfvf_def_resp_tlv);
3645 u8 status = PFVF_STATUS_SUCCESS;
3646 enum _ecore_status_t rc = ECORE_SUCCESS;
3648 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3650 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3651 /* Stopping the VF */
3652 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3655 if (rc != ECORE_SUCCESS) {
3656 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3658 status = PFVF_STATUS_FAILURE;
3661 p_vf->state = VF_STOPPED;
3664 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3668 static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
3669 struct ecore_ptt *p_ptt,
3670 struct ecore_vf_info *p_vf)
3672 struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3673 struct pfvf_read_coal_resp_tlv *p_resp;
3674 struct vfpf_read_coal_req_tlv *req;
3675 u8 status = PFVF_STATUS_FAILURE;
3676 struct ecore_vf_queue *p_queue;
3677 struct ecore_queue_cid *p_cid;
3678 enum _ecore_status_t rc = ECORE_SUCCESS;
3679 u16 coal = 0, qid, i;
3682 mbx->offset = (u8 *)mbx->reply_virt;
3683 req = &mbx->req_virt->read_coal_req;
3686 b_is_rx = req->is_rx ? true : false;
3689 if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
3690 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3691 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3692 "VF[%d]: Invalid Rx queue_id = %d\n",
3693 p_vf->abs_vf_id, qid);
3697 p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3698 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3699 if (rc != ECORE_SUCCESS)
3702 if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
3703 ECORE_IOV_VALIDATE_Q_ENABLE)) {
3704 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3705 "VF[%d]: Invalid Tx queue_id = %d\n",
3706 p_vf->abs_vf_id, qid);
3709 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3710 p_queue = &p_vf->vf_queues[qid];
3711 if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
3712 (!p_queue->cids[i].b_is_tx))
3715 p_cid = p_queue->cids[i].p_cid;
3717 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
3719 if (rc != ECORE_SUCCESS)
3725 status = PFVF_STATUS_SUCCESS;
3728 p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
3730 p_resp->coal = coal;
3732 ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
3733 sizeof(struct channel_list_end_tlv));
3735 ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
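3736 /* Handler for CHANNEL_TLV_COALESCE_UPDATE - applies Rx coalescing to
3737 * the queue's Rx cid and Tx coalescing to all its Tx cids. */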
3738 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3739 struct ecore_ptt *p_ptt,
3740 struct ecore_vf_info *vf)
3742 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3743 enum _ecore_status_t rc = ECORE_SUCCESS;
3744 struct vfpf_update_coalesce *req;
3745 u8 status = PFVF_STATUS_FAILURE;
3746 struct ecore_queue_cid *p_cid;
3747 u16 rx_coal, tx_coal;
3751 req = &mbx->req_virt->update_coalesce;
3753 rx_coal = req->rx_coal;
3754 tx_coal = req->tx_coal;
3757 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3758 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3760 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3761 vf->abs_vf_id, qid);
3765 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3766 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3768 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3769 vf->abs_vf_id, qid);
3773 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3774 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3775 vf->abs_vf_id, rx_coal, tx_coal, qid);
3778 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3780 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3781 if (rc != ECORE_SUCCESS) {
3782 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3783 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3784 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3787 vf->rx_coal = rx_coal;
3790 /* TODO - in future, it might be possible to pass this in a per-cid
3791 * granularity. For now, do this for all Tx queues.
3794 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3796 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3797 if (p_queue->cids[i].p_cid == OSAL_NULL)
3800 if (!p_queue->cids[i].b_is_tx)
3803 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3804 p_queue->cids[i].p_cid);
3805 if (rc != ECORE_SUCCESS) {
3806 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3807 "VF[%d]: Unable to set tx queue coalesce\n",
3812 vf->tx_coal = tx_coal;
3815 status = PFVF_STATUS_SUCCESS;
3817 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3818 sizeof(struct pfvf_def_resp_tlv), status);
3821 enum _ecore_status_t
3822 ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
3823 u16 rx_coal, u16 tx_coal,
3826 struct ecore_queue_cid *p_cid;
3827 struct ecore_vf_info *vf;
3828 struct ecore_ptt *p_ptt;
3832 if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
3833 DP_NOTICE(p_hwfn, true,
3834 "VF[%d] - Can not set coalescing: VF is not active\n",
3839 vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
3840 p_ptt = ecore_ptt_acquire(p_hwfn);
3844 if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3845 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3847 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3848 vf->abs_vf_id, qid);
3852 if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3853 ECORE_IOV_VALIDATE_Q_ENABLE) &&
3855 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3856 vf->abs_vf_id, qid);
3860 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3861 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3862 vf->abs_vf_id, rx_coal, tx_coal, qid);
3865 p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3867 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3868 if (rc != ECORE_SUCCESS) {
3869 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3870 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3871 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3874 vf->rx_coal = rx_coal;
3877 /* TODO - in future, it might be possible to pass this in a per-cid
3878 * granularity. For now, do this for all Tx queues.
3881 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3883 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3884 if (p_queue->cids[i].p_cid == OSAL_NULL)
3887 if (!p_queue->cids[i].b_is_tx)
3890 rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3891 p_queue->cids[i].p_cid);
3892 if (rc != ECORE_SUCCESS) {
3893 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3894 "VF[%d]: Unable to set tx queue coalesce\n",
3899 vf->tx_coal = tx_coal;
3903 ecore_ptt_release(p_hwfn, p_ptt);
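3905 /* Poll, while pretending to the VF's concrete FID, until the DORQ
3906 * usage counter of the FLR-ed VF drains. */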
3908 static enum _ecore_status_t
3909 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3910 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3915 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3917 for (cnt = 0; cnt < 50; cnt++) {
3918 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3923 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3927 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3928 p_vf->abs_vf_id, val);
3929 return ECORE_TIMEOUT;
3932 return ECORE_SUCCESS;
3935 #define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
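3936 /* Poll until every VOQ's consumer passes the producer sampled at entry */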
3937 static enum _ecore_status_t
3938 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3939 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3941 u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
3942 u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
3943 u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
3944 u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
3945 u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
3946 u8 port_id, tc, tc_id = 0, voq = 0;
3949 /* Read initial consumers & producers */
3950 for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
3951 /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
3952 for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
3953 tc_id = (tc < max_phys_tcs_per_port) ?
3956 voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
3957 cons[voq] = ecore_rd(p_hwfn, p_ptt,
3958 cons_voq0_addr + voq * 0x40);
3959 prod = ecore_rd(p_hwfn, p_ptt,
3960 prod_voq0_addr + voq * 0x40);
3961 distance[voq] = prod - cons[voq];
3965 /* Wait for consumers to pass the producers */
3968 for (cnt = 0; cnt < 50; cnt++) {
3969 for (; port_id < max_ports_per_engine; port_id++) {
3970 /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
3971 for (; tc < max_phys_tcs_per_port + 1; tc++) {
3972 tc_id = (tc < max_phys_tcs_per_port) ?
3975 voq = VOQ(port_id, tc_id,
3976 max_phys_tcs_per_port);
3977 tmp = ecore_rd(p_hwfn, p_ptt,
3978 cons_voq0_addr + voq * 0x40);
3979 if (distance[voq] > tmp - cons[voq])
3983 if (tc == max_phys_tcs_per_port + 1)
3989 if (port_id == max_ports_per_engine)
3997 "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
3998 p_vf->abs_vf_id, voq, port_id, tc_id);
3999 return ECORE_TIMEOUT;
4002 return ECORE_SUCCESS;
4005 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
4006 struct ecore_vf_info *p_vf,
4007 struct ecore_ptt *p_ptt)
4009 enum _ecore_status_t rc;
4011 /* TODO - add SRC and TM polling once we add storage IOV */
4013 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
4017 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
4021 return ECORE_SUCCESS;
4024 static enum _ecore_status_t
4025 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4026 struct ecore_ptt *p_ptt,
4027 u16 rel_vf_id, u32 *ack_vfs)
4029 struct ecore_vf_info *p_vf;
4030 enum _ecore_status_t rc = ECORE_SUCCESS;
4032 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
4034 return ECORE_SUCCESS;
4036 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
4037 (1ULL << (rel_vf_id % 64))) {
4038 u16 vfid = p_vf->abs_vf_id;
4040 /* TODO - should we lock channel? */
4042 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4043 "VF[%d] - Handling FLR\n", vfid);
4045 ecore_iov_vf_cleanup(p_hwfn, p_vf);
4047 /* If VF isn't active, no need for anything but SW */
4051 /* TODO - what to do in case of failure? */
4052 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
4053 if (rc != ECORE_SUCCESS)
4056 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
4058 /* TODO - what now? What a mess... */
4059 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
4063 /* Workaround to make VF-PF channel ready, as FW
4064 * doesn't do that as a part of FLR.
4067 GTT_BAR0_MAP_REG_USDM_RAM +
4068 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
4070 /* VF_STOPPED has to be set only after final cleanup
4071 * but prior to re-enabling the VF.
4073 p_vf->state = VF_STOPPED;
4075 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
4077 /* TODO - again, a mess... */
4078 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
4083 /* Mark VF for ack and clean pending state */
4084 if (p_vf->state == VF_RESET)
4085 p_vf->state = VF_STOPPED;
4086 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
4087 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
4088 ~(1ULL << (rel_vf_id % 64));
4089 p_vf->vf_mbx.b_pending_msg = false;
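4091 /* Run FLR cleanup for all VFs of this hw-function and ACK the handled
4092 * VFs towards the MFW.
4093 */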
4095 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
4096 struct ecore_ptt *p_ptt)
4098 u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
4099 enum _ecore_status_t rc = ECORE_SUCCESS;
4102 OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
4104 /* Since BRB <-> PRS interface can't be tested as part of the flr
4105 * polling due to HW limitations, simply sleep a bit. And since
4106 * there's no need to wait per-vf, do it before looping.
4110 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
4111 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
4113 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
	u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
	enum _ecore_status_t rc = ECORE_SUCCESS;

	OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);

	/* Wait instead of polling the BRB <-> PRS interface */
	OSAL_MSLEEP(100);

	ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);

	rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");

	for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
		struct ecore_vf_info *p_vf;
		u8 vfid;

		p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing the MFW. Since
			 * the MFW will not trigger an additional attention
			 * for VF flr until ACKed, we're safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}

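/* Illustrative sketch (not part of the driver): the expected calling order
 * of the FLR helpers above, as a PF attention handler might use them.
 * "my_flr_attention_cb" is hypothetical; only the two ecore_iov_* calls
 * are real.
 */
#if 0
static void my_flr_attention_cb(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt, u32 *disabled_vfs)
{
	/* Latch FLR-ed VFs into pending_flr and move them to VF_RESET */
	if (!ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		return;

	/* Clean every pending VF, then ACK the MFW in a single call */
	ecore_iov_vf_flr_cleanup(p_hwfn, p_ptt);
}
#endif
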
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *p_params,
			struct ecore_mcp_link_state *p_link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__ecore_vf_get_link_params(p_params, p_bulletin);
	if (p_link)
		__ecore_vf_get_link_state(p_link, p_bulletin);
	if (p_caps)
		__ecore_vf_get_link_caps(p_caps, p_bulletin);
}

void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, int vfid)
{
	struct ecore_iov_vf_mbx *mbx;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* ecore_iov_process_mbx_request */
#ifndef CONFIG_ECORE_SW_CHANNEL
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn, true,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;
#endif

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	OSAL_IOV_VF_MSG_TYPE(p_hwfn,
			     p_vf->relative_vf_id,
			     mbx->first_tlv.tl.type);

	/* Lock the per-VF op mutex and note the locker's identity.
	 * The unlock will take place in the mbx response.
	 */
	ecore_iov_lock_vf_pf_channel(p_hwfn,
				     p_vf, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
			ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_UPDATE:
			ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_READ:
			ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_MTU:
			ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* If we've received a message from a VF we consider
		 * malicious, we ignore the message unless it's a RELEASE,
		 * in which case we'll give it the benefit of the doubt,
		 * allowing the next loaded driver to start again.
		 */
		if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
			/* TODO - initiate FLR, remove malicious indication */
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
				   p_vf->abs_vf_id);
		} else {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
				   p_vf->abs_vf_id, mbx->first_tlv.tl.type);
		}

		ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				       mbx->first_tlv.tl.type,
				       sizeof(struct pfvf_def_resp_tlv),
				       PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the
		 * future - a version written after this PF driver, which
		 * supports features unknown as of yet. Too bad since we
		 * don't support them. Or this may be because someone wrote
		 * a buggy VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn, false,
			  "VF[%02x]: unknown TLV. type %04x length %04x"
			  " padding %08x reply address %lu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding,
			  (unsigned long)mbx->first_tlv.reply_address);

		/* Try replying in case the reply address matches the one
		 * posted at acquisition.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address))
			ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					       mbx->first_tlv.tl.type,
					       sizeof(struct pfvf_def_resp_tlv),
					       PFVF_STATUS_NOT_SUPPORTED);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV -"
				   " no valid reply address\n",
				   p_vf->abs_vf_id);
	}

	ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
				       mbx->first_tlv.tl.type);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
	mbx->sw_mbx.response_offset = 0;
#endif
}

void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
				     u64 *events)
{
	u16 i;

	OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);

	ecore_for_each_vf(p_hwfn, i) {
		struct ecore_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

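/* Illustrative sketch (not part of the driver): a caller can scan the
 * returned bitmap and service every VF with a pending mailbox message.
 * The u64 array uses the same i / 64, i % 64 encoding as above;
 * "my_service_pending_vfs" is hypothetical.
 */
#if 0
static void my_service_pending_vfs(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt)
{
	u64 events[ECORE_VF_ARRAY_LENGTH];
	u16 i;

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	ecore_for_each_vf(p_hwfn, i)
		if (events[i / 64] & (1ULL << (i % 64)))
			ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
}
#endif
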
static struct ecore_vf_info *
ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
	u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

	if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be"
			   " handled by PF\n",
			   abs_vfid);
		return OSAL_NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}

static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
						 u16 abs_vfid,
						 struct regpair *vf_msg)
{
	struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
								   abs_vfid);

	if (!p_vf)
		return ECORE_SUCCESS;

	/* List the physical address of the request so that the handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;

	p_vf->vf_mbx.b_pending_msg = true;

	return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}

static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
				       struct malicious_vf_eqe_data *p_data)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn, false,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}

	OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}

static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
						  u8 opcode,
						  __le16 echo,
						  union event_ring_data *data,
						  u8 OSAL_UNUSED fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		/* The echo field carries the absolute VF id */
		return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
					    &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_VF_FLR:
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF-FLR is still not supported\n");
		return ECORE_SUCCESS;
	case COMMON_EVENT_MALICIOUS_VF:
		ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return ECORE_SUCCESS;
	default:
		DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return ECORE_INVAL;
	}
}

bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
		  (1ULL << (rel_vf_id % 64)));
}

u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS_K2;
}

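/* Illustrative sketch (not part of the driver): iterating all active VFs
 * with the helper above; MAX_NUM_VFS_K2 doubles as the end-of-iteration
 * sentinel. "my_visit_vf" is a hypothetical per-VF hook.
 */
#if 0
static void my_visit_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);

static void my_walk_active_vfs(struct ecore_hwfn *p_hwfn)
{
	u16 i;

	for (i = ecore_iov_get_next_active_vf(p_hwfn, 0);
	     i < MAX_NUM_VFS_K2;
	     i = ecore_iov_get_next_active_vf(p_hwfn, i + 1))
		my_visit_vf(p_hwfn, i);
}
#endif
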
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *ptt, int vfid)
{
	struct dmae_params params;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return ECORE_INVAL;

	OSAL_MEMSET(&params, 0, sizeof(params));
	SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
	SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
	params.src_vf_id = vf_info->abs_vf_id;

	/* The DMAE length is expressed in dwords, hence the division by 4 */
	if (ecore_dmae_host2host(p_hwfn, ptt,
				 vf_info->vf_mbx.pending_req,
				 vf_info->vf_mbx.req_phys,
				 sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return ECORE_IO;
	}

	return ECORE_SUCCESS;
}

void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
				       u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced MAC to malicious VF [%d]\n",
			  vfid);
		return;
	}

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
	    vf_info->p_vf_info.is_trusted_configured) {
		feature = 1 << VFPF_BULLETIN_MAC_ADDR;
		/* Trust mode will disable Forced MAC */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~(1 << MAC_ADDR_FORCED);
	} else {
		feature = 1 << MAC_ADDR_FORCED;
		/* Forced MAC will disable MAC_ADDR */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~(1 << VFPF_BULLETIN_MAC_ADDR);
	}

	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
		    mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
						u8 *mac, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set MAC, invalid vfid [%d]\n", vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return ECORE_INVAL;
	}

	feature = 1 << VFPF_BULLETIN_MAC_ADDR;
	OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
	    vf_info->p_vf_info.is_trusted_configured)
		ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return ECORE_SUCCESS;
}

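/* Illustrative sketch (not part of the driver): MAC_ADDR_FORCED and
 * VFPF_BULLETIN_MAC_ADDR are mutually exclusive bulletin bits; the two
 * setters above always clear one before setting the other. Forced mode
 * means the VF may not override the address, while the plain bulletin MAC
 * (trusted / allow_vf_mac_change) may later be replaced by the VF. A
 * hedged helper mirroring the check at the top of
 * ecore_iov_bulletin_set_mac():
 */
#if 0
static bool my_vf_mac_is_forced(struct ecore_vf_info *vf_info)
{
	return !!(vf_info->bulletin.p_virt->valid_bitmap &
		  (1 << MAC_ADDR_FORCED));
}
#endif
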
#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
					       bool b_untagged_only, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set untagged default, invalid vfid [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set untagged default to malicious VF [%d]\n",
			  vfid);
		return ECORE_INVAL;
	}

	/* Since this is configurable only during vport-start, don't take it
	 * if we're past that point.
	 */
	if (vf_info->state == VF_ENABLED) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can't support untagged change for vfid[%d] -"
			   " VF is already active\n",
			   vfid);
		return ECORE_INVAL;
	}

	/* Set configuration; This will later be taken into account during the
	 * VF initialization.
	 */
	feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
		  (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
									   : 0;

	return ECORE_SUCCESS;
}

void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
				  u16 *opaque_fid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	*opaque_fid = vf_info->opaque_fid;
}
#endif

void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
					u16 pvid, int vfid)
{
	struct ecore_vf_info *vf_info;
	u64 feature;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set forced VLAN, invalid vfid [%d]\n",
			  vfid);
		return;
	}
	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Can't set forced VLAN to malicious VF [%d]\n",
			  vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
				      int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *p_vf_info;

	p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
					    int vfid, bool val)
{
	struct ecore_vf_info *vf;
	enum _ecore_status_t rc = ECORE_INVAL;

	if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn, true,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		goto out;

	if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = ECORE_SUCCESS;
		goto out;
	}

	rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
	u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;

	max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
						: ECORE_MAX_VF_CHAINS_PER_PF;

	return max_chains_per_vf;
}

void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					  u16 rel_vf_id,
					  void **pp_req_virt_addr,
					  u16 *p_req_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_req_virt_addr)
		*pp_req_virt_addr = vf_info->vf_mbx.req_virt;

	if (p_req_virt_size)
		*p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
}

void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
					    u16 rel_vf_id,
					    void **pp_reply_virt_addr,
					    u16 *p_reply_virt_size)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return;

	if (pp_reply_virt_addr)
		*pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;

	if (p_reply_virt_size)
		*p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
}

#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
						 u16 rel_vf_id)
{
	struct ecore_vf_info *vf_info =
	    ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);

	if (!vf_info)
		return OSAL_NULL;

	return &vf_info->vf_mbx.sw_mbx;
}
#endif

bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
	return (length >= sizeof(struct vfpf_first_tlv) &&
		(length <= sizeof(union vfpf_tlvs)));
}

u32 ecore_iov_pfvf_msg_length(void)
{
	return sizeof(union pfvf_tlvs);
}

u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
			       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      (1 << VFPF_BULLETIN_MAC_ADDR)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return OSAL_NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return OSAL_NULL;

	return p_vf->bulletin.p_virt->mac;
}

u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
				       u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 int vfid, int val)
{
	struct ecore_vf_info *vf;
	u8 abs_vp_id = 0;
	u16 rl_id;
	enum _ecore_status_t rc;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
	return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}

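/* Illustrative sketch (not part of the driver): since the rate limiter id
 * is simply the VF's absolute vport id, capping a VF's TX rate reduces to
 * one call. "my_set_vf_max_rate" is hypothetical, and the 10000 value
 * assumes a Mb/s unit; the argument is forwarded verbatim to
 * ecore_init_global_rl().
 */
#if 0
static enum _ecore_status_t my_set_vf_max_rate(struct ecore_hwfn *p_hwfn,
					       struct ecore_ptt *p_ptt,
					       int vfid)
{
	return ecore_iov_configure_tx_rate(p_hwfn, p_ptt, vfid, 10000);
}
#endif
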
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
						     int vfid, u32 rate)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn, true,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return ECORE_INVAL;
		}
	}

	vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
	if (!vf) {
		DP_NOTICE(p_dev, true,
			  "Getting vf info failed, can't set min rate\n");
		return ECORE_INVAL;
	}

	return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
}

enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    int vfid,
					    struct ecore_eth_stats *p_stats)
{
	struct ecore_vf_info *vf;

	vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return ECORE_INVAL;

	if (vf->state != VF_ENABLED)
		return ECORE_INVAL;

	__ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
				vf->abs_vf_id + 0x10, false);

	return ECORE_SUCCESS;
}

u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_rxqs;
}

u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_active_rxqs;
}

void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return OSAL_NULL;

	return p_vf->ctx;
}

u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return 0;

	return p_vf->num_sbs;
}

bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_FREE);
}

bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
					      u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ACQUIRED);
}

bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state == VF_ENABLED);
}

bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
			     u16 rel_vf_id)
{
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf)
		return false;

	return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}

int
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
	struct ecore_wfq_data *vf_vp_wfq;
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

#ifdef CONFIG_ECORE_SW_CHANNEL
void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
				 bool b_is_hw)
{
	struct ecore_vf_info *vf_info;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info)
		return;

	vf_info->b_hw_channel = b_is_hw;