/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Marvell.
 */

#include "qede_sriov.h"
9 static void qed_sriov_enable_qid_config(struct ecore_hwfn *hwfn,
11 struct ecore_iov_vf_init_params *params)
13 u16 num_pf_l2_queues, base, i;
15 /* Since we have an equal resource distribution per-VF, and we assume
16 * PF has acquired its first queues, we start setting sequentially from
19 num_pf_l2_queues = (u16)FEAT_NUM(hwfn, ECORE_PF_L2_QUE);
21 base = num_pf_l2_queues + vfid * params->num_queues;
22 params->rel_vf_id = vfid;
24 for (i = 0; i < params->num_queues; i++) {
25 params->req_rx_queue[i] = base + i;
26 params->req_tx_queue[i] = base + i;
29 /* PF uses indices 0 for itself; Set vport/RSS afterwards */
30 params->vport_id = vfid + 1;
31 params->rss_eng_id = vfid + 1;
34 static void qed_sriov_enable(struct ecore_dev *edev, int num)
36 struct ecore_iov_vf_init_params params;
37 struct ecore_hwfn *p_hwfn;
38 struct ecore_ptt *p_ptt;
41 if ((u32)num >= RESC_NUM(&edev->hwfns[0], ECORE_VPORT)) {
42 DP_NOTICE(edev, false, "Can start at most %d VFs\n",
43 RESC_NUM(&edev->hwfns[0], ECORE_VPORT) - 1);
47 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_iov_vf_init_params));
49 for_each_hwfn(edev, j) {
52 p_hwfn = &edev->hwfns[j];
53 p_ptt = ecore_ptt_acquire(p_hwfn);
54 feat_num = FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) / num;
56 params.num_queues = OSAL_MIN_T(int, feat_num, 16);
58 for (i = 0; i < num; i++) {
59 if (!ecore_iov_is_valid_vfid(p_hwfn, i, false, true))
62 qed_sriov_enable_qid_config(p_hwfn, i, ¶ms);
64 rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, ¶ms);
66 DP_ERR(edev, "Failed to enable VF[%d]\n", i);
67 ecore_ptt_release(p_hwfn, p_ptt);
72 ecore_ptt_release(p_hwfn, p_ptt);
76 void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param)
78 if (!IS_ECORE_SRIOV(edev)) {
79 DP_VERBOSE(edev, ECORE_MSG_IOV, "SR-IOV is not supported\n");
84 qed_sriov_enable(edev, num_vfs_param);