/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Marvell.
 * All rights reserved.
 * www.marvell.com
 */
9 #include "base/bcm_osal.h"
10 #include "base/ecore.h"
11 #include "base/ecore_sriov.h"
12 #include "base/ecore_mcp.h"
13 #include "base/ecore_vf.h"
15 #include "qede_sriov.h"
17 static void qed_sriov_enable_qid_config(struct ecore_hwfn *hwfn,
19 struct ecore_iov_vf_init_params *params)
21 u16 num_pf_l2_queues, base, i;
23 /* Since we have an equal resource distribution per-VF, and we assume
24 * PF has acquired its first queues, we start setting sequentially from
27 num_pf_l2_queues = (u16)FEAT_NUM(hwfn, ECORE_PF_L2_QUE);
29 base = num_pf_l2_queues + vfid * params->num_queues;
30 params->rel_vf_id = vfid;
32 for (i = 0; i < params->num_queues; i++) {
33 params->req_rx_queue[i] = base + i;
34 params->req_tx_queue[i] = base + i;
37 /* PF uses indices 0 for itself; Set vport/RSS afterwards */
38 params->vport_id = vfid + 1;
39 params->rss_eng_id = vfid + 1;
42 static void qed_sriov_enable(struct ecore_dev *edev, int num)
44 struct ecore_iov_vf_init_params params;
45 struct ecore_hwfn *p_hwfn;
46 struct ecore_ptt *p_ptt;
49 if ((u32)num >= RESC_NUM(&edev->hwfns[0], ECORE_VPORT)) {
50 DP_NOTICE(edev, false, "Can start at most %d VFs\n",
51 RESC_NUM(&edev->hwfns[0], ECORE_VPORT) - 1);
55 OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_iov_vf_init_params));
57 for_each_hwfn(edev, j) {
60 p_hwfn = &edev->hwfns[j];
61 p_ptt = ecore_ptt_acquire(p_hwfn);
62 feat_num = FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) / num;
64 params.num_queues = OSAL_MIN_T(int, feat_num, 16);
66 for (i = 0; i < num; i++) {
67 if (!ecore_iov_is_valid_vfid(p_hwfn, i, false, true))
70 qed_sriov_enable_qid_config(p_hwfn, i, ¶ms);
72 rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, ¶ms);
74 DP_ERR(edev, "Failed to enable VF[%d]\n", i);
75 ecore_ptt_release(p_hwfn, p_ptt);
80 ecore_ptt_release(p_hwfn, p_ptt);
84 void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param)
86 if (!IS_ECORE_SRIOV(edev)) {
87 DP_VERBOSE(edev, ECORE_MSG_IOV, "SR-IOV is not supported\n");
92 qed_sriov_enable(edev, num_vfs_param);
95 static void qed_handle_vf_msg(struct ecore_hwfn *hwfn)
97 u64 events[ECORE_VF_ARRAY_LENGTH];
98 struct ecore_ptt *ptt;
101 ptt = ecore_ptt_acquire(hwfn);
103 DP_NOTICE(hwfn, true, "PTT acquire failed\n");
104 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
108 ecore_iov_pf_get_pending_events(hwfn, events);
110 ecore_for_each_vf(hwfn, i) {
111 /* Skip VFs with no pending messages */
112 if (!ECORE_VF_ARRAY_GET_VFID(events, i))
115 DP_VERBOSE(hwfn, ECORE_MSG_IOV,
116 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
117 i, hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
119 /* Copy VF's message to PF's request buffer for that VF */
120 if (ecore_iov_copy_vf_msg(hwfn, ptt, i))
123 ecore_iov_process_mbx_req(hwfn, ptt, i);
126 ecore_ptt_release(hwfn, ptt);
129 static void qed_handle_bulletin_post(struct ecore_hwfn *hwfn)
131 struct ecore_ptt *ptt;
134 ptt = ecore_ptt_acquire(hwfn);
136 DP_NOTICE(hwfn, true, "PTT acquire failed\n");
137 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
141 /* TODO - at the moment update bulletin board of all VFs.
142 * if this proves to costly, we can mark VFs that need their
145 ecore_for_each_vf(hwfn, i)
146 ecore_iov_post_vf_bulletin(hwfn, i, ptt);
148 ecore_ptt_release(hwfn, ptt);
151 void qed_iov_pf_task(void *arg)
153 struct ecore_hwfn *p_hwfn = arg;
156 if (OSAL_GET_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags)) {
157 OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
158 qed_handle_vf_msg(p_hwfn);
161 if (OSAL_GET_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
162 &p_hwfn->iov_task_flags)) {
163 OSAL_CLEAR_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
164 &p_hwfn->iov_task_flags);
165 qed_handle_bulletin_post(p_hwfn);
168 if (OSAL_GET_BIT(QED_IOV_WQ_FLR_FLAG, &p_hwfn->iov_task_flags)) {
169 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
171 OSAL_CLEAR_BIT(QED_IOV_WQ_FLR_FLAG, &p_hwfn->iov_task_flags);
174 qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
178 rc = ecore_iov_vf_flr_cleanup(p_hwfn, p_ptt);
180 qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
182 ecore_ptt_release(p_hwfn, p_ptt);
186 int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
188 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Scheduling iov task [Flag: %d]\n",
191 OSAL_SET_BIT(flag, &p_hwfn->iov_task_flags);
192 return rte_eal_alarm_set(1, qed_iov_pf_task, p_hwfn);
195 void qed_inform_vf_link_state(struct ecore_hwfn *hwfn)
197 struct ecore_hwfn *lead_hwfn = ECORE_LEADING_HWFN(hwfn->p_dev);
198 struct ecore_mcp_link_capabilities caps;
199 struct ecore_mcp_link_params params;
200 struct ecore_mcp_link_state link;
203 if (!hwfn->pf_iov_info)
206 rte_memcpy(¶ms, ecore_mcp_get_link_params(lead_hwfn),
208 rte_memcpy(&link, ecore_mcp_get_link_state(lead_hwfn), sizeof(link));
209 rte_memcpy(&caps, ecore_mcp_get_link_capabilities(lead_hwfn),
212 /* Update bulletin of all future possible VFs with link configuration */
213 for (i = 0; i < hwfn->p_dev->p_iov_info->total_vfs; i++) {
214 ecore_iov_set_link(hwfn, i,
215 ¶ms, &link, &caps);
218 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);