/* drivers/net/qede/qede_sriov.c - PF-side SR-IOV support for the qede PMD */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2020 Marvell.
3  * All rights reserved.
4  * www.marvell.com
5  */
6
7 #include <rte_alarm.h>
8
9 #include "base/bcm_osal.h"
10 #include "base/ecore.h"
11 #include "base/ecore_sriov.h"
12 #include "base/ecore_mcp.h"
13 #include "base/ecore_vf.h"
14
15 #include "qede_sriov.h"
16
17 static void qed_sriov_enable_qid_config(struct ecore_hwfn *hwfn,
18                                         u16 vfid,
19                                         struct ecore_iov_vf_init_params *params)
20 {
21         u16 num_pf_l2_queues, base, i;
22
23         /* Since we have an equal resource distribution per-VF, and we assume
24          * PF has acquired its first queues, we start setting sequentially from
25          * there.
26          */
27         num_pf_l2_queues = (u16)FEAT_NUM(hwfn, ECORE_PF_L2_QUE);
28
29         base = num_pf_l2_queues + vfid * params->num_queues;
30         params->rel_vf_id = vfid;
31
32         for (i = 0; i < params->num_queues; i++) {
33                 params->req_rx_queue[i] = base + i;
34                 params->req_tx_queue[i] = base + i;
35         }
36
37         /* PF uses indices 0 for itself; Set vport/RSS afterwards */
38         params->vport_id = vfid + 1;
39         params->rss_eng_id = vfid + 1;
40 }
41
42 static void qed_sriov_enable(struct ecore_dev *edev, int num)
43 {
44         struct ecore_iov_vf_init_params params;
45         struct ecore_hwfn *p_hwfn;
46         struct ecore_ptt *p_ptt;
47         int i, j, rc;
48
49         if ((u32)num >= RESC_NUM(&edev->hwfns[0], ECORE_VPORT)) {
50                 DP_NOTICE(edev, false, "Can start at most %d VFs\n",
51                           RESC_NUM(&edev->hwfns[0], ECORE_VPORT) - 1);
52                 return;
53         }
54
55         OSAL_MEMSET(&params, 0, sizeof(struct ecore_iov_vf_init_params));
56
57         for_each_hwfn(edev, j) {
58                 int feat_num;
59
60                 p_hwfn = &edev->hwfns[j];
61                 p_ptt = ecore_ptt_acquire(p_hwfn);
62                 feat_num = FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) / num;
63
64                 params.num_queues = OSAL_MIN_T(int, feat_num, 16);
65
66                 for (i = 0; i < num; i++) {
67                         if (!ecore_iov_is_valid_vfid(p_hwfn, i, false, true))
68                                 continue;
69
70                         qed_sriov_enable_qid_config(p_hwfn, i, &params);
71
72                         rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
73                         if (rc) {
74                                 DP_ERR(edev, "Failed to enable VF[%d]\n", i);
75                                 ecore_ptt_release(p_hwfn, p_ptt);
76                                 return;
77                         }
78                 }
79
80                 ecore_ptt_release(p_hwfn, p_ptt);
81         }
82 }
83
84 void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param)
85 {
86         if (!IS_ECORE_SRIOV(edev)) {
87                 DP_VERBOSE(edev, ECORE_MSG_IOV, "SR-IOV is not supported\n");
88                 return;
89         }
90
91         if (num_vfs_param)
92                 qed_sriov_enable(edev, num_vfs_param);
93 }
94
95 static void qed_handle_vf_msg(struct ecore_hwfn *hwfn)
96 {
97         u64 events[ECORE_VF_ARRAY_LENGTH];
98         struct ecore_ptt *ptt;
99         int i;
100
101         ptt = ecore_ptt_acquire(hwfn);
102         if (!ptt) {
103                 DP_NOTICE(hwfn, true, "PTT acquire failed\n");
104                 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
105                 return;
106         }
107
108         ecore_iov_pf_get_pending_events(hwfn, events);
109
110         ecore_for_each_vf(hwfn, i) {
111                 /* Skip VFs with no pending messages */
112                 if (!ECORE_VF_ARRAY_GET_VFID(events, i))
113                         continue;
114
115                 DP_VERBOSE(hwfn, ECORE_MSG_IOV,
116                            "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
117                            i, hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
118
119                 /* Copy VF's message to PF's request buffer for that VF */
120                 if (ecore_iov_copy_vf_msg(hwfn, ptt, i))
121                         continue;
122
123                 ecore_iov_process_mbx_req(hwfn, ptt, i);
124         }
125
126         ecore_ptt_release(hwfn, ptt);
127 }
128
129 static void qed_handle_bulletin_post(struct ecore_hwfn *hwfn)
130 {
131         struct ecore_ptt *ptt;
132         int i;
133
134         ptt = ecore_ptt_acquire(hwfn);
135         if (!ptt) {
136                 DP_NOTICE(hwfn, true, "PTT acquire failed\n");
137                 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
138                 return;
139         }
140
141         /* TODO - at the moment update bulletin board of all VFs.
142          * if this proves to costly, we can mark VFs that need their
143          * bulletins updated.
144          */
145         ecore_for_each_vf(hwfn, i)
146                 ecore_iov_post_vf_bulletin(hwfn, i, ptt);
147
148         ecore_ptt_release(hwfn, ptt);
149 }
150
151 void qed_iov_pf_task(void *arg)
152 {
153         struct ecore_hwfn *p_hwfn = arg;
154         int rc;
155
156         if (OSAL_GET_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags)) {
157                 OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
158                 qed_handle_vf_msg(p_hwfn);
159         }
160
161         if (OSAL_GET_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
162                          &p_hwfn->iov_task_flags)) {
163                 OSAL_CLEAR_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
164                                &p_hwfn->iov_task_flags);
165                 qed_handle_bulletin_post(p_hwfn);
166         }
167
168         if (OSAL_GET_BIT(QED_IOV_WQ_FLR_FLAG, &p_hwfn->iov_task_flags)) {
169                 struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
170
171                 OSAL_CLEAR_BIT(QED_IOV_WQ_FLR_FLAG, &p_hwfn->iov_task_flags);
172
173                 if (!p_ptt) {
174                         qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
175                         return;
176                 }
177
178                 rc = ecore_iov_vf_flr_cleanup(p_hwfn, p_ptt);
179                 if (rc)
180                         qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
181
182                 ecore_ptt_release(p_hwfn, p_ptt);
183         }
184 }
185
186 int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
187 {
188         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Scheduling iov task [Flag: %d]\n",
189                    flag);
190
191         OSAL_SET_BIT(flag, &p_hwfn->iov_task_flags);
192         return rte_eal_alarm_set(1, qed_iov_pf_task, p_hwfn);
193 }
194
195 void qed_inform_vf_link_state(struct ecore_hwfn *hwfn)
196 {
197         struct ecore_hwfn *lead_hwfn = ECORE_LEADING_HWFN(hwfn->p_dev);
198         struct ecore_mcp_link_capabilities caps;
199         struct ecore_mcp_link_params params;
200         struct ecore_mcp_link_state link;
201         int i;
202
203         if (!hwfn->pf_iov_info)
204                 return;
205
206         memcpy(&params, ecore_mcp_get_link_params(lead_hwfn),
207                    sizeof(params));
208         memcpy(&link, ecore_mcp_get_link_state(lead_hwfn), sizeof(link));
209         memcpy(&caps, ecore_mcp_get_link_capabilities(lead_hwfn),
210                    sizeof(caps));
211
212         /* Update bulletin of all future possible VFs with link configuration */
213         for (i = 0; i < hwfn->p_dev->p_iov_info->total_vfs; i++) {
214                 ecore_iov_set_link(hwfn, i,
215                                    &params, &link, &caps);
216         }
217
218         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
219 }