raw/cnxk_bphy: add doxygen comments
[dpdk.git] / drivers / vdpa / ifc / base / ifcvf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Intel Corporation
3  */
4
5 #include "ifcvf.h"
6 #include "ifcvf_osdep.h"
7
8 STATIC void *
9 get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap)
10 {
11         u8 bar = cap->bar;
12         u32 length = cap->length;
13         u32 offset = cap->offset;
14
15         if (bar > IFCVF_PCI_MAX_RESOURCE - 1) {
16                 DEBUGOUT("invalid bar: %u\n", bar);
17                 return NULL;
18         }
19
20         if (offset + length < offset) {
21                 DEBUGOUT("offset(%u) + length(%u) overflows\n",
22                         offset, length);
23                 return NULL;
24         }
25
26         if (offset + length > hw->mem_resource[cap->bar].len) {
27                 DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)",
28                         offset, length, (u32)hw->mem_resource[cap->bar].len);
29                 return NULL;
30         }
31
32         return hw->mem_resource[bar].addr + offset;
33 }
34
/*
 * Scan the PCI capability list of @dev and map the virtio modern
 * capability regions (common, notify, ISR and device config) into @hw.
 *
 * Returns 0 on success, -1 if config-space reads fail or any required
 * capability region could not be mapped.
 */
int
ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
{
	int ret;
	u8 pos;
	struct ifcvf_pci_cap cap;

	/* Position of the first capability in PCI config space. */
	ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		DEBUGOUT("failed to read pci capability list\n");
		return -1;
	}

	/* Walk the capability linked list; pos == 0 terminates it. */
	while (pos) {
		ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap,
				sizeof(cap), pos);
		if (ret < 0) {
			DEBUGOUT("failed to read cap at pos: %x", pos);
			break;
		}

		/* Only vendor-specific caps describe virtio regions. */
		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		DEBUGOUT("cfg type: %u, bar: %u, offset: %u, "
				"len: %u\n", cap.cfg_type, cap.bar,
				cap.offset, cap.length);

		switch (cap.cfg_type) {
		case IFCVF_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			break;
		case IFCVF_PCI_CAP_NOTIFY_CFG:
			/* notify_off_multiplier immediately follows the
			 * generic capability header in config space.
			 */
			ret = PCI_READ_CONFIG_DWORD(dev,
					&hw->notify_off_multiplier,
					pos + sizeof(cap));
			if (ret < 0) {
				DEBUGOUT("failed to read notify_off_multiplier\n");
				return -1;
			}
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_region = cap.bar;
			break;
		case IFCVF_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			break;
		case IFCVF_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			break;
		}
next:
		pos = cap.cap_next;
	}

	/* Live-migration region is taken from BAR 4 -- presumably fixed
	 * by the IFC VF hardware layout; confirm against the datasheet.
	 */
	hw->lm_cfg = hw->mem_resource[4].addr;

	/* All four capability regions are mandatory. */
	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
			hw->isr == NULL || hw->dev_cfg == NULL) {
		DEBUGOUT("capability incomplete\n");
		return -1;
	}

	DEBUGOUT("capability mapping:\n"
		 "common cfg: %p\n"
		 "notify base: %p\n"
		 "isr cfg: %p\n"
		 "device cfg: %p\n"
		 "multiplier: %u\n",
		 hw->common_cfg, hw->notify_base, hw->isr, hw->dev_cfg,
		 hw->notify_off_multiplier);

	return 0;
}
108
109 STATIC u8
110 ifcvf_get_status(struct ifcvf_hw *hw)
111 {
112         return IFCVF_READ_REG8(&hw->common_cfg->device_status);
113 }
114
115 STATIC void
116 ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
117 {
118         IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status);
119 }
120
121 STATIC void
122 ifcvf_reset(struct ifcvf_hw *hw)
123 {
124         ifcvf_set_status(hw, 0);
125
126         /* flush status write */
127         while (ifcvf_get_status(hw))
128                 msec_delay(1);
129 }
130
131 STATIC void
132 ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
133 {
134         if (status != 0)
135                 status |= ifcvf_get_status(hw);
136
137         ifcvf_set_status(hw, status);
138         ifcvf_get_status(hw);
139 }
140
141 u64
142 ifcvf_get_features(struct ifcvf_hw *hw)
143 {
144         u32 features_lo, features_hi;
145         struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
146
147         IFCVF_WRITE_REG32(0, &cfg->device_feature_select);
148         features_lo = IFCVF_READ_REG32(&cfg->device_feature);
149
150         IFCVF_WRITE_REG32(1, &cfg->device_feature_select);
151         features_hi = IFCVF_READ_REG32(&cfg->device_feature);
152
153         return ((u64)features_hi << 32) | features_lo;
154 }
155
156 STATIC void
157 ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
158 {
159         struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
160
161         IFCVF_WRITE_REG32(0, &cfg->guest_feature_select);
162         IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature);
163
164         IFCVF_WRITE_REG32(1, &cfg->guest_feature_select);
165         IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature);
166 }
167
168 STATIC int
169 ifcvf_config_features(struct ifcvf_hw *hw)
170 {
171         u64 host_features;
172
173         host_features = ifcvf_get_features(hw);
174         hw->req_features &= host_features;
175
176         ifcvf_set_features(hw, hw->req_features);
177         ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK);
178
179         if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) {
180                 DEBUGOUT("failed to set FEATURES_OK status\n");
181                 return -1;
182         }
183
184         return 0;
185 }
186
187 STATIC void
188 io_write64_twopart(u64 val, u32 *lo, u32 *hi)
189 {
190         IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo);
191         IFCVF_WRITE_REG32(val >> 32, hi);
192 }
193
/*
 * Program and enable all vrings: allocate MSI-X vectors, write ring
 * addresses and sizes, seed the live-migration (LM) ring-state words
 * with the saved avail/used indexes, and record each queue's notify
 * (doorbell) address.
 *
 * Returns 0 on success, -1 if the device reports it could not
 * allocate an MSI-X vector.
 */
STATIC int
ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct ifcvf_pci_common_cfg *cfg;
	u8 *lm_cfg;
	u32 i;
	u16 notify_off;

	cfg = hw->common_cfg;
	lm_cfg = hw->lm_cfg;

	/* Vector 0 is for config-change interrupts; read back to check
	 * the device actually accepted it.
	 */
	IFCVF_WRITE_REG16(0, &cfg->msix_config);
	if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) {
		DEBUGOUT("msix vec alloc failed for device config\n");
		return -1;
	}

	for (i = 0; i < hw->nr_vring; i++) {
		/* Select the queue first; subsequent queue_* register
		 * accesses all target the selected queue.
		 */
		IFCVF_WRITE_REG16(i, &cfg->queue_select);
		io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				&cfg->queue_desc_hi);
		io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				&cfg->queue_avail_hi);
		io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				&cfg->queue_used_hi);
		IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);

		/* LM ring-state word: last_avail_idx in the low 16 bits,
		 * last_used_idx in the high 16 bits. Block devices use one
		 * LM slot per ring; net devices pack two rings (4 bytes
		 * each) into one slot.
		 */
		if (hw->device_type == IFCVF_BLK)
			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
				i * IFCVF_LM_CFG_SIZE) =
				(u32)hw->vring[i].last_avail_idx |
				((u32)hw->vring[i].last_used_idx << 16);
		else
			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
				(i / 2) * IFCVF_LM_CFG_SIZE +
				(i % 2) * 4) =
				(u32)hw->vring[i].last_avail_idx |
				((u32)hw->vring[i].last_used_idx << 16);

		/* Queue vectors start at 1 (vector 0 is config). */
		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
		if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
				IFCVF_MSI_NO_VECTOR) {
			DEBUGOUT("queue %u, msix vec alloc failed\n",
					i);
			return -1;
		}

		/* Doorbell = notify base + queue_notify_off * multiplier. */
		notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
		hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);
		IFCVF_WRITE_REG16(1, &cfg->queue_enable);
	}

	return 0;
}
249
250 STATIC void
251 ifcvf_hw_disable(struct ifcvf_hw *hw)
252 {
253         u32 i;
254         struct ifcvf_pci_common_cfg *cfg;
255         u32 ring_state;
256
257         cfg = hw->common_cfg;
258
259         IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config);
260         for (i = 0; i < hw->nr_vring; i++) {
261                 IFCVF_WRITE_REG16(i, &cfg->queue_select);
262                 IFCVF_WRITE_REG16(0, &cfg->queue_enable);
263                 IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
264
265                 if (hw->device_type == IFCVF_BLK)
266                         ring_state = *(u32 *)(hw->lm_cfg +
267                                         IFCVF_LM_RING_STATE_OFFSET +
268                                         i * IFCVF_LM_CFG_SIZE);
269                 else
270                         ring_state = *(u32 *)(hw->lm_cfg +
271                                         IFCVF_LM_RING_STATE_OFFSET +
272                                         (i / 2) * IFCVF_LM_CFG_SIZE +
273                                         (i % 2) * 4);
274
275                 if (hw->device_type == IFCVF_BLK)
276                         hw->vring[i].last_avail_idx =
277                                 (u16)(ring_state & IFCVF_16_BIT_MASK);
278                 else
279                         hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
280                 hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
281         }
282 }
283
284 int
285 ifcvf_start_hw(struct ifcvf_hw *hw)
286 {
287         ifcvf_reset(hw);
288         ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK);
289         ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER);
290
291         if (ifcvf_config_features(hw) < 0)
292                 return -1;
293
294         if (ifcvf_hw_enable(hw) < 0)
295                 return -1;
296
297         ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK);
298         return 0;
299 }
300
/* Quiesce the queues (saving ring indexes), then reset the device. */
void
ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}
307
308 void
309 ifcvf_enable_logging(struct ifcvf_hw *hw, u64 log_base, u64 log_size)
310 {
311         u8 *lm_cfg;
312
313         lm_cfg = hw->lm_cfg;
314
315         *(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_LOW) =
316                 log_base & IFCVF_32_BIT_MASK;
317
318         *(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_HIGH) =
319                 (log_base >> 32) & IFCVF_32_BIT_MASK;
320
321         *(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_LOW) =
322                 (log_base + log_size) & IFCVF_32_BIT_MASK;
323
324         *(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_HIGH) =
325                 ((log_base + log_size) >> 32) & IFCVF_32_BIT_MASK;
326
327         *(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_ENABLE_VF;
328 }
329
330 void
331 ifcvf_disable_logging(struct ifcvf_hw *hw)
332 {
333         u8 *lm_cfg;
334
335         lm_cfg = hw->lm_cfg;
336         *(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_DISABLE;
337 }
338
339 void
340 ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
341 {
342         IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
343 }
344
345 u8
346 ifcvf_get_notify_region(struct ifcvf_hw *hw)
347 {
348         return hw->notify_region;
349 }
350
351 u64
352 ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
353 {
354         return (u8 *)hw->notify_addr[qid] -
355                 (u8 *)hw->mem_resource[hw->notify_region].addr;
356 }