/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include "ifcvf.h"
#include "ifcvf_osdep.h"
STATIC void *
get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap)
{
	u8 bar = cap->bar;
	u32 length = cap->length;
	u32 offset = cap->offset;

	if (bar > IFCVF_PCI_MAX_RESOURCE - 1) {
		DEBUGOUT("invalid bar: %u\n", bar);
		return NULL;
	}
	/* reject offset + length wrap-around */
	if (offset + length < offset) {
		DEBUGOUT("offset(%u) + length(%u) overflows\n",
			offset, length);
		return NULL;
	}
	if (offset + length > hw->mem_resource[bar].len) {
		DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)\n",
			offset, length, (u32)hw->mem_resource[bar].len);
		return NULL;
	}
	return hw->mem_resource[bar].addr + offset;
}
int
ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
{
	int ret;
	u8 pos;
	struct ifcvf_pci_cap cap;

	ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		DEBUGOUT("failed to read pci capability list\n");
		return -1;
	}

	/* walk the PCI capability list, mapping each vendor-specific cap */
	while (pos) {
		ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap,
				sizeof(cap), pos);
		if (ret < 0) {
			DEBUGOUT("failed to read cap at pos: %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		DEBUGOUT("cfg type: %u, bar: %u, offset: %u, "
				"len: %u\n", cap.cfg_type, cap.bar,
				cap.offset, cap.length);

		switch (cap.cfg_type) {
		case IFCVF_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			break;
		case IFCVF_PCI_CAP_NOTIFY_CFG:
			ret = PCI_READ_CONFIG_DWORD(dev,
					&hw->notify_off_multiplier,
					pos + sizeof(cap));
			if (ret < 0) {
				DEBUGOUT("failed to read notify_off_multiplier\n");
				return -1;
			}
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_region = cap.bar;
			break;
		case IFCVF_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			break;
		case IFCVF_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			break;
		}
next:
		pos = cap.cap_next;
	}

	/* the live-migration registers are exposed through BAR4 */
	hw->lm_cfg = hw->mem_resource[4].addr;

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
			hw->isr == NULL || hw->dev_cfg == NULL) {
		DEBUGOUT("capability incomplete\n");
		return -1;
	}

	DEBUGOUT("capability mapping:\n"
			"common cfg: %p\n"
			"notify base: %p\n"
			"isr cfg: %p\n"
			"device cfg: %p\n"
			"multiplier: %u\n",
			hw->common_cfg, hw->notify_base, hw->isr, hw->dev_cfg,
			hw->notify_off_multiplier);

	return 0;
}
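/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * ifcvf_init_hw() can only translate capabilities into pointers if the
 * caller has already filled hw->mem_resource[] with the mapped PCI
 * BARs. The loop below assumes the DPDK rte_pci_device layout, where
 * pci_dev->mem_resource[i] carries .addr and .len for each BAR.
 */
static void
example_fill_mem_resource(struct ifcvf_hw *hw, PCI_DEV *pci_dev)
{
	u32 i;

	for (i = 0; i < IFCVF_PCI_MAX_RESOURCE; i++) {
		hw->mem_resource[i].addr = pci_dev->mem_resource[i].addr;
		hw->mem_resource[i].len = pci_dev->mem_resource[i].len;
	}
}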
STATIC u8
ifcvf_get_status(struct ifcvf_hw *hw)
{
	return IFCVF_READ_REG8(&hw->common_cfg->device_status);
}
STATIC void
ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status);
}
STATIC void
ifcvf_reset(struct ifcvf_hw *hw)
{
	ifcvf_set_status(hw, 0);

	/* flush status write: the device reports 0 once reset completes */
	while (ifcvf_get_status(hw))
		msec_delay(1);
}
STATIC void
ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
{
	if (status != 0)
		status |= ifcvf_get_status(hw);

	ifcvf_set_status(hw, status);
	/* read back to flush the status write */
	ifcvf_get_status(hw);
}
u64
ifcvf_get_features(struct ifcvf_hw *hw)
{
	u32 features_lo, features_hi;
	struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;

	/* the 64-bit feature set is exposed as two 32-bit windows */
	IFCVF_WRITE_REG32(0, &cfg->device_feature_select);
	features_lo = IFCVF_READ_REG32(&cfg->device_feature);

	IFCVF_WRITE_REG32(1, &cfg->device_feature_select);
	features_hi = IFCVF_READ_REG32(&cfg->device_feature);

	return ((u64)features_hi << 32) | features_lo;
}
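/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * testing one bit of the 64-bit feature set returned above. Bit 32 is
 * VIRTIO_F_VERSION_1 in the virtio specification, for example.
 */
static inline int
example_has_feature(struct ifcvf_hw *hw, unsigned int bit)
{
	return !!(ifcvf_get_features(hw) & (1ULL << bit));
}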
STATIC void
ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;

	IFCVF_WRITE_REG32(0, &cfg->guest_feature_select);
	IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature);

	IFCVF_WRITE_REG32(1, &cfg->guest_feature_select);
	IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature);
}
STATIC int
ifcvf_config_features(struct ifcvf_hw *hw)
{
	u64 host_features;

	/* only negotiate features the device actually offers */
	host_features = ifcvf_get_features(hw);
	hw->req_features &= host_features;

	ifcvf_set_features(hw, hw->req_features);
	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK);
	if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) {
		DEBUGOUT("failed to set FEATURES_OK status\n");
		return -1;
	}
	return 0;
}
STATIC void
io_write64_twopart(u64 val, u32 *lo, u32 *hi)
{
	IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo);
	IFCVF_WRITE_REG32(val >> 32, hi);
}
STATIC int
ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct ifcvf_pci_common_cfg *cfg;
	u8 *lm_cfg;
	u32 i;
	u16 notify_off;

	cfg = hw->common_cfg;
	lm_cfg = hw->lm_cfg;

	/* MSI-X vector 0 carries the device config interrupt */
	IFCVF_WRITE_REG16(0, &cfg->msix_config);
	if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) {
		DEBUGOUT("msix vec alloc failed for device config\n");
		return -1;
	}

	for (i = 0; i < hw->nr_vring; i++) {
		IFCVF_WRITE_REG16(i, &cfg->queue_select);
		io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				&cfg->queue_desc_hi);
		io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				&cfg->queue_avail_hi);
		io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				&cfg->queue_used_hi);
		IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);

		/* seed the ring state registers in the LM region */
		*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
				(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
			(u32)hw->vring[i].last_avail_idx |
			((u32)hw->vring[i].last_used_idx << 16);

		/* queue i is assigned MSI-X vector i + 1 */
		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
		if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
				IFCVF_MSI_NO_VECTOR) {
			DEBUGOUT("queue %u, msix vec alloc failed\n", i);
			return -1;
		}

		notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
		hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);
		IFCVF_WRITE_REG16(1, &cfg->queue_enable);
	}

	return 0;
}
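/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * ifcvf_hw_enable() expects the caller to have populated hw->vring[i]
 * with guest-physical ring addresses and the ring size beforehand.
 * The field names follow the struct used above; the values are
 * placeholders supplied by the caller.
 */
static void
example_setup_vring(struct ifcvf_hw *hw, u16 i,
		u64 desc, u64 avail, u64 used, u16 size)
{
	hw->vring[i].desc = desc;
	hw->vring[i].avail = avail;
	hw->vring[i].used = used;
	hw->vring[i].size = size;
	hw->vring[i].last_avail_idx = 0;
	hw->vring[i].last_used_idx = 0;
}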
STATIC void
ifcvf_hw_disable(struct ifcvf_hw *hw)
{
	u32 i;
	struct ifcvf_pci_common_cfg *cfg;
	u32 ring_state;

	cfg = hw->common_cfg;

	IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config);
	for (i = 0; i < hw->nr_vring; i++) {
		IFCVF_WRITE_REG16(i, &cfg->queue_select);
		IFCVF_WRITE_REG16(0, &cfg->queue_enable);
		IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
		ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
				(i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4);
		/*
		 * The device has processed all in-flight descriptors by the
		 * time it stops, so restore both indexes from the used index
		 * (the high 16 bits of the ring state word).
		 */
		hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
		hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
	}
}
int
ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	/* virtio handshake: ACK, DRIVER, FEATURES_OK, then DRIVER_OK */
	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK);
	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER);

	if (ifcvf_config_features(hw) < 0)
		return -1;

	if (ifcvf_hw_enable(hw) < 0)
		return -1;

	ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK);
	return 0;
}
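/*
 * Illustrative usage sketch (hypothetical caller, assuming the public
 * prototypes from ifcvf.h): once hw->vring[] is populated, a backend
 * starts the device and may kick each queue once so the device notices
 * any descriptors that were queued before DRIVER_OK was set.
 */
static int
example_start_datapath(struct ifcvf_hw *hw)
{
	u32 qid;

	if (ifcvf_start_hw(hw) < 0)
		return -1;
	for (qid = 0; qid < hw->nr_vring; qid++)
		ifcvf_notify_queue(hw, (u16)qid);
	return 0;
}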
void
ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}
void
ifcvf_enable_logging(struct ifcvf_hw *hw, u64 log_base, u64 log_size)
{
	u8 *lm_cfg;

	lm_cfg = hw->lm_cfg;

	/* program the dirty-log window as 32-bit low/high halves */
	*(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_LOW) =
		log_base & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_BASE_ADDR_HIGH) =
		(log_base >> 32) & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_LOW) =
		(log_base + log_size) & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_END_ADDR_HIGH) =
		((log_base + log_size) >> 32) & IFCVF_32_BIT_MASK;

	*(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_ENABLE_VF;
}
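/*
 * Illustrative sketch (assumption, not part of the driver): the log
 * window programmed above is the vhost dirty log, which by convention
 * uses one dirty bit per 4 KiB guest page, so a guest memory range of
 * mem_size bytes needs mem_size / 4096 / 8 bytes of log space.
 */
static inline u64
example_log_size(u64 mem_size)
{
	return mem_size / 4096 / 8;
}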
void
ifcvf_disable_logging(struct ifcvf_hw *hw)
{
	u8 *lm_cfg;

	lm_cfg = hw->lm_cfg;
	*(u32 *)(lm_cfg + IFCVF_LM_LOGGING_CTRL) = IFCVF_LM_DISABLE;
}
void
ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
}
u8
ifcvf_get_notify_region(struct ifcvf_hw *hw)
{
	return hw->notify_region;
}
u64
ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
{
	return (u8 *)hw->notify_addr[qid] -
		(u8 *)hw->mem_resource[hw->notify_region].addr;
}
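/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * combining ifcvf_get_notify_region() and ifcvf_get_queue_notify_off()
 * to recover a queue's doorbell address inside the notify BAR, e.g.
 * when exposing the doorbell for direct mapping.
 */
static void *
example_doorbell_addr(struct ifcvf_hw *hw, int qid)
{
	u8 region = ifcvf_get_notify_region(hw);
	u64 off = ifcvf_get_queue_notify_off(hw, qid);

	return (u8 *)hw->mem_resource[region].addr + off;
}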