1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
*/
13 #include <rte_malloc.h>
14 #include <rte_errno.h>
16 #include "base/dlb2_resource.h"
17 #include "base/dlb2_osdep.h"
18 #include "base/dlb2_regs.h"
19 #include "dlb2_main.h"
20 #include "../dlb2_user.h"
21 #include "../dlb2_priv.h"
22 #include "../dlb2_iface.h"
23 #include "../dlb2_inline_fns.h"
/* PF-only request identifiers passed to the shared base code: the PF has
 * physical-function id 0, no owning VF, and its requests are never VF
 * requests.
 */
25 #define PF_ID_ZERO 0 /* PF ONLY! */
26 #define NO_OWNER_VF 0 /* PF ONLY! */
27 #define NOT_VF_REQ false /* PF ONLY! */
/* Legacy PCI capability-list walking: the capabilities pointer lives at
 * config offset 0x34; each 16-bit capability header packs the next-pointer
 * in bits 15:8 (dword-aligned) and the capability ID in bits 7:0.
 */
29 #define DLB2_PCI_CAP_POINTER 0x34
30 #define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
31 #define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
/* Register offsets relative to the PCIe capability structure. */
33 #define DLB2_PCI_LNKCTL 16
34 #define DLB2_PCI_SLTCTL 24
35 #define DLB2_PCI_RTCTL 28
36 #define DLB2_PCI_EXP_DEVCTL2 40
37 #define DLB2_PCI_LNKCTL2 48
38 #define DLB2_PCI_SLTCTL2 56
39 #define DLB2_PCI_CMD 4
40 #define DLB2_PCI_EXP_DEVSTA 10
41 #define DLB2_PCI_EXP_DEVSTA_TRPND 0x20
42 #define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000
/* Capability / extended-capability IDs searched for during reset. */
44 #define DLB2_PCI_CAP_ID_EXP 0x10
45 #define DLB2_PCI_CAP_ID_MSIX 0x11
46 #define DLB2_PCI_EXT_CAP_ID_PRI 0x13
47 #define DLB2_PCI_EXT_CAP_ID_ACS 0xD
/* PRI (Page Request Interface), MSI-X, AER and command-register fields. */
49 #define DLB2_PCI_PRI_CTRL_ENABLE 0x1
50 #define DLB2_PCI_PRI_ALLOC_REQ 0xC
51 #define DLB2_PCI_PRI_CTRL 0x4
52 #define DLB2_PCI_MSIX_FLAGS 0x2
53 #define DLB2_PCI_MSIX_FLAGS_ENABLE 0x8000
54 #define DLB2_PCI_MSIX_FLAGS_MASKALL 0x4000
55 #define DLB2_PCI_ERR_ROOT_STATUS 0x30
56 #define DLB2_PCI_ERR_COR_STATUS 0x10
57 #define DLB2_PCI_ERR_UNCOR_STATUS 0x4
58 #define DLB2_PCI_COMMAND_INTX_DISABLE 0x400
/* ACS (Access Control Services) register offsets and control bits. */
59 #define DLB2_PCI_ACS_CAP 0x4
60 #define DLB2_PCI_ACS_CTRL 0x6
61 #define DLB2_PCI_ACS_SV 0x1
62 #define DLB2_PCI_ACS_RR 0x4
63 #define DLB2_PCI_ACS_CR 0x8
64 #define DLB2_PCI_ACS_UF 0x10
65 #define DLB2_PCI_ACS_EC 0x20
/* Walk the legacy PCI capability list of @pdev looking for capability @id.
 * Reads the first capability offset from the config-space capabilities
 * pointer (0x34), then follows the next-pointer in each 16-bit capability
 * header until @id is found or the end-of-list sentinel (0xFF) is hit.
 * NOTE(review): several lines (error handling, return statements) are
 * elided in this view; presumably returns the matching offset on success
 * and a negative value on failure/not-found -- confirm in the full source.
 */
67 static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
73 ret = rte_pci_read_config(pdev, &pos, 1, DLB2_PCI_CAP_POINTER);
80 ret = rte_pci_read_config(pdev, &hdr, 2, pos);
84 if (DLB2_PCI_CAP_ID(hdr) == id)
/* 0xFF marks the end of the capability list -- give up. */
87 if (DLB2_PCI_CAP_ID(hdr) == 0xFF)
90 pos = DLB2_PCI_CAP_NEXT(hdr);
/* Initialize per-device driver state.  Only the resource mutex is
 * initialized in the lines visible here.
 */
97 dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev)
99 rte_spinlock_init(&dlb2_dev->resource_mutex);
/* Enable power management by clearing the PMCSR disable bit in hardware.
 * Must run before any other MMIO access (see comment in dlb2_probe()).
 */
104 static void dlb2_pf_enable_pm(struct dlb2_dev *dlb2_dev)
106 dlb2_clr_pmcsr_disable(&dlb2_dev->hw);
/* Poll the device's PM state machine and functional-idle status CSRs
 * until both report ready, retrying up to DLB2_READY_RETRY_LIMIT times.
 * NOTE(review): the per-iteration delay is elided in this view -- the
 * "at least 1s" comment below implies one exists; confirm in full source.
 */
109 #define DLB2_READY_RETRY_LIMIT 1000
110 static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev)
114 /* Allow at least 1s for the device to become active after power-on */
115 for (retries = 0; retries < DLB2_READY_RETRY_LIMIT; retries++) {
116 union dlb2_cfg_mstr_cfg_diagnostic_idle_status idle;
117 union dlb2_cfg_mstr_cfg_pm_status pm_st;
120 addr = DLB2_CFG_MSTR_CFG_PM_STATUS;
121 pm_st.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
122 addr = DLB2_CFG_MSTR_CFG_DIAGNOSTIC_IDLE_STATUS;
123 idle.val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
/* Ready when the PM state machine is active and the function is idle. */
124 if (pm_st.field.pmsm == 1 && idle.field.dlb_func_idle == 1)
/* Exhausted all retries without seeing the ready condition. */
130 if (retries == DLB2_READY_RETRY_LIMIT) {
131 DLB2_LOG_ERR("[%s()] wait for device ready timed out\n",
/* Probe a DLB2 PF PCI device: allocate the device structure, capture the
 * BAR mappings already set up by the PCI bus driver, enable PM, wait for
 * the device to become ready, reset it, and initialize driver state and
 * hardware resources.  Error paths unwind via the goto labels at the end.
 * NOTE(review): the return statements and parts of the cleanup chain are
 * elided in this view; presumably returns the dev pointer or NULL --
 * confirm in the full source.
 */
140 dlb2_probe(struct rte_pci_device *pdev)
142 struct dlb2_dev *dlb2_dev;
/* NOTE(review): dlb2_dev is used here before the rte_malloc() below
 * assigns it -- harmless only if DLB2_INFO ignores its dev argument;
 * verify the macro definition.
 */
145 DLB2_INFO(dlb2_dev, "probe\n");
147 dlb2_dev = rte_malloc("DLB2_PF", sizeof(struct dlb2_dev),
148 RTE_CACHE_LINE_SIZE);
150 if (dlb2_dev == NULL) {
152 goto dlb2_dev_malloc_fail;
155 /* PCI Bus driver has already mapped bar space into process.
156 * Save off our IO register and FUNC addresses.
/* NOTE(review): the error strings below label BAR 0 as "csr_kva" and
 * BAR 2 as "func_kva", but the assignments store BAR 0 into func_kva and
 * BAR 2 into csr_kva -- the log labels appear swapped; worth fixing
 * upstream.
 */
160 if (pdev->mem_resource[0].addr == NULL) {
161 DLB2_ERR(dlb2_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
163 goto pci_mmap_bad_addr;
/* BAR 0 holds the FUNC register space. */
165 dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
166 dlb2_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
168 DLB2_INFO(dlb2_dev, "DLB2 FUNC VA=%p, PA=%p, len=%p\n",
169 (void *)dlb2_dev->hw.func_kva,
170 (void *)dlb2_dev->hw.func_phys_addr,
171 (void *)(pdev->mem_resource[0].len));
174 if (pdev->mem_resource[2].addr == NULL) {
175 DLB2_ERR(dlb2_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
177 goto pci_mmap_bad_addr;
/* BAR 2 holds the CSR register space. */
179 dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
180 dlb2_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
182 DLB2_INFO(dlb2_dev, "DLB2 CSR VA=%p, PA=%p, len=%p\n",
183 (void *)dlb2_dev->hw.csr_kva,
184 (void *)dlb2_dev->hw.csr_phys_addr,
185 (void *)(pdev->mem_resource[2].len));
187 dlb2_dev->pdev = pdev;
189 /* PM enable must be done before any other MMIO accesses, and this
190 * setting is persistent across device reset.
192 dlb2_pf_enable_pm(dlb2_dev);
194 ret = dlb2_pf_wait_for_device_ready(dlb2_dev);
196 goto wait_for_device_ready_fail;
198 ret = dlb2_pf_reset(dlb2_dev);
200 goto dlb2_reset_fail;
202 ret = dlb2_pf_init_driver_state(dlb2_dev);
204 goto init_driver_state_fail;
206 ret = dlb2_resource_init(&dlb2_dev->hw);
208 goto resource_init_fail;
/* Unwind in reverse order of acquisition. */
213 dlb2_resource_free(&dlb2_dev->hw);
214 init_driver_state_fail:
217 wait_for_device_ready_fail:
219 dlb2_dev_malloc_fail:
/* Issue a PCIe Function Level Reset (FLR) to the DLB2 PF and restore the
 * PCI configuration state afterwards.  Visible sequence:
 *
 *   1. Save the first 16 dwords of config space, the PCIe capability
 *      control registers, and the PRI allocation-request register.
 *   2. Clear the PCI command register, wait (exponential backoff, up to
 *      4 tries) for pending transactions to drain, then set the FLR bit
 *      in the PCIe device control register.
 *   3. Restore the saved registers, clear AER status, re-enable PRI,
 *      restore the saved config dwords, re-enable INTx and MSI-X, and
 *      re-enable the ACS bits the device supports.
 *
 * NOTE(review): many lines (error returns, the post-FLR delay, closing
 * braces) are elided in this view; presumably returns 0 on success and a
 * negative value on any config-space access failure -- confirm in the
 * full source.
 */
225 dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
233 uint16_t dev_ctl_word;
234 uint16_t dev_ctl2_word;
239 uint16_t rt_ctl_word;
240 uint32_t pri_reqs_dword;
241 uint16_t pri_ctrl_word;
250 uint16_t devsta_busy_word;
251 uint16_t devctl_word;
253 struct rte_pci_device *pdev = dlb2_dev->pdev;
255 /* Save PCI config state */
257 for (i = 0; i < 16; i++) {
258 if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
262 pcie_cap_offset = dlb2_pci_find_capability(pdev, DLB2_PCI_CAP_ID_EXP);
264 if (pcie_cap_offset < 0) {
265 DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n",
267 return pcie_cap_offset;
/* Snapshot every PCIe capability control register so it can be restored
 * after the FLR wipes it.
 */
270 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
271 if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
274 off = pcie_cap_offset + DLB2_PCI_LNKCTL;
275 if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
278 off = pcie_cap_offset + DLB2_PCI_SLTCTL;
279 if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
282 off = pcie_cap_offset + DLB2_PCI_RTCTL;
283 if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
286 off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
287 if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
290 off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
291 if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
294 off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
295 if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
/* PRI is optional: only saved/restored if the capability exists. */
298 off = DLB2_PCI_EXT_CAP_ID_PRI;
299 pri_cap_offset = rte_pci_find_ext_capability(pdev, off);
301 if (pri_cap_offset >= 0) {
302 off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
303 if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
307 /* clear the PCI command register before issuing the FLR */
311 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
312 DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
/* Wait for outstanding transactions to drain; delay doubles each try
 * (100, 200, 400, 800 ms).
 */
318 for (wait_count = 0; wait_count < 4; wait_count++) {
321 off = pcie_cap_offset + DLB2_PCI_EXP_DEVSTA;
322 ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
324 DLB2_LOG_ERR("[%s()] failed to read the pci device status\n",
329 if (!(devsta_busy_word & DLB2_PCI_EXP_DEVSTA_TRPND))
332 sleep_time = (1 << (wait_count)) * 100;
333 rte_delay_ms(sleep_time);
336 if (wait_count == 4) {
337 DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
/* Trigger the FLR by setting the BCR/FLR bit in device control. */
342 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
343 ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
345 DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n",
350 devctl_word |= DLB2_PCI_EXP_DEVCTL_BCR_FLR;
352 ret = rte_pci_write_config(pdev, &devctl_word, 2, off);
354 DLB2_LOG_ERR("[%s()] failed to write the pcie device control\n",
361 /* Restore PCI config state */
363 if (pcie_cap_offset >= 0) {
364 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
365 ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off);
367 DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
372 off = pcie_cap_offset + DLB2_PCI_LNKCTL;
373 ret = rte_pci_write_config(pdev, &lnk_word, 2, off);
375 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
380 off = pcie_cap_offset + DLB2_PCI_SLTCTL;
381 ret = rte_pci_write_config(pdev, &slt_word, 2, off);
383 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
388 off = pcie_cap_offset + DLB2_PCI_RTCTL;
389 ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off);
391 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
396 off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
397 ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off);
399 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
404 off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
405 ret = rte_pci_write_config(pdev, &lnk_word2, 2, off);
407 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
412 off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
413 ret = rte_pci_write_config(pdev, &slt_word2, 2, off);
415 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Re-enable PRI with the saved allocation-request value. */
421 if (pri_cap_offset >= 0) {
422 pri_ctrl_word = DLB2_PCI_PRI_CTRL_ENABLE;
424 off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
425 ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off);
427 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
432 off = pri_cap_offset + DLB2_PCI_PRI_CTRL;
433 ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off);
435 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Clear AER status registers by writing back what was read (these are
 * presumably write-1-to-clear -- standard AER semantics; confirm).
 */
441 off = RTE_PCI_EXT_CAP_ID_ERR;
442 err_cap_offset = rte_pci_find_ext_capability(pdev, off);
444 if (err_cap_offset >= 0) {
447 off = err_cap_offset + DLB2_PCI_ERR_ROOT_STATUS;
448 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
451 ret = rte_pci_write_config(pdev, &tmp, 4, off);
453 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
458 off = err_cap_offset + DLB2_PCI_ERR_COR_STATUS;
459 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
462 ret = rte_pci_write_config(pdev, &tmp, 4, off);
464 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
469 off = err_cap_offset + DLB2_PCI_ERR_UNCOR_STATUS;
470 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
473 ret = rte_pci_write_config(pdev, &tmp, 4, off);
475 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Restore the saved config dwords in reverse order (command register
 * last, since dword 1 contains it).
 */
481 for (i = 16; i > 0; i--) {
483 ret = rte_pci_write_config(pdev, &dword[i - 1], 4, off);
485 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Re-enable INTx by clearing the disable bit in the command register. */
492 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
493 cmd &= ~DLB2_PCI_COMMAND_INTX_DISABLE;
494 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
495 DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
/* Re-enable MSI-X: first enable with all vectors masked, then clear the
 * mask-all bit.
 */
501 msix_cap_offset = dlb2_pci_find_capability(pdev,
502 DLB2_PCI_CAP_ID_MSIX);
503 if (msix_cap_offset >= 0) {
504 off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
505 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
506 cmd |= DLB2_PCI_MSIX_FLAGS_ENABLE;
507 cmd |= DLB2_PCI_MSIX_FLAGS_MASKALL;
508 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
509 DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
515 off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
516 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
517 cmd &= ~DLB2_PCI_MSIX_FLAGS_MASKALL;
518 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
519 DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
/* Re-enable the ACS features the device advertises, then clear RR/CR/EC
 * in a second pass.
 */
526 off = DLB2_PCI_EXT_CAP_ID_ACS;
527 acs_cap_offset = rte_pci_find_ext_capability(pdev, off);
529 if (acs_cap_offset >= 0) {
530 uint16_t acs_cap, acs_ctrl, acs_mask;
531 off = acs_cap_offset + DLB2_PCI_ACS_CAP;
532 if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
535 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
536 if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
539 acs_mask = DLB2_PCI_ACS_SV | DLB2_PCI_ACS_RR;
540 acs_mask |= (DLB2_PCI_ACS_CR | DLB2_PCI_ACS_UF);
541 acs_ctrl |= (acs_cap & acs_mask);
543 ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
545 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
550 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
551 if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
554 acs_mask = DLB2_PCI_ACS_RR | DLB2_PCI_ACS_CR;
555 acs_mask |= DLB2_PCI_ACS_EC;
556 acs_ctrl &= ~acs_mask;
558 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
559 ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
561 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* PF entry point for scheduling-domain creation: forwards to the shared
 * base-code implementation, marking the request as originating from the
 * PF itself (NOT_VF_REQ).  NOTE(review): the call's trailing argument(s)
 * are elided in this view.
 */
571 dlb2_pf_create_sched_domain(struct dlb2_hw *hw,
572 struct dlb2_create_sched_domain_args *args,
573 struct dlb2_cmd_response *resp)
575 return dlb2_hw_create_sched_domain(hw, args, resp, NOT_VF_REQ,
/* PF entry point for resetting scheduling domain @id: forwards to the
 * shared base-code implementation as a PF-originated request.
 */
580 dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 id)
582 return dlb2_reset_domain(hw, id, NOT_VF_REQ, PF_ID_ZERO);
/* PF entry point for load-balanced queue creation in domain @id: forwards
 * to the shared base-code implementation as a PF-originated request.
 * NOTE(review): the call's trailing argument(s) are elided in this view.
 */
586 dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,
588 struct dlb2_create_ldb_queue_args *args,
589 struct dlb2_cmd_response *resp)
591 return dlb2_hw_create_ldb_queue(hw, id, args, resp, NOT_VF_REQ,
/* PF entry point for load-balanced port creation in domain @id.
 * @cq_dma_base is the DMA base address of the port's consumer queue.
 * NOTE(review): the call's trailing argument(s) are elided in this view.
 */
596 dlb2_pf_create_ldb_port(struct dlb2_hw *hw,
598 struct dlb2_create_ldb_port_args *args,
599 uintptr_t cq_dma_base,
600 struct dlb2_cmd_response *resp)
602 return dlb2_hw_create_ldb_port(hw, id, args,
/* PF entry point for directed port creation in domain @id.
 * @cq_dma_base is the DMA base address of the port's consumer queue.
 * NOTE(review): the call's trailing argument(s) are elided in this view.
 */
610 dlb2_pf_create_dir_port(struct dlb2_hw *hw,
612 struct dlb2_create_dir_port_args *args,
613 uintptr_t cq_dma_base,
614 struct dlb2_cmd_response *resp)
616 return dlb2_hw_create_dir_port(hw, id, args,
/* PF entry point for directed queue creation in domain @id: forwards to
 * the shared base-code implementation as a PF-originated request.
 * NOTE(review): the call's trailing argument(s) are elided in this view.
 */
624 dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
626 struct dlb2_create_dir_queue_args *args,
627 struct dlb2_cmd_response *resp)
629 return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
634 dlb2_pf_start_domain(struct dlb2_hw *hw,
636 struct dlb2_start_domain_args *args,
637 struct dlb2_cmd_response *resp)
639 return dlb2_hw_start_domain(hw, id, args, resp, NOT_VF_REQ,