1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
13 #include <rte_malloc.h>
14 #include <rte_errno.h>
16 #define DLB2_USE_NEW_HEADERS /* TEMPORARY FOR MERGE */
18 #include "base/dlb2_regs_new.h"
19 #include "base/dlb2_hw_types_new.h"
20 #include "base/dlb2_resource.h"
21 #include "base/dlb2_osdep.h"
22 #include "dlb2_main.h"
23 #include "../dlb2_user.h"
24 #include "../dlb2_priv.h"
25 #include "../dlb2_iface.h"
26 #include "../dlb2_inline_fns.h"
/* This file services the physical function (PF) only, so all
 * virtualization-related request arguments are pinned to the
 * "not a VF" values below.
 */
28 #define PF_ID_ZERO 0 /* PF ONLY! */
29 #define NO_OWNER_VF 0 /* PF ONLY! */
30 #define NOT_VF_REQ false /* PF ONLY! */
/* Legacy PCI capability-list walking: the list head pointer lives at
 * config offset 0x34; each 16-bit capability header packs the ID in
 * bits [7:0] and the next-capability pointer in bits [15:8] (low two
 * bits of the pointer are reserved, hence the 0xFC mask).
 */
32 #define DLB2_PCI_CAP_POINTER 0x34
33 #define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
34 #define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
/* Register offsets relative to the PCI Express capability structure,
 * plus the DEVSTA "transactions pending" and DEVCTL "initiate FLR"
 * bits used by dlb2_pf_reset().
 */
36 #define DLB2_PCI_LNKCTL 16
37 #define DLB2_PCI_SLTCTL 24
38 #define DLB2_PCI_RTCTL 28
39 #define DLB2_PCI_EXP_DEVCTL2 40
40 #define DLB2_PCI_LNKCTL2 48
41 #define DLB2_PCI_SLTCTL2 56
42 #define DLB2_PCI_CMD 4
43 #define DLB2_PCI_EXP_DEVSTA 10
44 #define DLB2_PCI_EXP_DEVSTA_TRPND 0x20
45 #define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000
/* Capability IDs (legacy list) and extended-capability IDs searched
 * for during the FLR save/restore sequence.
 */
47 #define DLB2_PCI_CAP_ID_EXP 0x10
48 #define DLB2_PCI_CAP_ID_MSIX 0x11
49 #define DLB2_PCI_EXT_CAP_ID_PRI 0x13
50 #define DLB2_PCI_EXT_CAP_ID_ACS 0xD
/* PRI (Page Request Interface) extended-capability register offsets
 * and control-enable bit.
 */
52 #define DLB2_PCI_PRI_CTRL_ENABLE 0x1
53 #define DLB2_PCI_PRI_ALLOC_REQ 0xC
54 #define DLB2_PCI_PRI_CTRL 0x4
/* MSI-X message-control register offset and its enable/mask-all bits. */
55 #define DLB2_PCI_MSIX_FLAGS 0x2
56 #define DLB2_PCI_MSIX_FLAGS_ENABLE 0x8000
57 #define DLB2_PCI_MSIX_FLAGS_MASKALL 0x4000
/* AER (Advanced Error Reporting) status register offsets, and the
 * INTx-disable bit of the PCI command register.
 */
58 #define DLB2_PCI_ERR_ROOT_STATUS 0x30
59 #define DLB2_PCI_ERR_COR_STATUS 0x10
60 #define DLB2_PCI_ERR_UNCOR_STATUS 0x4
61 #define DLB2_PCI_COMMAND_INTX_DISABLE 0x400
/* ACS (Access Control Services) capability/control register offsets
 * and control bits (Source Validation, Request Redirect, Completion
 * Redirect, Upstream Forwarding, Egress Control).
 */
62 #define DLB2_PCI_ACS_CAP 0x4
63 #define DLB2_PCI_ACS_CTRL 0x6
64 #define DLB2_PCI_ACS_SV 0x1
65 #define DLB2_PCI_ACS_RR 0x4
66 #define DLB2_PCI_ACS_CR 0x8
67 #define DLB2_PCI_ACS_UF 0x10
68 #define DLB2_PCI_ACS_EC 0x20
/* dlb2_pci_find_capability() - walk @pdev's legacy PCI capability list
 * and return the config-space offset of the capability whose ID equals
 * @id (negative on error / not found, per the visible callers that
 * test "< 0").
 *
 * NOTE(review): this dump is missing interleaved source lines (gaps in
 * the embedded numbering) — the loop construct, error checks on the
 * config reads, and the return statements are not visible here. Code
 * below is left byte-identical; confirm against the full file.
 */
70 static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
/* Read the 8-bit capability-list head pointer at config offset 0x34. */
76 ret = rte_pci_read_config(pdev, &pos, 1, DLB2_PCI_CAP_POINTER);
/* Each capability header is 16 bits: ID in [7:0], next ptr in [15:8]. */
83 ret = rte_pci_read_config(pdev, &hdr, 2, pos);
87 if (DLB2_PCI_CAP_ID(hdr) == id)
/* An ID of 0xFF marks an invalid/terminated list -> not found. */
90 if (DLB2_PCI_CAP_ID(hdr) == 0xFF)
/* Follow the next-capability pointer and keep walking. */
93 pos = DLB2_PCI_CAP_NEXT(hdr);
/* dlb2_pf_init_driver_state() - initialize per-device software state.
 * The only state visible in this dump is the resource mutex protecting
 * the device's resource accounting.
 *
 * NOTE(review): return type, braces, and return value are on lines
 * missing from this dump; code left byte-identical.
 */
100 dlb2_pf_init_driver_state(struct dlb2_dev *dlb2_dev)
102 rte_spinlock_init(&dlb2_dev->resource_mutex);
/* dlb2_pf_enable_pm() - bring the device out of its power-management
 * disabled state by clearing the PMCSR disable, using the hardware
 * version derived from the PCI device ID. Per the comment at the call
 * site in dlb2_probe(), this must run before any other MMIO access.
 */
107 static void dlb2_pf_enable_pm(struct dlb2_dev *dlb2_dev)
/* Hardware version (e.g. DLB 2.0 vs 2.5) is keyed off the PCI ID. */
110 version = DLB2_HW_DEVICE_FROM_PCI_ID(dlb2_dev->pdev);
112 dlb2_clr_pmcsr_disable(&dlb2_dev->hw, version);
/* Upper bound on ready polls; combined with the (not visible here)
 * per-iteration delay this allows at least 1s for power-on, per the
 * comment below.
 */
115 #define DLB2_READY_RETRY_LIMIT 1000
/* dlb2_pf_wait_for_device_ready() - poll the power-management state
 * machine and diagnostic-idle CSRs until the device reports both
 * "PM state machine settled" and "DLB function idle", or the retry
 * limit is exhausted.
 *
 * NOTE(review): the per-iteration delay, variable declarations, and
 * return statements fall on lines missing from this dump; code left
 * byte-identical.
 */
116 static int dlb2_pf_wait_for_device_ready(struct dlb2_dev *dlb2_dev,
121 /* Allow at least 1s for the device to become active after power-on */
122 for (retries = 0; retries < DLB2_READY_RETRY_LIMIT; retries++) {
124 u32 idle_dlb_func_idle;
/* Sample both status CSRs each iteration (addresses vary by HW rev). */
129 addr = DLB2_CM_CFG_PM_STATUS(dlb_version);
130 pm_st_val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
131 addr = DLB2_CM_CFG_DIAGNOSTIC_IDLE_STATUS(dlb_version);
132 idle_val = DLB2_CSR_RD(&dlb2_dev->hw, addr);
133 idle_dlb_func_idle = idle_val &
134 DLB2_CM_CFG_DIAGNOSTIC_IDLE_STATUS_DLB_FUNC_IDLE;
135 pm_st_pmsm = pm_st_val & DLB2_CM_CFG_PM_STATUS_PMSM;
/* Ready only when BOTH conditions hold. */
136 if (pm_st_pmsm && idle_dlb_func_idle)
/* Loop ran to completion without the break -> device never came up. */
142 if (retries == DLB2_READY_RETRY_LIMIT) {
143 DLB2_LOG_ERR("[%s()] wait for device ready timed out\n",
/* dlb2_probe() - PF probe path: allocate the device structure, record
 * the BAR mappings done by the PCI bus driver, enable power
 * management, wait for hardware ready, reset the function, and
 * initialize driver + resource state. Failures unwind through the
 * goto labels at the bottom in reverse order of setup.
 *
 * NOTE(review): return type, braces, several error-path statements,
 * and the success return are on lines missing from this dump; code
 * left byte-identical.
 */
152 dlb2_probe(struct rte_pci_device *pdev)
154 struct dlb2_dev *dlb2_dev;
/* NOTE(review): dlb2_dev is passed to DLB2_INFO here before it is
 * assigned (allocation happens two lines below) — presumably the
 * macro ignores its device argument; confirm against its definition.
 */
158 DLB2_INFO(dlb2_dev, "probe\n");
160 dlb2_dev = rte_malloc("DLB2_PF", sizeof(struct dlb2_dev),
161 RTE_CACHE_LINE_SIZE);
163 if (dlb2_dev == NULL) {
165 goto dlb2_dev_malloc_fail;
/* Hardware revision is derived from the PCI device ID. */
168 dlb_version = DLB2_HW_DEVICE_FROM_PCI_ID(pdev);
170 /* PCI Bus driver has already mapped bar space into process.
171 * Save off our IO register and FUNC addresses.
/* BAR 0 -> FUNC space. NOTE(review): the log string says "csr_kva"
 * but the assignment target below is func_kva (and vice versa for
 * BAR 2) — the labels in the two error messages appear swapped;
 * verify before relying on them when debugging.
 */
175 if (pdev->mem_resource[0].addr == NULL) {
176 DLB2_ERR(dlb2_dev, "probe: BAR 0 addr (csr_kva) is NULL\n");
178 goto pci_mmap_bad_addr;
180 dlb2_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
181 dlb2_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
183 DLB2_INFO(dlb2_dev, "DLB2 FUNC VA=%p, PA=%p, len=%p\n",
184 (void *)dlb2_dev->hw.func_kva,
185 (void *)dlb2_dev->hw.func_phys_addr,
186 (void *)(pdev->mem_resource[0].len));
/* BAR 2 -> CSR space (see label-swap note above). */
189 if (pdev->mem_resource[2].addr == NULL) {
190 DLB2_ERR(dlb2_dev, "probe: BAR 2 addr (func_kva) is NULL\n");
192 goto pci_mmap_bad_addr;
194 dlb2_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
195 dlb2_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
197 DLB2_INFO(dlb2_dev, "DLB2 CSR VA=%p, PA=%p, len=%p\n",
198 (void *)dlb2_dev->hw.csr_kva,
199 (void *)dlb2_dev->hw.csr_phys_addr,
200 (void *)(pdev->mem_resource[2].len));
202 dlb2_dev->pdev = pdev;
204 /* PM enable must be done before any other MMIO accesses, and this
205 * setting is persistent across device reset.
207 dlb2_pf_enable_pm(dlb2_dev);
209 ret = dlb2_pf_wait_for_device_ready(dlb2_dev, dlb_version);
211 goto wait_for_device_ready_fail;
/* Function-level reset brings the PF to a clean state (see
 * dlb2_pf_reset() below).
 */
213 ret = dlb2_pf_reset(dlb2_dev);
215 goto dlb2_reset_fail;
217 ret = dlb2_pf_init_driver_state(dlb2_dev);
219 goto init_driver_state_fail;
221 ret = dlb2_resource_init(&dlb2_dev->hw, dlb_version);
223 goto resource_init_fail;
/* Error unwind: free resources, then (on lines not visible here)
 * release the device allocation before returning failure.
 */
228 dlb2_resource_free(&dlb2_dev->hw);
229 init_driver_state_fail:
232 wait_for_device_ready_fail:
234 dlb2_dev_malloc_fail:
/* dlb2_pf_reset() - issue a PCIe Function Level Reset (FLR) to the PF
 * and restore the configuration state that the reset clobbers.
 *
 * Sequence (as visible in this dump):
 *   1. Save the first 64 bytes of config space plus the PCIe
 *      capability control registers and the PRI allocation request.
 *   2. Quiesce: clear the command register, wait for pending
 *      transactions to drain, then set the FLR bit in DEVCTL.
 *   3. Restore the saved registers, re-enable PRI, clear AER status,
 *      restore the config header, re-enable INTx and MSI-X, and
 *      program ACS.
 *
 * NOTE(review): this dump is missing interleaved source lines — the
 * FLR settle delay, several "off =" computations, most error returns,
 * and the final return are not visible. Code left byte-identical.
 */
240 dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
248 uint16_t dev_ctl_word;
249 uint16_t dev_ctl2_word;
254 uint16_t rt_ctl_word;
255 uint32_t pri_reqs_dword;
256 uint16_t pri_ctrl_word;
265 uint16_t devsta_busy_word;
266 uint16_t devctl_word;
268 struct rte_pci_device *pdev = dlb2_dev->pdev;
270 /* Save PCI config state */
/* Snapshot the 16-dword (64-byte) config header for later restore. */
272 for (i = 0; i < 16; i++) {
273 if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
277 pcie_cap_offset = dlb2_pci_find_capability(pdev, DLB2_PCI_CAP_ID_EXP);
279 if (pcie_cap_offset < 0) {
280 DLB2_LOG_ERR("[%s()] failed to find the pcie capability\n",
282 return pcie_cap_offset;
/* Save the PCIe capability control registers an FLR resets:
 * DEVCTL, LNKCTL, SLTCTL, RTCTL, DEVCTL2, LNKCTL2, SLTCTL2.
 */
285 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
286 if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
289 off = pcie_cap_offset + DLB2_PCI_LNKCTL;
290 if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
293 off = pcie_cap_offset + DLB2_PCI_SLTCTL;
294 if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
297 off = pcie_cap_offset + DLB2_PCI_RTCTL;
298 if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
301 off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
302 if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
305 off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
306 if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
309 off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
310 if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
/* PRI is an extended capability; save its allocation-request register
 * if present (absence is not an error).
 */
313 off = DLB2_PCI_EXT_CAP_ID_PRI;
314 pri_cap_offset = rte_pci_find_ext_capability(pdev, off);
316 if (pri_cap_offset >= 0) {
317 off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
318 if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
322 /* clear the PCI command register before issuing the FLR */
/* NOTE(review): the value of 'cmd' and 'off' written here is set on
 * lines missing from this dump (presumably cmd = 0, off =
 * DLB2_PCI_CMD); confirm against the full file.
 */
326 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
327 DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
/* Wait for outstanding transactions to drain, with exponential
 * backoff: 100, 200, 400, 800 ms across the four attempts.
 */
333 for (wait_count = 0; wait_count < 4; wait_count++) {
336 off = pcie_cap_offset + DLB2_PCI_EXP_DEVSTA;
337 ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
339 DLB2_LOG_ERR("[%s()] failed to read the pci device status\n",
/* TRPND clear -> no transactions pending; safe to reset. */
344 if (!(devsta_busy_word & DLB2_PCI_EXP_DEVSTA_TRPND))
347 sleep_time = (1 << (wait_count)) * 100;
348 rte_delay_ms(sleep_time);
351 if (wait_count == 4) {
352 DLB2_LOG_ERR("[%s()] wait for pci pending transactions timed out\n",
/* Trigger the FLR by setting the Initiate-FLR bit in DEVCTL. */
357 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
358 ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
360 DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n",
365 devctl_word |= DLB2_PCI_EXP_DEVCTL_BCR_FLR;
367 ret = rte_pci_write_config(pdev, &devctl_word, 2, off);
369 DLB2_LOG_ERR("[%s()] failed to write the pcie device control\n",
376 /* Restore PCI config state */
/* Write back every PCIe capability register saved above. */
378 if (pcie_cap_offset >= 0) {
379 off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
380 ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off);
382 DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
387 off = pcie_cap_offset + DLB2_PCI_LNKCTL;
388 ret = rte_pci_write_config(pdev, &lnk_word, 2, off);
390 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
395 off = pcie_cap_offset + DLB2_PCI_SLTCTL;
396 ret = rte_pci_write_config(pdev, &slt_word, 2, off);
398 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
403 off = pcie_cap_offset + DLB2_PCI_RTCTL;
404 ret = rte_pci_write_config(pdev, &rt_ctl_word, 2, off);
406 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
411 off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL2;
412 ret = rte_pci_write_config(pdev, &dev_ctl2_word, 2, off);
414 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
419 off = pcie_cap_offset + DLB2_PCI_LNKCTL2;
420 ret = rte_pci_write_config(pdev, &lnk_word2, 2, off);
422 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
427 off = pcie_cap_offset + DLB2_PCI_SLTCTL2;
428 ret = rte_pci_write_config(pdev, &slt_word2, 2, off);
430 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Re-enable PRI: restore the saved allocation request and set the
 * enable bit in the PRI control register.
 */
436 if (pri_cap_offset >= 0) {
437 pri_ctrl_word = DLB2_PCI_PRI_CTRL_ENABLE;
439 off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
440 ret = rte_pci_write_config(pdev, &pri_reqs_dword, 4, off);
442 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
447 off = pri_cap_offset + DLB2_PCI_PRI_CTRL;
448 ret = rte_pci_write_config(pdev, &pri_ctrl_word, 2, off);
450 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Clear AER status registers by writing back the bits that were set
 * (these registers are write-1-to-clear per PCIe AER semantics —
 * reading then writing the same value clears all latched errors).
 */
456 off = RTE_PCI_EXT_CAP_ID_ERR;
457 err_cap_offset = rte_pci_find_ext_capability(pdev, off);
459 if (err_cap_offset >= 0) {
462 off = err_cap_offset + DLB2_PCI_ERR_ROOT_STATUS;
463 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
466 ret = rte_pci_write_config(pdev, &tmp, 4, off);
468 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
473 off = err_cap_offset + DLB2_PCI_ERR_COR_STATUS;
474 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
477 ret = rte_pci_write_config(pdev, &tmp, 4, off);
479 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
484 off = err_cap_offset + DLB2_PCI_ERR_UNCOR_STATUS;
485 if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
488 ret = rte_pci_write_config(pdev, &tmp, 4, off);
490 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Restore the 16 saved config-header dwords, highest offset first.
 * NOTE(review): the 'off' computation for this loop is on a line
 * missing from this dump (presumably off = (i - 1) * 4).
 */
496 for (i = 16; i > 0; i--) {
498 ret = rte_pci_write_config(pdev, &dword[i - 1], 4, off);
500 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* Re-enable legacy INTx by clearing the disable bit in the command
 * register (best effort; 'off' is set on a missing line).
 */
507 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
508 cmd &= ~DLB2_PCI_COMMAND_INTX_DISABLE;
509 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
510 DLB2_LOG_ERR("[%s()] failed to write the pci command\n",
/* Re-enable MSI-X: first set ENABLE with MASKALL (so no vector fires
 * while state is being restored), then clear MASKALL.
 */
516 msix_cap_offset = dlb2_pci_find_capability(pdev,
517 DLB2_PCI_CAP_ID_MSIX);
518 if (msix_cap_offset >= 0) {
519 off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
520 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
521 cmd |= DLB2_PCI_MSIX_FLAGS_ENABLE;
522 cmd |= DLB2_PCI_MSIX_FLAGS_MASKALL;
523 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
524 DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
530 off = msix_cap_offset + DLB2_PCI_MSIX_FLAGS;
531 if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
532 cmd &= ~DLB2_PCI_MSIX_FLAGS_MASKALL;
533 if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
534 DLB2_LOG_ERR("[%s()] failed to write msix flags\n",
/* Program ACS if present: first enable the SV/RR/CR/UF bits the
 * hardware advertises in its ACS capability, then explicitly clear
 * RR, CR, and EC in a second read-modify-write.
 */
541 off = DLB2_PCI_EXT_CAP_ID_ACS;
542 acs_cap_offset = rte_pci_find_ext_capability(pdev, off);
544 if (acs_cap_offset >= 0) {
545 uint16_t acs_cap, acs_ctrl, acs_mask;
546 off = acs_cap_offset + DLB2_PCI_ACS_CAP;
547 if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
550 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
551 if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
554 acs_mask = DLB2_PCI_ACS_SV | DLB2_PCI_ACS_RR;
555 acs_mask |= (DLB2_PCI_ACS_CR | DLB2_PCI_ACS_UF);
/* Only enable what the capability register says is supported. */
556 acs_ctrl |= (acs_cap & acs_mask);
558 ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
560 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
565 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
566 if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
569 acs_mask = DLB2_PCI_ACS_RR | DLB2_PCI_ACS_CR;
570 acs_mask |= DLB2_PCI_ACS_EC;
571 acs_ctrl &= ~acs_mask;
573 off = acs_cap_offset + DLB2_PCI_ACS_CTRL;
574 ret = rte_pci_write_config(pdev, &acs_ctrl, 2, off);
576 DLB2_LOG_ERR("[%s()] failed to write the pcie config space at offset %d\n",
/* PF thin wrapper: create a scheduling domain via the shared base-code
 * entry point, with the VF-request arguments pinned to PF values.
 * (The tail of the argument list is on lines missing from this dump.)
 */
586 dlb2_pf_create_sched_domain(struct dlb2_hw *hw,
587 struct dlb2_create_sched_domain_args *args,
588 struct dlb2_cmd_response *resp)
590 return dlb2_hw_create_sched_domain(hw, args, resp, NOT_VF_REQ,
/* PF thin wrapper: reset scheduling domain @id via the shared base
 * code, as a non-VF request from PF 0.
 */
595 dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 id)
597 return dlb2_reset_domain(hw, id, NOT_VF_REQ, PF_ID_ZERO);
/* PF thin wrapper: create a load-balanced queue in domain 'id' via the
 * shared base code. (The 'id' parameter line and the tail of the call
 * are on lines missing from this dump.)
 */
601 dlb2_pf_create_ldb_queue(struct dlb2_hw *hw,
603 struct dlb2_create_ldb_queue_args *args,
604 struct dlb2_cmd_response *resp)
606 return dlb2_hw_create_ldb_queue(hw, id, args, resp, NOT_VF_REQ,
/* PF thin wrapper: create a load-balanced port, passing through the
 * CQ's DMA base address for the hardware to write completions to.
 * (The remaining call arguments are on lines missing from this dump.)
 */
611 dlb2_pf_create_ldb_port(struct dlb2_hw *hw,
613 struct dlb2_create_ldb_port_args *args,
614 uintptr_t cq_dma_base,
615 struct dlb2_cmd_response *resp)
617 return dlb2_hw_create_ldb_port(hw, id, args,
/* PF thin wrapper: create a directed port, passing through the CQ's
 * DMA base address. (Remaining call arguments are on lines missing
 * from this dump.)
 */
625 dlb2_pf_create_dir_port(struct dlb2_hw *hw,
627 struct dlb2_create_dir_port_args *args,
628 uintptr_t cq_dma_base,
629 struct dlb2_cmd_response *resp)
631 return dlb2_hw_create_dir_port(hw, id, args,
/* PF thin wrapper: create a directed queue in domain 'id' via the
 * shared base code. (The 'id' parameter line and the call's tail are
 * on lines missing from this dump.)
 */
639 dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
641 struct dlb2_create_dir_queue_args *args,
642 struct dlb2_cmd_response *resp)
644 return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
649 dlb2_pf_start_domain(struct dlb2_hw *hw,
651 struct dlb2_start_domain_args *args,
652 struct dlb2_cmd_response *resp)
654 return dlb2_hw_start_domain(hw, id, args, resp, NOT_VF_REQ,