1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2020 Intel Corporation
13 #include <rte_string_fns.h>
14 #include <rte_cycles.h>
17 #include <rte_spinlock.h>
18 #include "../dlb2_main.h"
20 /* TEMPORARY inclusion of both headers for merge */
21 #include "dlb2_resource_new.h"
22 #include "dlb2_resource.h"
24 #include "../../dlb2_log.h"
25 #include "../../dlb2_user.h"
/* 32-bit MMIO register accessors.
 * Fix: fully parenthesize the macro arguments so the macros are safe with
 * arbitrary expressions (e.g. DLB2_PCI_REG_READ(base + off) previously cast
 * only 'base' to void * because 'addr' was expanded unparenthesized).
 */
#define DLB2_PCI_REG_READ(addr) rte_read32((void *)(addr))
#define DLB2_PCI_REG_WRITE(reg, value) rte_write32((value), (void *)(reg))
/* Read/write register 'reg' in the CSR BAR space.
 * 'a' is the device handle; csr_kva is the kernel-mapped base of the CSR BAR,
 * so the address is computed as base + register offset.
 */
#define DLB2_CSR_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->csr_kva + (reg)))
#define DLB2_CSR_RD(hw, reg) \
	DLB2_PCI_REG_READ(DLB2_CSR_REG_ADDR((hw), (reg)))
#define DLB2_CSR_WR(hw, reg, value) \
	DLB2_PCI_REG_WRITE(DLB2_CSR_REG_ADDR((hw), (reg)), (value))
/* Read/write register 'reg' in the func BAR space.
 * Same pattern as the CSR accessors above, but relative to the kernel-mapped
 * function BAR base (func_kva).
 */
#define DLB2_FUNC_REG_ADDR(a, reg) ((void *)((uintptr_t)(a)->func_kva + (reg)))
#define DLB2_FUNC_RD(hw, reg) \
	DLB2_PCI_REG_READ(DLB2_FUNC_REG_ADDR((hw), (reg)))
#define DLB2_FUNC_WR(hw, reg, value) \
	DLB2_PCI_REG_WRITE(DLB2_FUNC_REG_ADDR((hw), (reg)), (value))
/* Map to PMDs logging interface.
 * Note: the 'dev' argument is accepted for interface parity but is not used
 * by the expansions — only the format string and args are forwarded.
 */
#define DLB2_ERR(dev, fmt, args...) \
	DLB2_LOG_ERR(fmt, ## args)

#define DLB2_INFO(dev, fmt, args...) \
	DLB2_LOG_INFO(fmt, ## args)

#define DLB2_DEBUG(dev, fmt, args...) \
	DLB2_LOG_DBG(fmt, ## args)
/**
 * os_udelay() - busy-wait for a number of microseconds
 * @usecs: delay duration.
 */
static inline void os_udelay(int usecs)
/**
 * os_msleep() - sleep for a number of milliseconds
 * @msecs: delay duration.
 */
static inline void os_msleep(int msecs)
/* Select the producer-port MMIO base: load-balanced vs. directed ports.
 * Fix: renamed the macro parameter from '__is_ldb' to 'is_ldb' — identifiers
 * beginning with a double underscore are reserved for the implementation
 * (C11 7.1.3). The rename is internal to the macro; callers are unaffected.
 */
#define DLB2_PP_BASE(is_ldb) \
	((is_ldb) ? DLB2_LDB_PP_BASE : DLB2_DIR_PP_BASE)
/**
 * os_map_producer_port() - map a producer port into the caller's address space
 * @hw: dlb2_hw handle for a particular device.
 * @is_ldb: true for load-balanced port, false for a directed port
 *
 * This function maps the requested producer port memory into the caller's
 * address space.
 *
 * Return: the base address at which the PP memory was mapped, else NULL.
 */
static inline void *os_map_producer_port(struct dlb2_hw *hw,
	/* Producer ports live in the func BAR; pick the LDB or DIR region. */
	pp_dma_base = (uintptr_t)hw->func_kva + DLB2_PP_BASE(is_ldb);
	/* Each port occupies one page, laid out contiguously by port ID. */
	addr = (pp_dma_base + (rte_mem_page_size() * port_id));

	return (void *)(uintptr_t)addr;
/**
 * os_unmap_producer_port() - unmap a producer port
 * @hw: dlb2_hw handle for a particular device.
 * @addr: mapped producer port address
 *
 * This function undoes os_map_producer_port() by unmapping the producer port
 * memory from the caller's address space.
 *
 * Return: none. (Previous comment claiming a return value was a copy-paste
 * error — the function is void.)
 */
static inline void os_unmap_producer_port(struct dlb2_hw *hw, void *addr)
/**
 * os_fence_hcw() - fence an HCW to ensure it arrives at the device
 * @hw: dlb2_hw handle for a particular device.
 * @pp_addr: producer port address
 */
static inline void os_fence_hcw(struct dlb2_hw *hw, u64 *pp_addr)
	/* To ensure outstanding HCWs reach the device, read the PP address. IA
	 * memory ordering prevents reads from passing older writes, and the
	 * mfence also ensures this.
	 */
	/* Volatile read forces the access; the result is intentionally unused. */
	*(volatile u64 *)pp_addr;
/**
 * DLB2_HW_ERR() - log an error message
 * @dlb2: dlb2_hw handle for a particular device.
 * @...: variable string args.
 */
#define DLB2_HW_ERR(dlb2, ...) do { \
	RTE_SET_USED(dlb2); \
	DLB2_ERR(dlb2, __VA_ARGS__); \
/**
 * DLB2_HW_DBG() - log a debug message
 * @dlb2: dlb2_hw handle for a particular device.
 * @...: variable string args.
 *
 * (Doc fix: previously said "info message"; the macro expands to DLB2_DEBUG.)
 */
#define DLB2_HW_DBG(dlb2, ...) do { \
	RTE_SET_USED(dlb2); \
	DLB2_DEBUG(dlb2, __VA_ARGS__); \
/* The callback runs until it completes all outstanding QID->CQ
 * map and unmap requests. To prevent deadlock, this function gives other
 * threads a chance to grab the resource mutex and configure hardware.
 */
static void *dlb2_complete_queue_map_unmap(void *__args)
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)__args;

	rte_spinlock_lock(&dlb2_dev->resource_mutex);

	/* Drive unfinished unmap procedures first, then map procedures.
	 * NOTE(review): 'ret' appears to accumulate outstanding work and to be
	 * consumed by the surrounding retry logic — confirm against full body.
	 */
	ret = dlb2_finish_unmap_qid_procedures(&dlb2_dev->hw);
	ret += dlb2_finish_map_qid_procedures(&dlb2_dev->hw);

	rte_spinlock_unlock(&dlb2_dev->resource_mutex);
	/* Relinquish the CPU so the application can process
	 * its CQs, so this function doesn't deadlock.
	 */

	/* All requests done: clear the flag read by os_worker_active(). */
	dlb2_dev->worker_launched = false;

	rte_spinlock_unlock(&dlb2_dev->resource_mutex);
/**
 * os_schedule_work() - launch a thread to process pending map and unmap work
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function launches a kernel thread that will run until all pending
 * map and unmap procedures are complete.
 */
static inline void os_schedule_work(struct dlb2_hw *hw)
	struct dlb2_dev *dlb2_dev;
	pthread_t complete_queue_map_unmap_thread;

	/* Recover the enclosing device struct from the embedded hw handle. */
	dlb2_dev = container_of(hw, struct dlb2_dev, hw);

	/* Spawn a DPDK control thread running the map/unmap completion loop. */
	ret = rte_ctrl_thread_create(&complete_queue_map_unmap_thread,
				     "dlb_queue_unmap_waiter",
				     dlb2_complete_queue_map_unmap,
		"Could not create queue complete map/unmap thread, err=%d\n",
	/* Mark the worker as running; cleared by the worker on completion. */
	dlb2_dev->worker_launched = true;
/**
 * os_worker_active() - query whether the map/unmap worker thread is active
 * @hw: dlb2_hw handle for a particular device.
 *
 * This function returns a boolean indicating whether a thread (launched by
 * os_schedule_work()) is active. This function is used to determine
 * whether or not to launch a worker thread.
 */
static inline bool os_worker_active(struct dlb2_hw *hw)
	struct dlb2_dev *dlb2_dev;

	dlb2_dev = container_of(hw, struct dlb2_dev, hw);

	/* Set by os_schedule_work(); cleared by the worker when all pending
	 * map/unmap procedures have finished.
	 */
	return dlb2_dev->worker_launched;
234 #endif /* __DLB2_OSDEP_H */