/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"

#define NUM_HOST_CPUS RTE_MAX_LCORE
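
/* One software portal slot per possible lcore, plus a per-lcore (TLS)
 * shadow copy so the fast path can reach its portal without indexing
 * the global table.
 */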
struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);

TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
static uint32_t io_space_count;

/* Stashing defaults for the LS208x platform */
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;

/* On the LS208x platform there are four clusters with the following mapping:
 * Cluster 1 (ID = 0x04) : CPU0, CPU1;
 * Cluster 2 (ID = 0x05) : CPU2, CPU3;
 * Cluster 3 (ID = 0x06) : CPU4, CPU5;
 * Cluster 4 (ID = 0x07) : CPU6, CPU7;
 */
/* On the LS108x platform there are two clusters with the following mapping:
 * Cluster 1 (ID = 0x02) : CPU0, CPU1, CPU2, CPU3;
 * Cluster 2 (ID = 0x03) : CPU4, CPU5, CPU6, CPU7;
 */

/* Set the STASH destination depending on the current CPU ID.
 * e.g. valid values of SDEST are 4, 5, 6 and 7, where
 * CPU 0-1 get SDEST 4,
 * CPU 2-3 get SDEST 5, ...and so on.
 */
static int
dpaa2_core_cluster_sdest(int cpu_id)
{
        int x = cpu_id / dpaa2_cluster_sz;

        /* Clamp to the last cluster so SDEST stays within the valid range */
        if (x > 3)
                x = 3;

        return dpaa2_core_cluster_base + x;
}
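
/* Worked example with the LS208x defaults above (base 0x04, cluster
 * size 2): cpu_id 5 -> 5 / 2 = 2 -> SDEST = 0x04 + 2 = 0x06, i.e.
 * CPU4/CPU5 stash to cluster 3, matching the mapping table.
 */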

static int
configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
{
        struct qbman_swp_desc p_des;
        struct dpio_attr attr;

        dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
        if (!dpio_dev->dpio) {
                PMD_INIT_LOG(ERR, "Memory allocation failure\n");
                return -1;
        }

        PMD_DRV_LOG(DEBUG, "\t Allocated DPIO Portal[%p]", dpio_dev->dpio);
        dpio_dev->dpio->regs = dpio_dev->mc_portal;
        if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
                      &dpio_dev->token)) {
                PMD_INIT_LOG(ERR, "Failed to allocate IO space\n");
                free(dpio_dev->dpio);
                return -1;
        }

        if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
                PMD_INIT_LOG(ERR, "Failed to reset dpio\n");
                dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
                free(dpio_dev->dpio);
                return -1;
        }

        if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
                PMD_INIT_LOG(ERR, "Failed to enable dpio\n");
                dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
                free(dpio_dev->dpio);
                return -1;
        }

        if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
                                dpio_dev->token, &attr)) {
                PMD_INIT_LOG(ERR, "DPIO get attribute failed\n");
                dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
                dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
                free(dpio_dev->dpio);
                return -1;
        }

        PMD_INIT_LOG(DEBUG, "QBMan Portal ID %d", attr.qbman_portal_id);
        PMD_INIT_LOG(DEBUG, "Portal CE addr 0x%lX", attr.qbman_portal_ce_offset);
        PMD_INIT_LOG(DEBUG, "Portal CI addr 0x%lX", attr.qbman_portal_ci_offset);

        /* Configure and set up the software portal */
        memset(&p_des, 0, sizeof(p_des));
        p_des.idx = attr.qbman_portal_id;
        p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
        p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
        p_des.qman_version = attr.qbman_version;

        dpio_dev->sw_portal = qbman_swp_init(&p_des);
        if (dpio_dev->sw_portal == NULL) {
                PMD_DRV_LOG(ERR, "QBMan SW Portal Init failed\n");
                dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
                free(dpio_dev->dpio);
                return -1;
        }

        PMD_INIT_LOG(DEBUG, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);

        return 0;
}

static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
{
        int sdest;
        int cpu_id, ret;

        /* Set the stashing destination to the cluster of the current CPU */
        cpu_id = rte_lcore_id();
        if (cpu_id < 0) {
                /* Non-EAL thread: fall back to the master lcore */
                cpu_id = rte_get_master_lcore();
                if (cpu_id < 0) {
                        RTE_LOG(ERR, PMD, "\tGetting CPU index failed\n");
                        return -1;
                }
        }
        /* Valid values of SDEST are 4, 5, 6 and 7: CPU 0-1 get SDEST 4,
         * CPU 2-3 get SDEST 5, ...and so on.
         */
        sdest = dpaa2_core_cluster_sdest(cpu_id);
        PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d",
                    dpio_dev->index, cpu_id, sdest);

        ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
                                            dpio_dev->token, sdest);
        if (ret) {
                PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
                return -1;
        }

        return 0;
}
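
/* Claim the first free DPIO device on the list by atomically taking its
 * ref_count, then point its stashing destination at the caller's core.
 */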
static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
{
        struct dpaa2_dpio_dev *dpio_dev = NULL;
        int ret;

        /* Get the DPIO device handle from the list using the index */
        TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
                if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
                        break;
        }
        if (!dpio_dev)
                return NULL;
        PMD_DRV_LOG(DEBUG, "New Portal=%p (%d) affined thread - %lu",
                    dpio_dev, dpio_dev->index, syscall(SYS_gettid));
        ret = dpaa2_configure_stashing(dpio_dev);
        if (ret)
                PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
        return dpio_dev;
}
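
/* Affine the calling thread to a QBMan software portal for the network
 * data path: reuse the portal already recorded for this lcore if there
 * is one (bumping its ref_count), otherwise claim a fresh DPIO device
 * and record it in both the per-lcore table and the TLS copy.
 */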
int
dpaa2_affine_qbman_swp(void)
{
        unsigned int lcore_id = rte_lcore_id();
        uint64_t tid = syscall(SYS_gettid);

        if (lcore_id == LCORE_ID_ANY)
                lcore_id = rte_get_master_lcore();
        /* if the core id is not supported */
        else if (lcore_id >= RTE_MAX_LCORE)
                return -1;

        if (dpaa2_io_portal[lcore_id].dpio_dev) {
                PMD_DRV_LOG(INFO, "DPAA Portal=%p (%d) is being shared"
                            " between thread %lu and current %lu",
                            dpaa2_io_portal[lcore_id].dpio_dev,
                            dpaa2_io_portal[lcore_id].dpio_dev->index,
                            dpaa2_io_portal[lcore_id].net_tid,
                            tid);
                RTE_PER_LCORE(_dpaa2_io).dpio_dev
                        = dpaa2_io_portal[lcore_id].dpio_dev;
                rte_atomic16_inc(&dpaa2_io_portal
                                 [lcore_id].dpio_dev->ref_count);
                dpaa2_io_portal[lcore_id].net_tid = tid;

                PMD_DRV_LOG(DEBUG, "Old Portal=%p (%d) affined thread - %lu",
                            dpaa2_io_portal[lcore_id].dpio_dev,
                            dpaa2_io_portal[lcore_id].dpio_dev->index,
                            tid);
                return 0;
        }

        /* Populate the dpaa2_io_portal structure */
        dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp();
        if (dpaa2_io_portal[lcore_id].dpio_dev) {
                RTE_PER_LCORE(_dpaa2_io).dpio_dev
                        = dpaa2_io_portal[lcore_id].dpio_dev;
                dpaa2_io_portal[lcore_id].net_tid = tid;
                return 0;
        }

        return -1;
}
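
/* Same affinement logic as dpaa2_affine_qbman_swp(), but for the
 * separate per-lcore 'sec' portal (sec_dpio_dev/sec_tid), so the
 * SEC/crypto path does not share a portal with the network path.
 */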
int
dpaa2_affine_qbman_swp_sec(void)
{
        unsigned int lcore_id = rte_lcore_id();
        uint64_t tid = syscall(SYS_gettid);

        if (lcore_id == LCORE_ID_ANY)
                lcore_id = rte_get_master_lcore();
        /* if the core id is not supported */
        else if (lcore_id >= RTE_MAX_LCORE)
                return -1;

        if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
                PMD_DRV_LOG(INFO, "DPAA Portal=%p (%d) is being shared"
                            " between thread %lu and current %lu",
                            dpaa2_io_portal[lcore_id].sec_dpio_dev,
                            dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
                            dpaa2_io_portal[lcore_id].sec_tid,
                            tid);
                RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
                        = dpaa2_io_portal[lcore_id].sec_dpio_dev;
                rte_atomic16_inc(&dpaa2_io_portal
                                 [lcore_id].sec_dpio_dev->ref_count);
                dpaa2_io_portal[lcore_id].sec_tid = tid;

                PMD_DRV_LOG(DEBUG, "Old Portal=%p (%d) affined thread - %lu",
                            dpaa2_io_portal[lcore_id].sec_dpio_dev,
                            dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
                            tid);
                return 0;
        }

        /* Populate the dpaa2_io_portal structure */
        dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp();
        if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
                RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
                        = dpaa2_io_portal[lcore_id].sec_dpio_dev;
                dpaa2_io_portal[lcore_id].sec_tid = tid;
                return 0;
        }

        return -1;
}
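
/* Called for each DPIO object found on the fslmc VFIO bus: map the two
 * VFIO regions backing the portal (cache-enabled and cache-inhibited
 * areas), configure a QBMan software portal on top of them, and add the
 * device to the global DPIO list.
 */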
int
dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
                         struct vfio_device_info *obj_info,
                         int object_id)
{
        struct dpaa2_dpio_dev *dpio_dev;
        struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};

        if (obj_info->num_regions < NUM_DPIO_REGIONS) {
                PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
                             "of DPIO regions.\n");
                return -1;
        }

        if (!dpio_dev_list) {
                dpio_dev_list = malloc(sizeof(struct dpio_device_list));
                if (!dpio_dev_list) {
                        PMD_INIT_LOG(ERR, "Memory alloc failed in DPIO list\n");
                        return -1;
                }
                /* Initialize the DPIO list */
                TAILQ_INIT(dpio_dev_list);
        }

        dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
        if (!dpio_dev) {
                PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO device\n");
                return -1;
        }

        PMD_DRV_LOG(INFO, "\t Allocated DPIO [%p]", dpio_dev);
        dpio_dev->dpio = NULL;
        dpio_dev->hw_id = object_id;
        dpio_dev->vfio_fd = vdev->fd;
        rte_atomic16_init(&dpio_dev->ref_count);
        /* Using a single MC portal for all devices */
        dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];

        reg_info.index = 0;
        if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
                PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
                free(dpio_dev);
                return -1;
        }

        PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
        PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
        dpio_dev->ce_size = reg_info.size;
        dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
                                PROT_WRITE | PROT_READ, MAP_SHARED,
                                dpio_dev->vfio_fd, reg_info.offset);

        /* Create a mapping for the QBMan cache-enabled area. This is a fix
         * for an SMMU fault seen on DQRR stashing transactions.
         */
        if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
                                   reg_info.offset, reg_info.size)) {
                PMD_INIT_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
                free(dpio_dev);
                return -1;
        }

        reg_info.index = 1;
        if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
                PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
                free(dpio_dev);
                return -1;
        }

        PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
        PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
        dpio_dev->ci_size = reg_info.size;
        dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
                                PROT_WRITE | PROT_READ, MAP_SHARED,
                                dpio_dev->vfio_fd, reg_info.offset);

        if (configure_dpio_qbman_swp(dpio_dev)) {
                PMD_INIT_LOG(ERR,
                             "Fail to configure the dpio qbman portal for %d\n",
                             dpio_dev->hw_id);
                free(dpio_dev);
                return -1;
        }

        io_space_count++;
        dpio_dev->index = io_space_count;
        TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);

        return 0;
}
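
/* Helpers for per-queue dequeue-result storage: NUM_DQS_PER_QUEUE
 * buffers per queue, each sized to hold a full DQRR ring of struct
 * qbman_result entries written by QBMan on dequeue.
 */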
void
dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
{
        int i;

        for (i = 0; i < NUM_DQS_PER_QUEUE; i++)
                rte_free(q_storage->dq_storage[i]); /* rte_free(NULL) is a no-op */
}

int
dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
{
        int i;

        for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
                q_storage->dq_storage[i] = rte_malloc(NULL,
                        DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
                        RTE_CACHE_LINE_SIZE);
                if (!q_storage->dq_storage[i])
                        goto fail;
        }
        return 0;
fail:
        /* Unwind: free whatever was allocated before the failure */
        while (--i >= 0)
                rte_free(q_storage->dq_storage[i]);
        return -1;
}