/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2018 NXP
 *
 */
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <fslmc_logs.h>
#include <rte_fslmc.h>
#include "dpaa2_hw_pvt.h"
#include "dpaa2_hw_dpio.h"
#include <mc/fsl_dpmng.h>
#define NUM_HOST_CPUS RTE_MAX_LCORE

struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);

struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];

TAILQ_HEAD(dpio_dev_list, dpaa2_dpio_dev);
static struct dpio_dev_list dpio_dev_list
	= TAILQ_HEAD_INITIALIZER(dpio_dev_list); /*!< DPIO device list */
static uint32_t io_space_count;
/* Variable to store the DPAA2 platform type */
uint32_t dpaa2_svr_family;

/* Stashing defaults for LS208x */
static int dpaa2_core_cluster_base = 0x04;
static int dpaa2_cluster_sz = 2;
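/*
 * Note: these LS208x defaults are overwritten in dpaa2_create_dpio_device()
 * once the SoC family has been identified via mc_get_soc_version().
 */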
/* For the LS208x platform there are four clusters with the following mapping:
 * Cluster 1 (ID = x04) : CPU0, CPU1;
 * Cluster 2 (ID = x05) : CPU2, CPU3;
 * Cluster 3 (ID = x06) : CPU4, CPU5;
 * Cluster 4 (ID = x07) : CPU6, CPU7;
 */
/* For the LS108x platform there are two clusters with the following mapping:
 * Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
 * Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
 */
/* For the LX2160 platform there are eight clusters with the following mapping:
 * Cluster 1 (ID = x00) : CPU0, CPU1;
 * Cluster 2 (ID = x01) : CPU2, CPU3;
 * Cluster 3 (ID = x02) : CPU4, CPU5;
 * Cluster 4 (ID = x03) : CPU6, CPU7;
 * Cluster 5 (ID = x04) : CPU8, CPU9;
 * Cluster 6 (ID = x05) : CPU10, CPU11;
 * Cluster 7 (ID = x06) : CPU12, CPU13;
 * Cluster 8 (ID = x07) : CPU14, CPU15;
 */
static int
dpaa2_core_cluster_sdest(int cpu_id)
{
	int x = cpu_id / dpaa2_cluster_sz;

	return dpaa2_core_cluster_base + x;
}
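/*
 * Illustrative example: with the LS208x defaults above (cluster size 2,
 * base 0x04), cpu_id 5 maps to cluster index 5 / 2 = 2, giving
 * SDEST = 0x04 + 2 = 0x06, i.e. cluster 3 (CPU4, CPU5) in the table above.
 */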
#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
{
#define STRING_LEN	28
#define COMMAND_LEN	50
	uint32_t cpu_mask = 1;
	int ret;
	size_t len = 0;
	char *temp = NULL, *token = NULL;
	char string[STRING_LEN], command[COMMAND_LEN];
	FILE *file;

	/* Look up the IRQ number assigned to this DPIO portal */
	snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
	file = fopen("/proc/interrupts", "r");
	if (!file) {
		DPAA2_BUS_WARN("Failed to open /proc/interrupts file");
		return;
	}
	while (getline(&temp, &len, file) != -1) {
		if ((strstr(temp, string)) != NULL) {
			token = strtok(temp, ":");
			break;
		}
	}
	if (!token) {
		DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d", dpio_id);
		free(temp);
		fclose(file);
		return;
	}

	/* Pin the IRQ to the lcore that owns this portal */
	cpu_mask = cpu_mask << rte_lcore_id();
	snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
		 cpu_mask, token);
	ret = system(command);
	if (ret < 0)
		DPAA2_BUS_WARN("Failed to affine interrupts on respective core");
	else
		DPAA2_BUS_DEBUG(" %s command is executed", command);

	free(temp);
	fclose(file);
}
static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
{
	struct epoll_event epoll_ev;
	int eventfd, dpio_epoll_fd, ret;
	int threshold = 0x3, timeout = 0xFF;

	dpio_epoll_fd = epoll_create(1);
	ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
	if (ret) {
		DPAA2_BUS_ERR("Interrupt registration failed");
		return -1;
	}

	if (getenv("DPAA2_PORTAL_INTR_THRESHOLD"))
		threshold = atoi(getenv("DPAA2_PORTAL_INTR_THRESHOLD"));

	if (getenv("DPAA2_PORTAL_INTR_TIMEOUT"))
		sscanf(getenv("DPAA2_PORTAL_INTR_TIMEOUT"), "%x", &timeout);

	qbman_swp_interrupt_set_trigger(dpio_dev->sw_portal,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(dpio_dev->sw_portal, 0xffffffff);
	qbman_swp_interrupt_set_inhibit(dpio_dev->sw_portal, 0);
	qbman_swp_dqrr_thrshld_write(dpio_dev->sw_portal, threshold);
	qbman_swp_intr_timeout_write(dpio_dev->sw_portal, timeout);

	eventfd = dpio_dev->intr_handle.fd;
	epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
	epoll_ev.data.fd = eventfd;

	ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
	if (ret < 0) {
		DPAA2_BUS_ERR("epoll_ctl failed");
		return -1;
	}
	dpio_dev->epoll_fd = dpio_epoll_fd;

	dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id);

	return 0;
}
#endif
static int
dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
{
	int sdest, ret;

	/* Set the Stashing Destination */
	if (cpu_id < 0) {
		cpu_id = rte_get_master_lcore();
		if (cpu_id < 0) {
			DPAA2_BUS_ERR("Getting CPU Index failed");
			return -1;
		}
	}
	/* Set the STASH Destination depending on the current CPU ID.
	 * Valid values of SDEST are 4, 5, 6, 7.
	 */
	sdest = dpaa2_core_cluster_sdest(cpu_id);
	DPAA2_BUS_DEBUG("Portal= %d  CPU= %u SDEST= %d",
			dpio_dev->index, cpu_id, sdest);

	ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
					    dpio_dev->token, sdest);
	if (ret) {
		DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
		return -1;
	}

#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
	if (dpaa2_dpio_intr_init(dpio_dev)) {
		DPAA2_BUS_ERR("Interrupt registration failed for dpio");
		return -1;
	}
#endif
	return 0;
}
struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
{
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	int ret;

	/* Get a free DPIO dev handle from the list */
	TAILQ_FOREACH(dpio_dev, &dpio_dev_list, next) {
		if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
			break;
	}
	if (!dpio_dev)
		return NULL;

	DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
			dpio_dev, dpio_dev->index, syscall(SYS_gettid));

	ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
	if (ret)
		DPAA2_BUS_ERR("dpaa2_configure_stashing failed");

	return dpio_dev;
}
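/*
 * Usage sketch: a thread is expected to call dpaa2_affine_qbman_swp() before
 * issuing portal operations; on success RTE_PER_LCORE(_dpaa2_io).dpio_dev
 * points to a portal whose ref_count has been taken for this lcore.
 */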
int
dpaa2_affine_qbman_swp(void)
{
	unsigned int lcore_id = rte_lcore_id();
	uint64_t tid = syscall(SYS_gettid);

	if (lcore_id == LCORE_ID_ANY)
		lcore_id = rte_get_master_lcore();
	/* if the core id is not supported */
	else if (lcore_id >= RTE_MAX_LCORE)
		return -1;

	if (dpaa2_io_portal[lcore_id].dpio_dev) {
		DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
				  " between thread %" PRIu64 " and current "
				  "%" PRIu64 "\n",
				  dpaa2_io_portal[lcore_id].dpio_dev,
				  dpaa2_io_portal[lcore_id].dpio_dev->index,
				  dpaa2_io_portal[lcore_id].net_tid,
				  tid);
		RTE_PER_LCORE(_dpaa2_io).dpio_dev
			= dpaa2_io_portal[lcore_id].dpio_dev;
		rte_atomic16_inc(&dpaa2_io_portal
				 [lcore_id].dpio_dev->ref_count);
		dpaa2_io_portal[lcore_id].net_tid = tid;

		DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
				   "%" PRIu64 "\n",
				   dpaa2_io_portal[lcore_id].dpio_dev,
				   dpaa2_io_portal[lcore_id].dpio_dev->index,
				   tid);
		return 0;
	}

	/* Populate the dpaa2_io_portal structure */
	dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);

	if (dpaa2_io_portal[lcore_id].dpio_dev) {
		RTE_PER_LCORE(_dpaa2_io).dpio_dev
			= dpaa2_io_portal[lcore_id].dpio_dev;
		dpaa2_io_portal[lcore_id].net_tid = tid;
		return 0;
	} else {
		return -1;
	}
}
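/*
 * dpaa2_affine_qbman_ethrx_swp() below mirrors the same logic for the
 * dedicated ethernet-Rx portal, tracked per lcore via ethrx_dpio_dev/sec_tid.
 */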
int
dpaa2_affine_qbman_ethrx_swp(void)
{
	unsigned int lcore_id = rte_lcore_id();
	uint64_t tid = syscall(SYS_gettid);

	if (lcore_id == LCORE_ID_ANY)
		lcore_id = rte_get_master_lcore();
	/* if the core id is not supported */
	else if (lcore_id >= RTE_MAX_LCORE)
		return -1;

	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
		DPAA2_BUS_DP_INFO(
			"DPAA Portal=%p (%d) is being shared between thread"
			" %" PRIu64 " and current %" PRIu64 "\n",
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
			dpaa2_io_portal[lcore_id].sec_tid,
			tid);
		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
		rte_atomic16_inc(&dpaa2_io_portal
				 [lcore_id].ethrx_dpio_dev->ref_count);
		dpaa2_io_portal[lcore_id].sec_tid = tid;

		DPAA2_BUS_DP_DEBUG(
			"Old Portal=%p (%d) affined thread"
			" - %" PRIu64 "\n",
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
			tid);
		return 0;
	}

	/* Populate the dpaa2_io_portal structure */
	dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
		dpaa2_get_qbman_swp(lcore_id);

	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
		dpaa2_io_portal[lcore_id].sec_tid = tid;
		return 0;
	} else {
		return -1;
	}
}
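/*
 * Probe callback for DPIO objects: the fslmc bus invokes this through the
 * rte_dpaa2_dpio_obj registration at the bottom of this file to map the
 * portal regions over VFIO and initialise the QBMan software portal.
 */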
static int
dpaa2_create_dpio_device(int vdev_fd,
			 struct vfio_device_info *obj_info,
			 int object_id)
{
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
	struct qbman_swp_desc p_des;
	struct dpio_attr attr;

	if (obj_info->num_regions < NUM_DPIO_REGIONS) {
		DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
		return -1;
	}

	dpio_dev = rte_zmalloc(NULL, sizeof(struct dpaa2_dpio_dev),
			       RTE_CACHE_LINE_SIZE);
	if (!dpio_dev) {
		DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
		return -1;
	}

	dpio_dev->dpio = NULL;
	dpio_dev->hw_id = object_id;
	rte_atomic16_init(&dpio_dev->ref_count);
	/* Using single portal for all devices */
	dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];

	dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
	if (!dpio_dev->dpio) {
		DPAA2_BUS_ERR("Memory allocation failure");
		goto err;
	}

	dpio_dev->dpio->regs = dpio_dev->mc_portal;
	if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
		      &dpio_dev->token)) {
		DPAA2_BUS_ERR("Failed to allocate IO space");
		goto err;
	}

	if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
		DPAA2_BUS_ERR("Failed to reset dpio");
		goto err;
	}

	if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
		DPAA2_BUS_ERR("Failed to Enable dpio");
		goto err;
	}

	if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
				dpio_dev->token, &attr)) {
		DPAA2_BUS_ERR("DPIO Get attribute failed");
		goto err;
	}

	/* find the SoC type for the first time */
	if (!dpaa2_svr_family) {
		struct mc_soc_version mc_plat_info = {0};

		if (mc_get_soc_version(dpio_dev->dpio,
				       CMD_PRI_LOW, &mc_plat_info)) {
			DPAA2_BUS_ERR("Unable to get SoC version information");
		} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
			dpaa2_core_cluster_base = 0x02;
			dpaa2_cluster_sz = 4;
			DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected");
		} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
			dpaa2_core_cluster_base = 0x00;
			dpaa2_cluster_sz = 2;
			DPAA2_BUS_DEBUG("LX2160 Platform Detected");
		}
		dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
	}

	if (dpaa2_svr_family == SVR_LX2160A)
		reg_info.index = DPAA2_SWP_CENA_MEM_REGION;
	else
		reg_info.index = DPAA2_SWP_CENA_REGION;

	if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
		DPAA2_BUS_ERR("vfio: error getting region info");
		goto err;
	}

	dpio_dev->ce_size = reg_info.size;
	dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
				PROT_WRITE | PROT_READ, MAP_SHARED,
				vdev_fd, reg_info.offset);

	reg_info.index = DPAA2_SWP_CINH_REGION;
	if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
		DPAA2_BUS_ERR("vfio: error getting region info");
		goto err;
	}

	dpio_dev->ci_size = reg_info.size;
	dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
				PROT_WRITE | PROT_READ, MAP_SHARED,
				vdev_fd, reg_info.offset);

	/* Configure & setup SW portal */
	p_des.idx = attr.qbman_portal_id;
	p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
	p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
	p_des.qman_version = attr.qbman_version;

	dpio_dev->sw_portal = qbman_swp_init(&p_des);
	if (dpio_dev->sw_portal == NULL) {
		DPAA2_BUS_ERR("QBMan SW Portal Init failed");
		goto err;
	}

	io_space_count++;
	dpio_dev->index = io_space_count;

	if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
		DPAA2_BUS_ERR("Fail to setup interrupt for %d",
			      dpio_dev->hw_id);
		goto err;
	}

	TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);

	return 0;

err:
	if (dpio_dev->dpio) {
		dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
		dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
		free(dpio_dev->dpio);
	}
	rte_free(dpio_dev);

	return -1;
}
void
dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
{
	int i = 0;

	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
		if (q_storage->dq_storage[i])
			rte_free(q_storage->dq_storage[i]);
	}
}

int
dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
{
	int i = 0;

	for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
		q_storage->dq_storage[i] = rte_malloc(NULL,
			DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
			RTE_CACHE_LINE_SIZE);
		if (!q_storage->dq_storage[i])
			goto fail;
	}
	return 0;
fail:
	while (--i >= 0)
		rte_free(q_storage->dq_storage[i]);

	return -1;
}
static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
	.dev_type = DPAA2_IO,
	.create = dpaa2_create_dpio_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpio, rte_dpaa2_dpio_obj);