/* event/dlb2: add port setup
 * [dpdk.git] / drivers / event / dlb2 / pf / dlb2_pf.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2020 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <sys/mman.h>
9 #include <sys/fcntl.h>
10 #include <sys/time.h>
11 #include <errno.h>
12 #include <assert.h>
13 #include <unistd.h>
14 #include <string.h>
15 #include <rte_debug.h>
16 #include <rte_log.h>
17 #include <rte_dev.h>
18 #include <rte_devargs.h>
19 #include <rte_mbuf.h>
20 #include <rte_ring.h>
21 #include <rte_errno.h>
22 #include <rte_kvargs.h>
23 #include <rte_malloc.h>
24 #include <rte_cycles.h>
25 #include <rte_io.h>
26 #include <rte_pci.h>
27 #include <rte_bus_pci.h>
28 #include <rte_eventdev.h>
29 #include <rte_eventdev_pmd.h>
30 #include <rte_eventdev_pmd_pci.h>
31 #include <rte_memory.h>
32 #include <rte_string_fns.h>
33
34 #include "../dlb2_priv.h"
35 #include "../dlb2_iface.h"
36 #include "../dlb2_inline_fns.h"
37 #include "dlb2_main.h"
38 #include "base/dlb2_hw_types.h"
39 #include "base/dlb2_osdep.h"
40 #include "base/dlb2_resource.h"
41
42 static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
43
44 static void
45 dlb2_pf_low_level_io_init(void)
46 {
47         int i;
48         /* Addresses will be initialized at port create */
49         for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) {
50                 /* First directed ports */
51                 dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
52                 dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
53                 dlb2_port[i][DLB2_DIR_PORT].mmaped = true;
54
55                 /* Now load balanced ports */
56                 dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
57                 dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
58                 dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
59         }
60 }
61
static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	/* The PF interface needs no per-open setup; accept unconditionally. */
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}
70
71 static int
72 dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
73                            uint8_t *revision)
74 {
75         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
76
77         *revision = dlb2_dev->revision;
78
79         return 0;
80 }
81
82 static void
83 dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
84 {
85         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
86
87         dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
88         dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);
89 }
90
91 static int
92 dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
93                           struct dlb2_get_num_resources_args *rsrcs)
94 {
95         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
96
97         return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
98 }
99
100 static int
101 dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
102                          enum dlb2_cq_poll_modes *mode)
103 {
104         RTE_SET_USED(handle);
105
106         *mode = DLB2_CQ_POLL_MODE_SPARSE;
107
108         return 0;
109 }
110
111 static int
112 dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
113                             struct dlb2_create_sched_domain_args *arg)
114 {
115         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
116         struct dlb2_cmd_response response = {0};
117         int ret;
118
119         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
120
121         if (dlb2_dev->domain_reset_failed) {
122                 response.status = DLB2_ST_DOMAIN_RESET_FAILED;
123                 ret = -EINVAL;
124                 goto done;
125         }
126
127         ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
128         if (ret)
129                 goto done;
130
131 done:
132
133         arg->response = response;
134
135         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
136                   __func__, ret);
137
138         return ret;
139 }
140
141 static void
142 dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
143 {
144         struct dlb2_dev *dlb2_dev;
145         int ret;
146
147         dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
148         ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
149         if (ret)
150                 DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
151 }
152
153 static int
154 dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
155                          struct dlb2_create_ldb_queue_args *cfg)
156 {
157         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
158         struct dlb2_cmd_response response = {0};
159         int ret;
160
161         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
162
163         ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
164                                        handle->domain_id,
165                                        cfg,
166                                        &response);
167
168         cfg->response = response;
169
170         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
171                   __func__, ret);
172
173         return ret;
174 }
175
176 static int
177 dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
178                          struct dlb2_get_sn_occupancy_args *args)
179 {
180         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
181         struct dlb2_cmd_response response = {0};
182         int ret;
183
184         ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
185                                                        args->group);
186
187         response.id = ret;
188         response.status = 0;
189
190         args->response = response;
191
192         return ret;
193 }
194
195 static int
196 dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
197                           struct dlb2_get_sn_allocation_args *args)
198 {
199         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
200         struct dlb2_cmd_response response = {0};
201         int ret;
202
203         ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);
204
205         response.id = ret;
206         response.status = 0;
207
208         args->response = response;
209
210         return ret;
211 }
212
213 static int
214 dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
215                           struct dlb2_set_sn_allocation_args *args)
216 {
217         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
218         struct dlb2_cmd_response response = {0};
219         int ret;
220
221         ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
222                                               args->num);
223
224         response.status = 0;
225
226         args->response = response;
227
228         return ret;
229 }
230
231 static void *
232 dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
233                             size_t size, int align)
234 {
235         char mz_name[RTE_MEMZONE_NAMESIZE];
236         uint32_t core_id = rte_lcore_id();
237         unsigned int socket_id;
238
239         snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
240                  (unsigned long)rte_get_timer_cycles());
241         if (core_id == (unsigned int)LCORE_ID_ANY)
242                 core_id = rte_get_main_lcore();
243         socket_id = rte_lcore_to_socket_id(core_id);
244         *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
245                                          RTE_MEMZONE_IOVA_CONTIG, align);
246         if (*mz == NULL) {
247                 DLB2_LOG_DBG("Unable to allocate DMA memory of size %zu bytes - %s\n",
248                              size, rte_strerror(rte_errno));
249                 *phys = 0;
250                 return NULL;
251         }
252         *phys = (*mz)->iova;
253         return (*mz)->addr;
254 }
255
/* Create a load-balanced port: allocate and pin DMA memory for its CQ,
 * program the port in hardware, then publish the producer-port (PP) and CQ
 * addresses in the process-local dlb2_port[] table. Returns 0 on success or
 * a negative errno; on failure the CQ memzone is freed.
 */
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = false;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	/* In sparse mode each QE occupies a full cache line; in standard
	 * mode QEs are packed at their natural size.
	 */
	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line. Never allocate less than the hardware minimum CQ
	 * depth, since the device assumes at least that many slots.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	/* Page-aligned, IOVA-contiguous memory the device can DMA into */
	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						PAGE_SIZE);
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	/* Program the port in hardware; response.id is the new port id */
	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	/* Publish the per-port producer-port MMIO address (one page per
	 * port off the function's PP base) and the CQ base for this process.
	 */
	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (PAGE_SIZE * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	/* Keep the memzone handle so the memory can be freed later */
	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);
	return ret;
}
331
332 static int
333 dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
334                         struct dlb2_create_dir_port_args *cfg,
335                         enum dlb2_cq_poll_modes poll_mode)
336 {
337         struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
338         struct dlb2_cmd_response response = {0};
339         struct dlb2_port_memory port_memory;
340         int ret;
341         uint8_t *port_base;
342         const struct rte_memzone *mz;
343         int alloc_sz, qe_sz;
344         phys_addr_t cq_base;
345         phys_addr_t pp_base;
346         int is_dir = true;
347
348         DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
349
350         if (poll_mode == DLB2_CQ_POLL_MODE_STD)
351                 qe_sz = sizeof(struct dlb2_dequeue_qe);
352         else
353                 qe_sz = RTE_CACHE_LINE_SIZE;
354
355         /* Calculate the port memory required, and round up to the nearest
356          * cache line.
357          */
358         alloc_sz = cfg->cq_depth * qe_sz;
359         alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
360
361         port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
362                                                 PAGE_SIZE);
363         if (port_base == NULL)
364                 return -ENOMEM;
365
366         /* Lock the page in memory */
367         ret = rte_mem_lock_page(port_base);
368         if (ret < 0) {
369                 DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o\n");
370                 goto create_port_err;
371         }
372
373         memset(port_base, 0, alloc_sz);
374
375         ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
376                                       handle->domain_id,
377                                       cfg,
378                                       cq_base,
379                                       &response);
380         if (ret)
381                 goto create_port_err;
382
383         pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
384         dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
385                 (void *)(pp_base + (PAGE_SIZE * response.id));
386
387         dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
388                 (void *)(port_base);
389         memset(&port_memory, 0, sizeof(port_memory));
390
391         dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;
392
393         dlb2_list_init_head(&port_memory.list);
394
395         cfg->response = response;
396
397         return 0;
398
399 create_port_err:
400
401         rte_memzone_free(mz);
402
403         DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
404                   __func__, ret);
405
406         return ret;
407 }
408
/* Wire the PF implementations into the shared dlb2_iface_* function-pointer
 * table used by the device-independent PMD code in ../dlb2_priv.
 */
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
}
427
/* PCI DEV HOOKS */
/* Eventdev init callback invoked from rte_event_pmd_pci_probe_named().
 * In the primary process: probe the PF hardware layer, parse any devargs,
 * and complete the primary eventdev probe. In secondary processes: attach
 * to the already-probed device. Returns 0 on success, negative errno on
 * failure.
 */
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	/* Defaults used when a parameter is not given on the command line */
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.cos_id = DLB2_COS_DEFAULT
	};
	struct dlb2_eventdev *dlb2;

	DLB2_LOG_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	/* Must run before any dlb2_iface_* pointer is dereferenced */
	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d\n",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		/* Secondary process: attach to the primary's device state */
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success\n");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d\n", ret);

	return ret;
}
495
#define EVENTDEV_INTEL_VENDOR_ID 0x8086

/* PCI device IDs claimed by this PF driver; the zeroed entry terminates
 * the table.
 */
static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0,
	},
};
507
508 static int
509 event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
510                      struct rte_pci_device *pci_dev)
511 {
512         int ret;
513
514         ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
515                                              sizeof(struct dlb2_eventdev),
516                                              dlb2_eventdev_pci_init,
517                                              event_dlb2_pf_name);
518         if (ret) {
519                 DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
520                                 "ret=%d\n", ret);
521         }
522
523         return ret;
524 }
525
526 static int
527 event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
528 {
529         int ret;
530
531         ret = rte_event_pmd_pci_remove(pci_dev, NULL);
532
533         if (ret) {
534                 DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
535                                 "ret=%d\n", ret);
536         }
537
538         return ret;
539
540 }
541
/* PCI driver descriptor: requires BAR mapping so the PP/CQ MMIO regions
 * are accessible (see func_kva usage in the port-create paths).
 */
static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

/* Register the driver and its PCI ID table with the PCI bus */
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);