/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/fcntl.h>
#include <sys/time.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <rte_debug.h>
#include <rte_log.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "../dlb_priv.h"
#include "../dlb_iface.h"
#include "../dlb_inline_fns.h"
#include "dlb_main.h"
#include "base/dlb_hw_types.h"
#include "base/dlb_osdep.h"
#include "base/dlb_resource.h"

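/*
 * Clear the cached per-port I/O addresses (producer port, popcount pages,
 * and consumer queue base) in the global dlb_port[][] table for every
 * directed and load-balanced port. The real addresses are filled in when
 * each port is created.
 */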
static void
dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
{
        int i;

        /* Addresses will be initialized at port create */
        for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
                /* First directed ports */

                /* producer port */
                dlb_port[i][DLB_DIR].pp_addr = NULL;

                /* popcount */
                dlb_port[i][DLB_DIR].ldb_popcount = NULL;
                dlb_port[i][DLB_DIR].dir_popcount = NULL;

                /* consumer queue */
                dlb_port[i][DLB_DIR].cq_base = NULL;
                dlb_port[i][DLB_DIR].mmaped = true;

                /* Now load balanced ports */

                /* producer port */
                dlb_port[i][DLB_LDB].pp_addr = NULL;

                /* popcount */
                dlb_port[i][DLB_LDB].ldb_popcount = NULL;
                dlb_port[i][DLB_LDB].dir_popcount = NULL;

                /* consumer queue */
                dlb_port[i][DLB_LDB].cq_base = NULL;
                dlb_port[i][DLB_LDB].mmaped = true;
        }
}

static int
dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
{
        RTE_SET_USED(handle);
        RTE_SET_USED(name);

        return 0;
}

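/*
 * Reset the hardware scheduling domain owned by this eventdev. Any error
 * is logged but not propagated, since the close callback returns void.
 */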
static void
dlb_pf_domain_close(struct dlb_eventdev *dlb)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
        int ret;

        ret = dlb_reset_domain(&dlb_dev->hw, dlb->qm_instance.domain_id);
        if (ret)
                DLB_LOG_ERR("dlb_pf_reset_domain err %d", ret);
}

static int
dlb_pf_get_device_version(struct dlb_hw_dev *handle,
                          uint8_t *revision)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

        *revision = dlb_dev->revision;

        return 0;
}

static int
dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
                         struct dlb_get_num_resources_args *rsrcs)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

        dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs);

        return 0;
}

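/*
 * Create a hardware scheduling domain. If an earlier domain reset failed,
 * refuse to hand out a new domain and report DLB_ST_DOMAIN_RESET_FAILED
 * through the command response.
 */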
static int
dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
                           struct dlb_create_sched_domain_args *arg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        if (dlb_dev->domain_reset_failed) {
                response.status = DLB_ST_DOMAIN_RESET_FAILED;
                ret = -EINVAL;
                goto done;
        }

        ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response);

done:
        *(struct dlb_cmd_response *)arg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

static int
dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
                              struct dlb_create_ldb_pool_args *cfg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
                                     handle->domain_id,
                                     cfg,
                                     &response);

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

static int
dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
                              struct dlb_create_dir_pool_args *cfg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
                                     handle->domain_id,
                                     cfg,
                                     &response);

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

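/*
 * Report the CQ polling mode. Revision B0 and later support "sparse"
 * mode, in which dequeue QEs are written at cache-line granularity (see
 * the qe_sz selection in the port-create paths below); earlier steppings
 * use the standard packed QE layout.
 */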
static int
dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
                        enum dlb_cq_poll_modes *mode)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;

        if (dlb_dev->revision >= DLB_REV_B0)
                *mode = DLB_CQ_POLL_MODE_SPARSE;
        else
                *mode = DLB_CQ_POLL_MODE_STD;

        return 0;
}

static int
dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
                        struct dlb_create_ldb_queue_args *cfg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
                                      handle->domain_id,
                                      cfg,
                                      &response);

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

static int
dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
                        struct dlb_create_dir_queue_args *cfg)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
                                      handle->domain_id,
                                      cfg,
                                      &response);

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        return ret;
}

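/*
 * Reserve an IOVA-contiguous, aligned memzone for device DMA and return
 * both its virtual address and its IOVA. The zone name embeds the current
 * timer-cycle count to keep repeated allocations unique.
 */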
static void *
dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
                           size_t size, int align)
{
        char mz_name[RTE_MEMZONE_NAMESIZE];
        uint32_t core_id = rte_lcore_id();
        unsigned int socket_id;

        snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
                 (unsigned long)rte_get_timer_cycles());
        if (core_id == (unsigned int)LCORE_ID_ANY)
                core_id = rte_get_main_lcore();
        socket_id = rte_lcore_to_socket_id(core_id);
        *mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
                                          RTE_MEMZONE_IOVA_CONTIG, align);
        if (*mz == NULL) {
                DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
                            size);
                *phys = 0;
                return NULL;
        }
        *phys = (*mz)->iova;
        return (*mz)->addr;
}

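/*
 * Create a load-balanced port. The port's DMA region is laid out as two
 * cache lines of credit pop counts (load-balanced, then directed) followed
 * by the consumer queue, so cq_dma_base sits 2 * RTE_CACHE_LINE_SIZE past
 * the popcount base. On success the region stays mapped for the life of
 * the port; the memzone is freed only on the error path.
 */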
static int
dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
                       struct dlb_create_ldb_port_args *cfg,
                       enum dlb_cq_poll_modes poll_mode)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;
        uint8_t *port_base;
        const struct rte_memzone *mz;
        int alloc_sz, qe_sz, cq_alloc_depth;
        rte_iova_t pp_dma_base;
        rte_iova_t pc_dma_base;
        rte_iova_t cq_dma_base;
        int is_dir = false;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        if (poll_mode == DLB_CQ_POLL_MODE_STD)
                qe_sz = sizeof(struct dlb_dequeue_qe);
        else
                qe_sz = RTE_CACHE_LINE_SIZE;

        /* The hardware always uses a CQ depth of at least
         * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
         * perspective we support a depth as low as 1 for LDB ports.
         */
        cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);

        /* Calculate the port memory required, including two cache lines for
         * credit pop counts. Round up to the nearest cache line.
         */
        alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
        alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

        port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
                                               alloc_sz, PAGE_SIZE);
        if (port_base == NULL)
                return -ENOMEM;

        /* Lock the page in memory */
        ret = rte_mem_lock_page(port_base);
        if (ret < 0) {
                DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
                goto create_port_err;
        }

        memset(port_base, 0, alloc_sz);
        cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));

        ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
                                     handle->domain_id,
                                     cfg,
                                     pc_dma_base,
                                     cq_dma_base,
                                     &response);
        if (ret)
                goto create_port_err;

        pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
        dlb_port[response.id][DLB_LDB].pp_addr =
                (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));

        dlb_port[response.id][DLB_LDB].cq_base =
                (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));

        dlb_port[response.id][DLB_LDB].ldb_popcount =
                (void *)(uintptr_t)port_base;
        dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
                (port_base + RTE_CACHE_LINE_SIZE);
        dlb_port[response.id][DLB_LDB].mz = mz;

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        /* The memzone backs the port's CQ and popcounts; on success it must
         * stay allocated, so return before the error-path free below.
         */
        return 0;

create_port_err:

        rte_memzone_free(mz);

        return ret;
}

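/*
 * Create a directed port. The DMA layout matches the load-balanced case
 * (two popcount cache lines followed by the CQ), but the allocation uses
 * the requested CQ depth directly and the producer port address comes
 * from the directed PP region (PP_BASE(is_dir) with is_dir = true).
 */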
static int
dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
                       struct dlb_create_dir_port_args *cfg,
                       enum dlb_cq_poll_modes poll_mode)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;
        uint8_t *port_base;
        const struct rte_memzone *mz;
        int alloc_sz, qe_sz;
        rte_iova_t pp_dma_base;
        rte_iova_t pc_dma_base;
        rte_iova_t cq_dma_base;
        int is_dir = true;

        DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);

        if (poll_mode == DLB_CQ_POLL_MODE_STD)
                qe_sz = sizeof(struct dlb_dequeue_qe);
        else
                qe_sz = RTE_CACHE_LINE_SIZE;

        /* Calculate the port memory required, including two cache lines for
         * credit pop counts. Round up to the nearest cache line.
         */
        alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
        alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

        port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
                                               alloc_sz, PAGE_SIZE);
        if (port_base == NULL)
                return -ENOMEM;

        /* Lock the page in memory */
        ret = rte_mem_lock_page(port_base);
        if (ret < 0) {
                DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
                goto create_port_err;
        }

        memset(port_base, 0, alloc_sz);
        cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));

        ret = dlb_hw_create_dir_port(&dlb_dev->hw,
                                     handle->domain_id,
                                     cfg,
                                     pc_dma_base,
                                     cq_dma_base,
                                     &response);
        if (ret)
                goto create_port_err;

        pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
        dlb_port[response.id][DLB_DIR].pp_addr =
                (void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));

        dlb_port[response.id][DLB_DIR].cq_base =
                (void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));

        dlb_port[response.id][DLB_DIR].ldb_popcount =
                (void *)(uintptr_t)port_base;
        dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
                (port_base + RTE_CACHE_LINE_SIZE);
        dlb_port[response.id][DLB_DIR].mz = mz;

        *(struct dlb_cmd_response *)cfg->response = response;

        DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);

        /* As in the LDB case, keep the memzone on success; free it only on
         * the error path.
         */
        return 0;

create_port_err:

        rte_memzone_free(mz);

        return ret;
}

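/*
 * Sequence number group accessors: query how many sequence numbers each
 * queue in a group receives, change that allocation, and report a group's
 * current occupancy. Query results are returned through the command
 * response's id field.
 */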
static int
dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
                         struct dlb_get_sn_allocation_args *args)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);

        response.id = ret;
        response.status = 0;

        *(struct dlb_cmd_response *)args->response = response;

        return ret;
}

static int
dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
                         struct dlb_set_sn_allocation_args *args)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
                                             args->num);

        response.status = 0;

        *(struct dlb_cmd_response *)args->response = response;

        return ret;
}

static int
dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
                        struct dlb_get_sn_occupancy_args *args)
{
        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
        struct dlb_cmd_response response = {0};
        int ret;

        ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
                                                      args->group);

        response.id = ret;
        response.status = 0;

        *(struct dlb_cmd_response *)args->response = response;

        return ret;
}

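/*
 * Plug the PF implementations into the generic dlb_iface_* function
 * pointers shared with the common eventdev layer.
 */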
static void
dlb_pf_iface_fn_ptrs_init(void)
{
        dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
        dlb_iface_open = dlb_pf_open;
        dlb_iface_domain_close = dlb_pf_domain_close;
        dlb_iface_get_device_version = dlb_pf_get_device_version;
        dlb_iface_get_num_resources = dlb_pf_get_num_resources;
        dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
        dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
        dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
        dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
        dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
        dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
        dlb_iface_dir_port_create = dlb_pf_dir_port_create;
        dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
        dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
        dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
        dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
}

/* PCI DEV HOOKS */
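/*
 * Per-device eventdev init. The primary process probes the PF hardware
 * and parses any devargs before completing the eventdev probe; secondary
 * processes only attach to the already-probed device.
 */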
static int
dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
{
        int ret = 0;
        struct rte_pci_device *pci_dev;
        struct dlb_devargs dlb_args = {
                .socket_id = rte_socket_id(),
                .max_num_events = DLB_MAX_NUM_LDB_CREDITS,
                .num_dir_credits_override = -1,
                .defer_sched = 0,
                .num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
        };
        struct dlb_eventdev *dlb;

        DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
                    eventdev->data->dev_id, eventdev->data->socket_id);

        dlb_entry_points_init(eventdev);

        dlb_pf_iface_fn_ptrs_init();

        pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */

                /* Probe the DLB PF layer */
                dlb->qm_instance.pf_dev = dlb_probe(pci_dev);

                if (dlb->qm_instance.pf_dev == NULL) {
                        DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
                                    rte_errno);
                        ret = -rte_errno;
                        goto dlb_probe_failed;
                }

                /* Were we invoked with runtime parameters? */
                if (pci_dev->device.devargs) {
                        ret = dlb_parse_params(pci_dev->device.devargs->args,
                                               pci_dev->device.devargs->name,
                                               &dlb_args);
                        if (ret) {
                                DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
                                            ret, rte_errno);
                                goto dlb_probe_failed;
                        }
                }

                ret = dlb_primary_eventdev_probe(eventdev,
                                                 EVDEV_DLB_NAME_PMD_STR,
                                                 &dlb_args);
        } else {
                ret = dlb_secondary_eventdev_probe(eventdev,
                                                   EVDEV_DLB_NAME_PMD_STR);
        }
        if (ret)
                goto dlb_probe_failed;

        DLB_LOG_INFO("DLB PF Probe success\n");

        return 0;

dlb_probe_failed:

        DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);

        return ret;
}

#define EVENTDEV_INTEL_VENDOR_ID 0x8086

static const struct rte_pci_id pci_id_dlb_map[] = {
        {
                RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
                               DLB_PF_DEV_ID)
        },
        {
                .vendor_id = 0,
        },
};

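/*
 * Thin rte_bus_pci glue: allocate a dlb_eventdev-sized eventdev on probe
 * and release it on remove.
 */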
static int
event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
                    struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
                sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
                EVDEV_DLB_NAME_PMD_STR);
}

static int
event_dlb_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver pci_eventdev_dlb_pmd = {
        .id_table = pci_id_dlb_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = event_dlb_pci_probe,
        .remove = event_dlb_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);