event/cnxk: add platform specific device config
drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

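/* Report SSO device capabilities and resource limits to the application. */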
void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

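/* Check the application supplied eventdev configuration against SSO limits:
 * dequeue timeout range, queue/port counts and the fixed per-port
 * enqueue/dequeue depth of one.
 */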
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

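/* Default event queue configuration: all schedule types allowed, normal
 * priority.
 */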
void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

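/* Default event port configuration; the new event threshold defaults to the
 * device-wide in-flight event limit.
 */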
void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

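/* Common probe-time initialization: stash the device private pointer in a
 * named memzone, attach the PCI device to the RoC SSO object and initialize
 * it, then reset the default timeout and resource state.
 */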
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

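/* Release RoC SSO resources; only the primary process owns and frees them. */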
int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

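/* PCI remove handler; delegates device cleanup to cnxk_sso_fini(). */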
int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}
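
/*
 * Illustrative sketch (not part of this file): a platform specific driver is
 * expected to wrap these common helpers in its own PCI probe/remove glue,
 * roughly as below. The cnXk_* names are placeholders for the SoC specific
 * implementations; cnxk_sso_remove() above can be registered directly as the
 * remove callback.
 *
 * static int
 * cnXk_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 * {
 *         return rte_event_pmd_pci_probe(pci_drv, pci_dev,
 *                                        sizeof(struct cnxk_sso_evdev),
 *                                        cnXk_sso_init);
 * }
 */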