event/cnxk: add event queue config
[dpdk.git] drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

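/*
 * Fill in the generic eventdev capabilities: per-device limits such as the
 * queue, port and in-flight event counts come from the SSO private data,
 * while flow counts, priority levels, per-port depths and the capability
 * flags are fixed properties reported for all cnxk SSO devices.
 */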
void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

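/*
 * Sanity-check the configuration requested through rte_event_dev_configure()
 * before it is applied: the dequeue timeout must lie within the advertised
 * [min, max] window (0 selects the minimum), queue and port counts must be
 * non-zero and within the hardware limits, and per-port enqueue/dequeue
 * depths may not exceed 1.
 *
 * A minimal application-side configuration that passes these checks could
 * look as follows; the values are illustrative only, `dev_id` identifies the
 * event device and `info` is assumed to have been filled in beforehand by
 * rte_event_dev_info_get():
 *
 *      struct rte_event_dev_config cfg = {
 *              .nb_event_queues = 1,
 *              .nb_event_ports = 1,
 *              .nb_events_limit = info.max_num_events,
 *              .nb_event_queue_flows = 1024,
 *              .nb_event_port_dequeue_depth = 1,
 *              .nb_event_port_enqueue_depth = 1,
 *              .dequeue_timeout_ns = 0,
 *      };
 *      rte_event_dev_configure(dev_id, &cfg);
 */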
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

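/*
 * Report the default queue configuration: the maximum number of atomic flows
 * and ordered sequences, scheduling of all event types on the same queue,
 * and normal priority.
 */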
void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

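/*
 * The eventdev API expresses queue priority in the range
 * RTE_EVENT_DEV_PRIORITY_HIGHEST (0) to RTE_EVENT_DEV_PRIORITY_LOWEST (255);
 * the SSO hardware group exposes 8 priority levels, so the value is scaled
 * down by an integer division by 32 (255 maps to 7) before it is programmed.
 */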
int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
        /* Normalize <0-255> to <0-7> */
        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
                                          queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

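/*
 * Report the default port configuration: a new-event threshold equal to the
 * device-wide event limit and enqueue/dequeue depths of 1, matching the
 * maximum depths advertised in cnxk_sso_info_get().
 */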
void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

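/*
 * Probe-time initialization: reserve a small named memzone that records a
 * pointer to the driver private data, attach the PCI device to the RoC SSO
 * object and initialize it, then seed the default state (dequeue timeout
 * window of 1 us to 1023 us, no queues or ports configured yet).
 */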
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

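/*
 * Teardown counterpart of cnxk_sso_init(): release the SSO resources and the
 * RoC SSO device. Only the primary process owns the hardware, so secondary
 * processes return early without touching it.
 */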
int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

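/*
 * PCI remove callback: delegate to the common eventdev PCI helper, passing
 * cnxk_sso_fini() so device-specific teardown runs as part of removal.
 */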
int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}