lib/cryptodev/cryptodev_pmd.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <sys/queue.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>

#include "cryptodev_pmd.h"

/**
 * Parse name from argument
 */
static int
rte_cryptodev_pmd_parse_name_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        struct rte_cryptodev_pmd_init_params *params = extra_args;
        int n;

        n = strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
        if (n >= RTE_CRYPTODEV_NAME_MAX_LEN)
                return -EINVAL;

        return 0;
}

/**
 * Parse unsigned integer from argument
 */
static int
rte_cryptodev_pmd_parse_uint_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        int i;
        char *end;
        errno = 0;

        i = strtol(value, &end, 10);
        if (*end != 0 || errno != 0 || i < 0)
                return -EINVAL;

        *((uint32_t *)extra_args) = i;
        return 0;
}

int
rte_cryptodev_pmd_parse_input_args(
                struct rte_cryptodev_pmd_init_params *params,
                const char *args)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;

        if (params == NULL)
                return -EINVAL;

        if (args) {
                kvlist = rte_kvargs_parse(args, cryptodev_pmd_valid_params);
                if (kvlist == NULL)
                        return -EINVAL;

                ret = rte_kvargs_process(kvlist,
                                RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
                                &rte_cryptodev_pmd_parse_uint_arg,
                                &params->max_nb_queue_pairs);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
                                &rte_cryptodev_pmd_parse_uint_arg,
                                &params->socket_id);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                RTE_CRYPTODEV_PMD_NAME_ARG,
                                &rte_cryptodev_pmd_parse_name_arg,
                                params);
                if (ret < 0)
                        goto free_kvlist;
        }

free_kvlist:
        rte_kvargs_free(kvlist);
        return ret;
}
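
/*
 * Illustrative sketch (not part of the upstream file): inside a virtual
 * crypto PMD's probe routine, the driver would typically seed default
 * init parameters and let rte_cryptodev_pmd_parse_input_args() override
 * them from the devargs string, e.g.
 * "--vdev=crypto_foo,max_nb_queue_pairs=4,socket_id=0".
 * The name "crypto_foo" and struct foo_private below are hypothetical.
 *
 *        struct rte_cryptodev_pmd_init_params init_params = {
 *                "",
 *                sizeof(struct foo_private),
 *                rte_socket_id(),
 *                RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
 *        };
 *        const char *args = rte_vdev_device_args(vdev);
 *
 *        if (rte_cryptodev_pmd_parse_input_args(&init_params, args) < 0)
 *                return -EINVAL;
 */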

struct rte_cryptodev *
rte_cryptodev_pmd_create(const char *name,
                struct rte_device *device,
                struct rte_cryptodev_pmd_init_params *params)
{
        struct rte_cryptodev *cryptodev;

        if (params->name[0] != '\0') {
                CDEV_LOG_INFO("User specified device name = %s", params->name);
                name = params->name;
        }

        CDEV_LOG_INFO("Creating cryptodev %s", name);

        CDEV_LOG_INFO("Initialisation parameters - name: %s, "
                        "socket id: %d, max queue pairs: %u",
                        name, params->socket_id, params->max_nb_queue_pairs);

        /* allocate device structure */
        cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
        if (cryptodev == NULL) {
                CDEV_LOG_ERR("Failed to allocate crypto device for %s", name);
                return NULL;
        }

        /* allocate private device structure */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                cryptodev->data->dev_private =
                                rte_zmalloc_socket("cryptodev device private",
                                                params->private_data_size,
                                                RTE_CACHE_LINE_SIZE,
                                                params->socket_id);

                if (cryptodev->data->dev_private == NULL) {
                        CDEV_LOG_ERR("Cannot allocate memory for cryptodev %s"
                                        " private data", name);

                        rte_cryptodev_pmd_release_device(cryptodev);
                        return NULL;
                }
        }

        cryptodev->device = device;

        /* initialise user call-back tail queue */
        TAILQ_INIT(&(cryptodev->link_intr_cbs));

        return cryptodev;
}
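
/*
 * Illustrative sketch (not part of the upstream file): continuing the
 * hypothetical probe above, the PMD allocates the device and then wires
 * up its ops and fast-path handlers before announcing the end of probing.
 * foo_crypto_ops, foo_enqueue_burst and foo_dequeue_burst are placeholder
 * names for driver-provided symbols.
 *
 *        struct rte_cryptodev *cryptodev;
 *
 *        cryptodev = rte_cryptodev_pmd_create(rte_vdev_device_name(vdev),
 *                        &vdev->device, &init_params);
 *        if (cryptodev == NULL)
 *                return -ENODEV;
 *
 *        cryptodev->dev_ops = &foo_crypto_ops;
 *        cryptodev->enqueue_burst = foo_enqueue_burst;
 *        cryptodev->dequeue_burst = foo_dequeue_burst;
 *        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO;
 *        rte_cryptodev_pmd_probing_finish(cryptodev);
 */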

int
rte_cryptodev_pmd_destroy(struct rte_cryptodev *cryptodev)
{
        int retval;
        void *dev_priv = cryptodev->data->dev_private;

        CDEV_LOG_INFO("Closing crypto device %s", cryptodev->device->name);

        /* free crypto device */
        retval = rte_cryptodev_pmd_release_device(cryptodev);
        if (retval)
                return retval;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(dev_priv);

        cryptodev->device = NULL;
        cryptodev->data = NULL;

        return 0;
}
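
/*
 * Illustrative sketch (not part of the upstream file): the matching
 * remove path of the hypothetical PMD looks the device up by name and
 * hands it back to rte_cryptodev_pmd_destroy().
 *
 *        struct rte_cryptodev *cryptodev;
 *        const char *name = rte_vdev_device_name(vdev);
 *
 *        cryptodev = rte_cryptodev_pmd_get_named_dev(name);
 *        if (cryptodev == NULL)
 *                return -ENODEV;
 *
 *        return rte_cryptodev_pmd_destroy(cryptodev);
 */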

void
rte_cryptodev_pmd_probing_finish(struct rte_cryptodev *cryptodev)
{
        if (cryptodev == NULL)
                return;
        /*
         * For a secondary process, at this point the device is expected
         * to be already 'usable', so the shared data and all function
         * pointers for the fast-path devops must have been set up
         * properly inside rte_cryptodev.
         */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                cryptodev_fp_ops_set(rte_crypto_fp_ops +
                                cryptodev->data->dev_id, cryptodev);
}

static uint16_t
dummy_crypto_enqueue_burst(__rte_unused void *qp,
                           __rte_unused struct rte_crypto_op **ops,
                           __rte_unused uint16_t nb_ops)
{
        CDEV_LOG_ERR(
                "crypto enqueue burst requested for unconfigured device");
        rte_errno = ENOTSUP;
        return 0;
}

static uint16_t
dummy_crypto_dequeue_burst(__rte_unused void *qp,
                           __rte_unused struct rte_crypto_op **ops,
                           __rte_unused uint16_t nb_ops)
{
        CDEV_LOG_ERR(
                "crypto dequeue burst requested for unconfigured device");
        rte_errno = ENOTSUP;
        return 0;
}

void
cryptodev_fp_ops_reset(struct rte_crypto_fp_ops *fp_ops)
{
        static struct rte_cryptodev_cb_rcu dummy_cb[RTE_MAX_QUEUES_PER_PORT];
        static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
        static const struct rte_crypto_fp_ops dummy = {
                .enqueue_burst = dummy_crypto_enqueue_burst,
                .dequeue_burst = dummy_crypto_dequeue_burst,
                .qp = {
                        .data = dummy_data,
                        .enq_cb = dummy_cb,
                        .deq_cb = dummy_cb,
                },
        };

        *fp_ops = dummy;
}

void
cryptodev_fp_ops_set(struct rte_crypto_fp_ops *fp_ops,
                     const struct rte_cryptodev *dev)
{
        fp_ops->enqueue_burst = dev->enqueue_burst;
        fp_ops->dequeue_burst = dev->dequeue_burst;
        fp_ops->qp.data = dev->data->queue_pairs;
        fp_ops->qp.enq_cb = dev->enq_cbs;
        fp_ops->qp.deq_cb = dev->deq_cbs;
}
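
/*
 * For context (a paraphrase, not code from this file): the table filled
 * in by cryptodev_fp_ops_set() is what the inline burst API in
 * rte_cryptodev.h consumes on the fast path, roughly along these lines:
 *
 *        const struct rte_crypto_fp_ops *fp_ops = &rte_crypto_fp_ops[dev_id];
 *        void *qp = fp_ops->qp.data[qp_id];
 *
 *        nb_ops = fp_ops->enqueue_burst(qp, ops, nb_ops);
 *
 * cryptodev_fp_ops_reset() restores the dummy handlers above so that a
 * burst call on a released or not-yet-configured device fails gracefully
 * with rte_errno set to ENOTSUP instead of dereferencing stale pointers.
 */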