crypto/qat: make dequeue function generic
[dpdk.git] / drivers / crypto / qat / qat_qp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #include <rte_common.h>
6 #include <rte_dev.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_cryptodev_pmd.h>
10 #include <rte_pci.h>
11 #include <rte_bus_pci.h>
12 #include <rte_atomic.h>
13 #include <rte_prefetch.h>
14
15 #include "qat_logs.h"
16 #include "qat_sym.h"
17 #include "adf_transport_access_macros.h"
18
19 #define ADF_MAX_SYM_DESC                        4096
20 #define ADF_MIN_SYM_DESC                        128
21 #define ADF_SYM_TX_RING_DESC_SIZE               128
22 #define ADF_SYM_RX_RING_DESC_SIZE               32
23 #define ADF_SYM_TX_QUEUE_STARTOFF               2
24 /* Offset from bundle start to 1st Sym Tx queue */
25 #define ADF_SYM_RX_QUEUE_STARTOFF               10
26 #define ADF_ARB_REG_SLOT                        0x1000
27 #define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C
28
29 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
30         ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
31         (ADF_ARB_REG_SLOT * index), value)
32
33 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
34         uint32_t queue_size_bytes);
35 static int qat_tx_queue_create(struct rte_cryptodev *dev,
36         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
37         int socket_id);
38 static int qat_rx_queue_create(struct rte_cryptodev *dev,
39         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
40         int socket_id);
41 static void qat_queue_delete(struct qat_queue *queue);
42 static int qat_queue_create(struct rte_cryptodev *dev,
43         struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
44         int socket_id);
45 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
46         uint32_t *queue_size_for_csr);
47 static void adf_configure_queues(struct qat_qp *queue);
48 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
49 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
50
51 static const struct rte_memzone *
52 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
53                         int socket_id)
54 {
55         const struct rte_memzone *mz;
56
57         PMD_INIT_FUNC_TRACE();
58         mz = rte_memzone_lookup(queue_name);
59         if (mz != 0) {
60                 if (((size_t)queue_size <= mz->len) &&
61                                 ((socket_id == SOCKET_ID_ANY) ||
62                                         (socket_id == mz->socket_id))) {
63                         PMD_DRV_LOG(DEBUG, "re-use memzone already "
64                                         "allocated for %s", queue_name);
65                         return mz;
66                 }
67
68                 PMD_DRV_LOG(ERR, "Incompatible memzone already "
69                                 "allocated %s, size %u, socket %d. "
70                                 "Requested size %u, socket %u",
71                                 queue_name, (uint32_t)mz->len,
72                                 mz->socket_id, queue_size, socket_id);
73                 return NULL;
74         }
75
76         PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
77                                         queue_name, queue_size, socket_id);
78         return rte_memzone_reserve_aligned(queue_name, queue_size,
79                 socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
80 }
81
82 int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
83         const struct rte_cryptodev_qp_conf *qp_conf,
84         int socket_id, struct rte_mempool *session_pool __rte_unused)
85 {
86         struct qat_qp *qp;
87         struct rte_pci_device *pci_dev;
88         int ret;
89         char op_cookie_pool_name[RTE_RING_NAMESIZE];
90         uint32_t i;
91
92         PMD_INIT_FUNC_TRACE();
93
94         /* If qp is already in use free ring memory and qp metadata. */
95         if (dev->data->queue_pairs[queue_pair_id] != NULL) {
96                 ret = qat_sym_qp_release(dev, queue_pair_id);
97                 if (ret < 0)
98                         return ret;
99         }
100
101         if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
102                 (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
103                 PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
104                                 qp_conf->nb_descriptors);
105                 return -EINVAL;
106         }
107
108         pci_dev = RTE_DEV_TO_PCI(dev->device);
109
110         if (pci_dev->mem_resource[0].addr == NULL) {
111                 PMD_DRV_LOG(ERR, "Could not find VF config space "
112                                 "(UIO driver attached?).");
113                 return -EINVAL;
114         }
115
116         if (queue_pair_id >=
117                         (ADF_NUM_SYM_QPS_PER_BUNDLE *
118                                         ADF_NUM_BUNDLES_PER_DEV)) {
119                 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
120                                 queue_pair_id);
121                 return -EINVAL;
122         }
123         /* Allocate the queue pair data structure. */
124         qp = rte_zmalloc("qat PMD qp metadata",
125                         sizeof(*qp), RTE_CACHE_LINE_SIZE);
126         if (qp == NULL) {
127                 PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
128                 return -ENOMEM;
129         }
130         qp->nb_descriptors = qp_conf->nb_descriptors;
131         qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
132                         qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
133                         RTE_CACHE_LINE_SIZE);
134         if (qp->op_cookies == NULL) {
135                 PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
136                 rte_free(qp);
137                 return -ENOMEM;
138         }
139
140         qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
141         qp->inflights16 = 0;
142
143         if (qat_tx_queue_create(dev, &(qp->tx_q),
144                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
145                 PMD_INIT_LOG(ERR, "Tx queue create failed "
146                                 "queue_pair_id=%u", queue_pair_id);
147                 goto create_err;
148         }
149
150         if (qat_rx_queue_create(dev, &(qp->rx_q),
151                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
152                 PMD_DRV_LOG(ERR, "Rx queue create failed "
153                                 "queue_pair_id=%hu", queue_pair_id);
154                 qat_queue_delete(&(qp->tx_q));
155                 goto create_err;
156         }
157
158         adf_configure_queues(qp);
159         adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
160         snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
161                 pci_dev->driver->driver.name, dev->data->dev_id,
162                 queue_pair_id);
163
164         qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
165         if (qp->op_cookie_pool == NULL)
166                 qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
167                                 qp->nb_descriptors,
168                                 sizeof(struct qat_sym_op_cookie), 64, 0,
169                                 NULL, NULL, NULL, NULL, socket_id,
170                                 0);
171         if (!qp->op_cookie_pool) {
172                 PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
173                                 " op mempool");
174                 goto create_err;
175         }
176
177         for (i = 0; i < qp->nb_descriptors; i++) {
178                 if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
179                         PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
180                         goto create_err;
181                 }
182
183                 struct qat_sym_op_cookie *sql_cookie =
184                                 qp->op_cookies[i];
185
186                 sql_cookie->qat_sgl_src_phys_addr =
187                                 rte_mempool_virt2iova(sql_cookie) +
188                                 offsetof(struct qat_sym_op_cookie,
189                                 qat_sgl_list_src);
190
191                 sql_cookie->qat_sgl_dst_phys_addr =
192                                 rte_mempool_virt2iova(sql_cookie) +
193                                 offsetof(struct qat_sym_op_cookie,
194                                 qat_sgl_list_dst);
195         }
196
197         struct qat_pmd_private *internals
198                 = dev->data->dev_private;
199         qp->qat_dev_gen = internals->qat_dev_gen;
200         qp->build_request = qat_sym_build_request;
201         qp->process_response = qat_sym_process_response;
202
203         dev->data->queue_pairs[queue_pair_id] = qp;
204         return 0;
205
206 create_err:
207         if (qp->op_cookie_pool)
208                 rte_mempool_free(qp->op_cookie_pool);
209         rte_free(qp->op_cookies);
210         rte_free(qp);
211         return -EFAULT;
212 }
213
214 int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
215 {
216         struct qat_qp *qp =
217                         (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
218         uint32_t i;
219
220         PMD_INIT_FUNC_TRACE();
221         if (qp == NULL) {
222                 PMD_DRV_LOG(DEBUG, "qp already freed");
223                 return 0;
224         }
225
226         /* Don't free memory if there are still responses to be processed */
227         if (qp->inflights16 == 0) {
228                 qat_queue_delete(&(qp->tx_q));
229                 qat_queue_delete(&(qp->rx_q));
230         } else {
231                 return -EAGAIN;
232         }
233
234         adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
235
236         for (i = 0; i < qp->nb_descriptors; i++)
237                 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
238
239         if (qp->op_cookie_pool)
240                 rte_mempool_free(qp->op_cookie_pool);
241
242         rte_free(qp->op_cookies);
243         rte_free(qp);
244         dev->data->queue_pairs[queue_pair_id] = NULL;
245         return 0;
246 }
247
248 static int qat_tx_queue_create(struct rte_cryptodev *dev,
249         struct qat_queue *queue, uint8_t qp_id,
250         uint32_t nb_desc, int socket_id)
251 {
252         PMD_INIT_FUNC_TRACE();
253         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
254         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
255                                                 ADF_SYM_TX_QUEUE_STARTOFF;
256         PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
257                 nb_desc, qp_id, queue->hw_bundle_number,
258                 queue->hw_queue_number);
259
260         return qat_queue_create(dev, queue, nb_desc,
261                                 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
262 }
263
264 static int qat_rx_queue_create(struct rte_cryptodev *dev,
265                 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
266                 int socket_id)
267 {
268         PMD_INIT_FUNC_TRACE();
269         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
270         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
271                                                 ADF_SYM_RX_QUEUE_STARTOFF;
272
273         PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
274                 nb_desc, qp_id, queue->hw_bundle_number,
275                 queue->hw_queue_number);
276         return qat_queue_create(dev, queue, nb_desc,
277                                 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
278 }
279
280 static void qat_queue_delete(struct qat_queue *queue)
281 {
282         const struct rte_memzone *mz;
283         int status = 0;
284
285         if (queue == NULL) {
286                 PMD_DRV_LOG(DEBUG, "Invalid queue");
287                 return;
288         }
289         mz = rte_memzone_lookup(queue->memz_name);
290         if (mz != NULL) {
291                 /* Write an unused pattern to the queue memory. */
292                 memset(queue->base_addr, 0x7F, queue->queue_size);
293                 status = rte_memzone_free(mz);
294                 if (status != 0)
295                         PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
296                                         status, queue->memz_name);
297         } else {
298                 PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
299                                 queue->memz_name);
300         }
301 }
302
303 static int
304 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
305                 uint32_t nb_desc, uint8_t desc_size, int socket_id)
306 {
307         uint64_t queue_base;
308         void *io_addr;
309         const struct rte_memzone *qp_mz;
310         uint32_t queue_size_bytes = nb_desc*desc_size;
311         struct rte_pci_device *pci_dev;
312
313         PMD_INIT_FUNC_TRACE();
314         if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
315                 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
316                 return -EINVAL;
317         }
318
319         pci_dev = RTE_DEV_TO_PCI(dev->device);
320
321         /*
322          * Allocate a memzone for the queue - create a unique name.
323          */
324         snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
325                 pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
326                 queue->hw_bundle_number, queue->hw_queue_number);
327         qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
328                         socket_id);
329         if (qp_mz == NULL) {
330                 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
331                 return -ENOMEM;
332         }
333
334         queue->base_addr = (char *)qp_mz->addr;
335         queue->base_phys_addr = qp_mz->iova;
336         if (qat_qp_check_queue_alignment(queue->base_phys_addr,
337                         queue_size_bytes)) {
338                 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
339                                         " 0x%"PRIx64"\n",
340                                         queue->base_phys_addr);
341                 return -EFAULT;
342         }
343
344         if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
345                         != 0) {
346                 PMD_DRV_LOG(ERR, "Invalid num inflights");
347                 return -EINVAL;
348         }
349
350         queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
351                                         ADF_BYTES_TO_MSG_SIZE(desc_size));
352         queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
353         PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
354                                 " msg_size %u, max_inflights %u modulo %u",
355                                 queue->queue_size, queue_size_bytes,
356                                 nb_desc, desc_size, queue->max_inflights,
357                                 queue->modulo);
358
359         if (queue->max_inflights < 2) {
360                 PMD_DRV_LOG(ERR, "Invalid num inflights");
361                 return -EINVAL;
362         }
363         queue->head = 0;
364         queue->tail = 0;
365         queue->msg_size = desc_size;
366
367         /*
368          * Write an unused pattern to the queue memory.
369          */
370         memset(queue->base_addr, 0x7F, queue_size_bytes);
371
372         queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
373                                         queue->queue_size);
374
375         io_addr = pci_dev->mem_resource[0].addr;
376
377         WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
378                         queue->hw_queue_number, queue_base);
379         return 0;
380 }
381
/*
 * The hardware requires a ring base naturally aligned to the ring size
 * (a power of two): return 0 when aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	return ((phys_addr & (queue_size_bytes - 1)) == 0) ? 0 : -EINVAL;
}
390
391 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
392         uint32_t *p_queue_size_for_csr)
393 {
394         uint8_t i = ADF_MIN_RING_SIZE;
395
396         PMD_INIT_FUNC_TRACE();
397         for (; i <= ADF_MAX_RING_SIZE; i++)
398                 if ((msg_size * msg_num) ==
399                                 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
400                         *p_queue_size_for_csr = i;
401                         return 0;
402                 }
403         PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
404         return -EINVAL;
405 }
406
407 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
408 {
409         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
410                                         (ADF_ARB_REG_SLOT *
411                                                         txq->hw_bundle_number);
412         uint32_t value;
413
414         PMD_INIT_FUNC_TRACE();
415         value = ADF_CSR_RD(base_addr, arb_csr_offset);
416         value |= (0x01 << txq->hw_queue_number);
417         ADF_CSR_WR(base_addr, arb_csr_offset, value);
418 }
419
420 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
421 {
422         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
423                                         (ADF_ARB_REG_SLOT *
424                                                         txq->hw_bundle_number);
425         uint32_t value;
426
427         PMD_INIT_FUNC_TRACE();
428         value = ADF_CSR_RD(base_addr, arb_csr_offset);
429         value ^= (0x01 << txq->hw_queue_number);
430         ADF_CSR_WR(base_addr, arb_csr_offset, value);
431 }
432
433 static void adf_configure_queues(struct qat_qp *qp)
434 {
435         uint32_t queue_config;
436         struct qat_queue *queue = &qp->tx_q;
437
438         PMD_INIT_FUNC_TRACE();
439         queue_config = BUILD_RING_CONFIG(queue->queue_size);
440
441         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
442                         queue->hw_queue_number, queue_config);
443
444         queue = &qp->rx_q;
445         queue_config =
446                         BUILD_RESP_RING_CONFIG(queue->queue_size,
447                                         ADF_RING_NEAR_WATERMARK_512,
448                                         ADF_RING_NEAR_WATERMARK_0);
449
450         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
451                         queue->hw_queue_number, queue_config);
452 }