drivers: use SPDX tag for Intel copyright files
[dpdk.git] / drivers / crypto / qat / qat_qp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include <rte_common.h>
6 #include <rte_dev.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_cryptodev_pmd.h>
10 #include <rte_pci.h>
11 #include <rte_bus_pci.h>
12 #include <rte_atomic.h>
13 #include <rte_prefetch.h>
14
15 #include "qat_logs.h"
16 #include "qat_crypto.h"
17 #include "qat_algs.h"
18 #include "adf_transport_access_macros.h"
19
20 #define ADF_MAX_SYM_DESC                        4096
21 #define ADF_MIN_SYM_DESC                        128
22 #define ADF_SYM_TX_RING_DESC_SIZE               128
23 #define ADF_SYM_RX_RING_DESC_SIZE               32
24 #define ADF_SYM_TX_QUEUE_STARTOFF               2
25 /* Offset from bundle start to 1st Sym Tx queue */
26 #define ADF_SYM_RX_QUEUE_STARTOFF               10
27 #define ADF_ARB_REG_SLOT                        0x1000
28 #define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C
29
30 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
31         ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
32         (ADF_ARB_REG_SLOT * index), value)
33
34 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
35         uint32_t queue_size_bytes);
36 static int qat_tx_queue_create(struct rte_cryptodev *dev,
37         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
38         int socket_id);
39 static int qat_rx_queue_create(struct rte_cryptodev *dev,
40         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
41         int socket_id);
42 static void qat_queue_delete(struct qat_queue *queue);
43 static int qat_queue_create(struct rte_cryptodev *dev,
44         struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
45         int socket_id);
46 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
47         uint32_t *queue_size_for_csr);
48 static void adf_configure_queues(struct qat_qp *queue);
49 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
50 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
51
52 static const struct rte_memzone *
53 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
54                         int socket_id)
55 {
56         const struct rte_memzone *mz;
57         unsigned memzone_flags = 0;
58         const struct rte_memseg *ms;
59
60         PMD_INIT_FUNC_TRACE();
61         mz = rte_memzone_lookup(queue_name);
62         if (mz != 0) {
63                 if (((size_t)queue_size <= mz->len) &&
64                                 ((socket_id == SOCKET_ID_ANY) ||
65                                         (socket_id == mz->socket_id))) {
66                         PMD_DRV_LOG(DEBUG, "re-use memzone already "
67                                         "allocated for %s", queue_name);
68                         return mz;
69                 }
70
71                 PMD_DRV_LOG(ERR, "Incompatible memzone already "
72                                 "allocated %s, size %u, socket %d. "
73                                 "Requested size %u, socket %u",
74                                 queue_name, (uint32_t)mz->len,
75                                 mz->socket_id, queue_size, socket_id);
76                 return NULL;
77         }
78
79         PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
80                                         queue_name, queue_size, socket_id);
81         ms = rte_eal_get_physmem_layout();
82         switch (ms[0].hugepage_sz) {
83         case(RTE_PGSIZE_2M):
84                 memzone_flags = RTE_MEMZONE_2MB;
85         break;
86         case(RTE_PGSIZE_1G):
87                 memzone_flags = RTE_MEMZONE_1GB;
88         break;
89         case(RTE_PGSIZE_16M):
90                 memzone_flags = RTE_MEMZONE_16MB;
91         break;
92         case(RTE_PGSIZE_16G):
93                 memzone_flags = RTE_MEMZONE_16GB;
94         break;
95         default:
96                 memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
97         }
98         return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
99                 memzone_flags, queue_size);
100 }
101
102 int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
103         const struct rte_cryptodev_qp_conf *qp_conf,
104         int socket_id, struct rte_mempool *session_pool __rte_unused)
105 {
106         struct qat_qp *qp;
107         struct rte_pci_device *pci_dev;
108         int ret;
109         char op_cookie_pool_name[RTE_RING_NAMESIZE];
110         uint32_t i;
111
112         PMD_INIT_FUNC_TRACE();
113
114         /* If qp is already in use free ring memory and qp metadata. */
115         if (dev->data->queue_pairs[queue_pair_id] != NULL) {
116                 ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
117                 if (ret < 0)
118                         return ret;
119         }
120
121         if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
122                 (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
123                 PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
124                                 qp_conf->nb_descriptors);
125                 return -EINVAL;
126         }
127
128         pci_dev = RTE_DEV_TO_PCI(dev->device);
129
130         if (pci_dev->mem_resource[0].addr == NULL) {
131                 PMD_DRV_LOG(ERR, "Could not find VF config space "
132                                 "(UIO driver attached?).");
133                 return -EINVAL;
134         }
135
136         if (queue_pair_id >=
137                         (ADF_NUM_SYM_QPS_PER_BUNDLE *
138                                         ADF_NUM_BUNDLES_PER_DEV)) {
139                 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
140                                 queue_pair_id);
141                 return -EINVAL;
142         }
143         /* Allocate the queue pair data structure. */
144         qp = rte_zmalloc("qat PMD qp metadata",
145                         sizeof(*qp), RTE_CACHE_LINE_SIZE);
146         if (qp == NULL) {
147                 PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
148                 return -ENOMEM;
149         }
150         qp->nb_descriptors = qp_conf->nb_descriptors;
151         qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
152                         qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
153                         RTE_CACHE_LINE_SIZE);
154
155         qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
156         qp->inflights16 = 0;
157
158         if (qat_tx_queue_create(dev, &(qp->tx_q),
159                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
160                 PMD_INIT_LOG(ERR, "Tx queue create failed "
161                                 "queue_pair_id=%u", queue_pair_id);
162                 goto create_err;
163         }
164
165         if (qat_rx_queue_create(dev, &(qp->rx_q),
166                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
167                 PMD_DRV_LOG(ERR, "Rx queue create failed "
168                                 "queue_pair_id=%hu", queue_pair_id);
169                 qat_queue_delete(&(qp->tx_q));
170                 goto create_err;
171         }
172
173         adf_configure_queues(qp);
174         adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
175         snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
176                 pci_dev->driver->driver.name, dev->data->dev_id,
177                 queue_pair_id);
178
179         qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
180         if (qp->op_cookie_pool == NULL)
181                 qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
182                                 qp->nb_descriptors,
183                                 sizeof(struct qat_crypto_op_cookie), 64, 0,
184                                 NULL, NULL, NULL, NULL, socket_id,
185                                 0);
186         if (!qp->op_cookie_pool) {
187                 PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
188                                 " op mempool");
189                 goto create_err;
190         }
191
192         for (i = 0; i < qp->nb_descriptors; i++) {
193                 if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
194                         PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
195                         return -EFAULT;
196                 }
197
198                 struct qat_crypto_op_cookie *sql_cookie =
199                                 qp->op_cookies[i];
200
201                 sql_cookie->qat_sgl_src_phys_addr =
202                                 rte_mempool_virt2iova(sql_cookie) +
203                                 offsetof(struct qat_crypto_op_cookie,
204                                 qat_sgl_list_src);
205
206                 sql_cookie->qat_sgl_dst_phys_addr =
207                                 rte_mempool_virt2iova(sql_cookie) +
208                                 offsetof(struct qat_crypto_op_cookie,
209                                 qat_sgl_list_dst);
210         }
211
212         struct qat_pmd_private *internals
213                 = dev->data->dev_private;
214         qp->qat_dev_gen = internals->qat_dev_gen;
215
216         dev->data->queue_pairs[queue_pair_id] = qp;
217         return 0;
218
219 create_err:
220         rte_free(qp);
221         return -EFAULT;
222 }
223
224 int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
225 {
226         struct qat_qp *qp =
227                         (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
228         uint32_t i;
229
230         PMD_INIT_FUNC_TRACE();
231         if (qp == NULL) {
232                 PMD_DRV_LOG(DEBUG, "qp already freed");
233                 return 0;
234         }
235
236         /* Don't free memory if there are still responses to be processed */
237         if (qp->inflights16 == 0) {
238                 qat_queue_delete(&(qp->tx_q));
239                 qat_queue_delete(&(qp->rx_q));
240         } else {
241                 return -EAGAIN;
242         }
243
244         adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
245
246         for (i = 0; i < qp->nb_descriptors; i++)
247                 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
248
249         if (qp->op_cookie_pool)
250                 rte_mempool_free(qp->op_cookie_pool);
251
252         rte_free(qp->op_cookies);
253         rte_free(qp);
254         dev->data->queue_pairs[queue_pair_id] = NULL;
255         return 0;
256 }
257
258 static int qat_tx_queue_create(struct rte_cryptodev *dev,
259         struct qat_queue *queue, uint8_t qp_id,
260         uint32_t nb_desc, int socket_id)
261 {
262         PMD_INIT_FUNC_TRACE();
263         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
264         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
265                                                 ADF_SYM_TX_QUEUE_STARTOFF;
266         PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
267                 nb_desc, qp_id, queue->hw_bundle_number,
268                 queue->hw_queue_number);
269
270         return qat_queue_create(dev, queue, nb_desc,
271                                 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
272 }
273
274 static int qat_rx_queue_create(struct rte_cryptodev *dev,
275                 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
276                 int socket_id)
277 {
278         PMD_INIT_FUNC_TRACE();
279         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
280         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
281                                                 ADF_SYM_RX_QUEUE_STARTOFF;
282
283         PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
284                 nb_desc, qp_id, queue->hw_bundle_number,
285                 queue->hw_queue_number);
286         return qat_queue_create(dev, queue, nb_desc,
287                                 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
288 }
289
290 static void qat_queue_delete(struct qat_queue *queue)
291 {
292         const struct rte_memzone *mz;
293         int status = 0;
294
295         if (queue == NULL) {
296                 PMD_DRV_LOG(DEBUG, "Invalid queue");
297                 return;
298         }
299         mz = rte_memzone_lookup(queue->memz_name);
300         if (mz != NULL) {
301                 /* Write an unused pattern to the queue memory. */
302                 memset(queue->base_addr, 0x7F, queue->queue_size);
303                 status = rte_memzone_free(mz);
304                 if (status != 0)
305                         PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
306                                         status, queue->memz_name);
307         } else {
308                 PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
309                                 queue->memz_name);
310         }
311 }
312
313 static int
314 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
315                 uint32_t nb_desc, uint8_t desc_size, int socket_id)
316 {
317         uint64_t queue_base;
318         void *io_addr;
319         const struct rte_memzone *qp_mz;
320         uint32_t queue_size_bytes = nb_desc*desc_size;
321         struct rte_pci_device *pci_dev;
322
323         PMD_INIT_FUNC_TRACE();
324         if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
325                 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
326                 return -EINVAL;
327         }
328
329         pci_dev = RTE_DEV_TO_PCI(dev->device);
330
331         /*
332          * Allocate a memzone for the queue - create a unique name.
333          */
334         snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
335                 pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
336                 queue->hw_bundle_number, queue->hw_queue_number);
337         qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
338                         socket_id);
339         if (qp_mz == NULL) {
340                 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
341                 return -ENOMEM;
342         }
343
344         queue->base_addr = (char *)qp_mz->addr;
345         queue->base_phys_addr = qp_mz->iova;
346         if (qat_qp_check_queue_alignment(queue->base_phys_addr,
347                         queue_size_bytes)) {
348                 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
349                                         " 0x%"PRIx64"\n",
350                                         queue->base_phys_addr);
351                 return -EFAULT;
352         }
353
354         if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
355                         != 0) {
356                 PMD_DRV_LOG(ERR, "Invalid num inflights");
357                 return -EINVAL;
358         }
359
360         queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
361                                         ADF_BYTES_TO_MSG_SIZE(desc_size));
362         queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
363         PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
364                                 " msg_size %u, max_inflights %u modulo %u",
365                                 queue->queue_size, queue_size_bytes,
366                                 nb_desc, desc_size, queue->max_inflights,
367                                 queue->modulo);
368
369         if (queue->max_inflights < 2) {
370                 PMD_DRV_LOG(ERR, "Invalid num inflights");
371                 return -EINVAL;
372         }
373         queue->head = 0;
374         queue->tail = 0;
375         queue->msg_size = desc_size;
376
377         /*
378          * Write an unused pattern to the queue memory.
379          */
380         memset(queue->base_addr, 0x7F, queue_size_bytes);
381
382         queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
383                                         queue->queue_size);
384
385         io_addr = pci_dev->mem_resource[0].addr;
386
387         WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
388                         queue->hw_queue_number, queue_base);
389         return 0;
390 }
391
/*
 * Verify the ring's physical base is naturally aligned to the ring size
 * (the mask test is only meaningful when queue_size_bytes is a power of
 * two, which the ADF ring-size encodings guarantee).
 * Returns 0 when aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	uint64_t align_mask = queue_size_bytes - 1;

	PMD_INIT_FUNC_TRACE();
	if (phys_addr & align_mask)
		return -EINVAL;
	return 0;
}
400
401 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
402         uint32_t *p_queue_size_for_csr)
403 {
404         uint8_t i = ADF_MIN_RING_SIZE;
405
406         PMD_INIT_FUNC_TRACE();
407         for (; i <= ADF_MAX_RING_SIZE; i++)
408                 if ((msg_size * msg_num) ==
409                                 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
410                         *p_queue_size_for_csr = i;
411                         return 0;
412                 }
413         PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
414         return -EINVAL;
415 }
416
417 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
418 {
419         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
420                                         (ADF_ARB_REG_SLOT *
421                                                         txq->hw_bundle_number);
422         uint32_t value;
423
424         PMD_INIT_FUNC_TRACE();
425         value = ADF_CSR_RD(base_addr, arb_csr_offset);
426         value |= (0x01 << txq->hw_queue_number);
427         ADF_CSR_WR(base_addr, arb_csr_offset, value);
428 }
429
430 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
431 {
432         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
433                                         (ADF_ARB_REG_SLOT *
434                                                         txq->hw_bundle_number);
435         uint32_t value;
436
437         PMD_INIT_FUNC_TRACE();
438         value = ADF_CSR_RD(base_addr, arb_csr_offset);
439         value ^= (0x01 << txq->hw_queue_number);
440         ADF_CSR_WR(base_addr, arb_csr_offset, value);
441 }
442
443 static void adf_configure_queues(struct qat_qp *qp)
444 {
445         uint32_t queue_config;
446         struct qat_queue *queue = &qp->tx_q;
447
448         PMD_INIT_FUNC_TRACE();
449         queue_config = BUILD_RING_CONFIG(queue->queue_size);
450
451         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
452                         queue->hw_queue_number, queue_config);
453
454         queue = &qp->rx_q;
455         queue_config =
456                         BUILD_RESP_RING_CONFIG(queue->queue_size,
457                                         ADF_RING_NEAR_WATERMARK_512,
458                                         ADF_RING_NEAR_WATERMARK_0);
459
460         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
461                         queue->hw_queue_number, queue_config);
462 }