crypto/qat: add SGL capability
[dpdk.git] / drivers / crypto / qat / qat_qp.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_common.h>
35 #include <rte_dev.h>
36 #include <rte_malloc.h>
37 #include <rte_memzone.h>
38 #include <rte_cryptodev_pmd.h>
39 #include <rte_atomic.h>
40 #include <rte_prefetch.h>
41
42 #include "qat_logs.h"
43 #include "qat_crypto.h"
44 #include "qat_algs.h"
45 #include "adf_transport_access_macros.h"
46
47 #define ADF_MAX_SYM_DESC                        4096
48 #define ADF_MIN_SYM_DESC                        128
49 #define ADF_SYM_TX_RING_DESC_SIZE               128
50 #define ADF_SYM_RX_RING_DESC_SIZE               32
51 #define ADF_SYM_TX_QUEUE_STARTOFF               2
52 /* Offset from bundle start to 1st Sym Tx queue */
53 #define ADF_SYM_RX_QUEUE_STARTOFF               10
54 #define ADF_ARB_REG_SLOT                        0x1000
55 #define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C
56
57 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
58         ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
59         (ADF_ARB_REG_SLOT * index), value)
60
61 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
62         uint32_t queue_size_bytes);
63 static int qat_tx_queue_create(struct rte_cryptodev *dev,
64         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
65         int socket_id);
66 static int qat_rx_queue_create(struct rte_cryptodev *dev,
67         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
68         int socket_id);
69 static void qat_queue_delete(struct qat_queue *queue);
70 static int qat_queue_create(struct rte_cryptodev *dev,
71         struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
72         int socket_id);
73 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
74         uint32_t *queue_size_for_csr);
75 static void adf_configure_queues(struct qat_qp *queue);
76 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
77 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
78
79 static const struct rte_memzone *
80 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
81                         int socket_id)
82 {
83         const struct rte_memzone *mz;
84         unsigned memzone_flags = 0;
85         const struct rte_memseg *ms;
86
87         PMD_INIT_FUNC_TRACE();
88         mz = rte_memzone_lookup(queue_name);
89         if (mz != 0) {
90                 if (((size_t)queue_size <= mz->len) &&
91                                 ((socket_id == SOCKET_ID_ANY) ||
92                                         (socket_id == mz->socket_id))) {
93                         PMD_DRV_LOG(DEBUG, "re-use memzone already "
94                                         "allocated for %s", queue_name);
95                         return mz;
96                 }
97
98                 PMD_DRV_LOG(ERR, "Incompatible memzone already "
99                                 "allocated %s, size %u, socket %d. "
100                                 "Requested size %u, socket %u",
101                                 queue_name, (uint32_t)mz->len,
102                                 mz->socket_id, queue_size, socket_id);
103                 return NULL;
104         }
105
106         PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
107                                         queue_name, queue_size, socket_id);
108         ms = rte_eal_get_physmem_layout();
109         switch (ms[0].hugepage_sz) {
110         case(RTE_PGSIZE_2M):
111                 memzone_flags = RTE_MEMZONE_2MB;
112         break;
113         case(RTE_PGSIZE_1G):
114                 memzone_flags = RTE_MEMZONE_1GB;
115         break;
116         case(RTE_PGSIZE_16M):
117                 memzone_flags = RTE_MEMZONE_16MB;
118         break;
119         case(RTE_PGSIZE_16G):
120                 memzone_flags = RTE_MEMZONE_16GB;
121         break;
122         default:
123                 memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
124 }
125 #ifdef RTE_LIBRTE_XEN_DOM0
126         return rte_memzone_reserve_bounded(queue_name, queue_size,
127                 socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
128 #else
129         return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
130                 memzone_flags, queue_size);
131 #endif
132 }
133
134 int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
135         const struct rte_cryptodev_qp_conf *qp_conf,
136         int socket_id)
137 {
138         struct qat_qp *qp;
139         int ret;
140         char op_cookie_pool_name[RTE_RING_NAMESIZE];
141         uint32_t i;
142
143         PMD_INIT_FUNC_TRACE();
144
145         /* If qp is already in use free ring memory and qp metadata. */
146         if (dev->data->queue_pairs[queue_pair_id] != NULL) {
147                 ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
148                 if (ret < 0)
149                         return ret;
150         }
151
152         if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
153                 (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
154                 PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
155                                 qp_conf->nb_descriptors);
156                 return -EINVAL;
157         }
158
159         if (dev->pci_dev->mem_resource[0].addr == NULL) {
160                 PMD_DRV_LOG(ERR, "Could not find VF config space "
161                                 "(UIO driver attached?).");
162                 return -EINVAL;
163         }
164
165         if (queue_pair_id >=
166                         (ADF_NUM_SYM_QPS_PER_BUNDLE *
167                                         ADF_NUM_BUNDLES_PER_DEV)) {
168                 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
169                                 queue_pair_id);
170                 return -EINVAL;
171         }
172         /* Allocate the queue pair data structure. */
173         qp = rte_zmalloc("qat PMD qp metadata",
174                         sizeof(*qp), RTE_CACHE_LINE_SIZE);
175         if (qp == NULL) {
176                 PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
177                 return -ENOMEM;
178         }
179         qp->nb_descriptors = qp_conf->nb_descriptors;
180         qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
181                         qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
182                         RTE_CACHE_LINE_SIZE);
183
184         qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
185         rte_atomic16_init(&qp->inflights16);
186
187         if (qat_tx_queue_create(dev, &(qp->tx_q),
188                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
189                 PMD_INIT_LOG(ERR, "Tx queue create failed "
190                                 "queue_pair_id=%u", queue_pair_id);
191                 goto create_err;
192         }
193
194         if (qat_rx_queue_create(dev, &(qp->rx_q),
195                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
196                 PMD_DRV_LOG(ERR, "Rx queue create failed "
197                                 "queue_pair_id=%hu", queue_pair_id);
198                 qat_queue_delete(&(qp->tx_q));
199                 goto create_err;
200         }
201
202         adf_configure_queues(qp);
203         adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
204         snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
205                 dev->driver->pci_drv.driver.name, dev->data->dev_id,
206                 queue_pair_id);
207
208         qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
209         if (qp->op_cookie_pool == NULL)
210                 qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
211                                 qp->nb_descriptors,
212                                 sizeof(struct qat_crypto_op_cookie), 64, 0,
213                                 NULL, NULL, NULL, NULL, socket_id,
214                                 0);
215         if (!qp->op_cookie_pool) {
216                 PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
217                                 " op mempool");
218                 goto create_err;
219         }
220
221         for (i = 0; i < qp->nb_descriptors; i++) {
222                 if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
223                         PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
224                         return -EFAULT;
225                 }
226
227                 struct qat_crypto_op_cookie *sql_cookie =
228                                 qp->op_cookies[i];
229
230                 sql_cookie->qat_sgl_src_phys_addr =
231                                 rte_mempool_virt2phy(qp->op_cookie_pool,
232                                 sql_cookie) +
233                                 offsetof(struct qat_crypto_op_cookie,
234                                 qat_sgl_list_src);
235
236                 sql_cookie->qat_sgl_dst_phys_addr =
237                                 rte_mempool_virt2phy(qp->op_cookie_pool,
238                                 sql_cookie) +
239                                 offsetof(struct qat_crypto_op_cookie,
240                                 qat_sgl_list_dst);
241         }
242         dev->data->queue_pairs[queue_pair_id] = qp;
243         return 0;
244
245 create_err:
246         rte_free(qp);
247         return -EFAULT;
248 }
249
250 int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
251 {
252         struct qat_qp *qp =
253                         (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
254         uint32_t i;
255
256         PMD_INIT_FUNC_TRACE();
257         if (qp == NULL) {
258                 PMD_DRV_LOG(DEBUG, "qp already freed");
259                 return 0;
260         }
261
262         /* Don't free memory if there are still responses to be processed */
263         if (rte_atomic16_read(&(qp->inflights16)) == 0) {
264                 qat_queue_delete(&(qp->tx_q));
265                 qat_queue_delete(&(qp->rx_q));
266         } else {
267                 return -EAGAIN;
268         }
269
270         adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
271
272         for (i = 0; i < qp->nb_descriptors; i++)
273                 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
274
275         if (qp->op_cookie_pool)
276                 rte_mempool_free(qp->op_cookie_pool);
277
278         rte_free(qp->op_cookies);
279         rte_free(qp);
280         dev->data->queue_pairs[queue_pair_id] = NULL;
281         return 0;
282 }
283
284 static int qat_tx_queue_create(struct rte_cryptodev *dev,
285         struct qat_queue *queue, uint8_t qp_id,
286         uint32_t nb_desc, int socket_id)
287 {
288         PMD_INIT_FUNC_TRACE();
289         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
290         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
291                                                 ADF_SYM_TX_QUEUE_STARTOFF;
292         PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
293                 nb_desc, qp_id, queue->hw_bundle_number,
294                 queue->hw_queue_number);
295
296         return qat_queue_create(dev, queue, nb_desc,
297                                 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
298 }
299
300 static int qat_rx_queue_create(struct rte_cryptodev *dev,
301                 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
302                 int socket_id)
303 {
304         PMD_INIT_FUNC_TRACE();
305         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
306         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
307                                                 ADF_SYM_RX_QUEUE_STARTOFF;
308
309         PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
310                 nb_desc, qp_id, queue->hw_bundle_number,
311                 queue->hw_queue_number);
312         return qat_queue_create(dev, queue, nb_desc,
313                                 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
314 }
315
316 static void qat_queue_delete(struct qat_queue *queue)
317 {
318         const struct rte_memzone *mz;
319         int status = 0;
320
321         if (queue == NULL) {
322                 PMD_DRV_LOG(DEBUG, "Invalid queue");
323                 return;
324         }
325         mz = rte_memzone_lookup(queue->memz_name);
326         if (mz != NULL) {
327                 /* Write an unused pattern to the queue memory. */
328                 memset(queue->base_addr, 0x7F, queue->queue_size);
329                 status = rte_memzone_free(mz);
330                 if (status != 0)
331                         PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
332                                         status, queue->memz_name);
333         } else {
334                 PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
335                                 queue->memz_name);
336         }
337 }
338
339 static int
340 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
341                 uint32_t nb_desc, uint8_t desc_size, int socket_id)
342 {
343         uint64_t queue_base;
344         void *io_addr;
345         const struct rte_memzone *qp_mz;
346         uint32_t queue_size_bytes = nb_desc*desc_size;
347
348         PMD_INIT_FUNC_TRACE();
349         if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
350                 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
351                 return -EINVAL;
352         }
353
354         /*
355          * Allocate a memzone for the queue - create a unique name.
356          */
357         snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
358                 dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
359                 queue->hw_bundle_number, queue->hw_queue_number);
360         qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
361                         socket_id);
362         if (qp_mz == NULL) {
363                 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
364                 return -ENOMEM;
365         }
366
367         queue->base_addr = (char *)qp_mz->addr;
368         queue->base_phys_addr = qp_mz->phys_addr;
369         if (qat_qp_check_queue_alignment(queue->base_phys_addr,
370                         queue_size_bytes)) {
371                 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
372                                         " 0x%"PRIx64"\n",
373                                         queue->base_phys_addr);
374                 return -EFAULT;
375         }
376
377         if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
378                         != 0) {
379                 PMD_DRV_LOG(ERR, "Invalid num inflights");
380                 return -EINVAL;
381         }
382
383         queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
384                                         ADF_BYTES_TO_MSG_SIZE(desc_size));
385         queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
386         PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
387                                 " msg_size %u, max_inflights %u modulo %u",
388                                 queue->queue_size, queue_size_bytes,
389                                 nb_desc, desc_size, queue->max_inflights,
390                                 queue->modulo);
391
392         if (queue->max_inflights < 2) {
393                 PMD_DRV_LOG(ERR, "Invalid num inflights");
394                 return -EINVAL;
395         }
396         queue->head = 0;
397         queue->tail = 0;
398         queue->msg_size = desc_size;
399
400         /*
401          * Write an unused pattern to the queue memory.
402          */
403         memset(queue->base_addr, 0x7F, queue_size_bytes);
404
405         queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
406                                         queue->queue_size);
407         io_addr = dev->pci_dev->mem_resource[0].addr;
408
409         WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
410                         queue->hw_queue_number, queue_base);
411         return 0;
412 }
413
/* The ring base must be naturally aligned to the ring size (a power of
 * two), i.e. no low-order bits of the physical address may be set
 * within the size mask. Returns 0 when aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	uint64_t align_mask = queue_size_bytes - 1;

	PMD_INIT_FUNC_TRACE();
	return (phys_addr & align_mask) ? -EINVAL : 0;
}
422
423 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
424         uint32_t *p_queue_size_for_csr)
425 {
426         uint8_t i = ADF_MIN_RING_SIZE;
427
428         PMD_INIT_FUNC_TRACE();
429         for (; i <= ADF_MAX_RING_SIZE; i++)
430                 if ((msg_size * msg_num) ==
431                                 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
432                         *p_queue_size_for_csr = i;
433                         return 0;
434                 }
435         PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
436         return -EINVAL;
437 }
438
439 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
440 {
441         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
442                                         (ADF_ARB_REG_SLOT *
443                                                         txq->hw_bundle_number);
444         uint32_t value;
445
446         PMD_INIT_FUNC_TRACE();
447         value = ADF_CSR_RD(base_addr, arb_csr_offset);
448         value |= (0x01 << txq->hw_queue_number);
449         ADF_CSR_WR(base_addr, arb_csr_offset, value);
450 }
451
452 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
453 {
454         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
455                                         (ADF_ARB_REG_SLOT *
456                                                         txq->hw_bundle_number);
457         uint32_t value;
458
459         PMD_INIT_FUNC_TRACE();
460         value = ADF_CSR_RD(base_addr, arb_csr_offset);
461         value ^= (0x01 << txq->hw_queue_number);
462         ADF_CSR_WR(base_addr, arb_csr_offset, value);
463 }
464
465 static void adf_configure_queues(struct qat_qp *qp)
466 {
467         uint32_t queue_config;
468         struct qat_queue *queue = &qp->tx_q;
469
470         PMD_INIT_FUNC_TRACE();
471         queue_config = BUILD_RING_CONFIG(queue->queue_size);
472
473         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
474                         queue->hw_queue_number, queue_config);
475
476         queue = &qp->rx_q;
477         queue_config =
478                         BUILD_RESP_RING_CONFIG(queue->queue_size,
479                                         ADF_RING_NEAR_WATERMARK_512,
480                                         ADF_RING_NEAR_WATERMARK_0);
481
482         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
483                         queue->hw_queue_number, queue_config);
484 }