[dpdk.git] drivers/crypto/qat/qat_qp.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "qat_algs.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC                        4096
#define ADF_MIN_SYM_DESC                        128
#define ADF_SYM_TX_RING_DESC_SIZE               128
#define ADF_SYM_RX_RING_DESC_SIZE               32
/* Offsets from bundle start to the 1st Sym Tx/Rx queue in the bundle */
#define ADF_SYM_TX_QUEUE_STARTOFF               2
#define ADF_SYM_RX_QUEUE_STARTOFF               10
#define ADF_ARB_REG_SLOT                        0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C

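/*
 * Each bundle has its own CSR window of ADF_ARB_REG_SLOT (4KB); the
 * ring-service-arbiter enable register sits at a fixed offset within it.
 * Bit n of the register enables arbitration for ring n of that bundle.
 */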
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
        uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
        int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *qp);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
                        int socket_id)
{
        const struct rte_memzone *mz;
        unsigned int memzone_flags = 0;
        const struct rte_memseg *ms;

        PMD_INIT_FUNC_TRACE();
        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                                ((socket_id == SOCKET_ID_ANY) ||
                                        (socket_id == mz->socket_id))) {
                        PMD_DRV_LOG(DEBUG, "re-use memzone already "
                                        "allocated for %s", queue_name);
                        return mz;
                }

                PMD_DRV_LOG(ERR, "Incompatible memzone already "
                                "allocated %s, size %u, socket %d. "
                                "Requested size %u, socket %d",
                                queue_name, (uint32_t)mz->len,
                                mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
                                        queue_name, queue_size, socket_id);
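        /*
         * The ring is DMA'd by the device, so its memory must be
         * physically contiguous. Ask for a memzone backed by the
         * hugepage size the system actually provides; if none of the
         * known sizes match, fall back to a size hint only.
         */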
        ms = rte_eal_get_physmem_layout();
        switch (ms[0].hugepage_sz) {
        case RTE_PGSIZE_2M:
                memzone_flags = RTE_MEMZONE_2MB;
                break;
        case RTE_PGSIZE_1G:
                memzone_flags = RTE_MEMZONE_1GB;
                break;
        case RTE_PGSIZE_16M:
                memzone_flags = RTE_MEMZONE_16MB;
                break;
        case RTE_PGSIZE_16G:
                memzone_flags = RTE_MEMZONE_16GB;
                break;
        default:
                memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
        }
        return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
                memzone_flags, queue_size);
}

int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id, struct rte_mempool *session_pool __rte_unused)
{
        struct qat_qp *qp;
        struct rte_pci_device *pci_dev;
        int ret;
        char op_cookie_pool_name[RTE_RING_NAMESIZE];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* If qp is already in use free ring memory and qp metadata. */
        if (dev->data->queue_pairs[queue_pair_id] != NULL) {
                ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
                if (ret < 0)
                        return ret;
        }

        if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
                return -EINVAL;
        }

        if (queue_pair_id >=
                        (ADF_NUM_SYM_QPS_PER_BUNDLE *
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
                return -EINVAL;
        }
        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
        qp->nb_descriptors = qp_conf->nb_descriptors;
        qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
                        qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
                        RTE_CACHE_LINE_SIZE);
        if (qp->op_cookies == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie array");
                rte_free(qp);
                return -ENOMEM;
        }

        qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
        qp->inflights16 = 0;

        if (qat_tx_queue_create(dev, &(qp->tx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Tx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                goto create_err;
        }

        if (qat_rx_queue_create(dev, &(qp->rx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Rx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }

        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
        snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
                pci_dev->driver->driver.name, dev->data->dev_id,
                queue_pair_id);

        qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
        if (qp->op_cookie_pool == NULL)
                qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
                                qp->nb_descriptors,
                                sizeof(struct qat_crypto_op_cookie), 64, 0,
                                NULL, NULL, NULL, NULL, socket_id,
                                0);
        if (!qp->op_cookie_pool) {
                PMD_DRV_LOG(ERR, "QAT PMD Cannot create op mempool");
                goto create_err;
        }

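        /*
         * Pre-compute the physical address of the SGL tables embedded in
         * each op cookie so that no virtual-to-physical translation is
         * needed on the data path when an op is enqueued.
         */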
        for (i = 0; i < qp->nb_descriptors; i++) {
                if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
                        goto create_err;
                }

                struct qat_crypto_op_cookie *sgl_cookie =
                                qp->op_cookies[i];

                sgl_cookie->qat_sgl_src_phys_addr =
                                rte_mempool_virt2phy(qp->op_cookie_pool,
                                sgl_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_src);

                sgl_cookie->qat_sgl_dst_phys_addr =
                                rte_mempool_virt2phy(qp->op_cookie_pool,
                                sgl_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_dst);
        }

        struct qat_pmd_private *internals
                = dev->data->dev_private;
        qp->qat_dev_gen = internals->qat_dev_gen;

        dev->data->queue_pairs[queue_pair_id] = qp;
        return 0;

create_err:
        rte_free(qp->op_cookies);
        rte_free(qp);
        return -EFAULT;
}
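
/*
 * Usage sketch (illustrative only; dev_id is assumed to be a valid QAT
 * cryptodev id). This function is not called directly: it is installed
 * in the cryptodev ops table and reached through the public API, e.g.:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 4096 };
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id(), NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "QAT qp setup failed\n");
 */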
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
                return 0;
        }

        /* Don't free memory if there are still responses to be processed. */
        if (qp->inflights16 != 0)
                return -EAGAIN;

        /* Stop the arbiter servicing this Tx ring before freeing memory. */
        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

        qat_queue_delete(&(qp->tx_q));
        qat_queue_delete(&(qp->rx_q));

        for (i = 0; i < qp->nb_descriptors; i++)
                rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);

        rte_free(qp->op_cookies);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
}

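/*
 * Ring numbering: queue pairs are spread across hardware bundles,
 * ADF_NUM_SYM_QPS_PER_BUNDLE per bundle. Within a bundle the symmetric
 * Tx rings start at ring ADF_SYM_TX_QUEUE_STARTOFF and the matching Rx
 * rings at ADF_SYM_RX_QUEUE_STARTOFF.
 */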
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t qp_id,
        uint32_t nb_desc, int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_TX_QUEUE_STARTOFF;
        PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);

        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

static int qat_rx_queue_create(struct rte_cryptodev *dev,
                struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
                int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_RX_QUEUE_STARTOFF;

        PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);
        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

static void qat_queue_delete(struct qat_queue *queue)
{
        const struct rte_memzone *mz;
        int status = 0;

        if (queue == NULL) {
                PMD_DRV_LOG(DEBUG, "Invalid queue");
                return;
        }
        mz = rte_memzone_lookup(queue->memz_name);
        if (mz != NULL) {
                /*
                 * Write an unused pattern to the queue memory;
                 * queue_size holds the CSR size encoding, so convert
                 * it back to bytes first.
                 */
                memset(queue->base_addr, 0x7F,
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(queue->queue_size));
                status = rte_memzone_free(mz);
                if (status != 0)
                        PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
                                        status, queue->memz_name);
        } else {
                PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
                                queue->memz_name);
        }
}

static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
                uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
        uint32_t queue_size_bytes = nb_desc * desc_size;
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        /*
         * Allocate a memzone for the queue - create a unique name.
         */
        snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
                pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
        }

        queue->base_addr = (char *)qp_mz->addr;
        queue->base_phys_addr = qp_mz->iova;
        if (qat_qp_check_queue_alignment(queue->base_phys_addr,
                        queue_size_bytes)) {
                PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
                                        "0x%"PRIx64"\n",
                                        queue->base_phys_addr);
                rte_memzone_free(qp_mz);
                return -EFAULT;
        }

        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid ring size %u bytes",
                                queue_size_bytes);
                rte_memzone_free(qp_mz);
                return -EINVAL;
        }

        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
                                        ADF_BYTES_TO_MSG_SIZE(desc_size));
        queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
        PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
                                " msg_size %u, max_inflights %u modulo %u",
                                queue->queue_size, queue_size_bytes,
                                nb_desc, desc_size, queue->max_inflights,
                                queue->modulo);

        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                rte_memzone_free(qp_mz);
                return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
        queue->msg_size = desc_size;

        /*
         * Write an unused pattern to the queue memory.
         */
        memset(queue->base_addr, 0x7F, queue_size_bytes);

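        /*
         * BUILD_RING_BASE_ADDR packs the ring's physical base address and
         * its size encoding into the single value the ring-base CSR
         * expects, which is why the base address must be naturally aligned
         * to the ring size (checked above).
         */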
        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                                        queue->queue_size);

        io_addr = pci_dev->mem_resource[0].addr;

        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_base);
        return 0;
}

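/*
 * A ring's physical base address must be naturally aligned to the ring
 * size in bytes, i.e. none of the low-order address bits covered by
 * (queue_size_bytes - 1) may be set.
 */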
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
                                        uint32_t queue_size_bytes)
{
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
                return -EINVAL;
        return 0;
}

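/*
 * The ring size is programmed into the CSR as a small encoded exponent;
 * scan the valid encodings for the one whose byte size matches
 * msg_size * msg_num exactly.
 */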
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *p_queue_size_for_csr)
{
        uint8_t i = ADF_MIN_RING_SIZE;

        PMD_INIT_FUNC_TRACE();
        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) ==
                                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
                        *p_queue_size_for_csr = i;
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
        return -EINVAL;
}

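/*
 * Arbitration is controlled by one bit per ring in the bundle's arbiter
 * enable register: set the bit to let the arbiter service the ring,
 * clear it to stop servicing.
 */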
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        /*
         * Clear (not toggle) the enable bit: XOR would re-enable the
         * ring if it were already disabled.
         */
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_configure_queues(struct qat_qp *qp)
{
        uint32_t queue_config;
        struct qat_queue *queue = &qp->tx_q;

        PMD_INIT_FUNC_TRACE();
        queue_config = BUILD_RING_CONFIG(queue->queue_size);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);

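        /*
         * The response-ring config additionally encodes near-full and
         * near-empty watermark thresholds (512 and 0 here).
         */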
        queue = &qp->rx_q;
        queue_config =
                        BUILD_RESP_RING_CONFIG(queue->queue_size,
                                        ADF_RING_NEAR_WATERMARK_512,
                                        ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);
}