dpdk.git: drivers/crypto/qat/qat_qp.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "qat_algs.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC                        4096
#define ADF_MIN_SYM_DESC                        128
#define ADF_SYM_TX_RING_DESC_SIZE               128
#define ADF_SYM_RX_RING_DESC_SIZE               32
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_TX_QUEUE_STARTOFF               2
#define ADF_SYM_RX_QUEUE_STARTOFF               10
#define ADF_ARB_REG_SLOT                        0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C

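/*
 * The ring-service arbiter exposes one enable CSR per bundle at
 * ADF_ARB_RINGSRVARBEN_OFFSET, spaced ADF_ARB_REG_SLOT apart, with one
 * bit per ring in the bundle (see adf_queue_arb_enable()/_disable() below).
 */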
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
        uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
        int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *qp);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

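/*
 * Reserve a DMA-able memzone for a ring. An existing memzone with the same
 * name is reused when it is large enough and on the requested socket;
 * otherwise a new zone is reserved, aligned to its own size, since the ring
 * base written to the CSR must be naturally aligned (see
 * qat_qp_check_queue_alignment()).
 */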
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
                        int socket_id)
{
        const struct rte_memzone *mz;
        unsigned memzone_flags = 0;
        const struct rte_memseg *ms;

        PMD_INIT_FUNC_TRACE();
        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                                ((socket_id == SOCKET_ID_ANY) ||
                                        (socket_id == mz->socket_id))) {
                        PMD_DRV_LOG(DEBUG, "re-use memzone already "
                                        "allocated for %s", queue_name);
                        return mz;
                }

                PMD_DRV_LOG(ERR, "Incompatible memzone already "
                                "allocated %s, size %u, socket %d. "
                                "Requested size %u, socket %d",
                                queue_name, (uint32_t)mz->len,
                                mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
                                        queue_name, queue_size, socket_id);
        ms = rte_eal_get_physmem_layout();
        switch (ms[0].hugepage_sz) {
        case(RTE_PGSIZE_2M):
                memzone_flags = RTE_MEMZONE_2MB;
        break;
        case(RTE_PGSIZE_1G):
                memzone_flags = RTE_MEMZONE_1GB;
        break;
        case(RTE_PGSIZE_16M):
                memzone_flags = RTE_MEMZONE_16MB;
        break;
        case(RTE_PGSIZE_16G):
                memzone_flags = RTE_MEMZONE_16GB;
        break;
        default:
                memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
        }
        return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
                memzone_flags, queue_size);
}

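/*
 * Set up one symmetric-crypto queue pair: a Tx (request) ring and an Rx
 * (response) ring on the same bundle, plus a mempool of per-op cookies
 * holding pre-computed physical addresses of the SGL scratch areas.
 *
 * A minimal usage sketch; the descriptor count is illustrative and must
 * satisfy adf_verify_queue_size():
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 4096 };
 *
 *	if (qat_crypto_sym_qp_setup(dev, 0, &qp_conf, rte_socket_id(), NULL))
 *		rte_exit(EXIT_FAILURE, "qp setup failed\n");
 */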
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id, struct rte_mempool *session_pool __rte_unused)
{
        struct qat_qp *qp;
        struct rte_pci_device *pci_dev;
        int ret;
        char op_cookie_pool_name[RTE_RING_NAMESIZE];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* If qp is already in use free ring memory and qp metadata. */
        if (dev->data->queue_pairs[queue_pair_id] != NULL) {
                ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
                if (ret < 0)
                        return ret;
        }

        if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
                return -EINVAL;
        }

        if (queue_pair_id >=
                        (ADF_NUM_SYM_QPS_PER_BUNDLE *
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
                return -EINVAL;
        }
        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
        qp->nb_descriptors = qp_conf->nb_descriptors;
        qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
                        qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
                        RTE_CACHE_LINE_SIZE);
        if (qp->op_cookies == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie array");
                rte_free(qp);
                return -ENOMEM;
        }

        qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
        qp->inflights16 = 0;

        if (qat_tx_queue_create(dev, &(qp->tx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_INIT_LOG(ERR, "Tx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                goto create_err;
        }

        if (qat_rx_queue_create(dev, &(qp->rx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Rx queue create failed "
                                "queue_pair_id=%hu", queue_pair_id);
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }

        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
        snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
                pci_dev->driver->driver.name, dev->data->dev_id,
                queue_pair_id);

        qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
        if (qp->op_cookie_pool == NULL)
                qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
                                qp->nb_descriptors,
                                sizeof(struct qat_crypto_op_cookie), 64, 0,
                                NULL, NULL, NULL, NULL, socket_id,
                                0);
        if (!qp->op_cookie_pool) {
                PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
                                " op mempool");
                goto create_err;
        }

        for (i = 0; i < qp->nb_descriptors; i++) {
                if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
                        goto create_err;
                }

                struct qat_crypto_op_cookie *sgl_cookie =
                                qp->op_cookies[i];

                /* Pre-compute the physical addresses of the src/dst SGL
                 * scratch areas so the data path need not translate them
                 * per operation.
                 */
                sgl_cookie->qat_sgl_src_phys_addr =
                                rte_mempool_virt2phy(qp->op_cookie_pool,
                                sgl_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_src);

                sgl_cookie->qat_sgl_dst_phys_addr =
                                rte_mempool_virt2phy(qp->op_cookie_pool,
                                sgl_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_dst);
        }

        struct qat_pmd_private *internals
                = dev->data->dev_private;
        qp->qat_dev_gen = internals->qat_dev_gen;

        dev->data->queue_pairs[queue_pair_id] = qp;
        return 0;

create_err:
        rte_free(qp->op_cookies);
        rte_free(qp);
        return -EFAULT;
}

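/*
 * Release a queue pair. Refuses with -EAGAIN while responses are still in
 * flight; otherwise frees the rings, disables arbitration for the Tx ring,
 * and returns the op cookies to their mempool before freeing the metadata.
 */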
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
                return 0;
        }

        /* Don't free memory if there are still responses to be processed */
        if (qp->inflights16 == 0) {
                qat_queue_delete(&(qp->tx_q));
                qat_queue_delete(&(qp->rx_q));
        } else {
                return -EAGAIN;
        }

        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

        for (i = 0; i < qp->nb_descriptors; i++)
                rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);

        rte_free(qp->op_cookies);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
}

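/*
 * Map a queue-pair id onto hardware ring coordinates: the bundle number is
 * qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE, and the ring number within the bundle
 * is the remainder plus the Tx or Rx start offset.
 */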
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t qp_id,
        uint32_t nb_desc, int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_TX_QUEUE_STARTOFF;
        PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);

        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

static int qat_rx_queue_create(struct rte_cryptodev *dev,
                struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
                int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_RX_QUEUE_STARTOFF;

        PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);
        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

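/*
 * Free the memzone backing a ring. The ring memory is overwritten with an
 * unused marker pattern first so stale descriptors cannot be mistaken for
 * valid ones if the zone is ever reused.
 */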
static void qat_queue_delete(struct qat_queue *queue)
{
        const struct rte_memzone *mz;
        int status = 0;

        if (queue == NULL) {
                PMD_DRV_LOG(DEBUG, "Invalid queue");
                return;
        }
        mz = rte_memzone_lookup(queue->memz_name);
        if (mz != NULL) {
                /* Write an unused pattern to the queue memory; queue_size
                 * holds the CSR size encoding, so convert it back to bytes.
                 */
                memset(queue->base_addr, 0x7F,
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(queue->queue_size));
                status = rte_memzone_free(mz);
                if (status != 0)
                        PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
                                        status, queue->memz_name);
        } else {
                PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
                                queue->memz_name);
        }
}

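/*
 * Create one hardware ring: reserve DMA memory, validate its alignment and
 * size, derive max_inflights and the wrap-around modulo, then program the
 * ring base address CSR.
 */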
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
                uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
        uint32_t queue_size_bytes = nb_desc * desc_size;
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        /*
         * Allocate a memzone for the queue - create a unique name.
         */
        snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
                pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
        }

        queue->base_addr = (char *)qp_mz->addr;
        queue->base_phys_addr = qp_mz->phys_addr;
        if (qat_qp_check_queue_alignment(queue->base_phys_addr,
                        queue_size_bytes)) {
                PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
                                        " 0x%"PRIx64"\n",
                                        queue->base_phys_addr);
                return -EFAULT;
        }

        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }

        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
                                        ADF_BYTES_TO_MSG_SIZE(desc_size));
        queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
        PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
                                " msg_size %u, max_inflights %u modulo %u",
                                queue->queue_size, queue_size_bytes,
                                nb_desc, desc_size, queue->max_inflights,
                                queue->modulo);

        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
        queue->msg_size = desc_size;

        /*
         * Write an unused pattern to the queue memory.
         */
        memset(queue->base_addr, 0x7F, queue_size_bytes);

        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                                        queue->queue_size);

        io_addr = pci_dev->mem_resource[0].addr;

        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_base);
        return 0;
}

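/*
 * The hardware requires the ring base to be naturally aligned to the ring
 * size (a power of two), so any low bit set within (size - 1) means the
 * address is misaligned; e.g. a 16 KB ring must start on a 16 KB boundary.
 */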
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
                                        uint32_t queue_size_bytes)
{
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
                return -EINVAL;
        return 0;
}

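/*
 * Translate a ring size in bytes into the power-of-two encoding the CSRs
 * expect: find the exponent i with msg_size * msg_num ==
 * ADF_SIZE_TO_RING_SIZE_IN_BYTES(i). Sizes without an exact match are
 * rejected.
 */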
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *p_queue_size_for_csr)
{
        uint8_t i = ADF_MIN_RING_SIZE;

        PMD_INIT_FUNC_TRACE();
        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) ==
                                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
                        *p_queue_size_for_csr = i;
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
        return -EINVAL;
}

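/*
 * Set or clear this Tx ring's bit in the per-bundle ring-service arbiter
 * enable CSR via a read-modify-write.
 */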
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        /* Clear the bit; AND-NOT is idempotent, whereas XOR would re-enable
         * the ring if disable were ever called twice.
         */
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

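/*
 * Program the ring-config CSRs: plain sizing for the request (Tx) ring and
 * near-watermarks for the response (Rx) ring.
 */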
static void adf_configure_queues(struct qat_qp *qp)
{
        uint32_t queue_config;
        struct qat_queue *queue = &qp->tx_q;

        PMD_INIT_FUNC_TRACE();
        queue_config = BUILD_RING_CONFIG(queue->queue_size);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);

        queue = &qp->rx_q;
        queue_config =
                        BUILD_RESP_RING_CONFIG(queue->queue_size,
                                        ADF_RING_NEAR_WATERMARK_512,
                                        ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);
}