dpdk.git: drivers/crypto/qat/qat_qp.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC                        4096
#define ADF_MIN_SYM_DESC                        128
#define ADF_SYM_TX_RING_DESC_SIZE               128
#define ADF_SYM_RX_RING_DESC_SIZE               32
#define ADF_SYM_TX_QUEUE_STARTOFF               2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF               10
#define ADF_ARB_REG_SLOT                        0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C

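/*
 * Write a bundle's ring-arbiter service-enable CSR. Each bundle's arbiter
 * register is ADF_ARB_REG_SLOT bytes apart, starting at
 * ADF_ARB_RINGSRVARBEN_OFFSET; 'value' is a bitmask of the rings the
 * arbiter should service.
 */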
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
        uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
        int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

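/*
 * Reserve (or re-use) a DMA-able memzone for a hardware ring. An existing
 * memzone with the same name is re-used if it is large enough and on a
 * compatible socket; otherwise a new zone is reserved, aligned to the ring
 * size, using the system's hugepage size as a placement hint.
 */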
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
                        int socket_id)
{
        const struct rte_memzone *mz;
        unsigned int memzone_flags = 0;
        const struct rte_memseg *ms;

        PMD_INIT_FUNC_TRACE();
        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                                ((socket_id == SOCKET_ID_ANY) ||
                                        (socket_id == mz->socket_id))) {
                        PMD_DRV_LOG(DEBUG, "re-use memzone already "
                                        "allocated for %s", queue_name);
                        return mz;
                }

                PMD_DRV_LOG(ERR, "Incompatible memzone already "
                                "allocated %s, size %u, socket %d. "
                                "Requested size %u, socket %d",
                                queue_name, (uint32_t)mz->len,
                                mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
                                        queue_name, queue_size, socket_id);
        ms = rte_eal_get_physmem_layout();
        switch (ms[0].hugepage_sz) {
        case(RTE_PGSIZE_2M):
                memzone_flags = RTE_MEMZONE_2MB;
                break;
        case(RTE_PGSIZE_1G):
                memzone_flags = RTE_MEMZONE_1GB;
                break;
        case(RTE_PGSIZE_16M):
                memzone_flags = RTE_MEMZONE_16MB;
                break;
        case(RTE_PGSIZE_16G):
                memzone_flags = RTE_MEMZONE_16GB;
                break;
        default:
                memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
        }
#ifdef RTE_LIBRTE_XEN_DOM0
        return rte_memzone_reserve_bounded(queue_name, queue_size,
                socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
        return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
                memzone_flags, queue_size);
#endif
}

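/*
 * Set up a symmetric crypto queue pair: allocate the qp structure, create
 * the Tx (request) and Rx (response) rings on the given socket and enable
 * ring arbitration for the Tx ring. Typically reached from the application
 * through the cryptodev API, e.g. (sketch, assuming a probed QAT device
 * with id dev_id):
 *
 *     struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 4096 };
 *
 *     rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
 */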
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id)
{
        struct qat_qp *qp;
        int ret;

        PMD_INIT_FUNC_TRACE();

        /* If qp is already in use free ring memory and qp metadata. */
        if (dev->data->queue_pairs[queue_pair_id] != NULL) {
                ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
                if (ret < 0)
                        return ret;
        }

        if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
                return -EINVAL;
        }

        if (dev->pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
                return -EINVAL;
        }

        if (queue_pair_id >=
                        (ADF_NUM_SYM_QPS_PER_BUNDLE *
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
                return -EINVAL;
        }

        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
        qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
        rte_atomic16_init(&qp->inflights16);

        if (qat_tx_queue_create(dev, &(qp->tx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Tx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                goto create_err;
        }

        if (qat_rx_queue_create(dev, &(qp->rx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Rx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }
        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
        dev->data->queue_pairs[queue_pair_id] = qp;
        return 0;

create_err:
        rte_free(qp);
        return -EFAULT;
}

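/*
 * Release a queue pair. The rings are only deleted once all in-flight
 * requests have been drained (inflights16 == 0); otherwise -EAGAIN is
 * returned and the caller is expected to retry after processing the
 * outstanding responses.
 */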
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];

        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
                return 0;
        }

        /* Don't free memory if there are still responses to be processed */
        if (rte_atomic16_read(&(qp->inflights16)) == 0) {
                qat_queue_delete(&(qp->tx_q));
                qat_queue_delete(&(qp->rx_q));
        } else {
                return -EAGAIN;
        }

        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
}

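/*
 * Map a qp_id onto the Tx (request) hardware ring of the owning bundle and
 * create the ring. Sym Tx rings start ADF_SYM_TX_QUEUE_STARTOFF rings into
 * each bundle.
 */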
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t qp_id,
        uint32_t nb_desc, int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_TX_QUEUE_STARTOFF;
        PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);

        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

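/*
 * Map a qp_id onto the Rx (response) hardware ring of the owning bundle and
 * create the ring. Sym Rx rings start ADF_SYM_RX_QUEUE_STARTOFF rings into
 * each bundle.
 */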
static int qat_rx_queue_create(struct rte_cryptodev *dev,
                struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
                int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_RX_QUEUE_STARTOFF;

        PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);
        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

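/*
 * Free the memzone backing a ring. The ring memory is first overwritten
 * with the unused marker pattern (0x7F) so stale descriptors cannot be
 * mistaken for valid messages.
 */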
static void qat_queue_delete(struct qat_queue *queue)
{
        const struct rte_memzone *mz;
        int status = 0;

        if (queue == NULL) {
                PMD_DRV_LOG(DEBUG, "Invalid queue");
                return;
        }
        mz = rte_memzone_lookup(queue->memz_name);
        if (mz != NULL) {
                /* Write an unused pattern to the queue memory. */
                memset(queue->base_addr, 0x7F, queue->queue_size);
                status = rte_memzone_free(mz);
                if (status != 0)
                        PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
                                        status, queue->memz_name);
        } else {
                PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
                                queue->memz_name);
        }
}

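/*
 * Allocate the DMA memory for a ring, validate its size and alignment (the
 * physical base address must be naturally aligned to the ring size in bytes,
 * as the ring-base CSR encoding requires) and program the ring base register
 * for the selected bundle/ring.
 */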
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
                uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
        uint32_t queue_size_bytes = nb_desc * desc_size;

        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
                return -EINVAL;
        }

        /*
         * Allocate a memzone for the queue - create a unique name.
         */
        snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
                dev->driver->pci_drv.name, "qp_mem", dev->data->dev_id,
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
        }

        queue->base_addr = (char *)qp_mz->addr;
        queue->base_phys_addr = qp_mz->phys_addr;
        if (qat_qp_check_queue_alignment(queue->base_phys_addr,
                        queue_size_bytes)) {
                PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
                                        "0x%"PRIx64,
                                        queue->base_phys_addr);
                return -EFAULT;
        }

        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid queue size");
                return -EINVAL;
        }

        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
                                        ADF_BYTES_TO_MSG_SIZE(desc_size));
        queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
        PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
                                " msg_size %u, max_inflights %u modulo %u",
                                queue->queue_size, queue_size_bytes,
                                nb_desc, desc_size, queue->max_inflights,
                                queue->modulo);

        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
        queue->msg_size = desc_size;

        /*
         * Write an unused pattern to the queue memory.
         */
        memset(queue->base_addr, 0x7F, queue_size_bytes);

        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                                        queue->queue_size);
        io_addr = dev->pci_dev->mem_resource[0].addr;

        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_base);
        return 0;
}

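/*
 * A ring's physical base address must be naturally aligned to its size in
 * bytes: none of the low address bits covered by the size may be set. The
 * check assumes the ring size is a power of two.
 */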
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
                                        uint32_t queue_size_bytes)
{
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
                return -EINVAL;
        return 0;
}

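/*
 * Translate a (message size, message count) pair into the ring-size code
 * expected by the ring CSRs. Only exact matches against the sizes the
 * hardware supports (ADF_MIN_RING_SIZE..ADF_MAX_RING_SIZE) are accepted.
 */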
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *p_queue_size_for_csr)
{
        uint8_t i = ADF_MIN_RING_SIZE;

        PMD_INIT_FUNC_TRACE();
        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) ==
                                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
                        *p_queue_size_for_csr = i;
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
        return -EINVAL;
}

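/*
 * Set the Tx ring's bit in its bundle's ring-arbiter service-enable CSR so
 * the hardware arbiter starts servicing requests placed on the ring.
 */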
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

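/*
 * Clear the Tx ring's bit in its bundle's ring-arbiter service-enable CSR so
 * the hardware arbiter stops servicing the ring.
 */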
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

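/*
 * Program the ring-config CSRs for a queue pair: the Tx (request) ring uses
 * the plain ring configuration, while the Rx (response) ring additionally
 * sets near-full/near-empty watermarks.
 */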
static void adf_configure_queues(struct qat_qp *qp)
{
        uint32_t queue_config;
        struct qat_queue *queue = &qp->tx_q;

        PMD_INIT_FUNC_TRACE();
        queue_config = BUILD_RING_CONFIG(queue->queue_size);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);

        queue = &qp->rx_q;
        queue_config =
                        BUILD_RESP_RING_CONFIG(queue->queue_size,
                                        ADF_RING_NEAR_WATERMARK_512,
                                        ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);
}