4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_common.h>
36 #include <rte_malloc.h>
37 #include <rte_memzone.h>
38 #include <rte_cryptodev_pmd.h>
40 #include <rte_atomic.h>
41 #include <rte_prefetch.h>
44 #include "qat_crypto.h"
46 #include "adf_transport_access_macros.h"
/*
 * Ring sizing and arbiter-CSR constants for the QAT symmetric-crypto PMD.
 * NOTE(review): every line below starts with a fused source line number
 * from a mangled paste; the numbers are not part of the real code.
 */
/* Hard bounds on the number of descriptors per Sym queue pair. */
48 #define ADF_MAX_SYM_DESC 4096
49 #define ADF_MIN_SYM_DESC 128
/* Per-descriptor sizes in bytes for the request (Tx) and response (Rx) rings. */
50 #define ADF_SYM_TX_RING_DESC_SIZE 128
51 #define ADF_SYM_RX_RING_DESC_SIZE 32
52 #define ADF_SYM_TX_QUEUE_STARTOFF 2
53 /* Offset from bundle start to 1st Sym Tx queue */
54 #define ADF_SYM_RX_QUEUE_STARTOFF 10
/* Stride between per-bundle arbiter register slots. */
55 #define ADF_ARB_REG_SLOT 0x1000
/* Ring-service-arbiter enable register offset within a bundle's CSR page. */
56 #define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
/* Write the arbiter-enable CSR for bundle `index`. */
58 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
59 ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
60 (ADF_ARB_REG_SLOT * index), value)
/*
 * Forward declarations for the static helpers defined later in this file.
 * NOTE(review): mangled paste — the trailing parameter lines (e.g. the
 * "int socket_id);" continuations) appear to have been dropped; compare
 * against the definitions below before trusting these prototypes.
 */
62 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
63 uint32_t queue_size_bytes);
64 static int qat_tx_queue_create(struct rte_cryptodev *dev,
65 struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
67 static int qat_rx_queue_create(struct rte_cryptodev *dev,
68 struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
70 static void qat_queue_delete(struct qat_queue *queue);
71 static int qat_queue_create(struct rte_cryptodev *dev,
72 struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
74 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
75 uint32_t *queue_size_for_csr);
76 static void adf_configure_queues(struct qat_qp *queue);
77 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
78 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
80 static const struct rte_memzone *
81 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
84 const struct rte_memzone *mz;
85 unsigned memzone_flags = 0;
86 const struct rte_memseg *ms;
88 PMD_INIT_FUNC_TRACE();
89 mz = rte_memzone_lookup(queue_name);
91 if (((size_t)queue_size <= mz->len) &&
92 ((socket_id == SOCKET_ID_ANY) ||
93 (socket_id == mz->socket_id))) {
94 PMD_DRV_LOG(DEBUG, "re-use memzone already "
95 "allocated for %s", queue_name);
99 PMD_DRV_LOG(ERR, "Incompatible memzone already "
100 "allocated %s, size %u, socket %d. "
101 "Requested size %u, socket %u",
102 queue_name, (uint32_t)mz->len,
103 mz->socket_id, queue_size, socket_id);
107 PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
108 queue_name, queue_size, socket_id);
109 ms = rte_eal_get_physmem_layout();
110 switch (ms[0].hugepage_sz) {
112 memzone_flags = RTE_MEMZONE_2MB;
115 memzone_flags = RTE_MEMZONE_1GB;
117 case(RTE_PGSIZE_16M):
118 memzone_flags = RTE_MEMZONE_16MB;
120 case(RTE_PGSIZE_16G):
121 memzone_flags = RTE_MEMZONE_16GB;
124 memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
126 #ifdef RTE_LIBRTE_XEN_DOM0
127 return rte_memzone_reserve_bounded(queue_name, queue_size,
128 socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
130 return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
131 memzone_flags, queue_size);
/*
 * Create and configure one QAT symmetric-crypto queue pair: validates the
 * request, allocates qp metadata, creates the Tx/Rx hardware rings, enables
 * arbitration, and builds a per-descriptor op-cookie mempool with
 * precomputed SGL physical addresses.
 * NOTE(review): mangled paste — each line carries a fused original line
 * number and several lines (local declarations, error "return" statements,
 * the "create_err" cleanup label and closing braces) appear to have been
 * dropped.  Comments below annotate only the visible logic.
 */
135 int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
136 const struct rte_cryptodev_qp_conf *qp_conf,
137 int socket_id, struct rte_mempool *session_pool __rte_unused)
140 struct rte_pci_device *pci_dev;
142 char op_cookie_pool_name[RTE_RING_NAMESIZE];
145 PMD_INIT_FUNC_TRACE();
147 /* If qp is already in use free ring memory and qp metadata. */
148 if (dev->data->queue_pairs[queue_pair_id] != NULL) {
149 ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
/* Descriptor count must lie within [ADF_MIN_SYM_DESC, ADF_MAX_SYM_DESC]. */
154 if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
155 (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
156 PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
157 qp_conf->nb_descriptors);
/* BAR 0 must be mapped — otherwise no UIO/VFIO driver is bound to the VF. */
161 pci_dev = RTE_DEV_TO_PCI(dev->device);
163 if (pci_dev->mem_resource[0].addr == NULL) {
164 PMD_DRV_LOG(ERR, "Could not find VF config space "
165 "(UIO driver attached?).")
/* qp_id must fit inside the device's bundle/queue topology. */
170 (ADF_NUM_SYM_QPS_PER_BUNDLE *
171 ADF_NUM_BUNDLES_PER_DEV)) {
172 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
176 /* Allocate the queue pair data structure. */
177 qp = rte_zmalloc("qat PMD qp metadata",
178 sizeof(*qp), RTE_CACHE_LINE_SIZE);
180 PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
/* One op-cookie pointer per descriptor. */
183 qp->nb_descriptors = qp_conf->nb_descriptors;
184 qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
185 qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
186 RTE_CACHE_LINE_SIZE);
188 qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
189 rte_atomic16_init(&qp->inflights16);
/* Create the request (Tx) and response (Rx) hardware rings. */
191 if (qat_tx_queue_create(dev, &(qp->tx_q),
192 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
193 PMD_INIT_LOG(ERR, "Tx queue create failed "
194 "queue_pair_id=%u", queue_pair_id);
198 if (qat_rx_queue_create(dev, &(qp->rx_q),
199 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
200 PMD_DRV_LOG(ERR, "Rx queue create failed "
201 "queue_pair_id=%hu", queue_pair_id);
202 qat_queue_delete(&(qp->tx_q));
/* Program ring-config CSRs and enable arbitration for the Tx ring. */
206 adf_configure_queues(qp);
207 adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
/* Per-qp cookie pool name must be unique: driver/dev_id/qp_id. */
208 snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
209 pci_dev->driver->driver.name, dev->data->dev_id,
/* Re-use an existing pool (e.g. after restart) or create a fresh one. */
212 qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
213 if (qp->op_cookie_pool == NULL)
214 qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
216 sizeof(struct qat_crypto_op_cookie), 64, 0,
217 NULL, NULL, NULL, NULL, socket_id,
219 if (!qp->op_cookie_pool) {
220 PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
/* Pre-allocate every cookie and cache its SGL physical addresses so the
 * datapath never computes them per-op. */
225 for (i = 0; i < qp->nb_descriptors; i++) {
226 if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
227 PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
231 struct qat_crypto_op_cookie *sql_cookie =
234 sql_cookie->qat_sgl_src_phys_addr =
235 rte_mempool_virt2phy(qp->op_cookie_pool,
237 offsetof(struct qat_crypto_op_cookie,
240 sql_cookie->qat_sgl_dst_phys_addr =
241 rte_mempool_virt2phy(qp->op_cookie_pool,
243 offsetof(struct qat_crypto_op_cookie,
/* Publish the fully-initialised qp. */
246 dev->data->queue_pairs[queue_pair_id] = qp;
254 int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
257 (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
260 PMD_INIT_FUNC_TRACE();
262 PMD_DRV_LOG(DEBUG, "qp already freed");
266 /* Don't free memory if there are still responses to be processed */
267 if (rte_atomic16_read(&(qp->inflights16)) == 0) {
268 qat_queue_delete(&(qp->tx_q));
269 qat_queue_delete(&(qp->rx_q));
274 adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
276 for (i = 0; i < qp->nb_descriptors; i++)
277 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
279 if (qp->op_cookie_pool)
280 rte_mempool_free(qp->op_cookie_pool);
282 rte_free(qp->op_cookies);
284 dev->data->queue_pairs[queue_pair_id] = NULL;
288 static int qat_tx_queue_create(struct rte_cryptodev *dev,
289 struct qat_queue *queue, uint8_t qp_id,
290 uint32_t nb_desc, int socket_id)
292 PMD_INIT_FUNC_TRACE();
293 queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
294 queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
295 ADF_SYM_TX_QUEUE_STARTOFF;
296 PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
297 nb_desc, qp_id, queue->hw_bundle_number,
298 queue->hw_queue_number);
300 return qat_queue_create(dev, queue, nb_desc,
301 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
304 static int qat_rx_queue_create(struct rte_cryptodev *dev,
305 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
308 PMD_INIT_FUNC_TRACE();
309 queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
310 queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
311 ADF_SYM_RX_QUEUE_STARTOFF;
313 PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
314 nb_desc, qp_id, queue->hw_bundle_number,
315 queue->hw_queue_number);
316 return qat_queue_create(dev, queue, nb_desc,
317 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
320 static void qat_queue_delete(struct qat_queue *queue)
322 const struct rte_memzone *mz;
326 PMD_DRV_LOG(DEBUG, "Invalid queue");
329 mz = rte_memzone_lookup(queue->memz_name);
331 /* Write an unused pattern to the queue memory. */
332 memset(queue->base_addr, 0x7F, queue->queue_size);
333 status = rte_memzone_free(mz);
335 PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
336 status, queue->memz_name);
338 PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
344 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
345 uint32_t nb_desc, uint8_t desc_size, int socket_id)
349 const struct rte_memzone *qp_mz;
350 uint32_t queue_size_bytes = nb_desc*desc_size;
351 struct rte_pci_device *pci_dev;
353 PMD_INIT_FUNC_TRACE();
354 if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
355 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
359 pci_dev = RTE_DEV_TO_PCI(dev->device);
362 * Allocate a memzone for the queue - create a unique name.
364 snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
365 pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
366 queue->hw_bundle_number, queue->hw_queue_number);
367 qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
370 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
374 queue->base_addr = (char *)qp_mz->addr;
375 queue->base_phys_addr = qp_mz->phys_addr;
376 if (qat_qp_check_queue_alignment(queue->base_phys_addr,
378 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
380 queue->base_phys_addr);
384 if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
386 PMD_DRV_LOG(ERR, "Invalid num inflights");
390 queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
391 ADF_BYTES_TO_MSG_SIZE(desc_size));
392 queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
393 PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
394 " msg_size %u, max_inflights %u modulo %u",
395 queue->queue_size, queue_size_bytes,
396 nb_desc, desc_size, queue->max_inflights,
399 if (queue->max_inflights < 2) {
400 PMD_DRV_LOG(ERR, "Invalid num inflights");
405 queue->msg_size = desc_size;
408 * Write an unused pattern to the queue memory.
410 memset(queue->base_addr, 0x7F, queue_size_bytes);
412 queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
415 io_addr = pci_dev->mem_resource[0].addr;
417 WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
418 queue->hw_queue_number, queue_base);
/*
 * Verify the ring base physical address is naturally aligned to the ring
 * size (ring sizes are powers of two, so no address bit below the size may
 * be set).  Returns 0 if aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
431 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
432 uint32_t *p_queue_size_for_csr)
434 uint8_t i = ADF_MIN_RING_SIZE;
436 PMD_INIT_FUNC_TRACE();
437 for (; i <= ADF_MAX_RING_SIZE; i++)
438 if ((msg_size * msg_num) ==
439 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
440 *p_queue_size_for_csr = i;
443 PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
447 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
449 uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
451 txq->hw_bundle_number);
454 PMD_INIT_FUNC_TRACE();
455 value = ADF_CSR_RD(base_addr, arb_csr_offset);
456 value |= (0x01 << txq->hw_queue_number);
457 ADF_CSR_WR(base_addr, arb_csr_offset, value);
460 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
462 uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
464 txq->hw_bundle_number);
467 PMD_INIT_FUNC_TRACE();
468 value = ADF_CSR_RD(base_addr, arb_csr_offset);
469 value ^= (0x01 << txq->hw_queue_number);
470 ADF_CSR_WR(base_addr, arb_csr_offset, value);
473 static void adf_configure_queues(struct qat_qp *qp)
475 uint32_t queue_config;
476 struct qat_queue *queue = &qp->tx_q;
478 PMD_INIT_FUNC_TRACE();
479 queue_config = BUILD_RING_CONFIG(queue->queue_size);
481 WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
482 queue->hw_queue_number, queue_config);
486 BUILD_RESP_RING_CONFIG(queue->queue_size,
487 ADF_RING_NEAR_WATERMARK_512,
488 ADF_RING_NEAR_WATERMARK_0);
490 WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
491 queue->hw_queue_number, queue_config);