1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2018 Intel Corporation
5 #include <rte_common.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_cryptodev_pmd.h>
11 #include <rte_bus_pci.h>
12 #include <rte_atomic.h>
13 #include <rte_prefetch.h>
17 #include "adf_transport_access_macros.h"
/* Descriptor-count limits accepted by qat_sym_qp_setup(). */
#define ADF_MAX_SYM_DESC 4096
#define ADF_MIN_SYM_DESC 128
/* Hardware descriptor sizes, in bytes, for the Sym Tx/Rx rings. */
#define ADF_SYM_TX_RING_DESC_SIZE 128
#define ADF_SYM_RX_RING_DESC_SIZE 32
#define ADF_SYM_TX_QUEUE_STARTOFF 2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF 10
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

/* Write the ring-service-arbiter enable CSR for the given bundle index. */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

/* Forward declarations of the static helpers defined below. */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
51 static const struct rte_memzone *
52 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
55 const struct rte_memzone *mz;
57 PMD_INIT_FUNC_TRACE();
58 mz = rte_memzone_lookup(queue_name);
60 if (((size_t)queue_size <= mz->len) &&
61 ((socket_id == SOCKET_ID_ANY) ||
62 (socket_id == mz->socket_id))) {
63 PMD_DRV_LOG(DEBUG, "re-use memzone already "
64 "allocated for %s", queue_name);
68 PMD_DRV_LOG(ERR, "Incompatible memzone already "
69 "allocated %s, size %u, socket %d. "
70 "Requested size %u, socket %u",
71 queue_name, (uint32_t)mz->len,
72 mz->socket_id, queue_size, socket_id);
76 PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
77 queue_name, queue_size, socket_id);
78 return rte_memzone_reserve_aligned(queue_name, queue_size,
79 socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
82 int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
83 const struct rte_cryptodev_qp_conf *qp_conf,
84 int socket_id, struct rte_mempool *session_pool __rte_unused)
87 struct rte_pci_device *pci_dev;
89 char op_cookie_pool_name[RTE_RING_NAMESIZE];
92 PMD_INIT_FUNC_TRACE();
94 /* If qp is already in use free ring memory and qp metadata. */
95 if (dev->data->queue_pairs[queue_pair_id] != NULL) {
96 ret = qat_sym_qp_release(dev, queue_pair_id);
101 if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
102 (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
103 PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
104 qp_conf->nb_descriptors);
108 pci_dev = RTE_DEV_TO_PCI(dev->device);
110 if (pci_dev->mem_resource[0].addr == NULL) {
111 PMD_DRV_LOG(ERR, "Could not find VF config space "
112 "(UIO driver attached?).");
117 (ADF_NUM_SYM_QPS_PER_BUNDLE *
118 ADF_NUM_BUNDLES_PER_DEV)) {
119 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
123 /* Allocate the queue pair data structure. */
124 qp = rte_zmalloc("qat PMD qp metadata",
125 sizeof(*qp), RTE_CACHE_LINE_SIZE);
127 PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
130 qp->nb_descriptors = qp_conf->nb_descriptors;
131 qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
132 qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
133 RTE_CACHE_LINE_SIZE);
134 if (qp->op_cookies == NULL) {
135 PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
140 qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
143 if (qat_tx_queue_create(dev, &(qp->tx_q),
144 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
145 PMD_INIT_LOG(ERR, "Tx queue create failed "
146 "queue_pair_id=%u", queue_pair_id);
150 if (qat_rx_queue_create(dev, &(qp->rx_q),
151 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
152 PMD_DRV_LOG(ERR, "Rx queue create failed "
153 "queue_pair_id=%hu", queue_pair_id);
154 qat_queue_delete(&(qp->tx_q));
158 adf_configure_queues(qp);
159 adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
160 snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
161 pci_dev->driver->driver.name, dev->data->dev_id,
164 qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
165 if (qp->op_cookie_pool == NULL)
166 qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
168 sizeof(struct qat_crypto_op_cookie), 64, 0,
169 NULL, NULL, NULL, NULL, socket_id,
171 if (!qp->op_cookie_pool) {
172 PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
177 for (i = 0; i < qp->nb_descriptors; i++) {
178 if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
179 PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
183 struct qat_crypto_op_cookie *sql_cookie =
186 sql_cookie->qat_sgl_src_phys_addr =
187 rte_mempool_virt2iova(sql_cookie) +
188 offsetof(struct qat_crypto_op_cookie,
191 sql_cookie->qat_sgl_dst_phys_addr =
192 rte_mempool_virt2iova(sql_cookie) +
193 offsetof(struct qat_crypto_op_cookie,
197 struct qat_pmd_private *internals
198 = dev->data->dev_private;
199 qp->qat_dev_gen = internals->qat_dev_gen;
201 dev->data->queue_pairs[queue_pair_id] = qp;
205 if (qp->op_cookie_pool)
206 rte_mempool_free(qp->op_cookie_pool);
207 rte_free(qp->op_cookies);
212 int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
215 (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
218 PMD_INIT_FUNC_TRACE();
220 PMD_DRV_LOG(DEBUG, "qp already freed");
224 /* Don't free memory if there are still responses to be processed */
225 if (qp->inflights16 == 0) {
226 qat_queue_delete(&(qp->tx_q));
227 qat_queue_delete(&(qp->rx_q));
232 adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
234 for (i = 0; i < qp->nb_descriptors; i++)
235 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
237 if (qp->op_cookie_pool)
238 rte_mempool_free(qp->op_cookie_pool);
240 rte_free(qp->op_cookies);
242 dev->data->queue_pairs[queue_pair_id] = NULL;
246 static int qat_tx_queue_create(struct rte_cryptodev *dev,
247 struct qat_queue *queue, uint8_t qp_id,
248 uint32_t nb_desc, int socket_id)
250 PMD_INIT_FUNC_TRACE();
251 queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
252 queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
253 ADF_SYM_TX_QUEUE_STARTOFF;
254 PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
255 nb_desc, qp_id, queue->hw_bundle_number,
256 queue->hw_queue_number);
258 return qat_queue_create(dev, queue, nb_desc,
259 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
262 static int qat_rx_queue_create(struct rte_cryptodev *dev,
263 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
266 PMD_INIT_FUNC_TRACE();
267 queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
268 queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
269 ADF_SYM_RX_QUEUE_STARTOFF;
271 PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
272 nb_desc, qp_id, queue->hw_bundle_number,
273 queue->hw_queue_number);
274 return qat_queue_create(dev, queue, nb_desc,
275 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
278 static void qat_queue_delete(struct qat_queue *queue)
280 const struct rte_memzone *mz;
284 PMD_DRV_LOG(DEBUG, "Invalid queue");
287 mz = rte_memzone_lookup(queue->memz_name);
289 /* Write an unused pattern to the queue memory. */
290 memset(queue->base_addr, 0x7F, queue->queue_size);
291 status = rte_memzone_free(mz);
293 PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
294 status, queue->memz_name);
296 PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
302 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
303 uint32_t nb_desc, uint8_t desc_size, int socket_id)
307 const struct rte_memzone *qp_mz;
308 uint32_t queue_size_bytes = nb_desc*desc_size;
309 struct rte_pci_device *pci_dev;
311 PMD_INIT_FUNC_TRACE();
312 if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
313 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
317 pci_dev = RTE_DEV_TO_PCI(dev->device);
320 * Allocate a memzone for the queue - create a unique name.
322 snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
323 pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
324 queue->hw_bundle_number, queue->hw_queue_number);
325 qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
328 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
332 queue->base_addr = (char *)qp_mz->addr;
333 queue->base_phys_addr = qp_mz->iova;
334 if (qat_qp_check_queue_alignment(queue->base_phys_addr,
336 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
338 queue->base_phys_addr);
342 if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
344 PMD_DRV_LOG(ERR, "Invalid num inflights");
348 queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
349 ADF_BYTES_TO_MSG_SIZE(desc_size));
350 queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
351 PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
352 " msg_size %u, max_inflights %u modulo %u",
353 queue->queue_size, queue_size_bytes,
354 nb_desc, desc_size, queue->max_inflights,
357 if (queue->max_inflights < 2) {
358 PMD_DRV_LOG(ERR, "Invalid num inflights");
363 queue->msg_size = desc_size;
366 * Write an unused pattern to the queue memory.
368 memset(queue->base_addr, 0x7F, queue_size_bytes);
370 queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
373 io_addr = pci_dev->mem_resource[0].addr;
375 WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
376 queue->hw_queue_number, queue_base);
/*
 * Verify the ring base physical address is aligned to the queue size
 * (queue sizes are powers of two, so size-1 is a valid alignment mask).
 * Returns 0 if aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
			uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
389 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
390 uint32_t *p_queue_size_for_csr)
392 uint8_t i = ADF_MIN_RING_SIZE;
394 PMD_INIT_FUNC_TRACE();
395 for (; i <= ADF_MAX_RING_SIZE; i++)
396 if ((msg_size * msg_num) ==
397 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
398 *p_queue_size_for_csr = i;
401 PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
405 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
407 uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
409 txq->hw_bundle_number);
412 PMD_INIT_FUNC_TRACE();
413 value = ADF_CSR_RD(base_addr, arb_csr_offset);
414 value |= (0x01 << txq->hw_queue_number);
415 ADF_CSR_WR(base_addr, arb_csr_offset, value);
418 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
420 uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
422 txq->hw_bundle_number);
425 PMD_INIT_FUNC_TRACE();
426 value = ADF_CSR_RD(base_addr, arb_csr_offset);
427 value ^= (0x01 << txq->hw_queue_number);
428 ADF_CSR_WR(base_addr, arb_csr_offset, value);
431 static void adf_configure_queues(struct qat_qp *qp)
433 uint32_t queue_config;
434 struct qat_queue *queue = &qp->tx_q;
436 PMD_INIT_FUNC_TRACE();
437 queue_config = BUILD_RING_CONFIG(queue->queue_size);
439 WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
440 queue->hw_queue_number, queue_config);
444 BUILD_RESP_RING_CONFIG(queue->queue_size,
445 ADF_RING_NEAR_WATERMARK_512,
446 ADF_RING_NEAR_WATERMARK_0);
448 WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
449 queue->hw_queue_number, queue_config);