/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC		4096
#define ADF_MIN_SYM_DESC		128
#define ADF_SYM_TX_RING_DESC_SIZE	128
#define ADF_SYM_RX_RING_DESC_SIZE	32
#define ADF_SYM_TX_QUEUE_STARTOFF	2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF	10
#define ADF_ARB_REG_SLOT		0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET	0x19C
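
/*
 * The ring-service-arbiter enable registers are laid out one per bundle,
 * ADF_ARB_REG_SLOT bytes apart; the macro below writes the enable mask for
 * the bundle selected by 'index'.
 */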
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;
	unsigned memzone_flags = 0;
	const struct rte_memseg *ms;

	PMD_INIT_FUNC_TRACE();
	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		PMD_DRV_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
					queue_name, queue_size, socket_id);
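
	/*
	 * Pick the memzone page-size flag that matches the hugepage size of
	 * the first memseg; for any other page size only a size hint is
	 * given to the allocator.
	 */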
	ms = rte_eal_get_physmem_layout();
	switch (ms[0].hugepage_sz) {
	case(RTE_PGSIZE_2M):
		memzone_flags = RTE_MEMZONE_2MB;
		break;
	case(RTE_PGSIZE_1G):
		memzone_flags = RTE_MEMZONE_1GB;
		break;
	case(RTE_PGSIZE_16M):
		memzone_flags = RTE_MEMZONE_16MB;
		break;
	case(RTE_PGSIZE_16G):
		memzone_flags = RTE_MEMZONE_16GB;
		break;
	default:
		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
	}
	return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
		memzone_flags, queue_size);
}
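
/*
 * Set up one symmetric crypto queue pair: validate the descriptor count,
 * create the Tx and Rx hardware rings, enable Tx arbitration and allocate
 * one op_cookie (SGL metadata) per descriptor.
 */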
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
	const struct rte_cryptodev_qp_conf *qp_conf,
	int socket_id, struct rte_mempool *session_pool __rte_unused)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev;
	int ret;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
		if (ret < 0)
			return ret;
	}

	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
				qp_conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_DRV_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	if (queue_pair_id >=
			(ADF_NUM_SYM_QPS_PER_BUNDLE *
					ADF_NUM_BUNDLES_PER_DEV)) {
		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
				queue_pair_id);
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE);
	if (qp->op_cookies == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->inflights16 = 0;

	if (qat_tx_queue_create(dev, &(qp->tx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_INIT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_rx_queue_create(dev, &(qp->rx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
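
	/*
	 * One op_cookie per descriptor: each cookie carries the SGL tables
	 * used for operations with chained mbufs. Reuse the mempool if it
	 * already exists from a previous setup of this queue pair.
	 */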
	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
		pci_dev->driver->driver.name, dev->data->dev_id,
		queue_pair_id);

	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				sizeof(struct qat_crypto_op_cookie), 64, 0,
				NULL, NULL, NULL, NULL, socket_id,
				0);
	if (!qp->op_cookie_pool) {
		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
				" op mempool");
		goto create_err;
	}
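
	/*
	 * Pre-compute the IOVA of the SGL tables embedded in each cookie so
	 * the data path does not need per-operation address translation.
	 */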
	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}

		struct qat_crypto_op_cookie *sql_cookie =
				qp->op_cookies[i];

		sql_cookie->qat_sgl_src_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_src);

		sql_cookie->qat_sgl_dst_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_dst);
	}

	struct qat_pmd_private *internals
		= dev->data->dev_private;
	qp->qat_dev_gen = internals->qat_dev_gen;

	dev->data->queue_pairs[queue_pair_id] = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}

int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct qat_qp *qp =
			(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();
	if (qp == NULL) {
		PMD_DRV_LOG(DEBUG, "qp already freed");
		return 0;
	}

	/* Don't free memory if there are still responses to be processed */
	if (qp->inflights16 == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	dev->data->queue_pairs[queue_pair_id] = NULL;
	return 0;
}
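
/*
 * Each device exposes ADF_NUM_BUNDLES_PER_DEV bundles of rings with
 * ADF_NUM_SYM_QPS_PER_BUNDLE symmetric qps per bundle; a qp's Tx and Rx
 * rings live in the same bundle at fixed offsets from the bundle start.
 */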
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t qp_id,
	uint32_t nb_desc, int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
						ADF_SYM_TX_QUEUE_STARTOFF;
	PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);

	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

static int qat_rx_queue_create(struct rte_cryptodev *dev,
		struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
		int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
						ADF_SYM_RX_QUEUE_STARTOFF;

	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);
	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}
	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
		uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	uint32_t queue_size_bytes = nb_desc*desc_size;
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();
	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
		pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			socket_id);
	if (qp_mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
					" 0x%"PRIx64"\n",
					queue->base_phys_addr);
		return -EFAULT;
	}

	if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
			!= 0) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}

	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
					ADF_BYTES_TO_MSG_SIZE(desc_size));
	queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
	PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
				" msg_size %u, max_inflights %u modulo %u",
				queue->queue_size, queue_size_bytes,
				nb_desc, desc_size, queue->max_inflights,
				queue->modulo);

	if (queue->max_inflights < 2) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);
	return 0;
}
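
/*
 * The ring base may not share any low-order address bits with the ring
 * length, i.e. it must be naturally aligned to the (power-of-two) ring
 * size, which appears to be a hardware requirement for these rings.
 */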
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
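
/*
 * The CSR stores the ring size as an encoded value in the range
 * ADF_MIN_RING_SIZE..ADF_MAX_RING_SIZE; find the encoding whose size in
 * bytes matches msg_size * msg_num exactly.
 */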
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	PMD_INIT_FUNC_TRACE();
	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}
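
/*
 * Note: the bit is cleared with XOR, which assumes it was previously set
 * by adf_queue_arb_enable(); a repeated disable would re-enable the ring.
 */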
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
							txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value ^= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}
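
/*
 * Program the ring-config CSRs: the request (Tx) ring uses the plain size
 * encoding, the response (Rx) ring additionally sets near-full/near-empty
 * watermarks.
 */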
static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	PMD_INIT_FUNC_TRACE();
	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}