/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "qat_algs.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC		4096
#define ADF_MIN_SYM_DESC		128
#define ADF_SYM_TX_RING_DESC_SIZE	128
#define ADF_SYM_RX_RING_DESC_SIZE	32
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_TX_QUEUE_STARTOFF	2
#define ADF_SYM_RX_QUEUE_STARTOFF	10
#define ADF_ARB_REG_SLOT		0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET	0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

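/*
 * Find or reserve an IOVA-contiguous memzone for a hardware ring.
 * An existing memzone of the same name is re-used if it is large enough
 * and on an acceptable socket; an incompatible one is an error.
 */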
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		int socket_id)
{
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();
	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		PMD_DRV_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
			queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
			socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

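/*
 * Create and initialise a symmetric crypto queue pair: a Tx ring carrying
 * requests to the device, an Rx ring returning responses, and a mempool
 * of per-descriptor cookies holding SGL metadata for each operation.
 */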
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
	const struct rte_cryptodev_qp_conf *qp_conf,
	int socket_id, struct rte_mempool *session_pool __rte_unused)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev;
	int ret;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
		if (ret < 0)
			return ret;
	}

	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
				qp_conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_DRV_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	if (queue_pair_id >=
			(ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV)) {
		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
				queue_pair_id);
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE);
	if (qp->op_cookies == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->inflights16 = 0;

	if (qat_tx_queue_create(dev, &(qp->tx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_INIT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_rx_queue_create(dev, &(qp->rx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
			pci_dev->driver->driver.name, dev->data->dev_id,
			queue_pair_id);

	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				sizeof(struct qat_crypto_op_cookie), 64, 0,
				NULL, NULL, NULL, NULL, socket_id,
				0);
	if (!qp->op_cookie_pool) {
		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
				" op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}

		struct qat_crypto_op_cookie *sql_cookie =
				qp->op_cookies[i];

		sql_cookie->qat_sgl_src_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_src);

		sql_cookie->qat_sgl_dst_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_dst);
	}

	struct qat_pmd_private *internals
		= dev->data->dev_private;
	qp->qat_dev_gen = internals->qat_dev_gen;

	dev->data->queue_pairs[queue_pair_id] = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}

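/*
 * Release a queue pair: refuse while responses are still in flight,
 * otherwise free the rings, return all cookies to the mempool and
 * free the qp metadata.
 */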
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct qat_qp *qp =
			(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();
	if (qp == NULL) {
		PMD_DRV_LOG(DEBUG, "qp already freed");
		return 0;
	}

	/* Don't free memory if there are still responses to be processed */
	if (qp->inflights16 == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	dev->data->queue_pairs[queue_pair_id] = NULL;
	return 0;
}

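/*
 * Map a qp_id to its hardware bundle and to the Tx ring number within
 * that bundle, then create the request ring.
 */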
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t qp_id,
	uint32_t nb_desc, int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
					ADF_SYM_TX_QUEUE_STARTOFF;
	PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);

	return qat_queue_create(dev, queue, nb_desc,
			ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

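/*
 * Same bundle mapping as the Tx side, but using the Rx ring offset
 * within the bundle for the response ring.
 */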
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
	int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
					ADF_SYM_RX_QUEUE_STARTOFF;

	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);
	return qat_queue_create(dev, queue, nb_desc,
			ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

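/*
 * Free a ring's memzone; the queue memory is first overwritten with an
 * unused pattern so stale descriptors cannot be misread as valid.
 */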
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}
	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}

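/*
 * Common ring creation: reserve IOVA-contiguous memory aligned to the
 * ring size, validate the requested geometry and program the ring base
 * address CSR.
 */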
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
		uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	uint32_t queue_size_bytes = nb_desc * desc_size;
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();
	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
		pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			socket_id);
	if (qp_mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
				" 0x%"PRIx64"\n",
				queue->base_phys_addr);
		return -EFAULT;
	}
	if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
			!= 0) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}

	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
				ADF_BYTES_TO_MSG_SIZE(desc_size));
	queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
	PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
			" msg_size %u, max_inflights %u modulo %u",
			queue->queue_size, queue_size_bytes,
			nb_desc, desc_size, queue->max_inflights,
			queue->modulo);

	if (queue->max_inflights < 2) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
				queue->queue_size);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);
	return 0;
}

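/*
 * The ring base must be aligned to the ring size, so the address is
 * valid only if no bits below the ring size are set.
 */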
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
		uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}

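/*
 * Convert a ring size in bytes to the encoded size value written to the
 * ring config CSR; only sizes matching one of the ADF ring size
 * encodings are accepted.
 */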
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	PMD_INIT_FUNC_TRACE();
	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}

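/*
 * Set this Tx ring's bit in the bundle's ring-service-arbiter enable
 * CSR so the hardware arbiter starts servicing the ring.
 */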
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
				(ADF_ARB_REG_SLOT *
				txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

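/*
 * Clear this Tx ring's bit in the arbiter enable CSR so the hardware
 * stops servicing the ring.
 */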
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
				(ADF_ARB_REG_SLOT *
				txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	/* Clear, rather than toggle, the enable bit: XOR would re-enable
	 * the ring if the bit were already clear.
	 */
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

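/*
 * Program the ring config CSRs for the pair: the request ring with a
 * plain size-based config, the response ring additionally carrying
 * near-watermark settings (512/0).
 */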
static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	PMD_INIT_FUNC_TRACE();
	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}