/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC		4096
#define ADF_MIN_SYM_DESC		128
#define ADF_SYM_TX_RING_DESC_SIZE	128
#define ADF_SYM_RX_RING_DESC_SIZE	32
/* Offsets from bundle start to the first Sym Tx and Rx queues */
#define ADF_SYM_TX_QUEUE_STARTOFF	2
#define ADF_SYM_RX_QUEUE_STARTOFF	10
#define ADF_ARB_REG_SLOT		0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET	0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)
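
/*
 * Each bundle of rings owns a 0x1000-byte slot of CSR space, so the ring
 * service-arbiter enable register for bundle 'index' lives at
 * ADF_ARB_RINGSRVARBEN_OFFSET + index * ADF_ARB_REG_SLOT. The register
 * holds a bitmask with one enable bit per ring in that bundle.
 */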

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
	int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
	int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
			int socket_id)
{
	const struct rte_memzone *mz;
	unsigned int memzone_flags = 0;
	const struct rte_memseg *ms;

	PMD_INIT_FUNC_TRACE();
	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			PMD_DRV_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}
		PMD_DRV_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
					queue_name, queue_size, socket_id);

	/* Match the memzone flag to the hugepage size of the first memseg. */
	ms = rte_eal_get_physmem_layout();
	switch (ms[0].hugepage_sz) {
	case (RTE_PGSIZE_2M):
		memzone_flags = RTE_MEMZONE_2MB;
		break;
	case (RTE_PGSIZE_1G):
		memzone_flags = RTE_MEMZONE_1GB;
		break;
	case (RTE_PGSIZE_16M):
		memzone_flags = RTE_MEMZONE_16MB;
		break;
	case (RTE_PGSIZE_16G):
		memzone_flags = RTE_MEMZONE_16GB;
		break;
	default:
		memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
	}
	return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
		memzone_flags, queue_size);
}
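
/*
 * Note: the ring memzone is reserved with its alignment equal to its size.
 * The ring base programmed into the hardware must be naturally aligned to
 * the ring size, which qat_qp_check_queue_alignment() verifies when the
 * queue is created.
 */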

int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
	const struct rte_cryptodev_qp_conf *qp_conf,
	int socket_id, struct rte_mempool *session_pool __rte_unused)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
		if (ret < 0)
			return ret;
	}

	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
				qp_conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);
	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_DRV_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	if (queue_pair_id >=
			(ADF_NUM_SYM_QPS_PER_BUNDLE *
					ADF_NUM_BUNDLES_PER_DEV)) {
		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
				queue_pair_id);
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE);
	if (qp->op_cookies == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for op cookie array");
		rte_free(qp);
		return -ENOMEM;
	}
	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->inflights16 = 0;

	if (qat_tx_queue_create(dev, &(qp->tx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_rx_queue_create(dev, &(qp->rx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
		pci_dev->driver->driver.name, dev->data->dev_id,
		queue_pair_id);

	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				sizeof(struct qat_crypto_op_cookie), 64, 0,
				NULL, NULL, NULL, NULL, socket_id, 0);
	if (!qp->op_cookie_pool) {
		PMD_DRV_LOG(ERR, "QAT PMD Cannot create op cookie mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		/* Precompute the IOVAs of the SGL tables inside each cookie. */
		struct qat_crypto_op_cookie *sql_cookie =
				qp->op_cookies[i];
		sql_cookie->qat_sgl_src_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_src);
		sql_cookie->qat_sgl_dst_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_dst);
	}

	struct qat_pmd_private *internals
		= dev->data->dev_private;
	qp->qat_dev_gen = internals->qat_dev_gen;

	dev->data->queue_pairs[queue_pair_id] = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}
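
/*
 * Release a queue pair. The rings are only torn down once no responses are
 * outstanding (inflights16 == 0); otherwise the caller must retry later.
 */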

int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
	struct qat_qp *qp =
			(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();
	if (qp == NULL) {
		PMD_DRV_LOG(DEBUG, "qp already freed");
		return 0;
	}

	/* Don't free memory if there are still responses to be processed. */
	if (qp->inflights16 == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	dev->data->queue_pairs[queue_pair_id] = NULL;
	return 0;
}
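
/*
 * The two helpers below map a queue pair id onto the hardware bundle/ring
 * numbering: qp_id selects the bundle, and within a bundle the symmetric
 * crypto Tx ring sits at offset ADF_SYM_TX_QUEUE_STARTOFF (2) and the
 * matching Rx ring at ADF_SYM_RX_QUEUE_STARTOFF (10).
 */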

static int qat_tx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t qp_id,
	uint32_t nb_desc, int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
						ADF_SYM_TX_QUEUE_STARTOFF;
	PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);

	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

static int qat_rx_queue_create(struct rte_cryptodev *dev,
	struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
	int socket_id)
{
	PMD_INIT_FUNC_TRACE();
	queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
	queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
						ADF_SYM_RX_QUEUE_STARTOFF;
	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
		nb_desc, qp_id, queue->hw_bundle_number,
		queue->hw_queue_number);

	return qat_queue_create(dev, queue, nb_desc,
				ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}
	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist", queue->memz_name);
	}
}

static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
		uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	uint32_t queue_size_bytes = nb_desc * desc_size;
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();
	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
		pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			socket_id);
	if (qp_mz == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
				" 0x%"PRIx64"\n",
				queue->base_phys_addr);
		return -EFAULT;
	}

	if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
			!= 0) {
		PMD_DRV_LOG(ERR, "Invalid queue size");
		return -EINVAL;
	}

	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
					ADF_BYTES_TO_MSG_SIZE(desc_size));
	queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
	PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
				" msg_size %u, max_inflights %u modulo %u",
				queue->queue_size, queue_size_bytes,
				nb_desc, desc_size, queue->max_inflights,
				queue->modulo);

	if (queue->max_inflights < 2) {
		PMD_DRV_LOG(ERR, "Invalid num inflights");
		return -EINVAL;
	}
	queue->msg_size = desc_size;

	/* Write an unused pattern to the queue memory. */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);
	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);
	return 0;
}
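
/*
 * A ring base must be naturally aligned to the ring size (e.g. a 512 KiB
 * ring must start on a 512 KiB boundary), so any set bit of the physical
 * address below the ring size is rejected here.
 */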

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}

static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	PMD_INIT_FUNC_TRACE();
	/* Find the CSR ring-size encoding whose byte size matches exactly. */
	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	PMD_DRV_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
	return -EINVAL;
}
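
/*
 * Ring arbitration: each bundle exposes one RINGSRVARBEN register holding an
 * enable bit per ring. Enabling or disabling a Tx ring is a read-modify-write
 * of that bit; the device only services rings whose bit is set.
 */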

static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
						txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
						txq->hw_bundle_number);
	uint32_t value;

	PMD_INIT_FUNC_TRACE();
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	/* Clear (rather than toggle) the enable bit so disable is idempotent. */
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	PMD_INIT_FUNC_TRACE();
	queue_config = BUILD_RING_CONFIG(queue->queue_size);
	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config = BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);
	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}