/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_comp.h"
#include "adf_transport_access_macros.h"

#define QAT_CQ_MAX_DEQ_RETRIES 10

#define ADF_MAX_DESC 4096
#define ADF_MIN_DESC 128

#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)
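
/* Worked example (illustrative): for bundle index 2, the arbiter enable
 * CSR computed by the macro above sits at offset
 * 0x19C + (0x1000 * 2) = 0x219C within the mapped BAR.
 */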
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
			.service_type = QAT_SERVICE_ASYMMETRIC,
			.service_type = QAT_SERVICE_ASYMMETRIC,
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
			.service_type = QAT_SERVICE_SYMMETRIC,
			.service_type = QAT_SERVICE_SYMMETRIC,
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
			.service_type = QAT_SERVICE_COMPRESSION,
			.service_type = QAT_SERVICE_COMPRESSION,
const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
			.service_type = QAT_SERVICE_ASYMMETRIC,
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
			.service_type = QAT_SERVICE_SYMMETRIC,
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
			.service_type = QAT_SERVICE_COMPRESSION,
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct qat_pci_device *qat_dev,
	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
		enum qat_service_type service)
{
	int i, count;

	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
		if (qp_hw_data[i].service_type == service)
			count++;
	return count;
}
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			QAT_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		QAT_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
			queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
			socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
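
/* Note on the alignment argument above: the ring memzone is reserved
 * IOVA-contiguous and aligned to its own size, since the device expects
 * a naturally aligned ring base; see qat_qp_check_queue_alignment()
 * further down.
 */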
int qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr,
		uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev =
			qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);

	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
		QAT_LOG(ERR, "Can't create qp for %u descriptors",
				qat_qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (pci_dev->mem_resource[0].addr == NULL) {
		QAT_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("qat PMD qp metadata",
				sizeof(*qp), RTE_CACHE_LINE_SIZE,
				qat_qp_conf->socket_id);
	if (qp == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
	if (qp->op_cookies == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->enqueued = qp->dequeued = 0;

	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
					ADF_RING_DIR_TX) != 0) {
		QAT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	qp->max_inflights = ADF_MAX_INFLIGHTS(qp->tx_q.queue_size,
				ADF_BYTES_TO_MSG_SIZE(qp->tx_q.msg_size));

	if (qp->max_inflights < 2) {
		QAT_LOG(ERR, "Invalid num inflights");
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
					ADF_RING_DIR_RX) != 0) {
		QAT_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);

	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
					"%s%d_cookies_%s_qp%hu",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qat_qp_conf->service_str, queue_pair_id);

	QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				qat_qp_conf->cookie_size, 64, 0,
				NULL, NULL, NULL, NULL,
				pci_dev->device.numa_node,
				0);
	if (!qp->op_cookie_pool) {
		QAT_LOG(ERR, "QAT PMD Cannot create"
				" op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
	}

	qp->qat_dev_gen = qat_dev->qat_dev_gen;
	qp->build_request = qat_qp_conf->build_request;
	qp->service_type = qat_qp_conf->hw->service_type;
	qp->qat_dev = qat_dev;

	QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
			queue_pair_id, op_cookie_pool_name);

	*qp_addr = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}
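
/* Illustrative usage sketch (hypothetical values, not from this file):
 * a service PMD's queue-pair setup op would typically do roughly
 *
 *	struct qat_qp *qp = NULL;
 *	struct qat_qp_config conf = {
 *		.hw = &qat_gen1_qps[QAT_SERVICE_SYMMETRIC][0],
 *		.nb_descriptors = 512,
 *		.cookie_size = sizeof(struct qat_sym_op_cookie),
 *		.socket_id = SOCKET_ID_ANY,
 *		.service_str = "sym",
 *	};
 *	if (qat_qp_setup(qat_dev, &qp, 0, &conf) != 0)
 *		return -1;
 *
 * and later tear the pair down with qat_qp_release(&qp).
 */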
int qat_qp_release(struct qat_qp **qp_addr)
{
	struct qat_qp *qp = *qp_addr;
	uint32_t i;

	if (qp == NULL) {
		QAT_LOG(DEBUG, "qp already freed");
		return 0;
	}

	QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
			qp->qat_dev->qat_dev_id);

	/* Don't free memory if there are still responses to be processed */
	if ((qp->enqueued - qp->dequeued) == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
			&qp->qat_dev->arb_csr_lock);

	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);

	rte_free(qp->op_cookies);
	rte_free(qp);
	*qp_addr = NULL;
	return 0;
}
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		QAT_LOG(DEBUG, "Invalid queue");
		return;
	}

	QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
			queue->hw_queue_number, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			QAT_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		QAT_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}
static int
qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
		struct qat_qp_config *qp_conf, uint8_t dir)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	struct rte_pci_device *pci_dev =
			qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
	int ret = 0;
	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
	uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);

	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);

	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
			"%s_%d_%s_%s_%d_%d",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qp_conf->service_str, "qp_mem",
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			pci_dev->device.numa_node);
	if (qp_mz == NULL) {
		QAT_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		QAT_LOG(ERR, "Invalid alignment on queue create "
					" 0x%"PRIx64"\n",
					queue->base_phys_addr);
		ret = -EFAULT;
		goto queue_create_err;
	}

	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
			&(queue->queue_size)) != 0) {
		QAT_LOG(ERR, "Invalid num inflights");
		ret = -EINVAL;
		goto queue_create_err;
	}

	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/* For fast calculation of cookie index, relies on msg_size being 2^n */
	queue->trailz = __builtin_ctz(desc_size);
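
	/* Example: with 128-byte descriptors trailz is 7, so the op cookie
	 * for the descriptor at byte offset 'tail' is op_cookies[tail >> 7],
	 * i.e. tail / 128 computed without a division.
	 */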
	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
					queue->queue_size);

	io_addr = pci_dev->mem_resource[0].addr;

	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);

	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
		" nb msgs %u, msg_size %u, modulo mask %u",
			queue->memz_name,
			queue->queue_size, queue_size_bytes,
			qp_conf->nb_descriptors, desc_size,
			queue->modulo_mask);

	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);
	return ret;
}
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
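
/* Example of the check above: a 16 KB ring (queue_size_bytes = 0x4000)
 * masks the IOVA with 0x3FFF; any low bit set means the base is not
 * naturally aligned to the ring size, so the queue is rejected.
 */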
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EINVAL;
}
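
/* Example: 512 descriptors of 128 B give a 64 KB ring. The loop above
 * only succeeds when msg_size * msg_num lands exactly on one of the
 * discrete sizes the ring-size CSR can encode; anything in between is
 * rejected rather than rounded.
 */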
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
				(ADF_ARB_REG_SLOT *
				txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
				(ADF_ARB_REG_SLOT *
				txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}
static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	queue_config = BUILD_RING_CONFIG(queue->queue_size);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}
static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}
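
/* Example: ring sizes are powers of two, so with modulo_mask = 0x3FFF
 * (a 16 KB ring) adf_modulo(0x4080, 0x3FFF) = 0x0080, wrapping the byte
 * offset without the cost of a '%' operation.
 */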
static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
	q->csr_tail = q->tail;
}
static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, new_head);
}
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int ret = -1;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	/* dequeued can only be written by one thread, but it may not
	 * be this thread. As it's 4-byte aligned it will be read
	 * atomically here by any Intel CPU.
	 * enqueued can wrap before dequeued, but cannot
	 * lap it as var size of enq/deq (uint32_t) > var size of
	 * max_inflights (uint16_t). In reality inflights is never
	 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
	 * On wrapping, the calculation still returns the correct
	 * positive value as all three vars are unsigned.
	 */
	uint32_t inflights =
			tmp_qp->enqueued - tmp_qp->dequeued;
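
	/* Wrap example: if enqueued has wrapped around to 5 while dequeued
	 * is 0xFFFFFFFB, the unsigned subtraction above yields 10, the true
	 * number of requests in flight.
	 */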
	if ((inflights + nb_ops) > tmp_qp->max_inflights) {
		nb_ops_possible = tmp_qp->max_inflights - inflights;
		if (nb_ops_possible == 0)
			return 0;
	}
	/* QAT has plenty of work queued already, so don't waste cycles
	 * enqueueing, wait til the application has gathered a bigger
	 * burst or some completed ops have been dequeued
	 */
	if (tmp_qp->min_enq_burst_threshold && inflights >
			QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
			tmp_qp->min_enq_burst_threshold) {
		tmp_qp->stats.threshold_hit_count++;
		return 0;
	}

#ifdef BUILD_QAT_SYM
	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
		qat_sym_preprocess_requests(ops, nb_ops_possible);
#endif

	while (nb_ops_sent != nb_ops_possible) {
		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
#ifdef BUILD_QAT_SYM
			ret = qat_sym_build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail >> queue->trailz],
				tmp_qp->qat_dev_gen);
#endif
		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
			ret = qat_comp_build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail >> queue->trailz],
				tmp_qp->qat_dev_gen);
		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
#ifdef BUILD_QAT_ASYM
			ret = qat_asym_build_request(*ops, base_addr + tail,
				tmp_qp->op_cookies[tail >> queue->trailz],
				tmp_qp->qat_dev_gen);
#endif
		}
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/* This message cannot be enqueued */
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}

kick_tail:
	tmp_qp->enqueued += nb_ops_sent;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	txq_write_tail(tmp_qp, queue);
	return nb_ops_sent;
}
/* Use this for compression only - but keep consistent with above common
 * function as much as possible.
 */
uint16_t
qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int nb_desc_to_build;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;

	int descriptors_built, total_descriptors_built = 0;
	int nb_remaining_descriptors;
	int overflow = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	/* dequeued can only be written by one thread, but it may not
	 * be this thread. As it's 4-byte aligned it will be read
	 * atomically here by any Intel CPU.
	 * enqueued can wrap before dequeued, but cannot
	 * lap it as var size of enq/deq (uint32_t) > var size of
	 * max_inflights (uint16_t). In reality inflights is never
	 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
	 * On wrapping, the calculation still returns the correct
	 * positive value as all three vars are unsigned.
	 */
	uint32_t inflights =
			tmp_qp->enqueued - tmp_qp->dequeued;

	/* Find how many can actually fit on the ring */
	overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
	if (overflow > 0) {
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	/* QAT has plenty of work queued already, so don't waste cycles
	 * enqueueing, wait til the application has gathered a bigger
	 * burst or some completed ops have been dequeued
	 */
	if (tmp_qp->min_enq_burst_threshold && inflights >
			QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
			tmp_qp->min_enq_burst_threshold) {
		tmp_qp->stats.threshold_hit_count++;
		return 0;
	}

	/* At this point nb_ops_possible is assuming a 1:1 mapping
	 * between ops and descriptors.
	 * Fewer may be sent if some ops have to be split.
	 * nb_ops_possible is <= burst size.
	 * Find out how many spaces are actually available on the qp in case
	 * more are needed.
	 */
	nb_remaining_descriptors = nb_ops_possible
			+ ((overflow >= 0) ? 0 : overflow * (-1));
	QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
			nb_ops, nb_remaining_descriptors);
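
	/* Example: 16 ops requested with room for 100 in-flight descriptors
	 * gives overflow = -84, so nb_ops_possible stays 16 and
	 * nb_remaining_descriptors = 16 + 84 = 100; that slack is what lets
	 * oversized ops be split into extra descriptors below.
	 */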
	while (nb_ops_sent != nb_ops_possible &&
				nb_remaining_descriptors > 0) {
		struct qat_comp_op_cookie *cookie =
				tmp_qp->op_cookies[tail >> queue->trailz];

		descriptors_built = 0;

		QAT_DP_LOG(DEBUG, "--- data length: %u",
				((struct rte_comp_op *)*ops)->src.length);

		nb_desc_to_build = qat_comp_build_request(*ops,
				base_addr + tail, cookie, tmp_qp->qat_dev_gen);
		QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
			"%d ops sent, %d descriptors needed",
			total_descriptors_built, nb_remaining_descriptors,
			nb_ops_sent, nb_desc_to_build);

		if (unlikely(nb_desc_to_build < 0)) {
			/* this message cannot be enqueued */
			tmp_qp->stats.enqueue_err_count++;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		} else if (unlikely(nb_desc_to_build > 1)) {
			/* this op is too big and must be split - get more
			 * descriptors and retry
			 */

			QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
					nb_desc_to_build);

			nb_remaining_descriptors -= nb_desc_to_build;
			if (nb_remaining_descriptors >= 0) {
				/* There are enough remaining descriptors
				 * so retry
				 */
				int ret2 = qat_comp_build_multiple_requests(
						*ops, tmp_qp, tail,
						nb_desc_to_build);

				if (unlikely(ret2 < 1)) {
					QAT_DP_LOG(DEBUG,
						"Failed to build (%d) descriptors, status %d",
						nb_desc_to_build, ret2);

					qat_comp_free_split_op_memzones(cookie,
							nb_desc_to_build - 1);

					tmp_qp->stats.enqueue_err_count++;

					/* This message cannot be enqueued */
					if (nb_ops_sent == 0)
						return 0;
					goto kick_tail;
				} else {
					descriptors_built = ret2;
					total_descriptors_built +=
							descriptors_built;
					nb_remaining_descriptors -=
							descriptors_built;
					QAT_DP_LOG(DEBUG,
						"Multiple descriptors (%d) built ok",
						descriptors_built);
				}
			} else {
				QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
						"exceeds number of available descriptors (%d)",
						nb_desc_to_build,
						nb_remaining_descriptors +
							nb_desc_to_build);

				qat_comp_free_split_op_memzones(cookie,
						nb_desc_to_build - 1);

				/* Not enough extra descriptors */
				if (nb_ops_sent == 0)
					return 0;
				goto kick_tail;
			}
		} else {
			descriptors_built = 1;
			total_descriptors_built++;
			nb_remaining_descriptors--;
			QAT_DP_LOG(DEBUG, "Single descriptor built ok");
		}

		tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
				queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}

kick_tail:
	tmp_qp->enqueued += total_descriptors_built;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	txq_write_tail(tmp_qp, queue);
	return nb_ops_sent;
}
uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct qat_queue *rx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t head;
	uint32_t op_resp_counter = 0, fw_resp_counter = 0;
	uint8_t *resp_msg;
	int nb_fw_responses;

	rx_queue = &(tmp_qp->rx_q);
	head = rx_queue->head;
	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			op_resp_counter != nb_ops) {

		nb_fw_responses = 1;

		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_process_response(ops, resp_msg);
		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
			nb_fw_responses = qat_comp_process_response(
				ops, resp_msg,
				tmp_qp->op_cookies[head >> rx_queue->trailz],
				&tmp_qp->stats.dequeue_err_count);
#ifdef BUILD_QAT_ASYM
		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
			qat_asym_process_response(ops, resp_msg,
				tmp_qp->op_cookies[head >> rx_queue->trailz]);
#endif

		head = adf_modulo(head + rx_queue->msg_size,
				rx_queue->modulo_mask);

		resp_msg = (uint8_t *)rx_queue->base_addr + head;

		if (nb_fw_responses) {
			/* only move on to next op if one was ready to return
			 * to API
			 */
			ops++;
			op_resp_counter++;
		}

		/* A compression op may be broken up into multiple fw requests.
		 * Only count fw responses as complete once ALL the responses
		 * associated with an op have been processed, as the cookie
		 * data from the first response must be available until
		 * finished with all firmware responses.
		 */
		fw_resp_counter += nb_fw_responses;

		rx_queue->nb_processed_responses++;
	}

	tmp_qp->dequeued += fw_resp_counter;
	tmp_qp->stats.dequeued_count += op_resp_counter;

	rx_queue->head = head;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
		rxq_free_desc(tmp_qp, rx_queue);

	QAT_DP_LOG(DEBUG, "Dequeue burst return: %u, QAT responses: %u",
			op_resp_counter, fw_resp_counter);

	return op_resp_counter;
}
/* This is almost same as dequeue_op_burst, without the atomic, without stats
 * and without the op. Dequeues one response.
 */
static uint8_t
qat_cq_dequeue_response(struct qat_qp *qp, void *out_data)
{
	uint8_t result = 0;
	uint8_t retries = 0;
	struct qat_queue *queue = &(qp->rx_q);
	struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (retries++ < QAT_CQ_MAX_DEQ_RETRIES &&
			*(uint32_t *)resp_msg == ADF_RING_EMPTY_SIG) {
		/* loop waiting for response until we reach the timeout */
		rte_delay_ms(20);
	}

	if (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG) {
		/* response received */
		result = 1;

		/* check status flag */
		if (ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
				resp_msg->comn_hdr.comn_status) ==
				ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
			/* success */
			memcpy(out_data, resp_msg, queue->msg_size);
		} else {
			memset(out_data, 0, queue->msg_size);
		}

		queue->head = adf_modulo(queue->head + queue->msg_size,
				queue->modulo_mask);
		rxq_free_desc(qp, queue);
	}

	return result;
}
/* Sends a NULL message and extracts QAT fw version from the response.
 * Used to determine detailed capabilities based on the fw version number.
 * This assumes that there are no inflight messages, i.e. assumes there's space
 * on the qp, one message is sent and only one response collected.
 * Returns fw version number or 0 for unknown version or a negative error code.
 */
int
qat_cq_get_fw_version(struct qat_qp *qp)
{
	struct qat_queue *queue = &(qp->tx_q);
	uint8_t *base_addr = (uint8_t *)queue->base_addr;
	struct icp_qat_fw_comn_req null_msg;
	struct icp_qat_fw_comn_resp response;

	/* prepare the NULL request */
	memset(&null_msg, 0, sizeof(null_msg));
	null_msg.comn_hdr.hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	null_msg.comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	null_msg.comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "NULL request", &null_msg, sizeof(null_msg));
#endif

	/* send the NULL request */
	memcpy(base_addr + queue->tail, &null_msg, sizeof(null_msg));
	queue->tail = adf_modulo(queue->tail + queue->msg_size,
			queue->modulo_mask);
	txq_write_tail(qp, queue);

	/* receive a response */
	if (qat_cq_dequeue_response(qp, &response)) {

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "NULL response:", &response,
				sizeof(response));
#endif

		/* if LW0 bit 24 is set - then the fw version was returned */
		if (QAT_FIELD_GET(response.comn_hdr.hdr_flags,
				ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS,
				ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK))
			return response.resrvd[0]; /* return LW4 */
		else
			return 0; /* not set - we don't know fw version */
	}

	QAT_LOG(ERR, "No response received");
	return -EINVAL;
}
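
/* Illustrative caller (hypothetical, not part of this file): a device
 * capability probe might do
 *
 *	int fw_version = qat_cq_get_fw_version(qp);
 *	if (fw_version > 0)
 *		... gate optional features on fw_version ...
 *
 * where a return of 0 simply means the firmware predates version
 * reporting and a negative value is an error.
 */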
/* Weak no-op stub, overridden when the compression service is linked in. */
__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
		void *op_cookie __rte_unused,
		uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}