/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_device.h"
#include "qat_qp.h"
#include "qat_sym.h"
#include "qat_asym.h"
#include "qat_comp.h"
#include "adf_transport_access_macros.h"
#define QAT_CQ_MAX_DEQ_RETRIES 10

#define ADF_MAX_DESC				4096
#define ADF_MIN_DESC				128

#define ADF_ARB_REG_SLOT			0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET		0x19C
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * index), value)
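
/* Worked example of the macro above: for bundle (index) 2 the arbiter
 * enable CSR sits at ADF_ARB_RINGSRVARBEN_OFFSET + 2 * ADF_ARB_REG_SLOT,
 * i.e. 0x19C + 0x2000 = 0x219C relative to the csr_addr base.
 */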

const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		.service_type = QAT_SERVICE_ASYMMETRIC,
		.service_type = QAT_SERVICE_ASYMMETRIC,
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		.service_type = QAT_SERVICE_SYMMETRIC,
		.service_type = QAT_SERVICE_SYMMETRIC,
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		.service_type = QAT_SERVICE_COMPRESSION,
		.service_type = QAT_SERVICE_COMPRESSION,

const struct qat_qp_hw_data qat_gen3_qps[QAT_MAX_SERVICES]
					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
	/* queue pairs which provide an asymmetric crypto service */
	[QAT_SERVICE_ASYMMETRIC] = {
		.service_type = QAT_SERVICE_ASYMMETRIC,
	/* queue pairs which provide a symmetric crypto service */
	[QAT_SERVICE_SYMMETRIC] = {
		.service_type = QAT_SERVICE_SYMMETRIC,
	/* queue pairs which provide a compression service */
	[QAT_SERVICE_COMPRESSION] = {
		.service_type = QAT_SERVICE_COMPRESSION,

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
	uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct qat_pci_device *qat_dev,
	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
	rte_spinlock_t *lock);

int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
		enum qat_service_type service)
{
	int i, count;

	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
		if (qp_hw_data[i].service_type == service)
			count++;
	return count;
}

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
		int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
				((socket_id == SOCKET_ID_ANY) ||
					(socket_id == mz->socket_id))) {
			QAT_LOG(DEBUG, "re-use memzone already "
					"allocated for %s", queue_name);
			return mz;
		}

		QAT_LOG(ERR, "Incompatible memzone already "
				"allocated %s, size %u, socket %d. "
				"Requested size %u, socket %u",
				queue_name, (uint32_t)mz->len,
				mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
			queue_name, queue_size, socket_id);
	return rte_memzone_reserve_aligned(queue_name, queue_size,
			socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

int qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr,
		uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);

	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
		QAT_LOG(ERR, "Can't create qp for %u descriptors",
				qat_qp_conf->nb_descriptors);
		return -EINVAL;
	}

	if (pci_dev->mem_resource[0].addr == NULL) {
		QAT_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("qat PMD qp metadata",
				sizeof(*qp), RTE_CACHE_LINE_SIZE,
				qat_qp_conf->socket_id);
	if (qp == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
	qp->op_cookies = rte_zmalloc_socket("qat PMD op cookie pointer",
			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE, qat_qp_conf->socket_id);
	if (qp->op_cookies == NULL) {
		QAT_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->enqueued = qp->dequeued = 0;

	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
					ADF_RING_DIR_TX) != 0) {
		QAT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	qp->max_inflights = ADF_MAX_INFLIGHTS(qp->tx_q.queue_size,
				ADF_BYTES_TO_MSG_SIZE(qp->tx_q.msg_size));

	if (qp->max_inflights < 2) {
		QAT_LOG(ERR, "Invalid num inflights");
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
					ADF_RING_DIR_RX) != 0) {
		QAT_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
					&qat_dev->arb_csr_lock);

	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
			"%s%d_cookies_%s_qp%hu",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qat_qp_conf->service_str, queue_pair_id);

	QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				qat_qp_conf->cookie_size, 64, 0,
				NULL, NULL, NULL, NULL,
				qat_dev->pci_dev->device.numa_node,
				0);
	if (!qp->op_cookie_pool) {
		QAT_LOG(ERR, "QAT PMD Cannot create"
				" op mempool");
		goto create_err;
	}

	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}
		memset(qp->op_cookies[i], 0, qat_qp_conf->cookie_size);
	}

	qp->qat_dev_gen = qat_dev->qat_dev_gen;
	qp->build_request = qat_qp_conf->build_request;
	qp->service_type = qat_qp_conf->hw->service_type;
	qp->qat_dev = qat_dev;

	QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
			queue_pair_id, op_cookie_pool_name);

	*qp_addr = qp;
	return 0;

create_err:
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}

int qat_qp_release(struct qat_qp **qp_addr)
{
	struct qat_qp *qp = *qp_addr;
	uint32_t i;

	if (qp == NULL) {
		QAT_LOG(DEBUG, "qp already freed");
		return 0;
	}

	QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
				qp->qat_dev->qat_dev_id);

	/* Don't free memory if there are still responses to be processed */
	if ((qp->enqueued - qp->dequeued) == 0) {
		qat_queue_delete(&(qp->tx_q));
		qat_queue_delete(&(qp->rx_q));
	} else {
		return -EAGAIN;
	}

	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
					&qp->qat_dev->arb_csr_lock);
	for (i = 0; i < qp->nb_descriptors; i++)
		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	*qp_addr = NULL;
	return 0;
}

static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		QAT_LOG(DEBUG, "Invalid queue");
		return;
	}
	QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
			queue->hw_queue_number, queue->memz_name);

	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			QAT_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		QAT_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}

static int
qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
		struct qat_qp_config *qp_conf, uint8_t dir)
{
	uint64_t queue_base;
	void *io_addr;
	const struct rte_memzone *qp_mz;
	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
	int ret = 0;
	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
	uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);

	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);

	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
		return -EINVAL;
	}

	/*
	 * Allocate a memzone for the queue - create a unique name.
	 */
	snprintf(queue->memz_name, sizeof(queue->memz_name),
			"%s_%d_%s_%s_%d_%d",
		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
		qp_conf->service_str, "qp_mem",
		queue->hw_bundle_number, queue->hw_queue_number);
	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
			qat_dev->pci_dev->device.numa_node);
	if (qp_mz == NULL) {
		QAT_LOG(ERR, "Failed to allocate ring memzone");
		return -ENOMEM;
	}

	queue->base_addr = (char *)qp_mz->addr;
	queue->base_phys_addr = qp_mz->iova;
	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
			queue_size_bytes)) {
		QAT_LOG(ERR, "Invalid alignment on queue create "
					" 0x%"PRIx64"\n",
					queue->base_phys_addr);
		ret = -EFAULT;
		goto queue_create_err;
	}

	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
			&(queue->queue_size)) != 0) {
		QAT_LOG(ERR, "Invalid num inflights");
		ret = -EINVAL;
		goto queue_create_err;
	}

	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
	queue->head = 0;
	queue->tail = 0;
	queue->msg_size = desc_size;

	/* For fast calculation of cookie index, relies on msg_size being 2^n */
	queue->trailz = __builtin_ctz(desc_size);
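
	/* Worked example of the shift above: with 128-byte descriptors,
	 * trailz = 7, so the op cookie for the message at byte offset
	 * 'tail' is op_cookies[tail >> 7], i.e. tail / 128.
	 */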

	/*
	 * Write an unused pattern to the queue memory.
	 */
	memset(queue->base_addr, 0x7F, queue_size_bytes);
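
	/* Note: 0x7F is the per-byte value of the ring-empty signature
	 * (ADF_RING_EMPTY_SIG_BYTE), so a full 4-byte word of it matches
	 * ADF_RING_EMPTY_SIG checked on the response-polling paths below;
	 * a freshly created ring therefore reads back as empty.
	 */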

	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
			queue->queue_size);
	io_addr = pci_dev->mem_resource[0].addr;
	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_base);

	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
			" nb msgs %u, msg_size %u, modulo mask %u",
			queue->memz_name,
			queue->queue_size, queue_size_bytes,
			qp_conf->nb_descriptors, desc_size,
			queue->modulo_mask);
	return 0;

queue_create_err:
	rte_memzone_free(qp_mz);
	return ret;
}

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	if (((queue_size_bytes - 1) & phys_addr) != 0)
		return -EINVAL;
	return 0;
}
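
/* Example for the check above: a 16384-byte ring must start on a 16384-byte
 * boundary, so any of the low 14 bits of the IOVA being set (mask 0x3FFF)
 * rejects the queue base address.
 */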

static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
	uint32_t *p_queue_size_for_csr)
{
	uint8_t i = ADF_MIN_RING_SIZE;
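
	/* The loop below searches for the CSR ring-size encoding whose byte
	 * count matches msg_size * msg_num. Worked example, assuming the
	 * usual ADF encoding ADF_SIZE_TO_RING_SIZE_IN_BYTES(i) == 2^(i + 6):
	 * 4096 descriptors of 64 bytes = 256 KiB = 2^18, which maps to i = 12.
	 */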

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) ==
				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
			*p_queue_size_for_csr = i;
			return 0;
		}
	QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
	return -EFAULT;
}

static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
					txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value |= (0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
					rte_spinlock_t *lock)
{
	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
					(ADF_ARB_REG_SLOT *
					txq->hw_bundle_number);
	uint32_t value;

	rte_spinlock_lock(lock);
	value = ADF_CSR_RD(base_addr, arb_csr_offset);
	value &= ~(0x01 << txq->hw_queue_number);
	ADF_CSR_WR(base_addr, arb_csr_offset, value);
	rte_spinlock_unlock(lock);
}

static void adf_configure_queues(struct qat_qp *qp)
{
	uint32_t queue_config;
	struct qat_queue *queue = &qp->tx_q;

	queue_config = BUILD_RING_CONFIG(queue->queue_size);
	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);

	queue = &qp->rx_q;
	queue_config =
			BUILD_RESP_RING_CONFIG(queue->queue_size,
					ADF_RING_NEAR_WATERMARK_512,
					ADF_RING_NEAR_WATERMARK_0);
	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, queue_config);
}

static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}
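
/* Example: the byte-offset ring pointers wrap via this mask rather than a
 * division. With a 4096-byte ring, modulo_mask is 0xFFF, so advancing the
 * tail from offset 4064 by a 64-byte message gives 4128 & 0xFFF = 32.
 */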

static inline void
txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, q->tail);
	q->csr_tail = q->tail;
}

static inline
void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
	uint32_t old_head, new_head;
	uint32_t max_head;

	old_head = q->csr_head;
	new_head = q->head;
	max_head = qp->nb_descriptors * q->msg_size;

	/* write out free descriptors */
	void *cur_desc = (uint8_t *)q->base_addr + old_head;

	if (new_head < old_head) {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
	} else {
		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
	}
	q->nb_processed_responses = 0;
	q->csr_head = new_head;

	/* write current head to CSR */
	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
			q->hw_queue_number, new_head);
}
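
/* Example of the wrap handling above: on a 4096-byte ring with csr_head at
 * 3968 and head now at 128, the first memset clears bytes 3968..4095 and the
 * second clears bytes 0..127; without a wrap a single memset covers the gap.
 */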

uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	/* dequeued can only be written by one thread, but it may not
	 * be this thread. As it's 4-byte aligned it will be read
	 * atomically here by any Intel CPU.
	 * enqueued can wrap before dequeued, but cannot
	 * lap it as var size of enq/deq (uint32_t) > var size of
	 * max_inflights (uint16_t). In reality inflights is never
	 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
	 * On wrapping, the calculation still returns the correct
	 * positive value as all three vars are unsigned.
	 */
	uint32_t inflights =
			tmp_qp->enqueued - tmp_qp->dequeued;
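
	/* Worked example of the unsigned wrap-around noted above: if enqueued
	 * has wrapped to 3 while dequeued is still 0xFFFFFFFE, then
	 * 3 - 0xFFFFFFFE computed in uint32_t is 5, the true number in flight.
	 */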

	if ((inflights + nb_ops) > tmp_qp->max_inflights) {
		nb_ops_possible = tmp_qp->max_inflights - inflights;
		if (nb_ops_possible == 0)
			return 0;
	}
	/* QAT has plenty of work queued already, so don't waste cycles
	 * enqueueing, wait til the application has gathered a bigger
	 * burst or some completed ops have been dequeued
	 */
	if (tmp_qp->min_enq_burst_threshold && inflights >
			QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
			tmp_qp->min_enq_burst_threshold) {
		tmp_qp->stats.threshold_hit_count++;
		return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = tmp_qp->build_request(*ops, base_addr + tail,
			tmp_qp->op_cookies[tail >> queue->trailz],
			tmp_qp->qat_dev_gen);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			/* This message cannot be enqueued */
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}
		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->enqueued += nb_ops_sent;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	txq_write_tail(tmp_qp, queue);
	return nb_ops_sent;
}

/* Use this for compression only - but keep consistent with above common
 * function as much as possible.
 */
uint16_t
qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register int nb_desc_to_build;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;

	int descriptors_built, total_descriptors_built = 0;
	int nb_remaining_descriptors;
	int overflow = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	/* dequeued can only be written by one thread, but it may not
	 * be this thread. As it's 4-byte aligned it will be read
	 * atomically here by any Intel CPU.
	 * enqueued can wrap before dequeued, but cannot
	 * lap it as var size of enq/deq (uint32_t) > var size of
	 * max_inflights (uint16_t). In reality inflights is never
	 * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
	 * On wrapping, the calculation still returns the correct
	 * positive value as all three vars are unsigned.
	 */
	uint32_t inflights =
			tmp_qp->enqueued - tmp_qp->dequeued;

	/* Find how many can actually fit on the ring */
	overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
	if (overflow > 0) {
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	/* QAT has plenty of work queued already, so don't waste cycles
	 * enqueueing, wait til the application has gathered a bigger
	 * burst or some completed ops have been dequeued
	 */
	if (tmp_qp->min_enq_burst_threshold && inflights >
			QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
			tmp_qp->min_enq_burst_threshold) {
		tmp_qp->stats.threshold_hit_count++;
		return 0;
	}

	/* At this point nb_ops_possible is assuming a 1:1 mapping
	 * between ops and descriptors.
	 * Fewer may be sent if some ops have to be split.
	 * nb_ops_possible is <= burst size.
	 * Find out how many spaces are actually available on the qp in case
	 * more are needed.
	 */
	nb_remaining_descriptors = nb_ops_possible
			 + ((overflow >= 0) ? 0 : overflow * (-1));
	QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
			nb_ops, nb_remaining_descriptors);
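
	/* Worked example: with max_inflights = 100, inflights = 60 and a
	 * burst of nb_ops = 32, overflow = (60 + 32) - 100 = -8, so all 32
	 * ops may be sent and nb_remaining_descriptors = 32 + 8 = 40 ring
	 * slots are free for ops that must be split into extra descriptors.
	 */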

	while (nb_ops_sent != nb_ops_possible &&
				nb_remaining_descriptors > 0) {
		struct qat_comp_op_cookie *cookie =
			tmp_qp->op_cookies[tail >> queue->trailz];

		descriptors_built = 0;

		QAT_DP_LOG(DEBUG, "--- data length: %u",
			   ((struct rte_comp_op *)*ops)->src.length);

		nb_desc_to_build = qat_comp_build_request(*ops,
				base_addr + tail, cookie, tmp_qp->qat_dev_gen);
		QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
			"%d ops sent, %d descriptors needed",
			total_descriptors_built, nb_remaining_descriptors,
			nb_ops_sent, nb_desc_to_build);

		if (unlikely(nb_desc_to_build < 0)) {
			/* this message cannot be enqueued */
			tmp_qp->stats.enqueue_err_count++;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		} else if (unlikely(nb_desc_to_build > 1)) {
			/* this op is too big and must be split - get more
			 * descriptors and retry
			 */
			QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
					nb_desc_to_build);

			nb_remaining_descriptors -= nb_desc_to_build;
			if (nb_remaining_descriptors >= 0) {
				/* There are enough remaining descriptors
				 * so retry
				 */
				int ret2 = qat_comp_build_multiple_requests(
						*ops, tmp_qp, tail,
						nb_desc_to_build);

				if (unlikely(ret2 < 1)) {
					QAT_DP_LOG(DEBUG,
						"Failed to build (%d) descriptors, status %d",
						nb_desc_to_build, ret2);

					qat_comp_free_split_op_memzones(cookie,
							nb_desc_to_build - 1);

					tmp_qp->stats.enqueue_err_count++;

					/* This message cannot be enqueued */
					if (nb_ops_sent == 0)
						return 0;
					goto kick_tail;
				} else {
					descriptors_built = ret2;
					total_descriptors_built +=
							descriptors_built;
					nb_remaining_descriptors -=
							descriptors_built;
					QAT_DP_LOG(DEBUG,
						"Multiple descriptors (%d) built ok",
						descriptors_built);
				}
			} else {
				QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
						"exceeds number of available descriptors (%d)",
						nb_desc_to_build,
						nb_remaining_descriptors +
							nb_desc_to_build);

				qat_comp_free_split_op_memzones(cookie,
						nb_desc_to_build - 1);

				/* Not enough extra descriptors */
				if (nb_ops_sent == 0)
					return 0;
				goto kick_tail;
			}
		} else {
			descriptors_built = 1;
			total_descriptors_built++;
			nb_remaining_descriptors--;
			QAT_DP_LOG(DEBUG, "Single descriptor built ok");
		}

		tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
				  queue->modulo_mask);
		ops++;
		nb_ops_sent++;
	}
kick_tail:
	queue->tail = tail;
	tmp_qp->enqueued += total_descriptors_built;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	txq_write_tail(tmp_qp, queue);
	return nb_ops_sent;
}

uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
{
	struct qat_queue *rx_queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t head;
	uint32_t op_resp_counter = 0, fw_resp_counter = 0;
	uint8_t *resp_msg;
	int nb_fw_responses;

	rx_queue = &(tmp_qp->rx_q);
	head = rx_queue->head;
	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			op_resp_counter != nb_ops) {
		nb_fw_responses = 1;

		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
			qat_sym_process_response(ops, resp_msg);
		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
			nb_fw_responses = qat_comp_process_response(
				ops, resp_msg,
				tmp_qp->op_cookies[head >> rx_queue->trailz],
				&tmp_qp->stats.dequeue_err_count);
#ifdef BUILD_QAT_ASYM
		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC)
			qat_asym_process_response(ops, resp_msg,
				tmp_qp->op_cookies[head >> rx_queue->trailz]);
#endif

		head = adf_modulo(head + rx_queue->msg_size,
				  rx_queue->modulo_mask);

		resp_msg = (uint8_t *)rx_queue->base_addr + head;
		if (nb_fw_responses) {
			/* only move on to next op if one was ready to return
			 * to API
			 */
			ops++;
			op_resp_counter++;
		}

		/* A compression op may be broken up into multiple fw requests.
		 * Only count fw responses as complete once ALL the responses
		 * associated with an op have been processed, as the cookie
		 * data from the first response must be available until
		 * finished with all firmware responses.
		 */
		fw_resp_counter += nb_fw_responses;
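
		/* Example of the counting above: if a compression op was split
		 * into 3 descriptors, the response handler reports 0 until all
		 * of its firmware responses have arrived and then reports 3,
		 * so the op is returned to the application once while
		 * fw_resp_counter (and hence tmp_qp->dequeued) advances by 3.
		 */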
		rx_queue->nb_processed_responses++;
	}

	tmp_qp->dequeued += fw_resp_counter;
	tmp_qp->stats.dequeued_count += op_resp_counter;

	rx_queue->head = head;
	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
		rxq_free_desc(tmp_qp, rx_queue);

	QAT_DP_LOG(DEBUG, "Dequeue burst return: %u, QAT responses: %u",
			op_resp_counter, fw_resp_counter);

	return op_resp_counter;
}

/* This is almost same as dequeue_op_burst, without the atomic, without stats
 * and without the op. Dequeues one response.
 */
static uint8_t
qat_cq_dequeue_response(struct qat_qp *qp, void *out_data)
{
	uint8_t result = 0;
	uint8_t retries = 0;
	struct qat_queue *queue = &(qp->rx_q);
	struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (retries++ < QAT_CQ_MAX_DEQ_RETRIES &&
			*(uint32_t *)resp_msg == ADF_RING_EMPTY_SIG) {
		/* loop waiting for response until we reach the timeout */
		rte_delay_ms(20);
	}

	if (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG) {
		/* response received */
		result = 1;

		/* check status flag */
		if (ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
				resp_msg->comn_hdr.comn_status) ==
				ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
			memcpy(out_data, resp_msg, queue->msg_size);
		} else {
			memset(out_data, 0, queue->msg_size);
		}

		queue->head = adf_modulo(queue->head + queue->msg_size,
				queue->modulo_mask);
		rxq_free_desc(qp, queue);
	}

	return result;
}

/* Sends a NULL message and extracts QAT fw version from the response.
 * Used to determine detailed capabilities based on the fw version number.
 * This assumes that there are no inflight messages, i.e. assumes there's space
 * on the qp, one message is sent and only one response collected.
 * Returns fw version number or 0 for unknown version or a negative error code.
 */
int
qat_cq_get_fw_version(struct qat_qp *qp)
{
	struct qat_queue *queue = &(qp->tx_q);
	uint8_t *base_addr = (uint8_t *)queue->base_addr;
	struct icp_qat_fw_comn_req null_msg;
	struct icp_qat_fw_comn_resp response;

	/* prepare the NULL request */
	memset(&null_msg, 0, sizeof(null_msg));
	null_msg.comn_hdr.hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	null_msg.comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
	null_msg.comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "NULL request", &null_msg, sizeof(null_msg));
#endif

	/* send the NULL request */
	memcpy(base_addr + queue->tail, &null_msg, sizeof(null_msg));
	queue->tail = adf_modulo(queue->tail + queue->msg_size,
			queue->modulo_mask);
	txq_write_tail(qp, queue);

	/* receive a response */
	if (qat_cq_dequeue_response(qp, &response)) {

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "NULL response:", &response,
				sizeof(response));
#endif

		/* if LW0 bit 24 is set - then the fw version was returned */
		if (QAT_FIELD_GET(response.comn_hdr.hdr_flags,
				ICP_QAT_FW_COMN_NULL_VERSION_FLAG_BITPOS,
				ICP_QAT_FW_COMN_NULL_VERSION_FLAG_MASK))
			return response.resrvd[0]; /* return LW4 */
		else
			return 0; /* not set - we don't know fw version */
	}

	QAT_LOG(ERR, "No response received");
	return -EINVAL;
}

__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
			  void *op_cookie __rte_unused,
			  uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}