diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 7fea10c76a..b190f2cee4 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -6,24 +6,21 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
 
 #include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_algs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
 #include "adf_transport_access_macros.h"
 
-#define ADF_MAX_SYM_DESC		4096
-#define ADF_MIN_SYM_DESC		128
-#define ADF_SYM_TX_RING_DESC_SIZE	128
-#define ADF_SYM_RX_RING_DESC_SIZE	32
-#define ADF_SYM_TX_QUEUE_STARTOFF	2
-/* Offset from bundle start to 1st Sym Tx queue */
-#define ADF_SYM_RX_QUEUE_STARTOFF	10
+
+#define ADF_MAX_DESC			4096
+#define ADF_MIN_DESC			128
+
 #define ADF_ARB_REG_SLOT		0x1000
 #define ADF_ARB_RINGSRVARBEN_OFFSET	0x19C
 
@@ -31,23 +28,91 @@
 	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
 	(ADF_ARB_REG_SLOT * index), value)
 
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+					 [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+	/* queue pairs which provide an asymmetric crypto service */
+	[QAT_SERVICE_ASYMMETRIC] = {
+		{
+			.service_type = QAT_SERVICE_ASYMMETRIC,
+			.hw_bundle_num = 0,
+			.tx_ring_num = 0,
+			.rx_ring_num = 8,
+			.tx_msg_size = 64,
+			.rx_msg_size = 32,
+
+		}, {
+			.service_type = QAT_SERVICE_ASYMMETRIC,
+			.hw_bundle_num = 0,
+			.tx_ring_num = 1,
+			.rx_ring_num = 9,
+			.tx_msg_size = 64,
+			.rx_msg_size = 32,
+		}
+	},
+	/* queue pairs which provide a symmetric crypto service */
+	[QAT_SERVICE_SYMMETRIC] = {
+		{
+			.service_type = QAT_SERVICE_SYMMETRIC,
+			.hw_bundle_num = 0,
+			.tx_ring_num = 2,
+			.rx_ring_num = 10,
+			.tx_msg_size = 128,
+			.rx_msg_size = 32,
+		},
+		{
+			.service_type = QAT_SERVICE_SYMMETRIC,
+			.hw_bundle_num = 0,
+			.tx_ring_num = 3,
+			.rx_ring_num = 11,
+			.tx_msg_size = 128,
+			.rx_msg_size = 32,
+		}
+	},
+	/* queue pairs which provide a compression service */
+	[QAT_SERVICE_COMPRESSION] = {
+		{
+			.service_type = QAT_SERVICE_COMPRESSION,
+			.hw_bundle_num = 0,
+			.tx_ring_num = 6,
+			.rx_ring_num = 14,
+			.tx_msg_size = 128,
+			.rx_msg_size = 32,
+		}, {
+			.service_type = QAT_SERVICE_COMPRESSION,
+			.hw_bundle_num = 0,
+			.tx_ring_num = 7,
+			.rx_ring_num = 15,
+			.tx_msg_size = 128,
+			.rx_msg_size = 32,
+		}
+	}
+};
+
 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
 	uint32_t queue_size_bytes);
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
-	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
-	int socket_id);
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
-	struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
-	int socket_id);
 static void qat_queue_delete(struct qat_queue *queue);
-static int qat_queue_create(struct rte_cryptodev *dev,
-	struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
-	int socket_id);
+static int qat_queue_create(struct qat_pci_device *qat_dev,
+	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
 	uint32_t *queue_size_for_csr);
 static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+	rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+	rte_spinlock_t *lock);
+
+
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+		enum qat_service_type service)
+{
+	int i, count;
+
+	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
+		if (qp_hw_data[i].service_type == service)
+			count++;
+	return count;
+}
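/*
 * [Illustration only, not part of the patch] A minimal sketch of how a
 * config-time caller might use the qat_gen1_qps table together with
 * qat_qps_per_service() to pick the fixed hw ring data for one symmetric
 * qp. It assumes the declarations this patch adds to qat_qp.h; the helper
 * name qat_select_sym_qp_hw() is hypothetical.
 */
#include "qat_qp.h"

static const struct qat_qp_hw_data *
qat_select_sym_qp_hw(uint16_t qp_id)
{
	const struct qat_qp_hw_data *sym_hw =
			qat_gen1_qps[QAT_SERVICE_SYMMETRIC];

	/* gen1 wires two symmetric qps per device (rings 2/10 and 3/11) */
	if (qp_id >= qat_qps_per_service(sym_hw, QAT_SERVICE_SYMMETRIC))
		return NULL;
	return &sym_hw[qp_id];
}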
 
 static const struct rte_memzone *
 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
@@ -80,47 +145,33 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
 			socket_id, RTE_MEMZONE_IOVA_CONTIG,
 			queue_size);
 }
 
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
-	const struct rte_cryptodev_qp_conf *qp_conf,
-	int socket_id, struct rte_mempool *session_pool __rte_unused)
+int qat_qp_setup(struct qat_pci_device *qat_dev,
+		struct qat_qp **qp_addr,
+		uint16_t queue_pair_id,
+		struct qat_qp_config *qat_qp_conf)
+
 {
 	struct qat_qp *qp;
-	struct rte_pci_device *pci_dev;
-	int ret;
+	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
 	char op_cookie_pool_name[RTE_RING_NAMESIZE];
 	uint32_t i;
 
-	PMD_INIT_FUNC_TRACE();
-
-	/* If qp is already in use free ring memory and qp metadata. */
-	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
-		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
-		if (ret < 0)
-			return ret;
-	}
+	PMD_DRV_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+		queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
 
-	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
-		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
+	if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
+		(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
 		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
-				qp_conf->nb_descriptors);
+				qat_qp_conf->nb_descriptors);
 		return -EINVAL;
 	}
 
-	pci_dev = RTE_DEV_TO_PCI(dev->device);
-
 	if (pci_dev->mem_resource[0].addr == NULL) {
 		PMD_DRV_LOG(ERR, "Could not find VF config space "
 				"(UIO driver attached?).");
 		return -EINVAL;
 	}
 
-	if (queue_pair_id >=
-		(ADF_NUM_SYM_QPS_PER_BUNDLE *
-				ADF_NUM_BUNDLES_PER_DEV)) {
-		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
-				queue_pair_id);
-		return -EINVAL;
-	}
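/*
 * [Illustration only, not part of the patch] qat_qp_setup() is now
 * service-agnostic: the caller passes everything service-specific in
 * struct qat_qp_config. A sketch of how a symmetric-crypto wrapper might
 * fill it in; the wrapper itself is hypothetical, and qat_sym_build_request
 * plus struct qat_sym_op_cookie are assumed to come from qat_sym.c/qat_sym.h.
 */
#include "qat_qp.h"
#include "qat_sym.h"	/* assumed: cookie type and build_request callback */

static int
setup_one_sym_qp(struct qat_pci_device *qat_dev, struct qat_qp **qp,
		uint16_t qp_id, const struct qat_qp_hw_data *hw,
		uint32_t nb_desc, int socket_id)
{
	struct qat_qp_config cfg = {
		.hw = hw,			/* ring numbers and msg sizes */
		.nb_descriptors = nb_desc,	/* within [ADF_MIN_DESC, ADF_MAX_DESC] */
		.cookie_size = sizeof(struct qat_sym_op_cookie),
		.socket_id = socket_id,
		.build_request = qat_sym_build_request,
		.service_str = "sym",		/* used in memzone/mempool names */
	};

	return qat_qp_setup(qat_dev, qp, qp_id, &cfg);
}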
 
 	/* Allocate the queue pair data structure. */
 	qp = rte_zmalloc("qat PMD qp metadata",
 			sizeof(*qp), RTE_CACHE_LINE_SIZE);
@@ -128,9 +179,9 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
 		return -ENOMEM;
 	}
-	qp->nb_descriptors = qp_conf->nb_descriptors;
+	qp->nb_descriptors = qat_qp_conf->nb_descriptors;
 	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
-			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+			qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
 			RTE_CACHE_LINE_SIZE);
 	if (qp->op_cookies == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
@@ -141,15 +192,15 @@
 	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
 	qp->inflights16 = 0;
 
-	if (qat_tx_queue_create(dev, &(qp->tx_q),
-		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+	if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
+					ADF_RING_DIR_TX) != 0) {
 		PMD_INIT_LOG(ERR, "Tx queue create failed "
 				"queue_pair_id=%u", queue_pair_id);
 		goto create_err;
 	}
 
-	if (qat_rx_queue_create(dev, &(qp->rx_q),
-		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+	if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
+					ADF_RING_DIR_RX) != 0) {
 		PMD_DRV_LOG(ERR, "Rx queue create failed "
 				"queue_pair_id=%hu", queue_pair_id);
 		qat_queue_delete(&(qp->tx_q));
@@ -157,17 +208,21 @@
 	}
 
 	adf_configure_queues(qp);
-	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
-	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
-		pci_dev->driver->driver.name, dev->data->dev_id,
-		queue_pair_id);
+	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+					&qat_dev->arb_csr_lock);
+
+	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+					"%s%d_cookies_%s_qp%hu",
+		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+		qat_qp_conf->service_str, queue_pair_id);
+
+	PMD_DRV_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
 	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
 	if (qp->op_cookie_pool == NULL)
 		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
 				qp->nb_descriptors,
-				sizeof(struct qat_crypto_op_cookie), 64, 0,
-				NULL, NULL, NULL, NULL, socket_id,
+				qat_qp_conf->cookie_size, 64, 0,
+				NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
 				0);
 	if (!qp->op_cookie_pool) {
 		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
@@ -180,26 +235,16 @@
 		PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
 		goto create_err;
 	}
-
-		struct qat_crypto_op_cookie *sql_cookie =
-				qp->op_cookies[i];
-
-		sql_cookie->qat_sgl_src_phys_addr =
-				rte_mempool_virt2iova(sql_cookie) +
-				offsetof(struct qat_crypto_op_cookie,
-				qat_sgl_list_src);
-
-		sql_cookie->qat_sgl_dst_phys_addr =
-				rte_mempool_virt2iova(sql_cookie) +
-				offsetof(struct qat_crypto_op_cookie,
-				qat_sgl_list_dst);
 	}
 
-	struct qat_pmd_private *internals
-		= dev->data->dev_private;
-	qp->qat_dev_gen = internals->qat_dev_gen;
+	qp->qat_dev_gen = qat_dev->qat_dev_gen;
+	qp->build_request = qat_qp_conf->build_request;
+	qp->qat_dev = qat_dev;
 
-	dev->data->queue_pairs[queue_pair_id] = qp;
+	PMD_DRV_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
+			queue_pair_id, op_cookie_pool_name);
+
+	*qp_addr = qp;
 	return 0;
 
 create_err:
@@ -210,10 +255,9 @@ create_err:
 	return -EFAULT;
 }
 
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+int qat_qp_release(struct qat_qp **qp_addr)
 {
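/*
 * [Illustration only, not part of the patch] The function whose body follows
 * refuses to free a qp while responses are outstanding (inflights16 != 0)
 * and returns -EAGAIN. A teardown path can therefore drain and retry; this
 * loop is a hypothetical sketch, assuming the device eventually returns all
 * in-flight responses and 'ops' is a caller-sized scratch array.
 */
#include "qat_qp.h"

static void
drain_and_release(struct qat_qp **qp, void **ops, uint16_t burst)
{
	/* qat_qp_release() sets *qp to NULL once the rings are freed */
	while (qat_qp_release(qp) == -EAGAIN)
		(void)qat_dequeue_op_burst(*qp, ops, burst);
}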
-	struct qat_qp *qp =
-		(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+	struct qat_qp *qp = *qp_addr;
 	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -222,6 +266,9 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 		return 0;
 	}
 
+	PMD_DRV_LOG(DEBUG, "Free qp on qat_pci device %d",
+		qp->qat_dev->qat_dev_id);
+
 	/* Don't free memory if there are still responses to be processed */
 	if (qp->inflights16 == 0) {
 		qat_queue_delete(&(qp->tx_q));
@@ -230,7 +277,8 @@
 		return -EAGAIN;
 	}
 
-	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+					&qp->qat_dev->arb_csr_lock);
 
 	for (i = 0; i < qp->nb_descriptors; i++)
 		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
@@ -240,41 +288,10 @@
 	rte_free(qp->op_cookies);
 	rte_free(qp);
 
-	dev->data->queue_pairs[queue_pair_id] = NULL;
+	*qp_addr = NULL;
 	return 0;
 }
 
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
-	struct qat_queue *queue, uint8_t qp_id,
-	uint32_t nb_desc, int socket_id)
-{
-	PMD_INIT_FUNC_TRACE();
-	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
-	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
-					ADF_SYM_TX_QUEUE_STARTOFF;
-	PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
-		nb_desc, qp_id, queue->hw_bundle_number,
-		queue->hw_queue_number);
-
-	return qat_queue_create(dev, queue, nb_desc,
-		ADF_SYM_TX_RING_DESC_SIZE, socket_id);
-}
-
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
-	struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
-	int socket_id)
-{
-	PMD_INIT_FUNC_TRACE();
-	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
-	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
-					ADF_SYM_RX_QUEUE_STARTOFF;
-
-	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
-		nb_desc, qp_id, queue->hw_bundle_number,
-		queue->hw_queue_number);
-	return qat_queue_create(dev, queue, nb_desc,
-		ADF_SYM_RX_RING_DESC_SIZE, socket_id);
-}
 
 static void qat_queue_delete(struct qat_queue *queue)
 {
@@ -285,6 +302,9 @@ static void qat_queue_delete(struct qat_queue *queue)
 		PMD_DRV_LOG(DEBUG, "Invalid queue");
 		return;
 	}
+	PMD_DRV_LOG(DEBUG, "Free ring %d, memzone: %s",
+			queue->hw_queue_number, queue->memz_name);
+
 	mz = rte_memzone_lookup(queue->memz_name);
 	if (mz != NULL) {
 		/* Write an unused pattern to the queue memory. */
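/*
 * [Illustration only, not part of the patch] The qat_queue_create() rework
 * below replaces the stored ring "modulo" shift with a precomputed
 * modulo_mask, so ring offsets wrap with a single AND instead of a division.
 * That only works because ADF ring sizes are powers of two. A standalone
 * demo of the identity (x & (size - 1)) == (x % size):
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ring_bytes = 4096;		/* power of two */
	uint32_t modulo_mask = ring_bytes - 1;	/* as queue->modulo_mask */
	uint32_t tail = 4032, msg_size = 128;

	/* advancing past the end wraps to offset 64 */
	tail = (tail + msg_size) & modulo_mask;
	assert(tail == (4032 + 128) % ring_bytes);
	return 0;
}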
@@ -300,31 +320,37 @@
 }
 
 static int
-qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
-		uint32_t nb_desc, uint8_t desc_size, int socket_id)
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
+		struct qat_qp_config *qp_conf, uint8_t dir)
 {
 	uint64_t queue_base;
 	void *io_addr;
 	const struct rte_memzone *qp_mz;
-	uint32_t queue_size_bytes = nb_desc*desc_size;
-	struct rte_pci_device *pci_dev;
+	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+	int ret = 0;
+	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
+			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
+	uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
+
+	queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
+	queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
+			qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
 
-	PMD_INIT_FUNC_TRACE();
 	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
 		PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
 		return -EINVAL;
 	}
 
-	pci_dev = RTE_DEV_TO_PCI(dev->device);
-
 	/*
 	 * Allocate a memzone for the queue - create a unique name.
 	 */
-	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
-		pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
+	snprintf(queue->memz_name, sizeof(queue->memz_name),
+			"%s_%d_%s_%s_%d_%d",
+		pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+		qp_conf->service_str, "qp_mem",
 		queue->hw_bundle_number, queue->hw_queue_number);
 	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
-			socket_id);
+			qp_conf->socket_id);
 	if (qp_mz == NULL) {
 		PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
 		return -ENOMEM;
@@ -337,27 +363,25 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
 			" 0x%"PRIx64"\n",
 			queue->base_phys_addr);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto queue_create_err;
 	}
 
-	if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
-			!= 0) {
+	if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
+			&(queue->queue_size)) != 0) {
 		PMD_DRV_LOG(ERR, "Invalid num inflights");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto queue_create_err;
 	}
 
 	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
 					ADF_BYTES_TO_MSG_SIZE(desc_size));
-	queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
-	PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
-				" msg_size %u, max_inflights %u modulo %u",
-				queue->queue_size, queue_size_bytes,
-				nb_desc, desc_size, queue->max_inflights,
-				queue->modulo);
+	queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
 
 	if (queue->max_inflights < 2) {
 		PMD_DRV_LOG(ERR, "Invalid num inflights");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto queue_create_err;
 	}
 	queue->head = 0;
 	queue->tail = 0;
@@ -375,7 +399,19 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
 			queue->hw_queue_number, queue_base);
+
+	PMD_DRV_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
+		" nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
+			queue->memz_name,
+			queue->queue_size, queue_size_bytes,
+			qp_conf->nb_descriptors, desc_size,
+			queue->max_inflights, queue->modulo_mask);
+
 	return 0;
+
+queue_create_err:
+	rte_memzone_free(qp_mz);
+	return ret;
 }
 
 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
@@ -403,7 +439,8 @@ static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
 	return -EINVAL;
 }
 
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+					rte_spinlock_t *lock)
 {
 	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
 				(ADF_ARB_REG_SLOT *
@@ -411,12 +448,16 @@ static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
 	uint32_t value;
 
 	PMD_INIT_FUNC_TRACE();
+
+	rte_spinlock_lock(lock);
 	value = ADF_CSR_RD(base_addr, arb_csr_offset);
 	value |= (0x01 << txq->hw_queue_number);
 	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
 }
 
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+					rte_spinlock_t *lock)
 {
 	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
 				(ADF_ARB_REG_SLOT *
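/*
 * [Illustration only, not part of the patch] The next hunk also changes the
 * arbiter disable path from "value ^= bit" to "value &= ~bit". XOR merely
 * toggles the bit, so disabling an already-disabled ring would re-enable
 * it; AND with the complement clears it unconditionally. The read-modify-
 * write is done under arb_csr_lock because the CSR is shared by all rings
 * on the device. A standalone demo of the bit logic:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t bit = 1u << 3;
	uint32_t csr = 0;		/* ring 3 already disabled */

	assert((csr ^ bit) == bit);	/* XOR: wrongly re-enables */
	assert((csr & ~bit) == 0);	/* AND-NOT: stays disabled */
	return 0;
}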
@@ -424,9 +465,12 @@ static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
 	uint32_t value;
 
 	PMD_INIT_FUNC_TRACE();
+
+	rte_spinlock_lock(lock);
 	value = ADF_CSR_RD(base_addr, arb_csr_offset);
-	value ^= (0x01 << txq->hw_queue_number);
+	value &= ~(0x01 << txq->hw_queue_number);
 	ADF_CSR_WR(base_addr, arb_csr_offset, value);
+	rte_spinlock_unlock(lock);
 }
 
 static void adf_configure_queues(struct qat_qp *qp)
@@ -449,3 +493,148 @@ static void adf_configure_queues(struct qat_qp *qp)
 	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
 			queue->hw_queue_number, queue_config);
 }
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+	return data & modulo_mask;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, q->tail);
+	q->nb_pending_requests = 0;
+	q->csr_tail = q->tail;
+}
+
+static inline
+void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+	uint32_t old_head, new_head;
+	uint32_t max_head;
+
+	old_head = q->csr_head;
+	new_head = q->head;
+	max_head = qp->nb_descriptors * q->msg_size;
+
+	/* write out free descriptors */
+	void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+	if (new_head < old_head) {
+		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+	} else {
+		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+	}
+	q->nb_processed_responses = 0;
+	q->csr_head = new_head;
+
+	/* write current head to CSR */
+	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+	register struct qat_queue *queue;
+	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+	register uint32_t nb_ops_sent = 0;
+	register int ret;
+	uint16_t nb_ops_possible = nb_ops;
+	register uint8_t *base_addr;
+	register uint32_t tail;
+	int overflow;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	/* read params used a lot in main loop into registers */
+	queue = &(tmp_qp->tx_q);
+	base_addr = (uint8_t *)queue->base_addr;
+	tail = queue->tail;
+
+	/* Find how many can actually fit on the ring */
+	tmp_qp->inflights16 += nb_ops;
+	overflow = tmp_qp->inflights16 - queue->max_inflights;
+	if (overflow > 0) {
+		tmp_qp->inflights16 -= overflow;
+		nb_ops_possible = nb_ops - overflow;
+		if (nb_ops_possible == 0)
+			return 0;
+	}
+
+	while (nb_ops_sent != nb_ops_possible) {
+		ret = tmp_qp->build_request(*ops, base_addr + tail,
+				tmp_qp->op_cookies[tail / queue->msg_size],
+				tmp_qp->qat_dev_gen);
+		if (ret != 0) {
+			tmp_qp->stats.enqueue_err_count++;
+			/*
+			 * This message cannot be enqueued,
+			 * decrease number of ops that wasn't sent
+			 */
+			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+			if (nb_ops_sent == 0)
+				return 0;
+			goto kick_tail;
+		}
+
+		tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+		ops++;
+		nb_ops_sent++;
+	}
+kick_tail:
+	queue->tail = tail;
+	tmp_qp->stats.enqueued_count += nb_ops_sent;
+	queue->nb_pending_requests += nb_ops_sent;
+	if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+		queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+		txq_write_tail(tmp_qp, queue);
+	}
+	return nb_ops_sent;
+}
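/*
 * [Illustration only, not part of the patch] txq_write_tail() above is only
 * invoked once enough requests are pending (QAT_CSR_TAIL_WRITE_THRESH) or
 * the queue is nearly idle (inflights below the force threshold), so several
 * enqueue bursts can share one MMIO doorbell write. A generic sketch of the
 * same coalescing idea, with hypothetical names:
 */
#include <stdint.h>

struct ring {
	uint32_t tail;		/* software tail */
	uint32_t csr_tail;	/* last tail written to hardware */
	uint32_t nb_pending;	/* msgs queued since the last doorbell */
};

static void write_doorbell(struct ring *r)	/* stands in for the CSR write */
{
	r->csr_tail = r->tail;
	r->nb_pending = 0;
}

static void maybe_kick(struct ring *r, uint32_t thresh)
{
	/* flush only when enough work has accumulated since the last write */
	if (r->nb_pending > thresh && r->tail != r->csr_tail)
		write_doorbell(r);
}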
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+	struct qat_queue *rx_queue, *tx_queue;
+	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+	uint32_t head;
+	uint32_t resp_counter = 0;
+	uint8_t *resp_msg;
+
+	rx_queue = &(tmp_qp->rx_q);
+	tx_queue = &(tmp_qp->tx_q);
+	head = rx_queue->head;
+	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+			resp_counter != nb_ops) {
+
+		qat_sym_process_response(ops, resp_msg);
+
+		head = adf_modulo(head + rx_queue->msg_size,
+				rx_queue->modulo_mask);
+
+		resp_msg = (uint8_t *)rx_queue->base_addr + head;
+		ops++;
+		resp_counter++;
+	}
+	if (resp_counter > 0) {
+		rx_queue->head = head;
+		tmp_qp->stats.dequeued_count += resp_counter;
+		rx_queue->nb_processed_responses += resp_counter;
+		tmp_qp->inflights16 -= resp_counter;
+
+		if (rx_queue->nb_processed_responses >
+						QAT_CSR_HEAD_WRITE_THRESH)
+			rxq_free_desc(tmp_qp, rx_queue);
+	}
+	/* also check if tail needs to be advanced */
+	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+		tx_queue->tail != tx_queue->csr_tail) {
+		txq_write_tail(tmp_qp, tx_queue);
+	}
+	return resp_counter;
+}
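/*
 * [Illustration only, not part of the patch] rxq_free_desc() earlier in this
 * patch clears consumed response slots back to the empty-signature byte
 * before advancing the head CSR; when the new head has wrapped below the old
 * one the region is split into two memsets. The same pattern on a plain
 * buffer, with EMPTY standing in for ADF_RING_EMPTY_SIG_BYTE:
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define EMPTY 0x7F

static void clear_consumed(uint8_t *base, uint32_t ring_bytes,
		uint32_t old_head, uint32_t new_head)
{
	if (new_head < old_head) {	/* wrapped: clear end, then front */
		memset(base + old_head, EMPTY, ring_bytes - old_head);
		memset(base, EMPTY, new_head);
	} else {
		memset(base + old_head, EMPTY, new_head - old_head);
	}
}

int main(void)
{
	uint8_t ring[256];

	memset(ring, 0, sizeof(ring));
	clear_consumed(ring, sizeof(ring), 192, 64);	/* wrap case */
	assert(ring[200] == EMPTY && ring[10] == EMPTY && ring[100] == 0);
	return 0;
}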