diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb7..026ea5ee01 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -19,6 +19,7 @@
 #include "qat_asym.h"
 #include "qat_comp.h"
 #include "adf_transport_access_macros.h"
+#include "adf_transport_access_macros_gen4vf.h"
 
 #define QAT_CQ_MAX_DEQ_RETRIES 10
 
@@ -138,21 +139,34 @@ static int qat_queue_create(struct qat_pci_device *qat_dev,
 	struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
 	uint32_t *queue_size_for_csr);
-static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
-	rte_spinlock_t *lock);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
-	rte_spinlock_t *lock);
-
-
-int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+static void adf_configure_queues(struct qat_qp *queue,
+	enum qat_device_gen qat_dev_gen);
+static void adf_queue_arb_enable(enum qat_device_gen qat_dev_gen,
+	struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock);
+static void adf_queue_arb_disable(enum qat_device_gen qat_dev_gen,
+	struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock);
+
+int qat_qps_per_service(struct qat_pci_device *qat_dev,
 		enum qat_service_type service)
 {
-	int i, count;
+	int i = 0, count = 0, max_ops_per_srv = 0;
+
+	if (qat_dev->qat_dev_gen == QAT_GEN4) {
+		max_ops_per_srv = QAT_GEN4_BUNDLE_NUM;
+		for (i = 0, count = 0; i < max_ops_per_srv; i++)
+			if (qat_dev->qp_gen4_data[i][0].service_type == service)
+				count++;
+	} else {
+		const struct qat_qp_hw_data *sym_hw_qps =
+				qat_gen_config[qat_dev->qat_dev_gen]
+				.qp_hw_data[service];
+
+		max_ops_per_srv = ADF_MAX_QPS_ON_ANY_SERVICE;
+		for (i = 0, count = 0; i < max_ops_per_srv; i++)
+			if (sym_hw_qps[i].service_type == service)
+				count++;
+	}
 
-	for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
-		if (qp_hw_data[i].service_type == service)
-			count++;
 	return count;
 }
 
@@ -190,11 +204,12 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
 		struct qat_qp **qp_addr,
 		uint16_t queue_pair_id,
 		struct qat_qp_config *qat_qp_conf)
-
 {
 	struct qat_qp *qp;
-	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+	struct rte_pci_device *pci_dev =
+		qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
 	char op_cookie_pool_name[RTE_RING_NAMESIZE];
+	enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
 	uint32_t i;
 
 	QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
@@ -258,8 +273,8 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
 		goto create_err;
 	}
 
-	adf_configure_queues(qp);
-	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+	adf_configure_queues(qp, qat_dev_gen);
+	adf_queue_arb_enable(qat_dev_gen, &qp->tx_q, qp->mmap_bar_addr,
 					&qat_dev->arb_csr_lock);
 
 	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
@@ -274,7 +289,7 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
 			qp->nb_descriptors,
 			qat_qp_conf->cookie_size, 64, 0,
 			NULL, NULL, NULL, NULL,
-			qat_dev->pci_dev->device.numa_node,
+			pci_dev->device.numa_node,
 			0);
 	if (!qp->op_cookie_pool) {
 		QAT_LOG(ERR, "QAT PMD Cannot create"
@@ -291,7 +306,6 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
 	}
 
 	qp->qat_dev_gen = qat_dev->qat_dev_gen;
-	qp->build_request = qat_qp_conf->build_request;
 	qp->service_type = qat_qp_conf->hw->service_type;
 	qp->qat_dev = qat_dev;
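
Everything below follows one pattern: instead of a per-qp build_request callback, each helper now takes the device generation and branches between the legacy ADF CSR macros and their *_GEN4VF counterparts. A minimal sketch of that dispatch shape — write_csr_legacy()/write_csr_gen4vf() are illustrative stand-ins, not the real adf_transport_access_macros*.h macros:

#include <stdint.h>
#include <stdio.h>

enum qat_device_gen { QAT_GEN1, QAT_GEN2, QAT_GEN3, QAT_GEN4 };

/* Illustrative stand-ins for the WRITE_CSR_RING_TAIL and
 * WRITE_CSR_RING_TAIL_GEN4VF macro families. */
static void write_csr_legacy(uint32_t bundle, uint32_t ring, uint32_t val)
{
	printf("legacy CSR write: bundle=%u ring=%u val=%u\n", bundle, ring, val);
}

static void write_csr_gen4vf(uint32_t bundle, uint32_t ring, uint32_t val)
{
	printf("GEN4 VF CSR write: bundle=%u ring=%u val=%u\n", bundle, ring, val);
}

/* Each helper dispatches on the generation it is handed, the way
 * txq_write_tail()/rxq_free_desc() do after this patch. */
static void ring_write_tail(enum qat_device_gen gen, uint32_t bundle,
		uint32_t ring, uint32_t tail)
{
	if (gen == QAT_GEN4)
		write_csr_gen4vf(bundle, ring, tail);
	else
		write_csr_legacy(bundle, ring, tail);
}

int main(void)
{
	ring_write_tail(QAT_GEN4, 0, 1, 128);
	ring_write_tail(QAT_GEN2, 0, 1, 128);
	return 0;
}
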
@@ -309,7 +323,8 @@ create_err:
 	return -EFAULT;
 }
 
-int qat_qp_release(struct qat_qp **qp_addr)
+
+int qat_qp_release(enum qat_device_gen qat_dev_gen, struct qat_qp **qp_addr)
 {
 	struct qat_qp *qp = *qp_addr;
 	uint32_t i;
@@ -330,8 +345,8 @@ int qat_qp_release(struct qat_qp **qp_addr)
 		return -EAGAIN;
 	}
 
-	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
-			&qp->qat_dev->arb_csr_lock);
+	adf_queue_arb_disable(qat_dev_gen, &(qp->tx_q), qp->mmap_bar_addr,
+			&qp->qat_dev->arb_csr_lock);
 
 	for (i = 0; i < qp->nb_descriptors; i++)
 		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
@@ -379,7 +394,9 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
 	uint64_t queue_base;
 	void *io_addr;
 	const struct rte_memzone *qp_mz;
-	struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+	struct rte_pci_device *pci_dev =
+		qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
+	enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
 	int ret = 0;
 	uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
 			qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
@@ -403,7 +420,7 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
 		qp_conf->service_str, "qp_mem",
 		queue->hw_bundle_number, queue->hw_queue_number);
 	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
-			qat_dev->pci_dev->device.numa_node);
+			pci_dev->device.numa_node);
 	if (qp_mz == NULL) {
 		QAT_LOG(ERR, "Failed to allocate ring memzone");
 		return -ENOMEM;
@@ -439,14 +456,19 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
 	/*
 	 * Write an unused pattern to the queue memory.
	 */
 	memset(queue->base_addr, 0x7F, queue_size_bytes);
-
-	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
-			queue->queue_size);
-
 	io_addr = pci_dev->mem_resource[0].addr;
-	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+	if (qat_dev_gen == QAT_GEN4) {
+		queue_base = BUILD_RING_BASE_ADDR_GEN4(queue->base_phys_addr,
+			queue->queue_size);
+		WRITE_CSR_RING_BASE_GEN4VF(io_addr, queue->hw_bundle_number,
 			queue->hw_queue_number, queue_base);
+	} else {
+		queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+			queue->queue_size);
+		WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+			queue->hw_queue_number, queue_base);
+	}
 
 	QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
 		" nb msgs %u, msg_size %u, modulo mask %u",
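
The ring-base hunk above programs the same base/size pair through two different macro families; either way the pairing only works if the ring's physical base is naturally aligned to its power-of-two size, which is presumably what the qat_qp_check_queue_alignment() context further down enforces. A small sketch of that invariant — the check shown here is an assumption for illustration, not the function's actual body:

#include <assert.h>
#include <stdint.h>

/* A ring base is naturally aligned to the ring size when the low
 * log2(size) bits of the physical address are all zero. */
static int ring_base_aligned(uint64_t phys_addr, uint32_t size_bytes)
{
	/* size_bytes is assumed to be a power of two */
	return (phys_addr & (uint64_t)(size_bytes - 1)) == 0;
}

int main(void)
{
	assert(ring_base_aligned(0x10000, 0x1000));  /* 4 KiB ring at 64 KiB */
	assert(!ring_base_aligned(0x10040, 0x1000)); /* misaligned by 0x40 */
	return 0;
}
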
@@ -462,6 +484,84 @@ queue_create_err:
 	return ret;
 }
 
+int
+qat_select_valid_queue(struct qat_pci_device *qat_dev, int qp_id,
+			enum qat_service_type service_type)
+{
+	if (qat_dev->qat_dev_gen == QAT_GEN4) {
+		int i = 0, valid_qps = 0;
+
+		for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
+			if (qat_dev->qp_gen4_data[i][0].service_type ==
+				service_type) {
+				if (valid_qps == qp_id)
+					return i;
+				++valid_qps;
+			}
+		}
+	}
+	return -1;
+}
+
+int
+qat_read_qp_config(struct qat_pci_device *qat_dev)
+{
+	int i = 0;
+	enum qat_device_gen qat_dev_gen = qat_dev->qat_dev_gen;
+
+	if (qat_dev_gen == QAT_GEN4) {
+		uint16_t svc = 0;
+
+		if (qat_query_svc(qat_dev, (uint8_t *)&svc))
+			return -(EFAULT);
+		for (; i < QAT_GEN4_BUNDLE_NUM; i++) {
+			struct qat_qp_hw_data *hw_data =
+				&qat_dev->qp_gen4_data[i][0];
+			uint8_t svc1 = (svc >> (3 * i)) & 0x7;
+			enum qat_service_type service_type = QAT_SERVICE_INVALID;
+
+			if (svc1 == QAT_SVC_SYM) {
+				service_type = QAT_SERVICE_SYMMETRIC;
+				QAT_LOG(DEBUG,
+					"Discovered SYMMETRIC service on bundle %d",
+					i);
+			} else if (svc1 == QAT_SVC_COMPRESSION) {
+				service_type = QAT_SERVICE_COMPRESSION;
+				QAT_LOG(DEBUG,
+					"Discovered COMPRESSION service on bundle %d",
+					i);
+			} else if (svc1 == QAT_SVC_ASYM) {
+				service_type = QAT_SERVICE_ASYMMETRIC;
+				QAT_LOG(DEBUG,
+					"Discovered ASYMMETRIC service on bundle %d",
+					i);
+			} else {
+				QAT_LOG(ERR,
+					"Unrecognized service on bundle %d",
+					i);
+				return -(EFAULT);
+			}
+
+			memset(hw_data, 0, sizeof(*hw_data));
+			hw_data->service_type = service_type;
+			if (service_type == QAT_SERVICE_ASYMMETRIC) {
+				hw_data->tx_msg_size = 64;
+				hw_data->rx_msg_size = 32;
+			} else if (service_type == QAT_SERVICE_SYMMETRIC ||
+					service_type ==
+						QAT_SERVICE_COMPRESSION) {
+				hw_data->tx_msg_size = 128;
+				hw_data->rx_msg_size = 32;
+			}
+			hw_data->tx_ring_num = 0;
+			hw_data->rx_ring_num = 1;
+			hw_data->hw_bundle_num = i;
+		}
+		return 0;
+	}
+	return -(EINVAL);
+}
+
 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
 					uint32_t queue_size_bytes)
 {
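
qat_read_qp_config() recovers the GEN4 bundle layout from one 16-bit word returned by qat_query_svc(): three bits per bundle, extracted as (svc >> (3 * i)) & 0x7. A worked decode of such a word; the numeric QAT_SVC_* values and the bundle count of four are illustrative assumptions, not values from the QAT firmware headers:

#include <stdint.h>
#include <stdio.h>

/* Assumed service IDs; the real QAT_SVC_* values live in the QAT
 * firmware headers and may differ. */
#define SVC_ASYM	0x0
#define SVC_SYM		0x1
#define SVC_COMP	0x2
#define BUNDLES		4	/* assumed QAT_GEN4_BUNDLE_NUM */

int main(void)
{
	uint16_t svc = 0;
	int i;

	/* Pack: bundle0 = SYM, bundle1 = ASYM, bundle2 = COMP, bundle3 = SYM */
	svc |= SVC_SYM  << 0;
	svc |= SVC_ASYM << 3;
	svc |= SVC_COMP << 6;
	svc |= SVC_SYM  << 9;

	/* Unpack exactly as qat_read_qp_config() does. */
	for (i = 0; i < BUNDLES; i++) {
		uint8_t svc1 = (svc >> (3 * i)) & 0x7;
		printf("bundle %d -> service %u\n", i, svc1);
	}
	return 0;
}

With the assumed values the loop reports services 1, 0, 2 and 1 for bundles 0 through 3.
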
@@ -485,54 +585,81 @@ static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
 	return -EINVAL;
 }
 
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
-					rte_spinlock_t *lock)
+static void
+adf_queue_arb_enable(enum qat_device_gen qat_dev_gen, struct qat_queue *txq,
+			void *base_addr, rte_spinlock_t *lock)
 {
-	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-					(ADF_ARB_REG_SLOT *
-					txq->hw_bundle_number);
-	uint32_t value;
+	uint32_t arb_csr_offset = 0, value;
 
 	rte_spinlock_lock(lock);
-	value = ADF_CSR_RD(base_addr, arb_csr_offset);
+	if (qat_dev_gen == QAT_GEN4) {
+		arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+				(ADF_RING_BUNDLE_SIZE_GEN4 *
+				txq->hw_bundle_number);
+		value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
+				arb_csr_offset);
+	} else {
+		arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+				(ADF_ARB_REG_SLOT *
+				txq->hw_bundle_number);
+		value = ADF_CSR_RD(base_addr,
+				arb_csr_offset);
+	}
 	value |= (0x01 << txq->hw_queue_number);
 	ADF_CSR_WR(base_addr, arb_csr_offset, value);
 	rte_spinlock_unlock(lock);
 }
 
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
-					rte_spinlock_t *lock)
+static void adf_queue_arb_disable(enum qat_device_gen qat_dev_gen,
+		struct qat_queue *txq, void *base_addr, rte_spinlock_t *lock)
 {
-	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-					(ADF_ARB_REG_SLOT *
-					txq->hw_bundle_number);
-	uint32_t value;
+	uint32_t arb_csr_offset = 0, value;
 
 	rte_spinlock_lock(lock);
-	value = ADF_CSR_RD(base_addr, arb_csr_offset);
+	if (qat_dev_gen == QAT_GEN4) {
+		arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+				(ADF_RING_BUNDLE_SIZE_GEN4 *
+				txq->hw_bundle_number);
+		value = ADF_CSR_RD(base_addr + ADF_RING_CSR_ADDR_OFFSET_GEN4VF,
+				arb_csr_offset);
+	} else {
+		arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+				(ADF_ARB_REG_SLOT *
+				txq->hw_bundle_number);
+		value = ADF_CSR_RD(base_addr,
+				arb_csr_offset);
+	}
 	value &= ~(0x01 << txq->hw_queue_number);
 	ADF_CSR_WR(base_addr, arb_csr_offset, value);
 	rte_spinlock_unlock(lock);
 }
 
-static void adf_configure_queues(struct qat_qp *qp)
+static void adf_configure_queues(struct qat_qp *qp,
+		enum qat_device_gen qat_dev_gen)
 {
-	uint32_t queue_config;
-	struct qat_queue *queue = &qp->tx_q;
-
-	queue_config = BUILD_RING_CONFIG(queue->queue_size);
-
-	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
-			queue->hw_queue_number, queue_config);
-
-	queue = &qp->rx_q;
-	queue_config =
-			BUILD_RESP_RING_CONFIG(queue->queue_size,
-					ADF_RING_NEAR_WATERMARK_512,
-					ADF_RING_NEAR_WATERMARK_0);
-
-	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
-			queue->hw_queue_number, queue_config);
+	uint32_t q_tx_config, q_resp_config;
+	struct qat_queue *q_tx = &qp->tx_q, *q_rx = &qp->rx_q;
+
+	q_tx_config = BUILD_RING_CONFIG(q_tx->queue_size);
+	q_resp_config = BUILD_RESP_RING_CONFIG(q_rx->queue_size,
+			ADF_RING_NEAR_WATERMARK_512,
+			ADF_RING_NEAR_WATERMARK_0);
+
+	if (qat_dev_gen == QAT_GEN4) {
+		WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
+			q_tx->hw_bundle_number, q_tx->hw_queue_number,
+			q_tx_config);
+		WRITE_CSR_RING_CONFIG_GEN4VF(qp->mmap_bar_addr,
+			q_rx->hw_bundle_number, q_rx->hw_queue_number,
+			q_resp_config);
+	} else {
+		WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
+			q_tx->hw_bundle_number, q_tx->hw_queue_number,
+			q_tx_config);
+		WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr,
+			q_rx->hw_bundle_number, q_rx->hw_queue_number,
+			q_resp_config);
+	}
 }
 
 static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
@@ -541,14 +668,21 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
 {
 	return data & modulo_mask;
 }
 
 static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+txq_write_tail(enum qat_device_gen qat_dev_gen,
+	struct qat_qp *qp, struct qat_queue *q) {
+
+	if (qat_dev_gen == QAT_GEN4) {
+		WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
+			q->hw_bundle_number, q->hw_queue_number, q->tail);
+	} else {
+		WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
 			q->hw_queue_number, q->tail);
-	q->csr_tail = q->tail;
+	}
 }
 
 static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void rxq_free_desc(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
+	struct qat_queue *q)
 {
 	uint32_t old_head, new_head;
 	uint32_t max_head;
@@ -570,8 +704,14 @@ void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
 	q->csr_head = new_head;
 
 	/* write current head to CSR */
-	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
-			q->hw_queue_number, new_head);
+	if (qat_dev_gen == QAT_GEN4) {
+		WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
+			q->hw_bundle_number, q->hw_queue_number, new_head);
+	} else {
+		WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, new_head);
+	}
+
 }
 
 uint16_t
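
rxq_free_desc() is deliberately lazy: the shadow head advances on every response (a single AND, since ring sizes are powers of two and adf_modulo() masks the offset), while the head CSR is only written back once more than QAT_CSR_HEAD_WRITE_THRESH responses have accumulated, as the qat_dequeue_op_burst() hunk further down shows. A self-contained sketch of that batching, with an invented threshold and a counter standing in for the MMIO write:

#include <stdint.h>
#include <stdio.h>

#define MSG_SIZE		64	/* bytes per response descriptor */
#define RING_BYTES		4096	/* power of two, so wrap is an AND */
#define MODULO_MASK		(RING_BYTES - 1)
#define HEAD_WRITE_THRESH	32	/* invented stand-in for QAT_CSR_HEAD_WRITE_THRESH */

static uint32_t head;		/* shadow head, advanced per response */
static uint32_t csr_head;	/* last value written to the (simulated) CSR */
static uint32_t nb_processed;	/* responses since the last CSR write */
static uint32_t mmio_writes;

static void process_one_response(void)
{
	head = (head + MSG_SIZE) & MODULO_MASK;	/* the adf_modulo() shape */
	if (++nb_processed > HEAD_WRITE_THRESH) {
		csr_head = head;	/* one MMIO write covers many ops */
		nb_processed = 0;
		mmio_writes++;
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		process_one_response();
	printf("1000 responses, %u head CSR writes (last head %u)\n",
			mmio_writes, csr_head);
	return 0;
}

In the sketch, 1000 responses produce roughly 30 head writes instead of 1000.
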
@@ -580,7 +720,7 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 	register struct qat_queue *queue;
 	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
 	register uint32_t nb_ops_sent = 0;
-	register int ret;
+	register int ret = -1;
 	uint16_t nb_ops_possible = nb_ops;
 	register uint8_t *base_addr;
 	register uint32_t tail;
@@ -625,11 +765,29 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 		}
 	}
 
+#ifdef BUILD_QAT_SYM
+	if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+		qat_sym_preprocess_requests(ops, nb_ops_possible);
+#endif
+
 	while (nb_ops_sent != nb_ops_possible) {
-		ret = tmp_qp->build_request(*ops, base_addr + tail,
+		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
+#ifdef BUILD_QAT_SYM
+			ret = qat_sym_build_request(*ops, base_addr + tail,
+				tmp_qp->op_cookies[tail >> queue->trailz],
+				tmp_qp->qat_dev_gen);
+#endif
+		} else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
+			ret = qat_comp_build_request(*ops, base_addr + tail,
+				tmp_qp->op_cookies[tail >> queue->trailz],
+				tmp_qp->qat_dev_gen);
+		} else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
+#ifdef BUILD_QAT_ASYM
+			ret = qat_asym_build_request(*ops, base_addr + tail,
 				tmp_qp->op_cookies[tail >> queue->trailz],
 				tmp_qp->qat_dev_gen);
+#endif
+		}
+
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
 			/* This message cannot be enqueued */
@@ -646,7 +804,7 @@ kick_tail:
 	queue->tail = tail;
 	tmp_qp->enqueued += nb_ops_sent;
 	tmp_qp->stats.enqueued_count += nb_ops_sent;
-	txq_write_tail(tmp_qp, queue);
+	txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
 	return nb_ops_sent;
 }
 
@@ -819,7 +977,7 @@ kick_tail:
 	queue->tail = tail;
 	tmp_qp->enqueued += total_descriptors_built;
 	tmp_qp->stats.enqueued_count += nb_ops_sent;
-	txq_write_tail(tmp_qp, queue);
+	txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
 	return nb_ops_sent;
 }
 
@@ -843,7 +1001,8 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 		nb_fw_responses = 1;
 
 		if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
-			qat_sym_process_response(ops, resp_msg);
+			qat_sym_process_response(ops, resp_msg,
+				tmp_qp->op_cookies[head >> rx_queue->trailz]);
 		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
 			nb_fw_responses = qat_comp_process_response(
 				ops, resp_msg,
@@ -884,7 +1043,7 @@
 	rx_queue->head = head;
 	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
-		rxq_free_desc(tmp_qp, rx_queue);
+		rxq_free_desc(tmp_qp->qat_dev_gen, tmp_qp, rx_queue);
 
 	QAT_DP_LOG(DEBUG, "Dequeue burst return: %u, QAT responses: %u",
 			op_resp_counter, fw_resp_counter);
@@ -926,7 +1085,7 @@ qat_cq_dequeue_response(struct qat_qp *qp, void *out_data)
 		queue->head = adf_modulo(queue->head + queue->msg_size,
 				queue->modulo_mask);
-		rxq_free_desc(qp, queue);
+		rxq_free_desc(qp->qat_dev_gen, qp, queue);
 	}
 
 	return result;
@@ -961,7 +1120,7 @@ qat_cq_get_fw_version(struct qat_qp *qp)
 	memcpy(base_addr + queue->tail, &null_msg, sizeof(null_msg));
 	queue->tail = adf_modulo(queue->tail + queue->msg_size,
 			queue->modulo_mask);
-	txq_write_tail(qp, queue);
+	txq_write_tail(qp->qat_dev_gen, qp, queue);
 
 	/* receive a response */
 	if (qat_cq_dequeue_response(qp, &response)) {
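
A closing note on the dispatch that replaced qp->build_request: qat_enqueue_op_burst() now selects the builder from the queue pair's service type, each branch compiled in only under its BUILD_QAT_* flag, and the new ret = -1 initializer is what turns a request for a compiled-out service into a counted enqueue error rather than silent success. A stripped-down sketch of that pattern (types and handlers are placeholders, not the driver's API):

#include <stdio.h>

enum qat_service_type { SVC_SYMMETRIC, SVC_ASYMMETRIC, SVC_COMPRESSION };

#define BUILD_QAT_SYM 1		/* comment out to exercise the -1 path */

#ifdef BUILD_QAT_SYM
static int sym_build_request(void *op)  { (void)op; return 0; }
#endif
static int comp_build_request(void *op) { (void)op; return 0; }

/* ret starts at -1 so a service whose builder was compiled out falls
 * through to the error path, mirroring "register int ret = -1". */
static int build_one_request(enum qat_service_type svc, void *op)
{
	int ret = -1;

	if (svc == SVC_SYMMETRIC) {
#ifdef BUILD_QAT_SYM
		ret = sym_build_request(op);
#endif
	} else if (svc == SVC_COMPRESSION) {
		ret = comp_build_request(op);
	}
	return ret;
}

int main(void)
{
	printf("sym:  %d\n", build_one_request(SVC_SYMMETRIC, NULL));
	printf("asym: %d\n", build_one_request(SVC_ASYMMETRIC, NULL));
	return 0;
}

With no asymmetric builder in the sketch, the second call returns -1, exactly the case the enqueue loop counts in stats.enqueue_err_count.
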