diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 098b997..32d7401 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -193,7 +193,8 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
 
 {
        struct qat_qp *qp;
-       struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+       struct rte_pci_device *pci_dev =
+                       qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
        char op_cookie_pool_name[RTE_RING_NAMESIZE];
        uint32_t i;
 
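This first hunk replaces a pci_dev pointer cached inside struct qat_pci_device with a lookup in a global qat_pci_devs[] table indexed by qat_dev_id; the later hunks in qat_qp_setup() and qat_queue_create() reuse the resulting local. A minimal sketch of the assumed table shape (type and field names are inferred from the lookup, not copied from the tree):

    struct rte_pci_device;                 /* defined in rte_bus_pci.h */

    /*
     * Assumed per-process device bookkeeping: each process resolves its
     * own rte_pci_device handle through this table instead of trusting
     * a pointer stored in shared device state.
     */
    struct qat_device_info {
            struct rte_pci_device *pci_dev;
            /* ... other per-process fields ... */
    };

    extern struct qat_device_info qat_pci_devs[];
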
@@ -274,7 +275,7 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
                                qp->nb_descriptors,
                                qat_qp_conf->cookie_size, 64, 0,
                                NULL, NULL, NULL, NULL,
-                               qat_dev->pci_dev->device.numa_node,
+                               pci_dev->device.numa_node,
                                0);
        if (!qp->op_cookie_pool) {
                QAT_LOG(ERR, "QAT PMD Cannot create"
@@ -291,7 +292,6 @@ int qat_qp_setup(struct qat_pci_device *qat_dev,
        }
 
        qp->qat_dev_gen = qat_dev->qat_dev_gen;
-       qp->build_request = qat_qp_conf->build_request;
        qp->service_type = qat_qp_conf->hw->service_type;
        qp->qat_dev = qat_dev;
 
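Dropping the stored build_request callback is consistent with the pci_dev change above: a function pointer written into qp state by one process is only valid in that process's address space, so the enqueue path (see the qat_enqueue_op_burst hunk below) now picks the builder from qp->service_type at run time. That rationale is an inference from the diff; the commit message is not part of this excerpt.
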
@@ -379,7 +379,8 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
-       struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+       struct rte_pci_device *pci_dev =
+                       qat_pci_devs[qat_dev->qat_dev_id].pci_dev;
        int ret = 0;
        uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
                        qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
@@ -403,7 +404,7 @@ qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
                qp_conf->service_str, "qp_mem",
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
-                       qat_dev->pci_dev->device.numa_node);
+                       pci_dev->device.numa_node);
        if (qp_mz == NULL) {
                QAT_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
@@ -580,7 +581,7 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
        register struct qat_queue *queue;
        struct qat_qp *tmp_qp = (struct qat_qp *)qp;
        register uint32_t nb_ops_sent = 0;
-       register int ret;
+       register int ret = -1;
        uint16_t nb_ops_possible = nb_ops;
        register uint8_t *base_addr;
        register uint32_t tail;
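Pre-setting ret to -1 matters once the build call moves behind conditional compilation (next hunk): if the qp's service type has no compiled-in builder, no branch assigns ret, and the -1 routes the op down the existing ret != 0 error path instead of reading an uninitialized value.
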
@@ -625,11 +626,29 @@ qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
                }
        }
 
+#ifdef BUILD_QAT_SYM
+       if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+               qat_sym_preprocess_requests(ops, nb_ops_possible);
+#endif
 
        while (nb_ops_sent != nb_ops_possible) {
-               ret = tmp_qp->build_request(*ops, base_addr + tail,
+               if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) {
+#ifdef BUILD_QAT_SYM
+                       ret = qat_sym_build_request(*ops, base_addr + tail,
+                               tmp_qp->op_cookies[tail >> queue->trailz],
+                               tmp_qp->qat_dev_gen);
+#endif
+               } else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) {
+                       ret = qat_comp_build_request(*ops, base_addr + tail,
+                               tmp_qp->op_cookies[tail >> queue->trailz],
+                               tmp_qp->qat_dev_gen);
+               } else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
+#ifdef BUILD_QAT_ASYM
+                       ret = qat_asym_build_request(*ops, base_addr + tail,
                                tmp_qp->op_cookies[tail >> queue->trailz],
                                tmp_qp->qat_dev_gen);
+#endif
+               }
                if (ret != 0) {
                        tmp_qp->stats.enqueue_err_count++;
                        /* This message cannot be enqueued */
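Two things happen in this hunk: qat_sym_preprocess_requests() runs once per burst, presumably fixing up symmetric ops in place before any descriptor is written, and the per-qp callback is replaced by an if/else chain on service_type, with the symmetric and asymmetric builders compiled in only under BUILD_QAT_SYM / BUILD_QAT_ASYM. For orientation, an illustrative caller of this path (no part of it appears in the diff): the cryptodev burst API reaches qat_enqueue_op_burst() through the PMD's enqueue hook.

    #include <rte_cryptodev.h>

    /* Illustrative only: submit a burst and report how many ops were
     * actually queued; the caller must retry or free the remainder. */
    static uint16_t
    submit_crypto_burst(uint8_t dev_id, uint16_t qp_id,
                    struct rte_crypto_op **ops, uint16_t nb_ops)
    {
            uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id,
                            ops, nb_ops);
            /* sent < nb_ops when the ring is full or a request could
             * not be built (the ret != 0 path above). */
            return sent;
    }
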
@@ -818,7 +837,7 @@ qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
 kick_tail:
        queue->tail = tail;
        tmp_qp->enqueued += total_descriptors_built;
-       tmp_qp->stats.enqueued_count += total_descriptors_built;
+       tmp_qp->stats.enqueued_count += nb_ops_sent;
        txq_write_tail(tmp_qp, queue);
        return nb_ops_sent;
 }
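This stats fix separates operation accounting from descriptor accounting: a compression op can be built as several descriptors, so enqueued_count now advances by nb_ops_sent (ops accepted from the API) while tmp_qp->enqueued keeps tracking total_descriptors_built for ring bookkeeping. An op split into two descriptors now adds 1 to enqueued_count, not 2.
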
@@ -860,7 +879,7 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
                resp_msg = (uint8_t *)rx_queue->base_addr + head;
 
-               if (ops != NULL && nb_fw_responses) {
+               if (nb_fw_responses) {
                        /* only move on to next op if one was ready to return
                         * to API
                         */
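Removing the ops != NULL guard drops a per-response branch from the hot dequeue loop; the implication (the callers are not part of this excerpt) is that every caller now passes a valid ops array, so a NULL-tolerant drain mode is no longer catered for here.
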
@@ -880,7 +899,7 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
        }
 
        tmp_qp->dequeued += fw_resp_counter;
-       tmp_qp->stats.dequeued_count += fw_resp_counter;
+       tmp_qp->stats.dequeued_count += op_resp_counter;
 
        rx_queue->head = head;
        if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
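Mirroring the enqueue fix, dequeued_count now advances by op_resp_counter (ops returned to the API) rather than fw_resp_counter (firmware responses consumed), since a multi-descriptor op produces several firmware responses; tmp_qp->dequeued still counts raw responses for ring accounting. Together the two fixes keep enqueued_count and dequeued_count in the same unit: operations.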