compress/qat: fix overflow status return
author     Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
           Fri, 5 Jul 2019 17:15:51 +0000 (18:15 +0100)
committer  Akhil Goyal <akhil.goyal@nxp.com>
           Fri, 19 Jul 2019 12:15:21 +0000 (14:15 +0200)
This patch fixes the failure status returned from the compression
PMD when the destination buffer is too small to store all the data.
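
As an illustration only (not part of the patch), below is a minimal
sketch of how an application might consume the corrected status after
dequeue. The function name drain_comp_ops and the burst size of 32 are
arbitrary; only the rte_compressdev/rte_comp API calls are real:

    #include <rte_comp.h>
    #include <rte_compressdev.h>

    /* Count ops that terminated because the destination mbuf was too
     * small; such ops report consumed == 0 and produced == 0 and have
     * to be resubmitted with a larger destination buffer.
     */
    static uint16_t
    drain_comp_ops(uint8_t dev_id, uint16_t qp_id)
    {
            struct rte_comp_op *ops[32];
            uint16_t i, nb, need_retry = 0;

            nb = rte_compressdev_dequeue_burst(dev_id, qp_id, ops, 32);
            for (i = 0; i < nb; i++) {
                    if (ops[i]->status ==
                        RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED)
                            need_retry++;
            }
            return need_retry;
    }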

Fixes: 3dc9ef2d23fe ("compress/qat: fix returned status on overflow")
Cc: stable@dpdk.org
Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
Acked-by: Adam Dybkowski <adamx.dybkowski@intel.com>
drivers/common/qat/qat_qp.c
drivers/common/qat/qat_qp.h
drivers/compress/qat/qat_comp.c
drivers/compress/qat/qat_comp.h
drivers/compress/qat/qat_comp_pmd.c

diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 1312152..03f11f8 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -651,7 +651,8 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
                        qat_sym_process_response(ops, resp_msg);
                else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
                        qat_comp_process_response(ops, resp_msg,
-                                       &tmp_qp->stats.dequeue_err_count);
+                               tmp_qp->op_cookies[head / rx_queue->msg_size],
+                               &tmp_qp->stats.dequeue_err_count);
                else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
 #ifdef BUILD_QAT_ASYM
                        qat_asym_process_response(ops, resp_msg,
@@ -686,6 +687,7 @@ qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
 
 __rte_weak int
 qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+                         void *op_cookie __rte_unused,
                          uint64_t *dequeue_err_count __rte_unused)
 {
        return  0;
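
For context on the new op_cookie argument: the cookie is selected by
descriptor index, roughly as in the sketch below (a simplified,
hypothetical helper with illustrative names, not driver code):

    #include <stdint.h>

    /* Simplified view of the cookie lookup added above: the ring head
     * is a byte offset into the response ring, so dividing it by the
     * fixed message size gives the descriptor slot, which is also the
     * index of the per-op cookie allocated at queue-pair setup.
     */
    static inline void *
    cookie_for_response(void **op_cookies, uint32_t head, uint32_t msg_size)
    {
            return op_cookies[head / msg_size];
    }
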
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 9833bcb..980c2ba 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -108,6 +108,7 @@ qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
 /* Needed for weak function*/
 int
 qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-                         uint64_t *dequeue_err_count);
+                         void *op_cookie __rte_unused,
+                         uint64_t *dequeue_err_count __rte_unused);
 
 #endif /* _QAT_QP_H_ */
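
The prototype above matches the __rte_weak stub in qat_qp.c, which lets
the common queue-pair code link even when the compression PMD is not
built. A generic sketch of that weak-symbol pattern, using only plain
GCC attributes and illustrative names (not the QAT sources):

    /* The weak definition in common code is used unless a strong
     * definition is provided by another object at link time.
     */
    __attribute__((weak)) int
    backend_process(void *resp)
    {
            (void)resp;
            return 0;       /* no compression backend linked in */
    }
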
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index dd0fe1b..835aaa8 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -170,6 +170,18 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
                    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
        }
 
+       if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
+               /* QAT doesn't support destination buffers smaller
+                * than QAT_MIN_OUT_BUF_SIZE. Propagate the error by
+                * converting this request into a null request and
+                * checking the status in the response.
+                */
+               QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
+               comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+               comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+               cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+       }
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_LOG(DEBUG, "Direction: %s",
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -181,10 +193,13 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 }
 
 int
-qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
+                         uint64_t *dequeue_err_count)
 {
        struct icp_qat_fw_comp_resp *resp_msg =
                        (struct icp_qat_fw_comp_resp *)resp;
+       struct qat_comp_op_cookie *cookie =
+                       (struct qat_comp_op_cookie *)op_cookie;
        struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
                        (resp_msg->opaque_data);
        struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
@@ -201,6 +216,17 @@ qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
                        sizeof(struct icp_qat_fw_comp_resp));
 #endif
 
+       if (unlikely(cookie->error)) {
+               rx_op->status = cookie->error;
+               cookie->error = 0;
+               ++(*dequeue_err_count);
+               rx_op->debug_status = 0;
+               rx_op->consumed = 0;
+               rx_op->produced = 0;
+               *op = (void *)rx_op;
+               return 0;
+       }
+
        if (likely(qat_xform->qat_comp_request_type
                        != QAT_COMP_REQUEST_DECOMPRESS)) {
                if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
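
Taken together, the two hunks above implement a build-time/completion-
time hand-off through the per-op cookie. A stripped-down sketch of that
pattern (generic names and types, not the driver's):

    #include <stdint.h>

    struct op_cookie {
            uint8_t error;  /* set at build time, read at completion */
    };

    /* Build step: the op cannot be failed here, so downgrade it to a
     * no-op request and remember the error in the cookie.
     */
    static void
    build_step(struct op_cookie *c, int dst_too_small)
    {
            if (dst_too_small)
                    c->error = 1;   /* e.g. OUT_OF_SPACE_TERMINATED */
    }

    /* Completion step: surface the recorded error instead of parsing
     * the (null) firmware response, then clear it so the cookie can
     * be reused by the next op on this descriptor.
     */
    static uint8_t
    complete_step(struct op_cookie *c)
    {
            uint8_t err = c->error;

            c->error = 0;
            return err;
    }
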
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 1312ee9..61d12ec 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -24,6 +24,8 @@
 /* fallback to fixed compression threshold */
 #define QAT_FALLBACK_THLD ((uint32_t)(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE / 1.1))
 
+#define QAT_MIN_OUT_BUF_SIZE 46
+
 enum qat_comp_request_type {
        QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
        QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
@@ -45,6 +47,7 @@ struct qat_comp_op_cookie {
        phys_addr_t qat_sgl_src_phys_addr;
        phys_addr_t qat_sgl_dst_phys_addr;
        /* dynamically created SGLs */
+       uint8_t error;
        uint8_t socket_id;
        uint16_t src_nb_elems;
        uint16_t dst_nb_elems;
@@ -63,7 +66,7 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
                       enum qat_device_gen qat_dev_gen __rte_unused);
 
 int
-qat_comp_process_response(void **op, uint8_t *resp,
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
                          uint64_t *dequeue_err_count);
 
 int
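
Given the QAT_MIN_OUT_BUF_SIZE limit introduced above (the PMD now
terminates ops whose destination mbuf advertises fewer than 46 bytes),
an application can size the output mbuf up front. A minimal sketch,
where "out_pool", "out_len" and the helper name are the application's
own choices and only the rte_mbuf calls are real API:

    #include <rte_mbuf.h>

    /* Allocate a destination mbuf whose pkt_len is at least the QAT
     * minimum output size, so the op is not terminated with
     * RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED.
     */
    static struct rte_mbuf *
    alloc_dst_mbuf(struct rte_mempool *out_pool, uint16_t out_len)
    {
            struct rte_mbuf *m = rte_pktmbuf_alloc(out_pool);

            if (m == NULL)
                    return NULL;
            if (out_len < 46)       /* QAT_MIN_OUT_BUF_SIZE */
                    out_len = 46;
            if (rte_pktmbuf_append(m, out_len) == NULL) {
                    rte_pktmbuf_free(m);
                    return NULL;
            }
            return m;
    }
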
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 1d9d72e..0726472 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -168,6 +168,8 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
                                QAT_PMD_COMP_SGL_DEF_SEGMENTS;
 
                cookie->socket_id = dev->data->socket_id;
+
+               cookie->error = 0;
        }
 
        return ret;