diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index 27547428f4..835aaa8382 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2019 Intel Corporation
  */
 
 #include <rte_mempool.h>
@@ -36,12 +36,37 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
 				"operation requests, op (%p) is not a "
 				"stateless operation.", op);
+		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
 	}
 
 	rte_mov128(out_msg, tmpl);
 	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 
+	if (likely(qat_xform->qat_comp_request_type ==
+			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
+		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
+
+			/* fallback to fixed compression */
+			comp_req->comn_hdr.service_cmd_id =
+					ICP_QAT_FW_COMP_CMD_STATIC;
+
+			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+
+			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+					ICP_QAT_FW_SLICE_NULL);
+			ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+					ICP_QAT_FW_SLICE_NULL);
+
+			QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed "
+					"compression! IM buffer size can be too low "
+					"for produced data.\n Please use input "
+					"buffer length lower than %d bytes",
+					QAT_FALLBACK_THLD);
+		}
+	}
+
 	/* common for sgl and flat buffers */
 	comp_req->comp_pars.comp_len = op->src.length;
 	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
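The hunk above adds a fallback for dynamic Huffman requests: once op->src.length exceeds QAT_FALLBACK_THLD, the request is downgraded to fixed (static) compression, because the intermediate (IM) buffer used by the dynamic two-pass flow may be too small for the produced data. A minimal standalone sketch of that policy, with an assumed threshold value and hypothetical names:

    #include <stdint.h>

    #define FALLBACK_THLD (32 * 1024)  /* assumed; the real QAT_FALLBACK_THLD is driver-defined */

    enum huffman_mode { HUFFMAN_DYNAMIC, HUFFMAN_FIXED };

    /* Inputs larger than the threshold are compressed with fixed Huffman,
     * which skips the translator slice and its size-limited IM buffer. */
    static enum huffman_mode
    select_huffman(enum huffman_mode requested, uint32_t src_len)
    {
            if (requested == HUFFMAN_DYNAMIC && src_len > FALLBACK_THLD)
                    return HUFFMAN_FIXED;
            return requested;
    }

The downgrade trades some compression ratio for a guarantee that the operation cannot fail with an IM-buffer overflow.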
@@ -54,23 +79,73 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
 				QAT_COMN_PTR_TYPE_SGL);
 
+		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
+			/* we need to allocate more elements in SGL */
+			void *tmp;
+
+			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
+					sizeof(struct qat_sgl) +
+					sizeof(struct qat_flat_buf) *
+					op->m_src->nb_segs, 64,
+					cookie->socket_id);
+
+			if (unlikely(tmp == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+						" for %d elements of SGL",
+						op->m_src->nb_segs);
+				op->status = RTE_COMP_OP_STATUS_ERROR;
+				return -ENOMEM;
+			}
+			/* new SGL is valid now */
+			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
+			cookie->src_nb_elems = op->m_src->nb_segs;
+			cookie->qat_sgl_src_phys_addr =
+					rte_malloc_virt2iova(cookie->qat_sgl_src_d);
+		}
+
 		ret = qat_sgl_fill_array(op->m_src,
 				op->src.offset,
-				&cookie->qat_sgl_src,
+				cookie->qat_sgl_src_d,
 				op->src.length,
-				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+				cookie->src_nb_elems);
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
 			return ret;
 		}
 
+		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
+			/* we need to allocate more elements in SGL */
+			struct qat_sgl *tmp;
+
+			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
+					sizeof(struct qat_sgl) +
+					sizeof(struct qat_flat_buf) *
+					op->m_dst->nb_segs, 64,
+					cookie->socket_id);
+
+			if (unlikely(tmp == NULL)) {
+				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+						" for %d elements of SGL",
+						op->m_dst->nb_segs);
+				op->status = RTE_COMP_OP_STATUS_ERROR;
+				return -ENOMEM;
+			}
+			/* new SGL is valid now */
+			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
+			cookie->dst_nb_elems = op->m_dst->nb_segs;
+			cookie->qat_sgl_dst_phys_addr =
+					rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+		}
+
 		ret = qat_sgl_fill_array(op->m_dst,
 				op->dst.offset,
-				&cookie->qat_sgl_dst,
+				cookie->qat_sgl_dst_d,
 				comp_req->comp_pars.out_buffer_sz,
-				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+				cookie->dst_nb_elems);
 		if (ret) {
 			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
 			return ret;
 		}
 
@@ -95,6 +170,18 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 			rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
 	}
 
+	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
+		/* QAT doesn't support dest. buffer lower
+		 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark
+		 * by converting this request to the null one
+		 * and check the status in the response.
+		 */
+		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
+		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
 		qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
@@ -106,14 +193,20 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
 }
 
 int
-qat_comp_process_response(void **op, uint8_t *resp)
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
+			  uint64_t *dequeue_err_count)
 {
 	struct icp_qat_fw_comp_resp *resp_msg =
 			(struct icp_qat_fw_comp_resp *)resp;
+	struct qat_comp_op_cookie *cookie =
+			(struct qat_comp_op_cookie *)op_cookie;
 	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
 			(resp_msg->opaque_data);
 	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
 				(rx_op->private_xform);
+	int err = resp_msg->comn_resp.comn_status &
+			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
+			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
 
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_LOG(DEBUG, "Direction: %s",
@@ -123,6 +216,17 @@ qat_comp_process_response(void **op, uint8_t *resp)
 			sizeof(struct icp_qat_fw_comp_resp));
 #endif
 
+	if (unlikely(cookie->error)) {
+		rx_op->status = cookie->error;
+		cookie->error = 0;
+		++(*dequeue_err_count);
+		rx_op->debug_status = 0;
+		rx_op->consumed = 0;
+		rx_op->produced = 0;
+		*op = (void *)rx_op;
+		return 0;
+	}
+
 	if (likely(qat_xform->qat_comp_request_type
 			!= QAT_COMP_REQUEST_DECOMPRESS)) {
 		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
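The two SGL hunks above replace the fixed-size tables bounded by RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS with per-cookie tables that grow on demand, and the reallocation goes through a temporary pointer so the old table stays valid if rte_realloc_socket() fails. A generic sketch of that grow-on-demand pattern, using plain realloc() and hypothetical types rather than the driver's:

    #include <stdlib.h>

    /* Hypothetical flattened SGL: a header plus a variable-length
     * array of flat-buffer descriptors, like struct qat_sgl above. */
    struct flat_buf { unsigned long addr; unsigned int len; };
    struct sgl { unsigned int num_bufs; struct flat_buf buffers[]; };

    /* Grow the table only when an mbuf chain has more segments than it
     * can hold; on allocation failure the caller keeps the old table. */
    static int
    sgl_grow(struct sgl **tbl, unsigned int *capacity, unsigned int nb_segs)
    {
            struct sgl *tmp;

            if (nb_segs <= *capacity)
                    return 0;       /* fast path: no reallocation */

            tmp = realloc(*tbl, sizeof(struct sgl) +
                            sizeof(struct flat_buf) * nb_segs);
            if (tmp == NULL)
                    return -1;      /* *tbl and *capacity unchanged */

            *tbl = tmp;
            *capacity = nb_segs;
            return 0;
    }

Note that the driver must also refresh the table's IOVA after every reallocation (rte_malloc_virt2iova() in the patch), since a realloc may move the block.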
@@ -132,24 +236,36 @@ qat_comp_process_response(void **op, uint8_t *resp)
 			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
 			*op = (void *)rx_op;
 			QAT_DP_LOG(ERR, "QAT has wrong firmware");
+			++(*dequeue_err_count);
 			return 0;
 		}
 	}
 
-	if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
-		| ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
-				resp_msg->comn_resp.comn_status)) !=
-				ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
-
-		if (unlikely((ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
-				resp_msg->comn_resp.comn_status) !=
-				ICP_QAT_FW_COMN_STATUS_FLAG_OK) &&
-			(qat_xform->qat_comp_request_type
-				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)))
+	if (err) {
+		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
+			     && (qat_xform->qat_comp_request_type
+				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
 			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
 			    "small for output, try configuring a larger size");
+		}
+
+		int8_t cmp_err_code =
+			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
+		int8_t xlat_err_code =
+			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
+
+		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
+				||
+		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
+				||
+		    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
+				xlat_err_code == ERR_CODE_OVERFLOW_ERROR))
+			rx_op->status =
+				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+		else
+			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
 
-		rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+		++(*dequeue_err_count);
 		rx_op->debug_status =
 			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
 	} else {
@@ -265,7 +381,7 @@ static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
 		ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
 		ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
 		ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
-		ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+		ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
 
 	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
 		ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
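The error-mapping hunk in qat_comp_process_response() reports a pure overflow, whether from the compression slice, the translator slice, or both, as RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED so the application can resubmit with a larger destination buffer; any other non-zero error code remains a hard RTE_COMP_OP_STATUS_ERROR. The three-clause OR condenses to one rule, sketched below (the overflow constant's numeric value is assumed for illustration):

    #include <stdint.h>

    #define ERR_CODE_OVERFLOW_ERROR (-11)   /* assumed value for this sketch */

    enum op_status { ST_OK, ST_OUT_OF_SPACE_TERMINATED, ST_ERROR };

    /* Out-of-space only when every non-zero slice error is an overflow;
     * equivalent to the three-clause condition in the patch. */
    static enum op_status
    classify_comp_error(int8_t cmp_err, int8_t xlat_err)
    {
            if (cmp_err == 0 && xlat_err == 0)
                    return ST_OK;
            if ((cmp_err == 0 || cmp_err == ERR_CODE_OVERFLOW_ERROR) &&
                (xlat_err == 0 || xlat_err == ERR_CODE_OVERFLOW_ERROR))
                    return ST_OUT_OF_SPACE_TERMINATED;
            return ST_ERROR;
    }

The final hunk flips the request-template flag from DISABLE to ENABLE_SECURE_RAM_USED_AS_INTMD_BUF, allowing the firmware to use on-chip secure RAM as part of the intermediate buffer for dynamic compression.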