/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018-2019 Intel Corporation
*/
#include <rte_mempool.h>
QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
"operation requests, op (%p) is not a "
"stateless operation.", op);
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
rte_mov128(out_msg, tmpl);
comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
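+ /* Sources larger than QAT_FALLBACK_THLD could overflow the
+  * intermediate buffer used for dynamic compression, so such
+  * requests are downgraded to static (fixed Huffman) compression
+  * below and the translator slice is bypassed.
+  */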
+ if (likely(qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
+ if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
+
+ /* fallback to fixed compression */
+ comp_req->comn_hdr.service_cmd_id =
+ ICP_QAT_FW_COMP_CMD_STATIC;
+
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_NULL);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_NULL);
+
+ QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed "
+ "compression! IM buffer size can be too low "
+ "for produced data.\n Please use input "
+ "buffer length lower than %d bytes",
+ QAT_FALLBACK_THLD);
+ }
+ }
+
/* common for sgl and flat buffers */
comp_req->comp_pars.comp_len = op->src.length;
- comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst);
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
if (op->m_src->next != NULL || op->m_dst->next != NULL) {
/* sgl */
ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
+
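+ /* The op cookie carries a preallocated SGL descriptor. If the
+  * source mbuf chain has more segments than the descriptor can
+  * hold, grow it with rte_realloc_socket() and refresh its IOVA.
+  */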
+ if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
+ /* we need to allocate more elements in the SGL */
+ void *tmp;
+
+ tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ op->m_src->nb_segs, 64,
+ cookie->socket_id);
+
+ if (unlikely(tmp == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+ " for %d elements of SGL",
+ op->m_src->nb_segs);
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -ENOMEM;
+ }
+ /* new SGL is valid now */
+ cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
+ cookie->src_nb_elems = op->m_src->nb_segs;
+ cookie->qat_sgl_src_phys_addr =
+ rte_malloc_virt2iova(cookie->qat_sgl_src_d);
+ }
+
ret = qat_sgl_fill_array(op->m_src,
- rte_pktmbuf_mtophys_offset(op->m_src,
- op->src.offset),
- &cookie->qat_sgl_src,
+ op->src.offset,
+ cookie->qat_sgl_src_d,
op->src.length,
- RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ cookie->src_nb_elems);
if (ret) {
- QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
return ret;
}
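+ /* The destination SGL is grown on demand in the same way. */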
+ if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
+ /* we need to allocate more elements in the SGL */
+ struct qat_sgl *tmp;
+
+ tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ op->m_dst->nb_segs, 64,
+ cookie->socket_id);
+
+ if (unlikely(tmp == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+ " for %d elements of SGL",
+ op->m_dst->nb_segs);
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -ENOMEM;
+ }
+ /* new SGL is valid now */
+ cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
+ cookie->dst_nb_elems = op->m_dst->nb_segs;
+ cookie->qat_sgl_dst_phys_addr =
+ rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+ }
+
ret = qat_sgl_fill_array(op->m_dst,
- rte_pktmbuf_mtophys_offset(op->m_dst,
- op->dst.offset),
- &cookie->qat_sgl_dst,
+ op->dst.offset,
+ cookie->qat_sgl_dst_d,
comp_req->comp_pars.out_buffer_sz,
- RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ cookie->dst_nb_elems);
if (ret) {
- QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
return ret;
}
/* flat aka linear buffer */
ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_FLAT);
- comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src);
- comp_req->comn_mid.dst_length = rte_pktmbuf_data_len(op->m_dst);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
comp_req->comn_mid.src_data_addr =
rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
comp_req->comn_mid.dest_data_addr =
rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
}
+ if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
+ /* QAT doesn't support destination buffers smaller than
+  * QAT_MIN_OUT_BUF_SIZE. Flag the error by converting this
+  * request into a null request; the status is then reported
+  * in the response.
+  */
+ QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
+ comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+ comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+ cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ }
+
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_LOG(DEBUG, "Direction: %s",
qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
}
int
-qat_comp_process_response(void **op, uint8_t *resp)
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
+ uint64_t *dequeue_err_count)
{
struct icp_qat_fw_comp_resp *resp_msg =
(struct icp_qat_fw_comp_resp *)resp;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
(resp_msg->opaque_data);
struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
(rx_op->private_xform);
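+ /* non-zero if either the compression slice or the translator
+  * slice flagged an error in the response status word
+  */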
+ int err = resp_msg->comn_resp.comn_status &
+ ((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
+ (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
QAT_DP_LOG(DEBUG, "Direction: %s",
sizeof(struct icp_qat_fw_comp_resp));
#endif
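+ /* An error recorded at enqueue time (e.g. a too-small destination
+  * buffer turned into a null request) takes precedence over the
+  * firmware response.
+  */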
+ if (unlikely(cookie->error)) {
+ rx_op->status = cookie->error;
+ cookie->error = 0;
+ ++(*dequeue_err_count);
+ rx_op->debug_status = 0;
+ rx_op->consumed = 0;
+ rx_op->produced = 0;
+ *op = (void *)rx_op;
+ return 0;
+ }
+
if (likely(qat_xform->qat_comp_request_type
!= QAT_COMP_REQUEST_DECOMPRESS)) {
if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
*op = (void *)rx_op;
QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ ++(*dequeue_err_count);
return 0;
}
}
- if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
- | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
- resp_msg->comn_resp.comn_status)) !=
- ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+ if (err) {
+ if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
+ && (qat_xform->qat_comp_request_type
+ == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
+ QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
+ "small for output, try configuring a larger size");
+ }
- rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ int8_t cmp_err_code =
+ (int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
+ int8_t xlat_err_code =
+ (int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
+
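+ /* An overflow in one or both slices, with no other error code,
+  * means the output buffer ran out of space rather than the
+  * operation failing outright.
+  */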
+ if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
+ ||
+ (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
+ ||
+ (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
+ xlat_err_code == ERR_CODE_OVERFLOW_ERROR))
+ rx_op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ else
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+
+ ++(*dequeue_err_count);
rx_op->debug_status =
*((uint16_t *)(&resp_msg->comn_resp.comn_error));
} else {
- struct qat_comp_xform *qat_xform = rx_op->private_xform;
struct icp_qat_fw_resp_comp_pars *comp_resp =
(struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
}
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
- const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_memzone *interm_buff_mz,
const struct rte_comp_xform *xform)
{
struct icp_qat_fw_comp_req *comp_req;
ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
- ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+ ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
ICP_QAT_FW_SLICE_COMP);
} else if (qat_xform->qat_comp_request_type ==
- QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
- QAT_LOG(ERR, "Dynamic huffman encoding not supported");
- return -EINVAL;
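+ /* Chain the slices for dynamic compression: the compression
+  * slice feeds the translator (dynamic Huffman encoding), which
+  * writes the final output to DRAM, with the intermediate
+  * buffer holding the data in between.
+  */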
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_XLAT);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_XLAT);
+
+ comp_req->u1.xlt_pars.inter_buff_ptr =
+ interm_buff_mz->phys_addr;
}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
(struct qat_comp_xform *)*private_xform;
if (xform->type == RTE_COMP_COMPRESS) {
- if (xform->compress.deflate.huffman ==
- RTE_COMP_HUFFMAN_DYNAMIC) {
- QAT_LOG(ERR,
- "QAT device doesn't support dynamic compression");
- return -ENOTSUP;
- }
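+ /* Dynamic Huffman (and DEFAULT, when IM buffers are configured)
+  * now maps to a dynamic request; without IM buffers only fixed
+  * Huffman can be used.
+  */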
if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
&& qat->interm_buff_mz == NULL))
-
qat_xform->qat_comp_request_type =
QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+ else if ((xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC ||
+ xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DEFAULT) &&
+ qat->interm_buff_mz != NULL)
+
+ qat_xform->qat_comp_request_type =
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;
+
+ else {
+ QAT_LOG(ERR,
+ "IM buffers needed for dynamic deflate. Set size in config file");
+ return -EINVAL;
+ }
+
+ qat_xform->checksum_type = xform->compress.chksum;
} else {
qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+ qat_xform->checksum_type = xform->decompress.chksum;
}
- qat_xform->checksum_type = xform->compress.chksum;
-
if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
QAT_LOG(ERR, "QAT: Problem with setting compression");
return -EINVAL;