+
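+/* Build a QAT firmware compression request from a DPDK comp operation.
+ * in_op is the rte_comp_op to translate, out_msg receives the firmware
+ * request descriptor, and op_cookie holds the per-operation SGL state.
+ * Returns 0 on success or a negative errno on failure.
+ */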
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused)
+{
+ struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct qat_comp_stream *stream;
+ struct qat_comp_xform *qat_xform;
+ const uint8_t *tmpl;
+ struct icp_qat_fw_comp_req *comp_req =
+ (struct icp_qat_fw_comp_req *)out_msg;
+
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ stream = op->stream;
+ qat_xform = &stream->qat_xform;
+ if (unlikely(qat_xform->qat_comp_request_type !=
+ QAT_COMP_REQUEST_DECOMPRESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ if (unlikely(stream->op_in_progress)) {
+ QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
+ op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
+ return -EINVAL;
+ }
+ stream->op_in_progress = 1;
+ } else {
+ stream = NULL;
+ qat_xform = op->private_xform;
+ }
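+ /* Start from the pre-built request template stored in the xform and
+ * stash the op pointer in the opaque field so that the response
+ * handler can recover it.
+ */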
+ tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+
+ rte_mov128(out_msg, tmpl);
+ comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
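+ /* For stateful operations the request parameter flags must be
+ * rebuilt per request: start/end of packet depend on the stream
+ * position and on the op's flush flag.
+ */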
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ comp_req->comp_pars.req_par_flags =
+ ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ (stream->start_of_packet) ?
+ ICP_QAT_FW_COMP_SOP
+ : ICP_QAT_FW_COMP_NOT_SOP,
+ (op->flush_flag == RTE_COMP_FLUSH_FULL ||
+ op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
+ ICP_QAT_FW_COMP_EOP
+ : ICP_QAT_FW_COMP_NOT_EOP,
+ ICP_QAT_FW_COMP_NOT_BFINAL,
+ ICP_QAT_FW_COMP_NO_CNV,
+ ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+ }
+
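+ /* Dynamic (Huffman) compression uses intermediate buffers inside
+ * QAT; above QAT_FALLBACK_THLD the produced data may not fit, so
+ * fall back to static (fixed Huffman) compression.
+ */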
+ if (likely(qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
+ if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
+
+ /* fallback to fixed compression */
+ comp_req->comn_hdr.service_cmd_id =
+ ICP_QAT_FW_COMP_CMD_STATIC;
+
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_NULL);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
+ ICP_QAT_FW_SLICE_NULL);
+
+ QAT_DP_LOG(DEBUG, "QAT PMD: falling back to fixed "
+ "compression! The IM buffer size may be too low "
+ "for the produced data. Please use an input "
+ "buffer length lower than %d bytes",
+ QAT_FALLBACK_THLD);
+ }
+ }
+
+ /* common for sgl and flat buffers */
+ comp_req->comp_pars.comp_len = op->src.length;
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
+
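+ /* Chained mbufs require the scatter-gather path; otherwise the
+ * source and destination can be passed as flat buffers.
+ */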
+ if (op->m_src->next != NULL || op->m_dst->next != NULL) {
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
+ /* we need to allocate more elements in the SGL */
+ void *tmp;
+
+ tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ op->m_src->nb_segs, 64,
+ cookie->socket_id);
+
+ if (unlikely(tmp == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+ " for %d elements of SGL",
+ op->m_src->nb_segs);
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ /* clear op-in-progress flag */
+ if (stream)
+ stream->op_in_progress = 0;
+ return -ENOMEM;
+ }
+ /* new SGL is valid now */
+ cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
+ cookie->src_nb_elems = op->m_src->nb_segs;
+ cookie->qat_sgl_src_phys_addr =
+ rte_malloc_virt2iova(cookie->qat_sgl_src_d);
+ }
+
+ ret = qat_sgl_fill_array(op->m_src,
+ op->src.offset,
+ cookie->qat_sgl_src_d,
+ op->src.length,
+ cookie->src_nb_elems);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ /* clear op-in-progress flag */
+ if (stream)
+ stream->op_in_progress = 0;
+ return ret;
+ }
+
+ if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
+ /* we need to allocate more elements in the SGL */
+ struct qat_sgl *tmp;
+
+ tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
+ sizeof(struct qat_sgl) +
+ sizeof(struct qat_flat_buf) *
+ op->m_dst->nb_segs, 64,
+ cookie->socket_id);
+
+ if (unlikely(tmp == NULL)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
+ " for %d elements of SGL",
+ op->m_dst->nb_segs);
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ /* clear op-in-progress flag */
+ if (stream)
+ stream->op_in_progress = 0;
+ return -ENOMEM;
+ }
+ /* new SGL is valid now */
+ cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
+ cookie->dst_nb_elems = op->m_dst->nb_segs;
+ cookie->qat_sgl_dst_phys_addr =
+ rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ op->dst.offset,
+ cookie->qat_sgl_dst_d,
+ comp_req->comp_pars.out_buffer_sz,
+ cookie->dst_nb_elems);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ /* clear op-in-progress flag */
+ if (stream)
+ stream->op_in_progress = 0;
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
+
+ } else {
+ /* flat aka linear buffer */
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_FLAT);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
+
+ comp_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ comp_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ }
+
+ if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
+ /* QAT does not support destination buffers smaller than
+ * QAT_MIN_OUT_BUF_SIZE. Convert this request into a null
+ * request and record the error in the cookie so that it is
+ * reported when the response is processed.
+ */
+ QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with a larger buffer");
+ comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+ comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+ cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
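+/* Process a QAT firmware response: recover the originating rte_comp_op
+ * from the opaque data, translate the firmware status into an
+ * rte_comp_op status and fill in the consumed/produced byte counts.
+ * Returns 0; errors are reported through the op status and
+ * dequeue_err_count.
+ */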
+int
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
+ uint64_t *dequeue_err_count)
+{
+ struct icp_qat_fw_comp_resp *resp_msg =
+ (struct icp_qat_fw_comp_resp *)resp;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_comp_stream *stream;
+ struct qat_comp_xform *qat_xform;
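+ /* Non-zero if either the compression slice or the translator
+ * slice reported an error in the response status.
+ */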
+ int err = resp_msg->comn_resp.comn_status &
+ ((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
+ (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
+
+ if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+ stream = rx_op->stream;
+ qat_xform = &stream->qat_xform;
+ /* clear op-in-progress flag */
+ stream->op_in_progress = 0;
+ } else {
+ stream = NULL;
+ qat_xform = rx_op->private_xform;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comp_resp));
+#endif
+
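+ /* The request was converted to a null request at build time
+ * (e.g. destination buffer below QAT_MIN_OUT_BUF_SIZE); report
+ * the error stored in the cookie.
+ */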
+ if (unlikely(cookie->error)) {
+ rx_op->status = cookie->error;
+ cookie->error = 0;
+ ++(*dequeue_err_count);
+ rx_op->debug_status = 0;
+ rx_op->consumed = 0;
+ rx_op->produced = 0;
+ *op = (void *)rx_op;
+ return 0;
+ }
+
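+ /* For compression requests, check that the firmware set the
+ * compress-and-verify (CnV) flag in the response header; its
+ * absence indicates firmware without CnV support.
+ */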
+ if (likely(qat_xform->qat_comp_request_type
+ != QAT_COMP_REQUEST_DECOMPRESS)) {
+ if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
+ resp_msg->comn_resp.hdr_flags)
+ == ICP_QAT_FW_COMP_NO_CNV)) {
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
+ *op = (void *)rx_op;
+ QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ ++(*dequeue_err_count);
+ return 0;
+ }
+ }
+
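+ /* At least one slice reported an error - classify it using the
+ * per-slice error codes.
+ */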
+ if (err) {
+ if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
+ && (qat_xform->qat_comp_request_type
+ == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
+ QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
+ "small for output, try configuring a larger size");
+ }
+
+ int8_t cmp_err_code =
+ (int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
+ int8_t xlat_err_code =
+ (int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
+
+ /* Handle the recoverable out-of-buffer condition in the
+ * stateful decompression scenario.
+ */
+ if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
+ && qat_xform->qat_comp_request_type
+ == QAT_COMP_REQUEST_DECOMPRESS
+ && rx_op->op_type == RTE_COMP_OP_STATEFUL) {
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ &resp_msg->comp_resp_pars;
+ rx_op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+ stream->start_of_packet = 0;
+ } else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
+ && !xlat_err_code)
+ || (!cmp_err_code
+ && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
+ || (cmp_err_code == ERR_CODE_OVERFLOW_ERROR
+ && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)) {
+
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ /* Handle the recoverable out-of-buffer condition in the
+ * stateless compression scenario.
+ */
+ if (comp_resp->input_byte_counter) {
+ if ((qat_xform->qat_comp_request_type
+ == QAT_COMP_REQUEST_FIXED_COMP_STATELESS) ||
+ (qat_xform->qat_comp_request_type
+ == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
+
+ rx_op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
+ rx_op->consumed =
+ comp_resp->input_byte_counter;
+ rx_op->produced =
+ comp_resp->output_byte_counter;
+ } else
+ rx_op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ } else
+ rx_op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ } else
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+
+ ++(*dequeue_err_count);
+ rx_op->debug_status =
+ *((uint16_t *)(&resp_msg->comn_resp.comn_error));
+ } else {
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+ if (stream)
+ stream->start_of_packet = 0;
+
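+ /* Copy back the checksum matching the type requested in the
+ * xform: CRC32, Adler-32, or the combined value.
+ */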
+ if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
+ if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ rx_op->output_chksum = comp_resp->curr_crc32;
+ else if (qat_xform->checksum_type ==
+ RTE_COMP_CHECKSUM_ADLER32)
+ rx_op->output_chksum = comp_resp->curr_adler_32;
+ else
+ rx_op->output_chksum = comp_resp->curr_chksum;
+ }
+ }
+ *op = (void *)rx_op;
+
+ return 0;
+}
+