/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"
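
/*
 * Build a QAT firmware request descriptor from an rte_comp_op.
 * The pre-built template from the xform (or stream) is copied into the
 * descriptor, then per-op fields are filled in: SOP/EOP flags for stateful
 * decompression, source/destination addresses (flat or SGL) and lengths.
 * Oversized dynamic-compression requests fall back to static (fixed
 * Huffman) compression.
 */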
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	const uint8_t *tmpl;
	struct icp_qat_fw_comp_req *comp_req =
			(struct icp_qat_fw_comp_req *)out_msg;
	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = op->stream;
		qat_xform = &stream->qat_xform;
		if (unlikely(qat_xform->qat_comp_request_type !=
				QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		if (unlikely(stream->op_in_progress)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
			return -EINVAL;
		}
		stream->op_in_progress = 1;
	} else {
		stream = NULL;
		qat_xform = op->private_xform;
	}
	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				(stream->start_of_packet) ?
					ICP_QAT_FW_COMP_SOP
				      : ICP_QAT_FW_COMP_NOT_SOP,
				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
					ICP_QAT_FW_COMP_EOP
				      : ICP_QAT_FW_COMP_NOT_EOP,
				ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
	}
	if (likely(qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {

			/* fallback to fixed compression */
			comp_req->comn_hdr.service_cmd_id =
					ICP_QAT_FW_COMP_CMD_STATIC;

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);
			ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);

			QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed "
					"compression! IM buffer size can be too low "
					"for produced data.\n Please use input "
					"buffer length lower than %d bytes",
					QAT_FALLBACK_THLD);
		}
	}
	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;
	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);
		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
			/* we need to allocate more elements in SGL*/
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					op->m_src->nb_segs, 64,
					cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
						" for %d elements of SGL",
						op->m_src->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream != NULL)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
			cookie->src_nb_elems = op->m_src->nb_segs;
			cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
		}
		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				cookie->qat_sgl_src_d,
				op->src.length,
				cookie->src_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream != NULL)
				stream->op_in_progress = 0;
			return ret;
		}
		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
			/* we need to allocate more elements in SGL*/
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					op->m_dst->nb_segs, 64,
					cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
						" for %d elements of SGL",
						op->m_dst->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream != NULL)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
			cookie->dst_nb_elems = op->m_dst->nb_segs;
			cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
		}
		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				cookie->qat_sgl_dst_d,
				comp_req->comp_pars.out_buffer_sz,
				cookie->dst_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream != NULL)
				stream->op_in_progress = 0;
			return ret;
		}
		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
			rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
			rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}
	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
		/* QAT doesn't support dest. buffer lower
		 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark
		 * by converting this request to the null one
		 * and check the status in the response.
		 */
		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
	}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
		qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			"decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
			sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}
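
/*
 * Parse a single QAT response descriptor and fill in the matching
 * rte_comp_op: status, consumed/produced byte counts and checksum.
 * Out-of-space conditions are reported as recoverable or terminated
 * depending on the request type and on whether any input was consumed.
 */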
int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
			  uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));
	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = rx_op->stream;
		qat_xform = &stream->qat_xform;
		/* clear op-in-progress flag */
		stream->op_in_progress = 0;
	} else {
		stream = NULL;
		qat_xform = rx_op->private_xform;
	}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
		qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			"decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif
	if (unlikely(cookie->error)) {
		rx_op->status = cookie->error;
		cookie->error = 0;
		++(*dequeue_err_count);
		rx_op->debug_status = 0;
		rx_op->consumed = 0;
		rx_op->produced = 0;
		*op = (void *)rx_op;
		return 0;
	}
	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR, "QAT has wrong firmware");
			++(*dequeue_err_count);
			return 0;
		}
	}

	if (err) {
		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
				&& (qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
					"small for output, try configuring a larger size");
		}
		int8_t cmp_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
		int8_t xlat_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;
		/* handle recoverable out-of-buffer condition in stateful
		 * decompression scenario
		 */
		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
				&& qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_DECOMPRESS
				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
			struct icp_qat_fw_resp_comp_pars *comp_resp =
					&resp_msg->comp_resp_pars;
			rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
			rx_op->consumed = comp_resp->input_byte_counter;
			rx_op->produced = comp_resp->output_byte_counter;
			stream->start_of_packet = 0;
		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
				&& !xlat_err_code)
				||
			(!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
				||
			(cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
			 xlat_err_code == ERR_CODE_OVERFLOW_ERROR)) {

			struct icp_qat_fw_resp_comp_pars *comp_resp =
				(struct icp_qat_fw_resp_comp_pars *)
					&resp_msg->comp_resp_pars;
			/* handle recoverable out-of-buffer condition
			 * in stateless compression scenario
			 */
			if (comp_resp->input_byte_counter) {
				if ((qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_FIXED_COMP_STATELESS) ||
				    (qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {

					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
					rx_op->consumed =
						comp_resp->input_byte_counter;
					rx_op->produced =
						comp_resp->output_byte_counter;
				} else
					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			} else
				rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		} else
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;

		++(*dequeue_err_count);
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
			(struct icp_qat_fw_resp_comp_pars *)
				&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;
		if (stream != NULL)
			stream->start_of_packet = 0;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}

	*op = (void *)rx_op;
	return 0;
}
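
/* Both object sizes below are rounded up to an 8-byte multiple so that
 * the xform and stream mempool elements stay naturally aligned.
 */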
unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}
unsigned int
qat_comp_stream_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}
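
/* Fill the request header fields that are common to every compression,
 * decompression and translator request template.
 */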
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
		QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}
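
/* Build the per-xform (or per-stream) firmware request template that
 * qat_comp_build_request() later copies into each descriptor.
 */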
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			const struct rte_memzone *interm_buff_mz,
			const struct rte_comp_xform *xform,
			const struct qat_comp_stream *stream,
			enum rte_comp_op_type op_type)
{
	struct icp_qat_fw_comp_req *comp_req;
	int comp_level, algo;
	uint32_t req_par_flags;
	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}
	if (op_type == RTE_COMP_OP_STATEFUL) {
		if (unlikely(stream == NULL)) {
			QAT_LOG(ERR, "Stream must be non null for stateful op");
			return -EINVAL;
		}
		if (unlikely(qat_xform->qat_comp_request_type !=
				QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
			return -ENOTSUP;
		}
	}
	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
	} else {
		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level == 1)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		else if (xform->compress.level == 2)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
		else if (xform->compress.level == 3)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level >= 4 &&
			 xform->compress.level <= 9)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
		else {
			QAT_LOG(ERR, "compression level not supported");
			return -EINVAL;
		}
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}
	switch (xform->compress.algo) {
	case RTE_COMP_ALGO_DEFLATE:
		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
		break;
	case RTE_COMP_ALGO_LZS:
	default:
		QAT_LOG(ERR, "compression algorithm not supported");
		return -EINVAL;
	}

	comp_req = &qat_xform->qat_comp_req_tmpl;
	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
					qat_xform->qat_comp_request_type);
	if (op_type == RTE_COMP_OP_STATEFUL) {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATEFUL_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

		/* Decompression state registers */
		comp_req->comp_cd_ctrl.comp_state_addr =
				stream->state_registers_decomp_phys;

		/* Enable A, B, C, D, and E (CAMs). */
		comp_req->comp_cd_ctrl.ram_bank_flags =
			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */

		comp_req->comp_cd_ctrl.ram_banks_addr =
				stream->inflate_context_phys;
	} else {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATELESS_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
	}
	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
		ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
			direction,
			/* In CPM 1.6 only valid mode ! */
			ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
			/* Translate level to depth */
			comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;
	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);

		comp_req->u1.xlt_pars.inter_buff_ptr =
				interm_buff_mz->phys_addr;
	}
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
			sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}
/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;
	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;
	if (xform->type == RTE_COMP_COMPRESS) {

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
				&& qat->interm_buff_mz == NULL))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		else if ((xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC ||
				xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DEFAULT) &&
				qat->interm_buff_mz != NULL)

			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

		else {
			QAT_LOG(ERR,
			"IM buffers needed for dynamic deflate. Set size in config file");
			return -EINVAL;
		}

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
				      NULL, RTE_COMP_OP_STATELESS)) {
		QAT_LOG(ERR, "QAT: Problem with setting compression");
		return -EINVAL;
	}
	return 0;
}
/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}
/**
 * Reset stream state for the next use.
 *
 * @param stream
 *   handle of pmd's private stream data
 */
static void
qat_comp_stream_reset(struct qat_comp_stream *stream)
{
	if (stream) {
		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
		stream->start_of_packet = 1;
		stream->op_in_progress = 0;
	}
}
/**
 * Create driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data
 * @param stream
 *   ptr where handle of pmd's private stream data should be stored
 * @return
 *  - Returns 0 if private stream structure has been created successfully.
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
qat_comp_stream_create(struct rte_compressdev *dev,
		       const struct rte_comp_xform *xform,
		       void **stream)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;
	struct qat_comp_stream *ptr;
	if (unlikely(stream == NULL)) {
		QAT_LOG(ERR, "QAT: stream parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
		QAT_LOG(ERR, "QAT: stateful compression not supported");
		return -ENOTSUP;
	}
	if (unlikely(qat->streampool == NULL)) {
		QAT_LOG(ERR, "QAT device has no stream mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->streampool, stream)) {
		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
		return -ENOMEM;
	}
	ptr = (struct qat_comp_stream *) *stream;
	qat_comp_stream_reset(ptr);
	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
	ptr->qat_xform.checksum_type = xform->decompress.chksum;

	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
		rte_mempool_put(qat->streampool, *stream);
		*stream = NULL;
		return -EINVAL;
	}

	return 0;
}
/**
 * Free driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param stream
 *   handle of pmd's private stream data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
{
	if (stream) {
		struct qat_comp_dev_private *qat = dev->data->dev_private;

		qat_comp_stream_reset((struct qat_comp_stream *) stream);
		rte_mempool_put(qat->streampool, stream);
		return 0;
	}
	return -EINVAL;
}
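
/*
 * Illustrative sketch only (not part of this driver): the ops above are
 * reached through the generic compressdev API. A minimal stateless
 * compression flow in an application would look roughly like this,
 * assuming dev_id, qp_id, an op mempool and the op's mbufs are set up
 * elsewhere:
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *		},
 *	};
 *	void *priv_xform;
 *
 *	rte_compressdev_private_xform_create(dev_id, &xform, &priv_xform);
 *	op->op_type = RTE_COMP_OP_STATELESS;
 *	op->private_xform = priv_xform;
 *	rte_compressdev_enqueue_burst(dev_id, qp_id, &op, 1);
 *	while (rte_compressdev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
 *		;
 */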