compress/qat: fix overflow status return
dpdk.git: drivers/compress/qat/qat_comp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

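/* Build a QAT firmware request descriptor for a stateless compression
 * operation: copy the session's request template, then fill in the
 * per-operation fields (buffer addresses, lengths, opaque op pointer).
 */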
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
                       void *op_cookie,
                       enum qat_device_gen qat_dev_gen __rte_unused)
{
        struct rte_comp_op *op = in_op;
        struct qat_comp_op_cookie *cookie =
                        (struct qat_comp_op_cookie *)op_cookie;
        struct qat_comp_xform *qat_xform = op->private_xform;
        const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
        struct icp_qat_fw_comp_req *comp_req =
            (struct icp_qat_fw_comp_req *)out_msg;

        if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
                QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
                                "operation requests, op (%p) is not a "
                                "stateless operation.", op);
                op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
                return -EINVAL;
        }

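        /* Start from the pre-built request template and overwrite only
         * the per-operation fields below.
         */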
        rte_mov128(out_msg, tmpl);
        comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

        if (likely(qat_xform->qat_comp_request_type ==
                    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
                if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
                        /* fall back to fixed compression */
                        comp_req->comn_hdr.service_cmd_id =
                                        ICP_QAT_FW_COMP_CMD_STATIC;

                        ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);

                        ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                        ICP_QAT_FW_SLICE_NULL);
                        ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                        ICP_QAT_FW_SLICE_NULL);

                        QAT_DP_LOG(DEBUG, "QAT PMD: falling back to fixed "
                                   "compression - the IM buffer may be too "
                                   "small for the produced data. Use an "
                                   "input buffer shorter than %d bytes",
                                   QAT_FALLBACK_THLD);
                }
        }

        /* common for SGL and flat buffers */
        comp_req->comp_pars.comp_len = op->src.length;
        comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
                        op->dst.offset;

        if (op->m_src->next != NULL || op->m_dst->next != NULL) {
                /* SGL */
                int ret = 0;

                ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_SGL);

                if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
                        /* we need to allocate more elements in SGL */
                        struct qat_sgl *tmp;

                        tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
                                          sizeof(struct qat_sgl) +
                                          sizeof(struct qat_flat_buf) *
                                          op->m_src->nb_segs, 64,
                                          cookie->socket_id);

                        if (unlikely(tmp == NULL)) {
                                QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
                                           " for %d elements of SGL",
                                           op->m_src->nb_segs);
                                op->status = RTE_COMP_OP_STATUS_ERROR;
                                return -ENOMEM;
                        }
                        /* new SGL is valid now */
                        cookie->qat_sgl_src_d = tmp;
                        cookie->src_nb_elems = op->m_src->nb_segs;
                        cookie->qat_sgl_src_phys_addr =
                                rte_malloc_virt2iova(cookie->qat_sgl_src_d);
                }

                ret = qat_sgl_fill_array(op->m_src,
                                op->src.offset,
                                cookie->qat_sgl_src_d,
                                op->src.length,
                                cookie->src_nb_elems);
                if (ret) {
                        QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
                        op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
                        return ret;
                }

                if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
                        /* we need to allocate more elements in SGL */
                        struct qat_sgl *tmp;

                        tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
                                          sizeof(struct qat_sgl) +
                                          sizeof(struct qat_flat_buf) *
                                          op->m_dst->nb_segs, 64,
                                          cookie->socket_id);

                        if (unlikely(tmp == NULL)) {
                                QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
                                           " for %d elements of SGL",
                                           op->m_dst->nb_segs);
                                op->status = RTE_COMP_OP_STATUS_ERROR;
                                return -ENOMEM;
                        }
                        /* new SGL is valid now */
                        cookie->qat_sgl_dst_d = tmp;
                        cookie->dst_nb_elems = op->m_dst->nb_segs;
                        cookie->qat_sgl_dst_phys_addr =
                                rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
                }

                ret = qat_sgl_fill_array(op->m_dst,
                                op->dst.offset,
                                cookie->qat_sgl_dst_d,
                                comp_req->comp_pars.out_buffer_sz,
                                cookie->dst_nb_elems);
                if (ret) {
                        QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
                        op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
                        return ret;
                }

                comp_req->comn_mid.src_data_addr =
                                cookie->qat_sgl_src_phys_addr;
                comp_req->comn_mid.dest_data_addr =
                                cookie->qat_sgl_dst_phys_addr;
                comp_req->comn_mid.src_length = 0;
                comp_req->comn_mid.dst_length = 0;

        } else {
                /* flat aka linear buffer */
                ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_FLAT);
                comp_req->comn_mid.src_length = op->src.length;
                comp_req->comn_mid.dst_length =
                                comp_req->comp_pars.out_buffer_sz;

                comp_req->comn_mid.src_data_addr =
                    rte_pktmbuf_iova_offset(op->m_src, op->src.offset);
                comp_req->comn_mid.dest_data_addr =
                    rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset);
        }

        if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
                /* QAT doesn't support destination buffers smaller than
                 * QAT_MIN_OUT_BUF_SIZE. Convert the request to a null
                 * request and record the error in the cookie so it is
                 * reported when the response is processed.
                 */
                QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
                comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
                comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
                cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
        }

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_LOG(DEBUG, "Direction: %s",
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
                            "decompression" : "compression");
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
                    sizeof(struct icp_qat_fw_comp_req));
#endif
        return 0;
}

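/* Translate a QAT firmware response back onto the originating
 * rte_comp_op: map firmware error codes to rte_comp status values and,
 * on success, fill in the consumed/produced byte counts and checksum.
 */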
int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
                          uint64_t *dequeue_err_count)
{
        struct icp_qat_fw_comp_resp *resp_msg =
                        (struct icp_qat_fw_comp_resp *)resp;
        struct qat_comp_op_cookie *cookie =
                        (struct qat_comp_op_cookie *)op_cookie;
        struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
                        (resp_msg->opaque_data);
        struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
                                (rx_op->private_xform);
        int err = resp_msg->comn_resp.comn_status &
                        ((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
                         (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_LOG(DEBUG, "Direction: %s",
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
            "decompression" : "compression");
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
                        sizeof(struct icp_qat_fw_comp_resp));
#endif

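        /* The request was converted to a null request at build time
         * (e.g. destination buffer below QAT_MIN_OUT_BUF_SIZE); report
         * the status stashed in the cookie instead of the response.
         */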
        if (unlikely(cookie->error)) {
                rx_op->status = cookie->error;
                cookie->error = 0;
                ++(*dequeue_err_count);
                rx_op->debug_status = 0;
                rx_op->consumed = 0;
                rx_op->produced = 0;
                *op = (void *)rx_op;
                return 0;
        }

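        /* Compression requests are issued with CNV (compress-and-verify);
         * if the response shows CNV was not applied, the firmware does
         * not support it, so fail the operation.
         */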
        if (likely(qat_xform->qat_comp_request_type
                        != QAT_COMP_REQUEST_DECOMPRESS)) {
                if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
                                resp_msg->comn_resp.hdr_flags)
                                        == ICP_QAT_FW_COMP_NO_CNV)) {
                        rx_op->status = RTE_COMP_OP_STATUS_ERROR;
                        rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
                        *op = (void *)rx_op;
                        QAT_DP_LOG(ERR, "QAT has wrong firmware");
                        ++(*dequeue_err_count);
                        return 0;
                }
        }

        if (err) {
                if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
                             && (qat_xform->qat_comp_request_type
                                 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
                        QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
                            "small for output, try configuring a larger size");
                }

                int8_t cmp_err_code =
                        (int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
                int8_t xlat_err_code =
                        (int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

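                /* Report OUT_OF_SPACE_TERMINATED only when overflow is
                 * the sole error signalled (by either slice or by both);
                 * any other error code maps to a generic error.
                 */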
                if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
                     !xlat_err_code) ||
                    (!cmp_err_code &&
                     xlat_err_code == ERR_CODE_OVERFLOW_ERROR) ||
                    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
                     xlat_err_code == ERR_CODE_OVERFLOW_ERROR))
                        rx_op->status =
                                RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
                else
                        rx_op->status = RTE_COMP_OP_STATUS_ERROR;

                ++(*dequeue_err_count);
                rx_op->debug_status =
                        *((uint16_t *)(&resp_msg->comn_resp.comn_error));
        } else {
                struct icp_qat_fw_resp_comp_pars *comp_resp =
                  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

                rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
                rx_op->consumed = comp_resp->input_byte_counter;
                rx_op->produced = comp_resp->output_byte_counter;

                if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
                        if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
                                rx_op->output_chksum = comp_resp->curr_crc32;
                        else if (qat_xform->checksum_type ==
                                        RTE_COMP_CHECKSUM_ADLER32)
                                rx_op->output_chksum = comp_resp->curr_adler_32;
                        else
                                rx_op->output_chksum = comp_resp->curr_chksum;
                }
        }
        *op = (void *)rx_op;

        return 0;
}

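/* Size of the PMD's private xform structure, rounded up for alignment. */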
unsigned int
qat_comp_xform_size(void)
{
        return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

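/* Fill in the common request header fields for the given request type. */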
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
                                    enum qat_comp_request_type request)
{
        if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
                header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
        else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
                header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
        else if (request == QAT_COMP_REQUEST_DECOMPRESS)
                header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
        header->hdr_flags =
            ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

        header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
            QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

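/* Build the session's firmware request template: header, compression
 * config and slice chain, derived from the application xform.
 */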
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
                        const struct rte_memzone *interm_buff_mz,
                        const struct rte_comp_xform *xform)
{
        struct icp_qat_fw_comp_req *comp_req;
        int comp_level, algo;
        uint32_t req_par_flags;
        int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

        if (unlikely(qat_xform == NULL)) {
                QAT_LOG(ERR, "Session was not created for this device");
                return -EINVAL;
        }

        if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
                direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
                comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
                req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                                ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
                                ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
                                ICP_QAT_FW_COMP_NO_CNV_RECOVERY);

        } else {
                if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
                else if (xform->compress.level == 1)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
                else if (xform->compress.level == 2)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
                else if (xform->compress.level == 3)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
                else if (xform->compress.level >= 4 &&
                         xform->compress.level <= 9)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
                else {
                        QAT_LOG(ERR, "compression level not supported");
                        return -EINVAL;
                }
                req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                                ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
                                ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
                                ICP_QAT_FW_COMP_CNV_RECOVERY);
        }

        switch (xform->compress.algo) {
        case RTE_COMP_ALGO_DEFLATE:
                algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
                break;
        case RTE_COMP_ALGO_LZS:
        default:
                /* RTE_COMP_ALGO_NULL */
                QAT_LOG(ERR, "compression algorithm not supported");
                return -EINVAL;
        }

        comp_req = &qat_xform->qat_comp_req_tmpl;

        /* Initialize header */
        qat_comp_create_req_hdr(&comp_req->comn_hdr,
                                        qat_xform->qat_comp_request_type);

        comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
            ICP_QAT_FW_COMP_STATELESS_SESSION,
            ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
            ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
            ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
            ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

        comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
            ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
                direction,
                /* delayed match is the only valid mode on CPM 1.6 */
                ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
                /* translate level to depth */
                comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

        comp_req->comp_pars.initial_adler = 1;
        comp_req->comp_pars.initial_crc32 = 0;
        comp_req->comp_pars.req_par_flags = req_par_flags;

        if (qat_xform->qat_comp_request_type ==
                        QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
                ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
                                            ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
                                            ICP_QAT_FW_SLICE_COMP);
        } else if (qat_xform->qat_comp_request_type ==
                        QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

                ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
                                ICP_QAT_FW_SLICE_XLAT);
                ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
                                ICP_QAT_FW_SLICE_COMP);

                ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                ICP_QAT_FW_SLICE_XLAT);

                comp_req->u1.xlt_pars.inter_buff_ptr =
                                interm_buff_mz->iova;
        }

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
                    sizeof(struct icp_qat_fw_comp_req));
#endif
        return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - 0 if successful, with a valid private_xform handle stored
 *  - -EINVAL if input parameters are invalid
 *  - -ENOTSUP if comp device does not support the comp transform
 *  - -ENOMEM if the private_xform could not be allocated
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
                              const struct rte_comp_xform *xform,
                              void **private_xform)
{
        struct qat_comp_dev_private *qat = dev->data->dev_private;

        if (unlikely(private_xform == NULL)) {
                QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
                return -EINVAL;
        }
        if (unlikely(qat->xformpool == NULL)) {
                QAT_LOG(ERR, "QAT device has no private_xform mempool");
                return -ENOMEM;
        }
        if (rte_mempool_get(qat->xformpool, private_xform)) {
                QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
                return -ENOMEM;
        }

        struct qat_comp_xform *qat_xform =
                        (struct qat_comp_xform *)*private_xform;

        if (xform->type == RTE_COMP_COMPRESS) {

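                /* Select fixed or dynamic Huffman coding: dynamic needs
                 * intermediate buffers, so fall back to fixed when
                 * HUFFMAN_DEFAULT is requested but no IM buffers were
                 * configured.
                 */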
                if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
                  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
                                   && qat->interm_buff_mz == NULL))
                        qat_xform->qat_comp_request_type =
                                        QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

                else if ((xform->compress.deflate.huffman ==
                                RTE_COMP_HUFFMAN_DYNAMIC ||
                                xform->compress.deflate.huffman ==
                                                RTE_COMP_HUFFMAN_DEFAULT) &&
                                qat->interm_buff_mz != NULL)

                        qat_xform->qat_comp_request_type =
                                        QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

                else {
                        QAT_LOG(ERR,
                                        "IM buffers needed for dynamic deflate. Set size in config file");
                        /* return the unused xform to the pool */
                        rte_mempool_put(qat->xformpool, *private_xform);
                        return -EINVAL;
                }

                qat_xform->checksum_type = xform->compress.chksum;

        } else {
                qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
                qat_xform->checksum_type = xform->decompress.chksum;
        }

        if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
                QAT_LOG(ERR, "QAT: failed to create compression request template");
                /* return the unused xform to the pool */
                rte_mempool_put(qat->xformpool, *private_xform);
                return -EINVAL;
        }
        return 0;
}

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - -EINVAL if input parameters are invalid
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
                            void *private_xform)
{
        struct qat_comp_xform *qat_xform =
                        (struct qat_comp_xform *)private_xform;

        if (qat_xform) {
                memset(qat_xform, 0, qat_comp_xform_size());
                struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

                rte_mempool_put(mp, qat_xform);
                return 0;
        }
        return -EINVAL;
}