/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"


int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
                       void *op_cookie,
                       enum qat_device_gen qat_dev_gen __rte_unused)
{
        struct rte_comp_op *op = in_op;
        struct qat_comp_op_cookie *cookie =
                        (struct qat_comp_op_cookie *)op_cookie;
        struct qat_comp_stream *stream;
        struct qat_comp_xform *qat_xform;
        const uint8_t *tmpl;
        struct icp_qat_fw_comp_req *comp_req =
            (struct icp_qat_fw_comp_req *)out_msg;

        if (op->op_type == RTE_COMP_OP_STATEFUL) {
                stream = op->stream;
                qat_xform = &stream->qat_xform;
                if (unlikely(qat_xform->qat_comp_request_type !=
                             QAT_COMP_REQUEST_DECOMPRESS)) {
                        QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
                        op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
                        return -EINVAL;
                }
                if (unlikely(stream->op_in_progress)) {
                        QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
                        op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
                        return -EINVAL;
                }
                stream->op_in_progress = 1;
        } else {
                stream = NULL;
                qat_xform = op->private_xform;
        }
        tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;

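        /*
         * The session/stream setup code pre-built a complete firmware
         * request template (qat_comp_req_tmpl); copy it into the ring
         * slot wholesale, then patch only the per-operation fields below.
         */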
        rte_mov128(out_msg, tmpl);
        comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

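        /*
         * Stateful decompression: rebuild the request parameter flags for
         * each operation - the first request of a packet is marked SOP,
         * and a FULL or FINAL flush marks EOP.
         */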
        if (op->op_type == RTE_COMP_OP_STATEFUL) {
                comp_req->comp_pars.req_par_flags =
                        ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                                (stream->start_of_packet) ?
                                        ICP_QAT_FW_COMP_SOP
                                      : ICP_QAT_FW_COMP_NOT_SOP,
                                (op->flush_flag == RTE_COMP_FLUSH_FULL ||
                                 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
                                        ICP_QAT_FW_COMP_EOP
                                      : ICP_QAT_FW_COMP_NOT_EOP,
                                ICP_QAT_FW_COMP_NOT_BFINAL,
                                ICP_QAT_FW_COMP_NO_CNV,
                                ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
        }

        if (likely(qat_xform->qat_comp_request_type ==
                    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
                if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {

                        /* fallback to fixed compression */
                        comp_req->comn_hdr.service_cmd_id =
                                        ICP_QAT_FW_COMP_CMD_STATIC;

                        ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);

                        ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                        ICP_QAT_FW_SLICE_NULL);
                        ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                        ICP_QAT_FW_SLICE_NULL);

                        QAT_DP_LOG(DEBUG, "QAT PMD: falling back to fixed "
                                   "compression! The IM buffer may be too "
                                   "small for the produced data. Please use "
                                   "an input buffer shorter than %d bytes",
                                   QAT_FALLBACK_THLD);
                }
        }

        /* common for sgl and flat buffers */
        comp_req->comp_pars.comp_len = op->src.length;
        comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
                        op->dst.offset;

        if (op->m_src->next != NULL || op->m_dst->next != NULL) {
                /* sgl */
                int ret = 0;

                ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_SGL);

                if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
                        /* we need to allocate more elements in SGL */
                        void *tmp;

                        tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
                                          sizeof(struct qat_sgl) +
                                          sizeof(struct qat_flat_buf) *
                                          op->m_src->nb_segs, 64,
                                          cookie->socket_id);

                        if (unlikely(tmp == NULL)) {
                                QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
                                           " for %d elements of SGL",
                                           op->m_src->nb_segs);
                                op->status = RTE_COMP_OP_STATUS_ERROR;
                                /* clear op-in-progress flag */
                                if (stream)
                                        stream->op_in_progress = 0;
                                return -ENOMEM;
                        }
                        /* new SGL is valid now */
                        cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
                        cookie->src_nb_elems = op->m_src->nb_segs;
                        cookie->qat_sgl_src_phys_addr =
                                rte_malloc_virt2iova(cookie->qat_sgl_src_d);
                }

                ret = qat_sgl_fill_array(op->m_src,
                                op->src.offset,
                                cookie->qat_sgl_src_d,
                                op->src.length,
                                cookie->src_nb_elems);
                if (ret) {
                        QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
                        op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
                        /* clear op-in-progress flag */
                        if (stream)
                                stream->op_in_progress = 0;
                        return ret;
                }

                if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
                        /* we need to allocate more elements in SGL */
                        struct qat_sgl *tmp;

                        tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
                                          sizeof(struct qat_sgl) +
                                          sizeof(struct qat_flat_buf) *
                                          op->m_dst->nb_segs, 64,
                                          cookie->socket_id);

                        if (unlikely(tmp == NULL)) {
                                QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
                                           " for %d elements of SGL",
                                           op->m_dst->nb_segs);
                                op->status = RTE_COMP_OP_STATUS_ERROR;
                                /* clear op-in-progress flag */
                                if (stream)
                                        stream->op_in_progress = 0;
                                return -ENOMEM;
                        }
                        /* new SGL is valid now */
                        cookie->qat_sgl_dst_d = tmp;
                        cookie->dst_nb_elems = op->m_dst->nb_segs;
                        cookie->qat_sgl_dst_phys_addr =
                                rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
                }

                ret = qat_sgl_fill_array(op->m_dst,
                                op->dst.offset,
                                cookie->qat_sgl_dst_d,
                                comp_req->comp_pars.out_buffer_sz,
                                cookie->dst_nb_elems);
                if (ret) {
                        QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
                        op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
                        /* clear op-in-progress flag */
                        if (stream)
                                stream->op_in_progress = 0;
                        return ret;
                }

                comp_req->comn_mid.src_data_addr =
                                cookie->qat_sgl_src_phys_addr;
                comp_req->comn_mid.dest_data_addr =
                                cookie->qat_sgl_dst_phys_addr;
                comp_req->comn_mid.src_length = 0;
                comp_req->comn_mid.dst_length = 0;

        } else {
                /* flat aka linear buffer */
                ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
                                QAT_COMN_PTR_TYPE_FLAT);
                comp_req->comn_mid.src_length = op->src.length;
                comp_req->comn_mid.dst_length =
                                comp_req->comp_pars.out_buffer_sz;

                comp_req->comn_mid.src_data_addr =
                    rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
                comp_req->comn_mid.dest_data_addr =
                    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
        }

        if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
                /* QAT doesn't support destination buffers smaller than
                 * QAT_MIN_OUT_BUF_SIZE. Propagate the error by converting
                 * this request into a null request and checking the status
                 * in the response.
                 */
                QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
                comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
                comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
                cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
        }

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_LOG(DEBUG, "Direction: %s",
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
                            "decompression" : "compression");
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
                    sizeof(struct icp_qat_fw_comp_req));
#endif
        return 0;
}
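
/*
 * Illustration only (not part of the driver): qat_comp_build_request() is
 * not called by applications directly; it runs as the request-build
 * callback when operations are enqueued through the compressdev API.
 * A minimal sketch, assuming a configured device 0, queue pair 0 and an
 * op with source/destination mbufs attached:
 *
 *      struct rte_comp_op *ops[1] = { op };
 *      uint16_t nb = rte_compressdev_enqueue_burst(0, 0, ops, 1);
 *      // nb == 0 means the ring is full; retry later
 */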

int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
                          uint64_t *dequeue_err_count)
{
        struct icp_qat_fw_comp_resp *resp_msg =
                        (struct icp_qat_fw_comp_resp *)resp;
        struct qat_comp_op_cookie *cookie =
                        (struct qat_comp_op_cookie *)op_cookie;
        struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
                        (resp_msg->opaque_data);
        struct qat_comp_stream *stream;
        struct qat_comp_xform *qat_xform;
        int err = resp_msg->comn_resp.comn_status &
                        ((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
                         (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

        if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
                stream = rx_op->stream;
                qat_xform = &stream->qat_xform;
                /* clear op-in-progress flag */
                stream->op_in_progress = 0;
        } else {
                stream = NULL;
                qat_xform = rx_op->private_xform;
        }

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_LOG(DEBUG, "Direction: %s",
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
            "decompression" : "compression");
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
                        sizeof(struct icp_qat_fw_comp_resp));
#endif

        if (unlikely(cookie->error)) {
                rx_op->status = cookie->error;
                cookie->error = 0;
                ++(*dequeue_err_count);
                rx_op->debug_status = 0;
                rx_op->consumed = 0;
                rx_op->produced = 0;
                *op = (void *)rx_op;
                return 0;
        }

        if (likely(qat_xform->qat_comp_request_type
                        != QAT_COMP_REQUEST_DECOMPRESS)) {
                if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
                                resp_msg->comn_resp.hdr_flags)
                                        == ICP_QAT_FW_COMP_NO_CNV)) {
                        rx_op->status = RTE_COMP_OP_STATUS_ERROR;
                        rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
                        *op = (void *)rx_op;
                        QAT_DP_LOG(ERR, "QAT has wrong firmware");
                        ++(*dequeue_err_count);
                        return 0;
                }
        }

        if (err) {
                if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
                             && (qat_xform->qat_comp_request_type
                                 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
                        QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
                            "small for output, try configuring a larger size");
                }

                int8_t cmp_err_code =
                        (int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
                int8_t xlat_err_code =
                        (int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

                /* Handle the recoverable out-of-buffer condition in the
                 * stateful decompression scenario.
                 */
                if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
                                && qat_xform->qat_comp_request_type
                                        == QAT_COMP_REQUEST_DECOMPRESS
                                && rx_op->op_type == RTE_COMP_OP_STATEFUL) {
                        struct icp_qat_fw_resp_comp_pars *comp_resp =
                                        &resp_msg->comp_resp_pars;
                        rx_op->status =
                                RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
                        rx_op->consumed = comp_resp->input_byte_counter;
                        rx_op->produced = comp_resp->output_byte_counter;
                        stream->start_of_packet = 0;
                } else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
                          && !xlat_err_code)
                                ||
                    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
                                ||
                    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
                     xlat_err_code == ERR_CODE_OVERFLOW_ERROR))
                        rx_op->status =
                                RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
                else
                        rx_op->status = RTE_COMP_OP_STATUS_ERROR;

                ++(*dequeue_err_count);
                rx_op->debug_status =
                        *((uint16_t *)(&resp_msg->comn_resp.comn_error));
        } else {
                struct icp_qat_fw_resp_comp_pars *comp_resp =
                  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

                rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
                rx_op->consumed = comp_resp->input_byte_counter;
                rx_op->produced = comp_resp->output_byte_counter;
                if (stream)
                        stream->start_of_packet = 0;

                if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
                        if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
                                rx_op->output_chksum = comp_resp->curr_crc32;
                        else if (qat_xform->checksum_type ==
                                        RTE_COMP_CHECKSUM_ADLER32)
                                rx_op->output_chksum = comp_resp->curr_adler_32;
                        else
                                rx_op->output_chksum = comp_resp->curr_chksum;
                }
        }
        *op = (void *)rx_op;

        return 0;
}
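
/*
 * Illustration only (not part of the driver): qat_comp_process_response()
 * runs as the response callback while the application dequeues completed
 * operations. A minimal sketch, assuming device 0, queue pair 0 and a
 * hypothetical application helper handle_error():
 *
 *      struct rte_comp_op *done[32];
 *      uint16_t nb = rte_compressdev_dequeue_burst(0, 0, done, 32);
 *      for (uint16_t i = 0; i < nb; i++)
 *              if (done[i]->status != RTE_COMP_OP_STATUS_SUCCESS)
 *                      handle_error(done[i]);
 */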

unsigned int
qat_comp_xform_size(void)
{
        return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

unsigned int
qat_comp_stream_size(void)
{
        return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}

static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
                                    enum qat_comp_request_type request)
{
        if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
                header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
        else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
                header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
        else if (request == QAT_COMP_REQUEST_DECOMPRESS)
                header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
        header->hdr_flags =
            ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

        header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
            QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
                        const struct rte_memzone *interm_buff_mz,
                        const struct rte_comp_xform *xform,
                        const struct qat_comp_stream *stream,
                        enum rte_comp_op_type op_type)
{
        struct icp_qat_fw_comp_req *comp_req;
        int comp_level, algo;
        uint32_t req_par_flags;
        int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

        if (unlikely(qat_xform == NULL)) {
                QAT_LOG(ERR, "Session was not created for this device");
                return -EINVAL;
        }

        if (op_type == RTE_COMP_OP_STATEFUL) {
                if (unlikely(stream == NULL)) {
                        QAT_LOG(ERR, "Stream must be non-null for stateful op");
                        return -EINVAL;
                }
                if (unlikely(qat_xform->qat_comp_request_type !=
                             QAT_COMP_REQUEST_DECOMPRESS)) {
                        QAT_LOG(ERR, "QAT PMD does not support stateful compression");
                        return -ENOTSUP;
                }
        }

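        /*
         * Map public parameters to the QAT HW configuration: decompression
         * always uses the shallowest search depth, while compression levels
         * 1..9 translate to HW search depths (1, 4, 8 or 16) below.
         */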
        if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
                direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
                comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
                req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                                ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
                                ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
                                ICP_QAT_FW_COMP_NO_CNV_RECOVERY);

        } else {
                if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
                else if (xform->compress.level == 1)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
                else if (xform->compress.level == 2)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
                else if (xform->compress.level == 3)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
                else if (xform->compress.level >= 4 &&
                         xform->compress.level <= 9)
                        comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
                else {
                        QAT_LOG(ERR, "compression level not supported");
                        return -EINVAL;
                }
                req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                                ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
                                ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
                                ICP_QAT_FW_COMP_CNV_RECOVERY);
        }

        switch (xform->compress.algo) {
        case RTE_COMP_ALGO_DEFLATE:
                algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
                break;
        case RTE_COMP_ALGO_LZS:
        default:
                /* RTE_COMP_ALGO_NULL */
                QAT_LOG(ERR, "compression algorithm not supported");
                return -EINVAL;
        }

        comp_req = &qat_xform->qat_comp_req_tmpl;

        /* Initialize header */
        qat_comp_create_req_hdr(&comp_req->comn_hdr,
                                        qat_xform->qat_comp_request_type);

        if (op_type == RTE_COMP_OP_STATEFUL) {
                comp_req->comn_hdr.serv_specif_flags =
                                ICP_QAT_FW_COMP_FLAGS_BUILD(
                        ICP_QAT_FW_COMP_STATEFUL_SESSION,
                        ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
                        ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
                        ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
                        ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

                /* Decompression state registers */
                comp_req->comp_cd_ctrl.comp_state_addr =
                                stream->state_registers_decomp_phys;

                /* Enable A, B, C, D, and E (CAMs). */
                comp_req->comp_cd_ctrl.ram_bank_flags =
                        ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
                                ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
                                ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
                                ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
                                ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
                                ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
                                ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
                                ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
                                ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
                                ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */

                comp_req->comp_cd_ctrl.ram_banks_addr =
                                stream->inflate_context_phys;
        } else {
                comp_req->comn_hdr.serv_specif_flags =
                                ICP_QAT_FW_COMP_FLAGS_BUILD(
                        ICP_QAT_FW_COMP_STATELESS_SESSION,
                        ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
                        ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
                        ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
                        ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
        }

        comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
            ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
                direction,
                /* The only valid mode in CPM 1.6 */
                ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
                /* Translate level to depth */
                comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

        comp_req->comp_pars.initial_adler = 1;
        comp_req->comp_pars.initial_crc32 = 0;
        comp_req->comp_pars.req_par_flags = req_par_flags;


        if (qat_xform->qat_comp_request_type ==
                        QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
            qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
                ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
                                            ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
                                            ICP_QAT_FW_SLICE_COMP);
        } else if (qat_xform->qat_comp_request_type ==
                        QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

                ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
                                ICP_QAT_FW_SLICE_XLAT);
                ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
                                ICP_QAT_FW_SLICE_COMP);

                ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
                                ICP_QAT_FW_SLICE_XLAT);

                comp_req->u1.xlt_pars.inter_buff_ptr =
                                interm_buff_mz->phys_addr;
        }

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
        QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
                    sizeof(struct icp_qat_fw_comp_req));
#endif
        return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
                              const struct rte_comp_xform *xform,
                              void **private_xform)
{
        struct qat_comp_dev_private *qat = dev->data->dev_private;

        if (unlikely(private_xform == NULL)) {
                QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
                return -EINVAL;
        }
        if (unlikely(qat->xformpool == NULL)) {
                QAT_LOG(ERR, "QAT device has no private_xform mempool");
                return -ENOMEM;
        }
        if (rte_mempool_get(qat->xformpool, private_xform)) {
                QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
                return -ENOMEM;
        }

        struct qat_comp_xform *qat_xform =
                        (struct qat_comp_xform *)*private_xform;

        if (xform->type == RTE_COMP_COMPRESS) {

                if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
                  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
                                   && qat->interm_buff_mz == NULL))
                        qat_xform->qat_comp_request_type =
                                        QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

                else if ((xform->compress.deflate.huffman ==
                                RTE_COMP_HUFFMAN_DYNAMIC ||
                                xform->compress.deflate.huffman ==
                                                RTE_COMP_HUFFMAN_DEFAULT) &&
                                qat->interm_buff_mz != NULL)

                        qat_xform->qat_comp_request_type =
                                        QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

                else {
                        QAT_LOG(ERR,
                                        "IM buffers needed for dynamic deflate. Set size in config file");
                        /* return the unused object to the pool */
                        rte_mempool_put(qat->xformpool, *private_xform);
                        *private_xform = NULL;
                        return -EINVAL;
                }

                qat_xform->checksum_type = xform->compress.chksum;

        } else {
                qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
                qat_xform->checksum_type = xform->decompress.chksum;
        }

        if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
                                      NULL, RTE_COMP_OP_STATELESS)) {
                QAT_LOG(ERR, "QAT: Problem with setting compression");
                /* return the unused object to the pool */
                rte_mempool_put(qat->xformpool, *private_xform);
                *private_xform = NULL;
                return -EINVAL;
        }
        return 0;
}
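
/*
 * Illustration only (not part of the driver): applications reach
 * qat_comp_private_xform_create() through the compressdev API. A minimal
 * sketch for a stateless dynamic-deflate session, assuming device 0 is
 * already configured:
 *
 *      struct rte_comp_xform xform = {
 *              .type = RTE_COMP_COMPRESS,
 *              .compress = {
 *                      .algo = RTE_COMP_ALGO_DEFLATE,
 *                      .deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *                      .level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *                      .chksum = RTE_COMP_CHECKSUM_NONE,
 *              },
 *      };
 *      void *priv_xform = NULL;
 *      int ret = rte_compressdev_private_xform_create(0, &xform,
 *                                                     &priv_xform);
 */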

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
                            void *private_xform)
{
        struct qat_comp_xform *qat_xform =
                        (struct qat_comp_xform *)private_xform;

        if (qat_xform) {
                memset(qat_xform, 0, qat_comp_xform_size());
                struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

                rte_mempool_put(mp, qat_xform);
                return 0;
        }
        return -EINVAL;
}

/**
 * Reset stream state for the next use.
 *
 * @param stream
 *   handle of pmd's private stream data
 */
static void
qat_comp_stream_reset(struct qat_comp_stream *stream)
{
        if (stream) {
                memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
                stream->start_of_packet = 1;
                stream->op_in_progress = 0;
        }
}

/**
 * Create driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data
 * @param stream
 *   ptr where handle of pmd's private stream data should be stored
 * @return
 *  - Returns 0 if private stream structure has been created successfully.
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
qat_comp_stream_create(struct rte_compressdev *dev,
                       const struct rte_comp_xform *xform,
                       void **stream)
{
        struct qat_comp_dev_private *qat = dev->data->dev_private;
        struct qat_comp_stream *ptr;

        if (unlikely(stream == NULL)) {
                QAT_LOG(ERR, "QAT: stream parameter is NULL");
                return -EINVAL;
        }
        if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
                QAT_LOG(ERR, "QAT: stateful compression not supported");
                return -ENOTSUP;
        }
        if (unlikely(qat->streampool == NULL)) {
                QAT_LOG(ERR, "QAT device has no stream mempool");
                return -ENOMEM;
        }
        if (rte_mempool_get(qat->streampool, stream)) {
                QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
                return -ENOMEM;
        }

711
712         ptr = (struct qat_comp_stream *) *stream;
713         qat_comp_stream_reset(ptr);
714         ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
715         ptr->qat_xform.checksum_type = xform->decompress.chksum;
716
717         if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
718                                       xform, ptr, RTE_COMP_OP_STATEFUL)) {
719                 QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
720                 rte_mempool_put(qat->streampool, *stream);
721                 *stream = NULL;
722                 return -EINVAL;
723         }
724
725         return 0;
726 }
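
/*
 * Illustration only (not part of the driver): a stateful decompression
 * stream is created through the compressdev API and then attached to each
 * operation of the sequence. A minimal sketch, assuming device 0 is
 * already configured:
 *
 *      struct rte_comp_xform xform = {
 *              .type = RTE_COMP_DECOMPRESS,
 *              .decompress = {
 *                      .algo = RTE_COMP_ALGO_DEFLATE,
 *                      .chksum = RTE_COMP_CHECKSUM_NONE,
 *              },
 *      };
 *      void *stream = NULL;
 *      int ret = rte_compressdev_stream_create(0, &xform, &stream);
 *      // per op: op->op_type = RTE_COMP_OP_STATEFUL; op->stream = stream;
 */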

/**
 * Free driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param stream
 *   handle of pmd's private stream data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if the stream cannot be freed due to in-flight operations.
 */
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
{
        if (stream) {
                struct qat_comp_dev_private *qat = dev->data->dev_private;
                qat_comp_stream_reset((struct qat_comp_stream *) stream);
                rte_mempool_put(qat->streampool, stream);
                return 0;
        }
        return -EINVAL;
}