drivers/compress/qat/qat_comp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

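/*
 * Build a QAT firmware compression/decompression request descriptor in
 * out_msg from a rte_comp_op: start from the session/stream request
 * template, then fill in the per-operation buffers, lengths and flags.
 */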
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	const uint8_t *tmpl;
	struct icp_qat_fw_comp_req *comp_req =
	    (struct icp_qat_fw_comp_req *)out_msg;

	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = op->stream;
		qat_xform = &stream->qat_xform;
		if (unlikely(qat_xform->qat_comp_request_type !=
			     QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		if (unlikely(stream->op_in_progress)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
			return -EINVAL;
		}
		stream->op_in_progress = 1;
	} else {
		stream = NULL;
		qat_xform = op->private_xform;
	}
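	/* Load the request template prepared at session/stream setup and
	 * stash the op pointer in opaque_data so it can be recovered when
	 * the response is dequeued.
	 */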
	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

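	/* For stateful decompression the SOP/EOP request flags are derived
	 * per operation from the stream position and the flush flag; BFINAL
	 * and compress-and-verify are not used on this path.
	 */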
	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				(stream->start_of_packet) ?
					ICP_QAT_FW_COMP_SOP
				      : ICP_QAT_FW_COMP_NOT_SOP,
				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
					ICP_QAT_FW_COMP_EOP
				      : ICP_QAT_FW_COMP_NOT_EOP,
				ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
	}

	if (likely(qat_xform->qat_comp_request_type ==
		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {

			/* fallback to fixed compression */
			comp_req->comn_hdr.service_cmd_id =
					ICP_QAT_FW_COMP_CMD_STATIC;

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);
			ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);

			QAT_DP_LOG(DEBUG, "QAT PMD: falling back to fixed "
				   "compression - IM buffer may be too small "
				   "for the produced data. Use an input "
				   "buffer shorter than %d bytes",
				   QAT_FALLBACK_THLD);
		}
	}

	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

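	/* Chained (multi-segment) mbufs on either side require scatter-gather
	 * lists; the cookie holds SGL arrays that are grown on demand below.
	 */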
	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
			/* we need to allocate more elements in SGL*/
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
					  sizeof(struct qat_sgl) +
					  sizeof(struct qat_flat_buf) *
					  op->m_src->nb_segs, 64,
					  cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_src->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
			cookie->src_nb_elems = op->m_src->nb_segs;
			cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
		}

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				cookie->qat_sgl_src_d,
				op->src.length,
				cookie->src_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
			/* we need to allocate more elements in SGL*/
			struct qat_sgl *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
					  sizeof(struct qat_sgl) +
					  sizeof(struct qat_flat_buf) *
					  op->m_dst->nb_segs, 64,
					  cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
					   " for %d elements of SGL",
					   op->m_dst->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
			cookie->dst_nb_elems = op->m_dst->nb_segs;
			cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				cookie->qat_sgl_dst_d,
				comp_req->comp_pars.out_buffer_sz,
				cookie->dst_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}

	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
		/* QAT doesn't support dest. buffer lower
		 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark
		 * by converting this request to the null one
		 * and check the status in the response.
		 */
		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

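/*
 * Parse a QAT firmware response: translate the firmware status and error
 * codes into a rte_comp_op status and return the finished op to the caller
 * through *op.
 */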
int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
			  uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = rx_op->stream;
		qat_xform = &stream->qat_xform;
		/* clear op-in-progress flag */
		stream->op_in_progress = 0;
	} else {
		stream = NULL;
		qat_xform = rx_op->private_xform;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
	    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG,  "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif

	if (unlikely(cookie->error)) {
		rx_op->status = cookie->error;
		cookie->error = 0;
		++(*dequeue_err_count);
		rx_op->debug_status = 0;
		rx_op->consumed = 0;
		rx_op->produced = 0;
		*op = (void *)rx_op;
		return 0;
	}

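	/* For compression requests, the response must carry the
	 * compress-and-verify (CNV) header flag; its absence indicates
	 * firmware that cannot verify its output, so the op is failed.
	 */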
	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR, "QAT has wrong firmware");
			++(*dequeue_err_count);
			return 0;
		}
	}

	if (err) {
		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
			     && (qat_xform->qat_comp_request_type
				 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
			    "small for output, try configuring a larger size");
		}

		int8_t cmp_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
		int8_t xlat_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

		/* handle recoverable out-of-buffer condition in stateful */
		/* decompression scenario */
		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
				&& qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_DECOMPRESS
				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
			struct icp_qat_fw_resp_comp_pars *comp_resp =
					&resp_msg->comp_resp_pars;
			rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
			rx_op->consumed = comp_resp->input_byte_counter;
			rx_op->produced = comp_resp->output_byte_counter;
			stream->start_of_packet = 0;
		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
			  && !xlat_err_code)
				||
		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
				||
		    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
		     xlat_err_code == ERR_CODE_OVERFLOW_ERROR)){

			struct icp_qat_fw_resp_comp_pars *comp_resp =
	  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

			/* handle recoverable out-of-buffer condition */
			/* in stateless compression scenario */
			if (comp_resp->input_byte_counter) {
				if ((qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_FIXED_COMP_STATELESS) ||
				    (qat_xform->qat_comp_request_type
				== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {

					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
					rx_op->consumed =
						comp_resp->input_byte_counter;
					rx_op->produced =
						comp_resp->output_byte_counter;
				} else
					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			} else
				rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		} else
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;

		++(*dequeue_err_count);
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
		  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;
		if (stream)
			stream->start_of_packet = 0;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	*op = (void *)rx_op;

	return 0;
}

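/* 8-byte aligned sizes of the PMD's private xform and stream structures,
 * used by the PMD when sizing its mempool elements.
 */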
unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

unsigned int
qat_comp_stream_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}

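/* Fill the common request header: service command id for the requested
 * operation type, plus the service type, header flags and default
 * pointer-type flags.
 */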
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
	    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
	    QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

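/*
 * Build the firmware request template stored in the xform; for stateful
 * decompression also wire in the stream's state registers and RAM banks.
 * The template is copied into every request built for this session/stream
 * by qat_comp_build_request().
 */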
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			const struct rte_memzone *interm_buff_mz,
			const struct rte_comp_xform *xform,
			const struct qat_comp_stream *stream,
			enum rte_comp_op_type op_type)
{
	struct icp_qat_fw_comp_req *comp_req;
	int comp_level, algo;
	uint32_t req_par_flags;
	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (op_type == RTE_COMP_OP_STATEFUL) {
		if (unlikely(stream == NULL)) {
			QAT_LOG(ERR, "Stream must be non null for stateful op");
			return -EINVAL;
		}
		if (unlikely(qat_xform->qat_comp_request_type !=
			     QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
			return -ENOTSUP;
		}
	}

	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);

	} else {
		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level == 1)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		else if (xform->compress.level == 2)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
		else if (xform->compress.level == 3)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level >= 4 &&
			 xform->compress.level <= 9)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
		else {
			QAT_LOG(ERR, "compression level not supported");
			return -EINVAL;
		}
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	switch (xform->compress.algo) {
	case RTE_COMP_ALGO_DEFLATE:
		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
		break;
	case RTE_COMP_ALGO_LZS:
	default:
		/* RTE_COMP_NULL */
		QAT_LOG(ERR, "compression algorithm not supported");
		return -EINVAL;
	}

	comp_req = &qat_xform->qat_comp_req_tmpl;

	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
					qat_xform->qat_comp_request_type);

	if (op_type == RTE_COMP_OP_STATEFUL) {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATEFUL_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

		/* Decompression state registers */
		comp_req->comp_cd_ctrl.comp_state_addr =
				stream->state_registers_decomp_phys;

		/* Enable A, B, C, D, and E (CAMs). */
		comp_req->comp_cd_ctrl.ram_bank_flags =
			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */

		comp_req->comp_cd_ctrl.ram_banks_addr =
				stream->inflate_context_phys;
	} else {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATELESS_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
	}

	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
		direction,
		/* Delayed match is the only valid mode on CPM 1.6 */
		ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
		/* Translate level to depth */
		comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;


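	/* Chain the firmware slices: static compression and decompression go
	 * straight from the compression slice to the DRAM write-back, while
	 * dynamic compression routes through the translator (XLAT) slice and
	 * its intermediate buffer before the DRAM write-back.
	 */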
	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);

		comp_req->u1.xlt_pars.inter_buff_ptr =
				interm_buff_mz->phys_addr;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;

	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}

	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;

	if (xform->type == RTE_COMP_COMPRESS) {

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
				   && qat->interm_buff_mz == NULL))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		else if ((xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC ||
				xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DEFAULT) &&
				qat->interm_buff_mz != NULL)

			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

		else {
			QAT_LOG(ERR,
					"IM buffers needed for dynamic deflate. Set size in config file");
			/* return the unused object to the pool on error */
			rte_mempool_put(qat->xformpool, *private_xform);
			*private_xform = NULL;
			return -EINVAL;
		}

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
				      NULL, RTE_COMP_OP_STATELESS)) {
		QAT_LOG(ERR, "QAT: failed to create compression request template");
		/* return the unused object to the pool on error */
		rte_mempool_put(qat->xformpool, *private_xform);
		*private_xform = NULL;
		return -EINVAL;
	}
	return 0;
}

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}

/**
 * Reset stream state for the next use.
 *
 * @param stream
 *   handle of pmd's private stream data
 */
static void
qat_comp_stream_reset(struct qat_comp_stream *stream)
{
	if (stream) {
		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
		stream->start_of_packet = 1;
		stream->op_in_progress = 0;
	}
}

/**
 * Create driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data
 * @param stream
 *   ptr where handle of pmd's private stream data should be stored
 * @return
 *  - Returns 0 if private stream structure has been created successfully.
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
qat_comp_stream_create(struct rte_compressdev *dev,
		       const struct rte_comp_xform *xform,
		       void **stream)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;
	struct qat_comp_stream *ptr;

	if (unlikely(stream == NULL)) {
		QAT_LOG(ERR, "QAT: stream parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
		QAT_LOG(ERR, "QAT: stateful compression not supported");
		return -ENOTSUP;
	}
	if (unlikely(qat->streampool == NULL)) {
		QAT_LOG(ERR, "QAT device has no stream mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->streampool, stream)) {
		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
		return -ENOMEM;
	}

	ptr = (struct qat_comp_stream *) *stream;
	qat_comp_stream_reset(ptr);
	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
	ptr->qat_xform.checksum_type = xform->decompress.chksum;

	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
				      xform, ptr, RTE_COMP_OP_STATEFUL)) {
		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
		rte_mempool_put(qat->streampool, *stream);
		*stream = NULL;
		return -EINVAL;
	}

	return 0;
}

/**
 * Free driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param stream
 *   handle of pmd's private stream data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
{
	if (stream) {
		struct qat_comp_dev_private *qat = dev->data->dev_private;
		qat_comp_stream_reset((struct qat_comp_stream *) stream);
		rte_mempool_put(qat->streampool, stream);
		return 0;
	}
	return -EINVAL;
}