compress/qat: add fallback to fixed compression
dpdk.git: drivers/compress/qat/qat_comp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"

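/**
 * Build a QAT firmware request from a compression operation.
 *
 * Copies the 128-byte request template prepared at session setup, then fills
 * in the per-op fields for either flat or scatter-gather buffers. Dynamic
 * compression requests with a source longer than QAT_FALLBACK_THLD are
 * rewritten to use fixed (static Huffman) compression, as the intermediate
 * buffers may be too small for the dynamically produced data.
 */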
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_xform *qat_xform = op->private_xform;
	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
	struct icp_qat_fw_comp_req *comp_req =
			(struct icp_qat_fw_comp_req *)out_msg;

	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
				"operation requests, op (%p) is not a "
				"stateless operation.", op);
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	if (likely(qat_xform->qat_comp_request_type ==
		    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {

			/* Fallback to fixed compression: route the output
			 * straight from the compression slice to DRAM and
			 * disable the translator (xlt) slice.
			 */
			comp_req->comn_hdr.service_cmd_id =
					ICP_QAT_FW_COMP_CMD_STATIC;

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);

			ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);
			ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
					ICP_QAT_FW_SLICE_NULL);

			QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed "
				   "compression! The IM buffer may be too "
				   "small for the produced data. Please use "
				   "an input buffer shorter than %d bytes",
				   QAT_FALLBACK_THLD);
		}
	}

	/* common for SGL and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				&cookie->qat_sgl_src,
				op->src.length,
				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD: cannot fill source SGL array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return ret;
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				&cookie->qat_sgl_dst,
				comp_req->comp_pars.out_buffer_sz,
				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD: cannot fill destination SGL array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return ret;
		}

		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

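/**
 * Translate a QAT firmware response back into the originating operation.
 *
 * Recovers the rte_comp_op from the opaque data, maps the firmware status
 * bits onto rte_comp status codes (distinguishing out-of-space from other
 * errors) and, on success, fills in the consumed/produced byte counts and
 * the requested checksum.
 */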
int
qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
				(rx_op->private_xform);
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
	    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif

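	/* A compression response is expected to carry the CNV
	 * (compress-and-verify) flag; if it is absent, the loaded firmware
	 * does not support the features this PMD relies on.
	 */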
	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR, "QAT has wrong firmware");
			++(*dequeue_err_count);
			return 0;
		}
	}

	if (err) {
		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
			     && (qat_xform->qat_comp_request_type
				 == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
			    "small for output, try configuring a larger size");
		}

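		/* An overflow status from either the compression or the
		 * translator slice means the destination buffer ran out of
		 * space; any other combination is a hard error.
		 */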
		int8_t cmp_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
		int8_t xlat_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

		if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code)
				||
		    (!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
				||
		    (cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
		     xlat_err_code == ERR_CODE_OVERFLOW_ERROR))
			rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		else
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;

		++(*dequeue_err_count);
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
		  (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	*op = (void *)rx_op;

	return 0;
}

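/** Size of the driver's private xform data, rounded up to an 8-byte multiple. */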
unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

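/* Fill in the common request header fields for the given request type. */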
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
	    ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
	    QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}

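/**
 * Pre-build the firmware request template stored in the private xform.
 *
 * Translates the compressdev level into a QAT search depth, builds the
 * request parameter flags and the slice chain (compression straight to DRAM,
 * or compression followed by the translator slice for dynamic Huffman), so
 * that the data path only has to copy the template and patch in per-op
 * fields.
 */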
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			const struct rte_memzone *interm_buff_mz,
			const struct rte_comp_xform *xform)
{
	struct icp_qat_fw_comp_req *comp_req;
	int comp_level, algo;
	uint32_t req_par_flags;
	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);

	} else {
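		/* Map the compressdev level (1..9) onto the QAT HW search
		 * depths; the PMD default uses depth 8.
		 */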
		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level == 1)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		else if (xform->compress.level == 2)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
		else if (xform->compress.level == 3)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level >= 4 &&
			 xform->compress.level <= 9)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
		else {
			QAT_LOG(ERR, "compression level not supported");
			return -EINVAL;
		}
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	switch (xform->compress.algo) {
	case RTE_COMP_ALGO_DEFLATE:
		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
		break;
	case RTE_COMP_ALGO_LZS:
	default:
		/* including RTE_COMP_ALGO_NULL */
		QAT_LOG(ERR, "compression algorithm not supported");
		return -EINVAL;
	}

	comp_req = &qat_xform->qat_comp_req_tmpl;

	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
					qat_xform->qat_comp_request_type);

	comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
	    ICP_QAT_FW_COMP_STATELESS_SESSION,
	    ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
	    ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
	    ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
		direction,
		/* the only valid mode on CPM 1.6 */
		ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
		/* translate level to depth */
		comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;

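	/* Chain the processing slices: fixed compression and decompression
	 * write straight to DRAM, while dynamic compression goes through
	 * the translator slice and needs the intermediate buffers.
	 */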
	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
					    ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);

		comp_req->u1.xlt_pars.inter_buff_ptr =
				interm_buff_mz->phys_addr;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - 0 if successful, with a valid private_xform handle stored
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;

	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}

	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;

	if (xform->type == RTE_COMP_COMPRESS) {

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
				   && qat->interm_buff_mz == NULL))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		else if ((xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC ||
				xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DEFAULT) &&
				qat->interm_buff_mz != NULL)

			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

		else {
			QAT_LOG(ERR,
				"IM buffers needed for dynamic deflate. Set size in config file");
			/* return the unused object to the mempool */
			rte_mempool_put(qat->xformpool, *private_xform);
			return -EINVAL;
		}

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
		QAT_LOG(ERR, "QAT: failed to create compression request templates");
		/* return the unused object to the mempool */
		rte_mempool_put(qat->xformpool, *private_xform);
		return -EINVAL;
	}
	return 0;
}
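
/*
 * Usage sketch (illustrative only, not part of the driver): roughly how an
 * application would request the dynamic-Huffman path that the fallback above
 * protects. The dev_id value and the error handling are assumed.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.chksum = RTE_COMP_CHECKSUM_NONE,
 *		},
 *	};
 *	void *priv_xform = NULL;
 *
 *	if (rte_compressdev_private_xform_create(dev_id, &xform,
 *			&priv_xform) < 0)
 *		rte_exit(EXIT_FAILURE, "xform creation failed\n");
 *
 * Stateless ops enqueued with priv_xform whose src.length exceeds
 * QAT_FALLBACK_THLD are transparently compressed with static Huffman codes.
 */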

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}