/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Intel Corporation
 */

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_hexdump.h>
#include <rte_comp.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>

#include "qat_logs.h"
#include "qat_comp.h"
#include "qat_comp_pmd.h"
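
/* Convert a request that was templated for dynamic (Huffman) compression
 * into a fixed (static) compression request: switch the firmware command id
 * and detach the translator slice from the slice chain.
 */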
static void
qat_comp_fallback_to_fixed(struct icp_qat_fw_comp_req *comp_req)
{
	QAT_DP_LOG(DEBUG, "QAT PMD: fallback to fixed compression!");

	comp_req->comn_hdr.service_cmd_id =
			ICP_QAT_FW_COMP_CMD_STATIC;

	ICP_QAT_FW_COMN_NEXT_ID_SET(
			&comp_req->comp_cd_ctrl,
			ICP_QAT_FW_SLICE_DRAM_WR);

	ICP_QAT_FW_COMN_NEXT_ID_SET(
			&comp_req->u2.xlt_cd_ctrl,
			ICP_QAT_FW_SLICE_NULL);
	ICP_QAT_FW_COMN_CURR_ID_SET(
			&comp_req->u2.xlt_cd_ctrl,
			ICP_QAT_FW_SLICE_NULL);
}
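
/* Free the per-child destination memzones of a split operation and the
 * table holding the pointers to them.
 */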
static void
qat_comp_free_split_op_memzones(struct qat_comp_op_cookie *cookie,
				unsigned int nb_children)
{
	unsigned int i;

	/* free all memzones allocated for child descriptors */
	for (i = 0; i < nb_children; i++)
		rte_memzone_free(cookie->dst_memzones[i]);

	/* and free the pointer table */
	rte_free(cookie->dst_memzones);
	cookie->dst_memzones = NULL;
}
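
/* Reserve one IOVA-contiguous memzone per child descriptor of a split
 * operation (nb_descriptors_needed - 1 children) to hold the children's
 * intermediate destination data, plus the table of memzone pointers.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */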
static int
qat_comp_allocate_split_op_memzones(struct qat_comp_op_cookie *cookie,
				    unsigned int nb_descriptors_needed)
{
	struct qat_queue *txq = &(cookie->qp->tx_q);
	char dst_memz_name[RTE_MEMZONE_NAMESIZE];
	unsigned int i;

	/* allocate the array of memzone pointers */
	cookie->dst_memzones = rte_zmalloc_socket("qat PMD im buf mz pointers",
			(nb_descriptors_needed - 1) *
				sizeof(const struct rte_memzone *),
			RTE_CACHE_LINE_SIZE, cookie->socket_id);

	if (cookie->dst_memzones == NULL) {
		QAT_DP_LOG(ERR,
				"QAT PMD: failed to allocate im buf mz pointers");
		return -ENOMEM;
	}

	for (i = 0; i < nb_descriptors_needed - 1; i++) {
		/* memzone name must be unique per device/queue/cookie/child */
		snprintf(dst_memz_name,
				sizeof(dst_memz_name),
				"dst_%u_%u_%u_%u_%u",
				cookie->qp->qat_dev->qat_dev_id,
				txq->hw_bundle_number, txq->hw_queue_number,
				cookie->cookie_index, i);

		cookie->dst_memzones[i] = rte_memzone_reserve_aligned(
				dst_memz_name, RTE_PMD_QAT_COMP_IM_BUFFER_SIZE,
				cookie->socket_id, RTE_MEMZONE_IOVA_CONTIG,
				RTE_CACHE_LINE_SIZE);

		if (cookie->dst_memzones[i] == NULL) {
			QAT_DP_LOG(ERR,
				"QAT PMD: failed to allocate dst buffer memzone");

			/* let's free all memzones allocated up to now */
			qat_comp_free_split_op_memzones(cookie, i);

			return -ENOMEM;
		}
	}

	return 0;
}
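
/* Build a single firmware request descriptor in out_msg for the given
 * rte_comp_op. Returns 0 on success, a negative errno on failure, or - for
 * stateless dynamic compression whose source exceeds QAT_FALLBACK_THLD -
 * the number of descriptors the operation must be split into (handled by
 * qat_comp_build_multiple_requests).
 */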
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	const uint8_t *tmpl;
	struct icp_qat_fw_comp_req *comp_req =
			(struct icp_qat_fw_comp_req *)out_msg;

	if (op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = op->stream;
		qat_xform = &stream->qat_xform;
		if (unlikely(qat_xform->qat_comp_request_type !=
				QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support stateful compression");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return -EINVAL;
		}
		if (unlikely(stream->op_in_progress)) {
			QAT_DP_LOG(ERR, "QAT PMD does not support running multiple stateful operations on the same stream at once");
			op->status = RTE_COMP_OP_STATUS_INVALID_STATE;
			return -EINVAL;
		}
		stream->op_in_progress = 1;
	} else {
		stream = NULL;
		qat_xform = op->private_xform;
	}
	tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	if (likely(qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {

		if (unlikely(op->src.length > QAT_FALLBACK_THLD)) {
			/* the operation must be split into pieces */
			if (qat_xform->checksum_type !=
					RTE_COMP_CHECKSUM_NONE) {
				/* fallback to fixed compression in case any
				 * checksum calculation was requested
				 */
				qat_comp_fallback_to_fixed(comp_req);
			} else {
				/* calculate num. of descriptors for split op */
				unsigned int nb_descriptors_needed =
					op->src.length / QAT_FALLBACK_THLD + 1;
				/* allocate memzone for output data */
				if (qat_comp_allocate_split_op_memzones(
						cookie, nb_descriptors_needed)) {
					/* out of memory, fallback to fixed */
					qat_comp_fallback_to_fixed(comp_req);
				} else {
					QAT_DP_LOG(DEBUG,
							"Input data is too big, op must be split into %u descriptors",
							nb_descriptors_needed);
					return (int) nb_descriptors_needed;
				}
			}
		}

		/* set BFINAL bit according to flush_flag */
		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP,
				ICP_QAT_FW_COMP_EOP,
				op->flush_flag == RTE_COMP_FLUSH_FINAL ?
					ICP_QAT_FW_COMP_BFINAL
					: ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);

	} else if (op->op_type == RTE_COMP_OP_STATEFUL) {

		comp_req->comp_pars.req_par_flags =
			ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				(stream->start_of_packet) ?
					ICP_QAT_FW_COMP_SOP
					: ICP_QAT_FW_COMP_NOT_SOP,
				(op->flush_flag == RTE_COMP_FLUSH_FULL ||
				 op->flush_flag == RTE_COMP_FLUSH_FINAL) ?
					ICP_QAT_FW_COMP_EOP
					: ICP_QAT_FW_COMP_NOT_EOP,
				ICP_QAT_FW_COMP_NOT_BFINAL,
				ICP_QAT_FW_COMP_NO_CNV,
				ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
	}

	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		if (unlikely(op->m_src->nb_segs > cookie->src_nb_elems)) {
			/* we need to allocate more elements in SGL */
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_src_d,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					op->m_src->nb_segs, 64,
					cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
						" for %d elements of SGL",
						op->m_src->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_src_d = (struct qat_sgl *)tmp;
			cookie->src_nb_elems = op->m_src->nb_segs;
			cookie->qat_sgl_src_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_src_d);
		}

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				cookie->qat_sgl_src_d,
				op->src.length,
				cookie->src_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		if (unlikely(op->m_dst->nb_segs > cookie->dst_nb_elems)) {
			/* we need to allocate more elements in SGL */
			void *tmp;

			tmp = rte_realloc_socket(cookie->qat_sgl_dst_d,
					sizeof(struct qat_sgl) +
					sizeof(struct qat_flat_buf) *
					op->m_dst->nb_segs, 64,
					cookie->socket_id);

			if (unlikely(tmp == NULL)) {
				QAT_DP_LOG(ERR, "QAT PMD can't allocate memory"
						" for %d elements of SGL",
						op->m_dst->nb_segs);
				op->status = RTE_COMP_OP_STATUS_ERROR;
				/* clear op-in-progress flag */
				if (stream)
					stream->op_in_progress = 0;
				return -ENOMEM;
			}
			/* new SGL is valid now */
			cookie->qat_sgl_dst_d = (struct qat_sgl *)tmp;
			cookie->dst_nb_elems = op->m_dst->nb_segs;
			cookie->qat_sgl_dst_phys_addr =
				rte_malloc_virt2iova(cookie->qat_sgl_dst_d);
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				cookie->qat_sgl_dst_d,
				comp_req->comp_pars.out_buffer_sz,
				cookie->dst_nb_elems);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			/* clear op-in-progress flag */
			if (stream)
				stream->op_in_progress = 0;
			return ret;
		}

		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
				rte_pktmbuf_iova_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
				rte_pktmbuf_iova_offset(op->m_dst, op->dst.offset);
	}

	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
		/* QAT doesn't support dest. buffer lower
		 * than QAT_MIN_OUT_BUF_SIZE. Propagate error mark
		 * by converting this request to the null one
		 * and check the status in the response.
		 */
		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
			qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			"decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
			sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}
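
/* Ring size is a power of two, so masking with modulo_mask is equivalent to
 * a modulo operation and avoids a division on the datapath.
 */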
static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
	return data & modulo_mask;
}
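
/* Advance (*mbuf, *offset) by len bytes within an mbuf chain, walking to the
 * segment that contains the resulting position.
 */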
static void
qat_comp_mbuf_skip(struct rte_mbuf **mbuf, uint32_t *offset, uint32_t len)
{
	while (*offset + len >= rte_pktmbuf_data_len(*mbuf)) {
		len -= (rte_pktmbuf_data_len(*mbuf) - *offset);
		*mbuf = (*mbuf)->next;
		*offset = 0;
	}
	*offset = *offset + len;
}
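
/* Enqueue a split operation: build the parent descriptor at parent_tail and
 * one child descriptor (with its own cookie and intermediate dst memzone) for
 * each remaining QAT_FALLBACK_THLD-sized chunk of the source data. Returns
 * the number of descriptors built, or a negative errno on failure.
 */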
static int
qat_comp_build_multiple_requests(void *in_op, struct qat_qp *qp,
				 uint32_t parent_tail, int nb_descr)
{
	struct rte_comp_op op_backup;
	struct rte_mbuf dst_mbuf;
	struct rte_comp_op *op = in_op;
	struct qat_queue *txq = &(qp->tx_q);
	uint8_t *base_addr = (uint8_t *)txq->base_addr;
	uint8_t *out_msg = base_addr + parent_tail;
	uint32_t tail = parent_tail;
	struct icp_qat_fw_comp_req *comp_req =
			(struct icp_qat_fw_comp_req *)out_msg;
	struct qat_comp_op_cookie *parent_cookie =
			(struct qat_comp_op_cookie *)
			qp->op_cookies[parent_tail / txq->msg_size];
	struct qat_comp_op_cookie *child_cookie;
	uint16_t dst_data_size =
			RTE_MIN(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE, 65535);
	uint32_t data_to_enqueue = op->src.length - QAT_FALLBACK_THLD;
	int num_descriptors_built = 1;
	int ret;

	QAT_DP_LOG(DEBUG, "op %p, parent_cookie %p", op, parent_cookie);

	/* copy original op to the local variable for restoring later */
	rte_memcpy(&op_backup, op, sizeof(op_backup));

	parent_cookie->nb_child_responses = 0;
	parent_cookie->nb_children = 0;
	parent_cookie->split_op = 1;
	parent_cookie->dst_data = op->m_dst;
	parent_cookie->dst_data_offset = op->dst.offset;

	op->src.length = QAT_FALLBACK_THLD;
	op->flush_flag = RTE_COMP_FLUSH_FULL;

	QAT_DP_LOG(DEBUG, "parent op src len %u dst len %u",
			op->src.length, op->m_dst->pkt_len);

	ret = qat_comp_build_request(in_op, out_msg, parent_cookie,
			qp->qat_dev_gen);
	if (ret != 0) {
		/* restore op and clear cookie */
		QAT_DP_LOG(WARNING, "Failed to build parent descriptor");
		op->src.length = op_backup.src.length;
		op->flush_flag = op_backup.flush_flag;
		parent_cookie->split_op = 0;
		return ret;
	}

	/* prepare local dst mbuf */
	rte_memcpy(&dst_mbuf, op->m_dst, sizeof(dst_mbuf));
	rte_pktmbuf_reset(&dst_mbuf);
	dst_mbuf.buf_len = dst_data_size;
	dst_mbuf.data_len = dst_data_size;
	dst_mbuf.pkt_len = dst_data_size;
	dst_mbuf.data_off = 0;

	/* update op for the child operations */
	op->m_dst = &dst_mbuf;
	op->dst.offset = 0;

	while (data_to_enqueue) {
		const struct rte_memzone *mz =
			parent_cookie->dst_memzones[num_descriptors_built - 1];
		uint32_t src_data_size = RTE_MIN(data_to_enqueue,
				QAT_FALLBACK_THLD);
		uint32_t cookie_index;

		/* update params for the next op */
		op->src.offset += QAT_FALLBACK_THLD;
		op->src.length = src_data_size;
		op->flush_flag = (src_data_size == data_to_enqueue) ?
			op_backup.flush_flag : RTE_COMP_FLUSH_FULL;

		/* update dst mbuf for the next op (use memzone for dst data) */
		dst_mbuf.buf_addr = mz->addr;
		dst_mbuf.buf_iova = mz->iova;

		/* move the tail and calculate next cookie index */
		tail = adf_modulo(tail + txq->msg_size, txq->modulo_mask);
		cookie_index = tail / txq->msg_size;
		child_cookie = (struct qat_comp_op_cookie *)
				qp->op_cookies[cookie_index];
		comp_req = (struct icp_qat_fw_comp_req *)(base_addr + tail);

		/* update child cookie */
		child_cookie->split_op = 1; /* must be set for child as well */
		child_cookie->parent_cookie = parent_cookie; /* same as above */
		child_cookie->nb_children = 0;
		child_cookie->dest_buffer = mz->addr;

		QAT_DP_LOG(DEBUG,
				"cookie_index %u, child_cookie %p, comp_req %p",
				cookie_index, child_cookie, comp_req);
		QAT_DP_LOG(DEBUG,
				"data_to_enqueue %u, num_descriptors_built %d",
				data_to_enqueue, num_descriptors_built);
		QAT_DP_LOG(DEBUG, "child op src len %u dst len %u",
				op->src.length, op->m_dst->pkt_len);

		/* build the request */
		ret = qat_comp_build_request(op, (uint8_t *)comp_req,
				child_cookie, qp->qat_dev_gen);
		if (ret < 0) {
			QAT_DP_LOG(WARNING, "Failed to build child descriptor");
			/* restore op and clear cookie */
			rte_memcpy(op, &op_backup, sizeof(op_backup));
			parent_cookie->split_op = 0;
			parent_cookie->nb_children = 0;
			return ret;
		}

		data_to_enqueue -= src_data_size;
		num_descriptors_built++;
	}

	/* restore backed up original op */
	rte_memcpy(op, &op_backup, sizeof(op_backup));

	if (nb_descr != num_descriptors_built)
		QAT_DP_LOG(ERR, "split op. expected %d, built %d",
				nb_descr, num_descriptors_built);

	parent_cookie->nb_children = num_descriptors_built - 1;
	return num_descriptors_built;
}
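
/* Copy a child operation's produced data from its intermediate destination
 * buffer into the parent operation's (possibly segmented) destination mbuf,
 * then advance the parent cookie's dst_data/dst_data_offset for the next
 * child.
 */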
static void
qat_comp_response_data_copy(struct qat_comp_op_cookie *cookie,
		struct rte_comp_op *rx_op)
{
	struct qat_comp_op_cookie *pc = cookie->parent_cookie;
	struct rte_mbuf *sgl_buf = pc->dst_data;
	void *op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *,
			pc->dst_data_offset);

	/* number of bytes left in the current segment */
	uint32_t left_in_current = rte_pktmbuf_data_len(sgl_buf) -
			pc->dst_data_offset;

	uint32_t prod, sent;

	if (rx_op->produced <= left_in_current) {
		rte_memcpy(op_dst_addr, cookie->dest_buffer,
				rx_op->produced);
		/* calculate dst mbuf and offset for the next child op */
		if (rx_op->produced == left_in_current) {
			pc->dst_data = sgl_buf->next;
			pc->dst_data_offset = 0;
		} else
			pc->dst_data_offset += rx_op->produced;
	} else {
		rte_memcpy(op_dst_addr, cookie->dest_buffer,
				left_in_current);
		sgl_buf = sgl_buf->next;
		prod = rx_op->produced - left_in_current;
		sent = left_in_current;
		while (prod > rte_pktmbuf_data_len(sgl_buf)) {
			op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf,
					uint8_t *, 0);

			rte_memcpy(op_dst_addr,
					((uint8_t *)cookie->dest_buffer) +
					sent,
					rte_pktmbuf_data_len(sgl_buf));

			prod -= rte_pktmbuf_data_len(sgl_buf);
			sent += rte_pktmbuf_data_len(sgl_buf);

			sgl_buf = sgl_buf->next;
		}

		op_dst_addr = rte_pktmbuf_mtod_offset(sgl_buf, uint8_t *, 0);

		rte_memcpy(op_dst_addr,
				((uint8_t *)cookie->dest_buffer) + sent,
				prod);

		/* calculate dst mbuf and offset for the next child op */
		if (prod == rte_pktmbuf_data_len(sgl_buf)) {
			pc->dst_data = sgl_buf->next;
			pc->dst_data_offset = 0;
		} else {
			pc->dst_data = sgl_buf;
			pc->dst_data_offset = prod;
		}
	}
}
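
/* Process a single firmware response: translate status/error codes into the
 * rte_comp_op status, fill consumed/produced counters and checksums, and
 * handle split operations by accumulating child responses into the parent
 * cookie. Returns the number of ops handed back to the caller (0 while a
 * split op is still being collected).
 */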
int
qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
			  uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	struct icp_qat_fw_resp_comp_pars *comp_resp1 =
		(struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

	QAT_DP_LOG(DEBUG, "input counter = %u, output counter = %u",
			comp_resp1->input_byte_counter,
			comp_resp1->output_byte_counter);
#endif
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_stream *stream;
	struct qat_comp_xform *qat_xform;
	int err = resp_msg->comn_resp.comn_status &
			((1 << QAT_COMN_RESP_CMP_STATUS_BITPOS) |
			 (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS));

	if (rx_op->op_type == RTE_COMP_OP_STATEFUL) {
		stream = rx_op->stream;
		qat_xform = &stream->qat_xform;
		/* clear op-in-progress flag */
		stream->op_in_progress = 0;
	} else {
		stream = NULL;
		qat_xform = rx_op->private_xform;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
			qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			"decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
			sizeof(struct icp_qat_fw_comp_resp));
#endif

	if (unlikely(cookie->error)) {
		rx_op->status = cookie->error;
		cookie->error = 0;
		++(*dequeue_err_count);
		rx_op->debug_status = 0;
		rx_op->consumed = 0;
		rx_op->produced = 0;
		*op = (void *)rx_op;
		/* also in this case the number of returned ops
		 * must be equal to one,
		 * appropriate status (error) must be set as well
		 */
		return 1;
	}

	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
				resp_msg->comn_resp.hdr_flags)
					== ICP_QAT_FW_COMP_NO_CNV)) {
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;
			rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
			*op = (void *)rx_op;
			QAT_DP_LOG(ERR, "QAT has wrong firmware");
			++(*dequeue_err_count);
			return 1;
		}
	}

	if (err) {
		if (unlikely((err & (1 << QAT_COMN_RESP_XLAT_STATUS_BITPOS))
				&& (qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS))) {
			QAT_DP_LOG(ERR, "QAT intermediate buffer may be too "
					"small for output, try configuring a larger size");
		}

		int8_t cmp_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.cmp_err_code;
		int8_t xlat_err_code =
			(int8_t)resp_msg->comn_resp.comn_error.xlat_err_code;

		/* handle recoverable out-of-buffer condition in stateful
		 * decompression scenario
		 */
		if (cmp_err_code == ERR_CODE_OVERFLOW_ERROR && !xlat_err_code
				&& qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_DECOMPRESS
				&& rx_op->op_type == RTE_COMP_OP_STATEFUL) {
			struct icp_qat_fw_resp_comp_pars *comp_resp =
					&resp_msg->comp_resp_pars;
			rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
			rx_op->consumed = comp_resp->input_byte_counter;
			rx_op->produced = comp_resp->output_byte_counter;
			stream->start_of_packet = 0;
		} else if ((cmp_err_code == ERR_CODE_OVERFLOW_ERROR
				&& !xlat_err_code)
				||
			(!cmp_err_code && xlat_err_code == ERR_CODE_OVERFLOW_ERROR)
				||
			(cmp_err_code == ERR_CODE_OVERFLOW_ERROR &&
				xlat_err_code == ERR_CODE_OVERFLOW_ERROR)) {

			struct icp_qat_fw_resp_comp_pars *comp_resp =
					(struct icp_qat_fw_resp_comp_pars *)
					&resp_msg->comp_resp_pars;

			/* handle recoverable out-of-buffer condition
			 * in stateless compression scenario
			 */
			if (comp_resp->input_byte_counter) {
				if ((qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_FIXED_COMP_STATELESS) ||
						(qat_xform->qat_comp_request_type
					== QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)) {
					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE;
					rx_op->consumed =
						comp_resp->input_byte_counter;
					rx_op->produced =
						comp_resp->output_byte_counter;
				} else
					rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
			} else
				rx_op->status =
				RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
		} else
			rx_op->status = RTE_COMP_OP_STATUS_ERROR;

		++(*dequeue_err_count);
		rx_op->debug_status =
			*((uint16_t *)(&resp_msg->comn_resp.comn_error));
	} else {
		struct icp_qat_fw_resp_comp_pars *comp_resp =
			(struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;

		rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
		rx_op->consumed = comp_resp->input_byte_counter;
		rx_op->produced = comp_resp->output_byte_counter;
		if (stream)
			stream->start_of_packet = 0;

		if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
			if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
				rx_op->output_chksum = comp_resp->curr_crc32;
			else if (qat_xform->checksum_type ==
					RTE_COMP_CHECKSUM_ADLER32)
				rx_op->output_chksum = comp_resp->curr_adler_32;
			else
				rx_op->output_chksum = comp_resp->curr_chksum;
		}
	}
	QAT_DP_LOG(DEBUG, "About to check for split op :cookies: %p %p, split:%u",
			cookie, cookie->parent_cookie, cookie->split_op);

	if (cookie->split_op) {
		*op = NULL;
		struct qat_comp_op_cookie *pc = cookie->parent_cookie;

		if (cookie->nb_children > 0) {
			QAT_DP_LOG(DEBUG, "Parent");
			/* parent - don't return until all children
			 * responses are collected
			 */
			cookie->total_consumed = rx_op->consumed;
			cookie->total_produced = rx_op->produced;
			if (err) {
				cookie->error = rx_op->status;
				rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
			} else {
				/* calculate dst mbuf and offset for child op */
				qat_comp_mbuf_skip(&cookie->dst_data,
						&cookie->dst_data_offset,
						rx_op->produced);
			}
		} else {
			QAT_DP_LOG(DEBUG, "Child");
			if (pc->error == RTE_COMP_OP_STATUS_SUCCESS) {
				if (err)
					pc->error = rx_op->status;
				if (rx_op->produced) {
					/* this covers both SUCCESS and
					 * OUT_OF_SPACE_RECOVERABLE cases
					 */
					qat_comp_response_data_copy(cookie,
							rx_op);
					pc->total_consumed += rx_op->consumed;
					pc->total_produced += rx_op->produced;
				}
			}
			rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;

			pc->nb_child_responses++;

			/* (child) cookie fields have to be reset
			 * to avoid problems with reusability -
			 * rx and tx queue starting from index zero
			 */
			cookie->nb_children = 0;
			cookie->split_op = 0;
			cookie->nb_child_responses = 0;
			cookie->dest_buffer = NULL;

			if (pc->nb_child_responses == pc->nb_children) {
				uint8_t child_resp;

				/* parent should be included as well */
				child_resp = pc->nb_child_responses + 1;

				rx_op->status = pc->error;
				rx_op->consumed = pc->total_consumed;
				rx_op->produced = pc->total_produced;
				*op = (void *)rx_op;

				/* free memzones used for dst data */
				qat_comp_free_split_op_memzones(pc,
						pc->nb_children);

				/* (parent) cookie fields have to be reset
				 * to avoid problems with reusability -
				 * rx and tx queue starting from index zero
				 */
				pc->nb_children = 0;
				pc->split_op = 0;
				pc->nb_child_responses = 0;
				pc->error = RTE_COMP_OP_STATUS_SUCCESS;

				return child_resp;
			}
		}
		return 0;
	}

	*op = (void *)rx_op;
	return 1;
}
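
/* Sizes of the PMD-private xform and stream objects, used when the
 * compressdev framework creates the corresponding mempools.
 */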
unsigned int
qat_comp_xform_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
}

unsigned int
qat_comp_stream_size(void)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_comp_stream), 8);
}
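
/* Fill the common part of the firmware request header according to the
 * request type (static/dynamic compression or decompression).
 */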
static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    enum qat_comp_request_type request)
{
	if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
	else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
	else if (request == QAT_COMP_REQUEST_DECOMPRESS)
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;

	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
	header->hdr_flags =
			ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
			QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
}
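
/* Build the firmware request template stored in the xform (and reused by
 * every operation referencing it): header, session flags, compression slice
 * config word, checksum seeds and slice chaining, based on the rte_comp_xform
 * and, for stateful decompression, on the stream's state buffers.
 */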
static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
			const struct rte_memzone *interm_buff_mz,
			const struct rte_comp_xform *xform,
			const struct qat_comp_stream *stream,
			enum rte_comp_op_type op_type)
{
	struct icp_qat_fw_comp_req *comp_req;
	int comp_level, algo;
	uint32_t req_par_flags;
	int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;

	if (unlikely(qat_xform == NULL)) {
		QAT_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (op_type == RTE_COMP_OP_STATEFUL) {
		if (unlikely(stream == NULL)) {
			QAT_LOG(ERR, "Stream must be non null for stateful op");
			return -EINVAL;
		}
		if (unlikely(qat_xform->qat_comp_request_type !=
				QAT_COMP_REQUEST_DECOMPRESS)) {
			QAT_LOG(ERR, "QAT PMD does not support stateful compression");
			return -ENOTSUP;
		}
	}

	if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
		comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL,
				ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	} else {
		if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level == 1)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
		else if (xform->compress.level == 2)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
		else if (xform->compress.level == 3)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
		else if (xform->compress.level >= 4 &&
				xform->compress.level <= 9)
			comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
		else {
			QAT_LOG(ERR, "compression level not supported");
			return -EINVAL;
		}
		req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
				ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
				ICP_QAT_FW_COMP_CNV_RECOVERY);
	}

	switch (xform->compress.algo) {
	case RTE_COMP_ALGO_DEFLATE:
		algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
		break;
	case RTE_COMP_ALGO_LZS:
	default:
		/* RTE_COMP_NULL */
		QAT_LOG(ERR, "compression algorithm not supported");
		return -EINVAL;
	}

	comp_req = &qat_xform->qat_comp_req_tmpl;

	/* Initialize header */
	qat_comp_create_req_hdr(&comp_req->comn_hdr,
			qat_xform->qat_comp_request_type);

	if (op_type == RTE_COMP_OP_STATEFUL) {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATEFUL_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);

		/* Decompression state registers */
		comp_req->comp_cd_ctrl.comp_state_addr =
				stream->state_registers_decomp_phys;

		/* Enable A, B, C, D, and E (CAMs). */
		comp_req->comp_cd_ctrl.ram_bank_flags =
			ICP_QAT_FW_COMP_RAM_FLAGS_BUILD(
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank I */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank H */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank G */
				ICP_QAT_FW_COMP_BANK_DISABLED, /* Bank F */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank E */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank D */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank C */
				ICP_QAT_FW_COMP_BANK_ENABLED,  /* Bank B */
				ICP_QAT_FW_COMP_BANK_ENABLED); /* Bank A */

		comp_req->comp_cd_ctrl.ram_banks_addr =
				stream->inflate_context_phys;
	} else {
		comp_req->comn_hdr.serv_specif_flags =
				ICP_QAT_FW_COMP_FLAGS_BUILD(
			ICP_QAT_FW_COMP_STATELESS_SESSION,
			ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
			ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
	}

	comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
		ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
			direction,
			/* In CPM 1.6 only valid mode ! */
			ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
			/* Translate level to depth */
			comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	comp_req->comp_pars.initial_adler = 1;
	comp_req->comp_pars.initial_crc32 = 0;
	comp_req->comp_pars.req_par_flags = req_par_flags;

	if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
			qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);
	} else if (qat_xform->qat_comp_request_type ==
			QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
				ICP_QAT_FW_SLICE_COMP);

		ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->u2.xlt_cd_ctrl,
				ICP_QAT_FW_SLICE_XLAT);

		comp_req->u1.xlt_pars.inter_buff_ptr =
				interm_buff_mz->iova;
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
			sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}

/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *  - if successful returns 0
 *    and valid private_xform handle
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private_xform could not be allocated.
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
			      const struct rte_comp_xform *xform,
			      void **private_xform)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;

	if (unlikely(private_xform == NULL)) {
		QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(qat->xformpool == NULL)) {
		QAT_LOG(ERR, "QAT device has no private_xform mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->xformpool, private_xform)) {
		QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
		return -ENOMEM;
	}

	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)*private_xform;

	if (xform->type == RTE_COMP_COMPRESS) {

		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
			((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
					&& qat->interm_buff_mz == NULL))
			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;

		else if ((xform->compress.deflate.huffman ==
				RTE_COMP_HUFFMAN_DYNAMIC ||
				xform->compress.deflate.huffman ==
						RTE_COMP_HUFFMAN_DEFAULT) &&
				qat->interm_buff_mz != NULL)

			qat_xform->qat_comp_request_type =
					QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;

		else {
			QAT_LOG(ERR,
					"IM buffers needed for dynamic deflate. Set size in config file");
			return -EINVAL;
		}

		qat_xform->checksum_type = xform->compress.chksum;

	} else {
		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
		qat_xform->checksum_type = xform->decompress.chksum;
	}

	if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform,
			NULL, RTE_COMP_OP_STATELESS)) {
		QAT_LOG(ERR, "QAT: Problem with setting compression");
		return -EINVAL;
	}
	return 0;
}

/**
 * Free driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param private_xform
 *   handle of pmd's private_xform data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
			    void *private_xform)
{
	struct qat_comp_xform *qat_xform =
			(struct qat_comp_xform *)private_xform;

	if (qat_xform) {
		memset(qat_xform, 0, qat_comp_xform_size());
		struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);

		rte_mempool_put(mp, qat_xform);
		return 0;
	}
	return -EINVAL;
}

/**
 * Reset stream state for the next use.
 *
 * @param stream
 *   handle of pmd's private stream data
 */
static void
qat_comp_stream_reset(struct qat_comp_stream *stream)
{
	if (stream) {
		memset(&stream->qat_xform, 0, sizeof(struct qat_comp_xform));
		stream->start_of_packet = 1;
		stream->op_in_progress = 0;
	}
}

/**
 * Create driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param stream
 *   ptr where handle of pmd's private stream data should be stored
 * @return
 *  - Returns 0 if private stream structure has been created successfully.
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -ENOTSUP if comp device does not support the comp transform.
 *  - Returns -ENOMEM if the private stream could not be allocated.
 */
int
qat_comp_stream_create(struct rte_compressdev *dev,
		       const struct rte_comp_xform *xform,
		       void **stream)
{
	struct qat_comp_dev_private *qat = dev->data->dev_private;
	struct qat_comp_stream *ptr;

	if (unlikely(stream == NULL)) {
		QAT_LOG(ERR, "QAT: stream parameter is NULL");
		return -EINVAL;
	}
	if (unlikely(xform->type == RTE_COMP_COMPRESS)) {
		QAT_LOG(ERR, "QAT: stateful compression not supported");
		return -ENOTSUP;
	}
	if (unlikely(qat->streampool == NULL)) {
		QAT_LOG(ERR, "QAT device has no stream mempool");
		return -ENOMEM;
	}
	if (rte_mempool_get(qat->streampool, stream)) {
		QAT_LOG(ERR, "Couldn't get object from qat stream mempool");
		return -ENOMEM;
	}

	ptr = (struct qat_comp_stream *) *stream;
	qat_comp_stream_reset(ptr);
	ptr->qat_xform.qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
	ptr->qat_xform.checksum_type = xform->decompress.chksum;

	if (qat_comp_create_templates(&ptr->qat_xform, qat->interm_buff_mz,
			xform, ptr, RTE_COMP_OP_STATEFUL)) {
		QAT_LOG(ERR, "QAT: problem with creating descriptor template for stream");
		rte_mempool_put(qat->streampool, *stream);
		*stream = NULL;
		return -EINVAL;
	}

	return 0;
}

/**
 * Free driver private stream data.
 *
 * @param dev
 *   Compressdev device
 * @param stream
 *   handle of pmd's private stream data
 * @return
 *  - 0 if successful
 *  - <0 in error cases
 *  - Returns -EINVAL if input parameters are invalid.
 *  - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *  - Returns -EBUSY if can't free stream as there are inflight operations
 */
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
{
	if (stream) {
		struct qat_comp_dev_private *qat = dev->data->dev_private;
		qat_comp_stream_reset((struct qat_comp_stream *) stream);
		rte_mempool_put(qat->streampool, stream);
		return 0;
	}
	return -EINVAL;
}