/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#include <isa-l.h>

#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_compressdev_pmd.h>

#include "isal_compress_pmd_private.h"
/* ISA-L supports a single (32K, i.e. 2^15) sliding window. */
#define RTE_COMP_ISAL_WINDOW_SIZE 15
/* Mapping of compressdev API levels onto ISA-L's internal levels. */
#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
#define RTE_COMP_ISAL_LEVEL_ONE 1
#define RTE_COMP_ISAL_LEVEL_TWO 2
#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */

/* Dynamic log type id registered in isal_init_log(). */
int isal_logtype_driver;
22 /* Verify and set private xform parameters */
24 isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
25 const struct rte_comp_xform *xform)
30 /* Set compression private xform variables */
31 if (xform->type == RTE_COMP_COMPRESS) {
32 /* Set private xform type - COMPRESS/DECOMPRESS */
33 priv_xform->type = RTE_COMP_COMPRESS;
35 /* Set private xform algorithm */
36 if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
37 if (xform->compress.algo == RTE_COMP_ALGO_NULL) {
38 ISAL_PMD_LOG(ERR, "By-pass not supported\n");
41 ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
44 priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
46 /* Set private xform checksum - raw deflate by default */
47 if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
48 ISAL_PMD_LOG(ERR, "Checksum not supported\n");
52 /* Set private xform window size, 32K supported */
53 if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
54 priv_xform->compress.window_size =
55 RTE_COMP_ISAL_WINDOW_SIZE;
57 ISAL_PMD_LOG(ERR, "Window size not supported\n");
61 /* Set private xform huffman type */
62 switch (xform->compress.deflate.huffman) {
63 case(RTE_COMP_HUFFMAN_DEFAULT):
64 priv_xform->compress.deflate.huffman =
65 RTE_COMP_HUFFMAN_DEFAULT;
67 case(RTE_COMP_HUFFMAN_FIXED):
68 priv_xform->compress.deflate.huffman =
69 RTE_COMP_HUFFMAN_FIXED;
71 case(RTE_COMP_HUFFMAN_DYNAMIC):
72 priv_xform->compress.deflate.huffman =
73 RTE_COMP_HUFFMAN_DYNAMIC;
76 ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
80 /* Set private xform level.
81 * Checking compliance with compressdev API, -1 <= level => 9
83 if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
84 xform->compress.level > RTE_COMP_LEVEL_MAX) {
85 ISAL_PMD_LOG(ERR, "Compression level out of range\n");
88 /* Check for Compressdev API level 0, No compression
89 * not supported in ISA-L
91 else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
92 ISAL_PMD_LOG(ERR, "No Compression not supported\n");
95 /* If using fixed huffman code, level must be 0 */
96 else if (priv_xform->compress.deflate.huffman ==
97 RTE_COMP_HUFFMAN_FIXED) {
98 ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a"
99 " fixed huffman code\n");
100 priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
101 priv_xform->level_buffer_size =
102 ISAL_DEF_LVL0_DEFAULT;
104 /* Mapping API levels to ISA-L levels 1,2 & 3 */
105 switch (xform->compress.level) {
106 case RTE_COMP_LEVEL_PMD_DEFAULT:
107 /* Default is 1 if not using fixed huffman */
108 priv_xform->compress.level =
109 RTE_COMP_ISAL_LEVEL_ONE;
110 priv_xform->level_buffer_size =
111 ISAL_DEF_LVL1_DEFAULT;
113 case RTE_COMP_LEVEL_MIN:
114 priv_xform->compress.level =
115 RTE_COMP_ISAL_LEVEL_ONE;
116 priv_xform->level_buffer_size =
117 ISAL_DEF_LVL1_DEFAULT;
119 case RTE_COMP_ISAL_LEVEL_TWO:
120 priv_xform->compress.level =
121 RTE_COMP_ISAL_LEVEL_TWO;
122 priv_xform->level_buffer_size =
123 ISAL_DEF_LVL2_DEFAULT;
125 /* Level 3 or higher requested */
127 /* Check for AVX512, to use ISA-L level 3 */
128 if (rte_cpu_get_flag_enabled(
129 RTE_CPUFLAG_AVX512F)) {
130 priv_xform->compress.level =
131 RTE_COMP_ISAL_LEVEL_THREE;
132 priv_xform->level_buffer_size =
133 ISAL_DEF_LVL3_DEFAULT;
135 /* Check for AVX2, to use ISA-L level 3 */
136 else if (rte_cpu_get_flag_enabled(
138 priv_xform->compress.level =
139 RTE_COMP_ISAL_LEVEL_THREE;
140 priv_xform->level_buffer_size =
141 ISAL_DEF_LVL3_DEFAULT;
143 ISAL_PMD_LOG(DEBUG, "Requested ISA-L level"
144 " 3 or above; Level 3 optimized"
145 " for AVX512 & AVX2 only."
146 " level changed to 2.\n");
147 priv_xform->compress.level =
148 RTE_COMP_ISAL_LEVEL_TWO;
149 priv_xform->level_buffer_size =
150 ISAL_DEF_LVL2_DEFAULT;
156 /* Set decompression private xform variables */
157 else if (xform->type == RTE_COMP_DECOMPRESS) {
159 /* Set private xform type - COMPRESS/DECOMPRESS */
160 priv_xform->type = RTE_COMP_DECOMPRESS;
162 /* Set private xform algorithm */
163 if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
164 if (xform->decompress.algo == RTE_COMP_ALGO_NULL) {
165 ISAL_PMD_LOG(ERR, "By pass not supported\n");
168 ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
171 priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
173 /* Set private xform checksum - raw deflate by default */
174 if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
175 ISAL_PMD_LOG(ERR, "Checksum not supported\n");
179 /* Set private xform window size, 32K supported */
180 if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
181 priv_xform->decompress.window_size =
182 RTE_COMP_ISAL_WINDOW_SIZE;
184 ISAL_PMD_LOG(ERR, "Window size not supported\n");
191 /* Compression using chained mbufs for input/output data */
193 chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp)
196 uint32_t remaining_offset;
197 uint32_t remaining_data = op->src.length;
198 struct rte_mbuf *src = op->m_src;
199 struct rte_mbuf *dst = op->m_dst;
201 /* check for source/destination offset passing multiple segments
202 * and point compression stream to input/output buffer.
204 remaining_offset = op->src.offset;
205 while (remaining_offset >= src->data_len) {
206 remaining_offset -= src->data_len;
209 qp->stream->avail_in = RTE_MIN(src->data_len - remaining_offset,
211 qp->stream->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
214 remaining_offset = op->dst.offset;
215 while (remaining_offset >= dst->data_len) {
216 remaining_offset -= dst->data_len;
219 qp->stream->avail_out = dst->data_len - remaining_offset;
220 qp->stream->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
223 if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
224 ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n");
225 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
229 while (qp->stream->internal_state.state != ZSTATE_END) {
230 /* Last segment of data */
231 if (remaining_data <= src->data_len)
232 qp->stream->end_of_stream = 1;
234 /* Execute compression operation */
235 ret = isal_deflate(qp->stream);
237 remaining_data = op->src.length - qp->stream->total_in;
239 if (ret != COMP_OK) {
240 ISAL_PMD_LOG(ERR, "Compression operation failed\n");
241 op->status = RTE_COMP_OP_STATUS_ERROR;
245 if (qp->stream->avail_in == 0 &&
246 qp->stream->total_in != op->src.length) {
247 if (src->next != NULL) {
249 qp->stream->next_in =
250 rte_pktmbuf_mtod(src, uint8_t *);
251 qp->stream->avail_in =
252 RTE_MIN(remaining_data, src->data_len);
255 "Not enough input buffer segments\n");
256 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
261 if (qp->stream->avail_out == 0 &&
262 qp->stream->internal_state.state != ZSTATE_END) {
263 if (dst->next != NULL) {
265 qp->stream->next_out =
266 rte_pktmbuf_mtod(dst, uint8_t *);
267 qp->stream->avail_out = dst->data_len;
270 "Not enough output buffer segments\n");
272 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
281 /* Decompression using chained mbufs for input/output data */
283 chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp)
286 uint32_t consumed_data, src_remaining_offset, dst_remaining_offset;
287 uint32_t remaining_data = op->src.length;
288 struct rte_mbuf *src = op->m_src;
289 struct rte_mbuf *dst = op->m_dst;
291 /* check for offset passing multiple segments
292 * and point decompression state to input/output buffer
294 src_remaining_offset = op->src.offset;
295 while (src_remaining_offset >= src->data_len) {
296 src_remaining_offset -= src->data_len;
299 qp->state->avail_in = RTE_MIN(src->data_len - src_remaining_offset,
301 qp->state->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
302 src_remaining_offset);
304 dst_remaining_offset = op->dst.offset;
305 while (dst_remaining_offset >= dst->data_len) {
306 dst_remaining_offset -= dst->data_len;
309 qp->state->avail_out = dst->data_len - dst_remaining_offset;
310 qp->state->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
311 dst_remaining_offset);
313 while (qp->state->block_state != ISAL_BLOCK_FINISH) {
315 ret = isal_inflate(qp->state);
317 /* Check for first segment, offset needs to be accounted for */
318 if (remaining_data == op->src.length) {
319 consumed_data = src->data_len - qp->state->avail_in -
320 src_remaining_offset;
322 consumed_data = src->data_len - qp->state->avail_in;
324 op->consumed += consumed_data;
325 remaining_data -= consumed_data;
327 if (ret != ISAL_DECOMP_OK) {
328 ISAL_PMD_LOG(ERR, "Decompression operation failed\n");
329 op->status = RTE_COMP_OP_STATUS_ERROR;
333 if (qp->state->avail_in == 0
334 && op->consumed != op->src.length) {
335 if (src->next != NULL) {
338 rte_pktmbuf_mtod(src, uint8_t *);
339 qp->state->avail_in =
340 RTE_MIN(remaining_data, src->data_len);
344 if (qp->state->avail_out == 0 &&
345 qp->state->block_state != ISAL_BLOCK_FINISH) {
346 if (dst->next != NULL) {
348 qp->state->next_out =
349 rte_pktmbuf_mtod(dst, uint8_t *);
350 qp->state->avail_out = dst->data_len;
353 "Not enough output buffer segments\n");
355 RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
364 /* Stateless Compression Function */
366 process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
367 struct isal_priv_xform *priv_xform)
370 op->status = RTE_COMP_OP_STATUS_SUCCESS;
372 /* Required due to init clearing level_buf */
373 uint8_t *temp_level_buf = qp->stream->level_buf;
375 /* Initialize compression stream */
376 isal_deflate_stateless_init(qp->stream);
378 qp->stream->level_buf = temp_level_buf;
380 /* Stateless operation, input will be consumed in one go */
381 qp->stream->flush = NO_FLUSH;
383 /* set compression level & intermediate level buffer size */
384 qp->stream->level = priv_xform->compress.level;
385 qp->stream->level_buf_size = priv_xform->level_buffer_size;
387 /* Set op huffman code */
388 if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
389 isal_deflate_set_hufftables(qp->stream, NULL,
390 IGZIP_HUFFTABLE_STATIC);
391 else if (priv_xform->compress.deflate.huffman ==
392 RTE_COMP_HUFFMAN_DEFAULT)
393 isal_deflate_set_hufftables(qp->stream, NULL,
394 IGZIP_HUFFTABLE_DEFAULT);
395 /* Dynamically change the huffman code to suit the input data */
396 else if (priv_xform->compress.deflate.huffman ==
397 RTE_COMP_HUFFMAN_DYNAMIC)
398 isal_deflate_set_hufftables(qp->stream, NULL,
399 IGZIP_HUFFTABLE_DEFAULT);
401 if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
402 ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
403 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
407 if (op->dst.offset >= op->m_dst->pkt_len) {
408 ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough"
409 " for offset provided.\n");
410 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
415 if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
416 ret = chained_mbuf_compression(op, qp);
421 qp->stream->end_of_stream = 1; /* All input consumed in one */
422 /* Point compression stream to input buffer */
423 qp->stream->avail_in = op->src.length;
424 qp->stream->next_in = rte_pktmbuf_mtod_offset(op->m_src,
425 uint8_t *, op->src.offset);
427 /* Point compression stream to output buffer */
428 qp->stream->avail_out = op->m_dst->data_len - op->dst.offset;
429 qp->stream->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
430 uint8_t *, op->dst.offset);
432 if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
433 ISAL_PMD_LOG(ERR, "Invalid source or destination"
435 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
439 /* Execute compression operation */
440 ret = isal_deflate_stateless(qp->stream);
442 /* Check that output buffer did not run out of space */
443 if (ret == STATELESS_OVERFLOW) {
444 ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
445 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
449 /* Check that input buffer has been fully consumed */
450 if (qp->stream->avail_in != (uint32_t)0) {
451 ISAL_PMD_LOG(ERR, "Input buffer could not be read"
453 op->status = RTE_COMP_OP_STATUS_ERROR;
457 if (ret != COMP_OK) {
458 ISAL_PMD_LOG(ERR, "Compression operation failed\n");
459 op->status = RTE_COMP_OP_STATUS_ERROR;
463 op->consumed = qp->stream->total_in;
464 op->produced = qp->stream->total_out;
469 /* Stateless Decompression Function */
471 process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
475 op->status = RTE_COMP_OP_STATUS_SUCCESS;
477 /* Initialize decompression state */
478 isal_inflate_init(qp->state);
480 if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
481 ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
482 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
486 if (op->dst.offset >= op->m_dst->pkt_len) {
487 ISAL_PMD_LOG(ERR, "Output mbuf not big enough for "
488 "offset provided.\n");
489 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
494 if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
495 ret = chained_mbuf_decompression(op, qp);
500 /* Point decompression state to input buffer */
501 qp->state->avail_in = op->src.length;
502 qp->state->next_in = rte_pktmbuf_mtod_offset(op->m_src,
503 uint8_t *, op->src.offset);
505 /* Point decompression state to output buffer */
506 qp->state->avail_out = op->m_dst->data_len - op->dst.offset;
507 qp->state->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
508 uint8_t *, op->dst.offset);
510 if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
511 ISAL_PMD_LOG(ERR, "Invalid source or destination"
513 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
517 /* Execute decompression operation */
518 ret = isal_inflate_stateless(qp->state);
520 if (ret == ISAL_OUT_OVERFLOW) {
521 ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
522 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
526 /* Check that input buffer has been fully consumed */
527 if (qp->state->avail_in != (uint32_t)0) {
528 ISAL_PMD_LOG(ERR, "Input buffer could not be read"
530 op->status = RTE_COMP_OP_STATUS_ERROR;
534 if (ret != ISAL_DECOMP_OK) {
535 op->status = RTE_COMP_OP_STATUS_ERROR;
538 op->consumed = op->src.length - qp->state->avail_in;
540 op->produced = qp->state->total_out;
545 /* Process compression/decompression operation */
547 process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
548 struct isal_priv_xform *priv_xform)
550 switch (priv_xform->type) {
551 case RTE_COMP_COMPRESS:
552 process_isal_deflate(op, qp, priv_xform);
554 case RTE_COMP_DECOMPRESS:
555 process_isal_inflate(op, qp);
558 ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
566 isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
569 struct isal_comp_qp *qp = queue_pair;
572 int16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
574 for (i = 0; i < num_enq; i++) {
575 if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) {
576 ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
577 ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n");
578 qp->qp_stats.enqueue_err_count++;
581 retval = process_op(qp, ops[i], ops[i]->private_xform);
582 if (unlikely(retval < 0) ||
583 ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
584 qp->qp_stats.enqueue_err_count++;
588 retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops,
590 qp->num_free_elements -= retval;
591 qp->qp_stats.enqueued_count += retval;
598 isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
601 struct isal_comp_qp *qp = queue_pair;
602 uint16_t nb_dequeued;
604 nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
606 qp->num_free_elements += nb_dequeued;
607 qp->qp_stats.dequeued_count += nb_dequeued;
612 /* Create ISA-L compression device */
614 compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
615 struct rte_compressdev_pmd_init_params *init_params)
617 struct rte_compressdev *dev;
619 dev = rte_compressdev_pmd_create(name, &vdev->device,
620 sizeof(struct isal_comp_private), init_params);
622 ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
626 dev->dev_ops = isal_compress_pmd_ops;
628 /* register rx/tx burst functions for data path */
629 dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
630 dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
635 /** Remove compression device */
637 compdev_isal_remove_dev(struct rte_vdev_device *vdev)
639 struct rte_compressdev *compdev;
642 name = rte_vdev_device_name(vdev);
646 compdev = rte_compressdev_pmd_get_named_dev(name);
650 return rte_compressdev_pmd_destroy(compdev);
653 /** Initialise ISA-L compression device */
655 compdev_isal_probe(struct rte_vdev_device *dev)
657 struct rte_compressdev_pmd_init_params init_params = {
661 const char *name, *args;
664 name = rte_vdev_device_name(dev);
668 args = rte_vdev_device_args(dev);
670 retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
673 "Failed to parse initialisation arguments[%s]\n", args);
677 return compdev_isal_create(name, dev, &init_params);
680 static struct rte_vdev_driver compdev_isal_pmd_drv = {
681 .probe = compdev_isal_probe,
682 .remove = compdev_isal_remove_dev,
685 RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
686 RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
689 RTE_INIT(isal_init_log)
691 isal_logtype_driver = rte_log_register("pmd.compress.isal");
692 if (isal_logtype_driver >= 0)
693 rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);