1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation
6 #include <rte_bus_vdev.h>
7 #include <rte_common.h>
8 #include <rte_malloc.h>
10 #include <rte_compressdev_pmd.h>
12 #include "isal_compress_pmd_private.h"
14 #define RTE_COMP_ISAL_WINDOW_SIZE 15
15 #define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
16 #define RTE_COMP_ISAL_LEVEL_ONE 1
17 #define RTE_COMP_ISAL_LEVEL_TWO 2
18 #define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */
/* Dynamic log type for this PMD; registered as "comp_isal" in the
 * RTE_INIT constructor at the bottom of this file and used by ISAL_PMD_LOG.
 */
20 int isal_logtype_driver;
22 /* Verify and set private xform parameters */
/*
 * Validate a user-supplied rte_comp_xform and copy the accepted values into
 * priv_xform.  Handles both RTE_COMP_COMPRESS and RTE_COMP_DECOMPRESS.
 * Only raw DEFLATE is supported: algorithm must be RTE_COMP_ALGO_DEFLATE,
 * checksum must be RTE_COMP_CHECKSUM_NONE, and the window size must be the
 * 32K (log2 == 15) value.  Compressdev API levels (-1..9) are mapped onto
 * ISA-L levels 0-3; level 3 is selected only when AVX512F or AVX2 is
 * available, otherwise level 2 is used.
 * NOTE(review): this chunk is missing lines (returns/braces); code lines
 * are kept verbatim except for the decompress-checksum fix marked below.
 */
24 isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
25 const struct rte_comp_xform *xform)
30 /* Set compression private xform variables */
31 if (xform->type == RTE_COMP_COMPRESS) {
32 /* Set private xform type - COMPRESS/DECOMPRESS */
33 priv_xform->type = RTE_COMP_COMPRESS;
35 /* Set private xform algorithm */
36 if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
37 if (xform->compress.algo == RTE_COMP_ALGO_NULL) {
38 ISAL_PMD_LOG(ERR, "By-pass not supported\n");
41 ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
44 priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
46 /* Set private xform checksum - raw deflate by default */
47 if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
48 ISAL_PMD_LOG(ERR, "Checksum not supported\n");
52 /* Set private xform window size, 32K supported */
53 if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
54 priv_xform->compress.window_size =
55 RTE_COMP_ISAL_WINDOW_SIZE;
57 ISAL_PMD_LOG(ERR, "Window size not supported\n");
61 /* Set private xform huffman type */
62 switch (xform->compress.deflate.huffman) {
63 case(RTE_COMP_HUFFMAN_DEFAULT):
64 priv_xform->compress.deflate.huffman =
65 RTE_COMP_HUFFMAN_DEFAULT;
67 case(RTE_COMP_HUFFMAN_FIXED):
68 priv_xform->compress.deflate.huffman =
69 RTE_COMP_HUFFMAN_FIXED;
71 case(RTE_COMP_HUFFMAN_DYNAMIC):
72 priv_xform->compress.deflate.huffman =
73 RTE_COMP_HUFFMAN_DYNAMIC;
76 ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
80 /* Set private xform level.
81 * Checking compliance with compressdev API, -1 <= level => 9
83 if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
84 xform->compress.level > RTE_COMP_LEVEL_MAX) {
85 ISAL_PMD_LOG(ERR, "Compression level out of range\n");
88 /* Check for Compressdev API level 0, No compression
89 * not supported in ISA-L
91 else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
92 ISAL_PMD_LOG(ERR, "No Compression not supported\n");
95 /* If using fixed huffman code, level must be 0 */
96 else if (priv_xform->compress.deflate.huffman ==
97 RTE_COMP_HUFFMAN_FIXED) {
98 ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a"
99 " fixed huffman code\n");
100 priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
101 priv_xform->level_buffer_size =
102 ISAL_DEF_LVL0_DEFAULT;
104 /* Mapping API levels to ISA-L levels 1,2 & 3 */
105 switch (xform->compress.level) {
106 case RTE_COMP_LEVEL_PMD_DEFAULT:
107 /* Default is 1 if not using fixed huffman */
108 priv_xform->compress.level =
109 RTE_COMP_ISAL_LEVEL_ONE;
110 priv_xform->level_buffer_size =
111 ISAL_DEF_LVL1_DEFAULT;
113 case RTE_COMP_LEVEL_MIN:
114 priv_xform->compress.level =
115 RTE_COMP_ISAL_LEVEL_ONE;
116 priv_xform->level_buffer_size =
117 ISAL_DEF_LVL1_DEFAULT;
119 case RTE_COMP_ISAL_LEVEL_TWO:
120 priv_xform->compress.level =
121 RTE_COMP_ISAL_LEVEL_TWO;
122 priv_xform->level_buffer_size =
123 ISAL_DEF_LVL2_DEFAULT;
125 /* Level 3 or higher requested */
127 /* Check for AVX512, to use ISA-L level 3 */
128 if (rte_cpu_get_flag_enabled(
129 RTE_CPUFLAG_AVX512F)) {
130 priv_xform->compress.level =
131 RTE_COMP_ISAL_LEVEL_THREE;
132 priv_xform->level_buffer_size =
133 ISAL_DEF_LVL3_DEFAULT;
135 /* Check for AVX2, to use ISA-L level 3 */
136 else if (rte_cpu_get_flag_enabled(
138 priv_xform->compress.level =
139 RTE_COMP_ISAL_LEVEL_THREE;
140 priv_xform->level_buffer_size =
141 ISAL_DEF_LVL3_DEFAULT;
143 ISAL_PMD_LOG(DEBUG, "Requested ISA-L level"
144 " 3 or above; Level 3 optimized"
145 " for AVX512 & AVX2 only."
146 " level changed to 2.\n");
147 priv_xform->compress.level =
148 RTE_COMP_ISAL_LEVEL_TWO;
149 priv_xform->level_buffer_size =
150 ISAL_DEF_LVL2_DEFAULT;
156 /* Set decompression private xform variables */
157 else if (xform->type == RTE_COMP_DECOMPRESS) {
159 /* Set private xform type - COMPRESS/DECOMPRESS */
160 priv_xform->type = RTE_COMP_DECOMPRESS;
162 /* Set private xform algorithm */
163 if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
164 if (xform->decompress.algo == RTE_COMP_ALGO_NULL) {
165 ISAL_PMD_LOG(ERR, "By pass not supported\n");
168 ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
171 priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
173 /* Set private xform checksum - raw deflate by default */
/* FIX: was xform->compress.chksum — in the DECOMPRESS branch the
 * decompress member of the xform union must be checked, matching the
 * decompress.algo/window_size accesses above and below.
 */
174 if (xform->decompress.chksum != RTE_COMP_CHECKSUM_NONE) {
175 ISAL_PMD_LOG(ERR, "Checksum not supported\n");
179 /* Set private xform window size, 32K supported */
180 if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
181 priv_xform->decompress.window_size =
182 RTE_COMP_ISAL_WINDOW_SIZE;
184 ISAL_PMD_LOG(ERR, "Window size not supported\n");
191 /* Stateless Compression Function */
/*
 * Compress one op on queue pair qp using ISA-L stateless DEFLATE.
 * Sets op->status (SUCCESS, OUT_OF_SPACE_TERMINATED, INVALID_ARGS or ERROR)
 * and on success records op->consumed/op->produced from the stream totals.
 * NOTE(review): src/dst are accessed via rte_pktmbuf_mtod() and
 * m_dst->data_len only — only the first mbuf segment is used; chained
 * (multi-segment) mbufs appear unsupported here — confirm with callers.
 */
193 process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
194 struct isal_priv_xform *priv_xform)
197 op->status = RTE_COMP_OP_STATUS_SUCCESS;
199 /* Required due to init clearing level_buf */
/* Save the level buffer pointer: isal_deflate_stateless_init() zeroes the
 * whole stream structure, so level_buf must be restored afterwards. */
200 uint8_t *temp_level_buf = qp->stream->level_buf;
202 /* Initialize compression stream */
203 isal_deflate_stateless_init(qp->stream);
205 qp->stream->level_buf = temp_level_buf;
207 /* Stateless operation, input will be consumed in one go */
208 qp->stream->flush = NO_FLUSH;
210 /* set op level & intermediate level buffer */
211 qp->stream->level = priv_xform->compress.level;
212 qp->stream->level_buf_size = priv_xform->level_buffer_size;
214 /* Point compression stream structure to input/output buffers */
215 qp->stream->avail_in = op->src.length;
216 qp->stream->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
217 qp->stream->avail_out = op->m_dst->data_len;
218 qp->stream->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
219 qp->stream->end_of_stream = 1; /* All input consumed in one go */
/* Reject NULL data pointers before handing the buffers to ISA-L. */
221 if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
222 ISAL_PMD_LOG(ERR, "Invalid source or destination buffers\n");
223 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
227 /* Set op huffman code */
228 if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
229 isal_deflate_set_hufftables(qp->stream, NULL,
230 IGZIP_HUFFTABLE_STATIC);
231 else if (priv_xform->compress.deflate.huffman ==
232 RTE_COMP_HUFFMAN_DEFAULT)
233 isal_deflate_set_hufftables(qp->stream, NULL,
234 IGZIP_HUFFTABLE_DEFAULT);
235 /* Dynamically change the huffman code to suit the input data */
236 else if (priv_xform->compress.deflate.huffman ==
237 RTE_COMP_HUFFMAN_DYNAMIC)
238 isal_deflate_set_hufftables(qp->stream, NULL,
239 IGZIP_HUFFTABLE_DEFAULT);
241 /* Execute compression operation */
242 ret = isal_deflate_stateless(qp->stream);
244 /* Check that output buffer did not run out of space */
245 if (ret == STATELESS_OVERFLOW) {
246 ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
247 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
251 /* Check that input buffer has been fully consumed */
252 if (qp->stream->avail_in != (uint32_t)0) {
253 ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
254 op->status = RTE_COMP_OP_STATUS_ERROR;
/* Any other non-COMP_OK ISA-L return code is reported as a generic error. */
258 if (ret != COMP_OK) {
259 op->status = RTE_COMP_OP_STATUS_ERROR;
263 op->consumed = qp->stream->total_in;
264 op->produced = qp->stream->total_out;
269 /* Stateless Decompression Function */
/*
 * Decompress one op on queue pair qp using ISA-L stateless inflate.
 * Sets op->status and on success records op->consumed (input length minus
 * remaining avail_in) and op->produced (state->total_out).
 * NOTE(review): like the deflate path, only the first mbuf segment of
 * m_src/m_dst is used — confirm ops are single-segment.
 */
271 process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
275 op->status = RTE_COMP_OP_STATUS_SUCCESS;
277 /* Initialize decompression state */
278 isal_inflate_init(qp->state);
280 /* Point decompression state structure to input/output buffers */
281 qp->state->avail_in = op->src.length;
282 qp->state->next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
283 qp->state->avail_out = op->m_dst->data_len;
284 qp->state->next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
/* Reject NULL data pointers before handing the buffers to ISA-L. */
286 if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
287 ISAL_PMD_LOG(ERR, "Invalid source or destination buffers\n");
288 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
292 /* Execute decompression operation */
293 ret = isal_inflate_stateless(qp->state);
295 if (ret == ISAL_OUT_OVERFLOW) {
296 ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
297 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
301 /* Check that input buffer has been fully consumed */
302 if (qp->state->avail_in != (uint32_t)0) {
303 ISAL_PMD_LOG(ERR, "Input buffer could not be read entirely\n");
304 op->status = RTE_COMP_OP_STATUS_ERROR;
/* Any other non-ISAL_DECOMP_OK return code is a generic error. */
308 if (ret != ISAL_DECOMP_OK) {
309 op->status = RTE_COMP_OP_STATUS_ERROR;
313 op->consumed = op->src.length - qp->state->avail_in;
314 op->produced = qp->state->total_out;
319 /* Process compression/decompression operation */
/*
 * Dispatch one op to the deflate or inflate path based on the private
 * xform's type; unknown types are logged as unsupported.
 */
321 process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
322 struct isal_priv_xform *priv_xform)
324 switch (priv_xform->type) {
325 case RTE_COMP_COMPRESS:
326 process_isal_deflate(op, qp, priv_xform);
328 case RTE_COMP_DECOMPRESS:
329 process_isal_inflate(op, qp);
332 ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
/*
 * Enqueue burst: process up to min(nb_ops, qp->num_free_elements) stateless
 * ops synchronously, then push the completed ops onto the qp->processed_pkts
 * ring for later dequeue.  Stateful ops and processing failures bump
 * qp_stats.enqueue_err_count.  Returns the ring-enqueue count (via retval;
 * return line not visible in this chunk).
 */
340 isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
343 struct isal_comp_qp *qp = queue_pair;
/* Only as many ops as there are free ring slots are accepted. */
346 int16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
348 for (i = 0; i < num_enq; i++) {
349 if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) {
350 ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
351 ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n");
352 qp->qp_stats.enqueue_err_count++;
355 retval = process_op(qp, ops[i], ops[i]->private_xform);
356 if (unlikely(retval < 0) ||
357 ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
358 qp->qp_stats.enqueue_err_count++;
/* Hand the processed ops to the completion ring and account for them. */
362 retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops,
364 qp->num_free_elements -= retval;
365 qp->qp_stats.enqueued_count += retval;
/*
 * Dequeue burst: pull completed ops from the qp->processed_pkts ring into
 * ops[], freeing ring slots and updating the dequeued-count statistic.
 */
372 isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
375 struct isal_comp_qp *qp = queue_pair;
376 uint16_t nb_dequeued;
378 nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
380 qp->num_free_elements += nb_dequeued;
381 qp->qp_stats.dequeued_count += nb_dequeued;
386 /* Create ISA-L compression device */
/*
 * Allocate a compressdev for the vdev via rte_compressdev_pmd_create(),
 * then wire up the ops table and the enqueue/dequeue burst handlers.
 */
388 compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
389 struct rte_compressdev_pmd_init_params *init_params)
391 struct rte_compressdev *dev;
393 dev = rte_compressdev_pmd_create(name, &vdev->device,
394 sizeof(struct isal_comp_private), init_params);
396 ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
400 dev->dev_ops = isal_compress_pmd_ops;
402 /* register rx/tx burst functions for data path */
403 dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
404 dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
409 /** Remove compression device */
/*
 * vdev remove callback: look up the compressdev by vdev name and destroy it.
 */
411 compdev_isal_remove_dev(struct rte_vdev_device *vdev)
413 struct rte_compressdev *compdev;
416 name = rte_vdev_device_name(vdev);
420 compdev = rte_compressdev_pmd_get_named_dev(name);
424 return rte_compressdev_pmd_destroy(compdev);
427 /** Initialise ISA-L compression device */
/*
 * vdev probe callback: parse the device arguments into init_params and
 * create the compressdev.
 */
429 compdev_isal_probe(struct rte_vdev_device *dev)
431 struct rte_compressdev_pmd_init_params init_params = {
435 const char *name, *args;
438 name = rte_vdev_device_name(dev);
442 args = rte_vdev_device_args(dev);
444 retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
447 "Failed to parse initialisation arguments[%s]\n", args);
451 return compdev_isal_create(name, dev, &init_params);
/* Virtual device driver definition: binds the probe/remove callbacks above
 * to the vdev bus; registered below under COMPDEV_NAME_ISAL_PMD. */
454 static struct rte_vdev_driver compdev_isal_pmd_drv = {
455 .probe = compdev_isal_probe,
456 .remove = compdev_isal_remove_dev,
459 RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
/* Advertise the accepted vdev argument string for this PMD. */
460 RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
/* Constructor: register the "comp_isal" dynamic log type and default its
 * level to INFO (only when registration succeeded). */
463 RTE_INIT(isal_init_log);
468 isal_logtype_driver = rte_log_register("comp_isal");
469 if (isal_logtype_driver >= 0)
470 rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);