1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium Networks
5 #include <rte_bus_vdev.h>
6 #include <rte_common.h>
8 #include "zlib_pmd_private.h"
10 int zlib_logtype_driver;
/** Advance to the next mbuf in the chain and refresh the working
 *  data pointer and length from it.
 *
 *  Evaluates to 0 when the chain is exhausted (next mbuf is NULL),
 *  non-zero otherwise. Note: mbuf/data/len are each written, so only
 *  pass plain lvalues (no side effects) as arguments.
 */
#define COMPUTE_BUF(mbuf, data, len)		\
		((mbuf = mbuf->next) ?		\
		(data = rte_pktmbuf_mtod(mbuf, uint8_t *)),	\
		(len = rte_pktmbuf_data_len(mbuf)) : 0)
21 process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
23 int ret, flush, fin_flush;
24 struct rte_mbuf *mbuf_src = op->m_src;
25 struct rte_mbuf *mbuf_dst = op->m_dst;
27 switch (op->flush_flag) {
28 case RTE_COMP_FLUSH_FULL:
29 case RTE_COMP_FLUSH_FINAL:
33 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
34 ZLIB_PMD_ERR("Invalid flush value\n");
38 if (unlikely(!strm)) {
39 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
40 ZLIB_PMD_ERR("Invalid z_stream\n");
43 /* Update z_stream with the inputs provided by application */
44 strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
47 strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
49 strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
52 strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
54 /* Set flush value to NO_FLUSH unless it is last mbuf */
56 /* Initialize status to SUCCESS */
57 op->status = RTE_COMP_OP_STATUS_SUCCESS;
60 /* Set flush value to Z_FINISH for last block */
61 if ((op->src.length - strm->total_in) <= strm->avail_in) {
62 strm->avail_in = (op->src.length - strm->total_in);
66 ret = deflate(strm, flush);
67 if (unlikely(ret == Z_STREAM_ERROR)) {
68 /* error return, do not process further */
69 op->status = RTE_COMP_OP_STATUS_ERROR;
72 /* Break if Z_STREAM_END is encountered */
73 if (ret == Z_STREAM_END)
76 /* Keep looping until input mbuf is consumed.
77 * Exit if destination mbuf gets exhausted.
79 } while ((strm->avail_out == 0) &&
80 COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
82 if (!strm->avail_out) {
83 /* there is no space for compressed output */
84 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
88 /* Update source buffer to next mbuf
89 * Exit if input buffers are fully consumed
91 } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
96 case RTE_COMP_OP_STATUS_SUCCESS:
97 op->consumed += strm->total_in;
99 case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
100 op->produced += strm->total_out;
103 ZLIB_PMD_ERR("stats not updated for status:%d\n",
111 process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
114 struct rte_mbuf *mbuf_src = op->m_src;
115 struct rte_mbuf *mbuf_dst = op->m_dst;
117 if (unlikely(!strm)) {
118 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
119 ZLIB_PMD_ERR("Invalid z_stream\n");
122 strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
125 strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
127 strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
130 strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
132 /** Ignoring flush value provided from application for decompression */
134 /* initialize status to SUCCESS */
135 op->status = RTE_COMP_OP_STATUS_SUCCESS;
139 ret = inflate(strm, flush);
151 op->status = RTE_COMP_OP_STATUS_ERROR;
154 /* no further computation needed if
155 * Z_STREAM_END is encountered
163 /* Keep looping until input mbuf is consumed.
164 * Exit if destination mbuf gets exhausted.
166 } while ((strm->avail_out == 0) &&
167 COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
169 if (!strm->avail_out) {
170 /* there is no more space for decompressed output */
171 op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
174 /* Read next input buffer to be processed, exit if compressed
175 * blocks are fully read
177 } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
180 /* Update op stats */
181 switch (op->status) {
182 case RTE_COMP_OP_STATUS_SUCCESS:
183 op->consumed += strm->total_in;
185 case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
186 op->produced += strm->total_out;
189 ZLIB_PMD_ERR("stats not produced for status:%d\n",
196 /** Process comp operation for mbuf */
198 process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
200 struct zlib_stream *stream;
201 struct zlib_priv_xform *private_xform;
203 if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
204 (op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
205 (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
206 op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
207 ZLIB_PMD_ERR("Invalid source or destination buffers or "
208 "invalid Operation requested\n");
210 private_xform = (struct zlib_priv_xform *)op->private_xform;
211 stream = &private_xform->stream;
212 stream->comp(op, &stream->strm);
214 /* whatever is out of op, put it into completion queue with
217 return rte_ring_enqueue(qp->processed_pkts, (void *)op);
220 /** Parse comp xform and set private xform/Stream parameters */
222 zlib_set_stream_parameters(const struct rte_comp_xform *xform,
223 struct zlib_stream *stream)
225 int strategy, level, wbits;
226 z_stream *strm = &stream->strm;
228 /* allocate deflate state */
229 strm->zalloc = Z_NULL;
230 strm->zfree = Z_NULL;
231 strm->opaque = Z_NULL;
233 switch (xform->type) {
234 case RTE_COMP_COMPRESS:
235 stream->comp = process_zlib_deflate;
236 stream->free = deflateEnd;
237 /** Compression window bits */
238 switch (xform->compress.algo) {
239 case RTE_COMP_ALGO_DEFLATE:
240 wbits = -(xform->compress.window_size);
243 ZLIB_PMD_ERR("Compression algorithm not supported\n");
246 /** Compression Level */
247 switch (xform->compress.level) {
248 case RTE_COMP_LEVEL_PMD_DEFAULT:
249 level = Z_DEFAULT_COMPRESSION;
251 case RTE_COMP_LEVEL_NONE:
252 level = Z_NO_COMPRESSION;
254 case RTE_COMP_LEVEL_MIN:
255 level = Z_BEST_SPEED;
257 case RTE_COMP_LEVEL_MAX:
258 level = Z_BEST_COMPRESSION;
261 level = xform->compress.level;
262 if (level < RTE_COMP_LEVEL_MIN ||
263 level > RTE_COMP_LEVEL_MAX) {
264 ZLIB_PMD_ERR("Compression level %d "
271 /** Compression strategy */
272 switch (xform->compress.deflate.huffman) {
273 case RTE_COMP_HUFFMAN_DEFAULT:
274 strategy = Z_DEFAULT_STRATEGY;
276 case RTE_COMP_HUFFMAN_FIXED:
279 case RTE_COMP_HUFFMAN_DYNAMIC:
280 strategy = Z_DEFAULT_STRATEGY;
283 ZLIB_PMD_ERR("Compression strategy not supported\n");
286 if (deflateInit2(strm, level,
288 DEF_MEM_LEVEL, strategy) != Z_OK) {
289 ZLIB_PMD_ERR("Deflate init failed\n");
294 case RTE_COMP_DECOMPRESS:
295 stream->comp = process_zlib_inflate;
296 stream->free = inflateEnd;
298 switch (xform->decompress.algo) {
299 case RTE_COMP_ALGO_DEFLATE:
300 wbits = -(xform->decompress.window_size);
303 ZLIB_PMD_ERR("Compression algorithm not supported\n");
307 if (inflateInit2(strm, wbits) != Z_OK) {
308 ZLIB_PMD_ERR("Inflate init failed\n");
319 zlib_pmd_enqueue_burst(void *queue_pair,
320 struct rte_comp_op **ops, uint16_t nb_ops)
322 struct zlib_qp *qp = queue_pair;
326 for (i = 0; i < nb_ops; i++) {
327 ret = process_zlib_op(qp, ops[i]);
328 if (unlikely(ret < 0)) {
329 /* increment count if failed to push to completion
332 qp->qp_stats.enqueue_err_count++;
334 qp->qp_stats.enqueued_count++;
342 zlib_pmd_dequeue_burst(void *queue_pair,
343 struct rte_comp_op **ops, uint16_t nb_ops)
345 struct zlib_qp *qp = queue_pair;
347 unsigned int nb_dequeued = 0;
349 nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
350 (void **)ops, nb_ops, NULL);
351 qp->qp_stats.dequeued_count += nb_dequeued;
357 zlib_create(const char *name,
358 struct rte_vdev_device *vdev,
359 struct rte_compressdev_pmd_init_params *init_params)
361 struct rte_compressdev *dev;
363 dev = rte_compressdev_pmd_create(name, &vdev->device,
364 sizeof(struct zlib_private), init_params);
366 ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
370 dev->dev_ops = rte_zlib_pmd_ops;
372 /* register rx/tx burst functions for data path */
373 dev->dequeue_burst = zlib_pmd_dequeue_burst;
374 dev->enqueue_burst = zlib_pmd_enqueue_burst;
380 zlib_probe(struct rte_vdev_device *vdev)
382 struct rte_compressdev_pmd_init_params init_params = {
387 const char *input_args;
390 name = rte_vdev_device_name(vdev);
395 input_args = rte_vdev_device_args(vdev);
397 retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
400 "Failed to parse initialisation arguments[%s]\n",
405 return zlib_create(name, vdev, &init_params);
409 zlib_remove(struct rte_vdev_device *vdev)
411 struct rte_compressdev *compressdev;
414 name = rte_vdev_device_name(vdev);
418 compressdev = rte_compressdev_pmd_get_named_dev(name);
419 if (compressdev == NULL)
422 return rte_compressdev_pmd_destroy(compressdev);
425 static struct rte_vdev_driver zlib_pmd_drv = {
427 .remove = zlib_remove
430 RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
432 RTE_INIT(zlib_init_log)
434 zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
435 if (zlib_logtype_driver >= 0)
436 rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);