/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMP_H_
#define _RTE_COMP_H_

/**
 * @file rte_comp.h
 *
 * RTE definitions for Data Compression Service
 */
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error handling operation */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation is invoked in invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before operation completed.
	 * Error case. Application must resubmit all data with a larger
	 * output buffer.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before operation completed, but this
	 * is not an error case. Output data up to op.produced can be used and
	 * next op in the stream should continue on from op.consumed+1.
	 */
};
/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/** No Compression algorithm */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from source buffer to
	 * destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
	RTE_COMP_ALGO_LIST_END
};
/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of SHA2 family */
	RTE_COMP_HASH_ALGO_LIST_END
};
/**< Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give fastest compression, at the expense of compression ratio while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define RTE_COMP_LEVEL_PMD_DEFAULT	(-1)
/** Use PMD Default */
#define RTE_COMP_LEVEL_NONE		(0)
/** Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN		(1)
/** Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX		(9)
/** Use maximum compression level supported by the PMD */
/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
	 * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
	 */
};
/** Compression Huffman Type - used by DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use Fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use Dynamic Huffman codes */
};
/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to decompress
	 * output until a later op with some other flush flag has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. However state and history is not cleared, so future
	 * operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. State and history data is cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL
	/**< Same as RTE_COMP_FLUSH_FULL but if op.algo is RTE_COMP_ALGO_DEFLATE
	 * then bfinal bit is set in the last block.
	 */
};
/** Compression transform types */
enum rte_comp_xform_type {
	RTE_COMP_COMPRESS,
	/**< Compression service - compress */
	RTE_COMP_DECOMPRESS,
	/**< Compression service - decompress */
};
/** Compression operation type */
enum rte_comp_op_type {
	RTE_COMP_OP_STATELESS,
	/**< All data to be processed is submitted in the op, no state or
	 * history from previous ops is used and none will be stored for future
	 * ops. Flush flag must be set to either FLUSH_FULL or FLUSH_FINAL.
	 */
	RTE_COMP_OP_STATEFUL
	/**< There may be more data to be processed after this op, it's part of
	 * a stream of data. State and history from previous ops can be used
	 * and resulting state and history can be stored for future ops,
	 * depending on flush flag.
	 */
};
163 /** Parameters specific to the deflate algorithm */
164 struct rte_comp_deflate_params {
165 enum rte_comp_huffman huffman;
166 /**< Compression huffman encoding type */
169 /** Setup Data for compression */
170 struct rte_comp_compress_xform {
171 enum rte_comp_algorithm algo;
172 /**< Algorithm to use for compress operation */
174 struct rte_comp_deflate_params deflate;
175 /**< Parameters specific to the deflate algorithm */
176 }; /**< Algorithm specific parameters */
178 /**< Compression level */
180 /**< Base two log value of sliding window to be used. If window size
181 * can't be supported by the PMD then it may fall back to a smaller
182 * size. This is likely to result in a worse compression ratio.
184 enum rte_comp_checksum_type chksum;
185 /**< Type of checksum to generate on the uncompressed data */
186 enum rte_comp_hash_algorithm hash_algo;
187 /**< Hash algorithm to be used with compress operation. Hash is always
193 * Setup Data for decompression.
195 struct rte_comp_decompress_xform {
196 enum rte_comp_algorithm algo;
197 /**< Algorithm to use for decompression */
198 enum rte_comp_checksum_type chksum;
199 /**< Type of checksum to generate on the decompressed data */
201 /**< Base two log value of sliding window which was used to generate
202 * compressed data. If window size can't be supported by the PMD then
203 * setup of stream or private_xform should fail.
205 enum rte_comp_hash_algorithm hash_algo;
206 /**< Hash algorithm to be used with decompress operation. Hash is always
212 * Compression transform structure.
214 * This is used to specify the compression transforms required.
215 * Each transform structure can hold a single transform, the type field is
216 * used to specify which transform is contained within the union.
218 struct rte_comp_xform {
219 enum rte_comp_xform_type type;
222 struct rte_comp_compress_xform compress;
223 /**< xform for compress operation */
224 struct rte_comp_decompress_xform decompress;
225 /**< decompress xform */
230 * Compression Operation.
232 * This structure contains data relating to performing a compression
233 * operation on the referenced mbuf data buffers.
235 * Comp operations are enqueued and dequeued in comp PMDs using the
236 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
239 enum rte_comp_op_type op_type;
242 /**< Stateless private PMD data derived from an rte_comp_xform.
243 * A handle returned by rte_compressdev_private_xform_create()
244 * must be attached to operations of op_type RTE_COMP_STATELESS.
247 /**< Private PMD data derived initially from an rte_comp_xform,
248 * which holds state and history data and evolves as operations
249 * are processed. rte_compressdev_stream_create() must be called
250 * on a device for all STATEFUL data streams and the resulting
251 * stream attached to the one or more operations associated
252 * with the data stream.
253 * All operations in a stream must be sent to the same device.
257 struct rte_mempool *mempool;
258 /**< Pool from which operation is allocated */
259 rte_iova_t iova_addr;
260 /**< IOVA address of this operation */
261 struct rte_mbuf *m_src;
263 * The total size of the input buffer(s) can be retrieved using
264 * rte_pktmbuf_data_len(m_src)
266 struct rte_mbuf *m_dst;
267 /**< destination mbuf
268 * The total size of the output buffer(s) can be retrieved using
269 * rte_pktmbuf_data_len(m_dst)
274 /**< Starting point for compression or decompression,
275 * specified as number of bytes from start of packet in
277 * Starting point for checksum generation in compress direction.
280 /**< The length, in bytes, of the data in source buffer
281 * to be compressed or decompressed.
282 * Also the length of the data over which the checksum
283 * should be generated in compress direction
288 /**< Starting point for writing output data, specified as
289 * number of bytes from start of packet in dest
290 * buffer. Starting point for checksum generation in
291 * decompress direction.
296 /**< Output buffer to store hash output, if enabled in xform.
297 * Buffer would contain valid value only after an op with
298 * flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is processed
301 * Length of buffer should be contiguous and large enough to
302 * accommodate digest produced by specific hash algo.
304 rte_iova_t iova_addr;
305 /**< IO address of the buffer */
307 enum rte_comp_flush_flag flush_flag;
308 /**< Defines flush characteristics for the output data.
309 * Only applicable in compress direction
311 uint64_t input_chksum;
312 /**< An input checksum can be provided to generate a
313 * cumulative checksum across sequential blocks in a STATELESS stream.
314 * Checksum type is as specified in xform chksum_type
316 uint64_t output_chksum;
317 /**< If a checksum is generated it will be written in here.
318 * Checksum type is as specified in xform chksum_type.
321 /**< The number of bytes from the source buffer
322 * which were compressed/decompressed.
325 /**< The number of bytes written to the destination buffer
326 * which were compressed/decompressed.
328 uint64_t debug_status;
330 * Status of the operation is returned in the status param.
331 * This field allows the PMD to pass back extra
332 * pmd-specific debug information. Value is not defined on the API.
336 * Operation status - use values from enum rte_comp_status.
338 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
339 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
340 * is successfully processed by a PMD
342 } __rte_cache_aligned;
/**
 * Creates an operation pool
 *
 * @param name
 *   Compress pool name
 * @param nb_elts
 *   Number of elements in pool
 * @param cache_size
 *   Number of elements to cache on lcore, see
 *   *rte_mempool_create* for further details about cache size
 * @param user_size
 *   Size of private data to allocate for user with each operation
 * @param socket_id
 *   Socket to identifier allocate memory on
 * @return
 *   - On success pointer to mempool
 *   - On failure NULL
 */
struct rte_mempool * __rte_experimental
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id);
/**
 * Allocate an operation from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 *
 * @return
 *   - On success returns a valid rte_comp_op structure
 *   - On failure returns NULL
 */
struct rte_comp_op * __rte_experimental
rte_comp_op_alloc(struct rte_mempool *mempool);
/**
 * Bulk allocate operations from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - 0: Success
 *   - -ENOENT: Not enough entries in the mempool; no ops are retrieved.
 */
int __rte_experimental
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops);
/**
 * Free operation structure
 * If operation has been allocated from a rte_mempool, then the operation will
 * be returned to the mempool.
 *
 * @param op
 *   Compress operation pointer allocated from rte_comp_op_pool_create()
 */
void __rte_experimental
rte_comp_op_free(struct rte_comp_op *op);
412 #endif /* _RTE_COMP_H_ */