/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMP_H_
#define _RTE_COMP_H_

/**
 * @file rte_comp.h
 *
 * RTE definitions for Data Compression Service
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_common.h>
#include <rte_mempool.h>

/**
 * Compression service feature flags
 *
 * @note New feature flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_comp_get_feature_name()
 */
#define RTE_COMP_FF_STATEFUL_COMPRESSION	(1ULL << 0)
/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION	(1ULL << 1)
/**< Stateful decompression is supported */
#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT		(1ULL << 2)
/**< Out-of-place Scatter-gather (SGL) buffers,
 * with multiple segments, are supported in input and output
 */
#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT		(1ULL << 3)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment, in output
 */
#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT		(1ULL << 4)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_COMP_FF_ADLER32_CHECKSUM		(1ULL << 5)
/**< Adler-32 Checksum is supported */
#define RTE_COMP_FF_CRC32_CHECKSUM		(1ULL << 6)
/**< CRC32 Checksum is supported */
#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM	(1ULL << 7)
/**< Adler-32/CRC32 Checksum is supported */
#define RTE_COMP_FF_MULTI_PKT_CHECKSUM		(1ULL << 8)
/**< Generation of checksum across multiple stateless packets is supported */
#define RTE_COMP_FF_SHA1_HASH			(1ULL << 9)
/**< SHA1 Hash is supported */
#define RTE_COMP_FF_SHA2_SHA256_HASH		(1ULL << 10)
/**< SHA256 Hash of SHA2 family is supported */
#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS	(1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM	(1ULL << 12)
/**< Private xforms created by the PMD can be shared
 * across multiple stateless operations. If not set, then the app needs
 * to create as many priv_xforms as it expects to have stateless
 * operations in-flight.
 */
#define RTE_COMP_FF_HUFFMAN_FIXED		(1ULL << 13)
/**< Fixed Huffman encoding is supported */
#define RTE_COMP_FF_HUFFMAN_DYNAMIC		(1ULL << 14)
/**< Dynamic Huffman encoding is supported */
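
/*
 * Example (illustrative sketch): before relying on a capability, an
 * application would typically test the device's feature flags against the
 * masks above. This assumes rte_compressdev_info_get() and struct
 * rte_compressdev_info from rte_compressdev.h; "dev_id" is a placeholder.
 *
 *	struct rte_compressdev_info info;
 *
 *	rte_compressdev_info_get(dev_id, &info);
 *	if (!(info.feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT)) {
 *		... fall back to linear (single-segment) output buffers ...
 *	}
 */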

/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error while handling the operation */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation was invoked in an invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before operation completed.
	 * Error case. Application must resubmit all data with a larger
	 * output buffer size.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before operation completed, but this
	 * is not an error case. Output data up to op.produced can be used and
	 * next op in the stream should continue on from op.consumed+1.
	 */
};
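
/*
 * Example (illustrative sketch): handling the two out-of-space statuses
 * after dequeue. The field and enum names are from this file; the
 * consume_output(), resubmit_from() and resubmit_with_larger_dst() helpers
 * are hypothetical.
 *
 *	switch (op->status) {
 *	case RTE_COMP_OP_STATUS_SUCCESS:
 *		consume_output(op->m_dst, op->produced);
 *		break;
 *	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE:
 *		consume_output(op->m_dst, op->produced);
 *		resubmit_from(op, op->consumed);	// continue the stream
 *		break;
 *	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
 *		resubmit_with_larger_dst(op);	// error: resubmit all data
 *		break;
 *	default:
 *		...
 *	}
 */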

/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/**< No Compression algorithm */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from source buffer to
	 * destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
	RTE_COMP_ALGO_LIST_END
};

/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of SHA2 family */
	RTE_COMP_HASH_ALGO_LIST_END
};

/**
 * Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give faster compression, at the expense of compression ratio, while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define RTE_COMP_LEVEL_PMD_DEFAULT	(-1)
/** Use PMD Default */
#define RTE_COMP_LEVEL_NONE		(0)
/** Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN		(1)
/** Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX		(9)
/** Use maximum compression level supported by the PMD */

/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
	 * CRC32 is in the lower 32 bits, Adler-32 in the upper 32 bits.
	 */
};
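
/*
 * Example (illustrative sketch): with RTE_COMP_CHECKSUM_CRC32_ADLER32 the
 * two checksums share one 64-bit checksum field of the op (CRC32 in the
 * lower 32 bits, Adler-32 in the upper 32 bits), so they can be unpacked
 * as below.
 *
 *	uint32_t crc32 = (uint32_t)(op->output_chksum & 0xffffffff);
 *	uint32_t adler32 = (uint32_t)(op->output_chksum >> 32);
 */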

/** Compression Huffman Type - used by DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use Fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use Dynamic Huffman codes */
};

/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to decompress
	 * output until a later op with some other flush flag has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. However, state and history are not cleared, so future
	 * operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. State and history data is cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL,
	/**< Same as RTE_COMP_FLUSH_FULL but if op.algo is RTE_COMP_ALGO_DEFLATE
	 * then the bfinal bit is set in the last block.
	 */
};

/** Compression transform types */
enum rte_comp_xform_type {
	RTE_COMP_COMPRESS,
	/**< Compression service - compress */
	RTE_COMP_DECOMPRESS,
	/**< Compression service - decompress */
};

/** Compression operation type */
enum rte_comp_op_type {
	RTE_COMP_OP_STATELESS,
	/**< All data to be processed is submitted in the op, no state or
	 * history from previous ops is used and none will be stored for future
	 * ops. Flush flag must be set to either FLUSH_FULL or FLUSH_FINAL.
	 */
	RTE_COMP_OP_STATEFUL,
	/**< There may be more data to be processed after this op, it's part of
	 * a stream of data. State and history from previous ops can be used
	 * and resulting state and history can be stored for future ops,
	 * depending on flush flag.
	 */
};

/** Parameters specific to the deflate algorithm */
struct rte_comp_deflate_params {
	enum rte_comp_huffman huffman;
	/**< Compression Huffman encoding type */
};

/** Setup Data for compression */
struct rte_comp_compress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for compress operation */
	union {
		struct rte_comp_deflate_params deflate;
		/**< Parameters specific to the deflate algorithm */
	}; /**< Algorithm specific parameters */
	int level;
	/**< Compression level */
	uint8_t window_size;
	/**< Base two log value of sliding window to be used. If window size
	 * can't be supported by the PMD then it may fall back to a smaller
	 * size. This is likely to result in a worse compression ratio.
	 */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the uncompressed data */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with compress operation. Hash is always
	 * done on plaintext.
	 */
};

/**
 * Setup Data for decompression.
 */
struct rte_comp_decompress_xform {
	enum rte_comp_algorithm algo;
	/**< Algorithm to use for decompression */
	enum rte_comp_checksum_type chksum;
	/**< Type of checksum to generate on the decompressed data */
	uint8_t window_size;
	/**< Base two log value of sliding window which was used to generate
	 * compressed data. If window size can't be supported by the PMD then
	 * setup of stream or private_xform should fail.
	 */
	enum rte_comp_hash_algorithm hash_algo;
	/**< Hash algorithm to be used with decompress operation. Hash is always
	 * done on plaintext.
	 */
};

/**
 * Compression transform structure.
 *
 * This is used to specify the compression transforms required.
 * Each transform structure can hold a single transform; the type field is
 * used to specify which transform is contained within the union.
 */
struct rte_comp_xform {
	enum rte_comp_xform_type type;
	union {
		struct rte_comp_compress_xform compress;
		/**< xform for compress operation */
		struct rte_comp_decompress_xform decompress;
		/**< xform for decompress operation */
	};
};
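
/*
 * Example (illustrative sketch): filling an xform for stateless DEFLATE
 * compression with a gzip-style CRC32 checksum. All identifiers are
 * defined in this file; the chosen values are only one possibility.
 *
 *	struct rte_comp_xform xform = {
 *		.type = RTE_COMP_COMPRESS,
 *		.compress = {
 *			.algo = RTE_COMP_ALGO_DEFLATE,
 *			.deflate.huffman = RTE_COMP_HUFFMAN_DEFAULT,
 *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
 *			.window_size = 15,	// base-two log: 32KB window
 *			.chksum = RTE_COMP_CHECKSUM_CRC32,
 *			.hash_algo = RTE_COMP_HASH_ALGO_NONE,
 *		},
 *	};
 */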

/**
 * Compression Operation.
 *
 * This structure contains data relating to performing a compression
 * operation on the referenced mbuf data buffers.
 *
 * Comp operations are enqueued and dequeued in comp PMDs using the
 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs.
 */
struct rte_comp_op {
	enum rte_comp_op_type op_type;
	union {
		void *private_xform;
		/**< Stateless private PMD data derived from an rte_comp_xform.
		 * A handle returned by rte_compressdev_private_xform_create()
		 * must be attached to operations of op_type
		 * RTE_COMP_OP_STATELESS.
		 */
		void *stream;
		/**< Private PMD data derived initially from an rte_comp_xform,
		 * which holds state and history data and evolves as operations
		 * are processed. rte_compressdev_stream_create() must be called
		 * on a device for all STATEFUL data streams and the resulting
		 * stream attached to the one or more operations associated
		 * with the data stream.
		 * All operations in a stream must be sent to the same device.
		 */
	};

	struct rte_mempool *mempool;
	/**< Pool from which operation is allocated */
	rte_iova_t iova_addr;
	/**< IOVA address of this operation */
	struct rte_mbuf *m_src;
	/**< Source mbuf.
	 * The total size of the input buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the input data is bigger than this it can be passed to the PMD in
	 * a chain of mbufs if the PMD's capabilities indicate it supports this.
	 */
	struct rte_mbuf *m_dst;
	/**< Destination mbuf.
	 * The total size of the output buffer(s) can be retrieved using
	 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
	 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
	 * If the output data is expected to be bigger than this a chain of
	 * mbufs can be passed to the PMD if the PMD's capabilities indicate
	 * it supports this.
	 *
	 * @note If incompressible data is passed to an engine for compression
	 * using RTE_COMP_ALGO_DEFLATE, it's possible for the output data
	 * to be larger than the uncompressed data, due to the inclusion
	 * of the DEFLATE header blocks. The size of m_dst should accommodate
	 * this, else OUT_OF_SPACE errors can be expected in this case.
	 */

	struct {
		uint32_t offset;
		/**< Starting point for compression or decompression,
		 * specified as number of bytes from start of packet in
		 * source buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_src is a chain of mbufs.
		 * Starting point for checksum generation in compress direction.
		 */
		uint32_t length;
		/**< The length, in bytes, of the data in source buffer
		 * to be compressed or decompressed.
		 * Also the length of the data over which the checksum
		 * should be generated in compress direction.
		 */
	} src;
	struct {
		uint32_t offset;
		/**< Starting point for writing output data, specified as
		 * number of bytes from start of packet in dest
		 * buffer.
		 * This offset starts from the first segment
		 * of the buffer, in case the m_dst is a chain of mbufs.
		 * Starting point for checksum generation in
		 * decompress direction.
		 */
	} dst;
	struct {
		uint8_t *digest;
		/**< Output buffer to store hash output, if enabled in xform.
		 * Buffer would contain valid value only after an op with
		 * flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is processed
		 * successfully.
		 *
		 * Buffer should be contiguous and large enough to
		 * accommodate the digest produced by the specific hash algo.
		 */
		rte_iova_t iova_addr;
		/**< IO address of the buffer */
	} hash;
	enum rte_comp_flush_flag flush_flag;
	/**< Defines flush characteristics for the output data.
	 * Only applicable in compress direction.
	 */
	uint64_t input_chksum;
	/**< An input checksum can be provided to generate a
	 * cumulative checksum across sequential blocks in a STATELESS stream.
	 * Checksum type is as specified in xform chksum_type.
	 */
	uint64_t output_chksum;
	/**< If a checksum is generated it will be written in here.
	 * Checksum type is as specified in xform chksum_type.
	 */
	uint32_t consumed;
	/**< The number of bytes from the source buffer
	 * which were compressed/decompressed.
	 */
	uint32_t produced;
	/**< The number of bytes written to the destination buffer
	 * which were compressed/decompressed.
	 */
	uint64_t debug_status;
	/**<
	 * Status of the operation is returned in the status param.
	 * This field allows the PMD to pass back extra
	 * pmd-specific debug information. Value is not defined on the API.
	 */
	uint8_t status;
	/**<
	 * Operation status - use values from enum rte_comp_op_status.
	 * This is reset to
	 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
	 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
	 * is successfully processed by a PMD.
	 */
} __rte_cache_aligned;
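
/*
 * Example (illustrative sketch): populating a stateless compress op.
 * "pool", "mbuf_src", "mbuf_dst" and "priv_xform" are placeholders assumed
 * to have been set up beforehand (see rte_comp_op_pool_create() below and
 * rte_compressdev_private_xform_create() in rte_compressdev.h).
 *
 *	struct rte_comp_op *op = rte_comp_op_alloc(pool);
 *
 *	op->op_type = RTE_COMP_OP_STATELESS;
 *	op->private_xform = priv_xform;
 *	op->m_src = mbuf_src;
 *	op->m_dst = mbuf_dst;
 *	op->src.offset = 0;
 *	op->src.length = rte_pktmbuf_pkt_len(mbuf_src);
 *	op->dst.offset = 0;
 *	op->flush_flag = RTE_COMP_FLUSH_FINAL;	// stateless: FULL or FINAL
 */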

/**
 * Creates an operation pool
 *
 * @param name
 *   Compress pool name
 * @param nb_elts
 *   Number of elements in pool
 * @param cache_size
 *   Number of elements to cache on lcore, see
 *   *rte_mempool_create* for further details about cache size
 * @param user_size
 *   Size of private data to allocate for user with each operation
 * @param socket_id
 *   Socket identifier to allocate the memory on
 * @return
 *   - On success pointer to mempool
 *   - On failure NULL
 */
struct rte_mempool * __rte_experimental
rte_comp_op_pool_create(const char *name,
		unsigned int nb_elts, unsigned int cache_size,
		uint16_t user_size, int socket_id);
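
/*
 * Example (illustrative sketch): creating an op pool at init time. The
 * pool name and sizing below are placeholders; rte_socket_id() comes from
 * rte_lcore.h.
 *
 *	struct rte_mempool *pool = rte_comp_op_pool_create("comp_op_pool",
 *			8192, 128, 0, rte_socket_id());
 *	if (pool == NULL)
 *		... handle allocation failure ...
 */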

/**
 * Allocate an operation from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 *
 * @return
 *   - On success returns a valid rte_comp_op structure
 *   - On failure returns NULL
 */
struct rte_comp_op * __rte_experimental
rte_comp_op_alloc(struct rte_mempool *mempool);

/**
 * Bulk allocate operations from a mempool with default parameters set
 *
 * @param mempool
 *   Compress operation mempool
 * @param ops
 *   Array to place allocated operations
 * @param nb_ops
 *   Number of operations to allocate
 * @return
 *   - nb_ops: Success, the nb_ops requested was allocated
 *   - 0: Not enough entries in the mempool; no ops are retrieved.
 */
uint16_t __rte_experimental
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
		struct rte_comp_op **ops, uint16_t nb_ops);
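
/*
 * Example (illustrative sketch): a burst-oriented caller can grab and
 * return ops in bulk; "pool" and BURST_SIZE are placeholders.
 *
 *	struct rte_comp_op *ops[BURST_SIZE];
 *
 *	if (rte_comp_op_bulk_alloc(pool, ops, BURST_SIZE) == 0)
 *		... mempool exhausted, retry later ...
 *	... fill ops, enqueue and dequeue them ...
 *	rte_comp_op_bulk_free(ops, BURST_SIZE);
 */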

/**
 * Free operation structure
 * If operation has been allocated from an rte_mempool, then the operation
 * will be returned to the mempool.
 *
 * @param op
 *   Compress operation
 */
void __rte_experimental
rte_comp_op_free(struct rte_comp_op *op);

/**
 * Bulk free operation structures
 * If operations have been allocated from an rte_mempool, then the operations
 * will be returned to the mempool.
 * The array entry will be cleared.
 *
 * @param ops
 *   Array of Compress operations
 * @param nb_ops
 *   Number of operations to free
 */
void __rte_experimental
rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops);

/**
 * Get the name of a compress service feature flag
 *
 * @param flag
 *   The mask describing the flag
 *
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
const char * __rte_experimental
rte_comp_get_feature_name(uint64_t flag);
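
/*
 * Example (illustrative sketch): printing the names of all feature flags
 * advertised by a device, given a feature-flag mask such as the
 * info.feature_flags value shown earlier; printf() assumes stdio.h.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < 64; i++) {
 *		uint64_t flag = 1ULL << i;
 *		if ((feature_flags & flag) &&
 *				rte_comp_get_feature_name(flag) != NULL)
 *			printf("%s\n", rte_comp_get_feature_name(flag));
 *	}
 */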

#ifdef __cplusplus
}
#endif

#endif /* _RTE_COMP_H_ */