/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#ifndef _RTE_COMP_H_
#define _RTE_COMP_H_

/**
 * @file rte_comp.h
 *
 * RTE definitions for Data Compression Service
 */

/**
 * compression service feature flags
 *
 * @note New features flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_comp_get_feature_name()
 */
#define RTE_COMP_FF_STATEFUL_COMPRESSION (1ULL << 0)
/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION (1ULL << 1)
/**< Stateful decompression is supported */
#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT (1ULL << 2)
/**< Out-of-place Scatter-gather (SGL) buffers,
 * with multiple segments, are supported in input and output
 */
#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT (1ULL << 3)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in input, combined with linear buffers (LB), with a
 * single segment, in output
 */
#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT (1ULL << 4)
/**< Out-of-place Scatter-gather (SGL) buffers are supported
 * in output, combined with linear buffers (LB) in input
 */
#define RTE_COMP_FF_ADLER32_CHECKSUM (1ULL << 5)
/**< Adler-32 Checksum is supported */
#define RTE_COMP_FF_CRC32_CHECKSUM (1ULL << 6)
/**< CRC32 Checksum is supported */
#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM (1ULL << 7)
/**< Adler-32/CRC32 Checksum is supported */
#define RTE_COMP_FF_MULTI_PKT_CHECKSUM (1ULL << 8)
/**< Generation of checksum across multiple stateless packets is supported */
#define RTE_COMP_FF_SHA1_HASH (1ULL << 9)
/**< SHA1 Hash is supported */
#define RTE_COMP_FF_SHA2_SHA256_HASH (1ULL << 10)
/**< SHA256 Hash of SHA2 family is supported */
#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS (1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM (1ULL << 12)
/**< Private xforms created by the PMD can be shared
 * across multiple stateless operations. If not set, then app needs
 * to create as many priv_xforms as it expects to have stateless
 * operations in-flight.
 */
#define RTE_COMP_FF_HUFFMAN_FIXED (1ULL << 13)
/**< Fixed huffman encoding is supported */
#define RTE_COMP_FF_HUFFMAN_DYNAMIC (1ULL << 14)
/**< Dynamic huffman encoding is supported */
/** Status of comp operation */
enum rte_comp_op_status {
	RTE_COMP_OP_STATUS_SUCCESS = 0,
	/**< Operation completed successfully */
	RTE_COMP_OP_STATUS_NOT_PROCESSED,
	/**< Operation has not yet been processed by the device */
	RTE_COMP_OP_STATUS_INVALID_ARGS,
	/**< Operation failed due to invalid arguments in request */
	RTE_COMP_OP_STATUS_ERROR,
	/**< Error handling operation */
	RTE_COMP_OP_STATUS_INVALID_STATE,
	/**< Operation is invoked in invalid state */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
	/**< Output buffer ran out of space before operation completed.
	 * Error case. Application must resubmit all data with a larger
	 * output buffer.
	 */
	RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
	/**< Output buffer ran out of space before operation completed, but this
	 * is not an error case. Output data up to op.produced can be used and
	 * next op in the stream should continue on from op.consumed+1.
	 */
};
/** Compression Algorithms */
enum rte_comp_algorithm {
	RTE_COMP_ALGO_UNSPECIFIED = 0,
	/** No Compression algorithm */
	RTE_COMP_ALGO_NULL,
	/**< No compression.
	 * Pass-through, data is copied unchanged from source buffer to
	 * destination buffer.
	 */
	RTE_COMP_ALGO_DEFLATE,
	/**< DEFLATE compression algorithm
	 * https://tools.ietf.org/html/rfc1951
	 */
	RTE_COMP_ALGO_LZS,
	/**< LZS compression algorithm
	 * https://tools.ietf.org/html/rfc2395
	 */
	RTE_COMP_ALGO_LIST_END
};
/** Compression Hash Algorithms */
enum rte_comp_hash_algorithm {
	RTE_COMP_HASH_ALGO_NONE = 0,
	/**< No hash */
	RTE_COMP_HASH_ALGO_SHA1,
	/**< SHA1 hash algorithm */
	RTE_COMP_HASH_ALGO_SHA2_256,
	/**< SHA256 hash algorithm of SHA2 family */
	RTE_COMP_HASH_ALGO_LIST_END
};
/**< Compression Level.
 * The number is interpreted by each PMD differently. However, lower numbers
 * give fastest compression, at the expense of compression ratio while
 * higher numbers may give better compression ratios but are likely slower.
 */
#define RTE_COMP_LEVEL_PMD_DEFAULT (-1)
/** Use PMD Default */
#define RTE_COMP_LEVEL_NONE (0)
/** Output uncompressed blocks if supported by the specified algorithm */
#define RTE_COMP_LEVEL_MIN (1)
/** Use minimum compression level supported by the PMD */
#define RTE_COMP_LEVEL_MAX (9)
/** Use maximum compression level supported by the PMD */
/** Compression checksum types */
enum rte_comp_checksum_type {
	RTE_COMP_CHECKSUM_NONE,
	/**< No checksum generated */
	RTE_COMP_CHECKSUM_CRC32,
	/**< Generates a CRC32 checksum, as used by gzip */
	RTE_COMP_CHECKSUM_ADLER32,
	/**< Generates an Adler-32 checksum, as used by zlib */
	RTE_COMP_CHECKSUM_CRC32_ADLER32,
	/**< Generates both Adler-32 and CRC32 checksums, concatenated.
	 * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
	 */
};
/** Compression Huffman Type - used by DEFLATE algorithm */
enum rte_comp_huffman {
	RTE_COMP_HUFFMAN_DEFAULT,
	/**< PMD may choose which Huffman codes to use */
	RTE_COMP_HUFFMAN_FIXED,
	/**< Use Fixed Huffman codes */
	RTE_COMP_HUFFMAN_DYNAMIC,
	/**< Use Dynamic Huffman codes */
};
/** Compression flush flags */
enum rte_comp_flush_flag {
	RTE_COMP_FLUSH_NONE,
	/**< Data is not flushed. Output may remain in the compressor and be
	 * processed during a following op. It may not be possible to decompress
	 * output until a later op with some other flush flag has been sent.
	 */
	RTE_COMP_FLUSH_SYNC,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. However state and history is not cleared, so future
	 * operations may use history from this operation.
	 */
	RTE_COMP_FLUSH_FULL,
	/**< All data should be flushed to output buffer. Output data can be
	 * decompressed. State and history data is cleared, so future
	 * ops will be independent of ops processed before this.
	 */
	RTE_COMP_FLUSH_FINAL
	/**< Same as RTE_COMP_FLUSH_FULL but if op.algo is RTE_COMP_ALGO_DEFLATE
	 * then bfinal bit is set in the last block.
	 */
};
/** Compression transform types */
enum rte_comp_xform_type {
	RTE_COMP_COMPRESS,
	/**< Compression service - compress */
	RTE_COMP_DECOMPRESS,
	/**< Compression service - decompress */
};
/** Compression operation type */
enum rte_comp_op_type {
	RTE_COMP_OP_STATELESS,
	/**< All data to be processed is submitted in the op, no state or
	 * history from previous ops is used and none will be stored for future
	 * ops. Flush flag must be set to either FLUSH_FULL or FLUSH_FINAL.
	 */
	RTE_COMP_OP_STATEFUL
	/**< There may be more data to be processed after this op, it's part of
	 * a stream of data. State and history from previous ops can be used
	 * and resulting state and history can be stored for future ops,
	 * depending on flush flag.
	 */
};
211 /** Parameters specific to the deflate algorithm */
212 struct rte_comp_deflate_params {
213 enum rte_comp_huffman huffman;
214 /**< Compression huffman encoding type */
217 /** Setup Data for compression */
218 struct rte_comp_compress_xform {
219 enum rte_comp_algorithm algo;
220 /**< Algorithm to use for compress operation */
222 struct rte_comp_deflate_params deflate;
223 /**< Parameters specific to the deflate algorithm */
224 }; /**< Algorithm specific parameters */
226 /**< Compression level */
228 /**< Base two log value of sliding window to be used. If window size
229 * can't be supported by the PMD then it may fall back to a smaller
230 * size. This is likely to result in a worse compression ratio.
232 enum rte_comp_checksum_type chksum;
233 /**< Type of checksum to generate on the uncompressed data */
234 enum rte_comp_hash_algorithm hash_algo;
235 /**< Hash algorithm to be used with compress operation. Hash is always
241 * Setup Data for decompression.
243 struct rte_comp_decompress_xform {
244 enum rte_comp_algorithm algo;
245 /**< Algorithm to use for decompression */
246 enum rte_comp_checksum_type chksum;
247 /**< Type of checksum to generate on the decompressed data */
249 /**< Base two log value of sliding window which was used to generate
250 * compressed data. If window size can't be supported by the PMD then
251 * setup of stream or private_xform should fail.
253 enum rte_comp_hash_algorithm hash_algo;
254 /**< Hash algorithm to be used with decompress operation. Hash is always
260 * Compression transform structure.
262 * This is used to specify the compression transforms required.
263 * Each transform structure can hold a single transform, the type field is
264 * used to specify which transform is contained within the union.
266 struct rte_comp_xform {
267 enum rte_comp_xform_type type;
270 struct rte_comp_compress_xform compress;
271 /**< xform for compress operation */
272 struct rte_comp_decompress_xform decompress;
273 /**< decompress xform */
278 * Compression Operation.
280 * This structure contains data relating to performing a compression
281 * operation on the referenced mbuf data buffers.
283 * Comp operations are enqueued and dequeued in comp PMDs using the
284 * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
287 enum rte_comp_op_type op_type;
290 /**< Stateless private PMD data derived from an rte_comp_xform.
291 * A handle returned by rte_compressdev_private_xform_create()
292 * must be attached to operations of op_type RTE_COMP_STATELESS.
295 /**< Private PMD data derived initially from an rte_comp_xform,
296 * which holds state and history data and evolves as operations
297 * are processed. rte_compressdev_stream_create() must be called
298 * on a device for all STATEFUL data streams and the resulting
299 * stream attached to the one or more operations associated
300 * with the data stream.
301 * All operations in a stream must be sent to the same device.
305 struct rte_mempool *mempool;
306 /**< Pool from which operation is allocated */
307 rte_iova_t iova_addr;
308 /**< IOVA address of this operation */
309 struct rte_mbuf *m_src;
311 * The total size of the input buffer(s) can be retrieved using
312 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
313 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
314 * If the input data is bigger than this it can be passed to the PMD in
315 * a chain of mbufs if the PMD's capabilities indicate it supports this.
317 struct rte_mbuf *m_dst;
318 /**< destination mbuf
319 * The total size of the output buffer(s) can be retrieved using
320 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
321 * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
322 * If the output data is expected to be bigger than this a chain of
323 * mbufs can be passed to the PMD if the PMD's capabilities indicate
326 * @note, if incompressible data is passed to an engine for compression
327 * using RTE_COMP_ALGO_DEFLATE, it's possible for the output data
328 * to be larger than the uncompressed data, due to the inclusion
329 * of the DEFLATE header blocks. The size of m_dst should accommodate
330 * this, else OUT_OF_SPACE errors can be expected in this case.
335 /**< Starting point for compression or decompression,
336 * specified as number of bytes from start of packet in
338 * This offset starts from the first segment
339 * of the buffer, in case the m_src is a chain of mbufs.
340 * Starting point for checksum generation in compress direction.
343 /**< The length, in bytes, of the data in source buffer
344 * to be compressed or decompressed.
345 * Also the length of the data over which the checksum
346 * should be generated in compress direction
351 /**< Starting point for writing output data, specified as
352 * number of bytes from start of packet in dest
354 * This offset starts from the first segment
355 * of the buffer, in case the m_dst is a chain of mbufs.
356 * Starting point for checksum generation in
357 * decompress direction.
362 /**< Output buffer to store hash output, if enabled in xform.
363 * Buffer would contain valid value only after an op with
364 * flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is processed
367 * Length of buffer should be contiguous and large enough to
368 * accommodate digest produced by specific hash algo.
370 rte_iova_t iova_addr;
371 /**< IO address of the buffer */
373 enum rte_comp_flush_flag flush_flag;
374 /**< Defines flush characteristics for the output data.
375 * Only applicable in compress direction
377 uint64_t input_chksum;
378 /**< An input checksum can be provided to generate a
379 * cumulative checksum across sequential blocks in a STATELESS stream.
380 * Checksum type is as specified in xform chksum_type
382 uint64_t output_chksum;
383 /**< If a checksum is generated it will be written in here.
384 * Checksum type is as specified in xform chksum_type.
387 /**< The number of bytes from the source buffer
388 * which were compressed/decompressed.
391 /**< The number of bytes written to the destination buffer
392 * which were compressed/decompressed.
394 uint64_t debug_status;
396 * Status of the operation is returned in the status param.
397 * This field allows the PMD to pass back extra
398 * pmd-specific debug information. Value is not defined on the API.
402 * Operation status - use values from enum rte_comp_status.
404 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
405 * will be set to RTE_COMP_OP_STATUS_SUCCESS after operation
406 * is successfully processed by a PMD
408 } __rte_cache_aligned;
411 * Creates an operation pool
416 * Number of elements in pool
418 * Number of elements to cache on lcore, see
419 * *rte_mempool_create* for further details about cache size
421 * Size of private data to allocate for user with each operation
423 * Socket to identifier allocate memory on
425 * - On success pointer to mempool
430 rte_comp_op_pool_create(const char *name,
431 unsigned int nb_elts, unsigned int cache_size,
432 uint16_t user_size, int socket_id);
435 * Allocate an operation from a mempool with default parameters set
438 * Compress operation mempool
441 * - On success returns a valid rte_comp_op structure
442 * - On failure returns NULL
446 rte_comp_op_alloc(struct rte_mempool *mempool);
449 * Bulk allocate operations from a mempool with default parameters set
452 * Compress operation mempool
454 * Array to place allocated operations
456 * Number of operations to allocate
458 * - nb_ops: Success, the nb_ops requested was allocated
459 * - 0: Not enough entries in the mempool; no ops are retrieved.
463 rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
464 struct rte_comp_op **ops, uint16_t nb_ops);
467 * Free operation structure
468 * If operation has been allocate from a rte_mempool, then the operation will
469 * be returned to the mempool.
476 rte_comp_op_free(struct rte_comp_op *op);
479 * Bulk free operation structures
480 * If operations have been allocated from an rte_mempool, then the operations
481 * will be returned to the mempool.
482 * The array entry will be cleared.
485 * Array of Compress operations
487 * Number of operations to free
491 rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops);
494 * Get the name of a compress service feature flag
497 * The mask describing the flag
500 * The name of this flag, or NULL if it's not a valid feature flag.
504 rte_comp_get_feature_name(uint64_t flag);
510 #endif /* _RTE_COMP_H_ */