1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
5 #ifndef _RTE_COMPRESSDEV_H_
6 #define _RTE_COMPRESSDEV_H_
9 * @file rte_compressdev.h
11 * RTE Compression Device APIs
13 * Defines comp device APIs for the provisioning of compression operations.
20 #include <rte_common.h>
25 * Parameter log base 2 range description.
26 * Final value will be 2^value.
28 struct rte_param_log2_range {
29 uint8_t min; /**< Minimum log2 value */
30 uint8_t max; /**< Maximum log2 value */
32 /**< If a range of sizes is supported,
33 * this parameter is used to indicate
34 * increments in base 2 log byte value
35 * that are supported between the minimum and maximum
39 /** Structure used to capture a capability of a comp device */
40 struct rte_compressdev_capabilities {
41 enum rte_comp_algorithm algo;
42 /* Compression algorithm */
43 uint64_t comp_feature_flags;
44 /**< Bitmask of flags for compression service features */
45 struct rte_param_log2_range window_size;
46 /**< Window size range in base two log byte values */
49 /** Macro used at end of comp PMD list */
50 #define RTE_COMP_END_OF_CAPABILITIES_LIST() \
51 { RTE_COMP_ALGO_UNSPECIFIED }
53 const struct rte_compressdev_capabilities * __rte_experimental
54 rte_compressdev_capability_get(uint8_t dev_id,
55 enum rte_comp_algorithm algo);
58 * compression device supported feature flags
60 * @note New features flags should be added to the end of the list
62 * Keep these flags synchronised with rte_compressdev_get_feature_name()
64 #define RTE_COMPDEV_FF_HW_ACCELERATED (1ULL << 0)
65 /**< Operations are off-loaded to an external hardware accelerator */
66 #define RTE_COMPDEV_FF_CPU_SSE (1ULL << 1)
67 /**< Utilises CPU SIMD SSE instructions */
68 #define RTE_COMPDEV_FF_CPU_AVX (1ULL << 2)
69 /**< Utilises CPU SIMD AVX instructions */
70 #define RTE_COMPDEV_FF_CPU_AVX2 (1ULL << 3)
71 /**< Utilises CPU SIMD AVX2 instructions */
72 #define RTE_COMPDEV_FF_CPU_AVX512 (1ULL << 4)
73 /**< Utilises CPU SIMD AVX512 instructions */
74 #define RTE_COMPDEV_FF_CPU_NEON (1ULL << 5)
75 /**< Utilises CPU NEON instructions */
78 * Get the name of a compress device feature flag.
81 * The mask describing the flag
84 * The name of this flag, or NULL if it's not a valid feature flag.
86 const char * __rte_experimental
87 rte_compressdev_get_feature_name(uint64_t flag);
89 /** comp device information */
90 struct rte_compressdev_info {
91 const char *driver_name; /**< Driver name. */
92 uint64_t feature_flags; /**< Feature flags */
93 const struct rte_compressdev_capabilities *capabilities;
94 /**< Array of the device's supported capabilities
95 uint16_t max_nb_queue_pairs;
96 /**< Maximum number of queue pairs supported by device.
97 * (If 0, there is no limit on the maximum number of queue pairs)
101 /** comp device statistics */
102 struct rte_compressdev_stats {
103 uint64_t enqueued_count;
104 /**< Count of all operations enqueued */
105 uint64_t dequeued_count;
106 /**< Count of all operations dequeued */
108 uint64_t enqueue_err_count;
109 /**< Total error count on operations enqueued */
110 uint64_t dequeue_err_count;
111 /**< Total error count on operations dequeued */
115 * Get the compress device name given a device identifier.
118 * Compress device identifier
120 * - Returns compress device name.
121 * - Returns NULL if compress device is not present.
123 const char * __rte_experimental
124 rte_compressdev_name_get(uint8_t dev_id);
127 * Get the total number of compress devices that have been successfully
131 * - The total number of usable compress devices.
133 uint8_t __rte_experimental
134 rte_compressdev_count(void);
137 * Get number and identifiers of attached comp devices that
138 * use the same compress driver.
143 * Output devices identifiers
145 * Maximal number of devices
148 * Returns number of attached compress devices.
150 uint8_t __rte_experimental
151 rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
155 * Return the NUMA socket to which a device is connected.
158 * Compress device identifier
160 * The NUMA socket id to which the device is connected or
161 * a default of zero if the socket could not be determined.
162 * -1 if the dev_id value is out of range.
164 int __rte_experimental
165 rte_compressdev_socket_id(uint8_t dev_id);
167 /** Compress device configuration structure */
168 struct rte_compressdev_config {
170 /**< Socket on which to allocate resources */
171 uint16_t nb_queue_pairs;
172 /**< Total number of queue pairs to configure on a device */
173 uint16_t max_nb_priv_xforms;
174 /**< Max number of private_xforms which will be created on the device */
175 uint16_t max_nb_streams;
176 /**< Max number of streams which will be created on the device */
180 * Configure a device.
182 * This function must be invoked first before any other function in the
183 * API. This function can also be re-invoked when a device is in the
187 * Compress device identifier
189 * The compress device configuration
191 * - 0: Success, device configured.
192 * - <0: Error code returned by the driver configuration function.
194 int __rte_experimental
195 rte_compressdev_configure(uint8_t dev_id,
196 struct rte_compressdev_config *config);
201 * The device start step is called after configuring the device and setting up
203 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
207 * Compress device identifier
209 * - 0: Success, device started.
210 * - <0: Error code of the driver device start function.
212 int __rte_experimental
213 rte_compressdev_start(uint8_t dev_id);
216 * Stop a device. The device can be restarted with a call to
217 * rte_compressdev_start()
220 * Compress device identifier
222 void __rte_experimental
223 rte_compressdev_stop(uint8_t dev_id);
227 * The memory allocated in the device gets freed.
228 * After calling this function, in order to use
229 * the device again, it is required to
230 * configure the device again.
233 * Compress device identifier
236 * - 0 on successfully closing device
237 * - <0 on failure to close device
239 int __rte_experimental
240 rte_compressdev_close(uint8_t dev_id);
243 * Allocate and set up a receive queue pair for a device.
244 * This should only be called when the device is stopped.
248 * Compress device identifier
249 * @param queue_pair_id
250 * The index of the queue pairs to set up. The
251 * value must be in the range [0, nb_queue_pair - 1]
252 * previously supplied to rte_compressdev_configure()
253 * @param max_inflight_ops
254 * Max number of ops which the qp will have to
255 * accommodate simultaneously
257 * The *socket_id* argument is the socket identifier
258 * in case of NUMA. The value can be *SOCKET_ID_ANY*
259 * if there is no NUMA constraint for the DMA memory
260 * allocated for the receive queue pair
262 * - 0: Success, queue pair correctly set up.
263 * - <0: Queue pair configuration failed
265 int __rte_experimental
266 rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
267 uint32_t max_inflight_ops, int socket_id);
270 * Get the number of queue pairs on a specific comp device
273 * Compress device identifier
275 * - The number of configured queue pairs.
277 uint16_t __rte_experimental
278 rte_compressdev_queue_pair_count(uint8_t dev_id);
282 * Retrieve the general I/O statistics of a device.
285 * The identifier of the device
287 * A pointer to a structure of type
288 * *rte_compressdev_stats* to be filled with the
289 * values of device counters
291 * - Zero if successful.
292 * - Non-zero otherwise.
294 int __rte_experimental
295 rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
298 * Reset the general I/O statistics of a device.
301 * The identifier of the device.
303 void __rte_experimental
304 rte_compressdev_stats_reset(uint8_t dev_id);
307 * Retrieve the contextual information of a device.
310 * Compress device identifier
312 * A pointer to a structure of type *rte_compressdev_info*
313 * to be filled with the contextual information of the device
315 * @note The capabilities field of dev_info is set to point to the first
316 * element of an array of struct rte_compressdev_capabilities.
317 * The element after the last valid element has its algo field set to
318 * RTE_COMP_ALGO_UNSPECIFIED.
320 void __rte_experimental
321 rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
325 * Dequeue a burst of processed compression operations from a queue on the comp
326 * device. The dequeued operation are stored in *rte_comp_op* structures
327 * whose pointers are supplied in the *ops* array.
329 * The rte_compressdev_dequeue_burst() function returns the number of ops
330 * actually dequeued, which is the number of *rte_comp_op* data structures
331 * effectively supplied into the *ops* array.
333 * A return value equal to *nb_ops* indicates that the queue contained
334 * at least *nb_ops* operations, and this is likely to signify that other
335 * processed operations remain in the devices output queue. Applications
336 * implementing a "retrieve as many processed operations as possible" policy
337 * can check this specific case and keep invoking the
338 * rte_compressdev_dequeue_burst() function until a value less than
339 * *nb_ops* is returned.
341 * The rte_compressdev_dequeue_burst() function does not provide any error
342 * notification to avoid the corresponding overhead.
344 * @note: operation ordering is not maintained within the queue pair.
346 * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
347 * op must be resubmitted with the same input data and a larger output buffer.
348 * op.produced is usually 0, but in decompression cases a PMD may return > 0
349 * and the application may find it useful to inspect that data.
350 * This status is only returned on STATELESS ops.
352 * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
353 * and next op in stream should continue on from op.consumed+1 with a fresh
355 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
356 * state/history stored in the PMD, even though no output was produced yet.
360 * Compress device identifier
362 * The index of the queue pair from which to retrieve
363 * processed operations. The value must be in the range
364 * [0, nb_queue_pair - 1] previously supplied to
365 * rte_compressdev_configure()
367 * The address of an array of pointers to
368 * *rte_comp_op* structures that must be
369 * large enough to store *nb_ops* pointers in it
371 * The maximum number of operations to dequeue
373 * - The number of operations actually dequeued, which is the number
374 * of pointers to *rte_comp_op* structures effectively supplied to the
377 uint16_t __rte_experimental
378 rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
379 struct rte_comp_op **ops, uint16_t nb_ops);
382 * Enqueue a burst of operations for processing on a compression device.
384 * The rte_compressdev_enqueue_burst() function is invoked to place
385 * comp operations on the queue *qp_id* of the device designated by
388 * The *nb_ops* parameter is the number of operations to process which are
389 * supplied in the *ops* array of *rte_comp_op* structures.
391 * The rte_compressdev_enqueue_burst() function returns the number of
392 * operations it actually enqueued for processing. A return value equal to
393 * *nb_ops* means that all packets have been enqueued.
395 * @note All compression operations are Out-of-place (OOP) operations,
396 * as the size of the output data is different to the size of the input data.
398 * @note The flush flag only applies to operations which return SUCCESS.
399 * In OUT_OF_SPACE cases whether STATEFUL or STATELESS, data in dest buffer
400 * is as if flush flag was FLUSH_NONE.
401 * @note flush flag only applies in compression direction. It has no meaning
403 * @note: operation ordering is not maintained within the queue pair.
406 * Compress device identifier
408 * The index of the queue pair on which operations
409 * are to be enqueued for processing. The value
410 * must be in the range [0, nb_queue_pairs - 1]
411 * previously supplied to *rte_compressdev_configure*
413 * The address of an array of *nb_ops* pointers
414 * to *rte_comp_op* structures which contain
415 * the operations to be processed
417 * The number of operations to process
419 * The number of operations actually enqueued on the device. The return
420 * value can be less than the value of the *nb_ops* parameter when the
421 * comp device's queue is full or if invalid parameters are specified in
424 uint16_t __rte_experimental
425 rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
426 struct rte_comp_op **ops, uint16_t nb_ops);
429 * This should alloc a stream from the device's mempool and initialise it.
430 * The application should call this API when setting up for the stateful
431 * processing of a set of data on a device. The API can be called multiple
432 * times to set up a stream for each data set. The handle returned is only for
433 * use with ops of op_type STATEFUL and must be passed to the PMD
434 * with every op in the data stream
437 * Compress device identifier
441 * Pointer to where PMD's private stream handle should be stored
444 * - 0 if successful and valid stream handle
445 * - <0 in error cases
446 * - Returns -EINVAL if input parameters are invalid.
447 * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
448 * - Returns -ENOTSUP if comp device does not support the comp transform.
449 * - Returns -ENOMEM if the private stream could not be allocated.
452 int __rte_experimental
453 rte_compressdev_stream_create(uint8_t dev_id,
454 const struct rte_comp_xform *xform,
458 * This should clear the stream and return it to the device's mempool.
461 * Compress device identifier
464 * PMD's private stream data
468 * - <0 in error cases
469 * - Returns -EINVAL if input parameters are invalid.
470 * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
471 * - Returns -EBUSY if can't free stream as there are inflight operations
473 int __rte_experimental
474 rte_compressdev_stream_free(uint8_t dev_id, void *stream);
477 * This should alloc a private_xform from the device's mempool and initialise
478 * it. The application should call this API when setting up for stateless
479 * processing on a device. If it returns non-shareable, then the application cannot
480 * share this handle with multiple in-flight ops and should call this API again
481 * to get a separate handle for every in-flight op.
482 * The handle returned is only valid for use with ops of op_type STATELESS.
485 * Compress device identifier
488 * @param private_xform
489 * Pointer to where PMD's private_xform handle should be stored
492 * - if successful returns 0
493 * and valid private_xform handle
494 * - <0 in error cases
495 * - Returns -EINVAL if input parameters are invalid.
496 * - Returns -ENOTSUP if comp device does not support the comp transform.
497 * - Returns -ENOMEM if the private_xform could not be allocated.
499 int __rte_experimental
500 rte_compressdev_private_xform_create(uint8_t dev_id,
501 const struct rte_comp_xform *xform,
502 void **private_xform);
505 * This should clear the private_xform and return it to the device's mempool.
508 * Compress device identifier
510 * @param private_xform
511 * PMD's private_xform data
515 * - <0 in error cases
516 * - Returns -EINVAL if input parameters are invalid.
517 * - Returns -EBUSY if can't free private_xform due to inflight operations
519 int __rte_experimental
520 rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);
526 #endif /* _RTE_COMPRESSDEV_H_ */