/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
5 #ifndef _RTE_COMPRESSDEV_H_
6 #define _RTE_COMPRESSDEV_H_
/**
 * @file rte_compressdev.h
 *
 * RTE Compression Device APIs.
 *
 * Defines comp device APIs for the provisioning of compression operations.
 */
#include <rte_common.h>
#include "rte_comp.h"
/**
 * Parameter log base 2 range description.
 * Final value will be 2^value.
 */
struct rte_param_log2_range {
	uint8_t min;	/**< Minimum log2 value */
	uint8_t max;	/**< Maximum log2 value */
	uint8_t increment;
	/**< If a range of sizes are supported,
	 * this parameter is used to indicate
	 * increments in base 2 log byte value
	 * that are supported between the minimum and maximum
	 */
};
39 /** Structure used to capture a capability of a comp device */
40 struct rte_compressdev_capabilities {
41 enum rte_comp_algorithm algo;
42 /* Compression algorithm */
43 uint64_t comp_feature_flags;
44 /**< Bitmask of flags for compression service features */
45 struct rte_param_log2_range window_size;
46 /**< Window size range in base two log byte values */
/** Macro used to terminate the list of capabilities exposed by a comp PMD */
#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
		{ RTE_COMP_ALGO_UNSPECIFIED }
/**
 * Query a comp device's capability entry for a specific algorithm.
 *
 * @param dev_id
 *   Compress device identifier
 * @param algo
 *   Compression algorithm
 * @return
 *   Pointer to the device's capability entry for *algo*, or NULL if the
 *   algorithm is not supported.
 *   NOTE(review): success/NULL contract inferred from the capability-list
 *   convention used by this API — confirm against the implementation.
 */
const struct rte_compressdev_capabilities * __rte_experimental
rte_compressdev_capability_get(uint8_t dev_id,
		enum rte_comp_algorithm algo);
/**
 * Compression device supported feature flags.
 *
 * @note New features flags should be added to the end of the list
 *
 * Keep these flags synchronised with rte_compressdev_get_feature_name()
 */
#define RTE_COMPDEV_FF_HW_ACCELERATED		(1ULL << 0)
/**< Operations are off-loaded to an external hardware accelerator */
#define RTE_COMPDEV_FF_CPU_SSE			(1ULL << 1)
/**< Utilises CPU SIMD SSE instructions */
#define RTE_COMPDEV_FF_CPU_AVX			(1ULL << 2)
/**< Utilises CPU SIMD AVX instructions */
#define RTE_COMPDEV_FF_CPU_AVX2			(1ULL << 3)
/**< Utilises CPU SIMD AVX2 instructions */
#define RTE_COMPDEV_FF_CPU_AVX512		(1ULL << 4)
/**< Utilises CPU SIMD AVX512 instructions */
#define RTE_COMPDEV_FF_CPU_NEON			(1ULL << 5)
/**< Utilises CPU NEON instructions */
/**
 * Get the name of a compress device feature flag.
 *
 * @param flag
 *   The mask describing the flag
 * @return
 *   The name of this flag, or NULL if it's not a valid feature flag.
 */
const char * __rte_experimental
rte_compressdev_get_feature_name(uint64_t flag);
/** comp device information */
struct rte_compressdev_info {
	const char *driver_name;	/**< Driver name. */
	uint64_t feature_flags;		/**< Feature flags */
	const struct rte_compressdev_capabilities *capabilities;
	/**< Array of devices supported capabilities */
	uint16_t max_nb_queue_pairs;
	/**< Maximum number of queues pairs supported by device.
	 * (If 0, there is no limit in maximum number of queue pairs)
	 */
};
/** comp device statistics */
struct rte_compressdev_stats {
	uint64_t enqueued_count;
	/**< Count of all operations enqueued */
	uint64_t dequeued_count;
	/**< Count of all operations dequeued */

	uint64_t enqueue_err_count;
	/**< Total error count on operations enqueued */
	uint64_t dequeue_err_count;
	/**< Total error count on operations dequeued */
};
/**
 * Get the device identifier for the named compress device.
 *
 * @param name
 *   Device name to select the device structure
 * @return
 *   - Returns compress device identifier on success.
 *   - Return -1 on failure to find named compress device.
 */
int __rte_experimental
rte_compressdev_get_dev_id(const char *name);
/**
 * Get the compress device name given a device identifier.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - Returns compress device name.
 *   - Returns NULL if compress device is not present.
 */
const char * __rte_experimental
rte_compressdev_name_get(uint8_t dev_id);
/**
 * Get the total number of compress devices that have been successfully
 * initialised.
 *
 * @return
 *   - The total number of usable compress devices.
 */
uint8_t __rte_experimental
rte_compressdev_count(void);
150 * Get number and identifiers of attached comp devices that
151 * use the same compress driver.
156 * Output devices identifiers
158 * Maximal number of devices
161 * Returns number of attached compress devices.
163 uint8_t __rte_experimental
164 rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
/**
 * Return the NUMA socket to which a device is connected.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   The NUMA socket id to which the device is connected or
 *   a default of zero if the socket could not be determined.
 *   -1 is returned if the dev_id value is out of range.
 */
int __rte_experimental
rte_compressdev_socket_id(uint8_t dev_id);
/** Compress device configuration structure */
struct rte_compressdev_config {
	int socket_id;
	/**< Socket on which to allocate resources */
	uint16_t nb_queue_pairs;
	/**< Total number of queue pairs to configure on a device */
	uint16_t max_nb_priv_xforms;
	/**< Max number of private_xforms which will be created on the device */
	uint16_t max_nb_streams;
	/**< Max number of streams which will be created on the device */
};
/**
 * Configure a device.
 *
 * This function must be invoked first before any other function in the
 * API. This function can also be re-invoked when a device is in the
 * stopped state.
 *
 * @param dev_id
 *   Compress device identifier
 * @param config
 *   The compress device configuration
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int __rte_experimental
rte_compressdev_configure(uint8_t dev_id,
		struct rte_compressdev_config *config);
/**
 * Start a device.
 *
 * The device start step is called after configuring the device and setting up
 * its queue pairs.
 * On success, data-path functions exported by the API (enqueue/dequeue, etc)
 * can be invoked.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0: Success, device started.
 *   - <0: Error code of the driver device start function.
 */
int __rte_experimental
rte_compressdev_start(uint8_t dev_id);
/**
 * Stop a device. The device can be restarted with a call to
 * rte_compressdev_start().
 *
 * @param dev_id
 *   Compress device identifier
 */
void __rte_experimental
rte_compressdev_stop(uint8_t dev_id);
/**
 * Close a device.
 * The memory allocated in the device gets freed.
 * After calling this function, in order to use
 * the device again, it is required to
 * configure the device again.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - 0 on successfully closing device
 *   - <0 on failure to close device
 */
int __rte_experimental
rte_compressdev_close(uint8_t dev_id);
/**
 * Allocate and set up a receive queue pair for a device.
 * This should only be called when the device is stopped.
 *
 * @param dev_id
 *   Compress device identifier
 * @param queue_pair_id
 *   The index of the queue pairs to set up. The
 *   value must be in the range [0, nb_queue_pair - 1]
 *   previously supplied to rte_compressdev_configure()
 * @param max_inflight_ops
 *   Max number of ops which the qp will have to
 *   accommodate simultaneously
 * @param socket_id
 *   The *socket_id* argument is the socket identifier
 *   in case of NUMA. The value can be *SOCKET_ID_ANY*
 *   if there is no NUMA constraint for the DMA memory
 *   allocated for the receive queue pair
 * @return
 *   - 0: Success, queue pair correctly set up.
 *   - <0: Queue pair configuration failed
 */
int __rte_experimental
rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
		uint32_t max_inflight_ops, int socket_id);
/**
 * Get the number of queue pairs on a specific comp device.
 *
 * @param dev_id
 *   Compress device identifier
 * @return
 *   - The number of configured queue pairs.
 */
uint16_t __rte_experimental
rte_compressdev_queue_pair_count(uint8_t dev_id);
/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device
 * @param stats
 *   A pointer to a structure of type
 *   *rte_compressdev_stats* to be filled with the
 *   values of device counters
 * @return
 *   - Zero if successful.
 *   - Non-zero otherwise.
 */
int __rte_experimental
rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
/**
 * Reset the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 */
void __rte_experimental
rte_compressdev_stats_reset(uint8_t dev_id);
/**
 * Retrieve the contextual information of a device.
 *
 * @param dev_id
 *   Compress device identifier
 * @param dev_info
 *   A pointer to a structure of type *rte_compressdev_info*
 *   to be filled with the contextual information of the device
 *
 * @note The capabilities field of dev_info is set to point to the first
 * element of an array of struct rte_compressdev_capabilities.
 * The element after the last valid element has its op field set to
 * RTE_COMP_ALGO_LIST_END.
 */
void __rte_experimental
rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
/**
 * Dequeue a burst of processed compression operations from a queue on the comp
 * device. The dequeued operations are stored in *rte_comp_op* structures
 * whose pointers are supplied in the *ops* array.
 *
 * The rte_compressdev_dequeue_burst() function returns the number of ops
 * actually dequeued, which is the number of *rte_comp_op* data structures
 * effectively supplied into the *ops* array.
 *
 * A return value equal to *nb_ops* indicates that the queue contained
 * at least *nb_ops* operations, and this is likely to signify that other
 * processed operations remain in the devices output queue. Applications
 * implementing a "retrieve as many processed operations as possible" policy
 * can check this specific case and keep invoking the
 * rte_compressdev_dequeue_burst() function until a value less than
 * *nb_ops* is returned.
 *
 * The rte_compressdev_dequeue_burst() function does not provide any error
 * notification to avoid the corresponding overhead.
 *
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
 * op must be resubmitted with the same input data and a larger output buffer.
 * op.produced is usually 0, but in decompression cases a PMD may return > 0
 * and the application may find it useful to inspect that data.
 * This status is only returned on STATELESS ops.
 *
 * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
 * and next op in stream should continue on from op.consumed+1 with a fresh
 * output buffer.
 * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
 * state/history stored in the PMD, even though no output was produced yet.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair from which to retrieve
 *   processed operations. The value must be in the range
 *   [0, nb_queue_pair - 1] previously supplied to
 *   rte_compressdev_configure()
 * @param ops
 *   The address of an array of pointers to
 *   *rte_comp_op* structures that must be
 *   large enough to store *nb_ops* pointers in it
 * @param nb_ops
 *   The maximum number of operations to dequeue
 * @return
 *   - The number of operations actually dequeued, which is the number
 *   of pointers to *rte_comp_op* structures effectively supplied to the
 *   *ops* array.
 */
uint16_t __rte_experimental
rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
/**
 * Enqueue a burst of operations for processing on a compression device.
 *
 * The rte_compressdev_enqueue_burst() function is invoked to place
 * comp operations on the queue *qp_id* of the device designated by
 * its *dev_id*.
 *
 * The *nb_ops* parameter is the number of operations to process which are
 * supplied in the *ops* array of *rte_comp_op* structures.
 *
 * The rte_compressdev_enqueue_burst() function returns the number of
 * operations it actually enqueued for processing. A return value equal to
 * *nb_ops* means that all packets have been enqueued.
 *
 * @note All compression operations are Out-of-place (OOP) operations,
 * as the size of the output data is different to the size of the input data.
 *
 * @note The flush flag only applies to operations which return SUCCESS.
 * In OUT_OF_SPACE cases whether STATEFUL or STATELESS, data in dest buffer
 * is as if flush flag was FLUSH_NONE.
 * @note flush flag only applies in compression direction. It has no meaning
 * for decompression.
 * @note: operation ordering is not maintained within the queue pair.
 *
 * @param dev_id
 *   Compress device identifier
 * @param qp_id
 *   The index of the queue pair on which operations
 *   are to be enqueued for processing. The value
 *   must be in the range [0, nb_queue_pairs - 1]
 *   previously supplied to *rte_compressdev_configure*
 * @param ops
 *   The address of an array of *nb_ops* pointers
 *   to *rte_comp_op* structures which contain
 *   the operations to be processed
 * @param nb_ops
 *   The number of operations to process
 * @return
 *   The number of operations actually enqueued on the device. The return
 *   value can be less than the value of the *nb_ops* parameter when the
 *   comp devices queue is full or if invalid parameters are specified in
 *   a *rte_comp_op*.
 */
uint16_t __rte_experimental
rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
		struct rte_comp_op **ops, uint16_t nb_ops);
442 * This should alloc a stream from the device's mempool and initialise it.
443 * The application should call this API when setting up for the stateful
444 * processing of a set of data on a device. The API can be called multiple
445 * times to set up a stream for each data set. The handle returned is only for
446 * use with ops of op_type STATEFUL and must be passed to the PMD
447 * with every op in the data stream
450 * Compress device identifier
454 * Pointer to where PMD's private stream handle should be stored
457 * - 0 if successful and valid stream handle
458 * - <0 in error cases
459 * - Returns -EINVAL if input parameters are invalid.
460 * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
461 * - Returns -ENOTSUP if comp device does not support the comp transform.
462 * - Returns -ENOMEM if the private stream could not be allocated.
465 int __rte_experimental
466 rte_compressdev_stream_create(uint8_t dev_id,
467 const struct rte_comp_xform *xform,
/**
 * This should clear the stream and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 * @param stream
 *   PMD's private stream data
 *
 * @return
 *   - 0 if successful
 *   - <0 in error cases
 *   - Returns -EINVAL if input parameters are invalid.
 *   - Returns -ENOTSUP if comp device does not support STATEFUL operations.
 *   - Returns -EBUSY if can't free stream as there are inflight operations
 */
int __rte_experimental
rte_compressdev_stream_free(uint8_t dev_id, void *stream);
/**
 * This should alloc a private_xform from the device's mempool and initialise
 * it. The application should call this API when setting up for stateless
 * processing on a device. If it returns non-shareable, then the appl cannot
 * share this handle with multiple in-flight ops and should call this API again
 * to get a separate handle for every in-flight op.
 * The handle returned is only valid for use with ops of op_type STATELESS.
 *
 * @param dev_id
 *   Compress device identifier
 * @param xform
 *   xform data
 * @param private_xform
 *   Pointer to where PMD's private_xform handle should be stored
 *
 * @return
 *   - if successful returns 0
 *     and valid private_xform handle
 *   - <0 in error cases
 *   - Returns -EINVAL if input parameters are invalid.
 *   - Returns -ENOTSUP if comp device does not support the comp transform.
 *   - Returns -ENOMEM if the private_xform could not be allocated.
 */
int __rte_experimental
rte_compressdev_private_xform_create(uint8_t dev_id,
		const struct rte_comp_xform *xform,
		void **private_xform);
/**
 * This should clear the private_xform and return it to the device's mempool.
 *
 * @param dev_id
 *   Compress device identifier
 * @param private_xform
 *   PMD's private_xform data
 *
 * @return
 *   - 0 if successful
 *   - <0 in error cases
 *   - Returns -EINVAL if input parameters are invalid.
 *   - Returns -EBUSY if can't free private_xform due to inflight operations
 */
int __rte_experimental
rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);
539 #endif /* _RTE_COMPRESSDEV_H_ */