/**< Stateful compression is supported */
#define RTE_COMP_FF_STATEFUL_DECOMPRESSION (1ULL << 1)
/**< Stateful decompression is supported */
-#define RTE_COMP_FF_MBUF_SCATTER_GATHER (1ULL << 2)
-/**< Scatter-gather mbufs are supported */
-#define RTE_COMP_FF_ADLER32_CHECKSUM (1ULL << 3)
+#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT (1ULL << 2)
+/**< Out-of-place Scatter-gather (SGL) buffers,
+ * with multiple segments, are supported in input and output
+ */
+#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT (1ULL << 3)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in input, combined with linear buffers (LB), with a
+ * single segment, in output
+ */
+#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT (1ULL << 4)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in output, combined with linear buffers (LB), with a
+ * single segment, in input
+ */
+#define RTE_COMP_FF_ADLER32_CHECKSUM (1ULL << 5)
/**< Adler-32 Checksum is supported */
-#define RTE_COMP_FF_CRC32_CHECKSUM (1ULL << 4)
+#define RTE_COMP_FF_CRC32_CHECKSUM (1ULL << 6)
/**< CRC32 Checksum is supported */
-#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM (1ULL << 5)
+#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM (1ULL << 7)
/**< Adler-32/CRC32 Checksum is supported */
-#define RTE_COMP_FF_MULTI_PKT_CHECKSUM (1ULL << 6)
+#define RTE_COMP_FF_MULTI_PKT_CHECKSUM (1ULL << 8)
/**< Generation of checksum across multiple stateless packets is supported */
-#define RTE_COMP_FF_SHA1_HASH (1ULL << 7)
+#define RTE_COMP_FF_SHA1_HASH (1ULL << 9)
/**< SHA1 Hash is supported */
-#define RTE_COMP_FF_SHA2_SHA256_HASH (1ULL << 8)
+#define RTE_COMP_FF_SHA2_SHA256_HASH (1ULL << 10)
/**< SHA256 Hash of SHA2 family is supported */
-#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS (1ULL << 9)
+#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS (1ULL << 11)
/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
-#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM (1ULL << 10)
+#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM (1ULL << 12)
/**< Private xforms created by the PMD can be shared
* across multiple stateless operations. If not set, then app needs
* to create as many priv_xforms as it expects to have stateless
* operations in-flight.
*/
+#define RTE_COMP_FF_HUFFMAN_FIXED (1ULL << 13)
+/**< Fixed Huffman encoding is supported */
+#define RTE_COMP_FF_HUFFMAN_DYNAMIC (1ULL << 14)
+/**< Dynamic Huffman encoding is supported */
/** Status of comp operation */
enum rte_comp_op_status {
struct rte_mbuf *m_src;
/**< source mbuf
* The total size of the input buffer(s) can be retrieved using
- * rte_pktmbuf_data_len(m_src). The max data size which can fit in a
+ * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
* single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
* If the input data is bigger than this it can be passed to the PMD in
* a chain of mbufs if the PMD's capabilities indicate it supports this.
struct rte_mbuf *m_dst;
/**< destination mbuf
* The total size of the output buffer(s) can be retrieved using
- * rte_pktmbuf_data_len(m_dst). The max data size which can fit in a
+ * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
* single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
* If the output data is expected to be bigger than this a chain of
* mbufs can be passed to the PMD if the PMD's capabilities indicate
* it supports this.
+ *
+ * @note If incompressible data is passed to an engine for compression
+ * using RTE_COMP_ALGO_DEFLATE, it's possible for the output data
+ * to be larger than the uncompressed data, due to the inclusion
+ * of the DEFLATE header blocks. The size of m_dst should accommodate
+ * this, else OUT_OF_SPACE errors can be expected in this case.
*/
struct {
* - On success pointer to mempool
* - On failure NULL
*/
-struct rte_mempool * __rte_experimental
+__rte_experimental
+struct rte_mempool *
rte_comp_op_pool_create(const char *name,
unsigned int nb_elts, unsigned int cache_size,
uint16_t user_size, int socket_id);
* - On success returns a valid rte_comp_op structure
* - On failure returns NULL
*/
-struct rte_comp_op * __rte_experimental
+__rte_experimental
+struct rte_comp_op *
rte_comp_op_alloc(struct rte_mempool *mempool);
/**
* @param nb_ops
* Number of operations to allocate
* @return
- * - 0: Success
- * - -ENOENT: Not enough entries in the mempool; no ops are retrieved.
+ * - nb_ops: Success, the nb_ops requested was allocated
+ * - 0: Not enough entries in the mempool; no ops are retrieved.
*/
-int __rte_experimental
+__rte_experimental
+int
rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
struct rte_comp_op **ops, uint16_t nb_ops);
* @param op
* Compress operation
*/
-void __rte_experimental
+__rte_experimental
+void
rte_comp_op_free(struct rte_comp_op *op);
+/**
+ * Bulk free operation structures
+ * If operations have been allocated from an rte_mempool, then the operations
+ * will be returned to the mempool.
+ * Each array entry will be cleared.
+ *
+ * @param ops
+ * Array of Compress operations
+ * @param nb_ops
+ * Number of operations to free
+ */
+__rte_experimental
+void
+rte_comp_op_bulk_free(struct rte_comp_op **ops, uint16_t nb_ops);
+
/**
* Get the name of a compress service feature flag
*
* @return
* The name of this flag, or NULL if it's not a valid feature flag.
*/
-const char * __rte_experimental
+__rte_experimental
+const char *
rte_comp_get_feature_name(uint64_t flag);
#ifdef __cplusplus