* Copyright(c) 2018 Cavium Networks
*/
-#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
-#define _RTE_ZLIB_PMD_PRIVATE_H_
+#ifndef _ZLIB_PMD_PRIVATE_H_
+#define _ZLIB_PMD_PRIVATE_H_
#include <zlib.h>
#include <rte_compressdev.h>
/** Device specific operations function pointer structure */
extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
-#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */
+#endif /* _ZLIB_PMD_PRIVATE_H_ */
* Copyright(c) 2016-2017 Intel Corporation
*/
-#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
-#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#ifndef _AESNI_GCM_PMD_PRIVATE_H_
+#define _AESNI_GCM_PMD_PRIVATE_H_
#include "aesni_gcm_ops.h"
extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
-#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
+#endif /* _AESNI_GCM_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation
+ */
+
+#ifndef _AESNI_MB_PMD_PRIVATE_H_
+#define _AESNI_MB_PMD_PRIVATE_H_
+
+#include <intel-ipsec-mb.h>
+
+enum aesni_mb_vector_mode {
+ RTE_AESNI_MB_NOT_SUPPORTED = 0,
+ RTE_AESNI_MB_SSE,
+ RTE_AESNI_MB_AVX,
+ RTE_AESNI_MB_AVX2,
+ RTE_AESNI_MB_AVX512
+};
+
+#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
+/**< AES-NI Multi buffer PMD device name */
+
+/** AESNI_MB PMD LOGTYPE DRIVER */
+/* NOTE(review): a non-extern definition in a header produces one (common)
+ * symbol per including translation unit and fails to link under
+ * -fno-common; prefer `extern int aesni_mb_logtype_driver;` here with a
+ * single definition in the PMD .c file.
+ */
+int aesni_mb_logtype_driver;
+
+#define AESNI_MB_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+static const unsigned auth_blocksize[] = {
+ [NULL_HASH] = 0,
+ [MD5] = 64,
+ [SHA1] = 64,
+ [SHA_224] = 64,
+ [SHA_256] = 64,
+ [SHA_384] = 128,
+ [SHA_512] = 128,
+ [AES_XCBC] = 16,
+ [AES_CCM] = 16,
+ [AES_CMAC] = 16,
+ [AES_GMAC] = 16,
+ [PLAIN_SHA1] = 64,
+ [PLAIN_SHA_224] = 64,
+ [PLAIN_SHA_256] = 64,
+ [PLAIN_SHA_384] = 128,
+ [PLAIN_SHA_512] = 128
+};
+
+/**
+ * Get the blocksize in bytes for a specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for a non-valid
+ * authentication algorithm
+ */
+static inline unsigned
+get_auth_algo_blocksize(JOB_HASH_ALG algo)
+{
+ return auth_blocksize[algo];
+}
+
+static const unsigned auth_truncated_digest_byte_lengths[] = {
+ [MD5] = 12,
+ [SHA1] = 12,
+ [SHA_224] = 14,
+ [SHA_256] = 16,
+ [SHA_384] = 24,
+ [SHA_512] = 32,
+ [AES_XCBC] = 12,
+ [AES_CMAC] = 12,
+ [AES_CCM] = 8,
+ [NULL_HASH] = 0,
+ [AES_GMAC] = 16,
+ [PLAIN_SHA1] = 20,
+ [PLAIN_SHA_224] = 28,
+ [PLAIN_SHA_256] = 32,
+ [PLAIN_SHA_384] = 48,
+ [PLAIN_SHA_512] = 64
+};
+
+/**
+ * Get the IPsec specified truncated length in bytes of the HMAC digest for a
+ * specified authentication algorithm
+ *
+ * @Note: this function will not return a valid value for a non-valid
+ * authentication algorithm
+ */
+static inline unsigned
+get_truncated_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_truncated_digest_byte_lengths[algo];
+}
+
+static const unsigned auth_digest_byte_lengths[] = {
+ [MD5] = 16,
+ [SHA1] = 20,
+ [SHA_224] = 28,
+ [SHA_256] = 32,
+ [SHA_384] = 48,
+ [SHA_512] = 64,
+ [AES_XCBC] = 16,
+ [AES_CMAC] = 16,
+ [AES_CCM] = 16,
+ [AES_GMAC] = 12,
+ [NULL_HASH] = 0,
+ [PLAIN_SHA1] = 20,
+ [PLAIN_SHA_224] = 28,
+ [PLAIN_SHA_256] = 32,
+ [PLAIN_SHA_384] = 48,
+	[PLAIN_SHA_512] = 64
+};
+
+/**
+ * Get the full digest size in bytes for a specified authentication algorithm
+ * (if available in the Multi-buffer library)
+ *
+ * @Note: this function will not return a valid value for a non-valid
+ * authentication algorithm
+ */
+static inline unsigned
+get_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_digest_byte_lengths[algo];
+}
+
+enum aesni_mb_operation {
+ AESNI_MB_OP_HASH_CIPHER,
+ AESNI_MB_OP_CIPHER_HASH,
+ AESNI_MB_OP_HASH_ONLY,
+ AESNI_MB_OP_CIPHER_ONLY,
+ AESNI_MB_OP_AEAD_HASH_CIPHER,
+ AESNI_MB_OP_AEAD_CIPHER_HASH,
+ AESNI_MB_OP_NOT_SUPPORTED
+};
+
+/** private data structure for each virtual AESNI device */
+struct aesni_mb_private {
+ enum aesni_mb_vector_mode vector_mode;
+ /**< CPU vector instruction set mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ MB_MGR *mb_mgr;
+ /**< Multi-buffer instance */
+};
+
+/** AESNI Multi buffer queue pair */
+struct aesni_mb_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ MB_MGR *mb_mgr;
+ /**< Multi-buffer instance */
+ struct rte_ring *ingress_queue;
+ /**< Ring for placing operations ready for processing */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session Private Data Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ uint8_t digest_idx;
+ /**< Index of the next slot to be used in temp_digests,
+ * to store the digest for a given operation
+ */
+ uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
+ /**< Buffers used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** AES-NI multi-buffer private session structure */
+struct aesni_mb_session {
+ JOB_CHAIN_ORDER chain_order;
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+	const struct aesni_mb_op_fns *op_fns;
+	/**< Vector mode dependent pointer table of the multi-buffer APIs */
+
+	/** Cipher Parameters */
+ struct {
+ /** Cipher direction - encrypt / decrypt */
+ JOB_CIPHER_DIRECTION direction;
+ /** Cipher mode - CBC / Counter */
+ JOB_CIPHER_MODE mode;
+
+ uint64_t key_length_in_bytes;
+
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+ struct {
+ const void *ks_ptr[3];
+ uint64_t key[3][16];
+ } exp_3des_keys;
+
+ struct gcm_key_data gcm_key;
+ };
+ /**< Expanded AES keys - Allocating space to
+ * contain the maximum expanded key size which
+ * is 240 bytes for 256 bit AES, calculate by:
+ * ((key size (bytes)) *
+ * ((number of rounds) + 1))
+ */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ JOB_HASH_ALG algo; /**< Authentication Algorithm */
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ union {
+ struct {
+ uint8_t inner[128] __rte_aligned(16);
+ /**< inner pad */
+ uint8_t outer[128] __rte_aligned(16);
+ /**< outer pad */
+ } pads;
+ /**< HMAC Authentication pads -
+ * allocating space for the maximum pad
+ * size supported which is 128 bytes for
+ * SHA512
+ */
+
+ struct {
+ uint32_t k1_expanded[44] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint8_t k2[16] __rte_aligned(16);
+ /**< k2. */
+ uint8_t k3[16] __rte_aligned(16);
+ /**< k3. */
+ } xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint32_t skey1[4] __rte_aligned(16);
+ /**< k2. */
+ uint32_t skey2[4] __rte_aligned(16);
+ /**< k3. */
+ } cmac;
+ /**< Expanded XCBC authentication keys */
+ };
+ /** Generated digest size by the Multi-buffer library */
+ uint16_t gen_digest_len;
+ /** Requested digest size from Cryptodev */
+ uint16_t req_digest_len;
+
+ } auth;
+ struct {
+ /** AAD data length */
+ uint16_t aad_len;
+ } aead;
+} __rte_cache_aligned;
+
+extern int
+aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
+
+
+
+#endif /* _AESNI_MB_PMD_PRIVATE_H_ */
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include "rte_aesni_mb_pmd_private.h"
+#include "aesni_mb_pmd_private.h"
#define AES_CCM_DIGEST_MIN_LEN 4
#define AES_CCM_DIGEST_MAX_LEN 16
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
-#include "rte_aesni_mb_pmd_private.h"
+#include "aesni_mb_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2016 Intel Corporation
- */
-
-#ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
-#define _RTE_AESNI_MB_PMD_PRIVATE_H_
-
-#include <intel-ipsec-mb.h>
-
-enum aesni_mb_vector_mode {
- RTE_AESNI_MB_NOT_SUPPORTED = 0,
- RTE_AESNI_MB_SSE,
- RTE_AESNI_MB_AVX,
- RTE_AESNI_MB_AVX2,
- RTE_AESNI_MB_AVX512
-};
-
-#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
-/**< AES-NI Multi buffer PMD device name */
-
-/** AESNI_MB PMD LOGTYPE DRIVER */
-int aesni_mb_logtype_driver;
-
-#define AESNI_MB_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
- "%s() line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-
-#define HMAC_IPAD_VALUE (0x36)
-#define HMAC_OPAD_VALUE (0x5C)
-
-/* Maximum length for digest */
-#define DIGEST_LENGTH_MAX 64
-static const unsigned auth_blocksize[] = {
- [NULL_HASH] = 0,
- [MD5] = 64,
- [SHA1] = 64,
- [SHA_224] = 64,
- [SHA_256] = 64,
- [SHA_384] = 128,
- [SHA_512] = 128,
- [AES_XCBC] = 16,
- [AES_CCM] = 16,
- [AES_CMAC] = 16,
- [AES_GMAC] = 16,
- [PLAIN_SHA1] = 64,
- [PLAIN_SHA_224] = 64,
- [PLAIN_SHA_256] = 64,
- [PLAIN_SHA_384] = 128,
- [PLAIN_SHA_512] = 128
-};
-
-/**
- * Get the blocksize in bytes for a specified authentication algorithm
- *
- * @Note: this function will not return a valid value for a non-valid
- * authentication algorithm
- */
-static inline unsigned
-get_auth_algo_blocksize(JOB_HASH_ALG algo)
-{
- return auth_blocksize[algo];
-}
-
-static const unsigned auth_truncated_digest_byte_lengths[] = {
- [MD5] = 12,
- [SHA1] = 12,
- [SHA_224] = 14,
- [SHA_256] = 16,
- [SHA_384] = 24,
- [SHA_512] = 32,
- [AES_XCBC] = 12,
- [AES_CMAC] = 12,
- [AES_CCM] = 8,
- [NULL_HASH] = 0,
- [AES_GMAC] = 16,
- [PLAIN_SHA1] = 20,
- [PLAIN_SHA_224] = 28,
- [PLAIN_SHA_256] = 32,
- [PLAIN_SHA_384] = 48,
- [PLAIN_SHA_512] = 64
-};
-
-/**
- * Get the IPsec specified truncated length in bytes of the HMAC digest for a
- * specified authentication algorithm
- *
- * @Note: this function will not return a valid value for a non-valid
- * authentication algorithm
- */
-static inline unsigned
-get_truncated_digest_byte_length(JOB_HASH_ALG algo)
-{
- return auth_truncated_digest_byte_lengths[algo];
-}
-
-static const unsigned auth_digest_byte_lengths[] = {
- [MD5] = 16,
- [SHA1] = 20,
- [SHA_224] = 28,
- [SHA_256] = 32,
- [SHA_384] = 48,
- [SHA_512] = 64,
- [AES_XCBC] = 16,
- [AES_CMAC] = 16,
- [AES_CCM] = 16,
- [AES_GMAC] = 12,
- [NULL_HASH] = 0,
- [PLAIN_SHA1] = 20,
- [PLAIN_SHA_224] = 28,
- [PLAIN_SHA_256] = 32,
- [PLAIN_SHA_384] = 48,
- [PLAIN_SHA_512] = 64
- /**< Vector mode dependent pointer table of the multi-buffer APIs */
-
-};
-
-/**
- * Get the full digest size in bytes for a specified authentication algorithm
- * (if available in the Multi-buffer library)
- *
- * @Note: this function will not return a valid value for a non-valid
- * authentication algorithm
- */
-static inline unsigned
-get_digest_byte_length(JOB_HASH_ALG algo)
-{
- return auth_digest_byte_lengths[algo];
-}
-
-enum aesni_mb_operation {
- AESNI_MB_OP_HASH_CIPHER,
- AESNI_MB_OP_CIPHER_HASH,
- AESNI_MB_OP_HASH_ONLY,
- AESNI_MB_OP_CIPHER_ONLY,
- AESNI_MB_OP_AEAD_HASH_CIPHER,
- AESNI_MB_OP_AEAD_CIPHER_HASH,
- AESNI_MB_OP_NOT_SUPPORTED
-};
-
-/** private data structure for each virtual AESNI device */
-struct aesni_mb_private {
- enum aesni_mb_vector_mode vector_mode;
- /**< CPU vector instruction set mode */
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
- MB_MGR *mb_mgr;
- /**< Multi-buffer instance */
-};
-
-/** AESNI Multi buffer queue pair */
-struct aesni_mb_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- MB_MGR *mb_mgr;
- /**< Multi-buffer instance */
- struct rte_ring *ingress_queue;
- /**< Ring for placing operations ready for processing */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats stats;
- /**< Queue pair statistics */
- uint8_t digest_idx;
- /**< Index of the next slot to be used in temp_digests,
- * to store the digest for a given operation
- */
- uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
- /**< Buffers used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-} __rte_cache_aligned;
-
-/** AES-NI multi-buffer private session structure */
-struct aesni_mb_session {
- JOB_CHAIN_ORDER chain_order;
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
-
- /** Cipher Parameters */const struct aesni_mb_op_fns *op_fns;
- /**< Vector mode dependent pointer table of the multi-buffer APIs */
-
- struct {
- /** Cipher direction - encrypt / decrypt */
- JOB_CIPHER_DIRECTION direction;
- /** Cipher mode - CBC / Counter */
- JOB_CIPHER_MODE mode;
-
- uint64_t key_length_in_bytes;
-
- union {
- struct {
- uint32_t encode[60] __rte_aligned(16);
- /**< encode key */
- uint32_t decode[60] __rte_aligned(16);
- /**< decode key */
- } expanded_aes_keys;
- struct {
- const void *ks_ptr[3];
- uint64_t key[3][16];
- } exp_3des_keys;
-
- struct gcm_key_data gcm_key;
- };
- /**< Expanded AES keys - Allocating space to
- * contain the maximum expanded key size which
- * is 240 bytes for 256 bit AES, calculate by:
- * ((key size (bytes)) *
- * ((number of rounds) + 1))
- */
- } cipher;
-
- /** Authentication Parameters */
- struct {
- JOB_HASH_ALG algo; /**< Authentication Algorithm */
- enum rte_crypto_auth_operation operation;
- /**< auth operation generate or verify */
- union {
- struct {
- uint8_t inner[128] __rte_aligned(16);
- /**< inner pad */
- uint8_t outer[128] __rte_aligned(16);
- /**< outer pad */
- } pads;
- /**< HMAC Authentication pads -
- * allocating space for the maximum pad
- * size supported which is 128 bytes for
- * SHA512
- */
-
- struct {
- uint32_t k1_expanded[44] __rte_aligned(16);
- /**< k1 (expanded key). */
- uint8_t k2[16] __rte_aligned(16);
- /**< k2. */
- uint8_t k3[16] __rte_aligned(16);
- /**< k3. */
- } xcbc;
-
- struct {
- uint32_t expkey[60] __rte_aligned(16);
- /**< k1 (expanded key). */
- uint32_t skey1[4] __rte_aligned(16);
- /**< k2. */
- uint32_t skey2[4] __rte_aligned(16);
- /**< k3. */
- } cmac;
- /**< Expanded XCBC authentication keys */
- };
- /** Generated digest size by the Multi-buffer library */
- uint16_t gen_digest_len;
- /** Requested digest size from Cryptodev */
- uint16_t req_digest_len;
-
- } auth;
- struct {
- /** AAD data length */
- uint16_t aad_len;
- } aead;
-} __rte_cache_aligned;
-
-extern int
-aesni_mb_set_session_parameters(const MB_MGR *mb_mgr,
- struct aesni_mb_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
-
-
-
-#endif /* _RTE_AESNI_MB_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _ARMV8_PMD_PRIVATE_H_
+#define _ARMV8_PMD_PRIVATE_H_
+
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+/**< ARMv8 Crypto PMD device name */
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_ARMV8_PMD), \
+		__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_ARMV8_PMD), \
+		__func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_ARMV8_PMD), \
+		__func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_ASSERT(con) \
+do { \
+	if (!(con)) { \
+		rte_panic("%s(): " \
+		    RTE_STR(con) " condition failed, line %u", \
+		    __func__, __LINE__); \
+	} \
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define NBBY 8 /* Number of bits in a byte */
+#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+
+/* Maximum length for digest (SHA-256 needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+ ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+ ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+ ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+ ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+ ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+ ARMV8_CRYPTO_AUTH_AS_AUTH,
+ ARMV8_CRYPTO_AUTH_AS_HMAC,
+ ARMV8_CRYPTO_AUTH_AS_CIPHER,
+ ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+ ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define CRYPTO_ORDER_MAX ARMV8_CRYPTO_CHAIN_LIST_END
+#define CRYPTO_CIPHER_OP_MAX ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define CRYPTO_CIPHER_KEYLEN_MAX ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define CRYPTO_CIPHER_MAX RTE_CRYPTO_CIPHER_LIST_END
+#define CRYPTO_AUTH_MAX RTE_CRYPTO_AUTH_LIST_END
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+#define SHA256_AUTH_KEY_LENGTH (BYTE_LENGTH(256))
+#define SHA256_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA1_AUTH_KEY_LENGTH (BYTE_LENGTH(160))
+#define SHA1_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA_AUTH_KEY_MAX SHA256_AUTH_KEY_LENGTH
+#define SHA_BLOCK_MAX SHA256_BLOCK_SIZE
+
+typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
+ uint8_t *, uint8_t *, uint64_t,
+ crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session Private Data Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+ enum armv8_crypto_chain_order chain_order;
+ /**< chain order mode */
+ crypto_func_t crypto_func;
+ /**< cryptographic function to use for this session */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ struct {
+ uint8_t data[256];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ crypto_key_sched_t key_sched;
+ /**< Key schedule function */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum armv8_crypto_auth_mode mode;
+ /**< auth operation mode */
+
+ union {
+ struct {
+ /* Add data if needed */
+ } auth;
+
+ struct {
+ uint8_t i_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< inner pad (max supported block length) */
+ uint8_t o_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< outer pad (max supported block length) */
+ uint8_t key[SHA_BLOCK_MAX];
+ /**< HMAC key (max supported block length)*/
+ } hmac;
+ };
+ uint16_t digest_length;
+ /* Digest length */
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+ struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _ARMV8_PMD_PRIVATE_H_ */
#include "armv8_crypto_defs.h"
-#include "rte_armv8_pmd_private.h"
+#include "armv8_pmd_private.h"
static uint8_t cryptodev_driver_id;
#include "armv8_crypto_defs.h"
-#include "rte_armv8_pmd_private.h"
+#include "armv8_pmd_private.h"
static const struct rte_cryptodev_capabilities
armv8_crypto_pmd_capabilities[] = {
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
-#define _RTE_ARMV8_PMD_PRIVATE_H_
-
-#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
-/**< ARMv8 Crypto PMD device name */
-
-#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
-#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
- __func__, __LINE__, ## args)
-
-#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
- __func__, __LINE__, ## args)
-
-#define ARMV8_CRYPTO_ASSERT(con) \
-do { \
- if (!(con)) { \
- rte_panic("%s(): " \
- con "condition failed, line %u", __func__); \
- } \
-} while (0)
-
-#else
-#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
-#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
-#define ARMV8_CRYPTO_ASSERT(con)
-#endif
-
-#define NBBY 8 /* Number of bits in a byte */
-#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
-
-/* Maximum length for digest (SHA-256 needs 32 bytes) */
-#define DIGEST_LENGTH_MAX 32
-
-/** ARMv8 operation order mode enumerator */
-enum armv8_crypto_chain_order {
- ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
- ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
- ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
- ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
-};
-
-/** ARMv8 cipher operation enumerator */
-enum armv8_crypto_cipher_operation {
- ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
- ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
- ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
-};
-
-enum armv8_crypto_cipher_keylen {
- ARMV8_CRYPTO_CIPHER_KEYLEN_128,
- ARMV8_CRYPTO_CIPHER_KEYLEN_192,
- ARMV8_CRYPTO_CIPHER_KEYLEN_256,
- ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
- ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
- ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
-};
-
-/** ARMv8 auth mode enumerator */
-enum armv8_crypto_auth_mode {
- ARMV8_CRYPTO_AUTH_AS_AUTH,
- ARMV8_CRYPTO_AUTH_AS_HMAC,
- ARMV8_CRYPTO_AUTH_AS_CIPHER,
- ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
- ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
-};
-
-#define CRYPTO_ORDER_MAX ARMV8_CRYPTO_CHAIN_LIST_END
-#define CRYPTO_CIPHER_OP_MAX ARMV8_CRYPTO_CIPHER_OP_LIST_END
-#define CRYPTO_CIPHER_KEYLEN_MAX ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
-#define CRYPTO_CIPHER_MAX RTE_CRYPTO_CIPHER_LIST_END
-#define CRYPTO_AUTH_MAX RTE_CRYPTO_AUTH_LIST_END
-
-#define HMAC_IPAD_VALUE (0x36)
-#define HMAC_OPAD_VALUE (0x5C)
-
-#define SHA256_AUTH_KEY_LENGTH (BYTE_LENGTH(256))
-#define SHA256_BLOCK_SIZE (BYTE_LENGTH(512))
-
-#define SHA1_AUTH_KEY_LENGTH (BYTE_LENGTH(160))
-#define SHA1_BLOCK_SIZE (BYTE_LENGTH(512))
-
-#define SHA_AUTH_KEY_MAX SHA256_AUTH_KEY_LENGTH
-#define SHA_BLOCK_MAX SHA256_BLOCK_SIZE
-
-typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
- uint8_t *, uint8_t *, uint64_t,
- crypto_arg_t *);
-
-typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
-
-/** private data structure for each ARMv8 crypto device */
-struct armv8_crypto_private {
- unsigned int max_nb_qpairs;
- /**< Max number of queue pairs */
-};
-
-/** ARMv8 crypto queue pair */
-struct armv8_crypto_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- struct rte_ring *processed_ops;
- /**< Ring for placing process packets */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats stats;
- /**< Queue pair statistics */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- uint8_t temp_digest[DIGEST_LENGTH_MAX];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-} __rte_cache_aligned;
-
-/** ARMv8 crypto private session structure */
-struct armv8_crypto_session {
- enum armv8_crypto_chain_order chain_order;
- /**< chain order mode */
- crypto_func_t crypto_func;
- /**< cryptographic function to use for this session */
-
- /** Cipher Parameters */
- struct {
- enum rte_crypto_cipher_operation direction;
- /**< cipher operation direction */
- enum rte_crypto_cipher_algorithm algo;
- /**< cipher algorithm */
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
-
- struct {
- uint8_t data[256];
- /**< key data */
- size_t length;
- /**< key length in bytes */
- } key;
-
- crypto_key_sched_t key_sched;
- /**< Key schedule function */
- } cipher;
-
- /** Authentication Parameters */
- struct {
- enum rte_crypto_auth_operation operation;
- /**< auth operation generate or verify */
- enum armv8_crypto_auth_mode mode;
- /**< auth operation mode */
-
- union {
- struct {
- /* Add data if needed */
- } auth;
-
- struct {
- uint8_t i_key_pad[SHA_BLOCK_MAX]
- __rte_cache_aligned;
- /**< inner pad (max supported block length) */
- uint8_t o_key_pad[SHA_BLOCK_MAX]
- __rte_cache_aligned;
- /**< outer pad (max supported block length) */
- uint8_t key[SHA_BLOCK_MAX];
- /**< HMAC key (max supported block length)*/
- } hmac;
- };
- uint16_t digest_length;
- /* Digest length */
- } auth;
-
-} __rte_cache_aligned;
-
-/** Set and validate ARMv8 crypto session parameters */
-extern int armv8_crypto_set_session_parameters(
- struct armv8_crypto_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
-
-#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
*
*/
-#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
-#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#ifndef _DPAA2_SEC_PMD_PRIVATE_H_
+#define _DPAA2_SEC_PMD_PRIVATE_H_
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
return result;
}
-#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
+#endif /* _DPAA2_SEC_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _KASUMI_PMD_PRIVATE_H_
+#define _KASUMI_PMD_PRIVATE_H_
+
+#include <sso_kasumi.h>
+
+#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
+/**< KASUMI PMD device name */
+
+/** KASUMI PMD LOGTYPE DRIVER */
+/* NOTE(review): non-extern definition in a header yields one (common)
+ * symbol per including translation unit and fails to link under
+ * -fno-common; prefer `extern int kasumi_logtype_driver;` with a single
+ * definition in the PMD .c file.
+ */
+int kasumi_logtype_driver;
+
+#define KASUMI_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define KASUMI_DIGEST_LENGTH 4
+
+/** private data structure for each virtual KASUMI device */
+struct kasumi_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** KASUMI buffer queue pair */
+struct kasumi_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session Private Data Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum kasumi_operation {
+ KASUMI_OP_ONLY_CIPHER,
+ KASUMI_OP_ONLY_AUTH,
+ KASUMI_OP_CIPHER_AUTH,
+ KASUMI_OP_AUTH_CIPHER,
+ KASUMI_OP_NOT_SUPPORTED
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+ /* Keys have to be 16-byte aligned */
+ sso_kasumi_key_sched_t pKeySched_cipher;
+ sso_kasumi_key_sched_t pKeySched_hash;
+ enum kasumi_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
+
+#endif /* _KASUMI_PMD_PRIVATE_H_ */
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include "rte_kasumi_pmd_private.h"
+#include "kasumi_pmd_private.h"
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
-#include "rte_kasumi_pmd_private.h"
+#include "kasumi_pmd_private.h"
static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
{ /* KASUMI (F9) */
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
-#define _RTE_KASUMI_PMD_PRIVATE_H_
-
-#include <sso_kasumi.h>
-
-#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
-/**< KASUMI PMD device name */
-
-/** KASUMI PMD LOGTYPE DRIVER */
-int kasumi_logtype_driver;
-
-#define KASUMI_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
- "%s() line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-#define KASUMI_DIGEST_LENGTH 4
-
-/** private data structure for each virtual KASUMI device */
-struct kasumi_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
-};
-
-/** KASUMI buffer queue pair */
-struct kasumi_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- struct rte_ring *processed_ops;
- /**< Ring for placing processed ops */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
- uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-} __rte_cache_aligned;
-
-enum kasumi_operation {
- KASUMI_OP_ONLY_CIPHER,
- KASUMI_OP_ONLY_AUTH,
- KASUMI_OP_CIPHER_AUTH,
- KASUMI_OP_AUTH_CIPHER,
- KASUMI_OP_NOT_SUPPORTED
-};
-
-/** KASUMI private session structure */
-struct kasumi_session {
- /* Keys have to be 16-byte aligned */
- sso_kasumi_key_sched_t pKeySched_cipher;
- sso_kasumi_key_sched_t pKeySched_hash;
- enum kasumi_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-
-int
-kasumi_set_session_parameters(struct kasumi_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
-
-#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _MRVL_PMD_PRIVATE_H_
+#define _MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
+/**< Marvell PMD device name */
+
+/** MRVL PMD LOGTYPE DRIVER */
+int mrvl_logtype_driver;
+
+#define MRVL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, mrvl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
+
+#define MRVL_MAX_SEGMENTS 16
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+ MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+ MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+ MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+ MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+ MRVL_CRYPTO_CHAIN_COMBINED,
+ MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+ /** SAM CIO (MUSDK Queue Pair equivalent).*/
+ struct sam_cio *cio;
+
+ /** Session Mempool. */
+ struct rte_mempool *sess_mp;
+
+ /** Session Private Data Mempool. */
+ struct rte_mempool *sess_mp_priv;
+
+ /** Queue pair statistics. */
+ struct rte_cryptodev_stats stats;
+
+ /** CIO initialization parameters.*/
+ struct sam_cio_params cio_params;
+} __rte_cache_aligned;
+
+/** MRVL crypto private session structure. */
+struct mrvl_crypto_session {
+ /** Crypto operations chain order. */
+ enum mrvl_crypto_chain_order chain_order;
+
+ /** Session initialization parameters. */
+ struct sam_session_params sam_sess_params;
+
+ /** SAM session pointer. */
+ struct sam_sa *sam_sess;
+
+ /** Cipher IV offset. */
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+struct mrvl_crypto_src_table {
+ uint16_t iter_ops;
+ struct sam_buf_info src_bd[MRVL_MAX_SEGMENTS];
+} __rte_cache_aligned;
+
+/** Set and validate MRVL crypto session parameters */
+extern int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops;
+
+#endif /* _MRVL_PMD_PRIVATE_H_ */
#include <rte_kvargs.h>
#include <rte_mvep_common.h>
-#include "rte_mrvl_pmd_private.h"
+#include "mrvl_pmd_private.h"
#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
-#include "rte_mrvl_pmd_private.h"
+#include "mrvl_pmd_private.h"
/**
* Capabilities list to be used in reporting to DPDK.
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- */
-
-#ifndef _RTE_MRVL_PMD_PRIVATE_H_
-#define _RTE_MRVL_PMD_PRIVATE_H_
-
-#include "rte_mrvl_compat.h"
-
-#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
-/**< Marvell PMD device name */
-
-/** MRVL PMD LOGTYPE DRIVER */
-int mrvl_logtype_driver;
-
-#define MRVL_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, mrvl_logtype_driver, \
- "%s() line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-/**
- * Handy bits->bytes conversion macro.
- */
-#define BITS2BYTES(x) ((x) >> 3)
-
-#define MRVL_MAX_SEGMENTS 16
-
-/** The operation order mode enumerator. */
-enum mrvl_crypto_chain_order {
- MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
- MRVL_CRYPTO_CHAIN_AUTH_ONLY,
- MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
- MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
- MRVL_CRYPTO_CHAIN_COMBINED,
- MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
-};
-
-/** Private data structure for each crypto device. */
-struct mrvl_crypto_private {
- unsigned int max_nb_qpairs; /**< Max number of queue pairs */
- unsigned int max_nb_sessions; /**< Max number of sessions */
-};
-
-/** MRVL crypto queue pair structure. */
-struct mrvl_crypto_qp {
- /** SAM CIO (MUSDK Queue Pair equivalent).*/
- struct sam_cio *cio;
-
- /** Session Mempool. */
- struct rte_mempool *sess_mp;
-
- /** Session Private Data Mempool. */
- struct rte_mempool *sess_mp_priv;
-
- /** Queue pair statistics. */
- struct rte_cryptodev_stats stats;
-
- /** CIO initialization parameters.*/
- struct sam_cio_params cio_params;
-} __rte_cache_aligned;
-
-/** MRVL crypto private session structure. */
-struct mrvl_crypto_session {
- /** Crypto operations chain order. */
- enum mrvl_crypto_chain_order chain_order;
-
- /** Session initialization parameters. */
- struct sam_session_params sam_sess_params;
-
- /** SAM session pointer. */
- struct sam_sa *sam_sess;
-
- /** Cipher IV offset. */
- uint16_t cipher_iv_offset;
-} __rte_cache_aligned;
-
-struct mrvl_crypto_src_table {
- uint16_t iter_ops;
- struct sam_buf_info src_bd[MRVL_MAX_SEGMENTS];
-} __rte_cache_aligned;
-
-/** Set and validate MRVL crypto session parameters */
-extern int
-mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops;
-
-#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+#ifndef _OPENSSL_PMD_PRIVATE_H_
+#define _OPENSSL_PMD_PRIVATE_H_
+
+#include <openssl/evp.h>
+#include <openssl/hmac.h>
+#include <openssl/des.h>
+#include <openssl/rsa.h>
+#include <openssl/dh.h>
+#include <openssl/dsa.h>
+
+#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
+/**< Open SSL Crypto PMD device name */
+
+/** OPENSSL PMD LOGTYPE DRIVER */
+int openssl_logtype_driver;
+#define OPENSSL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
+
+/** OPENSSL operation order mode enumerator */
+enum openssl_chain_order {
+ OPENSSL_CHAIN_ONLY_CIPHER,
+ OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_BPI,
+ OPENSSL_CHAIN_CIPHER_AUTH,
+ OPENSSL_CHAIN_AUTH_CIPHER,
+ OPENSSL_CHAIN_COMBINED,
+ OPENSSL_CHAIN_NOT_SUPPORTED
+};
+
+/** OPENSSL cipher mode enumerator */
+enum openssl_cipher_mode {
+ OPENSSL_CIPHER_LIB,
+ OPENSSL_CIPHER_DES3CTR,
+};
+
+/** OPENSSL auth mode enumerator */
+enum openssl_auth_mode {
+ OPENSSL_AUTH_AS_AUTH,
+ OPENSSL_AUTH_AS_HMAC,
+};
+
+/** private data structure for each OPENSSL crypto device */
+struct openssl_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+};
+
+/** OPENSSL crypto queue pair */
+struct openssl_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session Private Data Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private session structure */
+struct openssl_session {
+ enum openssl_chain_order chain_order;
+ /**< chain order mode */
+
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD algorithm */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum openssl_cipher_mode mode;
+ /**< cipher operation mode */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+
+ struct {
+ uint8_t data[32];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ const EVP_CIPHER *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_CIPHER_CTX *ctx;
+ /**< pointer to EVP context structure */
+ EVP_CIPHER_CTX *bpi_ctx;
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum openssl_auth_mode mode;
+ /**< auth operation mode */
+ enum rte_crypto_auth_algorithm algo;
+ /**< cipher algorithm */
+
+ union {
+ struct {
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } auth;
+
+ struct {
+ EVP_PKEY *pkey;
+ /**< pointer to EVP key */
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ HMAC_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } hmac;
+ };
+
+ uint16_t aad_length;
+ /**< AAD length */
+ uint16_t digest_length;
+ /**< digest length */
+ } auth;
+
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private asymmetric session structure */
+struct openssl_asym_session {
+ enum rte_crypto_asym_xform_type xfrm_type;
+ union {
+ struct rsa {
+ RSA *rsa;
+ } r;
+ struct exp {
+ BIGNUM *exp;
+ BIGNUM *mod;
+ BN_CTX *ctx;
+ } e;
+ struct mod {
+ BIGNUM *modulus;
+ BN_CTX *ctx;
+ } m;
+ struct dh {
+ DH *dh_key;
+ uint32_t key_op;
+ } dh;
+ struct {
+ DSA *dsa;
+ } s;
+ } u;
+} __rte_cache_aligned;
+/** Set and validate OPENSSL crypto session parameters */
+extern int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Reset OPENSSL crypto session parameters */
+extern void
+openssl_reset_session(struct openssl_session *sess);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
+
+#endif /* _OPENSSL_PMD_PRIVATE_H_ */
#include <openssl/hmac.h>
#include <openssl/evp.h>
-#include "rte_openssl_pmd_private.h"
+#include "openssl_pmd_private.h"
#include "compat.h"
#define DES_BLOCK_SIZE 8
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
-#include "rte_openssl_pmd_private.h"
+#include "openssl_pmd_private.h"
#include "compat.h"
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
- */
-
-#ifndef _OPENSSL_PMD_PRIVATE_H_
-#define _OPENSSL_PMD_PRIVATE_H_
-
-#include <openssl/evp.h>
-#include <openssl/hmac.h>
-#include <openssl/des.h>
-#include <openssl/rsa.h>
-#include <openssl/dh.h>
-#include <openssl/dsa.h>
-
-#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
-/**< Open SSL Crypto PMD device name */
-
-/** OPENSSL PMD LOGTYPE DRIVER */
-int openssl_logtype_driver;
-#define OPENSSL_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \
- "%s() line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-/* Maximum length for digest (SHA-512 needs 64 bytes) */
-#define DIGEST_LENGTH_MAX 64
-
-/** OPENSSL operation order mode enumerator */
-enum openssl_chain_order {
- OPENSSL_CHAIN_ONLY_CIPHER,
- OPENSSL_CHAIN_ONLY_AUTH,
- OPENSSL_CHAIN_CIPHER_BPI,
- OPENSSL_CHAIN_CIPHER_AUTH,
- OPENSSL_CHAIN_AUTH_CIPHER,
- OPENSSL_CHAIN_COMBINED,
- OPENSSL_CHAIN_NOT_SUPPORTED
-};
-
-/** OPENSSL cipher mode enumerator */
-enum openssl_cipher_mode {
- OPENSSL_CIPHER_LIB,
- OPENSSL_CIPHER_DES3CTR,
-};
-
-/** OPENSSL auth mode enumerator */
-enum openssl_auth_mode {
- OPENSSL_AUTH_AS_AUTH,
- OPENSSL_AUTH_AS_HMAC,
-};
-
-/** private data structure for each OPENSSL crypto device */
-struct openssl_private {
- unsigned int max_nb_qpairs;
- /**< Max number of queue pairs */
-};
-
-/** OPENSSL crypto queue pair */
-struct openssl_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- struct rte_ring *processed_ops;
- /**< Ring for placing process packets */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats stats;
- /**< Queue pair statistics */
- uint8_t temp_digest[DIGEST_LENGTH_MAX];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-} __rte_cache_aligned;
-
-/** OPENSSL crypto private session structure */
-struct openssl_session {
- enum openssl_chain_order chain_order;
- /**< chain order mode */
-
- struct {
- uint16_t length;
- uint16_t offset;
- } iv;
- /**< IV parameters */
-
- enum rte_crypto_aead_algorithm aead_algo;
- /**< AEAD algorithm */
-
- /** Cipher Parameters */
- struct {
- enum rte_crypto_cipher_operation direction;
- /**< cipher operation direction */
- enum openssl_cipher_mode mode;
- /**< cipher operation mode */
- enum rte_crypto_cipher_algorithm algo;
- /**< cipher algorithm */
-
- struct {
- uint8_t data[32];
- /**< key data */
- size_t length;
- /**< key length in bytes */
- } key;
-
- const EVP_CIPHER *evp_algo;
- /**< pointer to EVP algorithm function */
- EVP_CIPHER_CTX *ctx;
- /**< pointer to EVP context structure */
- EVP_CIPHER_CTX *bpi_ctx;
- } cipher;
-
- /** Authentication Parameters */
- struct {
- enum rte_crypto_auth_operation operation;
- /**< auth operation generate or verify */
- enum openssl_auth_mode mode;
- /**< auth operation mode */
- enum rte_crypto_auth_algorithm algo;
- /**< cipher algorithm */
-
- union {
- struct {
- const EVP_MD *evp_algo;
- /**< pointer to EVP algorithm function */
- EVP_MD_CTX *ctx;
- /**< pointer to EVP context structure */
- } auth;
-
- struct {
- EVP_PKEY *pkey;
- /**< pointer to EVP key */
- const EVP_MD *evp_algo;
- /**< pointer to EVP algorithm function */
- HMAC_CTX *ctx;
- /**< pointer to EVP context structure */
- } hmac;
- };
-
- uint16_t aad_length;
- /**< AAD length */
- uint16_t digest_length;
- /**< digest length */
- } auth;
-
-} __rte_cache_aligned;
-
-/** OPENSSL crypto private asymmetric session structure */
-struct openssl_asym_session {
- enum rte_crypto_asym_xform_type xfrm_type;
- union {
- struct rsa {
- RSA *rsa;
- } r;
- struct exp {
- BIGNUM *exp;
- BIGNUM *mod;
- BN_CTX *ctx;
- } e;
- struct mod {
- BIGNUM *modulus;
- BN_CTX *ctx;
- } m;
- struct dh {
- DH *dh_key;
- uint32_t key_op;
- } dh;
- struct {
- DSA *dsa;
- } s;
- } u;
-} __rte_cache_aligned;
-/** Set and validate OPENSSL crypto session parameters */
-extern int
-openssl_set_session_parameters(struct openssl_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-/** Reset OPENSSL crypto session parameters */
-extern void
-openssl_reset_session(struct openssl_session *sess);
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
-
-#endif /* _OPENSSL_PMD_PRIVATE_H_ */
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include "rte_snow3g_pmd_private.h"
+#include "snow3g_pmd_private.h"
#define SNOW3G_IV_LENGTH 16
#define SNOW3G_MAX_BURST 8
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
-#include "rte_snow3g_pmd_private.h"
+#include "snow3g_pmd_private.h"
static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
{ /* SNOW 3G (UIA2) */
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
-#define _RTE_SNOW3G_PMD_PRIVATE_H_
-
-#include <sso_snow3g.h>
-
-#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
-/**< SNOW 3G PMD device name */
-
-/** SNOW 3G PMD LOGTYPE DRIVER */
-int snow3g_logtype_driver;
-
-#define SNOW3G_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \
- "%s() line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-#define SNOW3G_DIGEST_LENGTH 4
-#define SNOW3G_MAX_KEY_SIZE 128
-
-/** private data structure for each virtual SNOW 3G device */
-struct snow3g_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
-};
-
-/** SNOW 3G buffer queue pair */
-struct snow3g_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- struct rte_ring *processed_ops;
- /**< Ring for placing processed ops */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
- uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-} __rte_cache_aligned;
-
-enum snow3g_operation {
- SNOW3G_OP_ONLY_CIPHER,
- SNOW3G_OP_ONLY_AUTH,
- SNOW3G_OP_CIPHER_AUTH,
- SNOW3G_OP_AUTH_CIPHER,
- SNOW3G_OP_NOT_SUPPORTED
-};
-
-/** SNOW 3G private session structure */
-struct snow3g_session {
- enum snow3g_operation op;
- enum rte_crypto_auth_operation auth_op;
- sso_snow3g_key_schedule_t pKeySched_cipher;
- sso_snow3g_key_schedule_t pKeySched_hash;
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-
-extern int
-snow3g_set_session_parameters(struct snow3g_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
-
-
-
-#endif /* _RTE_SNOW3G_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _SNOW3G_PMD_PRIVATE_H_
+#define _SNOW3G_PMD_PRIVATE_H_
+
+#include <sso_snow3g.h>
+
+#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
+/**< SNOW 3G PMD device name */
+
+/** SNOW 3G PMD LOGTYPE DRIVER */
+int snow3g_logtype_driver;
+
+#define SNOW3G_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define SNOW3G_DIGEST_LENGTH 4
+#define SNOW3G_MAX_KEY_SIZE 128
+
+/** private data structure for each virtual SNOW 3G device */
+struct snow3g_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** SNOW 3G buffer queue pair */
+struct snow3g_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session Private Data Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum snow3g_operation {
+ SNOW3G_OP_ONLY_CIPHER,
+ SNOW3G_OP_ONLY_AUTH,
+ SNOW3G_OP_CIPHER_AUTH,
+ SNOW3G_OP_AUTH_CIPHER,
+ SNOW3G_OP_NOT_SUPPORTED
+};
+
+/** SNOW 3G private session structure */
+struct snow3g_session {
+ enum snow3g_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ sso_snow3g_key_schedule_t pKeySched_cipher;
+ sso_snow3g_key_schedule_t pKeySched_hash;
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+
+extern int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
+
+
+
+#endif /* _SNOW3G_PMD_PRIVATE_H_ */
#include <rte_malloc.h>
#include <rte_cpuflags.h>
-#include "rte_zuc_pmd_private.h"
+#include "zuc_pmd_private.h"
#define ZUC_MAX_BURST 4
#define BYTE_LEN 8
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
-#include "rte_zuc_pmd_private.h"
+#include "zuc_pmd_private.h"
static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
{ /* ZUC (EIA3) */
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2018 Intel Corporation
- */
-
-#ifndef _RTE_ZUC_PMD_PRIVATE_H_
-#define _RTE_ZUC_PMD_PRIVATE_H_
-
-#include <sso_zuc.h>
-
-#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
-/**< KASUMI PMD device name */
-
-/** ZUC PMD LOGTYPE DRIVER */
-int zuc_logtype_driver;
-#define ZUC_LOG(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \
- "%s()... line %u: " fmt "\n", __func__, __LINE__, \
- ## __VA_ARGS__)
-
-#define ZUC_IV_KEY_LENGTH 16
-#define ZUC_DIGEST_LENGTH 4
-
-/** private data structure for each virtual ZUC device */
-struct zuc_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
-};
-
-/** ZUC buffer queue pair */
-struct zuc_qp {
- uint16_t id;
- /**< Queue Pair Identifier */
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
- /**< Unique Queue Pair Name */
- struct rte_ring *processed_ops;
- /**< Ring for placing processed ops */
- struct rte_mempool *sess_mp;
- /**< Session Mempool */
- struct rte_mempool *sess_mp_priv;
- /**< Session Private Data Mempool */
- struct rte_cryptodev_stats qp_stats;
- /**< Queue pair statistics */
- uint8_t temp_digest[ZUC_DIGEST_LENGTH];
- /**< Buffer used to store the digest generated
- * by the driver when verifying a digest provided
- * by the user (using authentication verify operation)
- */
-} __rte_cache_aligned;
-
-enum zuc_operation {
- ZUC_OP_ONLY_CIPHER,
- ZUC_OP_ONLY_AUTH,
- ZUC_OP_CIPHER_AUTH,
- ZUC_OP_AUTH_CIPHER,
- ZUC_OP_NOT_SUPPORTED
-};
-
-/** ZUC private session structure */
-struct zuc_session {
- enum zuc_operation op;
- enum rte_crypto_auth_operation auth_op;
- uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
- uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
- uint16_t cipher_iv_offset;
- uint16_t auth_iv_offset;
-} __rte_cache_aligned;
-
-
-extern int
-zuc_set_session_parameters(struct zuc_session *sess,
- const struct rte_crypto_sym_xform *xform);
-
-
-/** device specific operations function pointer structure */
-extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
-
-
-
-#endif /* _RTE_ZUC_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation
+ */
+
+#ifndef _ZUC_PMD_PRIVATE_H_
+#define _ZUC_PMD_PRIVATE_H_
+
+#include <sso_zuc.h>
+
+#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
+/**< ZUC PMD device name */
+
+/** ZUC PMD LOGTYPE DRIVER */
+int zuc_logtype_driver;
+#define ZUC_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \
+ "%s()... line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
+#define ZUC_IV_KEY_LENGTH 16
+#define ZUC_DIGEST_LENGTH 4
+
+/** private data structure for each virtual ZUC device */
+struct zuc_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+};
+
+/** ZUC buffer queue pair */
+struct zuc_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session Private Data Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint8_t temp_digest[ZUC_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+enum zuc_operation {
+ ZUC_OP_ONLY_CIPHER,
+ ZUC_OP_ONLY_AUTH,
+ ZUC_OP_CIPHER_AUTH,
+ ZUC_OP_AUTH_CIPHER,
+ ZUC_OP_NOT_SUPPORTED
+};
+
+/** ZUC private session structure */
+struct zuc_session {
+ enum zuc_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+ uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+ uint16_t cipher_iv_offset;
+ uint16_t auth_iv_offset;
+} __rte_cache_aligned;
+
+
+extern int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
+
+
+
+#endif /* _ZUC_PMD_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#ifndef _ETH_BOND_8023AD_PRIVATE_H_
+#define _ETH_BOND_8023AD_PRIVATE_H_
+
+#include <stdint.h>
+
+#include <rte_ether.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_flow.h>
+
+#include "rte_eth_bond_8023ad.h"
+
+#define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100
+/** Maximum number of LACP packets from one slave queued in RX ring. */
+#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3
+/** Maximum number of packets to one slave queued in TX ring. */
+#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1
+/**
+ * Timeout definitions (5.4.4 in 802.1AX documentation).
+ */
+#define BOND_8023AD_FAST_PERIODIC_MS 900
+#define BOND_8023AD_SLOW_PERIODIC_MS 29000
+#define BOND_8023AD_SHORT_TIMEOUT_MS 3000
+#define BOND_8023AD_LONG_TIMEOUT_MS 90000
+#define BOND_8023AD_CHURN_DETECTION_TIMEOUT_MS 60000
+#define BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS 2000
+#define BOND_8023AD_TX_MACHINE_PERIOD_MS 500
+#define BOND_8023AD_RX_MARKER_PERIOD_MS 2000
+
+/**
+ * Interval of showing warning message from state machines. All messages will
+ * be held (and gathered together) to prevent flooding.
+ * This is not part of the 802.1AX standard.
+ */
+#define BOND_8023AD_WARNINGS_PERIOD_MS 1000
+
+
+
+/**
+ * State machine flags
+ */
+#define SM_FLAGS_BEGIN 0x0001
+#define SM_FLAGS_LACP_ENABLED 0x0002
+#define SM_FLAGS_ACTOR_CHURN 0x0004
+#define SM_FLAGS_PARTNER_CHURN 0x0008
+#define SM_FLAGS_MOVED 0x0100
+#define SM_FLAGS_PARTNER_SHORT_TIMEOUT 0x0200
+#define SM_FLAGS_NTT 0x0400
+
+#define BOND_LINK_FULL_DUPLEX_KEY 0x01
+#define BOND_LINK_SPEED_KEY_10M 0x02
+#define BOND_LINK_SPEED_KEY_100M 0x04
+#define BOND_LINK_SPEED_KEY_1000M 0x08
+#define BOND_LINK_SPEED_KEY_10G 0x10
+#define BOND_LINK_SPEED_KEY_20G 0x11
+#define BOND_LINK_SPEED_KEY_40G 0x12
+
+#define WRN_RX_MARKER_TO_FAST 0x01
+#define WRN_UNKNOWN_SLOW_TYPE 0x02
+#define WRN_UNKNOWN_MARKER_TYPE 0x04
+#define WRN_NOT_LACP_CAPABLE 0x08
+#define WRN_RX_QUEUE_FULL 0x10
+#define WRN_TX_QUEUE_FULL 0x20
+
+#define CHECK_FLAGS(_variable, _f) ((_variable) & (_f))
+#define SET_FLAGS(_variable, _f) ((_variable) |= (_f))
+#define CLEAR_FLAGS(_variable, _f) ((_variable) &= ~(_f))
+
+#define SM_FLAG(_p, _f) (!!CHECK_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f))
+#define SM_FLAG_SET(_p, _f) SET_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+#define SM_FLAG_CLR(_p, _f) CLEAR_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
+
+#define ACTOR_STATE(_p, _f) (!!CHECK_FLAGS((_p)->actor_state, STATE_ ## _f))
+#define ACTOR_STATE_SET(_p, _f) SET_FLAGS((_p)->actor_state, STATE_ ## _f)
+#define ACTOR_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->actor_state, STATE_ ## _f)
+
+#define PARTNER_STATE(_p, _f) (!!CHECK_FLAGS((_p)->partner_state, STATE_ ## _f))
+#define PARTNER_STATE_SET(_p, _f) SET_FLAGS((_p)->partner_state, STATE_ ## _f)
+#define PARTNER_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->partner_state, STATE_ ## _f)
+
+/** Variables associated with each port (5.4.7 in 802.1AX documentation). */
+struct port {
+ /**
+ * The operational values of the Actor's state parameters. Bitmask
+ * of port states.
+ */
+ uint8_t actor_state;
+
+ /** The operational Actor's port parameters */
+ struct port_params actor;
+
+ /**
+ * The operational value of the Actor's view of the current values of
+ * the Partner's state parameters. The Actor sets this variable either
+ * to the value received from the Partner in an LACPDU, or to the value
+ * of Partner_Admin_Port_State. Bitmask of port states.
+ */
+ uint8_t partner_state;
+
+ /** The operational Partner's port parameters */
+ struct port_params partner;
+
+ /* Additional port parameters not listed in documentation */
+ /** State machine flags */
+ uint16_t sm_flags;
+ enum rte_bond_8023ad_selection selected;
+
+ /** Indicates if either allmulti or promisc has been enforced on the
+ * slave so that we can receive lacp packets
+ */
+#define BOND_8023AD_FORCED_ALLMULTI (1 << 0)
+#define BOND_8023AD_FORCED_PROMISC (1 << 1)
+ uint8_t forced_rx_flags;
+
+ uint64_t current_while_timer;
+ uint64_t periodic_timer;
+ uint64_t wait_while_timer;
+ uint64_t tx_machine_timer;
+ uint64_t tx_marker_timer;
+ /* Aggregator parameters */
+ /** Used aggregator port ID */
+ uint16_t aggregator_port_id;
+
+ /** Memory pool used to allocate rings */
+ struct rte_mempool *mbuf_pool;
+
+ /** Ring of LACP packets from RX burst function */
+ struct rte_ring *rx_ring;
+
+ /** Ring of slow protocol packets (LACP and MARKERS) to TX burst function */
+ struct rte_ring *tx_ring;
+
+ /** Timer which is also used as a mutex. If it is 0 (not running), an RX
+  * marker packet may be responded to. Otherwise it shall be dropped. It is
+  * zeroed in the mode 4 callback function after expiry. */
+ volatile uint64_t rx_marker_timer;
+
+ uint64_t warning_timer;
+ volatile uint16_t warnings_to_show;
+
+ /** Memory pool used to allocate slow queues */
+ struct rte_mempool *slow_pool;
+};
+
+struct mode8023ad_private {
+ uint64_t fast_periodic_timeout;
+ uint64_t slow_periodic_timeout;
+ uint64_t short_timeout;
+ uint64_t long_timeout;
+ uint64_t aggregate_wait_timeout;
+ uint64_t tx_period_timeout;
+ uint64_t rx_marker_timeout;
+ uint64_t update_timeout_us;
+ rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
+ uint8_t external_sm;
+ struct rte_ether_addr mac_addr;
+
+ struct rte_eth_link slave_link;
+ /**< slave link properties */
+
+ /**
+ * Configuration of dedicated hardware queues for control plane
+ * traffic
+ */
+ struct {
+ uint8_t enabled;
+
+ struct rte_flow *flow[RTE_MAX_ETHPORTS];
+
+ uint16_t rx_qid;
+ uint16_t tx_qid;
+ } dedicated_queues;
+ enum rte_bond_8023ad_agg_selection agg_selection;
+};
+
+/**
+ * @internal
+ * The pool of *port* structures. The size of the pool
+ * is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
+ */
+extern struct port bond_mode_8023ad_ports[];
+
+/* Forward declaration */
+struct bond_dev_private;
+
+
+/**
+ * @internal
+ *
+ * Set mode 4 configuration of bonded interface.
+ *
+ * @pre Bonded interface must be stopped.
+ *
+ * @param dev Bonded interface
+ * @param conf new configuration. If NULL set default configuration.
+ */
+void
+bond_mode_8023ad_setup(struct rte_eth_dev *dev,
+ struct rte_eth_bond_8023ad_conf *conf);
+
+/**
+ * @internal
+ *
+ * Enables 802.1AX mode and all active slaves on bonded interface.
+ *
+ * @param dev Bonded interface
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+bond_mode_8023ad_enable(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Disables 802.1AX mode of the bonded interface and slaves.
+ *
+ * @param dev Bonded interface
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int bond_mode_8023ad_disable(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Starts 802.3AX state machines management logic.
+ * @param dev Bonded interface
+ * @return
+ *   0 if machines were started, 1 if machines were already running,
+ * negative value otherwise.
+ */
+int
+bond_mode_8023ad_start(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Stops 802.3AX state machines management logic.
+ * @param dev Bonded interface
+ * @return
+ * 0 if this call stopped state machines, -ENOENT if alarm was not set.
+ */
+void
+bond_mode_8023ad_stop(struct rte_eth_dev *dev);
+
+/**
+ * @internal
+ *
+ * Passes given slow packet to state machines management logic.
+ * @param internals Bonded device private data.
+ * @param slave_id Slave port id.
+ * @param slot_pkt Slow packet.
+ */
+void
+bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
+ uint16_t slave_id, struct rte_mbuf *pkt);
+
+/**
+ * @internal
+ *
+ * Appends given slave to the list of used slaves.
+ *
+ * @param dev Bonded interface.
+ * @param port_id Slave port ID to be added
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+void
+bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id);
+
+/**
+ * @internal
+ *
+ * Deinitializes and removes given slave from 802.1AX mode.
+ *
+ * @param dev Bonded interface.
+ * @param slave_num Position of slave in active_slaves array
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+int
+bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos);
+
+/**
+ * Updates state when MAC was changed on bonded device or one of its slaves.
+ * @param bond_dev Bonded device
+ */
+void
+bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
+
+int
+bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
+ uint16_t slave_port);
+
+int
+bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port);
+
+int
+bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id);
+
+#endif /* _ETH_BOND_8023AD_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#ifndef _ETH_BOND_PRIVATE_H_
+#define _ETH_BOND_PRIVATE_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_ethdev_driver.h>
+#include <rte_flow.h>
+#include <rte_spinlock.h>
+#include <rte_bitmap.h>
+#include <rte_flow_driver.h>
+
+#include "rte_eth_bond.h"
+#include "eth_bond_8023ad_private.h"
+#include "rte_eth_bond_alb.h"
+
+#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
+#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
+#define PMD_BOND_MODE_KVARG ("mode")
+#define PMD_BOND_AGG_MODE_KVARG ("agg_mode")
+#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy")
+#define PMD_BOND_SOCKET_ID_KVARG ("socket_id")
+#define PMD_BOND_MAC_ADDR_KVARG ("mac")
+#define PMD_BOND_LSC_POLL_PERIOD_KVARG ("lsc_poll_period_ms")
+#define PMD_BOND_LINK_UP_PROP_DELAY_KVARG ("up_delay")
+#define PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG ("down_delay")
+
+#define PMD_BOND_XMIT_POLICY_LAYER2_KVARG ("l2")
+#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23")
+#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34")
+
+extern int bond_logtype;
+
+#define RTE_BOND_LOG(lvl, msg, ...) \
+ rte_log(RTE_LOG_ ## lvl, bond_logtype, \
+ "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
+
+#define BONDING_MODE_INVALID 0xFF
+
+extern const char *pmd_bond_init_valid_arguments[];
+
+extern struct rte_vdev_driver pmd_bond_drv;
+
+extern const struct rte_flow_ops bond_flow_ops;
+
+/** Port Queue Mapping Structure */
+struct bond_rx_queue {
+ uint16_t queue_id;
+ /**< Queue Id */
+ struct bond_dev_private *dev_private;
+ /**< Reference to eth_dev private structure */
+ uint16_t nb_rx_desc;
+ /**< Number of RX descriptors available for the queue */
+ struct rte_eth_rxconf rx_conf;
+ /**< Copy of RX configuration structure for queue */
+ struct rte_mempool *mb_pool;
+ /**< Reference to mbuf pool to use for RX queue */
+};
+
+struct bond_tx_queue {
+ uint16_t queue_id;
+ /**< Queue Id */
+ struct bond_dev_private *dev_private;
+ /**< Reference to dev private structure */
+ uint16_t nb_tx_desc;
+ /**< Number of TX descriptors available for the queue */
+ struct rte_eth_txconf tx_conf;
+ /**< Copy of TX configuration structure for queue */
+};
+
+/** Bonded slave devices structure */
+struct bond_ethdev_slave_ports {
+ uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
+ uint16_t slave_count; /**< Number of slaves */
+};
+
+struct bond_slave_details {
+ uint16_t port_id;
+
+ uint8_t link_status_poll_enabled;
+ uint8_t link_status_wait_to_complete;
+ uint8_t last_link_status;
+ /**< Port Id of slave eth_dev */
+ struct rte_ether_addr persisted_mac_addr;
+
+ uint16_t reta_size;
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* Slaves flows */
+ struct rte_flow *flows[RTE_MAX_ETHPORTS];
+ /* Flow description for synchronization */
+ struct rte_flow_conv_rule rule;
+ uint8_t rule_data[];
+};
+
+typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint16_t slave_count, uint16_t *slaves);
+
+/** Link Bonding PMD device private configuration Structure */
+struct bond_dev_private {
+ uint16_t port_id; /**< Port Id of Bonded Port */
+ uint8_t mode; /**< Link Bonding Mode */
+
+ rte_spinlock_t lock;
+ rte_spinlock_t lsc_lock;
+
+ uint16_t primary_port; /**< Primary Slave Port */
+ uint16_t current_primary_port; /**< Primary Slave Port */
+ uint16_t user_defined_primary_port;
+ /**< Flag for whether primary port is user defined or not */
+
+ uint8_t balance_xmit_policy;
+ /**< Transmit policy - l2 / l23 / l34 for operation in balance mode */
+ burst_xmit_hash_t burst_xmit_hash;
+ /**< Transmit policy hash function */
+
+ uint8_t user_defined_mac;
+ /**< Flag for whether MAC address is user defined or not */
+
+ uint8_t link_status_polling_enabled;
+ uint32_t link_status_polling_interval_ms;
+
+ uint32_t link_down_delay_ms;
+ uint32_t link_up_delay_ms;
+
+ uint16_t nb_rx_queues; /**< Total number of rx queues */
+ uint16_t nb_tx_queues; /**< Total number of tx queues*/
+
+ uint16_t active_slave; /**< Next active_slave to poll */
+ uint16_t active_slave_count; /**< Number of active slaves */
+ uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
+
+ uint16_t slave_count; /**< Number of bonded slaves */
+ struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
+	/**< Array of bonded slaves details */
+
+ struct mode8023ad_private mode4;
+ uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
+ /**< TLB active slaves send order */
+ struct mode_alb_private mode6;
+
+	uint64_t rx_offload_capa; /**< Rx offload capability */
+	uint64_t tx_offload_capa; /**< Tx offload capability */
+	uint64_t rx_queue_offload_capa; /**< per queue Rx offload capability */
+	uint64_t tx_queue_offload_capa; /**< per queue Tx offload capability */
+
+ /**< List of the configured flows */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+
+ /**< Flow isolation state */
+ int flow_isolated;
+ int flow_isolated_valid;
+
+ /** Bit mask of RSS offloads, the bit offset also means flow type */
+ uint64_t flow_type_rss_offloads;
+
+ struct rte_eth_rxconf default_rxconf; /**< Default RxQ conf. */
+ struct rte_eth_txconf default_txconf; /**< Default TxQ conf. */
+ struct rte_eth_desc_lim rx_desc_lim; /**< Rx descriptor limits */
+ struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */
+
+ uint16_t reta_size;
+ struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
+ RTE_RETA_GROUP_SIZE];
+
+ uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
+ uint8_t rss_key_len; /**< hash key length in bytes. */
+
+ struct rte_kvargs *kvlist;
+ uint8_t slave_update_idx;
+
+ uint32_t candidate_max_rx_pktlen;
+ uint32_t max_rx_pktlen;
+
+ void *vlan_filter_bmpmem; /* enabled vlan filter bitmap */
+ struct rte_bitmap *vlan_filter_bmp;
+};
+
+extern const struct eth_dev_ops default_dev_ops;
+
+int
+check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev);
+
+int
+check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
+
+/* Search given slave array to find position of given id.
+ * Return slave pos or slaves_count if not found. */
+static inline uint16_t
+find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) {
+
+ uint16_t pos;
+ for (pos = 0; pos < slaves_count; pos++) {
+ if (slave_id == slaves[pos])
+ break;
+ }
+
+ return pos;
+}
+
+int
+valid_port_id(uint16_t port_id);
+
+int
+valid_bonded_port_id(uint16_t port_id);
+
+int
+valid_slave_port_id(uint16_t port_id, uint8_t mode);
+
+void
+deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
+
+void
+activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
+
+int
+mac_address_set(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *new_mac_addr);
+
+int
+mac_address_get(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *dst_mac_addr);
+
+int
+mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
+
+int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
+bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
+
+int
+slave_configure(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+slave_remove(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+slave_add(struct bond_dev_private *internals,
+ struct rte_eth_dev *slave_eth_dev);
+
+void
+burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint16_t slave_count, uint16_t *slaves);
+
+void
+burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint16_t slave_count, uint16_t *slaves);
+
+void
+burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
+ uint16_t slave_count, uint16_t *slaves);
+
+
+void
+bond_ethdev_primary_set(struct bond_dev_private *internals,
+ uint16_t slave_port_id);
+
+int
+bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param);
+
+int
+bond_ethdev_parse_slave_port_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_slave_mode_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_socket_id_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_bond_mac_addr_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+int
+bond_ethdev_parse_time_ms_kvarg(const char *key,
+ const char *value, void *extra_args);
+
+void
+bond_tlb_disable(struct bond_dev_private *internals);
+
+void
+bond_tlb_enable(struct bond_dev_private *internals);
+
+void
+bond_tlb_activate_slave(struct bond_dev_private *internals);
+
+void
+bond_ethdev_stop(struct rte_eth_dev *eth_dev);
+
+void
+bond_ethdev_close(struct rte_eth_dev *dev);
+
+#endif
#include <rte_cycles.h>
#include <rte_compat.h>
-#include "rte_eth_bond_private.h"
+#include "eth_bond_private.h"
static void bond_mode_8023ad_ext_periodic_cb(void *arg);
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef RTE_ETH_BOND_8023AD_PRIVATE_H_
-#define RTE_ETH_BOND_8023AD_PRIVATE_H_
-
-#include <stdint.h>
-
-#include <rte_ether.h>
-#include <rte_byteorder.h>
-#include <rte_atomic.h>
-#include <rte_flow.h>
-
-#include "rte_eth_bond_8023ad.h"
-
-#define BOND_MODE_8023AX_UPDATE_TIMEOUT_MS 100
-/** Maximum number of packets to one slave queued in TX ring. */
-#define BOND_MODE_8023AX_SLAVE_RX_PKTS 3
-/** Maximum number of LACP packets from one slave queued in TX ring. */
-#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1
-/**
- * Timeouts deffinitions (5.4.4 in 802.1AX documentation).
- */
-#define BOND_8023AD_FAST_PERIODIC_MS 900
-#define BOND_8023AD_SLOW_PERIODIC_MS 29000
-#define BOND_8023AD_SHORT_TIMEOUT_MS 3000
-#define BOND_8023AD_LONG_TIMEOUT_MS 90000
-#define BOND_8023AD_CHURN_DETECTION_TIMEOUT_MS 60000
-#define BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS 2000
-#define BOND_8023AD_TX_MACHINE_PERIOD_MS 500
-#define BOND_8023AD_RX_MARKER_PERIOD_MS 2000
-
-/**
- * Interval of showing warning message from state machines. All messages will
- * be held (and gathered together) to prevent flooding.
- * This is no parto of 802.1AX standard.
- */
-#define BOND_8023AD_WARNINGS_PERIOD_MS 1000
-
-
-
-/**
- * State machine flags
- */
-#define SM_FLAGS_BEGIN 0x0001
-#define SM_FLAGS_LACP_ENABLED 0x0002
-#define SM_FLAGS_ACTOR_CHURN 0x0004
-#define SM_FLAGS_PARTNER_CHURN 0x0008
-#define SM_FLAGS_MOVED 0x0100
-#define SM_FLAGS_PARTNER_SHORT_TIMEOUT 0x0200
-#define SM_FLAGS_NTT 0x0400
-
-#define BOND_LINK_FULL_DUPLEX_KEY 0x01
-#define BOND_LINK_SPEED_KEY_10M 0x02
-#define BOND_LINK_SPEED_KEY_100M 0x04
-#define BOND_LINK_SPEED_KEY_1000M 0x08
-#define BOND_LINK_SPEED_KEY_10G 0x10
-#define BOND_LINK_SPEED_KEY_20G 0x11
-#define BOND_LINK_SPEED_KEY_40G 0x12
-
-#define WRN_RX_MARKER_TO_FAST 0x01
-#define WRN_UNKNOWN_SLOW_TYPE 0x02
-#define WRN_UNKNOWN_MARKER_TYPE 0x04
-#define WRN_NOT_LACP_CAPABLE 0x08
-#define WRN_RX_QUEUE_FULL 0x10
-#define WRN_TX_QUEUE_FULL 0x20
-
-#define CHECK_FLAGS(_variable, _f) ((_variable) & (_f))
-#define SET_FLAGS(_variable, _f) ((_variable) |= (_f))
-#define CLEAR_FLAGS(_variable, _f) ((_variable) &= ~(_f))
-
-#define SM_FLAG(_p, _f) (!!CHECK_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f))
-#define SM_FLAG_SET(_p, _f) SET_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
-#define SM_FLAG_CLR(_p, _f) CLEAR_FLAGS((_p)->sm_flags, SM_FLAGS_ ## _f)
-
-#define ACTOR_STATE(_p, _f) (!!CHECK_FLAGS((_p)->actor_state, STATE_ ## _f))
-#define ACTOR_STATE_SET(_p, _f) SET_FLAGS((_p)->actor_state, STATE_ ## _f)
-#define ACTOR_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->actor_state, STATE_ ## _f)
-
-#define PARTNER_STATE(_p, _f) (!!CHECK_FLAGS((_p)->partner_state, STATE_ ## _f))
-#define PARTNER_STATE_SET(_p, _f) SET_FLAGS((_p)->partner_state, STATE_ ## _f)
-#define PARTNER_STATE_CLR(_p, _f) CLEAR_FLAGS((_p)->partner_state, STATE_ ## _f)
-
-/** Variables associated with each port (5.4.7 in 802.1AX documentation). */
-struct port {
- /**
- * The operational values of the Actor's state parameters. Bitmask
- * of port states.
- */
- uint8_t actor_state;
-
- /** The operational Actor's port parameters */
- struct port_params actor;
-
- /**
- * The operational value of the Actor's view of the current values of
- * the Partner's state parameters. The Actor sets this variable either
- * to the value received from the Partner in an LACPDU, or to the value
- * of Partner_Admin_Port_State. Bitmask of port states.
- */
- uint8_t partner_state;
-
- /** The operational Partner's port parameters */
- struct port_params partner;
-
- /* Additional port parameters not listed in documentation */
- /** State machine flags */
- uint16_t sm_flags;
- enum rte_bond_8023ad_selection selected;
-
- /** Indicates if either allmulti or promisc has been enforced on the
- * slave so that we can receive lacp packets
- */
-#define BOND_8023AD_FORCED_ALLMULTI (1 << 0)
-#define BOND_8023AD_FORCED_PROMISC (1 << 1)
- uint8_t forced_rx_flags;
-
- uint64_t current_while_timer;
- uint64_t periodic_timer;
- uint64_t wait_while_timer;
- uint64_t tx_machine_timer;
- uint64_t tx_marker_timer;
- /* Agregator parameters */
- /** Used aggregator port ID */
- uint16_t aggregator_port_id;
-
- /** Memory pool used to allocate rings */
- struct rte_mempool *mbuf_pool;
-
- /** Ring of LACP packets from RX burst function */
- struct rte_ring *rx_ring;
-
- /** Ring of slow protocol packets (LACP and MARKERS) to TX burst function */
- struct rte_ring *tx_ring;
-
- /** Timer which is also used as mutex. If is 0 (not running) RX marker
- * packet might be responded. Otherwise shall be dropped. It is zeroed in
- * mode 4 callback function after expire. */
- volatile uint64_t rx_marker_timer;
-
- uint64_t warning_timer;
- volatile uint16_t warnings_to_show;
-
- /** Memory pool used to allocate slow queues */
- struct rte_mempool *slow_pool;
-};
-
-struct mode8023ad_private {
- uint64_t fast_periodic_timeout;
- uint64_t slow_periodic_timeout;
- uint64_t short_timeout;
- uint64_t long_timeout;
- uint64_t aggregate_wait_timeout;
- uint64_t tx_period_timeout;
- uint64_t rx_marker_timeout;
- uint64_t update_timeout_us;
- rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb;
- uint8_t external_sm;
- struct rte_ether_addr mac_addr;
-
- struct rte_eth_link slave_link;
- /***< slave link properties */
-
- /**
- * Configuration of dedicated hardware queues for control plane
- * traffic
- */
- struct {
- uint8_t enabled;
-
- struct rte_flow *flow[RTE_MAX_ETHPORTS];
-
- uint16_t rx_qid;
- uint16_t tx_qid;
- } dedicated_queues;
- enum rte_bond_8023ad_agg_selection agg_selection;
-};
-
-/**
- * @internal
- * The pool of *port* structures. The size of the pool
- * is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
- */
-extern struct port bond_mode_8023ad_ports[];
-
-/* Forward declaration */
-struct bond_dev_private;
-
-
-/**
- * @internal
- *
- * Set mode 4 configuration of bonded interface.
- *
- * @pre Bonded interface must be stopped.
- *
- * @param dev Bonded interface
- * @param conf new configuration. If NULL set default configuration.
- */
-void
-bond_mode_8023ad_setup(struct rte_eth_dev *dev,
- struct rte_eth_bond_8023ad_conf *conf);
-
-/**
- * @internal
- *
- * Enables 802.1AX mode and all active slaves on bonded interface.
- *
- * @param dev Bonded interface
- * @return
- * 0 on success, negative value otherwise.
- */
-int
-bond_mode_8023ad_enable(struct rte_eth_dev *dev);
-
-/**
- * @internal
- *
- * Disables 802.1AX mode of the bonded interface and slaves.
- *
- * @param dev Bonded interface
- * @return
- * 0 on success, negative value otherwise.
- */
-int bond_mode_8023ad_disable(struct rte_eth_dev *dev);
-
-/**
- * @internal
- *
- * Starts 802.3AX state machines management logic.
- * @param dev Bonded interface
- * @return
- * 0 if machines was started, 1 if machines was already running,
- * negative value otherwise.
- */
-int
-bond_mode_8023ad_start(struct rte_eth_dev *dev);
-
-/**
- * @internal
- *
- * Stops 802.3AX state machines management logic.
- * @param dev Bonded interface
- * @return
- * 0 if this call stopped state machines, -ENOENT if alarm was not set.
- */
-void
-bond_mode_8023ad_stop(struct rte_eth_dev *dev);
-
-/**
- * @internal
- *
- * Passes given slow packet to state machines management logic.
- * @param internals Bonded device private data.
- * @param slave_id Slave port id.
- * @param slot_pkt Slow packet.
- */
-void
-bond_mode_8023ad_handle_slow_pkt(struct bond_dev_private *internals,
- uint16_t slave_id, struct rte_mbuf *pkt);
-
-/**
- * @internal
- *
- * Appends given slave used slave
- *
- * @param dev Bonded interface.
- * @param port_id Slave port ID to be added
- *
- * @return
- * 0 on success, negative value otherwise.
- */
-void
-bond_mode_8023ad_activate_slave(struct rte_eth_dev *dev, uint16_t port_id);
-
-/**
- * @internal
- *
- * Denitializes and removes given slave from 802.1AX mode.
- *
- * @param dev Bonded interface.
- * @param slave_num Position of slave in active_slaves array
- *
- * @return
- * 0 on success, negative value otherwise.
- */
-int
-bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *dev, uint16_t slave_pos);
-
-/**
- * Updates state when MAC was changed on bonded device or one of its slaves.
- * @param bond_dev Bonded device
- */
-void
-bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev);
-
-int
-bond_ethdev_8023ad_flow_verify(struct rte_eth_dev *bond_dev,
- uint16_t slave_port);
-
-int
-bond_ethdev_8023ad_flow_set(struct rte_eth_dev *bond_dev, uint16_t slave_port);
-
-int
-bond_8023ad_slow_pkt_hw_filter_supported(uint16_t port_id);
-
-#endif /* RTE_ETH_BOND_8023AD_H_ */
* Copyright(c) 2010-2015 Intel Corporation
*/
-#include "rte_eth_bond_private.h"
+#include "eth_bond_private.h"
#include "rte_eth_bond_alb.h"
static inline uint8_t
#include <rte_kvargs.h>
#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-#include "rte_eth_bond_8023ad_private.h"
+#include "eth_bond_private.h"
+#include "eth_bond_8023ad_private.h"
int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
#include <rte_kvargs.h>
#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
+#include "eth_bond_private.h"
const char *pmd_bond_init_valid_arguments[] = {
PMD_BOND_SLAVE_PORT_KVARG,
#include <rte_tailq.h>
#include <rte_flow.h>
-#include "rte_eth_bond_private.h"
+#include "eth_bond_private.h"
static struct rte_flow *
bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
#include <rte_string_fns.h>
#include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-#include "rte_eth_bond_8023ad_private.h"
+#include "eth_bond_private.h"
+#include "eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2017 Intel Corporation
- */
-
-#ifndef _RTE_ETH_BOND_PRIVATE_H_
-#define _RTE_ETH_BOND_PRIVATE_H_
-
-#include <stdint.h>
-#include <sys/queue.h>
-
-#include <rte_ethdev_driver.h>
-#include <rte_flow.h>
-#include <rte_spinlock.h>
-#include <rte_bitmap.h>
-#include <rte_flow_driver.h>
-
-#include "rte_eth_bond.h"
-#include "rte_eth_bond_8023ad_private.h"
-#include "rte_eth_bond_alb.h"
-
-#define PMD_BOND_SLAVE_PORT_KVARG ("slave")
-#define PMD_BOND_PRIMARY_SLAVE_KVARG ("primary")
-#define PMD_BOND_MODE_KVARG ("mode")
-#define PMD_BOND_AGG_MODE_KVARG ("agg_mode")
-#define PMD_BOND_XMIT_POLICY_KVARG ("xmit_policy")
-#define PMD_BOND_SOCKET_ID_KVARG ("socket_id")
-#define PMD_BOND_MAC_ADDR_KVARG ("mac")
-#define PMD_BOND_LSC_POLL_PERIOD_KVARG ("lsc_poll_period_ms")
-#define PMD_BOND_LINK_UP_PROP_DELAY_KVARG ("up_delay")
-#define PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG ("down_delay")
-
-#define PMD_BOND_XMIT_POLICY_LAYER2_KVARG ("l2")
-#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23")
-#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34")
-
-extern int bond_logtype;
-
-#define RTE_BOND_LOG(lvl, msg, ...) \
- rte_log(RTE_LOG_ ## lvl, bond_logtype, \
- "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
-
-#define BONDING_MODE_INVALID 0xFF
-
-extern const char *pmd_bond_init_valid_arguments[];
-
-extern struct rte_vdev_driver pmd_bond_drv;
-
-extern const struct rte_flow_ops bond_flow_ops;
-
-/** Port Queue Mapping Structure */
-struct bond_rx_queue {
- uint16_t queue_id;
- /**< Queue Id */
- struct bond_dev_private *dev_private;
- /**< Reference to eth_dev private structure */
- uint16_t nb_rx_desc;
- /**< Number of RX descriptors available for the queue */
- struct rte_eth_rxconf rx_conf;
- /**< Copy of RX configuration structure for queue */
- struct rte_mempool *mb_pool;
- /**< Reference to mbuf pool to use for RX queue */
-};
-
-struct bond_tx_queue {
- uint16_t queue_id;
- /**< Queue Id */
- struct bond_dev_private *dev_private;
- /**< Reference to dev private structure */
- uint16_t nb_tx_desc;
- /**< Number of TX descriptors available for the queue */
- struct rte_eth_txconf tx_conf;
- /**< Copy of TX configuration structure for queue */
-};
-
-/** Bonded slave devices structure */
-struct bond_ethdev_slave_ports {
- uint16_t slaves[RTE_MAX_ETHPORTS]; /**< Slave port id array */
- uint16_t slave_count; /**< Number of slaves */
-};
-
-struct bond_slave_details {
- uint16_t port_id;
-
- uint8_t link_status_poll_enabled;
- uint8_t link_status_wait_to_complete;
- uint8_t last_link_status;
- /**< Port Id of slave eth_dev */
- struct rte_ether_addr persisted_mac_addr;
-
- uint16_t reta_size;
-};
-
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next;
- /* Slaves flows */
- struct rte_flow *flows[RTE_MAX_ETHPORTS];
- /* Flow description for synchronization */
- struct rte_flow_conv_rule rule;
- uint8_t rule_data[];
-};
-
-typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint16_t slave_count, uint16_t *slaves);
-
-/** Link Bonding PMD device private configuration Structure */
-struct bond_dev_private {
- uint16_t port_id; /**< Port Id of Bonded Port */
- uint8_t mode; /**< Link Bonding Mode */
-
- rte_spinlock_t lock;
- rte_spinlock_t lsc_lock;
-
- uint16_t primary_port; /**< Primary Slave Port */
- uint16_t current_primary_port; /**< Primary Slave Port */
- uint16_t user_defined_primary_port;
- /**< Flag for whether primary port is user defined or not */
-
- uint8_t balance_xmit_policy;
- /**< Transmit policy - l2 / l23 / l34 for operation in balance mode */
- burst_xmit_hash_t burst_xmit_hash;
- /**< Transmit policy hash function */
-
- uint8_t user_defined_mac;
- /**< Flag for whether MAC address is user defined or not */
-
- uint8_t link_status_polling_enabled;
- uint32_t link_status_polling_interval_ms;
-
- uint32_t link_down_delay_ms;
- uint32_t link_up_delay_ms;
-
- uint16_t nb_rx_queues; /**< Total number of rx queues */
- uint16_t nb_tx_queues; /**< Total number of tx queues*/
-
- uint16_t active_slave; /**< Next active_slave to poll */
- uint16_t active_slave_count; /**< Number of active slaves */
- uint16_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
-
- uint16_t slave_count; /**< Number of bonded slaves */
- struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
- /**< Arary of bonded slaves details */
-
- struct mode8023ad_private mode4;
- uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
- /**< TLB active slaves send order */
- struct mode_alb_private mode6;
-
- uint64_t rx_offload_capa; /** Rx offload capability */
- uint64_t tx_offload_capa; /** Tx offload capability */
- uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */
- uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */
-
- /**< List of the configured flows */
- TAILQ_HEAD(sub_flows, rte_flow) flow_list;
-
- /**< Flow isolation state */
- int flow_isolated;
- int flow_isolated_valid;
-
- /** Bit mask of RSS offloads, the bit offset also means flow type */
- uint64_t flow_type_rss_offloads;
-
- struct rte_eth_rxconf default_rxconf; /**< Default RxQ conf. */
- struct rte_eth_txconf default_txconf; /**< Default TxQ conf. */
- struct rte_eth_desc_lim rx_desc_lim; /**< Rx descriptor limits */
- struct rte_eth_desc_lim tx_desc_lim; /**< Tx descriptor limits */
-
- uint16_t reta_size;
- struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
- RTE_RETA_GROUP_SIZE];
-
- uint8_t rss_key[52]; /**< 52-byte hash key buffer. */
- uint8_t rss_key_len; /**< hash key length in bytes. */
-
- struct rte_kvargs *kvlist;
- uint8_t slave_update_idx;
-
- uint32_t candidate_max_rx_pktlen;
- uint32_t max_rx_pktlen;
-
- void *vlan_filter_bmpmem; /* enabled vlan filter bitmap */
- struct rte_bitmap *vlan_filter_bmp;
-};
-
-extern const struct eth_dev_ops default_dev_ops;
-
-int
-check_for_master_bonded_ethdev(const struct rte_eth_dev *eth_dev);
-
-int
-check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev);
-
-/* Search given slave array to find position of given id.
- * Return slave pos or slaves_count if not found. */
-static inline uint16_t
-find_slave_by_id(uint16_t *slaves, uint16_t slaves_count, uint16_t slave_id) {
-
- uint16_t pos;
- for (pos = 0; pos < slaves_count; pos++) {
- if (slave_id == slaves[pos])
- break;
- }
-
- return pos;
-}
-
-int
-valid_port_id(uint16_t port_id);
-
-int
-valid_bonded_port_id(uint16_t port_id);
-
-int
-valid_slave_port_id(uint16_t port_id, uint8_t mode);
-
-void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
-
-void
-activate_slave(struct rte_eth_dev *eth_dev, uint16_t port_id);
-
-int
-mac_address_set(struct rte_eth_dev *eth_dev,
- struct rte_ether_addr *new_mac_addr);
-
-int
-mac_address_get(struct rte_eth_dev *eth_dev,
- struct rte_ether_addr *dst_mac_addr);
-
-int
-mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
-
-int
-slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
- uint16_t slave_port_id);
-
-int
-slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
- uint16_t slave_port_id);
-
-int
-bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
-
-int
-slave_configure(struct rte_eth_dev *bonded_eth_dev,
- struct rte_eth_dev *slave_eth_dev);
-
-void
-slave_remove(struct bond_dev_private *internals,
- struct rte_eth_dev *slave_eth_dev);
-
-void
-slave_add(struct bond_dev_private *internals,
- struct rte_eth_dev *slave_eth_dev);
-
-void
-burst_xmit_l2_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint16_t slave_count, uint16_t *slaves);
-
-void
-burst_xmit_l23_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint16_t slave_count, uint16_t *slaves);
-
-void
-burst_xmit_l34_hash(struct rte_mbuf **buf, uint16_t nb_pkts,
- uint16_t slave_count, uint16_t *slaves);
-
-
-void
-bond_ethdev_primary_set(struct bond_dev_private *internals,
- uint16_t slave_port_id);
-
-int
-bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
- void *param, void *ret_param);
-
-int
-bond_ethdev_parse_slave_port_kvarg(const char *key,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_slave_mode_kvarg(const char *key,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_slave_agg_mode_kvarg(const char *key __rte_unused,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_socket_id_kvarg(const char *key,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_balance_xmit_policy_kvarg(const char *key,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_bond_mac_addr_kvarg(const char *key,
- const char *value, void *extra_args);
-
-int
-bond_ethdev_parse_time_ms_kvarg(const char *key,
- const char *value, void *extra_args);
-
-void
-bond_tlb_disable(struct bond_dev_private *internals);
-
-void
-bond_tlb_enable(struct bond_dev_private *internals);
-
-void
-bond_tlb_activate_slave(struct bond_dev_private *internals);
-
-void
-bond_ethdev_stop(struct rte_eth_dev *eth_dev);
-
-void
-bond_ethdev_close(struct rte_eth_dev *dev);
-
-#endif
* Copyright 2017 Mellanox Technologies, Ltd
*/
-#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
-#define _RTE_ETH_FAILSAFE_PRIVATE_H_
+#ifndef _ETH_FAILSAFE_PRIVATE_H_
+#define _ETH_FAILSAFE_PRIVATE_H_
#include <stdint.h>
#include <sys/queue.h>
return rte_errno = 0;
return err;
}
-#endif /* _RTE_ETH_FAILSAFE_PRIVATE_H_ */
+#endif /* _ETH_FAILSAFE_PRIVATE_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _DIST_PRIV_H_
+#define _DIST_PRIV_H_
+
+/**
+ * @file
+ * RTE distributor
+ *
+ * The distributor is a component which is designed to pass packets
+ * one-at-a-time to workers, with dynamic load balancing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NO_FLAGS 0
+#define RTE_DISTRIB_PREFIX "DT_"
+
+/*
+ * We will use the bottom four bits of pointer for flags, shifting out
+ * the top four bits to make room (since a 64-bit pointer actually only uses
+ * 48 bits). An arithmetic-right-shift will then appropriately restore the
+ * original pointer value with proper sign extension into the top bits.
+ */
+#define RTE_DISTRIB_FLAG_BITS 4
+#define RTE_DISTRIB_FLAGS_MASK (0x0F)
+#define RTE_DISTRIB_NO_BUF 0 /**< empty flags: no buffer requested */
+#define RTE_DISTRIB_GET_BUF (1) /**< worker requests a buffer, returns old */
+#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
+#define RTE_DISTRIB_VALID_BUF (4) /**< set if bufptr contains ptr */
+
+#define RTE_DISTRIB_BACKLOG_SIZE 8
+#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
+
+#define RTE_DISTRIB_MAX_RETURNS 128
+#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
+
+/**
+ * Maximum number of workers allowed.
+ * Be aware of increasing the limit, because it is limited by how we track
+ * in-flight tags. See in_flight_bitmask and rte_distributor_process
+ */
+#define RTE_DISTRIB_MAX_WORKERS 64
+
+#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
+
+/**
+ * Buffer structure used to pass the pointer data between cores. This is cache
+ * line aligned, but to improve performance and prevent adjacent cache-line
+ * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
+ * the next cache line to worker 0, we pad this out to three cache lines.
+ * Only 64-bits of the memory is actually used though.
+ */
+union rte_distributor_buffer_v20 {
+ volatile int64_t bufptr64;
+ char pad[RTE_CACHE_LINE_SIZE*3];
+} __rte_cache_aligned;
+
+/*
+ * Transfer up to 8 mbufs at a time to/from workers, and
+ * flow matching algorithm optimized for 8 flow IDs at a time
+ */
+#define RTE_DIST_BURST_SIZE 8
+
+struct rte_distributor_backlog {
+ unsigned int start;
+ unsigned int count;
+ int64_t pkts[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+ uint16_t *tags; /* will point to second cacheline of inflights */
+} __rte_cache_aligned;
+
+
+struct rte_distributor_returned_pkts {
+ unsigned int start;
+ unsigned int count;
+ struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
+};
+
+struct rte_distributor_v20 {
+ TAILQ_ENTRY(rte_distributor_v20) next; /**< Next in list. */
+
+ char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
+ unsigned int num_workers; /**< Number of workers polling */
+
+ uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
+ /**< Tracks the tag being processed per core */
+ uint64_t in_flight_bitmask;
+ /**< on/off bits for in-flight tags.
+ * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
+ * the bitmask has to expand.
+ */
+
+ struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
+
+ union rte_distributor_buffer_v20 bufs[RTE_DISTRIB_MAX_WORKERS];
+
+ struct rte_distributor_returned_pkts returns;
+};
+
+/* All different signature compare functions */
+enum rte_distributor_match_function {
+ RTE_DIST_MATCH_SCALAR = 0,
+ RTE_DIST_MATCH_VECTOR,
+ RTE_DIST_NUM_MATCH_FNS
+};
+
+/**
+ * Buffer structure used to pass the pointer data between cores. This is cache
+ * line aligned, but to improve performance and prevent adjacent cache-line
+ * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
+ * the next cache line to worker 0, we pad this out to two cache lines.
+ * We can pass up to 8 mbufs at a time in one cacheline.
+ * There is a separate cacheline for returns in the burst API.
+ */
+struct rte_distributor_buffer {
+ volatile int64_t bufptr64[RTE_DIST_BURST_SIZE]
+ __rte_cache_aligned; /* <= outgoing to worker */
+
+ int64_t pad1 __rte_cache_aligned; /* <= one cache line */
+
+ volatile int64_t retptr64[RTE_DIST_BURST_SIZE]
+ __rte_cache_aligned; /* <= incoming from worker */
+
+ int64_t pad2 __rte_cache_aligned; /* <= one cache line */
+
+ int count __rte_cache_aligned; /* <= number of current mbufs */
+};
+
+struct rte_distributor {
+ TAILQ_ENTRY(rte_distributor) next; /**< Next in list. */
+
+ char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
+ unsigned int num_workers; /**< Number of workers polling */
+ unsigned int alg_type; /**< Number of alg types */
+
+	/**
+	 * First cache line in this array holds the tags inflight
+	 * on the worker core. Second cache line holds the backlog
+	 * that is going to go to the worker core.
+	 */
+ uint16_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2]
+ __rte_cache_aligned;
+
+ struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS]
+ __rte_cache_aligned;
+
+ struct rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
+
+ struct rte_distributor_returned_pkts returns;
+
+ enum rte_distributor_match_function dist_match_fn;
+
+ struct rte_distributor_v20 *d_v20;
+};
+
+void
+find_match_scalar(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr);
+
+void
+find_match_vec(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DIST_PRIV_H_ */
#include <rte_pause.h>
#include <rte_tailq.h>
-#include "rte_distributor_private.h"
#include "rte_distributor.h"
#include "rte_distributor_v20.h"
#include "rte_distributor_v1705.h"
+#include "distributor_private.h"
TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
*/
#include <rte_mbuf.h>
-#include "rte_distributor_private.h"
#include "rte_distributor.h"
+#include "distributor_private.h"
void
find_match_vec(struct rte_distributor *d,
*/
#include <rte_mbuf.h>
-#include "rte_distributor_private.h"
#include "rte_distributor.h"
+#include "distributor_private.h"
#include "smmintrin.h"
#include "nmmintrin.h"
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#ifndef _RTE_DIST_PRIV_H_
-#define _RTE_DIST_PRIV_H_
-
-/**
- * @file
- * RTE distributor
- *
- * The distributor is a component which is designed to pass packets
- * one-at-a-time to workers, with dynamic load balancing.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define NO_FLAGS 0
-#define RTE_DISTRIB_PREFIX "DT_"
-
-/*
- * We will use the bottom four bits of pointer for flags, shifting out
- * the top four bits to make room (since a 64-bit pointer actually only uses
- * 48 bits). An arithmetic-right-shift will then appropriately restore the
- * original pointer value with proper sign extension into the top bits.
- */
-#define RTE_DISTRIB_FLAG_BITS 4
-#define RTE_DISTRIB_FLAGS_MASK (0x0F)
-#define RTE_DISTRIB_NO_BUF 0 /**< empty flags: no buffer requested */
-#define RTE_DISTRIB_GET_BUF (1) /**< worker requests a buffer, returns old */
-#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
-#define RTE_DISTRIB_VALID_BUF (4) /**< set if bufptr contains ptr */
-
-#define RTE_DISTRIB_BACKLOG_SIZE 8
-#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
-
-#define RTE_DISTRIB_MAX_RETURNS 128
-#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
-
-/**
- * Maximum number of workers allowed.
- * Be aware of increasing the limit, because it is limited by how we track
- * in-flight tags. See in_flight_bitmask and rte_distributor_process
- */
-#define RTE_DISTRIB_MAX_WORKERS 64
-
-#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
-
-/**
- * Buffer structure used to pass the pointer data between cores. This is cache
- * line aligned, but to improve performance and prevent adjacent cache-line
- * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
- * the next cache line to worker 0, we pad this out to three cache lines.
- * Only 64-bits of the memory is actually used though.
- */
-union rte_distributor_buffer_v20 {
- volatile int64_t bufptr64;
- char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
-
-/*
- * Transfer up to 8 mbufs at a time to/from workers, and
- * flow matching algorithm optimized for 8 flow IDs at a time
- */
-#define RTE_DIST_BURST_SIZE 8
-
-struct rte_distributor_backlog {
- unsigned int start;
- unsigned int count;
- int64_t pkts[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
- uint16_t *tags; /* will point to second cacheline of inflights */
-} __rte_cache_aligned;
-
-
-struct rte_distributor_returned_pkts {
- unsigned int start;
- unsigned int count;
- struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
-};
-
-struct rte_distributor_v20 {
- TAILQ_ENTRY(rte_distributor_v20) next; /**< Next in list. */
-
- char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
- unsigned int num_workers; /**< Number of workers polling */
-
- uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
- /**< Tracks the tag being processed per core */
- uint64_t in_flight_bitmask;
- /**< on/off bits for in-flight tags.
- * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
- * the bitmask has to expand.
- */
-
- struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
-
- union rte_distributor_buffer_v20 bufs[RTE_DISTRIB_MAX_WORKERS];
-
- struct rte_distributor_returned_pkts returns;
-};
-
-/* All different signature compare functions */
-enum rte_distributor_match_function {
- RTE_DIST_MATCH_SCALAR = 0,
- RTE_DIST_MATCH_VECTOR,
- RTE_DIST_NUM_MATCH_FNS
-};
-
-/**
- * Buffer structure used to pass the pointer data between cores. This is cache
- * line aligned, but to improve performance and prevent adjacent cache-line
- * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
- * the next cache line to worker 0, we pad this out to two cache lines.
- * We can pass up to 8 mbufs at a time in one cacheline.
- * There is a separate cacheline for returns in the burst API.
- */
-struct rte_distributor_buffer {
- volatile int64_t bufptr64[RTE_DIST_BURST_SIZE]
- __rte_cache_aligned; /* <= outgoing to worker */
-
- int64_t pad1 __rte_cache_aligned; /* <= one cache line */
-
- volatile int64_t retptr64[RTE_DIST_BURST_SIZE]
- __rte_cache_aligned; /* <= incoming from worker */
-
- int64_t pad2 __rte_cache_aligned; /* <= one cache line */
-
- int count __rte_cache_aligned; /* <= number of current mbufs */
-};
-
-struct rte_distributor {
- TAILQ_ENTRY(rte_distributor) next; /**< Next in list. */
-
- char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
- unsigned int num_workers; /**< Number of workers polling */
- unsigned int alg_type; /**< Number of alg types */
-
- /**>
- * First cache line in the this array are the tags inflight
- * on the worker core. Second cache line are the backlog
- * that are going to go to the worker core.
- */
- uint16_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2]
- __rte_cache_aligned;
-
- struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS]
- __rte_cache_aligned;
-
- struct rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
-
- struct rte_distributor_returned_pkts returns;
-
- enum rte_distributor_match_function dist_match_fn;
-
- struct rte_distributor_v20 *d_v20;
-};
-
-void
-find_match_scalar(struct rte_distributor *d,
- uint16_t *data_ptr,
- uint16_t *output_ptr);
-
-void
-find_match_vec(struct rte_distributor *d,
- uint16_t *data_ptr,
- uint16_t *output_ptr);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
#include <rte_tailq.h>
#include "rte_distributor_v20.h"
-#include "rte_distributor_private.h"
+#include "distributor_private.h"
TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);
* Copyright(c) 2017 Intel Corporation
*/
-#ifndef _RTE_SERVICE_PRIVATE_H_
-#define _RTE_SERVICE_PRIVATE_H_
+#ifndef _SERVICE_PRIVATE_H_
+#define _SERVICE_PRIVATE_H_
/* This file specifies the internal service specification.
* Include this file if you are writing a component that requires CPU cycles to
*/
void rte_service_finalize(void);
-#endif /* _RTE_SERVICE_PRIVATE_H_ */
+#endif /* _SERVICE_PRIVATE_H_ */
* Copyright(c) 2018 Gaƫtan Rivet
*/
-#ifndef _RTE_ETH_PRIVATE_H_
-#define _RTE_ETH_PRIVATE_H_
+#ifndef _ETH_PRIVATE_H_
+#define _ETH_PRIVATE_H_
#include "rte_ethdev.h"
}
#endif
-#endif /* _RTE_ETH_PRIVATE_H_ */
+#endif /* _ETH_PRIVATE_H_ */
#include <rte_tailq.h>
#include "rte_stack.h"
-#include "rte_stack_pvt.h"
+#include "stack_pvt.h"
int stack_logtype;
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
- */
-
-#ifndef _RTE_STACK_PVT_H_
-#define _RTE_STACK_PVT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_log.h>
-
-extern int stack_logtype;
-
-#define STACK_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ##level, stack_logtype, "%s(): "fmt "\n", \
- __func__, ##args)
-
-#define STACK_LOG_ERR(fmt, args...) \
- STACK_LOG(ERR, fmt, ## args)
-
-#define STACK_LOG_WARN(fmt, args...) \
- STACK_LOG(WARNING, fmt, ## args)
-
-#define STACK_LOG_INFO(fmt, args...) \
- STACK_LOG(INFO, fmt, ## args)
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_STACK_PVT_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _STACK_PVT_H_
+#define _STACK_PVT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_log.h>
+
+extern int stack_logtype;
+
+#define STACK_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ##level, stack_logtype, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define STACK_LOG_ERR(fmt, args...) \
+ STACK_LOG(ERR, fmt, ## args)
+
+#define STACK_LOG_WARN(fmt, args...) \
+ STACK_LOG(WARNING, fmt, ## args)
+
+#define STACK_LOG_INFO(fmt, args...) \
+ STACK_LOG(INFO, fmt, ## args)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _STACK_PVT_H_ */