diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index faa9c99f8d..e606716ac7 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -36,10 +36,8 @@
 
 #include "rte_cryptodev_scheduler.h"
 
-/**< Maximum number of bonded devices per devices */
-#ifndef MAX_SLAVES_NUM
-#define MAX_SLAVES_NUM	(8)
-#endif
+#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
+/**< Scheduler Crypto PMD device name */
 
 #define PER_SLAVE_BUFF_SIZE	(256)
 
@@ -68,7 +66,7 @@ struct scheduler_slave {
 	uint16_t qp_id;
 	uint32_t nb_inflight_cops;
 
-	enum rte_cryptodev_type dev_type;
+	uint8_t driver_id;
 };
 
 struct scheduler_ctx {
@@ -80,7 +78,7 @@ struct scheduler_ctx {
 
 	uint32_t max_nb_queue_pairs;
 
-	struct scheduler_slave slaves[MAX_SLAVES_NUM];
+	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 	uint32_t nb_slaves;
 
 	enum rte_cryptodev_scheduler_mode mode;
@@ -91,20 +89,26 @@ struct scheduler_ctx {
 
 	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+
+	uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	uint16_t nb_wc;
+
+	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+	int nb_init_slaves;
 } __rte_cache_aligned;
 
 struct scheduler_qp_ctx {
 	void *private_qp_ctx;
 
+	uint32_t max_nb_objs;
+
 	struct rte_ring *order_ring;
 	uint32_t seqn;
 } __rte_cache_aligned;
 
-struct scheduler_session {
-	struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
-};
-
-static inline uint16_t __attribute__((always_inline))
+extern uint8_t cryptodev_driver_id;
+
+static __rte_always_inline uint16_t
 get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
 {
 	uint32_t count = rte_ring_free_count(order_ring);
@@ -112,7 +116,7 @@ get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
 	return count > nb_ops ? nb_ops : count;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 scheduler_order_insert(struct rte_ring *order_ring,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
@@ -124,14 +128,14 @@ scheduler_order_insert(struct rte_ring *order_ring,
 	op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
 } while (0)
 
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
 scheduler_order_drain(struct rte_ring *order_ring,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rte_crypto_op *op;
 	uint32_t nb_objs = rte_ring_count(order_ring);
 	uint32_t nb_ops_to_deq = 0;
-	int status = -1;
+	uint32_t nb_ops_deqd = 0;
 
 	if (nb_objs > nb_ops)
 		nb_objs = nb_ops;
@@ -144,10 +148,10 @@ scheduler_order_drain(struct rte_ring *order_ring,
 	}
 
 	if (nb_ops_to_deq)
-		status = rte_ring_sc_dequeue_bulk(order_ring, (void **)ops,
-				nb_ops_to_deq, NULL);
+		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+				(void **)ops, nb_ops_to_deq, NULL);
 
-	return (status == 0) ? nb_ops_to_deq : 0;
+	return nb_ops_deqd;
 }
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
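
Background for the last hunk: since the DPDK 17.05 ring rework, rte_ring_sc_dequeue_bulk() returns the number of objects actually dequeued (all of n, or 0) rather than 0/-ENOENT, so the separate status variable can be dropped and the count returned directly. A minimal sketch of the new calling convention, assuming DPDK >= 17.05 headers; drain_example() is a hypothetical name, not part of the patch:

#include <rte_ring.h>
#include <rte_crypto.h>

/* Sketch only: the bulk dequeue is all-or-nothing, so the return
 * value is either nb_ops_to_deq or 0 and can be propagated to the
 * caller without an extra status check. */
static uint16_t
drain_example(struct rte_ring *order_ring, struct rte_crypto_op **ops,
		uint16_t nb_ops_to_deq)
{
	uint32_t nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
			(void **)ops, nb_ops_to_deq, NULL);

	return (uint16_t)nb_ops_deqd;
}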