X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_pmd_private.h;h=421dae3717f88a3da566f02f572c700249175f30;hb=131fe443469f1de5fda3614e20077fad93eae907;hp=ac4690e11532dd5209ae996a7193b6a98272f111;hpb=870babeb53cf0a79fc3e4fdc901741947073bc25;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index ac4690e115..421dae3717 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -34,14 +34,7 @@
 #ifndef _SCHEDULER_PMD_PRIVATE_H
 #define _SCHEDULER_PMD_PRIVATE_H
 
-#include <rte_hash.h>
-#include <rte_reorder.h>
-#include <rte_cryptodev_scheduler.h>
-
-/**< Maximum number of bonded devices per devices */
-#ifndef MAX_SLAVES_NUM
-#define MAX_SLAVES_NUM	(8)
-#endif
+#include "rte_cryptodev_scheduler.h"
 
 #define PER_SLAVE_BUFF_SIZE	(256)
 
@@ -82,7 +75,7 @@ struct scheduler_ctx {
 
 	uint32_t max_nb_queue_pairs;
 
-	struct scheduler_slave slaves[MAX_SLAVES_NUM];
+	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 	uint32_t nb_slaves;
 
 	enum rte_cryptodev_scheduler_mode mode;
@@ -93,22 +86,70 @@ struct scheduler_ctx {
 
 	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+
+	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+	int nb_init_slaves;
 } __rte_cache_aligned;
 
 struct scheduler_qp_ctx {
 	void *private_qp_ctx;
 
-	rte_cryptodev_scheduler_burst_enqueue_t schedule_enqueue;
-	rte_cryptodev_scheduler_burst_dequeue_t schedule_dequeue;
+	uint32_t max_nb_objs;
 
-	struct rte_reorder_buffer *reorder_buf;
+	struct rte_ring *order_ring;
 	uint32_t seqn;
 } __rte_cache_aligned;
 
 struct scheduler_session {
-	struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
+	struct rte_cryptodev_sym_session *sessions[
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 };
 
+static inline uint16_t __attribute__((always_inline))
+get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
+{
+	uint32_t count = rte_ring_free_count(order_ring);
+
+	return count > nb_ops ? nb_ops : count;
+}
+
+static inline void __attribute__((always_inline))
+scheduler_order_insert(struct rte_ring *order_ring,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
+}
+
+#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do {             \
+	struct rte_crypto_op **ring = (void *)&order_ring[1];        \
+	op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
+} while (0)
+
+static inline uint16_t __attribute__((always_inline))
+scheduler_order_drain(struct rte_ring *order_ring,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op *op;
+	uint32_t nb_objs = rte_ring_count(order_ring);
+	uint32_t nb_ops_to_deq = 0;
+	uint32_t nb_ops_deqd = 0;
+
+	if (nb_objs > nb_ops)
+		nb_objs = nb_ops;
+
+	while (nb_ops_to_deq < nb_objs) {
+		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			break;
+		nb_ops_to_deq++;
+	}
+
+	if (nb_ops_to_deq)
+		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+				(void **)ops, nb_ops_to_deq, NULL);
+
+	return nb_ops_deqd;
+}
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
 
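
Illustrative note (not part of the patch): the ordering helpers introduced above are meant to be wrapped by a scheduling mode's enqueue/dequeue burst callbacks. The sketch below shows one plausible shape of such wrappers; mode_enqueue_burst() and mode_dequeue_burst() are hypothetical stand-ins for a mode's own slave-facing burst handlers and are not part of this header.

/*
 * Sketch only -- assumes hypothetical mode_enqueue_burst()/mode_dequeue_burst()
 * handlers; everything else comes from scheduler_pmd_private.h as patched.
 */
#include <rte_crypto.h>
#include <rte_ring.h>
#include "scheduler_pmd_private.h"

/* hypothetical mode-specific burst handlers (declared, not defined here) */
uint16_t mode_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops);
uint16_t mode_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops);

static uint16_t
ordering_enqueue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;
	/* never accept more ops than the order ring can still track */
	uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
			nb_ops);
	uint16_t nb_ops_enqd = mode_enqueue_burst(qp, ops, nb_ops_to_enq);

	/* record the submission order so dequeue can replay it */
	scheduler_order_insert(order_ring, ops, nb_ops_enqd);

	return nb_ops_enqd;
}

static uint16_t
ordering_dequeue_burst(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_ring *order_ring =
			((struct scheduler_qp_ctx *)qp)->order_ring;

	/*
	 * Pull completions from the slaves; the completed ops are the same
	 * objects already sitting in the order ring, so their status changes
	 * away from RTE_CRYPTO_OP_STATUS_NOT_PROCESSED in place.
	 */
	mode_dequeue_burst(qp, ops, nb_ops);

	/*
	 * Overwrite ops[] with the leading, already-processed ops taken from
	 * the order ring, preserving the original enqueue order.
	 */
	return scheduler_order_drain(order_ring, ops, nb_ops);
}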