X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fscheduler%2Fscheduler_pmd_private.h;h=adb4eb06321a1c5fe94411a659e0a3c186438baa;hb=880bc71de1c02fb2248adeb5d94a31819c280b44;hp=f0861cd7991e58fc660438ff6ee8279bc8be4ff7;hpb=a783aa634410c638691077c174c604dc0fa54bae;p=dpdk.git

diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index f0861cd799..adb4eb0632 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
  */
 
 #ifndef _SCHEDULER_PMD_PRIVATE_H
@@ -36,39 +7,23 @@
 
 #include "rte_cryptodev_scheduler.h"
 
-/**< Maximum number of bonded devices per devices */
-#ifndef MAX_SLAVES_NUM
-#define MAX_SLAVES_NUM			(8)
-#endif
-
-#define PER_SLAVE_BUFF_SIZE		(256)
-
-#define CS_LOG_ERR(fmt, args...)					\
-	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
-		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
-		__func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
-#define CS_LOG_INFO(fmt, args...)					\
-	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
-		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
-		__func__, __LINE__, ## args)
-
-#define CS_LOG_DBG(fmt, args...)					\
-	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
-		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
-		__func__, __LINE__, ## args)
-#else
-#define CS_LOG_INFO(fmt, args...)
-#define CS_LOG_DBG(fmt, args...)
-#endif
-
-struct scheduler_slave {
+#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+
+#define PER_WORKER_BUFF_SIZE		(256)
+
+extern int scheduler_logtype_driver;
+
+#define CR_SCHED_LOG(level, fmt, args...)				\
+	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver,		\
+			"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
+
+struct scheduler_worker {
 	uint8_t dev_id;
 	uint16_t qp_id;
 	uint32_t nb_inflight_cops;
 
-	enum rte_cryptodev_type dev_type;
+	uint8_t driver_id;
 };
 
 struct scheduler_ctx {
@@ -80,8 +35,8 @@ struct scheduler_ctx {
 
 	uint32_t max_nb_queue_pairs;
 
-	struct scheduler_slave slaves[MAX_SLAVES_NUM];
-	uint32_t nb_slaves;
+	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	uint32_t nb_workers;
 
 	enum rte_cryptodev_scheduler_mode mode;
 
@@ -89,24 +44,28 @@ struct scheduler_ctx {
 
 	uint8_t reordering_enabled;
 
-	struct rte_cryptodev_qp_conf qp_conf;
-
 	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+	uint16_t wc_pool[RTE_MAX_LCORE];
+	uint16_t nb_wc;
+
+	char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	int nb_init_workers;
 } __rte_cache_aligned;
 
 struct scheduler_qp_ctx {
 	void *private_qp_ctx;
 
+	uint32_t max_nb_objs;
+
 	struct rte_ring *order_ring;
 	uint32_t seqn;
 } __rte_cache_aligned;
 
-struct scheduler_session {
-	struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
-};
 
-static inline uint16_t __attribute__((always_inline))
+extern uint8_t cryptodev_scheduler_driver_id;
+
+static __rte_always_inline uint16_t
 get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
 {
 	uint32_t count = rte_ring_free_count(order_ring);
@@ -114,43 +73,35 @@ get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
 	return count > nb_ops ? nb_ops : count;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 scheduler_order_insert(struct rte_ring *order_ring,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
 }
 
-#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do {		\
-	struct rte_crypto_op **ring = (void *)&order_ring[1];		\
-	op = ring[(order_ring->cons.head + pos) & order_ring->mask];	\
-} while (0)
-
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
 scheduler_order_drain(struct rte_ring *order_ring,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rte_crypto_op *op;
-	uint32_t nb_objs = rte_ring_count(order_ring);
-	uint32_t nb_ops_to_deq = 0;
-	int status = -1;
+	uint32_t nb_objs, nb_ops_to_deq;
 
-	if (nb_objs > nb_ops)
-		nb_objs = nb_ops;
+	nb_objs = rte_ring_dequeue_burst_start(order_ring, (void **)ops,
+			nb_ops, NULL);
+	if (nb_objs == 0)
+		return 0;
 
-	while (nb_ops_to_deq < nb_objs) {
-		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+	for (nb_ops_to_deq = 0; nb_ops_to_deq != nb_objs; nb_ops_to_deq++) {
+		op = ops[nb_ops_to_deq];
 		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
-		nb_ops_to_deq++;
 	}
 
-	if (nb_ops_to_deq)
-		status = rte_ring_sc_dequeue_bulk(order_ring, (void **)ops,
-				nb_ops_to_deq, NULL);
-
-	return (status == 0) ? nb_ops_to_deq : 0;
+	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
+	return nb_ops_to_deq;
 }
+
 
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
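
Note on the logging hunk above: CR_SCHED_LOG() collapses the old CS_LOG_ERR/CS_LOG_INFO/CS_LOG_DBG macros into one rte_log() call keyed by the dynamic logtype scheduler_logtype_driver, with the level chosen at the call site. A minimal sketch of a call site and its expansion; the message text and the dev_id variable are hypothetical, only the macro itself comes from the patch:

	/* hypothetical call site in the scheduler PMD */
	CR_SCHED_LOG(ERR, "worker %u not attached", dev_id);

	/* expands, via RTE_LOG_ ## level token pasting and string-literal
	 * concatenation around fmt, to roughly: */
	rte_log(RTE_LOG_ERR, scheduler_logtype_driver,
			"%s() line %u: " "worker %u not attached" "\n",
			__func__, __LINE__, dev_id);

The scheduler_order_drain() hunk moves in the same direction: instead of reading ring internals (order_ring->cons.head, order_ring->mask) through the removed SCHEDULER_GET_RING_OBJ() macro, it peeks with the public rte_ring_dequeue_burst_start()/rte_ring_dequeue_finish() pair and commits only the leading run of ops whose status is no longer RTE_CRYPTO_OP_STATUS_NOT_PROCESSED, leaving the rest on the order ring for the next drain.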