}
sched_ctx = dev->data->dev_private;
- if (sched_ctx->nb_slaves >= MAX_SLAVES_NUM) {
+ if (sched_ctx->nb_slaves >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
CS_LOG_ERR("Too many slaves attached");
return -ENOMEM;
}
return 0;
}
+
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t nb_slaves = 0;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ nb_slaves = sched_ctx->nb_slaves;
+
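+	/* write back the slave IDs only if the caller supplied an array */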
+ if (slaves && nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < nb_slaves; i++)
+ slaves[i] = sched_ctx->slaves[i].dev_id;
+ }
+
+ return (int)nb_slaves;
+}
extern "C" {
#endif
+/** Maximum number of bonded slave devices per scheduler device */
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#endif
+
/**
* Crypto scheduler PMD operation modes
*/
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+/**
+ * Get the attached slaves' count and/or device IDs
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slaves
+ *   If not NULL and the call succeeds, the
+ *   attached slaves' device IDs are written
+ *   back to it. This parameter SHALL be either
+ *   a uint8_t array of
+ *   RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+ *   elements or NULL.
+ *
+ * @return
+ * - non-negative number: the number of slaves attached
+ *   - negative integer if an error occurs.
+ */
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+
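A minimal caller-side sketch, for illustration only (not part of this patch): the helper name print_scheduler_slaves is hypothetical, and it assumes a scheduler device has already been created and that the public header is available as <rte_cryptodev_scheduler.h>.

#include <stdio.h>
#include <stdint.h>

#include <rte_cryptodev_scheduler.h>

/* Hypothetical helper: print the IDs of the crypto devices attached to a
 * scheduler. Passing NULL instead of an array would return the count only.
 */
static int
print_scheduler_slaves(uint8_t scheduler_id)
{
	uint8_t slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	int nb_slaves, i;

	nb_slaves = rte_cryptodev_scheduler_slaves_get(scheduler_id, slaves);
	if (nb_slaves < 0)
		return nb_slaves;	/* invalid ID or not a scheduler PMD */

	for (i = 0; i < nb_slaves; i++)
		printf("slave %d: crypto device id %u\n", i,
				(unsigned int)slaves[i]);

	return 0;
}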
typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
struct rte_crypto_op **ops, uint16_t nb_ops);
rte_cryptodev_scheduler_ordering_get;
};
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_scheduler_slaves_get;
+
+} DPDK_17.02;
struct scheduler_init_params {
struct rte_crypto_vdev_init_params def_p;
uint32_t nb_slaves;
- uint8_t slaves[MAX_SLAVES_NUM];
+ uint8_t slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
};
#define RTE_CRYPTODEV_VDEV_NAME ("name")
return -1;
}
- if (param->nb_slaves >= MAX_SLAVES_NUM - 1) {
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
CS_LOG_ERR("Too many slaves.\n");
return -1;
}
#include "rte_cryptodev_scheduler.h"
-/**< Maximum number of bonded devices per devices */
-#ifndef MAX_SLAVES_NUM
-#define MAX_SLAVES_NUM (8)
-#endif
-
#define PER_SLAVE_BUFF_SIZE (256)
#define CS_LOG_ERR(fmt, args...) \
uint32_t max_nb_queue_pairs;
- struct scheduler_slave slaves[MAX_SLAVES_NUM];
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
} __rte_cache_aligned;
struct scheduler_session {
- struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
+ struct rte_cryptodev_sym_session *sessions[
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
};
static inline uint16_t __attribute__((always_inline))
#include "scheduler_pmd_private.h"
struct rr_scheduler_qp_ctx {
- struct scheduler_slave slaves[MAX_SLAVES_NUM];
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
uint32_t nb_slaves;
uint32_t last_enq_slave_idx;
qp_ctx->private_qp_ctx;
uint32_t j;
- memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
+ memset(rr_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
sizeof(struct scheduler_slave));
for (j = 0; j < sched_ctx->nb_slaves; j++) {
rr_qp_ctx->slaves[j].dev_id =