"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
uint32_t nb_slaves =
- rte_cryptodev_scheduler_slaves_get(cdev_id,
+ rte_cryptodev_scheduler_workers_get(cdev_id,
NULL);
sessions_needed = enabled_cdev_count * nb_slaves;
char vdev_args[VDEV_ARGS_SIZE] = {""};
char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core,"
"ordering=enable,name=cryptodev_test_scheduler,corelist="};
- uint16_t slave_core_count = 0;
+ uint16_t worker_core_count = 0;
uint16_t socket_id = 0;
if (gbl_driver_id == rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
- /* Identify the Slave Cores
- * Use 2 slave cores for the device args
+ /* Identify the Worker Cores
+ * Use 2 worker cores for the device args
*/
RTE_LCORE_FOREACH_SLAVE(i) {
- if (slave_core_count > 1)
+ if (worker_core_count > 1)
break;
snprintf(vdev_args, sizeof(vdev_args),
"%s%d", temp_str, i);
strcpy(temp_str, vdev_args);
strlcat(temp_str, ";", sizeof(temp_str));
- slave_core_count++;
+ worker_core_count++;
socket_id = rte_lcore_to_socket_id(i);
}
- if (slave_core_count != 2) {
+ if (worker_core_count != 2) {
RTE_LOG(ERR, USER1,
"Cryptodev scheduler test require at least "
- "two slave cores to run. "
+ "two worker cores to run. "
"Please use the correct coremask.\n");
return TEST_FAILED;
}
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
-/* global AESNI slave IDs for the scheduler test */
+/* global AESNI worker IDs for the scheduler test */
uint8_t aesni_ids[2];
static int
ts_params->qp_conf.mp_session_private =
ts_params->session_priv_mpool;
- ret = rte_cryptodev_scheduler_slave_attach(sched_id,
+ ret = rte_cryptodev_scheduler_worker_attach(sched_id,
(uint8_t)i);
TEST_ASSERT(ret == 0,
int ret;
for (i = 0; i < 2; i++) {
- ret = rte_cryptodev_scheduler_slave_detach(sched_id,
+ ret = rte_cryptodev_scheduler_worker_detach(sched_id,
aesni_ids[i]);
TEST_ASSERT(ret == 0,
"Failed to detach device %u", aesni_ids[i]);
The cryptodev driver name is passed to the dpdk-test-crypto-perf tool in the "-devtype" parameter.
- The qat crypto device name is in the format of the slave parameter passed to the crypto scheduler.
+ The qat crypto device name is in the format of the worker parameter passed to the crypto scheduler.
* The qat compressdev driver name is "compress_qat".
The rte_compressdev_devices_get() returns the devices exposed by this driver.
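As a brief illustration (a minimal sketch that only assumes the documented
``rte_compressdev_devices_get()`` call; the array size is arbitrary):

.. code-block:: c

   #include <rte_compressdev.h>

   /* Query the device IDs exposed by the QAT compressdev driver.
    * The array size of 8 is an illustrative choice, not a DPDK limit.
    */
   uint8_t ids[8];
   uint8_t nb_devs = rte_compressdev_devices_get("compress_qat", ids, 8);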
The Cryptodev Scheduler PMD library (**librte_pmd_crypto_scheduler**) acts as
a software crypto PMD and shares the same API provided by librte_cryptodev.
The PMD supports attaching multiple crypto PMDs, software or hardware, as
-slaves, and distributes the crypto workload to them with certain behavior.
+workers, and distributes the crypto workload to them with certain behavior.
The behaviors are categorized as different "modes". Basically, a scheduling
-mode defines certain actions for scheduling crypto ops to its slaves.
+mode defines certain actions for scheduling crypto ops to its workers.
The librte_pmd_crypto_scheduler library exports a C API which provides functions
-for attaching/detaching slaves, set/get scheduling modes, and enable/disable
+for attaching/detaching workers, setting/getting scheduling modes, and enabling/disabling
crypto ops reordering.
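A minimal sketch of that API in use (the device IDs, the chosen mode and the
missing error handling are placeholders, not part of this patch):

.. code-block:: c

   #include <rte_cryptodev.h>
   #include <rte_cryptodev_scheduler.h>

   /* scheduler_id and worker_id are assumed to be valid cryptodev IDs;
    * the scheduler must be stopped while it is being (re)configured.
    */
   rte_cryptodev_scheduler_worker_attach(scheduler_id, worker_id);
   rte_cryptodev_scheduler_mode_set(scheduler_id, CDEV_SCHED_MODE_ROUNDROBIN);
   rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);
   rte_cryptodev_start(scheduler_id);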
Limitations
created. This value may be overwritten internally if there are too
many devices attached.
-* slave: If a cryptodev has been initialized with specific name, it can be
+* worker: If a cryptodev has been initialized with a specific name, it can be
attached to the scheduler using this parameter, simply filling the name
here. Multiple cryptodevs can be attached initially by presenting this
parameter multiple times.
.. code-block:: console
- ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2" ...
+ ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2" ...
.. note::
* The scheduler cryptodev cannot be started unless the scheduling mode
- is set and at least one slave is attached. Also, to configure the
- scheduler in the run-time, like attach/detach slave(s), change
+ is set and at least one worker is attached. Also, to configure the
+ scheduler at run-time, like attach/detach worker(s), change
scheduling mode, or enable/disable crypto op ordering, one should stop
the scheduler first, otherwise an error will be returned.
*Initialization mode parameter*: **round-robin**
Round-robin mode, which distributes the enqueued burst of crypto ops
- among its slaves in a round-robin manner. This mode may help to fill
+ among its workers in a round-robin manner. This mode may help to fill
the throughput gap between the physical core and the existing cryptodevs
to increase the overall performance.
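For example, a scheduler using this mode could be created as follows (the
worker names mirror the earlier example and are placeholders):

.. code-block:: console

   ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2,mode=round-robin" ...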
*Initialization mode parameter*: **packet-size-distr**
- Packet-size based distribution mode, which works with 2 slaves, the primary
- slave and the secondary slave, and distributes the enqueued crypto
+ Packet-size based distribution mode, which works with 2 workers, the primary
+ worker and the secondary worker, and distributes the enqueued crypto
operations to them based on their data lengths. A crypto operation will be
- distributed to the primary slave if its data length is equal to or bigger
+ distributed to the primary worker if its data length is equal to or bigger
than the designated threshold, otherwise it will be handled by the secondary
- slave.
+ worker.
A typical usecase in this mode is with the QAT cryptodev as the primary and
- a software cryptodev as the secondary slave. This may help applications to
+ a software cryptodev as the secondary worker. This may help applications to
process additional crypto workload than what the QAT cryptodev can handle on
its own, by making use of the available CPU cycles to deal with smaller
crypto workloads.
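As an illustration, and assuming the threshold is exposed through
``rte_cryptodev_scheduler_option_set()`` with ``CDEV_SCHED_OPTION_THRESHOLD``
(neither is introduced by this patch), the threshold could be tuned like this:

.. code-block:: c

   /* Illustrative only: route operations of 256 bytes or more to the
    * primary worker, smaller ones to the secondary worker.
    */
   struct rte_cryptodev_scheduler_threshold_option opt = { .threshold = 256 };

   rte_cryptodev_scheduler_option_set(scheduler_id,
           CDEV_SCHED_OPTION_THRESHOLD, &opt);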
*Initialization mode parameter*: **fail-over**
- Fail-over mode, which works with 2 slaves, the primary slave and the
- secondary slave. In this mode, the scheduler will enqueue the incoming
- crypto operation burst to the primary slave. When one or more crypto
+ Fail-over mode, which works with 2 workers, the primary worker and the
+ secondary worker. In this mode, the scheduler will enqueue the incoming
+ crypto operation burst to the primary worker. When one or more crypto
operations fail to be enqueued, then they will be enqueued to the secondary
- slave.
+ worker.
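The first attached worker acts as the primary (see ``PRIMARY_WORKER_IDX`` in
the fail-over code below), so a scheduler in this mode could be created as,
for instance:

.. code-block:: console

   ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2,mode=fail-over" ...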
* **CDEV_SCHED_MODE_MULTICORE:**
For mixed traffic (IMIX) the optimal number of worker cores is around 2-3.
For large packets (1.5 kbytes) the scheduler shows linear scaling in performance
up to eight cores.
- Each worker uses its own slave cryptodev. Only software cryptodevs
+ Each worker uses its own cryptodev. Only software cryptodevs
are supported. Only the same type of cryptodevs should be used concurrently.
The multi-core mode uses one extra parameter:
* corelist: Semicolon-separated list of logical cores to be used as workers.
- The number of worker cores should be equal to the number of slave cryptodevs.
+ The number of worker cores should be equal to the number of worker cryptodevs.
These cores should be present in EAL core list parameter and
should not be used by the application or any other process.
Example:
... --vdev "crypto_aesni_mb1,name=aesni_mb_1" --vdev "crypto_aesni_mb_pmd2,name=aesni_mb_2" \
- --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24" ...
+ --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2,mode=multi-core,corelist=23;24" ...
to one it means it represents IV, when it is set to zero it means J0 is used
directly, in this case 16 bytes of J0 need to be passed.
-* scheduler: The functions ``rte_cryptodev_scheduler_slave_attach``,
- ``rte_cryptodev_scheduler_slave_detach`` and
- ``rte_cryptodev_scheduler_slaves_get`` will be replaced in 20.11 by
- ``rte_cryptodev_scheduler_worker_attach``,
- ``rte_cryptodev_scheduler_worker_detach`` and
- ``rte_cryptodev_scheduler_workers_get`` accordingly.
-
* eventdev: Following structures will be modified to support DLB PMD
and future extensions:
* vhost: Moved vDPA APIs from experimental to stable.
+* scheduler: Renamed functions ``rte_cryptodev_scheduler_slave_attach``,
+ ``rte_cryptodev_scheduler_slave_detach`` and
+ ``rte_cryptodev_scheduler_slaves_get`` to
+ ``rte_cryptodev_scheduler_worker_attach``,
+ ``rte_cryptodev_scheduler_worker_detach`` and
+ ``rte_cryptodev_scheduler_workers_get`` accordingly.
+
+* scheduler: Renamed the configuration value
+ ``RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES`` to
+ ``RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS``.
+
* ipsec: ``RTE_SATP_LOG2_NUM`` has been dropped from ``enum`` and
subsequently moved ``rte_ipsec`` lib from experimental to stable.
/** update the scheduler pmd's capability with attaching device's
* capability.
* For each device to be attached, the scheduler's capability should be
- * the common capability set of all slaves
+ * the common capability set of all workers
**/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
uint32_t nb_caps,
- const struct rte_cryptodev_capabilities *slave_caps)
+ const struct rte_cryptodev_capabilities *worker_caps)
{
- uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+ uint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;
uint32_t i;
- while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
- nb_slave_caps++;
+ while (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+ nb_worker_caps++;
if (nb_caps == 0) {
- rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
- return nb_slave_caps;
+ rte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);
+ return nb_worker_caps;
}
for (i = 0; i < sync_nb_caps; i++) {
struct rte_cryptodev_capabilities *cap = &caps[i];
uint32_t j;
- for (j = 0; j < nb_slave_caps; j++) {
+ for (j = 0; j < nb_worker_caps; j++) {
const struct rte_cryptodev_capabilities *s_cap =
- &slave_caps[j];
+ &worker_caps[j];
if (s_cap->op != cap->op || s_cap->sym.xform_type !=
cap->sym.xform_type)
break;
}
- if (j < nb_slave_caps)
+ if (j < nb_worker_caps)
continue;
/* remove an uncommon cap from the array */
sched_ctx->capabilities = NULL;
}
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
struct rte_cryptodev_info dev_info;
- rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
if (nb_caps == 0)
dev->feature_flags = 0;
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
struct rte_cryptodev_info dev_info;
- rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
dev->feature_flags |= dev_info.feature_flags;
}
uint32_t i;
uint32_t max_nb_qp;
- if (!sched_ctx->nb_slaves)
+ if (!sched_ctx->nb_workers)
return;
- max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+ max_nb_qp = sched_ctx->nb_workers ? UINT32_MAX : 0;
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
struct rte_cryptodev_info dev_info;
- rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
dev_info.max_nb_queue_pairs : max_nb_qp;
}
/** Attach a device to the scheduler. */
int
-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
{
struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
struct scheduler_ctx *sched_ctx;
- struct scheduler_slave *slave;
+ struct scheduler_worker *worker;
struct rte_cryptodev_info dev_info;
uint32_t i;
}
sched_ctx = dev->data->dev_private;
- if (sched_ctx->nb_slaves >=
- RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CR_SCHED_LOG(ERR, "Too many slaves attached");
+ if (sched_ctx->nb_workers >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
+ CR_SCHED_LOG(ERR, "Too many workers attached");
return -ENOMEM;
}
- for (i = 0; i < sched_ctx->nb_slaves; i++)
- if (sched_ctx->slaves[i].dev_id == slave_id) {
- CR_SCHED_LOG(ERR, "Slave already added");
+ for (i = 0; i < sched_ctx->nb_workers; i++)
+ if (sched_ctx->workers[i].dev_id == worker_id) {
+ CR_SCHED_LOG(ERR, "Worker already added");
return -ENOTSUP;
}
- slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+ worker = &sched_ctx->workers[sched_ctx->nb_workers];
- rte_cryptodev_info_get(slave_id, &dev_info);
+ rte_cryptodev_info_get(worker_id, &dev_info);
- slave->dev_id = slave_id;
- slave->driver_id = dev_info.driver_id;
- sched_ctx->nb_slaves++;
+ worker->dev_id = worker_id;
+ worker->driver_id = dev_info.driver_id;
+ sched_ctx->nb_workers++;
if (update_scheduler_capability(sched_ctx) < 0) {
- slave->dev_id = 0;
- slave->driver_id = 0;
- sched_ctx->nb_slaves--;
+ worker->dev_id = 0;
+ worker->driver_id = 0;
+ sched_ctx->nb_workers--;
CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
int
-rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
{
struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
struct scheduler_ctx *sched_ctx;
- uint32_t i, slave_pos;
+ uint32_t i, worker_pos;
if (!dev) {
CR_SCHED_LOG(ERR, "Operation not supported");
sched_ctx = dev->data->dev_private;
- for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
- if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+ for (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)
+ if (sched_ctx->workers[worker_pos].dev_id == worker_id)
break;
- if (slave_pos == sched_ctx->nb_slaves) {
- CR_SCHED_LOG(ERR, "Cannot find slave");
+ if (worker_pos == sched_ctx->nb_workers) {
+ CR_SCHED_LOG(ERR, "Cannot find worker");
return -ENOTSUP;
}
- if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
- CR_SCHED_LOG(ERR, "Failed to detach slave");
+ if (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to detach worker");
return -ENOTSUP;
}
- for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
- memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
- sizeof(struct scheduler_slave));
+ for (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {
+ memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],
+ sizeof(struct scheduler_worker));
}
- memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
- sizeof(struct scheduler_slave));
- sched_ctx->nb_slaves--;
+ memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,
+ sizeof(struct scheduler_worker));
+ sched_ctx->nb_workers--;
if (update_scheduler_capability(sched_ctx) < 0) {
CR_SCHED_LOG(ERR, "capabilities update failed");
sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
- sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
- sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+ sched_ctx->ops.worker_attach = scheduler->ops->worker_attach;
+ sched_ctx->ops.worker_detach = scheduler->ops->worker_detach;
sched_ctx->ops.option_set = scheduler->ops->option_set;
sched_ctx->ops.option_get = scheduler->ops->option_get;
}
int
-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)
{
struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
struct scheduler_ctx *sched_ctx;
- uint32_t nb_slaves = 0;
+ uint32_t nb_workers = 0;
if (!dev) {
CR_SCHED_LOG(ERR, "Operation not supported");
sched_ctx = dev->data->dev_private;
- nb_slaves = sched_ctx->nb_slaves;
+ nb_workers = sched_ctx->nb_workers;
- if (slaves && nb_slaves) {
+ if (workers && nb_workers) {
uint32_t i;
- for (i = 0; i < nb_slaves; i++)
- slaves[i] = sched_ctx->slaves[i].dev_id;
+ for (i = 0; i < nb_workers; i++)
+ workers[i] = sched_ctx->workers[i].dev_id;
}
- return (int)nb_slaves;
+ return (int)nb_workers;
}
int
*
* RTE Cryptodev Scheduler Device
*
- * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)
+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple worker
* Cryptodevs into a single logical crypto device, and the scheduling of
- * crypto operations to the slaves based on the mode of the specified mode of
+ * crypto operations to the workers based on the specified and supported mode
* of operation. This implementation supports 3 modes of
* operation: round robin, packet-size based, and fail-over.
*/
#endif
/** Maximum number of bonded devices per device */
-#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS (8)
#endif
/** Maximum number of multi-core worker cores */
*
* @param scheduler_id
* The target scheduler device ID
- * @param slave_id
+ * @param worker_id
* Crypto device ID to be attached
*
* @return
- * - 0 if the slave is attached.
+ * - 0 if the worker is attached.
* - -ENOTSUP if the operation is not supported.
* - -EBUSY if device is started.
- * - -ENOMEM if the scheduler's slave list is full.
+ * - -ENOMEM if the scheduler's worker list is full.
*/
int
-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id);
/**
* Detach a crypto device from the scheduler
*
* @param scheduler_id
* The target scheduler device ID
- * @param slave_id
+ * @param worker_id
* Crypto device ID to be detached
*
* @return
- * - 0 if the slave is detached.
+ * - 0 if the worker is detached.
* - -ENOTSUP if the operation is not supported.
* - -EBUSY if device is started.
*/
int
-rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
-
+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id);
/**
* Set the scheduling mode
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
/**
- * Get the attached slaves' count and/or ID
+ * Get the attached workers' count and/or ID
*
* @param scheduler_id
* The target scheduler device ID
- * @param slaves
- * If successful, the function will write back all slaves' device IDs to it.
+ * @param workers
+ * If successful, the function will write back all workers' device IDs to it.
* This parameter will either be an uint8_t array of
- * RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.
+ * RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS elements or NULL.
*
* @return
- * - non-negative number: the number of slaves attached
+ * - non-negative number: the number of workers attached
* - -ENOTSUP if the operation is not supported.
*/
int
-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers);
/**
* Set the mode specific option
extern "C" {
#endif
-typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
- struct rte_cryptodev *dev, uint8_t slave_id);
-typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
- struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_worker_attach_t)(
+ struct rte_cryptodev *dev, uint8_t worker_id);
+typedef int (*rte_cryptodev_scheduler_worker_detach_t)(
+ struct rte_cryptodev *dev, uint8_t worker_id);
typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
void *option);
struct rte_cryptodev_scheduler_ops {
- rte_cryptodev_scheduler_slave_attach_t slave_attach;
- rte_cryptodev_scheduler_slave_attach_t slave_detach;
+ rte_cryptodev_scheduler_worker_attach_t worker_attach;
+ rte_cryptodev_scheduler_worker_attach_t worker_detach;
rte_cryptodev_scheduler_start_t scheduler_start;
rte_cryptodev_scheduler_stop_t scheduler_stop;
rte_cryptodev_scheduler_option_set;
rte_cryptodev_scheduler_ordering_get;
rte_cryptodev_scheduler_ordering_set;
- rte_cryptodev_scheduler_slave_attach;
- rte_cryptodev_scheduler_slave_detach;
- rte_cryptodev_scheduler_slaves_get;
+ rte_cryptodev_scheduler_worker_attach;
+ rte_cryptodev_scheduler_worker_detach;
+ rte_cryptodev_scheduler_workers_get;
local: *;
};
#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"
-#define PRIMARY_SLAVE_IDX 0
-#define SECONDARY_SLAVE_IDX 1
-#define NB_FAILOVER_SLAVES 2
-#define SLAVE_SWITCH_MASK (0x01)
+#define PRIMARY_WORKER_IDX 0
+#define SECONDARY_WORKER_IDX 1
+#define NB_FAILOVER_WORKERS 2
+#define WORKER_SWITCH_MASK (0x01)
struct fo_scheduler_qp_ctx {
- struct scheduler_slave primary_slave;
- struct scheduler_slave secondary_slave;
+ struct scheduler_worker primary_worker;
+ struct scheduler_worker secondary_worker;
uint8_t deq_idx;
};
static __rte_always_inline uint16_t
-failover_slave_enqueue(struct scheduler_slave *slave,
+failover_worker_enqueue(struct scheduler_worker *worker,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
uint16_t i, processed_ops;
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
- slave->qp_id, ops, nb_ops);
- slave->nb_inflight_cops += processed_ops;
+ processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
+ worker->qp_id, ops, nb_ops);
+ worker->nb_inflight_cops += processed_ops;
return processed_ops;
}
if (unlikely(nb_ops == 0))
return 0;
- enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
+ enqueued_ops = failover_worker_enqueue(&qp_ctx->primary_worker,
ops, nb_ops);
if (enqueued_ops < nb_ops)
- enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
+ enqueued_ops += failover_worker_enqueue(
+ &qp_ctx->secondary_worker,
&ops[enqueued_ops],
nb_ops - enqueued_ops);
{
struct fo_scheduler_qp_ctx *qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
- struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
- &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
- struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ struct scheduler_worker *workers[NB_FAILOVER_WORKERS] = {
+ &qp_ctx->primary_worker, &qp_ctx->secondary_worker};
+ struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
- if (slave->nb_inflight_cops) {
- nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, ops, nb_ops);
- slave->nb_inflight_cops -= nb_deq_ops;
+ if (worker->nb_inflight_cops) {
+ nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
+ worker->qp_id, ops, nb_ops);
+ worker->nb_inflight_cops -= nb_deq_ops;
}
- qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;
if (nb_deq_ops == nb_ops)
return nb_deq_ops;
- slave = slaves[qp_ctx->deq_idx];
+ worker = workers[qp_ctx->deq_idx];
- if (slave->nb_inflight_cops) {
- nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
- slave->nb_inflight_cops -= nb_deq_ops2;
+ if (worker->nb_inflight_cops) {
+ nb_deq_ops2 = rte_cryptodev_dequeue_burst(worker->dev_id,
+ worker->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
+ worker->nb_inflight_cops -= nb_deq_ops2;
}
return nb_deq_ops + nb_deq_ops2;
}
static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint16_t i;
- if (sched_ctx->nb_slaves < 2) {
- CR_SCHED_LOG(ERR, "Number of slaves shall no less than 2");
+ if (sched_ctx->nb_workers < 2) {
+ CR_SCHED_LOG(ERR, "Number of workers shall no less than 2");
return -ENOMEM;
}
((struct scheduler_qp_ctx *)
dev->data->queue_pairs[i])->private_qp_ctx;
- rte_memcpy(&qp_ctx->primary_slave,
- &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
- sizeof(struct scheduler_slave));
- rte_memcpy(&qp_ctx->secondary_slave,
- &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
- sizeof(struct scheduler_slave));
+ rte_memcpy(&qp_ctx->primary_worker,
+ &sched_ctx->workers[PRIMARY_WORKER_IDX],
+ sizeof(struct scheduler_worker));
+ rte_memcpy(&qp_ctx->secondary_worker,
+ &sched_ctx->workers[SECONDARY_WORKER_IDX],
+ sizeof(struct scheduler_worker));
}
return 0;
}
static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
- slave_attach,
- slave_detach,
+ worker_attach,
+ worker_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
static struct rte_cryptodev_scheduler fo_scheduler = {
.name = "failover-scheduler",
- .description = "scheduler which enqueues to the primary slave, "
- "and only then enqueues to the secondary slave "
+ .description = "scheduler which enqueues to the primary worker, "
+ "and only then enqueues to the secondary worker "
"upon failing on enqueuing to primary",
.mode = CDEV_SCHED_MODE_FAILOVER,
.ops = &scheduler_fo_ops
};
struct mc_scheduler_qp_ctx {
- struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
- uint32_t nb_slaves;
+ struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+ uint32_t nb_workers;
uint32_t last_enq_worker_idx;
uint32_t last_deq_worker_idx;
}
static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
struct rte_ring *deq_ring;
uint32_t core_id = rte_lcore_id();
int i, worker_idx = -1;
- struct scheduler_slave *slave;
+ struct scheduler_worker *worker;
struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
uint16_t processed_ops;
return -1;
}
- slave = &sched_ctx->slaves[worker_idx];
+ worker = &sched_ctx->workers[worker_idx];
enq_ring = mc_ctx->sched_enq_ring[worker_idx];
deq_ring = mc_ctx->sched_deq_ring[worker_idx];
while (!mc_ctx->stop_signal) {
if (pending_enq_ops) {
processed_ops =
- rte_cryptodev_enqueue_burst(slave->dev_id,
- slave->qp_id, &enq_ops[pending_enq_ops_idx],
+ rte_cryptodev_enqueue_burst(worker->dev_id,
+ worker->qp_id,
+ &enq_ops[pending_enq_ops_idx],
pending_enq_ops);
pending_enq_ops -= processed_ops;
pending_enq_ops_idx += processed_ops;
MC_SCHED_BUFFER_SIZE, NULL);
if (processed_ops) {
pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
- slave->dev_id, slave->qp_id,
- enq_ops, processed_ops);
+ worker->dev_id, worker->qp_id,
+ enq_ops, processed_ops);
pending_enq_ops = processed_ops - pending_enq_ops_idx;
inflight_ops += pending_enq_ops_idx;
}
pending_deq_ops -= processed_ops;
pending_deq_ops_idx += processed_ops;
} else if (inflight_ops) {
- processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+ processed_ops = rte_cryptodev_dequeue_burst(
+ worker->dev_id, worker->qp_id, deq_ops,
+ MC_SCHED_BUFFER_SIZE);
if (processed_ops) {
inflight_ops -= processed_ops;
if (reordering_enabled) {
qp_ctx->private_qp_ctx;
uint32_t j;
- memset(mc_qp_ctx->slaves, 0,
- RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
- sizeof(struct scheduler_slave));
- for (j = 0; j < sched_ctx->nb_slaves; j++) {
- mc_qp_ctx->slaves[j].dev_id =
- sched_ctx->slaves[j].dev_id;
- mc_qp_ctx->slaves[j].qp_id = i;
+ memset(mc_qp_ctx->workers, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+ sizeof(struct scheduler_worker));
+ for (j = 0; j < sched_ctx->nb_workers; j++) {
+ mc_qp_ctx->workers[j].dev_id =
+ sched_ctx->workers[j].dev_id;
+ mc_qp_ctx->workers[j].qp_id = i;
}
- mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+ mc_qp_ctx->nb_workers = sched_ctx->nb_workers;
mc_qp_ctx->last_enq_worker_idx = 0;
mc_qp_ctx->last_deq_worker_idx = 0;
mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_enq_ring[i]) {
mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
- PER_SLAVE_BUFF_SIZE,
+ PER_WORKER_BUFF_SIZE,
rte_socket_id(),
RING_F_SC_DEQ | RING_F_SP_ENQ);
if (!mc_ctx->sched_enq_ring[i]) {
mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_deq_ring[i]) {
mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
- PER_SLAVE_BUFF_SIZE,
+ PER_WORKER_BUFF_SIZE,
rte_socket_id(),
RING_F_SC_DEQ | RING_F_SP_ENQ);
if (!mc_ctx->sched_deq_ring[i]) {
}
static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
- slave_attach,
- slave_detach,
+ worker_attach,
+ worker_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
#include "scheduler_pmd_private.h"
#define DEF_PKT_SIZE_THRESHOLD (0xffffff80)
-#define SLAVE_IDX_SWITCH_MASK (0x01)
-#define PRIMARY_SLAVE_IDX 0
-#define SECONDARY_SLAVE_IDX 1
-#define NB_PKT_SIZE_SLAVES 2
+#define WORKER_IDX_SWITCH_MASK (0x01)
+#define PRIMARY_WORKER_IDX 0
+#define SECONDARY_WORKER_IDX 1
+#define NB_PKT_SIZE_WORKERS 2
/** pkt size based scheduler context */
struct psd_scheduler_ctx {
/** pkt size based scheduler queue pair context */
struct psd_scheduler_qp_ctx {
- struct scheduler_slave primary_slave;
- struct scheduler_slave secondary_slave;
+ struct scheduler_worker primary_worker;
+ struct scheduler_worker secondary_worker;
uint32_t threshold;
uint8_t deq_idx;
} __rte_cache_aligned;
/** scheduling operation variables' wrapping */
struct psd_schedule_op {
- uint8_t slave_idx;
+ uint8_t worker_idx;
uint16_t pos;
};
{
struct scheduler_qp_ctx *qp_ctx = qp;
struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
- struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
- uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
- psd_qp_ctx->primary_slave.nb_inflight_cops,
- psd_qp_ctx->secondary_slave.nb_inflight_cops
+ struct rte_crypto_op *sched_ops[NB_PKT_SIZE_WORKERS][nb_ops];
+ uint32_t in_flight_ops[NB_PKT_SIZE_WORKERS] = {
+ psd_qp_ctx->primary_worker.nb_inflight_cops,
+ psd_qp_ctx->secondary_worker.nb_inflight_cops
};
- struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
- {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+ struct psd_schedule_op enq_ops[NB_PKT_SIZE_WORKERS] = {
+ {PRIMARY_WORKER_IDX, 0}, {SECONDARY_WORKER_IDX, 0}
};
struct psd_schedule_op *p_enq_op;
uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
/* stop schedule cops before the queue is full, this shall
* prevent the failed enqueue
*/
- if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
qp_ctx->max_nb_objs) {
i = nb_ops;
break;
}
- sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
p_enq_op->pos++;
job_len = ops[i+1]->sym->cipher.data.length;
ops[i+1]->sym->auth.data.length;
p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
- if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
qp_ctx->max_nb_objs) {
i = nb_ops;
break;
}
- sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+ sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];
p_enq_op->pos++;
job_len = ops[i+2]->sym->cipher.data.length;
ops[i+2]->sym->auth.data.length;
p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
- if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
qp_ctx->max_nb_objs) {
i = nb_ops;
break;
}
- sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+ sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];
p_enq_op->pos++;
job_len = ops[i+3]->sym->cipher.data.length;
ops[i+3]->sym->auth.data.length;
p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
- if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
qp_ctx->max_nb_objs) {
i = nb_ops;
break;
}
- sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+ sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];
p_enq_op->pos++;
}
ops[i]->sym->auth.data.length;
p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
- if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
qp_ctx->max_nb_objs) {
i = nb_ops;
break;
}
- sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
p_enq_op->pos++;
}
processed_ops_pri = rte_cryptodev_enqueue_burst(
- psd_qp_ctx->primary_slave.dev_id,
- psd_qp_ctx->primary_slave.qp_id,
- sched_ops[PRIMARY_SLAVE_IDX],
- enq_ops[PRIMARY_SLAVE_IDX].pos);
- /* enqueue shall not fail as the slave queue is monitored */
- RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+ psd_qp_ctx->primary_worker.dev_id,
+ psd_qp_ctx->primary_worker.qp_id,
+ sched_ops[PRIMARY_WORKER_IDX],
+ enq_ops[PRIMARY_WORKER_IDX].pos);
+ /* enqueue shall not fail as the worker queue is monitored */
+ RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_WORKER_IDX].pos);
- psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+ psd_qp_ctx->primary_worker.nb_inflight_cops += processed_ops_pri;
processed_ops_sec = rte_cryptodev_enqueue_burst(
- psd_qp_ctx->secondary_slave.dev_id,
- psd_qp_ctx->secondary_slave.qp_id,
- sched_ops[SECONDARY_SLAVE_IDX],
- enq_ops[SECONDARY_SLAVE_IDX].pos);
- RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+ psd_qp_ctx->secondary_worker.dev_id,
+ psd_qp_ctx->secondary_worker.qp_id,
+ sched_ops[SECONDARY_WORKER_IDX],
+ enq_ops[SECONDARY_WORKER_IDX].pos);
+ RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_WORKER_IDX].pos);
- psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+ psd_qp_ctx->secondary_worker.nb_inflight_cops += processed_ops_sec;
return processed_ops_pri + processed_ops_sec;
}
{
struct psd_scheduler_qp_ctx *qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
- struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
- &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
- struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ struct scheduler_worker *workers[NB_PKT_SIZE_WORKERS] = {
+ &qp_ctx->primary_worker, &qp_ctx->secondary_worker};
+ struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
- if (slave->nb_inflight_cops) {
- nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, ops, nb_ops);
- slave->nb_inflight_cops -= nb_deq_ops_pri;
+ if (worker->nb_inflight_cops) {
+ nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,
+ worker->qp_id, ops, nb_ops);
+ worker->nb_inflight_cops -= nb_deq_ops_pri;
}
- qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_IDX_SWITCH_MASK;
if (nb_deq_ops_pri == nb_ops)
return nb_deq_ops_pri;
- slave = slaves[qp_ctx->deq_idx];
+ worker = workers[qp_ctx->deq_idx];
- if (slave->nb_inflight_cops) {
- nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, &ops[nb_deq_ops_pri],
+ if (worker->nb_inflight_cops) {
+ nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
+ worker->qp_id, &ops[nb_deq_ops_pri],
nb_ops - nb_deq_ops_pri);
- slave->nb_inflight_cops -= nb_deq_ops_sec;
+ worker->nb_inflight_cops -= nb_deq_ops_sec;
- if (!slave->nb_inflight_cops)
+ if (!worker->nb_inflight_cops)
qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
- SLAVE_IDX_SWITCH_MASK;
+ WORKER_IDX_SWITCH_MASK;
}
return nb_deq_ops_pri + nb_deq_ops_sec;
}
static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
uint16_t i;
- /* for packet size based scheduler, nb_slaves have to >= 2 */
- if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
- CR_SCHED_LOG(ERR, "not enough slaves to start");
+ /* for packet size based scheduler, nb_workers has to be >= 2 */
+ if (sched_ctx->nb_workers < NB_PKT_SIZE_WORKERS) {
+ CR_SCHED_LOG(ERR, "not enough workers to start");
return -1;
}
struct psd_scheduler_qp_ctx *ps_qp_ctx =
qp_ctx->private_qp_ctx;
- ps_qp_ctx->primary_slave.dev_id =
- sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
- ps_qp_ctx->primary_slave.qp_id = i;
- ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+ ps_qp_ctx->primary_worker.dev_id =
+ sched_ctx->workers[PRIMARY_WORKER_IDX].dev_id;
+ ps_qp_ctx->primary_worker.qp_id = i;
+ ps_qp_ctx->primary_worker.nb_inflight_cops = 0;
- ps_qp_ctx->secondary_slave.dev_id =
- sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
- ps_qp_ctx->secondary_slave.qp_id = i;
- ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+ ps_qp_ctx->secondary_worker.dev_id =
+ sched_ctx->workers[SECONDARY_WORKER_IDX].dev_id;
+ ps_qp_ctx->secondary_worker.qp_id = i;
+ ps_qp_ctx->secondary_worker.nb_inflight_cops = 0;
ps_qp_ctx->threshold = psd_ctx->threshold;
}
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
- if (ps_qp_ctx->primary_slave.nb_inflight_cops +
- ps_qp_ctx->secondary_slave.nb_inflight_cops) {
- CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
+ if (ps_qp_ctx->primary_worker.nb_inflight_cops +
+ ps_qp_ctx->secondary_worker.nb_inflight_cops) {
+ CR_SCHED_LOG(ERR, "Some crypto ops left in worker queue");
return -1;
}
}
}
static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
- slave_attach,
- slave_detach,
+ worker_attach,
+ worker_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
- uint32_t nb_slaves;
+ uint32_t nb_workers;
enum rte_cryptodev_scheduler_mode mode;
char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
uint32_t enable_ordering;
uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
- char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
+ char worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
#define RTE_CRYPTODEV_VDEV_NAME ("name")
-#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
+#define RTE_CRYPTODEV_VDEV_WORKER ("worker")
#define RTE_CRYPTODEV_VDEV_MODE ("mode")
#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param")
#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
static const char * const scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
- RTE_CRYPTODEV_VDEV_SLAVE,
+ RTE_CRYPTODEV_VDEV_WORKER,
RTE_CRYPTODEV_VDEV_MODE,
RTE_CRYPTODEV_VDEV_MODE_PARAM,
RTE_CRYPTODEV_VDEV_ORDERING,
break;
}
- for (i = 0; i < init_params->nb_slaves; i++) {
- sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
+ for (i = 0; i < init_params->nb_workers; i++) {
+ sched_ctx->init_worker_names[sched_ctx->nb_init_workers] =
rte_zmalloc_socket(
NULL,
RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
SOCKET_ID_ANY);
- if (!sched_ctx->init_slave_names[
- sched_ctx->nb_init_slaves]) {
+ if (!sched_ctx->init_worker_names[
+ sched_ctx->nb_init_workers]) {
CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
name);
return -ENOMEM;
}
- strncpy(sched_ctx->init_slave_names[
- sched_ctx->nb_init_slaves],
- init_params->slave_names[i],
+ strncpy(sched_ctx->init_worker_names[
+ sched_ctx->nb_init_workers],
+ init_params->worker_names[i],
RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
- sched_ctx->nb_init_slaves++;
+ sched_ctx->nb_init_workers++;
}
/*
* Initialize capabilities structure as an empty structure,
- * in case device information is requested when no slaves are attached
+ * in case device information is requested when no workers are attached
*/
sched_ctx->capabilities = rte_zmalloc_socket(NULL,
sizeof(struct rte_cryptodev_capabilities),
sched_ctx = dev->data->dev_private;
- if (sched_ctx->nb_slaves) {
+ if (sched_ctx->nb_workers) {
uint32_t i;
- for (i = 0; i < sched_ctx->nb_slaves; i++)
- rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
- sched_ctx->slaves[i].dev_id);
+ for (i = 0; i < sched_ctx->nb_workers; i++)
+ rte_cryptodev_scheduler_worker_detach(dev->data->dev_id,
+ sched_ctx->workers[i].dev_id);
}
return rte_cryptodev_pmd_destroy(dev);
return 0;
}
-/** Parse slave */
+/** Parse worker */
static int
-parse_slave_arg(const char *key __rte_unused,
+parse_worker_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
struct scheduler_init_params *param = extra_args;
- if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CR_SCHED_LOG(ERR, "Too many slaves.");
+ if (param->nb_workers >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
+ CR_SCHED_LOG(ERR, "Too many workers.");
return -ENOMEM;
}
- strncpy(param->slave_names[param->nb_slaves++], value,
+ strncpy(param->worker_names[param->nb_workers++], value,
RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
return 0;
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
- &parse_slave_arg, params);
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_WORKER,
+ &parse_worker_arg, params);
if (ret < 0)
goto free_kvlist;
rte_socket_id(),
RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
},
- .nb_slaves = 0,
+ .nb_workers = 0,
.mode = CDEV_SCHED_MODE_NOT_SET,
.enable_ordering = 0,
- .slave_names = { {0} }
+ .worker_names = { {0} }
};
const char *name;
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_queue_pairs=<int> "
"socket_id=<int> "
- "slave=<name>");
+ "worker=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
cryptodev_scheduler_pmd_drv.driver,
cryptodev_scheduler_driver_id);
#include "scheduler_pmd_private.h"
-/** attaching the slaves predefined by scheduler's EAL options */
+/** attaching the workers predefined by scheduler's EAL options */
static int
-scheduler_attach_init_slave(struct rte_cryptodev *dev)
+scheduler_attach_init_worker(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint8_t scheduler_id = dev->data->dev_id;
int i;
- for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
- const char *dev_name = sched_ctx->init_slave_names[i];
- struct rte_cryptodev *slave_dev =
+ for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
+ const char *dev_name = sched_ctx->init_worker_names[i];
+ struct rte_cryptodev *worker_dev =
rte_cryptodev_pmd_get_named_dev(dev_name);
int status;
- if (!slave_dev) {
- CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
+ if (!worker_dev) {
+ CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
dev_name);
return -EINVAL;
}
- status = rte_cryptodev_scheduler_slave_attach(
- scheduler_id, slave_dev->data->dev_id);
+ status = rte_cryptodev_scheduler_worker_attach(
+ scheduler_id, worker_dev->data->dev_id);
if (status < 0) {
- CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
- slave_dev->data->dev_id);
+ CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
+ worker_dev->data->dev_id);
return status;
}
- CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
+ CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
dev->data->name,
- sched_ctx->init_slave_names[i]);
+ sched_ctx->init_worker_names[i]);
- rte_free(sched_ctx->init_slave_names[i]);
- sched_ctx->init_slave_names[i] = NULL;
+ rte_free(sched_ctx->init_worker_names[i]);
+ sched_ctx->init_worker_names[i] = NULL;
- sched_ctx->nb_init_slaves -= 1;
+ sched_ctx->nb_init_workers -= 1;
}
return 0;
uint32_t i;
int ret;
- /* although scheduler_attach_init_slave presents multiple times,
+ /* although scheduler_attach_init_worker may be called multiple times,
* there will be only 1 meaningful execution.
*/
- ret = scheduler_attach_init_slave(dev);
+ ret = scheduler_attach_init_worker(dev);
if (ret < 0)
return ret;
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
- ret = rte_cryptodev_configure(slave_dev_id, config);
+ ret = rte_cryptodev_configure(worker_dev_id, config);
if (ret < 0)
break;
}
if (sched_ctx->reordering_enabled) {
char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
uint32_t buff_size = rte_align32pow2(
- sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
+ sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);
if (qp_ctx->order_ring) {
rte_ring_free(qp_ctx->order_ring);
if (dev->data->dev_started)
return 0;
- /* although scheduler_attach_init_slave presents multiple times,
+ /* although scheduler_attach_init_worker may be called multiple times,
* there will be only 1 meaningful execution.
*/
- ret = scheduler_attach_init_slave(dev);
+ ret = scheduler_attach_init_worker(dev);
if (ret < 0)
return ret;
return -1;
}
- if (!sched_ctx->nb_slaves) {
- CR_SCHED_LOG(ERR, "No slave in the scheduler");
+ if (!sched_ctx->nb_workers) {
+ CR_SCHED_LOG(ERR, "No worker in the scheduler");
return -1;
}
- RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
- if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
- CR_SCHED_LOG(ERR, "Failed to attach slave");
+ if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
+ CR_SCHED_LOG(ERR, "Failed to attach worker");
return -ENOTSUP;
}
}
return -1;
}
- /* start all slaves */
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev *slave_dev =
- rte_cryptodev_pmd_get_dev(slave_dev_id);
+ /* start all workers */
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev *worker_dev =
+ rte_cryptodev_pmd_get_dev(worker_dev_id);
- ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+ ret = (*worker_dev->dev_ops->dev_start)(worker_dev);
if (ret < 0) {
- CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
- slave_dev_id);
+ CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
+ worker_dev_id);
return ret;
}
}
if (!dev->data->dev_started)
return;
- /* stop all slaves first */
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev *slave_dev =
- rte_cryptodev_pmd_get_dev(slave_dev_id);
+ /* stop all workers first */
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev *worker_dev =
+ rte_cryptodev_pmd_get_dev(worker_dev_id);
- (*slave_dev->dev_ops->dev_stop)(slave_dev);
+ (*worker_dev->dev_ops->dev_stop)(worker_dev);
}
if (*sched_ctx->ops.scheduler_stop)
(*sched_ctx->ops.scheduler_stop)(dev);
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
- if (*sched_ctx->ops.slave_detach)
- (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+ if (*sched_ctx->ops.worker_detach)
+ (*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
}
}
if (dev->data->dev_started)
return -EBUSY;
- /* close all slaves first */
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev *slave_dev =
- rte_cryptodev_pmd_get_dev(slave_dev_id);
+ /* close all workers first */
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev *worker_dev =
+ rte_cryptodev_pmd_get_dev(worker_dev_id);
- ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+ ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
if (ret < 0)
return ret;
}
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev *slave_dev =
- rte_cryptodev_pmd_get_dev(slave_dev_id);
- struct rte_cryptodev_stats slave_stats = {0};
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev *worker_dev =
+ rte_cryptodev_pmd_get_dev(worker_dev_id);
+ struct rte_cryptodev_stats worker_stats = {0};
- (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+ (*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);
- stats->enqueued_count += slave_stats.enqueued_count;
- stats->dequeued_count += slave_stats.dequeued_count;
+ stats->enqueued_count += worker_stats.enqueued_count;
+ stats->dequeued_count += worker_stats.dequeued_count;
- stats->enqueue_err_count += slave_stats.enqueue_err_count;
- stats->dequeue_err_count += slave_stats.dequeue_err_count;
+ stats->enqueue_err_count += worker_stats.enqueue_err_count;
+ stats->dequeue_err_count += worker_stats.dequeue_err_count;
}
}
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev *slave_dev =
- rte_cryptodev_pmd_get_dev(slave_dev_id);
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev *worker_dev =
+ rte_cryptodev_pmd_get_dev(worker_dev_id);
- (*slave_dev->dev_ops->stats_reset)(slave_dev);
+ (*worker_dev->dev_ops->stats_reset)(worker_dev);
}
}
if (!dev_info)
return;
- /* although scheduler_attach_init_slave presents multiple times,
+ /* although scheduler_attach_init_worker may be called multiple times,
* there will be only 1 meaningful execution.
*/
- scheduler_attach_init_slave(dev);
+ scheduler_attach_init_worker(dev);
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev_info slave_info;
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev_info worker_info;
- rte_cryptodev_info_get(slave_dev_id, &slave_info);
- uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+ rte_cryptodev_info_get(worker_dev_id, &worker_info);
+ uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
if (dev_max_sess != 0) {
if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
- max_nb_sess = slave_info.sym.max_nb_sessions;
+ max_nb_sess = worker_info.sym.max_nb_sessions;
}
- /* Get the max headroom requirement among slave PMDs */
- headroom_sz = slave_info.min_mbuf_headroom_req >
+ /* Get the max headroom requirement among worker PMDs */
+ headroom_sz = worker_info.min_mbuf_headroom_req >
headroom_sz ?
- slave_info.min_mbuf_headroom_req :
+ worker_info.min_mbuf_headroom_req :
headroom_sz;
- /* Get the max tailroom requirement among slave PMDs */
- tailroom_sz = slave_info.min_mbuf_tailroom_req >
+ /* Get the max tailroom requirement among worker PMDs */
+ tailroom_sz = worker_info.min_mbuf_tailroom_req >
tailroom_sz ?
- slave_info.min_mbuf_tailroom_req :
+ worker_info.min_mbuf_tailroom_req :
tailroom_sz;
}
if (dev->data->queue_pairs[qp_id] != NULL)
scheduler_pmd_qp_release(dev, qp_id);
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_id = sched_ctx->workers[i].dev_id;
/*
- * All slaves will share the same session mempool
+ * All workers will share the same session mempool
* for session-less operations, so the objects
* must be big enough for all the drivers used.
*/
- ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
+ ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
qp_conf, socket_id);
if (ret < 0)
return ret;
dev->data->queue_pairs[qp_id] = qp_ctx;
- /* although scheduler_attach_init_slave presents multiple times,
+ /* although scheduler_attach_init_worker may be called multiple times,
* there will be only 1 meaningful execution.
*/
- ret = scheduler_attach_init_slave(dev);
+ ret = scheduler_attach_init_worker(dev);
if (ret < 0) {
- CR_SCHED_LOG(ERR, "Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach worker");
scheduler_pmd_qp_release(dev, qp_id);
return ret;
}
uint8_t i = 0;
uint32_t max_priv_sess_size = 0;
- /* Check what is the maximum private session size for all slaves */
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
- struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
+ /* Check what is the maximum private session size for all workers */
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+ struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
if (max_priv_sess_size < priv_sess_size)
uint32_t i;
int ret;
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- struct scheduler_slave *slave = &sched_ctx->slaves[i];
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ struct scheduler_worker *worker = &sched_ctx->workers[i];
- ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+ ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
xform, mempool);
if (ret < 0) {
CR_SCHED_LOG(ERR, "unable to config sym session");
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t i;
- /* Clear private data of slaves */
- for (i = 0; i < sched_ctx->nb_slaves; i++) {
- struct scheduler_slave *slave = &sched_ctx->slaves[i];
+ /* Clear private data of workers */
+ for (i = 0; i < sched_ctx->nb_workers; i++) {
+ struct scheduler_worker *worker = &sched_ctx->workers[i];
- rte_cryptodev_sym_session_clear(slave->dev_id, sess);
+ rte_cryptodev_sym_session_clear(worker->dev_id, sess);
}
}
#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
/**< Scheduler Crypto PMD device name */
-#define PER_SLAVE_BUFF_SIZE (256)
+#define PER_WORKER_BUFF_SIZE (256)
extern int scheduler_logtype_driver;
rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
-struct scheduler_slave {
+struct scheduler_worker {
uint8_t dev_id;
uint16_t qp_id;
uint32_t nb_inflight_cops;
uint32_t max_nb_queue_pairs;
- struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
- uint32_t nb_slaves;
+ struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+ uint32_t nb_workers;
enum rte_cryptodev_scheduler_mode mode;
uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
- char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
- int nb_init_slaves;
+ char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+ int nb_init_workers;
} __rte_cache_aligned;
struct scheduler_qp_ctx {
#include "scheduler_pmd_private.h"
struct rr_scheduler_qp_ctx {
- struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
- uint32_t nb_slaves;
+ struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+ uint32_t nb_workers;
- uint32_t last_enq_slave_idx;
- uint32_t last_deq_slave_idx;
+ uint32_t last_enq_worker_idx;
+ uint32_t last_deq_worker_idx;
};
static uint16_t
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
- uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
- struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+ uint32_t worker_idx = rr_qp_ctx->last_enq_worker_idx;
+ struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];
uint16_t i, processed_ops;
if (unlikely(nb_ops == 0))
for (i = 0; i < nb_ops && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
- slave->qp_id, ops, nb_ops);
+ processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
+ worker->qp_id, ops, nb_ops);
- slave->nb_inflight_cops += processed_ops;
+ worker->nb_inflight_cops += processed_ops;
- rr_qp_ctx->last_enq_slave_idx += 1;
- rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+ rr_qp_ctx->last_enq_worker_idx += 1;
+ rr_qp_ctx->last_enq_worker_idx %= rr_qp_ctx->nb_workers;
return processed_ops;
}
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
- struct scheduler_slave *slave;
- uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+ struct scheduler_worker *worker;
+ uint32_t last_worker_idx = rr_qp_ctx->last_deq_worker_idx;
uint16_t nb_deq_ops;
- if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+ if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
+ == 0)) {
do {
- last_slave_idx += 1;
+ last_worker_idx += 1;
- if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
- last_slave_idx = 0;
+ if (unlikely(last_worker_idx >= rr_qp_ctx->nb_workers))
+ last_worker_idx = 0;
/* looped back, means no inflight cops in the queue */
- if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+ if (last_worker_idx == rr_qp_ctx->last_deq_worker_idx)
return 0;
- } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+ } while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
== 0);
}
- slave = &rr_qp_ctx->slaves[last_slave_idx];
+ worker = &rr_qp_ctx->workers[last_worker_idx];
- nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
- slave->qp_id, ops, nb_ops);
+ nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
+ worker->qp_id, ops, nb_ops);
- last_slave_idx += 1;
- last_slave_idx %= rr_qp_ctx->nb_slaves;
+ last_worker_idx += 1;
+ last_worker_idx %= rr_qp_ctx->nb_workers;
- rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+ rr_qp_ctx->last_deq_worker_idx = last_worker_idx;
- slave->nb_inflight_cops -= nb_deq_ops;
+ worker->nb_inflight_cops -= nb_deq_ops;
return nb_deq_ops;
}
}
static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t worker_id)
{
return 0;
}
qp_ctx->private_qp_ctx;
uint32_t j;
- memset(rr_qp_ctx->slaves, 0,
- RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
- sizeof(struct scheduler_slave));
- for (j = 0; j < sched_ctx->nb_slaves; j++) {
- rr_qp_ctx->slaves[j].dev_id =
- sched_ctx->slaves[j].dev_id;
- rr_qp_ctx->slaves[j].qp_id = i;
+ memset(rr_qp_ctx->workers, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+ sizeof(struct scheduler_worker));
+ for (j = 0; j < sched_ctx->nb_workers; j++) {
+ rr_qp_ctx->workers[j].dev_id =
+ sched_ctx->workers[j].dev_id;
+ rr_qp_ctx->workers[j].qp_id = i;
}
- rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+ rr_qp_ctx->nb_workers = sched_ctx->nb_workers;
- rr_qp_ctx->last_enq_slave_idx = 0;
- rr_qp_ctx->last_deq_slave_idx = 0;
+ rr_qp_ctx->last_enq_worker_idx = 0;
+ rr_qp_ctx->last_deq_worker_idx = 0;
}
return 0;
}
static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
- slave_attach,
- slave_detach,
+ worker_attach,
+ worker_detach,
scheduler_start,
scheduler_stop,
scheduler_config_qp,
static struct rte_cryptodev_scheduler scheduler = {
.name = "roundrobin-scheduler",
.description = "scheduler which will round robin burst across "
- "slave crypto devices",
+ "worker crypto devices",
.mode = CDEV_SCHED_MODE_ROUNDROBIN,
.ops = &scheduler_rr_ops
};
*/
if (!strcmp(dev_info.driver_name, "crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
- uint32_t nb_slaves =
- rte_cryptodev_scheduler_slaves_get(cdev_id,
+ uint32_t nb_workers =
+ rte_cryptodev_scheduler_workers_get(cdev_id,
NULL);
- sessions_needed = enabled_cdev_count * nb_slaves;
+ sessions_needed = enabled_cdev_count * nb_workers;
#endif
} else
sessions_needed = enabled_cdev_count;