/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
#define DRIVER_NAME bbdev_null

/* Default queue count used when "max_nb_queues" is not supplied.
 * NOTE(review): upstream defines this as RTE_MAX_LCORE; guarded in case a
 * header already provides it — confirm against the build.
 */
#ifndef RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
#define RTE_BBDEV_DEFAULT_MAX_NB_QUEUES RTE_MAX_LCORE
#endif

/* Initialisation params structure that can be used by null BBDEV driver */
struct bbdev_null_params {
	int socket_id;       /**< Null BBDEV socket */
	uint16_t queues_num; /**< Null BBDEV queues number */
};
/* Acceptable params for null BBDEV devices */
#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
#define BBDEV_NULL_SOCKET_ID_ARG "socket_id"

/* Keys accepted on the vdev args string; order matters: index 0 is the
 * queue count, index 1 the socket id (see parse_bbdev_null_params()).
 */
static const char * const bbdev_null_valid_params[] = {
	BBDEV_NULL_MAX_NB_QUEUES_ARG,
	BBDEV_NULL_SOCKET_ID_ARG
};
/* Private data structure, one per device */
struct bbdev_private {
	unsigned int max_nb_queues; /**< Max number of queues */
};

/* Queue structure, one per configured queue */
struct bbdev_queue {
	struct rte_ring *processed_pkts; /* Ring for processed packets */
} __rte_cache_aligned;
45 info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
47 struct bbdev_private *internals = dev->data->dev_private;
49 static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
50 RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
53 static struct rte_bbdev_queue_conf default_queue_conf = {
54 .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
57 default_queue_conf.socket = dev->data->socket_id;
59 dev_info->driver_name = RTE_STR(DRIVER_NAME);
60 dev_info->max_num_queues = internals->max_nb_queues;
61 dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
62 dev_info->hardware_accelerated = false;
63 dev_info->max_queue_priority = 0;
64 dev_info->default_queue_conf = default_queue_conf;
65 dev_info->capabilities = bbdev_capabilities;
66 dev_info->cpu_flag_reqs = NULL;
67 dev_info->min_alignment = 0;
69 rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
74 q_release(struct rte_bbdev *dev, uint16_t q_id)
76 struct bbdev_queue *q = dev->data->queues[q_id].queue_private;
79 rte_ring_free(q->processed_pkts);
81 dev->data->queues[q_id].queue_private = NULL;
84 rte_bbdev_log_debug("released device queue %u:%u",
85 dev->data->dev_id, q_id);
91 q_setup(struct rte_bbdev *dev, uint16_t q_id,
92 const struct rte_bbdev_queue_conf *queue_conf)
94 struct bbdev_queue *q;
95 char ring_name[RTE_RING_NAMESIZE];
96 snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
97 dev->data->dev_id, q_id);
99 /* Allocate the queue data structure. */
100 q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
101 RTE_CACHE_LINE_SIZE, queue_conf->socket);
103 rte_bbdev_log(ERR, "Failed to allocate queue memory");
107 q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,
108 queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
109 if (q->processed_pkts == NULL) {
110 rte_bbdev_log(ERR, "Failed to create ring");
114 dev->data->queues[q_id].queue_private = q;
115 rte_bbdev_log_debug("setup device queue %s", ring_name);
123 static const struct rte_bbdev_ops pmd_ops = {
124 .info_get = info_get,
125 .queue_setup = q_setup,
126 .queue_release = q_release
129 /* Enqueue decode burst */
131 enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
132 struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
134 struct bbdev_queue *q = q_data->queue_private;
135 uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
136 (void **)ops, nb_ops, NULL);
138 q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
139 q_data->queue_stats.enqueued_count += nb_enqueued;
144 /* Enqueue encode burst */
146 enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
147 struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
149 struct bbdev_queue *q = q_data->queue_private;
150 uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
151 (void **)ops, nb_ops, NULL);
153 q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
154 q_data->queue_stats.enqueued_count += nb_enqueued;
159 /* Dequeue decode burst */
161 dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
162 struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
164 struct bbdev_queue *q = q_data->queue_private;
165 uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
166 (void **)ops, nb_ops, NULL);
167 q_data->queue_stats.dequeued_count += nb_dequeued;
172 /* Dequeue encode burst */
174 dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
175 struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
177 struct bbdev_queue *q = q_data->queue_private;
178 uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
179 (void **)ops, nb_ops, NULL);
180 q_data->queue_stats.dequeued_count += nb_dequeued;
185 /* Parse 16bit integer from string argument */
187 parse_u16_arg(const char *key, const char *value, void *extra_args)
189 uint16_t *u16 = extra_args;
190 unsigned int long result;
192 if ((value == NULL) || (extra_args == NULL))
195 result = strtoul(value, NULL, 0);
196 if ((result >= (1 << 16)) || (errno != 0)) {
197 rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
200 *u16 = (uint16_t)result;
204 /* Parse parameters used to create device */
206 parse_bbdev_null_params(struct bbdev_null_params *params,
207 const char *input_args)
209 struct rte_kvargs *kvlist = NULL;
215 kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params);
219 ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0],
220 &parse_u16_arg, ¶ms->queues_num);
224 ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1],
225 &parse_u16_arg, ¶ms->socket_id);
229 if (params->socket_id >= RTE_MAX_NUMA_NODES) {
230 rte_bbdev_log(ERR, "Invalid socket, must be < %u",
238 rte_kvargs_free(kvlist);
244 null_bbdev_create(struct rte_vdev_device *vdev,
245 struct bbdev_null_params *init_params)
247 struct rte_bbdev *bbdev;
248 const char *name = rte_vdev_device_name(vdev);
250 bbdev = rte_bbdev_allocate(name);
254 bbdev->data->dev_private = rte_zmalloc_socket(name,
255 sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
256 init_params->socket_id);
257 if (bbdev->data->dev_private == NULL) {
258 rte_bbdev_release(bbdev);
262 bbdev->dev_ops = &pmd_ops;
263 bbdev->device = &vdev->device;
264 bbdev->data->socket_id = init_params->socket_id;
265 bbdev->intr_handle = NULL;
267 /* register rx/tx burst functions for data path */
268 bbdev->dequeue_enc_ops = dequeue_enc_ops;
269 bbdev->dequeue_dec_ops = dequeue_dec_ops;
270 bbdev->enqueue_enc_ops = enqueue_enc_ops;
271 bbdev->enqueue_dec_ops = enqueue_dec_ops;
272 ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
273 init_params->queues_num;
278 /* Initialise device */
280 null_bbdev_probe(struct rte_vdev_device *vdev)
282 struct bbdev_null_params init_params = {
284 RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
287 const char *input_args;
292 name = rte_vdev_device_name(vdev);
296 input_args = rte_vdev_device_args(vdev);
297 parse_bbdev_null_params(&init_params, input_args);
299 rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d",
300 name, init_params.socket_id, init_params.queues_num);
302 return null_bbdev_create(vdev, &init_params);
305 /* Uninitialise device */
307 null_bbdev_remove(struct rte_vdev_device *vdev)
309 struct rte_bbdev *bbdev;
315 name = rte_vdev_device_name(vdev);
319 bbdev = rte_bbdev_get_named_dev(name);
323 rte_free(bbdev->data->dev_private);
325 return rte_bbdev_release(bbdev);
328 static struct rte_vdev_driver bbdev_null_pmd_drv = {
329 .probe = null_bbdev_probe,
330 .remove = null_bbdev_remove
333 RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
334 RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
335 BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
336 BBDEV_NULL_SOCKET_ID_ARG"=<int>");
339 RTE_INIT(null_bbdev_init_log);
341 null_bbdev_init_log(void)
343 bbdev_logtype = rte_log_register("pmd.bbdev.null");
344 if (bbdev_logtype >= 0)
345 rte_log_set_level(bbdev_logtype, RTE_LOG_NOTICE);