1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
16 #define DRIVER_NAME baseband_null
18 RTE_LOG_REGISTER(bbdev_null_logtype, pmd.bb.null, NOTICE);
20 /* Helper macro for logging */
21 #define rte_bbdev_log(level, fmt, ...) \
22 rte_log(RTE_LOG_ ## level, bbdev_null_logtype, fmt "\n", ##__VA_ARGS__)
24 #define rte_bbdev_log_debug(fmt, ...) \
25 rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
/* Initialisation params structure that can be used by null BBDEV driver */
struct bbdev_null_params {
	int socket_id;  /**< Null BBDEV socket */
	uint16_t queues_num;  /**< Null BBDEV queues number */
};
/* Acceptable params for null BBDEV devices */
#define BBDEV_NULL_MAX_NB_QUEUES_ARG	"max_nb_queues"
#define BBDEV_NULL_SOCKET_ID_ARG	"socket_id"

/* All devargs keys this driver accepts; consumed by rte_kvargs_parse() */
static const char * const bbdev_null_valid_params[] = {
	BBDEV_NULL_MAX_NB_QUEUES_ARG,
	BBDEV_NULL_SOCKET_ID_ARG
};
/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues;  /**< Max number of queues */
};

/* Queue structure */
struct bbdev_queue {
	struct rte_ring *processed_pkts;  /* Ring for processed packets */
} __rte_cache_aligned;
55 info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
57 struct bbdev_private *internals = dev->data->dev_private;
59 static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
60 RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
63 static struct rte_bbdev_queue_conf default_queue_conf = {
64 .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
67 default_queue_conf.socket = dev->data->socket_id;
69 dev_info->driver_name = RTE_STR(DRIVER_NAME);
70 dev_info->max_num_queues = internals->max_nb_queues;
71 dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
72 dev_info->hardware_accelerated = false;
73 dev_info->max_dl_queue_priority = 0;
74 dev_info->max_ul_queue_priority = 0;
75 dev_info->default_queue_conf = default_queue_conf;
76 dev_info->capabilities = bbdev_capabilities;
77 dev_info->cpu_flag_reqs = NULL;
78 dev_info->min_alignment = 0;
80 rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
85 q_release(struct rte_bbdev *dev, uint16_t q_id)
87 struct bbdev_queue *q = dev->data->queues[q_id].queue_private;
90 rte_ring_free(q->processed_pkts);
92 dev->data->queues[q_id].queue_private = NULL;
95 rte_bbdev_log_debug("released device queue %u:%u",
96 dev->data->dev_id, q_id);
102 q_setup(struct rte_bbdev *dev, uint16_t q_id,
103 const struct rte_bbdev_queue_conf *queue_conf)
105 struct bbdev_queue *q;
106 char ring_name[RTE_RING_NAMESIZE];
107 snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
108 dev->data->dev_id, q_id);
110 /* Allocate the queue data structure. */
111 q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
112 RTE_CACHE_LINE_SIZE, queue_conf->socket);
114 rte_bbdev_log(ERR, "Failed to allocate queue memory");
118 q->processed_pkts = rte_ring_create(ring_name, queue_conf->queue_size,
119 queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
120 if (q->processed_pkts == NULL) {
121 rte_bbdev_log(ERR, "Failed to create ring");
125 dev->data->queues[q_id].queue_private = q;
126 rte_bbdev_log_debug("setup device queue %s", ring_name);
134 static const struct rte_bbdev_ops pmd_ops = {
135 .info_get = info_get,
136 .queue_setup = q_setup,
137 .queue_release = q_release
140 /* Enqueue decode burst */
142 enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
143 struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
145 struct bbdev_queue *q = q_data->queue_private;
146 uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
147 (void **)ops, nb_ops, NULL);
149 q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
150 q_data->queue_stats.enqueued_count += nb_enqueued;
155 /* Enqueue encode burst */
157 enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
158 struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
160 struct bbdev_queue *q = q_data->queue_private;
161 uint16_t nb_enqueued = rte_ring_enqueue_burst(q->processed_pkts,
162 (void **)ops, nb_ops, NULL);
164 q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
165 q_data->queue_stats.enqueued_count += nb_enqueued;
170 /* Dequeue decode burst */
172 dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
173 struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
175 struct bbdev_queue *q = q_data->queue_private;
176 uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
177 (void **)ops, nb_ops, NULL);
178 q_data->queue_stats.dequeued_count += nb_dequeued;
183 /* Dequeue encode burst */
185 dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
186 struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
188 struct bbdev_queue *q = q_data->queue_private;
189 uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
190 (void **)ops, nb_ops, NULL);
191 q_data->queue_stats.dequeued_count += nb_dequeued;
196 /* Parse 16bit integer from string argument */
198 parse_u16_arg(const char *key, const char *value, void *extra_args)
200 uint16_t *u16 = extra_args;
201 unsigned int long result;
203 if ((value == NULL) || (extra_args == NULL))
206 result = strtoul(value, NULL, 0);
207 if ((result >= (1 << 16)) || (errno != 0)) {
208 rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
211 *u16 = (uint16_t)result;
215 /* Parse parameters used to create device */
217 parse_bbdev_null_params(struct bbdev_null_params *params,
218 const char *input_args)
220 struct rte_kvargs *kvlist = NULL;
226 kvlist = rte_kvargs_parse(input_args, bbdev_null_valid_params);
230 ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[0],
231 &parse_u16_arg, ¶ms->queues_num);
235 ret = rte_kvargs_process(kvlist, bbdev_null_valid_params[1],
236 &parse_u16_arg, ¶ms->socket_id);
240 if (params->socket_id >= RTE_MAX_NUMA_NODES) {
241 rte_bbdev_log(ERR, "Invalid socket, must be < %u",
249 rte_kvargs_free(kvlist);
255 null_bbdev_create(struct rte_vdev_device *vdev,
256 struct bbdev_null_params *init_params)
258 struct rte_bbdev *bbdev;
259 const char *name = rte_vdev_device_name(vdev);
261 bbdev = rte_bbdev_allocate(name);
265 bbdev->data->dev_private = rte_zmalloc_socket(name,
266 sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
267 init_params->socket_id);
268 if (bbdev->data->dev_private == NULL) {
269 rte_bbdev_release(bbdev);
273 bbdev->dev_ops = &pmd_ops;
274 bbdev->device = &vdev->device;
275 bbdev->data->socket_id = init_params->socket_id;
276 bbdev->intr_handle = NULL;
278 /* register rx/tx burst functions for data path */
279 bbdev->dequeue_enc_ops = dequeue_enc_ops;
280 bbdev->dequeue_dec_ops = dequeue_dec_ops;
281 bbdev->enqueue_enc_ops = enqueue_enc_ops;
282 bbdev->enqueue_dec_ops = enqueue_dec_ops;
283 ((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
284 init_params->queues_num;
289 /* Initialise device */
291 null_bbdev_probe(struct rte_vdev_device *vdev)
293 struct bbdev_null_params init_params = {
295 RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
298 const char *input_args;
303 name = rte_vdev_device_name(vdev);
307 input_args = rte_vdev_device_args(vdev);
308 parse_bbdev_null_params(&init_params, input_args);
310 rte_bbdev_log_debug("Init %s on NUMA node %d with max queues: %d",
311 name, init_params.socket_id, init_params.queues_num);
313 return null_bbdev_create(vdev, &init_params);
316 /* Uninitialise device */
318 null_bbdev_remove(struct rte_vdev_device *vdev)
320 struct rte_bbdev *bbdev;
326 name = rte_vdev_device_name(vdev);
330 bbdev = rte_bbdev_get_named_dev(name);
334 rte_free(bbdev->data->dev_private);
336 return rte_bbdev_release(bbdev);
339 static struct rte_vdev_driver bbdev_null_pmd_drv = {
340 .probe = null_bbdev_probe,
341 .remove = null_bbdev_remove
344 RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
345 RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
346 BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
347 BBDEV_NULL_SOCKET_ID_ARG"=<int>");
348 RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);