/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"
static uint8_t timvf_enable_stats;

RTE_LOG_REGISTER(otx_logtype_ssovf, pmd.event.octeontx, NOTICE);

/* SSOPF Mailbox messages */
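
/*
 * All slow-path configuration is proxied to the SSO PF driver through the
 * octeontx mailbox: the VF fills an octeontx_mbox_hdr (coprocessor id,
 * message id, VF id) and octeontx_mbox_send() returns a negative value on
 * failure or, for messages with a response, the length of the response
 * payload (which is why callers below compare the return value to len).
 */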
struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};
static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}
struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};
static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}
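
/*
 * The getwork wait period bounds how long a hardware GET_WORK operation
 * waits for work before returning empty; eventdev dequeue timeouts are
 * layered on top of it (see ssovf_configure() below).
 */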
struct ssovf_mbox_grp_pri {
	uint8_t vhgrp_id;
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};
static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.vhgrp_id = queue;
	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}
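
/*
 * The eventdev API expresses queue priority in the 0-255 range, while the
 * SSO group supports only 8 levels; prio / 32 above maps one range onto
 * the other, matching max_event_queue_priority_levels = 8 reported by
 * ssovf_info_get().
 */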
struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};
static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}
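
/*
 * The SSO expresses a dequeue timeout as a number of GET_WORK iterations
 * rather than nanoseconds, so the conversion is delegated to the PF driver
 * via SSO_CONVERT_NS_GETWORK_ITER and the result is handed back to the
 * application as an opaque tick count.
 */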
static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}
static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}
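
/*
 * With RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT the application passes a
 * timeout on each dequeue call, so the device-wide getwork wait is pinned
 * to the minimum here and is_timeout_deq flags the fastpath to honour the
 * per-call value instead.
 */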
static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}
static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->disable_implicit_release = 0;
}
static void
ssovf_port_release(void *port)
{
	rte_free(port);
}
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;
	ws->lookup_mem = octeontx_fastpath_lookup_mem_get();

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
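
/*
 * Each event port maps to one SSO hardware workslot (HWS). The getwork
 * address computed above encodes the operation itself: bit 4 selects
 * mask-set zero for the group mask and bit 16 requests a waiting GET_WORK,
 * so the fastpath dequeues with a single load from ws->getwork.
 */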
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
				const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}
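
/*
 * Linking is a per-workslot group-mask update: writing a queue (group) id
 * with bit 24 set to GRPMSK_CHGX(0) adds that group to mask-set zero, the
 * set selected by the getwork address; ssovf_port_unlink() below clears
 * the same bit to remove it.
 */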
static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
				uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}
static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}
static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}
static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
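
/*
 * The octeontx PKI hardware injects received packets straight into the
 * SSO, so the INTERNAL_PORT capability (no service core needed) is
 * reported only when the paired ethdev is "eth_octeontx"; any other
 * ethdev falls back to the software Rx adapter.
 */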
static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	pki_mod_qos_t pki_qos;
	int ret = 0;

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	edev->rx_offload_flags = nic->rx_offload_flags;
	edev->tx_offload_flags = nic->tx_offload_flags;
	return ret;
}
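
/*
 * The QOS entry programmed above steers the whole port into a single
 * event queue: per-Rx-queue additions are rejected (rx_queue_id must be
 * -1) and both the good- and bad-packet group fields point at
 * queue_conf->ev.queue_id.
 */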
static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	int ret = 0;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	/* Clear the whole entry before filling in the fields */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}
static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
static int
ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = 0;
	else
		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
static int
ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	RTE_SET_USED(tx_queue_id);
	return 0;
}

static int
ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}

static int
ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);
	return 0;
}
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}
static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}
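
/*
 * Start and stop both drain every group through workslot 0 before
 * toggling SSO_VHGRP_QCTL; on stop (below) the flushed events are handed
 * to the application's dev_stop_flush callback via ssows_handle_event().
 */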
static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
					dev->data->dev_stop_flush_arg);
}
static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}
static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}
static int
ssovf_selftest(const char *key __rte_unused, const char *value,
		void *opaque)
{
	int *flag = opaque;
	*flag = !!atoi(value);
	return 0;
}
static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
			timvf_enable_stats);
}
/* Initialize and register event driver with DPDK Application */
static struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get,
	.eth_tx_adapter_create = ssovf_eth_tx_adapter_create,
	.eth_tx_adapter_free = ssovf_eth_tx_adapter_free,
	.eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
	.eth_tx_adapter_start = ssovf_eth_tx_adapter_start,
	.eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop,

	.timer_adapter_caps_get = ssovf_timvf_caps_get,

	.dev_selftest = test_eventdev_octeontx,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};
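
/*
 * Usage sketch: the device is instantiated from the EAL command line,
 * e.g. --vdev="event_octeontx,selftest=1,timvf_stats=1" (assuming
 * SSOVF_SELFTEST_ARG and TIMVF_ENABLE_STATS_ARG expand to the "selftest"
 * and "timvf_stats" keys parsed below).
 */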
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	const char *params;
	int ret;
	int selftest = 0;

	static const char *const args[] = {
		SSOVF_SELFTEST_ARG,
		TIMVF_ENABLE_STATS_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			ssovf_log_info(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist,
					SSOVF_SELFTEST_ARG,
					ssovf_selftest, &selftest);
			if (ret != 0) {
				ssovf_log_err("%s: Error in selftest", name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist,
					TIMVF_ENABLE_STATS_ARG,
					ssovf_selftest, &timvf_enable_stats);
			if (ret != 0) {
				ssovf_log_err("%s: Error in timvf stats", name);
				rte_kvargs_free(kvlist);
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	octeontx_mbox_init();
	ret = ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	if (selftest)
		test_eventdev_octeontx();
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);