/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>

#include "ssovf_evdev.h"
/* SSO PF Mailbox messages */
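/*
 * Each request below fills an octeontx_mbox_hdr with the target
 * coprocessor (SSO_COPROC), a message id and a VF id, then calls
 * octeontx_ssovf_mbox_send() with optional request/response buffers.
 * On success the call returns the number of response bytes received,
 * which is why callers compare the return value against the expected
 * response length.
 */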
struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
}
struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}
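/*
 * Group priority: the eventdev API expresses queue priority in the
 * 0-255 range (RTE_EVENT_DEV_PRIORITY_HIGHEST..LOWEST) while an SSO
 * group supports only 8 levels, hence the prio / 32 normalization in
 * ssovf_mbox_priority_set() below.
 */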
struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight; /* Reserved */
	uint8_t affinity; /* Reserved */
	uint8_t priority; /* 0 is highest */
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}
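/*
 * The SSO expresses a dequeue timeout as a number of GET_WORK polling
 * iterations rather than a timer value; the PF does the ns to
 * iteration-count conversion, and ssovf_timeout_ticks() below simply
 * reports the result through the eventdev timeout_ticks op.
 */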
struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%" PRId64, ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}
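/*
 * Fast path handler selection. The port enqueue/dequeue depth
 * advertised in ssovf_info_get() is 1, so the burst variants operate
 * on one event at a time; the timeout-aware dequeue handlers are wired
 * up only when the device was configured with
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (edev->is_timeout_deq).
 */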
static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}
static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}
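/*
 * A dequeue_timeout_ns of 0 selects the device minimum. With
 * per-dequeue timeouts the global getwork wait is likewise pinned to
 * the minimum, and the per-call timeout is honoured by the
 * timeout-aware handlers chosen in ssovf_fastpath_fns_set().
 */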
static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}
static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}
static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}
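/*
 * An event port maps to one SSOW hardware work slot (HWS). Dequeue
 * polls a "getwork" address computed below from the GET_WORK0
 * operation offset: bit 4 selects group-mask set zero and bit 16
 * requests hardware wait, so the polling semantics are encoded in the
 * load address itself.
 */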
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
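/*
 * Queue-to-port links are programmed through the HWS GRPMSK_CHG
 * register: the low bits carry the group (queue) id and bit 24
 * selects whether membership in the work slot's group mask is set or
 * cleared, as the link/unlink pair below shows.
 */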
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}
static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}
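/*
 * Decode and print the per-HWS debug registers: current and pending
 * tag/WQP state plus the tag-chain links, unpacking the bitfields of
 * each 64-bit register read.
 */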
static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}
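/*
 * Rx adapter support. Only ethdevs backed by the octeontx PMD (name
 * prefix "eth_octeontx") can inject packets into the SSO directly and
 * therefore report RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT; any
 * other ethdev falls back to the SW adapter capabilities.
 */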
static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
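/*
 * Adding an Rx queue programs the PKI (packet input) QoS entry of the
 * ethdev so that received packets are tagged with the requested
 * sched_type and steered to the event queue (SSO group) given in
 * queue_conf->ev. Only the "all Rx queues" binding (rx_queue_id < 0)
 * is supported.
 */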
static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_mod_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	return ret;
}
static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	int ret = 0;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	/* Clear the whole request first, then fill the fields, so the
	 * memset cannot wipe out the assignments.
	 */
	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}
static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	octeontx_pki_port_start(nic->port_id);
	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev)
{
	int ret;
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	octeontx_pki_port_stop(nic->port_id);
	return 0;
}
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}
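/*
 * Start/stop reset every HWS and drain each event queue through HWS0
 * (ssows_flush_events()) before toggling the group's QCTL enable bit,
 * so no stale events survive the state transition.
 */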
static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}
static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}
static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}
/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get = ssovf_info_get,
	.dev_configure = ssovf_configure,
	.queue_def_conf = ssovf_queue_def_conf,
	.queue_setup = ssovf_queue_setup,
	.queue_release = ssovf_queue_release,
	.port_def_conf = ssovf_port_def_conf,
	.port_setup = ssovf_port_setup,
	.port_release = ssovf_port_release,
	.port_link = ssovf_port_link,
	.port_unlink = ssovf_port_unlink,
	.timeout_ticks = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.dump = ssovf_dump,
	.dev_start = ssovf_start,
	.dev_stop = ssovf_stop,
	.dev_close = ssovf_close
};
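/*
 * The probe below enforces a single device instance per process and,
 * for secondary processes, only re-attaches the fast path handlers
 * since the primary process has already done the hardware setup.
 */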
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%" PRId64 " max_deq_tmo=%" PRId64
			" max_evts=%d", info.min_deq_timeout_ns,
			info.max_deq_timeout_ns, info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);