/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <inttypes.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_vdev.h>

#include "ssovf_evdev.h"
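/*
 * Note: every configuration request below follows the same mailbox pattern,
 * summarized here for readability. An octeontx_mbox_hdr is filled with the
 * target coprocessor (SSO_COPROC), a message id and a VF id, and the request
 * is issued with octeontx_ssovf_mbox_send(&hdr, req, req_len, resp, resp_len).
 * On success the call is expected to return the number of response bytes
 * written back (callers such as ssovf_vdev_probe() compare it against the
 * expected reply size); a negative value indicates failure.
 */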
/* SSOPF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
}
struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}
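/*
 * Note on the priority mapping used below: the eventdev API expresses queue
 * priority in the 0..255 range (RTE_EVENT_DEV_PRIORITY_HIGHEST to
 * RTE_EVENT_DEV_PRIORITY_LOWEST), while the SSO group supports 8 priority
 * levels (see max_event_queue_priority_levels in ssovf_info_get()). Dividing
 * by 32 folds one range onto the other, e.g. RTE_EVENT_DEV_PRIORITY_NORMAL
 * (128) maps to hardware level 128 / 32 = 4.
 */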
struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}
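/*
 * Note: the helper below backs the eventdev timeout_ticks op. The PF converts
 * a nanosecond wait into an equivalent number of get_work iterations, which is
 * the unit the dequeue functions poll in. A minimal usage sketch from the
 * application side (not part of this driver) would be:
 *
 *	uint64_t ticks;
 *
 *	rte_event_dequeue_timeout_ticks(dev_id, 10 * 1000, &ticks);
 *	rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks);
 */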
struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */
};

static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret < 0 || (ret != len)) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}
static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->enqueue_new_burst = ssows_enq_new_burst;
	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}
static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}
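/*
 * Note on the timeout handling below: the global dequeue_timeout_ns from
 * rte_event_dev_config is programmed into the SSO getwork wait period. When
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT is requested, the device is kept at
 * the minimum wait and the per-call timeout is instead honoured by the
 * ssows_deq_timeout* dequeue variants selected in ssovf_fastpath_fns_set().
 */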
static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}
static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
				const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}
static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}
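/*
 * Note for the port setup below: an event port corresponds to one SSOWVF
 * hardware workslot (struct ssows, defined in the driver headers). ws->base
 * points at the workslot BAR, ws->getwork is the pre-computed GET_WORK load
 * address (maskset and wait bits folded into the offset), and ws->grps[]
 * caches the per-group BARs used when enqueuing new events to a group.
 */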
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}
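/*
 * Note on the (un)link operations below: group membership of a workslot is
 * changed by writing the queue (group) id to SSOW_VHWS_GRPMSK_CHGX(0), with
 * bit 24 selecting whether the group is added (set) or removed (cleared)
 * from maskset zero, the same maskset selected by the getwork address
 * computed in ssovf_port_setup().
 */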
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
				const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}
static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
				uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}
static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}
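/*
 * Note: ssows_dump() below decodes the per-workslot CSRs (TAG, WQP, LINKS,
 * PENDTAG, PENDWQP) field by field. It is reached through the eventdev dump
 * op (ssovf_dump), typically via rte_event_dev_dump(dev_id, stdout).
 */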
static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}
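/*
 * Rx adapter glue. Note: when the paired ethdev is the octeontx PMD (device
 * name "eth_octeontx"), the PKI hardware can deliver received packets to the
 * SSO as events directly, so RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is
 * reported and no software service core is needed; for any other ethdev the
 * generic SW adapter capabilities are reported instead.
 */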
static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;

	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}
static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_mod_qos_t pki_qos;
	int ret = 0;

	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (rx_queue_id >= 0)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	return ret;
}
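/*
 * Usage note (application-side sketch): the op above is reached through
 * rte_event_eth_rx_adapter_queue_add(); with this PMD the rx_queue_id must be
 * -1 (apply to all Rx queues of the ethdev) and the target sched_type must
 * not be RTE_SCHED_TYPE_PARALLEL, matching the checks performed above.
 */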
static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	pki_del_qos_t pki_qos;
	int ret = 0;

	RTE_SET_USED(dev);
	RTE_SET_USED(rx_queue_id);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);

	return ret;
}
static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;

	octeontx_pki_port_start(nic->port_id);
	return 0;
}

static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	int ret;

	RTE_SET_USED(dev);
	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return 0;

	octeontx_pki_port_stop(nic->port_id);
	return 0;
}
static void
ssovf_dump(struct rte_eventdev *dev, FILE *f)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t port;

	/* Dump SSOWVF debug registers */
	for (port = 0; port < edev->nb_event_ports; port++)
		ssows_dump(dev->data->ports[port], f);
}
static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}
static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i);

		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}
static void
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
}
/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
	.dev_infos_get    = ssovf_info_get,
	.dev_configure    = ssovf_configure,
	.queue_def_conf   = ssovf_queue_def_conf,
	.queue_setup      = ssovf_queue_setup,
	.queue_release    = ssovf_queue_release,
	.port_def_conf    = ssovf_port_def_conf,
	.port_setup       = ssovf_port_setup,
	.port_release     = ssovf_port_release,
	.port_link        = ssovf_port_link,
	.port_unlink      = ssovf_port_unlink,
	.timeout_ticks    = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.dump             = ssovf_dump,
	.dev_start        = ssovf_start,
	.dev_stop         = ssovf_stop,
	.dev_close        = ssovf_close
};
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct octeontx_ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id());
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	ret = octeontx_ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
			info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}
static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
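/*
 * Usage note: the PMD registers under EVENTDEV_NAME_OCTEONTX_PMD, which the
 * driver headers define as event_octeontx, so the device is typically
 * instantiated from the EAL command line with --vdev="event_octeontx"
 * (assuming that macro value; see ssovf_evdev.h for the authoritative name).
 */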