event/octeontx: add start function
author	Jerin Jacob <jerin.jacob@caviumnetworks.com>
Fri, 3 Mar 2017 17:28:02 +0000 (22:58 +0530)
committer	Jerin Jacob <jerin.jacob@caviumnetworks.com>
Tue, 4 Apr 2017 17:19:53 +0000 (19:19 +0200)

Add the eventdev start operation (dev_start) for the octeontx SSO PMD.
On start, each event port (HWS) is reset and any held or pending
ORDERED/ATOMIC tag is released, stale events in every event queue (SSO
group) are flushed through HWS0, the SSO groups are enabled, and the
fast-path enqueue/dequeue function pointers are installed. Secondary
processes now also get the fast-path pointers installed at probe time,
since the primary process has already performed the device setup.
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Acked-by: Gage Eads <gage.eads@intel.com>
drivers/event/octeontx/ssovf_evdev.c
drivers/event/octeontx/ssovf_evdev.h
drivers/event/octeontx/ssovf_worker.c

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index c71427e..102e224 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -148,6 +148,23 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
        return 0;
 }
 
+static void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+       struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+       dev->schedule      = NULL;
+       dev->enqueue       = ssows_enq;
+       dev->enqueue_burst = ssows_enq_burst;
+       dev->dequeue       = ssows_deq;
+       dev->dequeue_burst = ssows_deq_burst;
+
+       if (edev->is_timeout_deq) {
+               dev->dequeue       = ssows_deq_timeout;
+               dev->dequeue_burst = ssows_deq_timeout_burst;
+       }
+}
+
 static void
 ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
 {
@@ -382,6 +399,33 @@ ssovf_dump(struct rte_eventdev *dev, FILE *f)
                ssows_dump(dev->data->ports[port], f);
 }
 
+static int
+ssovf_start(struct rte_eventdev *dev)
+{
+       struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+       struct ssows *ws;
+       uint8_t *base;
+       uint8_t i;
+
+       ssovf_func_trace();
+       for (i = 0; i < edev->nb_event_ports; i++) {
+               ws = dev->data->ports[i];
+               ssows_reset(ws);
+               ws->swtag_req = 0;
+       }
+
+       for (i = 0; i < edev->nb_event_queues; i++) {
+               /* Consume all the events through HWS0 */
+               ssows_flush_events(dev->data->ports[0], i);
+
+               base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+               base += SSO_VHGRP_QCTL;
+               ssovf_write64(1, base); /* Enable SSO group */
+       }
+
+       ssovf_fastpath_fns_set(dev);
+       return 0;
+}
 /* Initialize and register event driver with DPDK Application */
 static const struct rte_eventdev_ops ssovf_ops = {
        .dev_infos_get    = ssovf_info_get,
@@ -396,6 +440,7 @@ static const struct rte_eventdev_ops ssovf_ops = {
        .port_unlink      = ssovf_port_unlink,
        .timeout_ticks    = ssovf_timeout_ticks,
        .dump             = ssovf_dump,
+       .dev_start        = ssovf_start,
 };
 
 static int
@@ -425,8 +470,10 @@ ssovf_vdev_probe(const char *name, const char *params)
        eventdev->dev_ops = &ssovf_ops;
 
        /* For secondary processes, the primary has done all the work */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               ssovf_fastpath_fns_set(eventdev);
                return 0;
+       }
 
        ret = octeontx_ssovf_info(&oinfo);
        if (ret) {
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 2cd9147..6e0a352 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -197,5 +197,7 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
                uint64_t timeout_ticks);
 uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
                uint16_t nb_events, uint64_t timeout_ticks);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+void ssows_reset(struct ssows *ws);
 
 #endif /* __SSOVF_EVDEV_H__ */
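
The *_timeout dequeue variants declared above are only wired in when the
application configured a dequeue timeout (see ssovf_fastpath_fns_set()
and is_timeout_deq in the first file). A hedged worker-loop sketch
follows, with an assumed device/port ID and an assumed 100 us timeout;
the nanosecond-to-tick conversion goes through the timeout_ticks op,
which this PMD backs with the ssovf_mbox_timeout_ticks() mailbox request
visible in the first hunk's context.

#include <rte_eventdev.h>

/* Illustrative worker loop (dev_id/port_id and the 100 us timeout are
 * assumptions). With a timeout configured, dequeues land in
 * ssows_deq_timeout()/ssows_deq_timeout_burst(); otherwise in
 * ssows_deq()/ssows_deq_burst(). */
static void
app_worker_loop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;
	uint64_t ticks = 0;

	/* Convert 100 us to device ticks. The per-call timeout is only
	 * honoured if RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT was set at
	 * configure time; otherwise the global dequeue timeout applies. */
	rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks);

	for (;;) {
		if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, ticks) == 0)
			continue;

		/* ... process the event, then release its tag/flow ... */
		ev.op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
	}
}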
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 52c4d41..6a99df0 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -199,3 +199,54 @@ ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
        RTE_SET_USED(nb_events);
        return ssows_enq(port, ev);
 }
+
+void
+ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+{
+       uint32_t reg_off;
+       uint64_t aq_cnt = 1;
+       uint64_t cq_ds_cnt = 1;
+       uint64_t enable, get_work0, get_work1;
+       uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+
+       RTE_SET_USED(get_work0);
+       RTE_SET_USED(get_work1);
+
+       enable = ssovf_read64(base + SSO_VHGRP_QCTL);
+       if (!enable)
+               return;
+
+       reg_off = SSOW_VHWS_OP_GET_WORK0;
+       reg_off |= 1 << 17; /* Grouped */
+       reg_off |= 1 << 16; /* WAIT */
+       reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
+       while (aq_cnt || cq_ds_cnt) {
+               aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
+               cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
+               /* Extract cq and ds count */
+               cq_ds_cnt &= 0x1FFF1FFF0000;
+               ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+       }
+}
+
+void
+ssows_reset(struct ssows *ws)
+{
+       uint64_t tag;
+       uint64_t pend_tag;
+       uint8_t pend_tt;
+       uint8_t tt;
+
+       tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
+       pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);
+
+       if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
+               pend_tt = (pend_tag >> 32) & 0x3;
+               if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
+                       ssows_desched(ws);
+       } else {
+               tt = (tag >> 32) & 0x3;
+               if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
+                       ssows_swtag_untag(ws);
+       }
+}
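
A note on the flush loop's termination condition above: it keeps issuing
GET_WORK on HWS0 until SSO_VHGRP_AQ_CNT and the masked SSO_VHGRP_INT_CNT
both read zero. The 0x1FFF1FFF0000 mask keeps two 13-bit counters, at
bit offsets 16 and 32; per the "Extract cq and ds count" comment these
are taken to be the CQ and DS counts, though which field is which is an
assumption here, not something the patch states. A small sketch of that
arithmetic:

#include <stdint.h>

/* Sketch of the field extraction implied by the 0x1FFF1FFF0000 mask in
 * ssows_flush_events(): two 13-bit counters at bits [28:16] and [44:32].
 * The cq/ds naming mirrors the in-code comment; the exact register
 * layout is an assumption, not taken from the hardware manual. */
static inline int
ssovf_grp_work_pending(uint64_t aq_cnt, uint64_t int_cnt)
{
	uint64_t cnt_a = (int_cnt >> 16) & 0x1FFF;	/* bits [28:16] */
	uint64_t cnt_b = (int_cnt >> 32) & 0x1FFF;	/* bits [44:32] */

	/* Equivalent to the loop condition: keep draining while either
	 * the admission queue or the masked INT_CNT fields are non-zero. */
	return (aq_cnt != 0) || (cnt_a != 0) || (cnt_b != 0);
}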