eventdev: make vdev init and uninit functions optional
[dpdk.git] / drivers / event / octeontx / ssovf_evdev.h
index b8b89a7..03902e4 100644
@@ -34,7 +34,7 @@
 #define __SSOVF_EVDEV_H__
 
 #include <rte_config.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
 #include <rte_io.h>
 
 #include "rte_pmd_octeontx_ssovf.h"
 #define SSO_GETDOMAINCFG                  0x1
 #define SSO_IDENTIFY                      0x2
 #define SSO_GET_DEV_INFO                  0x3
+#define SSO_GET_GETWORK_WAIT              0x4
+#define SSO_SET_GETWORK_WAIT              0x5
+#define SSO_CONVERT_NS_GETWORK_ITER       0x6
+#define SSO_GRP_GET_PRIORITY              0x7
+#define SSO_GRP_SET_PRIORITY              0x8
+
+/*
+ * In the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered. So, the relaxed versions of the IO
+ * operations are safe to use without any IO memory barriers.
+ */
+#define ssovf_read64 rte_read64_relaxed
+#define ssovf_write64 rte_write64_relaxed
+
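
To make the ordering guarantee concrete, here is a minimal sketch (not part of this patch) of a register read-back helper: neither access needs an rte_io_wmb()/rte_io_rmb() pairing. The helper name and the offset parameter are illustrative; real register offsets live in the driver sources.

/* Illustrative only: relaxed store followed by a relaxed load back.
 * No IO barriers are required because OcteonTX device registers are
 * strongly ordered. */
static inline uint64_t
ssovf_reg_rw_sketch(struct ssows *ws, uint64_t val, uint64_t off)
{
	ssovf_write64(val, ws->base + off);
	return ssovf_read64(ws->base + off);
}
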
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define ssovf_load_pair(val0, val1, addr) ({           \
+                       asm volatile(                   \
+                       "ldp %x[x0], %x[x1], [%x[p1]]"  \
+                       :[x0]"=r"(val0), [x1]"=r"(val1) \
+                       :[p1]"r"(addr)                  \
+                       ); })
+
+#define ssovf_store_pair(val0, val1, addr) ({          \
+                       asm volatile(                   \
+                       "stp %x[x0], %x[x1], [%x[p1]]"  \
+                       ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+                       ); })
+#else /* Unoptimized functions for building on non-arm64 architectures */
+
+#define ssovf_load_pair(val0, val1, addr)              \
+do {                                                   \
+       val0 = rte_read64(addr);                        \
+       val1 = rte_read64(((uint8_t *)addr) + 8);       \
+} while (0)
+
+#define ssovf_store_pair(val0, val1, addr)             \
+do {                                                   \
+       rte_write64(val0, addr);                        \
+       rte_write64(val1, (((uint8_t *)addr) + 8));     \
+} while (0)
+#endif
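
To show how both pair macros are meant to be consumed, here is a hedged sketch of a get_work/add_work pair. The 128-bit register layout (tag word plus work pointer) follows this driver's fast-path pattern, but the exact field encoding and function names below are assumptions:

/* Illustrative fragments, not part of this patch. */

/* Pull one event: a single 16-byte load of the get_work response. */
static __rte_always_inline void
ssows_get_work_sketch(struct ssows *ws, struct rte_event *ev)
{
	uint64_t get_work0, get_work1;

	ssovf_load_pair(get_work0, get_work1, ws->getwork);
	ev->event = get_work0; /* tag/tt/grp word (layout assumed) */
	ev->u64 = get_work1;   /* work (WQE) pointer (assumed) */
}

/* Push one event: a single 16-byte store of tag word plus work
 * pointer to the target group's add-work register. */
static __rte_always_inline void
ssows_add_work_sketch(struct ssows *ws, const struct rte_event *ev)
{
	ssovf_store_pair(ev->event, ev->u64, ws->grps[ev->queue_id]);
}
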
+
 
 struct ssovf_evdev {
        uint8_t max_event_queues;
        uint8_t max_event_ports;
        uint8_t is_timeout_deq;
+       uint8_t nb_event_queues;
+       uint8_t nb_event_ports;
        uint32_t min_deq_timeout_ns;
        uint32_t max_deq_timeout_ns;
        int32_t max_num_events;
 } __rte_cache_aligned;
 
+/* Event port aka HWS (hardware workslot) */
+struct ssows {
+       uint8_t cur_tt;                /* Tag type of the current event */
+       uint8_t cur_grp;               /* Group of the current event */
+       uint8_t swtag_req;             /* Switch-tag request pending */
+       uint8_t *base;                 /* HWS register base address */
+       uint8_t *getwork;              /* GET_WORK register address */
+       uint8_t *grps[SSO_MAX_VHGRP];  /* Per-group register bases */
+       uint8_t port;                  /* Event port identifier */
+} __rte_cache_aligned;
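
For context, a hedged sketch of how port setup might populate these pointers so the fast path only does pointer arithmetic. octeontx_ssovf_bar() comes from the included rte_pmd_octeontx_ssovf.h, but the BAR indices and the SSOW_VHWS_OP_GET_WORK0 offset used here are illustrative assumptions:

/* Illustrative port-setup fragment, not part of this patch. */
ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
ws->getwork = ws->base + SSOW_VHWS_OP_GET_WORK0; /* offset assumed */
for (q = 0; q < edev->nb_event_queues; q++)
	ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
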
+
 static inline struct ssovf_evdev *
 ssovf_pmd_priv(const struct rte_eventdev *eventdev)
 {
        return eventdev->data->dev_private;
 }
 
+uint16_t ssows_enq(void *port, const struct rte_event *ev);
+uint16_t ssows_enq_burst(void *port,
+               const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
+uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
+               uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
+               uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
+               uint16_t nb_events, uint64_t timeout_ticks);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+void ssows_reset(struct ssows *ws);
+
 #endif /* __SSOVF_EVDEV_H__ */
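
These worker prototypes match the eventdev fast-path function signatures, so they can be assigned directly to the device's function pointers. A hedged sketch of that wiring follows; the real assignment lives in the driver's C file, and the function name here is an assumption based on common PMD patterns:

/* Hypothetical fast-path wiring, not part of this patch. */
static void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev->enqueue = ssows_enq;
	dev->enqueue_burst = ssows_enq_burst;
	dev->dequeue = ssows_deq;
	dev->dequeue_burst = ssows_deq_burst;

	/* Switch to the timeout-aware variants when the device was
	 * configured with a dequeue timeout. */
	if (edev->is_timeout_deq) {
		dev->dequeue = ssows_deq_timeout;
		dev->dequeue_burst = ssows_deq_timeout_burst;
	}
}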