X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx%2Fssovf_evdev.h;h=1cdc81047d5c461fc7fc4a8fa8a8d1e7f86b4fb5;hb=f0e0e86aa35d895d4e5a0d95f054b701bcdf3c9e;hp=b8b89a77c82c7546b7fc7a64c1c8c087432694cd;hpb=34498de6000f844fb5de9fd605a466b1b465c5c3;p=dpdk.git

diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index b8b89a77c8..1cdc81047d 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -1,7 +1,7 @@
 /*
  * BSD LICENSE
  *
- * Copyright (C) Cavium networks Ltd. 2017.
+ * Copyright (C) Cavium, Inc. 2017.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
  *     notice, this list of conditions and the following disclaimer in
  *     the documentation and/or other materials provided with the
  *     distribution.
- *   * Neither the name of Cavium networks nor the names of its
+ *   * Neither the name of Cavium, Inc nor the names of its
  *     contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
@@ -34,7 +34,7 @@
 #define __SSOVF_EVDEV_H__

 #include 
-#include 
+#include 
 #include 

 #include "rte_pmd_octeontx_ssovf.h"
@@ -115,20 +115,93 @@
 #define SSO_GETDOMAINCFG 0x1
 #define SSO_IDENTIFY 0x2
 #define SSO_GET_DEV_INFO 0x3
+#define SSO_GET_GETWORK_WAIT 0x4
+#define SSO_SET_GETWORK_WAIT 0x5
+#define SSO_CONVERT_NS_GETWORK_ITER 0x6
+#define SSO_GRP_GET_PRIORITY 0x7
+#define SSO_GRP_SET_PRIORITY 0x8
+
+/*
+ * In the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO operations
+ * are safe to use without any IO memory barriers.
+ */
+#define ssovf_read64 rte_read64_relaxed
+#define ssovf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define ssovf_load_pair(val0, val1, addr) ({		\
+	asm volatile(					\
+	"ldp %x[x0], %x[x1], [%x[p1]]"			\
+	:[x0]"=r"(val0), [x1]"=r"(val1)			\
+	:[p1]"r"(addr)					\
+	); })
+
+#define ssovf_store_pair(val0, val1, addr) ({		\
+	asm volatile(					\
+	"stp %x[x0], %x[x1], [%x[p1]]"			\
+	::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr)	\
+	); })
+#else /* Unoptimized functions for building on non-arm64 arch */
+
+#define ssovf_load_pair(val0, val1, addr)		\
+do {							\
+	val0 = rte_read64(addr);			\
+	val1 = rte_read64(((uint8_t *)addr) + 8);	\
+} while (0)
+
+#define ssovf_store_pair(val0, val1, addr)		\
+do {							\
+	rte_write64(val0, addr);			\
+	rte_write64(val1, (((uint8_t *)addr) + 8));	\
+} while (0)
+#endif
+
 struct ssovf_evdev {
 	uint8_t max_event_queues;
 	uint8_t max_event_ports;
 	uint8_t is_timeout_deq;
+	uint8_t nb_event_queues;
+	uint8_t nb_event_ports;
 	uint32_t min_deq_timeout_ns;
 	uint32_t max_deq_timeout_ns;
 	int32_t max_num_events;
 } __rte_cache_aligned;

+/* Event port aka HWS */
+struct ssows {
+	uint8_t cur_tt;
+	uint8_t cur_grp;
+	uint8_t swtag_req;
+	uint8_t *base;
+	uint8_t *getwork;
+	uint8_t *grps[SSO_MAX_VHGRP];
+	uint8_t port;
+} __rte_cache_aligned;
+
 static inline struct ssovf_evdev *
 ssovf_pmd_priv(const struct rte_eventdev *eventdev)
 {
 	return eventdev->data->dev_private;
 }

+uint16_t ssows_enq(void *port, const struct rte_event *ev);
+uint16_t ssows_enq_burst(void *port,
+		const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_new_burst(void *port,
+		const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_enq_fwd_burst(void *port,
+		const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
+uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
+		uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+void ssows_reset(struct ssows *ws);
+
 #endif /* __SSOVF_EVDEV_H__ */
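
Editor's note: the patch above adds relaxed IO wrappers and 128-bit load/store pair helpers that the HWS (event port) fast path builds on. The short sketch below is not part of the patch; it is a minimal illustration of how those helpers might be exercised, assuming it is compiled inside the driver where ssovf_evdev.h and the DPDK headers are available. The function name example_hws_pair_access, the hws_base pointer, and the 0x0/0x8 offsets are hypothetical placeholders, not values taken from this header.

/*
 * Illustrative sketch only (not from the patch). The register offsets and
 * the hws_base pointer are made up for the example.
 */
#include <stdint.h>

#include "ssovf_evdev.h"

static inline void
example_hws_pair_access(uint8_t *hws_base)
{
	uint64_t w0, w1;

	/*
	 * Single 128-bit read: one ldp instruction on arm64, two rte_read64()
	 * calls in the generic fallback defined above.
	 */
	ssovf_load_pair(w0, w1, hws_base + 0x0);

	/*
	 * Relaxed accessors are sufficient on OcteonTX because device
	 * register accesses are implicitly strongly ordered.
	 */
	ssovf_write64(w0, hws_base + 0x8);
	w1 = ssovf_read64(hws_base + 0x8);

	/* Write both halves back as a pair (stp on arm64). */
	ssovf_store_pair(w0, w1, hws_base + 0x0);
}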