#ifndef _SW_EVDEV_H_
#define _SW_EVDEV_H_
#include <rte_eventdev.h>
-#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
#define SW_DEFAULT_CREDIT_QUANTA 32
#define EVENTDEV_NAME_SW_PMD event_sw
#define SW_PMD_NAME RTE_STR(event_sw)
+#define SW_PMD_NAME_MAX 64
#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
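+/* Number of dequeue-burst-size buckets for the per-port poll stats below;
+ * each bucket covers (1 << SW_DEQ_STAT_BUCKET_SHIFT) burst sizes, with both
+ * macros defined elsewhere in this header.
+ */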
+#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
+
enum {
QE_FLAG_VALID_SHIFT = 0,
QE_FLAG_COMPLETE_SHIFT,
uint32_t cq_num_mapped_cqs;
uint32_t cq_next_tx; /* cq to which the next (non-atomic) packet is written */
uint32_t cq_map[SW_PORTS_MAX];
+ uint64_t to_port[SW_PORTS_MAX]; /* count of events scheduled to each port */
/* Track flow ids for atomic load balancing */
struct sw_fid_t fids[SW_QID_NUM_FIDS];
int16_t num_ordered_qids;
/** Ring and buffer for pulling events from workers for scheduling */
- struct qe_ring *rx_worker_ring __rte_cache_aligned;
+ struct rte_event_ring *rx_worker_ring __rte_cache_aligned;
/** Ring and buffer for pushing packets to workers after scheduling */
- struct qe_ring *cq_worker_ring;
+ struct rte_event_ring *cq_worker_ring;
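+ /* Both worker rings are rte_event_ring instances, created elsewhere in
+  * the driver along these lines (a sketch; the ring name and size shown
+  * are placeholders, not the driver's actual values):
+  *
+  *   struct rte_event_ring *r = rte_event_ring_create("sw_cq_0",
+  *           MAX_SW_CONS_Q_DEPTH, rte_socket_id(),
+  *           RING_F_SP_ENQ | RING_F_SC_DEQ);
+  */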
/* hole (padding left by alignment) */
uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES bursts */
uint64_t total_polls; /* how many polls were counted in stats */
uint64_t zero_polls; /* tracks polls returning nothing */
- uint32_t poll_buckets[MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT];
+ uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
/* bucket values in 4s for shorter reporting */
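+ /* Illustrative update (a sketch; the real accounting lives in the worker
+  * path): a dequeue burst of n events, 1 <= n <= MAX_SW_CONS_Q_DEPTH,
+  * increments poll_buckets[(n - 1) >> SW_DEQ_STAT_BUCKET_SHIFT].
+  */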
/* History list structs, containing info on pkts egressed to worker */
uint32_t port_count;
uint32_t qid_count;
+ /* xstats for this device: the entry array, total entry count, and
+  * per-mode counts of device-, port- and queue-scoped entries
+  */
+ uint32_t xstats_count;
+ struct sw_xstats_entry *xstats;
+ uint32_t xstats_count_mode_dev;
+ uint32_t xstats_count_mode_port;
+ uint32_t xstats_count_mode_queue;
/* Contains all ports - load balanced and directed */
struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;
/* Cache how many packets are in each cq */
uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;
+ /* Array of pointers to load-balanced QIDs sorted by priority level */
+ struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ /* Stats */
+ struct sw_point_stats stats __rte_cache_aligned;
+ uint64_t sched_called; /* number of scheduler invocations */
int32_t sched_quanta;
+ uint64_t sched_no_iq_enqueues; /* scheduler calls that moved nothing into any IQ */
+ uint64_t sched_no_cq_enqueues; /* scheduler calls that moved nothing into any CQ */
+ uint64_t sched_cq_qid_called; /* qid-to-cq scheduling passes */
+ uint8_t started; /* set once the device has been started */
uint32_t credit_update_quanta;
+
+ /* store num stats and offset of the stats for each port */
+ uint16_t xstats_count_per_port[SW_PORTS_MAX];
+ uint16_t xstats_offset_for_port[SW_PORTS_MAX];
+ /* store num stats and offset of the stats for each queue */
+ uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
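+ /* Assumed layout, inferred from the field names: the entries for port p
+  * are xstats[xstats_offset_for_port[p]] through the following
+  * xstats_count_per_port[p] - 1 entries, and likewise per qid.
+  */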
+
+ char service_name[SW_PMD_NAME_MAX]; /* registered scheduling-service name */
};
static inline struct sw_evdev *
sw_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
uint64_t wait);
+void sw_event_schedule(struct rte_eventdev *dev);
+int sw_xstats_init(struct sw_evdev *dev);
+int sw_xstats_uninit(struct sw_evdev *dev);
+int sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+int sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
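+
+/* Example usage (illustrative, not part of the driver): applications reach
+ * the handlers above through the public eventdev xstats API. dev_id and the
+ * 64-entry arrays are assumptions made for this sketch:
+ *
+ *	struct rte_event_dev_xstats_name names[64];
+ *	unsigned int ids[64];
+ *	uint64_t values[64];
+ *	int n = rte_event_dev_xstats_names_get(dev_id,
+ *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, 64);
+ *	if (n > 0 && n <= 64)
+ *		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
+ *				0, ids, values, n);
+ */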
+
#endif /* _SW_EVDEV_H_ */