1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Ericsson AB
 */
8 #include <rte_event_ring.h>
9 #include <rte_eventdev.h>
11 #define DSW_PMD_NAME RTE_STR(event_dsw)
13 /* Code changes are required to allow more ports. */
14 #define DSW_MAX_PORTS (64)
15 #define DSW_MAX_PORT_DEQUEUE_DEPTH (128)
16 #define DSW_MAX_PORT_ENQUEUE_DEPTH (128)
/* Capacity of one row of a port's out_buffer (see struct dsw_port below). */
17 #define DSW_MAX_PORT_OUT_BUFFER (32)
19 #define DSW_MAX_QUEUES (16)
/* Upper bound on in-flight events; also sizes the per-port input rings
 * (see DSW_IN_RING_SIZE below).
 */
21 #define DSW_MAX_EVENTS (16384)
23 /* Code changes are required to allow more flows than 32k. */
24 #define DSW_MAX_FLOWS_BITS (15)
/* 1<<15 = 32768 flows; MASK is used to fold a flow id into this range. */
25 #define DSW_MAX_FLOWS (1<<(DSW_MAX_FLOWS_BITS))
26 #define DSW_MAX_FLOWS_MASK (DSW_MAX_FLOWS-1)
28 /* Eventdev RTE_SCHED_TYPE_PARALLEL doesn't have a concept of flows,
29 * but the 'dsw' scheduler (more or less) randomly assigns flow ids to
30 * events on parallel queues, to be able to reuse some of the
31 * migration mechanism and scheduling logic from
32 * RTE_SCHED_TYPE_ATOMIC. By moving one of the parallel "flows" from a
33 * particular port, the likelihood of events being scheduled to this
34 * port is reduced, and thus a kind of statistical load balancing is
 * achieved.
 */
37 #define DSW_PARALLEL_FLOWS (1024)
39 /* Avoid making small 'loans' from the central in-flight event credit
40 * pool, to improve efficiency.
 */
42 #define DSW_MIN_CREDIT_LOAN (64)
/* NOTE(review): presumably the upper/lower bounds for a port's locally
 * cached credits, between which no interaction with the central pool is
 * needed — confirm against the credit-handling code.
 */
43 #define DSW_PORT_MAX_CREDITS (2*DSW_MIN_CREDIT_LOAN)
44 #define DSW_PORT_MIN_CREDITS (DSW_MIN_CREDIT_LOAN)
46 /* The rings are dimensioned so that all in-flight events can reside
47 * on any one of the port rings, to avoid the trouble of having to
48 * care about the case where there's no room on the destination port's
 * in_ring.
 */
51 #define DSW_IN_RING_SIZE (DSW_MAX_EVENTS)
/* Fields of struct dsw_port (the struct's opening line precedes this chunk). */
56 /* Keeping a pointer here to avoid container_of() calls, which
57 * are expensive since they are very frequent and will result
58 * in an integer multiplication (since the port id is an index
59 * into the dsw_evdev port array).
 */
61 struct dsw_evdev *dsw;
/* Configured burst-size limits for this port. */
63 uint16_t dequeue_depth;
64 uint16_t enqueue_depth;
/* Credits this port currently holds from the central credit pool
 * (see DSW_MIN_CREDIT_LOAN above).
 */
66 int32_t inflight_credits;
68 int32_t new_event_threshold;
70 uint16_t pending_releases;
/* Counter for handing out flow ids on parallel queues (see the
 * DSW_PARALLEL_FLOWS comment above).
 */
72 uint16_t next_parallel_flow_id;
/* One DSW_MAX_PORT_OUT_BUFFER-deep event buffer per port; presumably
 * indexed by destination port id — confirm against the enqueue path.
 */
74 uint16_t out_buffer_len[DSW_MAX_PORTS];
75 struct rte_event out_buffer[DSW_MAX_PORTS][DSW_MAX_PORT_OUT_BUFFER];
/* Ring of events directed to this port (sized DSW_IN_RING_SIZE). */
77 struct rte_event_ring *in_ring __rte_cache_aligned;
78 } __rte_cache_aligned;
/* Fields of struct dsw_queue (the struct's opening line precedes this chunk). */
81 uint8_t schedule_type;
/* The ports eligible to receive events from this queue. */
82 uint8_t serving_ports[DSW_MAX_PORTS];
83 uint16_t num_serving_ports;
/* One entry per flow id (DSW_MAX_FLOWS); presumably maps each flow to the
 * port currently serving it — confirm against the scheduling code.
 */
85 uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned;
/* Fields of struct dsw_evdev (the struct's opening line precedes this chunk). */
89 struct rte_eventdev_data *data;
91 struct dsw_port ports[DSW_MAX_PORTS];
93 struct dsw_queue queues[DSW_MAX_QUEUES];
/* Counter for the central in-flight event credit pool (see the
 * DSW_MIN_CREDIT_LOAN comment above); atomic and cache-aligned since it
 * is shared across ports.
 */
97 rte_atomic32_t credits_on_loan __rte_cache_aligned;
/* Fast-path enqueue/dequeue entry points. Presumably installed as the
 * eventdev's enqueue/dequeue function pointers — confirm in the device
 * setup code. Per eventdev convention the return value is the number of
 * events actually enqueued/dequeued.
 */
100 uint16_t dsw_event_enqueue(void *port, const struct rte_event *event);
101 uint16_t dsw_event_enqueue_burst(void *port,
102 const struct rte_event events[],
103 uint16_t events_len);
/* Variant for events known to be RTE_EVENT_OP_NEW. */
104 uint16_t dsw_event_enqueue_new_burst(void *port,
105 const struct rte_event events[],
106 uint16_t events_len);
/* Variant for events known to be RTE_EVENT_OP_FORWARD. */
107 uint16_t dsw_event_enqueue_forward_burst(void *port,
108 const struct rte_event events[],
109 uint16_t events_len);
111 uint16_t dsw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
112 uint16_t dsw_event_dequeue_burst(void *port, struct rte_event *events,
113 uint16_t num, uint64_t wait);
115 static inline struct dsw_evdev *
116 dsw_pmd_priv(const struct rte_eventdev *eventdev)
118 return eventdev->data->dev_private;
/* Data-path log macro: tags each message with the PMD name, the calling
 * function and the line number.
 *
 * Fix: the format string has three conversions ("[%s] %s() line %u") but
 * only __func__ and __LINE__ were being passed; supply DSW_PMD_NAME for
 * the leading "[%s]".
 */
#define DSW_LOG_DP(level, fmt, args...)				\
	RTE_LOG_DP(level, EVENTDEV, "[%s] %s() line %u: " fmt,	\
		   DSW_PMD_NAME,				\
		   __func__, __LINE__, ## args)
/* Port-scoped variant of DSW_LOG_DP: prepends "<Port N>" to the message. */
126 #define DSW_LOG_DP_PORT(level, port_id, fmt, args...) \
127 DSW_LOG_DP(level, "<Port %d> " fmt, port_id, ## args)