eventdev: add implicit release disable capability
drivers/event/sw/sw_evdev_worker.c
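For context, a minimal sketch of how an application might opt in to the capability this commit adds, shown before the file contents below. The disable_implicit_release port-conf field and the RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE flag come from the same patch series and are not defined in this file; the helper names here are illustrative only, not part of the driver.

#include <rte_eventdev.h>

/* Configure a worker port with implicit release disabled, if supported. */
static int
setup_worker_port(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_dev_info info;
	struct rte_event_port_conf conf;

	rte_event_dev_info_get(dev_id, &info);
	rte_event_port_default_conf_get(dev_id, port_id, &conf);

	/* Only disable implicit release if the PMD advertises support */
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		conf.disable_implicit_release = 1;

	return rte_event_port_setup(dev_id, port_id, &conf);
}

/* With implicit release disabled, the application must explicitly forward
 * or release every event it dequeues.
 */
static void
worker_loop(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;

	while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
		/* ... process the event, set ev.queue_id etc. as needed ... */
		ev.op = RTE_EVENT_OP_FORWARD; /* or RTE_EVENT_OP_RELEASE */
		while (rte_event_enqueue_burst(dev_id, port_id, &ev, 1) != 1)
			;
	}
}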
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_event_ring.h>

#include "sw_evdev.h"

#define PORT_ENQUEUE_MAX_BURST_SIZE 64

static inline void
sw_event_release(struct sw_port *p, uint8_t index)
{
	/*
	 * Drops the next outstanding event in our history. Used on dequeue
	 * to clear any history before dequeuing more events.
	 */
	RTE_SET_USED(index);

	/* create drop message */
	struct rte_event ev;
	ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];

	uint16_t free_count;
	rte_event_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);

	/* each release returns one credit */
	p->outstanding_releases--;
	p->inflight_credits++;
}

/*
 * Special case of rte_event_ring enqueue that overrides the op member of
 * each event written to the ring with the corresponding entry in ops[].
 */
static inline unsigned int
enqueue_burst_with_ops(struct rte_event_ring *r, const struct rte_event *events,
		unsigned int n, uint8_t *ops)
{
	struct rte_event tmp_evs[PORT_ENQUEUE_MAX_BURST_SIZE];
	unsigned int i;

	memcpy(tmp_evs, events, n * sizeof(events[0]));
	for (i = 0; i < n; i++)
		tmp_evs[i].op = ops[i];

	return rte_event_ring_enqueue_burst(r, tmp_evs, n, NULL);
}

uint16_t
sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
{
	int32_t i;
	uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
	struct sw_port *p = port;
	struct sw_evdev *sw = (void *)p->sw;
	uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
	int new = 0;

	if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
		num = PORT_ENQUEUE_MAX_BURST_SIZE;

	for (i = 0; i < num; i++)
		new += (ev[i].op == RTE_EVENT_OP_NEW);

	if (unlikely(new > 0 && p->inflight_max < sw_inflights))
		return 0;

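	/* NEW events consume inflight credits. Credits are taken from the
	 * device-wide sw->inflights count in credit_update_quanta chunks and
	 * cached per port, so the atomic add below is not paid on every
	 * enqueue.
	 */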
	if (p->inflight_credits < new) {
		/* check if event enqueue brings port over max threshold */
		uint32_t credit_update_quanta = sw->credit_update_quanta;
		if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
			return 0;

		rte_atomic32_add(&sw->inflights, credit_update_quanta);
		p->inflight_credits += credit_update_quanta;

		if (p->inflight_credits < new)
			return 0;
	}

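	/* Translate the public op field of each event into the scheduler's
	 * internal QE flags, adjust the port's credit count, and track how
	 * many outstanding releases this burst completes.
	 */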
	uint32_t completions = 0;
	for (i = 0; i < num; i++) {
		int op = ev[i].op;
		int outstanding = p->outstanding_releases > 0;
		const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);

		p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
		p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
					outstanding;

		new_ops[i] = sw_qe_flag_map[op];
		new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);

		/* FWD and RELEASE packets will both resolve to taken (assuming
		 * correct usage of the API), providing very high correct
		 * prediction rate.
		 */
		if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding) {
			p->outstanding_releases--;
			completions++;
		}

		/* error case: branch to avoid touching p->stats */
		if (unlikely(invalid_qid)) {
			p->stats.rx_dropped++;
			p->inflight_credits++;
		}
	}

	/* handle directed port forward and release credits */
	p->inflight_credits -= completions * p->is_directed;

	/* returns number of events actually enqueued */
	uint32_t enq = enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
					     new_ops);
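	/* Once every event from the last dequeue burst has been completed,
	 * fold that burst's cycles-per-event figure into a running average
	 * decayed over NUM_SAMPLES bursts.
	 */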
	if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
		uint64_t burst_ticks = rte_get_timer_cycles() -
				p->last_dequeue_ticks;
		uint64_t burst_pkt_ticks =
			burst_ticks / p->last_dequeue_burst_sz;
		p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
		p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
		p->last_dequeue_ticks = 0;
	}
	return enq;
}

uint16_t
sw_event_enqueue(void *port, const struct rte_event *ev)
{
	return sw_event_enqueue_burst(port, ev, 1);
}

uint16_t
sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
		uint64_t wait)
{
	RTE_SET_USED(wait);
	struct sw_port *p = (void *)port;
	struct sw_evdev *sw = (void *)p->sw;
	struct rte_event_ring *ring = p->cq_worker_ring;
	uint32_t credit_update_quanta = sw->credit_update_quanta;

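	/* Ports set up with implicit release disabled (advertised via the
	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capability) keep
	 * p->implicit_release cleared: nothing is released here, and the
	 * application must enqueue RTE_EVENT_OP_RELEASE or
	 * RTE_EVENT_OP_FORWARD for each event it previously dequeued.
	 */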
	/* check that all previous dequeues have been released */
	if (p->implicit_release && !p->is_directed) {
		uint16_t out_rels = p->outstanding_releases;
		uint16_t i;
		for (i = 0; i < out_rels; i++)
			sw_event_release(p, i);
	}

	/* returns number of events actually dequeued */
	uint16_t ndeq = rte_event_ring_dequeue_burst(ring, ev, num, NULL);
	if (unlikely(ndeq == 0)) {
		p->zero_polls++;
		p->total_polls++;
		goto end;
	}

	/* only add credits for directed ports - LB ports send RELEASEs */
	p->inflight_credits += ndeq * p->is_directed;
	p->outstanding_releases += ndeq;
	p->last_dequeue_burst_sz = ndeq;
	p->last_dequeue_ticks = rte_get_timer_cycles();
	p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
	p->total_polls++;

end:
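	/* Return one quantum of surplus credits to the device-wide pool,
	 * keeping at least a full quantum (and more than this burst's
	 * dequeue count) cached on the port.
	 */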
	if (p->inflight_credits >= credit_update_quanta * 2 &&
			p->inflight_credits > credit_update_quanta + ndeq) {
		rte_atomic32_sub(&sw->inflights, credit_update_quanta);
		p->inflight_credits -= credit_update_quanta;
	}
	return ndeq;
}

uint16_t
sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
{
	return sw_event_dequeue_burst(port, ev, 1, wait);
}