4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <rte_atomic.h>
34 #include <rte_cycles.h>
37 #include "event_ring.h"
39 #define PORT_ENQUEUE_MAX_BURST_SIZE 64
/*
 * NOTE(review): this chunk is missing lines (embedded original line numbers
 * jump, e.g. 43-44 and 47-49 are absent), so the function's return type,
 * braces and local declarations (ev, free_count) are not visible here.
 * Code lines below are kept byte-identical.
 *
 * Releases one outstanding event on a load-balanced port: builds a RELEASE
 * "drop" message, enqueues it on the port's rx worker ring, and converts the
 * outstanding release back into an inflight credit.
 */
42 sw_event_release(struct sw_port *p, uint8_t index)
45 * Drops the next outstanding event in our history. Used on dequeue
46 * to clear any history before dequeuing more events.
50 /* create drop message */
52 ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
/* best-effort enqueue: the return value is intentionally ignored here */
55 qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
57 /* each release returns one credit */
58 p->outstanding_releases--;
59 p->inflight_credits++;
/*
 * NOTE(review): incomplete view — several lines are missing (e.g. original
 * lines 64-65, 72-73, 87-89, 113-115, 127-131), including the early-return
 * bodies, the per-event op extraction, the loop closing brace and the final
 * return. Code lines below are kept byte-identical.
 *
 * Burst-enqueue entry point for a software eventdev port.
 *
 * Flow as visible here:
 *  1. Back-pressure: bail out if the device-wide inflight count already
 *     exceeds this port's inflight_max.
 *  2. Clamp the burst to PORT_ENQUEUE_MAX_BURST_SIZE (size of new_ops[]).
 *  3. Credit top-up: if the port lacks credits for the burst, atomically
 *     claim credit_update_quanta from sw->inflights, unless that would push
 *     the device past nb_events_limit.
 *  4. Per event: adjust credits (NEW consumes one; RELEASE may refund one),
 *     translate the op via sw_qe_flag_map[], and clear the VALID flag for
 *     events targeting a nonexistent queue (those count as rx_dropped and
 *     refund their credit).
 *  5. Enqueue the accepted prefix of the burst with per-event ops, then,
 *     once all releases are accounted for, fold the dequeue-to-enqueue
 *     latency of the last burst into avg_pkt_ticks (EWMA over NUM_SAMPLES).
 */
63 sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
66 uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
67 struct sw_port *p = port;
68 struct sw_evdev *sw = (void *)p->sw;
69 uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
/* device-wide back-pressure check (early-return body not visible here) */
71 if (unlikely(p->inflight_max < sw_inflights))
74 if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
75 num = PORT_ENQUEUE_MAX_BURST_SIZE;
77 if (p->inflight_credits < num) {
78 /* check if event enqueue brings port over max threshold */
79 uint32_t credit_update_quanta = sw->credit_update_quanta;
80 if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
83 rte_atomic32_add(&sw->inflights, credit_update_quanta);
84 p->inflight_credits += (credit_update_quanta);
/* still short after the top-up: give up (return body not visible here) */
86 if (p->inflight_credits < num)
90 for (i = 0; i < num; i++) {
92 int outstanding = p->outstanding_releases > 0;
93 const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);
/* NEW events consume a credit; RELEASE refunds (multiplier line missing) */
95 p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
96 p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
/* translate op to internal QE flags; invalidate events to unknown queues */
99 new_ops[i] = sw_qe_flag_map[op];
100 new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
102 /* FWD and RELEASE packets will both resolve to taken (assuming
103 * correct usage of the API), providing very high correct
106 if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding)
107 p->outstanding_releases--;
109 /* error case: branch to avoid touching p->stats */
110 if (unlikely(invalid_qid)) {
111 p->stats.rx_dropped++;
112 p->inflight_credits++;
116 /* returns number of events actually enqueued */
117 uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
/*
 * All outstanding releases resolved: update the exponentially-weighted
 * average per-packet ticks from the last dequeue burst's elapsed time.
 */
119 if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
120 uint64_t burst_ticks = rte_get_timer_cycles() -
121 p->last_dequeue_ticks;
122 uint64_t burst_pkt_ticks =
123 burst_ticks / p->last_dequeue_burst_sz;
124 p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
125 p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
126 p->last_dequeue_ticks = 0;
/* Single-event enqueue: thin wrapper over the burst path with num == 1. */
132 sw_event_enqueue(void *port, const struct rte_event *ev)
134 return sw_event_enqueue_burst(port, ev, 1);
/*
 * NOTE(review): incomplete view — missing lines include the signature's
 * return type / trailing parameter (original 139-141), local declarations,
 * the empty-dequeue branch body (158-163 partially) and the returns
 * (170-172, 177-180). Code lines below are kept byte-identical.
 *
 * Burst-dequeue entry point for a software eventdev port.
 *
 * Flow as visible here:
 *  1. On load-balanced (non-directed) ports, implicitly release every
 *     event still outstanding from the previous dequeue burst.
 *  2. Pull up to `num` events from the port's cq worker ring.
 *  3. If nothing was dequeued, clear outstanding_releases (branch body not
 *     fully visible here); otherwise record burst bookkeeping: directed
 *     ports get their credits back immediately (LB ports repay via
 *     RELEASE ops), outstanding_releases/last burst size/timestamp are
 *     stored for the enqueue-side latency average, and the poll-bucket
 *     histogram is updated.
 *  4. If the port has accumulated well over a quantum of spare credits,
 *     return one credit_update_quanta to the device-wide inflight pool.
 */
138 sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
142 struct sw_port *p = (void *)port;
143 struct sw_evdev *sw = (void *)p->sw;
144 struct qe_ring *ring = p->cq_worker_ring;
145 uint32_t credit_update_quanta = sw->credit_update_quanta;
147 /* check that all previous dequeues have been released */
148 if (!p->is_directed) {
149 uint16_t out_rels = p->outstanding_releases;
151 for (i = 0; i < out_rels; i++)
152 sw_event_release(p, i);
155 /* returns number of events actually dequeued */
156 uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
157 if (unlikely(ndeq == 0)) {
158 p->outstanding_releases = 0;
164 /* only add credits for directed ports - LB ports send RELEASEs */
165 p->inflight_credits += ndeq * p->is_directed;
166 p->outstanding_releases = ndeq;
167 p->last_dequeue_burst_sz = ndeq;
/* timestamp the burst; consumed by the enqueue path's avg_pkt_ticks EWMA */
168 p->last_dequeue_ticks = rte_get_timer_cycles();
169 p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
/* hand surplus credits back to the device-wide pool in whole quanta */
173 if (p->inflight_credits >= credit_update_quanta * 2 &&
174 p->inflight_credits > credit_update_quanta + ndeq) {
175 rte_atomic32_sub(&sw->inflights, credit_update_quanta);
176 p->inflight_credits -= credit_update_quanta;
/* Single-event dequeue: thin wrapper over the burst path with num == 1. */
182 sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
184 return sw_event_dequeue_burst(port, ev, 1, wait);