/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_sched.h>

#include "main.h"

/*
 * QoS parameters are encoded as follows:
 *	Outer VLAN ID defines subport
 *	Inner VLAN ID defines pipe
 *	Destination IP 0.0.XXX.0 defines traffic class
 *	Destination IP host (0.0.0.XXX) defines queue
 * Values below define offset to each field from start of frame
 */
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET	9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19
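
/*
 * Note: assuming the QinQ (outer + inner VLAN) Ethernet/IPv4 framing implied
 * by the comment above, the offsets index the frame as an array of 16-bit
 * words:
 *	SUBPORT_OFFSET 7  -> bytes 14-15, the outer VLAN TCI
 *	PIPE_OFFSET    9  -> bytes 18-19, the inner VLAN TCI
 *	COLOR_OFFSET  19  -> bytes 38-39, the two high octets of the destination IP
 *	QUEUE_OFFSET  20  -> bytes 40-41, the traffic class and queue octets
 * get_pkt_sched() below masks each field down to the configured number of
 * subports, pipes, traffic classes and queues.
 */
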
static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);

	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
			(port_params.n_subports_per_port - 1); /* Outer VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
			(port_params.n_pipes_per_subport - 1); /* Inner VLAN ID */
	*traffic_class = (pdata[QUEUE_OFFSET] & 0x0F) &
			(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1); /* Destination IP */
	*queue = ((pdata[QUEUE_OFFSET] >> 8) & 0x0F) &
			(RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS - 1); /* Destination IP */
	*color = pdata[COLOR_OFFSET] & 0x03; /* Destination IP */

	return 0;
}

void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
	struct thread_conf *conf;
	int conf_idx = 0;
	uint32_t subport, pipe, traffic_class, queue, color;

	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);
		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class, &queue, &color);
				rte_sched_port_pkt_write(rx_mbufs[i], subport, pipe,
						traffic_class, queue, (enum rte_meter_color) color);
			}

			/* if the software ring is full, drop the whole burst */
			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx) != 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);
					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}

/*
 * Flush the thread's TX mbuf table to the output interface.
 * Packets cannot be dropped at this point, so transmission is retried
 * until the whole burst has been sent.
 */
static inline void
app_send_burst(struct thread_conf *qconf)
{
	struct rte_mbuf **mbufs;
	uint32_t n, ret;

	mbufs = (struct rte_mbuf **)qconf->m_table;
	n = qconf->n_mbufs;
	do {
		ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);
		if (unlikely(ret < n)) { /* we cannot drop the packets, so re-send */
			/* update number of packets to be sent */
			n -= ret;
			mbufs = (struct rte_mbuf **)&mbufs[ret];
			/* limit number of retries to avoid endless loop */
			/* reset retry counter if some packets were sent */
			if (likely(ret != 0))
				continue;
		}
	} while (ret != n);
}

/* Buffer packets for an output interface; flush a full burst at a time */
static void
app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs, uint32_t nb_pkt)
{
	uint32_t i, len;

	len = qconf->n_mbufs;
	for (i = 0; i < nb_pkt; i++) {
		qconf->m_table[len] = mbufs[i];
		len++;
		/* enough pkts to be sent */
		if (unlikely(len == burst_conf.tx_burst)) {
			qconf->n_mbufs = len;
			app_send_burst(qconf);
			len = 0;
		}
	}
	qconf->n_mbufs = len;
}

void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0, retval;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
					burst_conf.qos_dequeue);
		if (likely(retval == 0)) {
			app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
			conf->counter = 0; /* reset empty read loop counter */
		}
		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* now check is there any packets left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);
				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}
		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}

void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;
		int retval;

		/* Read packets from the ring and enqueue them into the QoS scheduler */
		retval = rte_ring_sc_dequeue_bulk(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst);
		if (likely(retval == 0)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					burst_conf.ring_burst);

			APP_STATS_ADD(conf->stat.nb_drop, burst_conf.ring_burst - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, burst_conf.ring_burst);
		}

		/* Dequeue scheduled packets and forward them to the TX ring */
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0)
				; /* retry until the TX ring accepts the burst */
		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}

void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;
		int retval;

		/* Read packets from the ring and enqueue them into the QoS scheduler */
		retval = rte_ring_sc_dequeue_bulk(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst);
		if (likely(retval == 0)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					burst_conf.ring_burst);

			APP_STATS_ADD(conf->stat.nb_drop, burst_conf.ring_burst - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, burst_conf.ring_burst);
		}

		/* Dequeue scheduled packets and send them out directly */
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			app_send_packets(conf, mbufs, nb_pkt);
			conf->counter = 0; /* reset empty read loop counter */
		}
		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* now check is there any packets left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);
				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
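
/*
 * The loops above are lcore entry points; how they are launched is outside
 * this file. The guarded sketch below only illustrates one way a caller
 * could dispatch app_worker_thread() onto a worker lcore through the EAL.
 * The wrapper name and the app_get_lcore_confs() helper are assumptions made
 * for illustration, not part of this example's API.
 */
#if 0	/* illustrative sketch, not compiled */
static int
app_worker_launch(__rte_unused void *arg)
{
	/* assumed helper: returns this lcore's NULL-terminated thread_conf list */
	struct thread_conf **confs = app_get_lcore_confs(rte_lcore_id());

	app_worker_thread(confs);
	return 0;
}

/*
 * e.g. from main(), after EAL, port and scheduler initialization:
 *	rte_eal_remote_launch(app_worker_launch, NULL, worker_lcore_id);
 */
#endif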