/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>

#include <rte_sched.h>

#include "main.h"	/* struct thread_conf, burst_conf, port/subport params */
/*
 * QoS parameters are encoded as follows:
 *		Outer VLAN ID defines subport
 *		Inner VLAN ID defines pipe
 *		Destination IP host (0.0.0.XXX) defines traffic class and queue
 * The values below are offsets to each field, counted in 16-bit words from
 * the start of the frame (pdata below is a uint16_t pointer); an illustrative
 * layout follows the defines.
 */
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET	9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19
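/*
 * Illustrative layout implied by the offsets above (illustration only,
 * assuming double VLAN (QinQ) tagged IPv4 frames and a little-endian host):
 *
 *	word  7 = frame bytes 14-15: outer VLAN TCI -> subport
 *	word  9 = frame bytes 18-19: inner VLAN TCI -> pipe
 *	word 19 = frame bytes 38-39: leading octets of destination IP -> color
 *	word 20 = frame bytes 40-41: trailing octets of destination IP -> tc/queue
 */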
static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
	uint16_t pipe_queue;

	/* Outer VLAN ID */
	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
			(port_params.n_subports_per_port - 1);

	/* Inner VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
			(subport_params[*subport].n_pipes_per_subport_enabled - 1);

	/* Last octet of the destination IP selects one of the active queues */
	pipe_queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];

	/* Traffic class (Destination IP) */
	*traffic_class = pipe_queue > RTE_SCHED_TRAFFIC_CLASS_BE ?
			RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue;

	/* Traffic class queue (Destination IP) */
	*queue = pipe_queue - *traffic_class;

	/* Color (Destination IP) */
	*color = pdata[COLOR_OFFSET] & 0x03;

	return 0;
}
void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
	struct thread_conf *conf;
	int conf_idx = 0;
	uint32_t subport, pipe, traffic_class, queue, color;
	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);

		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class, &queue, &color);
				rte_sched_port_pkt_write(conf->sched_port,
						rx_mbufs[i],
						subport, pipe,
						traffic_class, queue,
						(enum rte_color) color);
			}

			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx, NULL) == 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);

					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
/* Send the buffered packets on the output interface. Packets must not be
 * dropped here, so the function keeps re-submitting the burst until the NIC
 * has accepted every mbuf.
 */
static void
app_send_burst(struct thread_conf *qconf)
{
	struct rte_mbuf **mbufs;
	uint32_t n, ret;

	mbufs = (struct rte_mbuf **)qconf->m_table;
	n = qconf->n_mbufs;
	ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);

	/* we cannot drop the packets, so re-send the remainder */
	while (ret < n) {
		n -= ret;
		mbufs = (struct rte_mbuf **)&mbufs[ret];
		ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);
	}
}
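/*
 * Resend arithmetic with illustrative numbers (assumed, for clarity): if
 * n_mbufs is 32 and the first rte_eth_tx_burst() call accepts only 20 mbufs,
 * the loop above re-submits the remaining 12 starting at &m_table[20] and
 * keeps retrying until the NIC has accepted all 32.
 */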
/* Buffer packets for an output interface and send them in full bursts */
static void
app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs, uint32_t nb_pkt)
{
	uint32_t i, len;

	len = qconf->n_mbufs;
	for (i = 0; i < nb_pkt; i++) {
		qconf->m_table[len] = mbufs[i];
		len++;

		/* enough packets buffered for a full TX burst, send them */
		if (unlikely(len == burst_conf.tx_burst)) {
			qconf->n_mbufs = len;
			app_send_burst(qconf);
			len = 0;
		}
	}

	qconf->n_mbufs = len;
}
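/*
 * Packets that do not fill a complete TX burst stay buffered in
 * qconf->m_table (qconf->n_mbufs tracks how many); the TX and mixed threads
 * below flush this leftover with app_send_burst() once their drain interval
 * expires.
 */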
void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0;
	int retval;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
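	/*
	 * drain_tsc above is the drain interval expressed in TSC cycles:
	 * TSC ticks per microsecond (rounded up) times BURST_TX_DRAIN_US.
	 * Illustration with assumed numbers: a 2 GHz TSC gives 2000 cycles/us,
	 * so a 100 us drain interval yields drain_tsc = 200000 cycles.
	 */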
	while ((conf = confs[conf_idx])) {
		retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
				burst_conf.qos_dequeue, NULL);
		if (likely(retval != 0)) {
			app_send_packets(conf, mbufs, burst_conf.qos_dequeue);

			conf->counter = 0; /* reset empty read loop counter */
		}
		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check whether any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
				burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		/* Dequeue scheduled packets and hand them to the TX thread */
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
				burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
					(void **)mbufs, nb_pkt, NULL) == 0)
				; /* retry until the whole burst fits in the ring */

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;

		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
				burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}

		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
				burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			app_send_packets(conf, mbufs, nb_pkt);

			conf->counter = 0; /* reset empty read loop counter */
		}
		conf->counter++;

		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check whether any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);

				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)