/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_sched.h>

/* application-level declarations: struct thread_conf, burst_conf,
 * port_params, active_queues and the APP_STATS_ADD() macro */
#include "main.h"
/*
 * QoS parameters are encoded as follows:
 *	Outer VLAN ID defines subport
 *	Inner VLAN ID defines pipe
 *	Destination IP host (0.0.0.XXX) defines queue
 * Values below define the offset to each field from the start of the frame
 */
#define SUBPORT_OFFSET	7
#define PIPE_OFFSET	9
#define QUEUE_OFFSET	20
#define COLOR_OFFSET	19
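
/*
 * Note: get_pkt_sched() below reads the frame as an array of 16-bit words,
 * so the offsets above are expressed in 16-bit words from the start of the
 * Ethernet header. Assuming the double-VLAN (QinQ) IPv4 frames this example
 * expects, SUBPORT_OFFSET lands on the outer VLAN TCI, PIPE_OFFSET on the
 * inner VLAN TCI, and QUEUE_OFFSET/COLOR_OFFSET inside the destination IP
 * address.
 */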
static inline int
get_pkt_sched(struct rte_mbuf *m, uint32_t *subport, uint32_t *pipe,
			uint32_t *traffic_class, uint32_t *queue, uint32_t *color)
{
	uint16_t *pdata = rte_pktmbuf_mtod(m, uint16_t *);
	uint16_t pipe_queue;
	/* Outer VLAN ID */
	*subport = (rte_be_to_cpu_16(pdata[SUBPORT_OFFSET]) & 0x0FFF) &
		(port_params.n_subports_per_port - 1);
	/* Inner VLAN ID */
	*pipe = (rte_be_to_cpu_16(pdata[PIPE_OFFSET]) & 0x0FFF) &
		(port_params.n_pipes_per_subport - 1);

	/* map the destination IP host byte to one of the active queues */
	pipe_queue = active_queues[(pdata[QUEUE_OFFSET] >> 8) % n_active_queues];

	/* Destination IP selects traffic class and queue within the pipe */
	*traffic_class = pipe_queue > RTE_SCHED_TRAFFIC_CLASS_BE ?
			RTE_SCHED_TRAFFIC_CLASS_BE : pipe_queue;
	*queue = pipe_queue - *traffic_class;
	/* Destination IP selects the packet color */
	*color = pdata[COLOR_OFFSET] & 0x03;

	return 0;
}
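
/*
 * RX thread: read bursts of packets from the NIC RX queue, classify each
 * packet into (subport, pipe, traffic class, queue, color), stamp the
 * scheduler metadata into the mbuf and pass the burst to the worker thread
 * over rx_ring. If the ring is full, the whole burst is dropped.
 */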
void
app_rx_thread(struct thread_conf **confs)
{
	uint32_t i, nb_rx;
	struct rte_mbuf *rx_mbufs[burst_conf.rx_burst] __rte_cache_aligned;
	struct thread_conf *conf;
	int conf_idx = 0;
	uint32_t subport, pipe, traffic_class, queue, color;
	while ((conf = confs[conf_idx])) {
		nb_rx = rte_eth_rx_burst(conf->rx_port, conf->rx_queue, rx_mbufs,
				burst_conf.rx_burst);

		if (likely(nb_rx != 0)) {
			APP_STATS_ADD(conf->stat.nb_rx, nb_rx);

			for (i = 0; i < nb_rx; i++) {
				get_pkt_sched(rx_mbufs[i],
						&subport, &pipe, &traffic_class, &queue, &color);
				rte_sched_port_pkt_write(conf->sched_port,
						rx_mbufs[i], subport, pipe,
						traffic_class, queue,
						(enum rte_color) color);
			}
			if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
					(void **)rx_mbufs, nb_rx, NULL) == 0)) {
				for (i = 0; i < nb_rx; i++) {
					rte_pktmbuf_free(rx_mbufs[i]);
					APP_STATS_ADD(conf->stat.nb_drop, 1);
				}
			}
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
/*
 * Flush the packets buffered for an output interface.
 * Packets are never dropped here: rte_eth_tx_burst() is retried until the
 * NIC has accepted every mbuf.
 */
static inline void
app_send_burst(struct thread_conf *qconf)
{
	struct rte_mbuf **mbufs;
	uint32_t n, ret;

	mbufs = (struct rte_mbuf **)qconf->m_table;
	n = qconf->n_mbufs;

	do {
		ret = rte_eth_tx_burst(qconf->tx_port, qconf->tx_queue, mbufs, (uint16_t)n);
		/* we cannot drop the packets, so re-send the ones not accepted */
		n -= ret;
		mbufs = (struct rte_mbuf **)&mbufs[ret];
	} while (n);
}
/* Buffer packets for an output interface; a full burst is flushed as soon
 * as tx_burst packets have accumulated */
static void
app_send_packets(struct thread_conf *qconf, struct rte_mbuf **mbufs, uint32_t nb_pkt)
{
	uint32_t i, len;

	len = qconf->n_mbufs;
	for (i = 0; i < nb_pkt; i++) {
		qconf->m_table[len] = mbufs[i];
		len++;
		/* enough pkts to be sent */
		if (unlikely(len == burst_conf.tx_burst)) {
			qconf->n_mbufs = len;
			app_send_burst(qconf);
			len = 0;
		}
	}

	qconf->n_mbufs = len;
}
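
/*
 * TX thread: dequeue bursts of already-scheduled packets from tx_ring and
 * buffer them for transmission; partially filled TX buffers are drained
 * after roughly BURST_TX_DRAIN_US of inactivity.
 */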
void
app_tx_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.qos_dequeue];
	struct thread_conf *conf;
	int conf_idx = 0;
	int retval;
	/* TX drain period: BURST_TX_DRAIN_US converted to TSC cycles */
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	while ((conf = confs[conf_idx])) {
		retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
					burst_conf.qos_dequeue, NULL);
		if (likely(retval != 0)) {
			app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;
		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check whether any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);
				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
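
/*
 * Worker thread: drain packets from rx_ring into the rte_sched port, then
 * dequeue the packets the scheduler releases and push them to tx_ring for
 * the TX thread.
 */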
void
app_worker_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;
		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0))
			while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
					(void **)mbufs, nb_pkt, NULL) == 0)
				; /* retry until the TX ring accepts the burst */

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
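
/*
 * Mixed thread: combines the worker and TX roles in a single thread; it
 * feeds the rte_sched port from rx_ring and transmits the scheduled packets
 * itself, using the same drain timeout as app_tx_thread.
 */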
void
app_mixed_thread(struct thread_conf **confs)
{
	struct rte_mbuf *mbufs[burst_conf.ring_burst];
	struct thread_conf *conf;
	int conf_idx = 0;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	while ((conf = confs[conf_idx])) {
		uint32_t nb_pkt;
		/* Read packets from the ring */
		nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
					burst_conf.ring_burst, NULL);
		if (likely(nb_pkt)) {
			int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
					nb_pkt);

			APP_STATS_ADD(conf->stat.nb_drop, nb_pkt - nb_sent);
			APP_STATS_ADD(conf->stat.nb_rx, nb_pkt);
		}
		nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
					burst_conf.qos_dequeue);
		if (likely(nb_pkt > 0)) {
			app_send_packets(conf, mbufs, nb_pkt);
			conf->counter = 0; /* reset empty read loop counter */
		}

		conf->counter++;
		/* drain ring and TX queues */
		if (unlikely(conf->counter > drain_tsc)) {
			/* check whether any packets are left to be transmitted */
			if (conf->n_mbufs != 0) {
				app_send_burst(conf);
				conf->n_mbufs = 0;
			}
			conf->counter = 0;
		}

		conf_idx++;
		if (confs[conf_idx] == NULL)
			conf_idx = 0;
	}
}
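
/*
 * The entry points above are alternative pipeline stages: app_rx_thread,
 * app_worker_thread and app_tx_thread form a three-stage RX -> scheduler ->
 * TX pipeline connected by rings, while app_mixed_thread folds the scheduler
 * and TX stages into a single lcore. Which combination runs on a given lcore
 * is decided by the application setup code outside this file.
 */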