1 #include "l2fwd_poll.h"
/*
 * Forward one mbuf to the port paired with the RX port it arrived on.
 * NOTE(review): this excerpt is non-contiguous (embedded original line
 * numbers skip); declarations such as dst_port/sent and the function's
 * return type and third parameter are not visible here — do not assume
 * the body is complete.
 */
4 l2fwd_poll_simple_forward(struct l2fwd_resources *rsrc, struct rte_mbuf *m,
7 struct rte_eth_dev_tx_buffer *buffer;
/* Destination port is looked up from the per-port pairing table. */
11 dst_port = rsrc->dst_ports[portid];
/* Optionally rewrite the Ethernet addresses before forwarding. */
13 if (rsrc->mac_updating)
14 l2fwd_mac_updating(m, dst_port, &rsrc->eth_addr[dst_port]);
/* Use the TX buffer that belongs to the destination port. */
16 buffer = ((struct l2fwd_poll_resources *)rsrc->poll_rsrc)->tx_buffer[
/* rte_eth_tx_buffer() buffers the mbuf on queue 0 and returns how many
 * packets it actually transmitted as a side effect of this call. */
18 sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
/* Account transmitted packets against the destination port's stats.
 * NOTE(review): upstream guards this with `if (sent)`; that line is not
 * visible in this excerpt — confirm against the full file. */
20 rsrc->port_stats[dst_port].tx += sent;
23 /* main poll mode processing loop */
/*
 * Per-lcore poll-mode worker: drains TX buffers on a timer and busy-polls
 * the RX queues assigned to this lcore until rsrc->force_quit is set.
 * NOTE(review): excerpt is non-contiguous — return type, prev_tsc
 * initialization, several declarations (lcore_id, sent, m) and closing
 * braces are not visible here.
 */
25 l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
27 uint64_t prev_tsc, diff_tsc, cur_tsc, drain_tsc;
28 struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
29 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
30 struct rte_eth_dev_tx_buffer *buf;
31 struct lcore_queue_conf *qconf;
32 uint32_t i, j, port_id, nb_rx;
/* Convert the drain period into TSC cycles, rounding microseconds up.
 * The multiplier (presumably BURST_TX_DRAIN_US) is on a line not shown. */
37 drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
41 lcore_id = rte_lcore_id();
42 qconf = &poll_rsrc->lcore_queue_conf[lcore_id];
/* Lcores with no RX ports assigned have nothing to poll. */
44 if (qconf->n_rx_port == 0) {
45 printf("lcore %u has nothing to do\n", lcore_id);
49 printf("entering main loop on lcore %u\n", lcore_id);
/* Announce every RX port this lcore will service. */
51 for (i = 0; i < qconf->n_rx_port; i++) {
53 port_id = qconf->rx_port_list[i];
54 printf(" -- lcoreid=%u port_id=%u\n", lcore_id, port_id);
/* Poll until the application asks all workers to stop. */
58 while (!rsrc->force_quit) {
60 cur_tsc = rte_rdtsc();
63 * TX burst queue drain
/* Flush partially-filled TX buffers once per drain period so buffered
 * packets are not held indefinitely under light traffic. */
65 diff_tsc = cur_tsc - prev_tsc;
66 if (unlikely(diff_tsc > drain_tsc)) {
67 for (i = 0; i < qconf->n_rx_port; i++) {
/* Flush the TX buffer of the *destination* port paired with each RX
 * port; the assignment target of this expression is on a hidden line. */
69 rsrc->dst_ports[qconf->rx_port_list[i]];
70 buf = poll_rsrc->tx_buffer[port_id];
71 sent = rte_eth_tx_buffer_flush(port_id, 0, buf);
/* NOTE(review): upstream guards this with `if (sent)` — not visible. */
73 rsrc->port_stats[port_id].tx += sent;
80 * Read packet from RX queues
/* RX path: burst-read queue 0 of each assigned port and forward. */
82 for (i = 0; i < qconf->n_rx_port; i++) {
84 port_id = qconf->rx_port_list[i];
85 nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst,
88 rsrc->port_stats[port_id].rx += nb_rx;
90 for (j = 0; j < nb_rx; j++) {
/* Prefetch packet data before the forwarding path touches headers;
 * `m` is assigned from pkts_burst[j] on a line not shown here. */
92 rte_prefetch0(rte_pktmbuf_mtod(m, void *));
93 l2fwd_poll_simple_forward(rsrc, m, port_id);
/*
 * Distribute the enabled RX ports across lcores, assigning at most
 * rsrc->rx_queue_per_lcore ports to each enabled lcore.
 * NOTE(review): excerpt is non-contiguous — return type, the `continue`
 * after the port-mask check, the rx_lcore_id increment inside the while
 * loop, the n_rx_port increment, and closing braces are not visible.
 */
100 l2fwd_poll_lcore_config(struct l2fwd_resources *rsrc)
102 struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
103 struct lcore_queue_conf *qconf = NULL;
104 uint32_t rx_lcore_id = 0;
107 /* Initialize the port/queue configuration of each logical core */
108 RTE_ETH_FOREACH_DEV(port_id) {
109 /* skip ports that are not enabled */
110 if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
113 /* get the lcore_id for this port */
/* Advance to the next enabled lcore that still has capacity; the
 * rx_lcore_id increment is on a line not shown in this excerpt. */
114 while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
115 poll_rsrc->lcore_queue_conf[rx_lcore_id].n_rx_port ==
116 rsrc->rx_queue_per_lcore) {
/* Hard failure: more enabled ports than available lcore capacity. */
118 if (rx_lcore_id >= RTE_MAX_LCORE)
119 rte_panic("Not enough cores\n");
122 if (qconf != &poll_rsrc->lcore_queue_conf[rx_lcore_id]) {
123 /* Assigned a new logical core in the loop above. */
124 qconf = &poll_rsrc->lcore_queue_conf[rx_lcore_id];
/* Record this RX port in the lcore's port list; the n_rx_port
 * increment is on a hidden line. */
127 qconf->rx_port_list[qconf->n_rx_port] = port_id;
129 printf("Lcore %u: RX port %u\n", rx_lcore_id, port_id);
/*
 * Allocate and initialize one rte_eth_dev_tx_buffer per Ethernet port,
 * sized for MAX_PKT_BURST packets, on the port's NUMA socket, and install
 * a drop-counting error callback that accumulates into the port's
 * `dropped` stat. Panics on any failure.
 * NOTE(review): excerpt is non-contiguous — return type, declarations of
 * port_id/ret, the tx_buffer_init size argument, the `if (ret < 0)` test
 * before the second panic, and closing braces are not visible here.
 */
134 l2fwd_poll_init_tx_buffers(struct l2fwd_resources *rsrc)
136 struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
140 RTE_ETH_FOREACH_DEV(port_id) {
141 /* Initialize TX buffers */
/* Zeroed allocation on the port's own NUMA socket to keep the TX
 * buffer local to the device. */
142 poll_rsrc->tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
143 RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
144 rte_eth_dev_socket_id(port_id));
145 if (poll_rsrc->tx_buffer[port_id] == NULL)
146 rte_panic("Cannot allocate buffer for tx on port %u\n",
149 rte_eth_tx_buffer_init(poll_rsrc->tx_buffer[port_id],
/* Count packets the buffer fails to send into the port's drop stat
 * instead of silently freeing them. */
152 ret = rte_eth_tx_buffer_set_err_callback(
153 poll_rsrc->tx_buffer[port_id],
154 rte_eth_tx_buffer_count_callback,
155 &rsrc->port_stats[port_id].dropped);
157 rte_panic("Cannot set error callback for tx buffer on port %u\n",
/*
 * Entry point for poll-mode setup: allocate the poll-mode resource
 * struct, hang it off the shared l2fwd resources, configure the
 * lcore/port mapping, initialize per-port TX buffers, and publish the
 * poll-mode main-loop callback. Panics if the allocation fails.
 * NOTE(review): excerpt is non-contiguous — the return type and closing
 * brace are not visible here.
 */
163 l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc)
165 struct l2fwd_poll_resources *poll_rsrc;
/* Zero-initialized so lcore_queue_conf and tx_buffer start empty. */
167 poll_rsrc = rte_zmalloc("l2fwd_poll_rsrc",
168 sizeof(struct l2fwd_poll_resources), 0);
169 if (poll_rsrc == NULL)
170 rte_panic("Failed to allocate resources for l2fwd poll mode\n");
172 rsrc->poll_rsrc = poll_rsrc;
173 l2fwd_poll_lcore_config(rsrc);
174 l2fwd_poll_init_tx_buffers(rsrc);
/* Workers invoke this function pointer to run the poll-mode loop. */
176 poll_rsrc->poll_main_loop = l2fwd_poll_main_loop;