#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
+/* List of queues to be polled for a given lcore. 8< */
struct lcore_queue_conf {
unsigned n_rx_port; /* number of valid entries in rx_port_list */
unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE]; /* port ids this lcore polls for RX */
unsigned tx_queue_id; /* TX queue index used by this lcore; NOTE(review): not referenced in this fragment — confirm usage */
} __rte_cache_aligned; /* cache-line aligned so per-lcore entries avoid false sharing */
/* One queue configuration slot per logical core. */
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+/* >8 End of list of queues to be polled. */
/* Per-port TX buffer for batching packets before transmit (allocated with rte_zmalloc_socket during port init). */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+/* Global configuration stored in a static structure. 8< */
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
.lsc = 1, /**< lsc interrupt feature enabled */
},
};
+/* >8 End of global configuration stored in a static structure. */
/* Packet mbuf pool shared by the ports; NOTE(review): created elsewhere (not visible in this fragment) — confirm. */
struct rte_mempool * lsi_pktmbuf_pool = NULL;
fflush(stdout);
}
+/* Replacing the source and destination MAC addresses. 8< */
static void
lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
{
if (sent)
port_statistics[dst_port].tx += sent;
}
+/* >8 End of replacing the source and destination MAC addresses. */
/* main processing loop */
static void
while (1) {
+ /* Draining TX queue in its main loop. 8< */
cur_tsc = rte_rdtsc();
/*
prev_tsc = cur_tsc;
}
+ /* >8 End of draining TX queue in its main loop. */
- /*
- * Read packet from RX queues
- */
+ /* Read packet from RX queues. 8< */
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
lsi_simple_forward(m, portid);
}
}
+ /* >8 End of reading packet from RX queues. */
}
}
* @return
* 0 on success.
*/
+
+/* lsi_event_callback 8< */
static int
lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
return 0;
}
+/* >8 End of lsi_event_callback. */
/* Check the link status of all ports in up to 9s, and print them finally */
static void
if (nb_ports == 0)
rte_panic("No Ethernet port - bye\n");
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
+ /* Each logical core is assigned a dedicated TX queue on each port. 8< */
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((lsi_enabled_port_mask & (1 << portid)) == 0)
nb_ports_in_mask++;
}
+ /* >8 End of assigning logical core. */
if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2)
rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
"but it should be even and at least 2\n",
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ /* Configure RX and TX queues. 8< */
ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
ret, (unsigned) portid);
+ /* >8 End of configure RX and TX queues. */
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
* lsc interrupt will be present, and below callback to
* be registered will never be called.
*/
+
+ /* RTE callback register. 8< */
rte_eth_dev_callback_register(portid,
RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);
+ /* >8 End of registering lsi interrupt callback. */
ret = rte_eth_macaddr_get(portid,
&lsi_ports_eth_addr[portid]);
fflush(stdout);
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ /* RX queue initialization. 8< */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
rte_eth_dev_socket_id(portid),
&rxq_conf,
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n",
ret, (unsigned) portid);
+ /* >8 End of RX queue initialization. */
- /* init one TX queue logical core on each port */
+ /* init one TX queue logical core on each port. 8< */
fflush(stdout);
txq_conf = dev_info.default_txconf;
txq_conf.offloads = local_port_conf.txmode.offloads;
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n",
ret, (unsigned) portid);
+ /* >8 End of init one TX queue. */
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",