+ nb_ports = rte_eth_dev_count_avail();
+ nb_lcores = rte_lcore_count();
+
+ /* Initialize all ports */
+ RTE_ETH_FOREACH_DEV(portid)
+ {
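+ /* Per-port copy of the template config, adjusted below */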
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ /* Skip ports that are not enabled */
+ if ((enabled_port_mask & (1 << portid)) == 0) {
+ printf("\nSkipping disabled port %d\n", portid);
+ continue;
+ }
+
+ /* Init port */
+ printf("Initializing port %d ... ", portid);
+ fflush(stdout);
+
+ nb_rx_queue = get_port_n_rx_queues(portid);
+ n_tx_queue = nb_lcores;
+ if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
+ n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+ printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+ nb_rx_queue, n_tx_queue);
+
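+ /* Query device capabilities and enable fast mbuf free on TX
+ * when the PMD supports it (mbufs are returned to their pool
+ * without per-packet refcount checks).
+ */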
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error getting info for port %d: %s\n",
+ portid, rte_strerror(-ret));
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
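+ /* Limit the requested RSS hash functions to those the
+ * hardware can actually compute.
+ */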
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on "
+ "hardware support,"
+ "requested:%#" PRIx64 " configured:%#" PRIx64
+ "\n",
+ portid, port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ ret = rte_eth_dev_configure(portid, nb_rx_queue,
+ n_tx_queue, &local_port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot configure device: err=%d, port=%d\n",
+ ret, portid);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, "
+ "port=%d\n",
+ ret, portid);
+
+ rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+ print_ethaddr(" Address:", &ports_eth_addr[portid]);
+ printf(", ");
+ print_ethaddr("Destination:",
+ (const struct rte_ether_addr *)&dest_eth_addr[portid]);
+ printf(", ");
+
+ /*
+ * Prepare the source MAC for each port: val_eth[portid] holds
+ * the destination MAC (from dest_eth_addr[]) in its first six
+ * bytes; append this port's own address as the source half.
+ */
+ rte_ether_addr_copy(&ports_eth_addr[portid],
+ (struct rte_ether_addr *)(val_eth + portid) + 1);
+
+ /* Init memory */
+ if (!per_port_pool) {
+ /* Passing portid 0 does not select the first port:
+ * init_mem() ignores the port ID and creates one
+ * pool set shared by all ports.
+ */
+ ret = init_mem(0, NB_MBUF(nb_ports));
+ } else {
+ ret = init_mem(portid, NB_MBUF(1));
+ }
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "init_mem() failed\n");
+
+ /* Init one TX queue per (lcore, port) pair */
+ queueid = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ qconf = &lcore_conf[lcore_id];
+
+ if (numa_on)
+ socketid = (uint8_t)rte_lcore_to_socket_id(
+ lcore_id);
+ else
+ socketid = 0;
+
+ printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
+ fflush(stdout);
+
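+ /* Start from the driver's default TX config and apply the
+ * offloads negotiated above.
+ */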
+ txconf = &dev_info.default_txconf;
+ txconf->offloads = local_port_conf.txmode.offloads;
+ ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
+ socketid, txconf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, "
+ "port=%d\n",
+ ret, portid);
+ queueid++;
+ }
+
+ /* Record this port's queue and mempool layout; consumed by
+ * rte_node_eth_config() below.
+ */
+ ethdev_conf[nb_conf].port_id = portid;
+ ethdev_conf[nb_conf].num_rx_queues = nb_rx_queue;
+ ethdev_conf[nb_conf].num_tx_queues = n_tx_queue;
+ if (!per_port_pool)
+ ethdev_conf[nb_conf].mp = pktmbuf_pool[0];
+ else
+ ethdev_conf[nb_conf].mp = pktmbuf_pool[portid];
+ /* Each pool array carries one mempool per NUMA socket */
+ ethdev_conf[nb_conf].mp_count = NB_SOCKETS;
+
+ nb_conf++;
+ printf("\n");
+ }
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+ qconf = &lcore_conf[lcore_id];
+ printf("\nInitializing rx queues on lcore %u ... ", lcore_id);
+ fflush(stdout);
+ /* Init RX queues */
+ for (queue = 0; queue < qconf->n_rx_queue; ++queue) {
+ struct rte_eth_rxconf rxq_conf;
+
+ portid = qconf->rx_queue_list[queue].port_id;
+ queueid = qconf->rx_queue_list[queue].queue_id;
+
+ if (numa_on)
+ socketid = (uint8_t)rte_lcore_to_socket_id(
+ lcore_id);
+ else
+ socketid = 0;
+
+ printf("rxq=%d,%d,%d ", portid, queueid, socketid);
+ fflush(stdout);
+
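+ /* Start from the driver's default RX config and apply the
+ * configured RX offloads.
+ */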
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error getting info for port %d: %s\n",
+ portid, rte_strerror(-ret));
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = port_conf.rxmode.offloads;
+ if (!per_port_pool)
+ ret = rte_eth_rx_queue_setup(
+ portid, queueid, nb_rxd, socketid,
+ &rxq_conf, pktmbuf_pool[0][socketid]);
+ else
+ ret = rte_eth_rx_queue_setup(
+ portid, queueid, nb_rxd, socketid,
+ &rxq_conf,
+ pktmbuf_pool[portid][socketid]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_rx_queue_setup: err=%d, "
+ "port=%d\n",
+ ret, portid);
+
+ /* Add this queue node to its graph */
+ snprintf(qconf->rx_queue_list[queue].node_name,
+ RTE_NODE_NAMESIZE, "ethdev_rx-%u-%u", portid,
+ queueid);
+ }
+
+ /* Count a graph for this lcore only if it has a source
+ * node, i.e. polls at least one RX queue.
+ */
+ if (qconf->n_rx_queue)
+ nb_graphs++;
+ }
+
+ printf("\n");
+
+ /* Ethdev node config: clone the ethdev rx/tx nodes for every
+ * configured port/queue. RX-queue-to-lcore mapping is not done
+ * here; it happens below via the per-lcore graph patterns.
+ */
+ ret = rte_node_eth_config(ethdev_conf, nb_conf, nb_graphs);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "rte_node_eth_config: err=%d\n", ret);
+
+ /* Start ports */
+ RTE_ETH_FOREACH_DEV(portid)
+ {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_start: err=%d, port=%d\n", ret,
+ portid);
+
+ /*
+ * If enabled, put device in promiscuous mode.
+ * This allows IO forwarding mode to forward packets
+ * to itself through 2 cross-connected ports of the
+ * target machine.
+ */
+ if (promiscuous_on)
+ rte_eth_promiscuous_enable(portid);
+ }
+
+ printf("\n");
+
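+ /* Poll until every enabled port reports link up, or the
+ * helper's internal timeout expires.
+ */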
+ check_all_ports_link_status(enabled_port_mask);
+
+ /* Graph Initialization */
+ nb_patterns = RTE_DIM(default_patterns);
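+ /* Size the pattern array for the default node patterns plus
+ * one "ethdev_rx-*" name per RX queue an lcore may poll.
+ */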
+ node_patterns = malloc((MAX_RX_QUEUE_PER_LCORE + nb_patterns) *
+ sizeof(*node_patterns));
+ if (!node_patterns)
+ return -ENOMEM;
+ memcpy(node_patterns, default_patterns,
+ nb_patterns * sizeof(*node_patterns));
+
+ memset(&graph_conf, 0, sizeof(graph_conf));
+ graph_conf.node_patterns = node_patterns;
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ rte_graph_t graph_id;
+ rte_edge_t i;
+
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ qconf = &lcore_conf[lcore_id];
+
+ /* Skip graph creation if no source exists */
+ if (!qconf->n_rx_queue)
+ continue;
+
+ /* Add rx node patterns of this lcore */
+ for (i = 0; i < qconf->n_rx_queue; i++) {
+ graph_conf.node_patterns[nb_patterns + i] =
+ qconf->rx_queue_list[i].node_name;
+ }
+
+ graph_conf.nb_node_patterns = nb_patterns + i;
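+ /* Keep the graph's memory on the worker's NUMA socket */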
+ graph_conf.socket_id = rte_lcore_to_socket_id(lcore_id);
+
+ snprintf(qconf->name, sizeof(qconf->name), "worker_%u",
+ lcore_id);
+
+ graph_id = rte_graph_create(qconf->name, &graph_conf);
+ if (graph_id == RTE_GRAPH_ID_INVALID)
+ rte_exit(EXIT_FAILURE,
+ "rte_graph_create(): graph_id invalid"
+ " for lcore %u\n", lcore_id);
+
+ qconf->graph_id = graph_id;
+ qconf->graph = rte_graph_lookup(qconf->name);
+ if (!qconf->graph)
+ rte_exit(EXIT_FAILURE,
+ "rte_graph_lookup(): graph %s not found\n",
+ qconf->name);
+ }
+
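+ /* rewrite_data is the blob the ip4_rewrite node writes over the
+ * Ethernet header: a destination/source MAC pair per next hop.
+ */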
+ memset(&rewrite_data, 0, sizeof(rewrite_data));
+ rewrite_len = sizeof(rewrite_data);
+
+ /* Add route to ip4 graph infra */
+ for (i = 0; i < IPV4_L3FWD_LPM_NUM_ROUTES; i++) {
+ char route_str[INET6_ADDRSTRLEN * 4];
+ char abuf[INET6_ADDRSTRLEN];
+ struct in_addr in;
+ uint32_t dst_port;
+
+ /* Skip routes whose egress port is not enabled */
+ if (((1 << ipv4_l3fwd_lpm_route_array[i].if_out) &
+ enabled_port_mask) == 0)
+ continue;
+
+ dst_port = ipv4_l3fwd_lpm_route_array[i].if_out;
+
+ in.s_addr = htonl(ipv4_l3fwd_lpm_route_array[i].ip);
+ snprintf(route_str, sizeof(route_str), "%s / %d (%d)",
+ inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
+ ipv4_l3fwd_lpm_route_array[i].depth,
+ ipv4_l3fwd_lpm_route_array[i].if_out);
+
+ /* Use route index 'i' as next hop id */
+ ret = rte_node_ip4_route_add(
+ ipv4_l3fwd_lpm_route_array[i].ip,
+ ipv4_l3fwd_lpm_route_array[i].depth, i,
+ RTE_NODE_IP4_LOOKUP_NEXT_REWRITE);
+
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Unable to add ip4 route %s to graph\n",
+ route_str);
+
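+ /* Copy the precomputed dst+src MAC pair for the egress port */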
+ memcpy(rewrite_data, val_eth + dst_port, rewrite_len);
+
+ /* Add next hop rewrite data for id 'i' */
+ ret = rte_node_ip4_rewrite_add(i, rewrite_data,
+ rewrite_len, dst_port);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Unable to add next hop %u for "
+ "route %s\n", i, route_str);
+
+ RTE_LOG(INFO, L3FWD_GRAPH, "Added route %s, next_hop %u\n",
+ route_str, i);
+ }
+
+ /* Launch graph_main_loop() on every worker lcore */
+ rte_eal_mp_remote_launch(graph_main_loop, NULL, SKIP_MAIN);
+
+ /* Accumulate and print stats on main until exit */
+ if (rte_graph_has_stats_feature())
+ print_stats();
+
+ /* Wait for worker cores to exit */
+ ret = 0;
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
+ ret = rte_eal_wait_lcore(lcore_id);
+ /* Destroy graph */
+ if (ret < 0 || rte_graph_destroy(
+ rte_graph_from_name(lcore_conf[lcore_id].name))) {
+ ret = -1;
+ break;
+ }
+ }
+ free(node_patterns);
+
+ /* Stop ports */
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("Closing port %d...", portid);
+ ret = rte_eth_dev_stop(portid);
+ if (ret != 0)
+ printf("Failed to stop port %u: %s\n",
+ portid, rte_strerror(-ret));
+ rte_eth_dev_close(portid);
+ printf(" Done\n");
+ }